001 /** 002 * Copyright (c) 2010 Yahoo! Inc. All rights reserved. 003 * Licensed under the Apache License, Version 2.0 (the "License"); 004 * you may not use this file except in compliance with the License. 005 * You may obtain a copy of the License at 006 * 007 * http://www.apache.org/licenses/LICENSE-2.0 008 * 009 * Unless required by applicable law or agreed to in writing, software 010 * distributed under the License is distributed on an "AS IS" BASIS, 011 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 012 * See the License for the specific language governing permissions and 013 * limitations under the License. See accompanying LICENSE file. 014 */ 015 package org.apache.oozie.service; 016 017 import org.apache.hadoop.mapred.JobClient; 018 import org.apache.hadoop.mapred.JobConf; 019 import org.apache.hadoop.fs.FileSystem; 020 import org.apache.hadoop.fs.Path; 021 import org.apache.hadoop.conf.Configuration; 022 import org.apache.hadoop.security.UserGroupInformation; 023 import org.apache.hadoop.security.token.Token; 024 import org.apache.hadoop.filecache.DistributedCache; 025 import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier; 026 import org.apache.hadoop.io.Text; 027 import org.apache.oozie.util.XLog; 028 import org.apache.oozie.util.XConfiguration; 029 import org.apache.oozie.util.ParamChecker; 030 import org.apache.oozie.ErrorCode; 031 import org.apache.oozie.service.HadoopAccessorService; 032 import org.apache.oozie.service.HadoopAccessorException; 033 import org.apache.oozie.service.Service; 034 import org.apache.oozie.service.ServiceException; 035 036 import java.io.IOException; 037 import java.net.URI; 038 import java.net.URISyntaxException; 039 import java.security.PrivilegedExceptionAction; 040 import java.util.concurrent.ConcurrentMap; 041 import java.util.concurrent.ConcurrentHashMap; 042 043 /** 044 * The HadoopAccessorService returns HadoopAccessor instances configured to work on behalf of a user-group. <p/> The 045 * default accessor used is the base accessor which just injects the UGI into the configuration instance used to 046 * create/obtain JobClient and ileSystem instances. <p/> The HadoopAccess class to use can be configured in the 047 * <code>oozie-site.xml</code> using the <code>oozie.service.HadoopAccessorService.accessor.class</code> property. 048 */ 049 public class KerberosHadoopAccessorService extends HadoopAccessorService { 050 051 public static final String CONF_PREFIX = Service.CONF_PREFIX + "HadoopAccessorService."; 052 053 public static final String KERBEROS_AUTH_ENABLED = CONF_PREFIX + "kerberos.enabled"; 054 public static final String KERBEROS_KEYTAB = CONF_PREFIX + "keytab.file"; 055 public static final String KERBEROS_PRINCIPAL = CONF_PREFIX + "kerberos.principal"; 056 057 private ConcurrentMap<String, UserGroupInformation> userUgiMap; 058 059 private String localRealm; 060 061 public void init(Configuration serviceConf) throws ServiceException { 062 boolean kerberosAuthOn = serviceConf.getBoolean(KERBEROS_AUTH_ENABLED, true); 063 XLog.getLog(getClass()).info("Oozie Kerberos Authentication [{0}]", (kerberosAuthOn) ? "enabled" : "disabled"); 064 if (kerberosAuthOn) { 065 try { 066 String keytabFile = serviceConf.get(KERBEROS_KEYTAB, 067 System.getProperty("user.home") + "/oozie.keytab").trim(); 068 if (keytabFile.length() == 0) { 069 throw new ServiceException(ErrorCode.E0026, KERBEROS_KEYTAB); 070 } 071 String principal = serviceConf.get(KERBEROS_PRINCIPAL, "oozie/localhost@LOCALHOST"); 072 if (principal.length() == 0) { 073 throw new ServiceException(ErrorCode.E0026, KERBEROS_PRINCIPAL); 074 } 075 Configuration conf = new Configuration(); 076 conf.set("hadoop.security.authentication", "kerberos"); 077 UserGroupInformation.setConfiguration(conf); 078 UserGroupInformation.loginUserFromKeytab(principal, keytabFile); 079 XLog.getLog(getClass()).info("Got Kerberos ticket, keytab [{0}], Oozie principal principal [{1}]", 080 keytabFile, principal); 081 } 082 catch (ServiceException ex) { 083 throw ex; 084 } 085 catch (Exception ex) { 086 throw new ServiceException(ErrorCode.E0100, getClass().getName(), ex.getMessage(), ex); 087 } 088 } 089 else { 090 Configuration conf = new Configuration(); 091 conf.set("hadoop.security.authentication", "simple"); 092 UserGroupInformation.setConfiguration(conf); 093 } 094 localRealm = serviceConf.get("local.realm"); 095 096 userUgiMap = new ConcurrentHashMap<String, UserGroupInformation>(); 097 } 098 099 public void destroy() { 100 userUgiMap = null; 101 super.destroy(); 102 } 103 104 private UserGroupInformation getUGI(String user) throws IOException { 105 UserGroupInformation ugi = userUgiMap.get(user); 106 if (ugi == null) { 107 // taking care of a race condition, the latest UGI will be discarded 108 ugi = UserGroupInformation.createProxyUser(user, UserGroupInformation.getLoginUser()); 109 userUgiMap.putIfAbsent(user, ugi); 110 } 111 return ugi; 112 } 113 114 /** 115 * Return a JobClient created with the provided user/group. 116 * 117 * @param conf JobConf with all necessary information to create the JobClient. 118 * @return JobClient created with the provided user/group. 119 * @throws HadoopAccessorException if the client could not be created. 120 */ 121 public JobClient createJobClient(String user, String group, final JobConf conf) throws HadoopAccessorException { 122 ParamChecker.notEmpty(user, "user"); 123 ParamChecker.notEmpty(group, "group"); 124 validateJobTracker(conf.get("mapred.job.tracker")); 125 try { 126 UserGroupInformation ugi = getUGI(user); 127 JobClient jobClient = ugi.doAs(new PrivilegedExceptionAction<JobClient>() { 128 public JobClient run() throws Exception { 129 validateConf(conf); 130 return new JobClient(conf); 131 } 132 }); 133 Token<DelegationTokenIdentifier> mrdt = jobClient.getDelegationToken(new Text("mr token")); 134 conf.getCredentials().addToken(new Text("mr token"), mrdt); 135 return jobClient; 136 } 137 catch (InterruptedException ex) { 138 throw new HadoopAccessorException(ErrorCode.E0902, ex); 139 } 140 catch (IOException ex) { 141 throw new HadoopAccessorException(ErrorCode.E0902, ex); 142 } 143 } 144 145 /** 146 * Return a FileSystem created with the provided user/group. 147 * 148 * @param conf Configuration with all necessary information to create the FileSystem. 149 * @return FileSystem created with the provided user/group. 150 * @throws HadoopAccessorException if the filesystem could not be created. 151 */ 152 public FileSystem createFileSystem(String user, String group, final Configuration conf) 153 throws HadoopAccessorException { 154 ParamChecker.notEmpty(user, "user"); 155 ParamChecker.notEmpty(group, "group"); 156 try { 157 validateNameNode(new URI(conf.get("fs.default.name")).getAuthority()); 158 UserGroupInformation ugi = getUGI(user); 159 return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() { 160 public FileSystem run() throws Exception { 161 Configuration defaultConf = new Configuration(); 162 XConfiguration.copy(conf, defaultConf); 163 validateConf(conf); 164 return FileSystem.get(defaultConf); 165 } 166 }); 167 } 168 catch (InterruptedException ex) { 169 throw new HadoopAccessorException(ErrorCode.E0902, ex); 170 } 171 catch (IOException ex) { 172 throw new HadoopAccessorException(ErrorCode.E0902, ex); 173 } 174 catch (URISyntaxException ex) { 175 throw new HadoopAccessorException(ErrorCode.E0902, ex); 176 } 177 } 178 179 /** 180 * Return a FileSystem created with the provided user/group for the specified URI. 181 * 182 * @param uri file system URI. 183 * @param conf Configuration with all necessary information to create the FileSystem. 184 * @return FileSystem created with the provided user/group. 185 * @throws HadoopAccessorException if the filesystem could not be created. 186 */ 187 public FileSystem createFileSystem(String user, String group, final URI uri, final Configuration conf) 188 throws HadoopAccessorException { 189 ParamChecker.notEmpty(user, "user"); 190 ParamChecker.notEmpty(group, "group"); 191 validateNameNode(uri.getAuthority()); 192 try { 193 UserGroupInformation ugi = getUGI(user); 194 return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() { 195 public FileSystem run() throws Exception { 196 Configuration defaultConf = new Configuration(); 197 198 defaultConf.set(WorkflowAppService.HADOOP_JT_KERBEROS_NAME, "mapred/_HOST@" + localRealm); 199 defaultConf.set(WorkflowAppService.HADOOP_NN_KERBEROS_NAME, "hdfs/_HOST@" + localRealm); 200 201 XConfiguration.copy(conf, defaultConf); 202 validateConf(conf); 203 return FileSystem.get(uri, defaultConf); 204 } 205 }); 206 } 207 catch (InterruptedException ex) { 208 throw new HadoopAccessorException(ErrorCode.E0902, ex); 209 } 210 catch (IOException ex) { 211 throw new HadoopAccessorException(ErrorCode.E0902, ex); 212 } 213 } 214 215 private void validateConf(Configuration conf) throws HadoopAccessorException { 216 if (conf.get(WorkflowAppService.HADOOP_JT_KERBEROS_NAME) == null) { 217 throw new HadoopAccessorException(ErrorCode.E0903); 218 } 219 if (conf.get(WorkflowAppService.HADOOP_NN_KERBEROS_NAME) == null) { 220 throw new HadoopAccessorException(ErrorCode.E0904); 221 } 222 } 223 224 }