Deprecated API


Contents

    Deprecated Interfaces
    Deprecated Classes
    Deprecated Exceptions
    Deprecated Fields
    Deprecated Methods
    Deprecated Constructors

Deprecated Interfaces
org.apache.hadoop.io.Closeable
          Use java.io.Closeable instead.
org.apache.hadoop.mapred.lib.db.DBWritable
          Use org.apache.hadoop.mapreduce.lib.db.DBWritable instead; a minimal sketch follows this list.
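
Migration sketch: a minimal implementation of the replacement interface. The class name IdRecord and the single integer column "id" are hypothetical.

    import java.sql.PreparedStatement;
    import java.sql.ResultSet;
    import java.sql.SQLException;

    import org.apache.hadoop.mapreduce.lib.db.DBWritable;

    // Hypothetical record class for a table with one integer column "id".
    public class IdRecord implements DBWritable {
        private int id;

        public void write(PreparedStatement statement) throws SQLException {
            statement.setInt(1, id); // bind fields in column order
        }

        public void readFields(ResultSet resultSet) throws SQLException {
            id = resultSet.getInt("id"); // read fields back by column name
        }
    }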
 

Deprecated Classes
org.apache.hadoop.mapred.lib.BinaryPartitioner
          Use org.apache.hadoop.mapreduce.lib.partition.BinaryPartitioner instead.
org.apache.hadoop.mapred.lib.db.DBConfiguration
          Use org.apache.hadoop.mapreduce.lib.db.DBConfiguration instead.
org.apache.hadoop.mapred.lib.db.DBInputFormat
          Use org.apache.hadoop.mapreduce.lib.db.DBInputFormat instead.
org.apache.hadoop.mapred.lib.db.DBOutputFormat
          Use org.apache.hadoop.mapreduce.lib.db.DBOutputFormat instead.
org.apache.hadoop.fs.InMemoryFileSystem
           
org.apache.hadoop.mapred.LineRecordReader.LineReader
          Use org.apache.hadoop.util.LineReader instead.
org.apache.hadoop.mapred.MultiFileInputFormat
          Use org.apache.hadoop.mapred.lib.CombineFileInputFormat instead.
org.apache.hadoop.mapred.MultiFileSplit
          Use org.apache.hadoop.mapred.lib.CombineFileSplit instead.
org.apache.hadoop.mapred.lib.MultipleOutputFormat
          Use org.apache.hadoop.mapreduce.lib.output.MultipleOutputs instead.
org.apache.hadoop.mapred.lib.MultipleOutputs
          Use org.apache.hadoop.mapreduce.lib.output.MultipleOutputs instead; a usage sketch follows this list.
org.apache.hadoop.mapred.lib.MultipleSequenceFileOutputFormat
          Use org.apache.hadoop.mapreduce.lib.output.MultipleOutputs instead.
org.apache.hadoop.mapred.lib.MultipleTextOutputFormat
          Use org.apache.hadoop.mapreduce.lib.output.MultipleOutputs instead.
org.apache.hadoop.io.UTF8
          Replaced by org.apache.hadoop.io.Text; see the example after this list.
org.apache.hadoop.streaming.UTF8ByteArrayUtils
          Use org.apache.hadoop.util.UTF8ByteArrayUtils and org.apache.hadoop.streaming.StreamKeyValUtil instead.
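
Example for UTF8: migrating to Text is normally a drop-in rename. A minimal sketch; the class name and sample string are illustrative.

    import org.apache.hadoop.io.Text;

    public class TextMigration {
        public static void main(String[] args) {
            // Before (deprecated): UTF8 key = new UTF8("hello");
            Text key = new Text("hello");
            System.out.println(key.getLength() + " bytes: " + key);
        }
    }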
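
Usage sketch for the consolidated replacement, org.apache.hadoop.mapreduce.lib.output.MultipleOutputs, on the reducer side. The reducer class, the named output "counts", and the driver registration shown in the comment are assumptions for illustration.

    import java.io.IOException;

    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Reducer;
    import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs;

    public class CountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
        private MultipleOutputs<Text, IntWritable> mos;

        // Driver side (hypothetical job and output name):
        //   MultipleOutputs.addNamedOutput(job, "counts",
        //       TextOutputFormat.class, Text.class, IntWritable.class);

        protected void setup(Context context) {
            mos = new MultipleOutputs<Text, IntWritable>(context);
        }

        protected void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable v : values) {
                sum += v.get();
            }
            mos.write("counts", key, new IntWritable(sum)); // routed to the named output
        }

        protected void cleanup(Context context) throws IOException, InterruptedException {
            mos.close(); // flush and close all named outputs
        }
    }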
 

Deprecated Exceptions
org.apache.hadoop.fs.permission.AccessControlException
          Use org.apache.hadoop.security.AccessControlException instead; a catch sketch follows.
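
A minimal catch sketch against the relocated class; the path and the surrounding program are hypothetical.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.security.AccessControlException;

    public class PermissionCheck {
        public static void main(String[] args) throws IOException {
            FileSystem fs = FileSystem.get(new Configuration());
            try {
                fs.delete(new Path("/tmp/example"), true); // hypothetical path
            } catch (AccessControlException e) { // formerly in org.apache.hadoop.fs.permission
                System.err.println("Permission denied: " + e.getMessage());
            }
        }
    }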
 

Deprecated Fields
org.apache.hadoop.mapred.JobConf.MAPRED_TASK_DEFAULT_MAXVMEM_PROPERTY
            
org.apache.hadoop.mapred.JobConf.MAPRED_TASK_MAXPMEM_PROPERTY
            
org.apache.hadoop.mapred.JobConf.MAPRED_TASK_MAXVMEM_PROPERTY
            
org.apache.hadoop.mapred.JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY
            
 

Deprecated Methods
org.apache.hadoop.http.HttpServer.addInternalServlet(String, String, Class)
          This is a temporary method.
org.apache.hadoop.http.HttpServer.addSslListener(InetSocketAddress, String, String, String)
          Use HttpServer.addSslListener(InetSocketAddress, Configuration, boolean) 
org.apache.hadoop.fs.FsShell.byteDesc(long)
          Consider using StringUtils.byteDesc(long) instead. 
org.apache.hadoop.ipc.RPC.call(Method, Object[][], InetSocketAddress[], Configuration)
          Use RPC.call(Method, Object[][], InetSocketAddress[], UserGroupInformation, Configuration) instead 
org.apache.hadoop.ipc.Client.call(Writable[], InetSocketAddress[])
          Use Client.call(Writable[], InetSocketAddress[], Class, UserGroupInformation) instead 
org.apache.hadoop.ipc.Client.call(Writable, InetSocketAddress)
          Use Client.call(Writable, InetSocketAddress, Class, UserGroupInformation) instead 
org.apache.hadoop.ipc.Client.call(Writable, InetSocketAddress, UserGroupInformation)
          Use Client.call(Writable, InetSocketAddress, Class, UserGroupInformation) instead 
org.apache.hadoop.ipc.Server.call(Writable, long)
          Use Server.call(Class, Writable, long) instead 
org.apache.hadoop.io.WritableUtils.cloneInto(Writable, Writable)
          Use ReflectionUtils.cloneInto instead.
org.apache.hadoop.util.ReflectionUtils.cloneWritableInto(Writable, Writable)
           
org.apache.hadoop.mapreduce.lib.db.DBRecordReader.createValue()
            
org.apache.hadoop.fs.FileSystem.delete(Path)
          Use delete(Path, boolean) instead; see the migration sketch after this list.
org.apache.hadoop.fs.FilterFileSystem.delete(Path)
           
org.apache.hadoop.fs.RawLocalFileSystem.delete(Path)
           
org.apache.hadoop.fs.ftp.FTPFileSystem.delete(Path)
          Use delete(Path, boolean) instead 
org.apache.hadoop.fs.kfs.KosmosFileSystem.delete(Path)
           
org.apache.hadoop.fs.s3.S3FileSystem.delete(Path)
           
org.apache.hadoop.fs.s3native.NativeS3FileSystem.delete(Path)
           
org.apache.hadoop.streaming.UTF8ByteArrayUtils.findByte(byte[], int, int, byte)
          Use org.apache.hadoop.util.UTF8ByteArrayUtils.findByte(byte[], int, int, byte) instead.
org.apache.hadoop.streaming.UTF8ByteArrayUtils.findBytes(byte[], int, int, byte[])
          Use org.apache.hadoop.util.UTF8ByteArrayUtils.findBytes(byte[], int, int, byte[]) instead.
org.apache.hadoop.mapred.Counters.findCounter(String, int, String)
            
org.apache.hadoop.streaming.UTF8ByteArrayUtils.findNthByte(byte[], byte, int)
          Use org.apache.hadoop.util.UTF8ByteArrayUtils.findNthByte(byte[], byte, int) instead.
org.apache.hadoop.streaming.UTF8ByteArrayUtils.findNthByte(byte[], int, int, byte, int)
          Use org.apache.hadoop.util.UTF8ByteArrayUtils.findNthByte(byte[], int, int, byte, int) instead.
org.apache.hadoop.streaming.UTF8ByteArrayUtils.findTab(byte[])
          Use StreamKeyValUtil.findTab(byte[]) instead.
org.apache.hadoop.streaming.UTF8ByteArrayUtils.findTab(byte[], int, int)
          Use StreamKeyValUtil.findTab(byte[], int, int) instead.
org.apache.hadoop.fs.FileUtil.fullyDelete(FileSystem, Path)
          Use FileSystem.delete(Path, boolean) 
org.apache.hadoop.io.BytesWritable.get()
          Use BytesWritable.getBytes() instead. 
org.apache.hadoop.fs.FileSystem.getBlockSize(Path)
          Use getFileStatus() instead; see the sketch after this list.
org.apache.hadoop.streaming.StreamJob.getClusterNick()
           
org.apache.hadoop.mapred.JobTracker.getClusterStatus()
          Use JobTracker.getClusterStatus(boolean) instead.
org.apache.hadoop.io.SequenceFile.getCompressionType(Configuration)
          Use SequenceFileOutputFormat.getOutputCompressionType(org.apache.hadoop.mapred.JobConf) to get the SequenceFile.CompressionType for job outputs.
org.apache.hadoop.mapred.Counters.Group.getCounter(int, String)
          Use Counters.Group.getCounter(String) instead.
org.apache.hadoop.mapred.JobClient.getJob(String)
          Applications should use JobClient.getJob(JobID) instead.
org.apache.hadoop.mapred.JobProfile.getJobId()
          Use getJobID() instead.
org.apache.hadoop.mapred.JobStatus.getJobId()
          Use getJobID() instead.
org.apache.hadoop.mapred.RunningJob.getJobID()
          This method will be removed; applications should use RunningJob.getID() instead.
org.apache.hadoop.mapred.JobID.getJobIDsPattern(String, Integer)
           
org.apache.hadoop.fs.FileSystem.getLength(Path)
          Use getFileStatus() instead 
org.apache.hadoop.fs.kfs.KosmosFileSystem.getLength(Path)
           
org.apache.hadoop.mapred.jobcontrol.Job.getMapredJobID()
          Use Job.getAssignedJobID() instead.
org.apache.hadoop.mapred.JobClient.getMapTaskReports(String)
          Applications should use JobClient.getMapTaskReports(JobID) instead.
org.apache.hadoop.mapred.JobConf.getMaxPhysicalMemoryForTask()
          Deprecated and no longer in use.
org.apache.hadoop.mapred.JobConf.getMaxVirtualMemoryForTask()
          Use JobConf.getMemoryForMapTask() and JobConf.getMemoryForReduceTask() instead; see the memory sketch after this list.
org.apache.hadoop.fs.FileSystem.getName()
          Call getUri() instead.
org.apache.hadoop.fs.FilterFileSystem.getName()
          Call getUri() instead.
org.apache.hadoop.fs.kfs.KosmosFileSystem.getName()
           
org.apache.hadoop.fs.FileSystem.getNamed(String, Configuration)
          Call get(URI, Configuration) instead.
org.apache.hadoop.mapreduce.lib.db.DBRecordReader.getPos()
            
org.apache.hadoop.mapred.JobClient.getReduceTaskReports(String)
          Applications should use JobClient.getReduceTaskReports(JobID) instead.
org.apache.hadoop.fs.FileSystem.getReplication(Path)
          Use getFileStatus() instead 
org.apache.hadoop.fs.kfs.KosmosFileSystem.getReplication(Path)
           
org.apache.hadoop.net.NetUtils.getServerAddress(Configuration, String, String, String)
           
org.apache.hadoop.io.BytesWritable.getSize()
          Use BytesWritable.getLength() instead. 
org.apache.hadoop.fs.FileSystem.getStatistics()
          Use FileSystem.getAllStatistics() instead.
org.apache.hadoop.mapred.TaskAttemptID.getTaskAttemptIDsPattern(String, Integer, Boolean, Integer, Integer)
           
org.apache.hadoop.mapred.TaskCompletionEvent.getTaskId()
          Use TaskCompletionEvent.getTaskAttemptId() instead.
org.apache.hadoop.mapred.TaskReport.getTaskId()
          Use TaskReport.getTaskID() instead.
org.apache.hadoop.mapred.TaskID.getTaskIDsPattern(String, Integer, Boolean, Integer)
           
org.apache.hadoop.mapred.JobClient.getTaskOutputFilter()
           
org.apache.hadoop.streaming.StreamJob.go()
          Use StreamJob.run(String[]) instead.
org.apache.hadoop.fs.FileSystem.isDirectory(Path)
          Use getFileStatus() instead 
org.apache.hadoop.fs.kfs.KosmosFileSystem.isDirectory(Path)
           
org.apache.hadoop.fs.kfs.KosmosFileSystem.isFile(Path)
           
org.apache.hadoop.mapred.RunningJob.killTask(String, boolean)
          Applications should use RunningJob.killTask(TaskAttemptID, boolean) instead.
org.apache.hadoop.fs.FsShell.limitDecimalTo2(double)
          Consider using StringUtils.limitDecimalTo2(double) instead. 
org.apache.hadoop.fs.kfs.KosmosFileSystem.lock(Path, boolean)
           
org.apache.hadoop.mapred.JobHistory.MapAttempt.logFailed(TaskAttemptID, long, String, String)
          Use JobHistory.MapAttempt.logFailed(TaskAttemptID, long, String, String, String) 
org.apache.hadoop.mapred.JobHistory.ReduceAttempt.logFailed(TaskAttemptID, long, String, String)
          Use JobHistory.ReduceAttempt.logFailed(TaskAttemptID, long, String, String, String) 
org.apache.hadoop.mapred.JobHistory.ReduceAttempt.logFinished(TaskAttemptID, long, long, long, String)
          Use JobHistory.ReduceAttempt.logFinished(TaskAttemptID, long, long, long, String, String, String, Counters) 
org.apache.hadoop.mapred.JobHistory.MapAttempt.logFinished(TaskAttemptID, long, String)
          Use JobHistory.MapAttempt.logFinished(TaskAttemptID, long, String, String, String, Counters) 
org.apache.hadoop.mapred.JobHistory.JobInfo.logJobInfo(JobID, long, long, int)
          Use JobHistory.JobInfo.logJobInfo(JobID, long, long) instead. 
org.apache.hadoop.mapred.JobHistory.MapAttempt.logKilled(TaskAttemptID, long, String, String)
          Use JobHistory.MapAttempt.logKilled(TaskAttemptID, long, String, String, String) 
org.apache.hadoop.mapred.JobHistory.ReduceAttempt.logKilled(TaskAttemptID, long, String, String)
          Use JobHistory.ReduceAttempt.logKilled(TaskAttemptID, long, String, String, String) 
org.apache.hadoop.mapred.JobHistory.JobInfo.logStarted(JobID, long, int, int)
          Use JobHistory.JobInfo.logInited(JobID, long, int, int) and JobHistory.JobInfo.logStarted(JobID) 
org.apache.hadoop.mapred.JobHistory.MapAttempt.logStarted(TaskAttemptID, long, String)
          Use JobHistory.MapAttempt.logStarted(TaskAttemptID, long, String, int, String) 
org.apache.hadoop.mapred.JobHistory.ReduceAttempt.logStarted(TaskAttemptID, long, String)
          Use JobHistory.ReduceAttempt.logStarted(TaskAttemptID, long, String, int, String) 
org.apache.hadoop.mapred.JobHistory.JobInfo.logSubmitted(JobID, JobConf, String, long)
          Use JobHistory.JobInfo.logSubmitted(JobID, JobConf, String, long, boolean) instead. 
org.apache.hadoop.io.SequenceFile.Reader.next(DataOutputBuffer)
          Call SequenceFile.Reader.nextRaw(DataOutputBuffer, SequenceFile.ValueBytes) instead.
org.apache.hadoop.mapreduce.lib.db.DBRecordReader.next(LongWritable, T)
          Use DBRecordReader.nextKeyValue() 
org.apache.hadoop.mapred.TaskAttemptID.read(DataInput)
           
org.apache.hadoop.mapred.TaskID.read(DataInput)
           
org.apache.hadoop.mapred.JobID.read(DataInput)
           
org.apache.hadoop.streaming.UTF8ByteArrayUtils.readLine(LineReader, Text)
          Use StreamKeyValUtil.readLine(LineReader, Text) instead.
org.apache.hadoop.fs.kfs.KosmosFileSystem.release(Path)
           
org.apache.hadoop.io.SequenceFile.setCompressionType(Configuration, SequenceFile.CompressionType)
          Use one of the many SequenceFile.createWriter methods to specify the SequenceFile.CompressionType while creating the SequenceFile, or SequenceFileOutputFormat.setOutputCompressionType(org.apache.hadoop.mapred.JobConf, org.apache.hadoop.io.SequenceFile.CompressionType) to specify it for job outputs; see the sketch after this list.
org.apache.hadoop.security.UserGroupInformation.setCurrentUGI(UserGroupInformation)
          Use UserGroupInformation.setCurrentUser(UserGroupInformation) 
org.apache.hadoop.mapreduce.Counter.setDisplayName(String)
           
org.apache.hadoop.mapred.jobcontrol.Job.setMapredJobID(String)
          Use Job.setAssignedJobID(JobID) instead.
org.apache.hadoop.mapred.JobConf.setMaxPhysicalMemoryForTask(long)
           
org.apache.hadoop.mapred.JobConf.setMaxVirtualMemoryForTask(long)
          Use JobConf.setMemoryForMapTask(long mem) and JobConf.setMemoryForReduceTask(long mem) instead.
org.apache.hadoop.mapred.TaskCompletionEvent.setTaskId(String)
          Use TaskCompletionEvent.setTaskID(TaskAttemptID) instead.
org.apache.hadoop.mapred.JobClient.setTaskOutputFilter(JobClient.TaskStatusFilter)
           
org.apache.hadoop.streaming.UTF8ByteArrayUtils.splitKeyVal(byte[], int, int, Text, Text, int)
          Use StreamKeyValUtil.splitKeyVal(byte[], int, int, Text, Text, int) instead.
org.apache.hadoop.streaming.UTF8ByteArrayUtils.splitKeyVal(byte[], int, int, Text, Text, int, int)
          Use StreamKeyValUtil.splitKeyVal(byte[], int, int, Text, Text, int, int) instead.
org.apache.hadoop.streaming.UTF8ByteArrayUtils.splitKeyVal(byte[], Text, Text, int)
          Use StreamKeyValUtil.splitKeyVal(byte[], Text, Text, int) instead.
org.apache.hadoop.streaming.UTF8ByteArrayUtils.splitKeyVal(byte[], Text, Text, int, int)
          Use StreamKeyValUtil.splitKeyVal(byte[], Text, Text, int, int) instead.
org.apache.hadoop.mapred.pipes.Submitter.submitJob(JobConf)
          Use Submitter.runJob(JobConf) 
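
Migration sketch for the delete(Path) entries above: the replacement takes an explicit recursive flag (the old overload deleted recursively). The path is hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class DeleteMigration {
        public static void main(String[] args) throws Exception {
            FileSystem fs = FileSystem.get(new Configuration());
            Path dir = new Path("/tmp/old-output"); // hypothetical path
            // Before (deprecated): fs.delete(dir);
            boolean deleted = fs.delete(dir, true); // true = delete recursively
            System.out.println("deleted: " + deleted);
        }
    }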
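
Migration sketch for the getLength(Path), getBlockSize(Path), getReplication(Path), and isDirectory(Path) entries: a single getFileStatus() call now yields all of these. The path is hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class StatusMigration {
        public static void main(String[] args) throws Exception {
            FileSystem fs = FileSystem.get(new Configuration());
            Path p = new Path("/tmp/data.seq"); // hypothetical path
            FileStatus status = fs.getFileStatus(p); // one call replaces four
            System.out.println("length:      " + status.getLen());
            System.out.println("block size:  " + status.getBlockSize());
            System.out.println("replication: " + status.getReplication());
            System.out.println("directory:   " + status.isDir());
        }
    }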
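
Memory sketch for the JobConf entries above, using the per-task-type setters named in this list; the values are illustrative and, as I understand these APIs, given in megabytes.

    import org.apache.hadoop.mapred.JobConf;

    public class MemoryConfig {
        public static void main(String[] args) {
            JobConf conf = new JobConf();
            // Before (deprecated): conf.setMaxVirtualMemoryForTask(...);
            conf.setMemoryForMapTask(1024L);    // map task memory (illustrative value)
            conf.setMemoryForReduceTask(2048L); // reduce task memory (illustrative value)
        }
    }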
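
Sketch for SequenceFile.setCompressionType: pass the SequenceFile.CompressionType to createWriter when the file is created. The path, key/value types, and sample record are hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.SequenceFile;
    import org.apache.hadoop.io.Text;

    public class CompressedWriter {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(conf);
            Path out = new Path("/tmp/example.seq"); // hypothetical path
            // Before (deprecated): SequenceFile.setCompressionType(conf, CompressionType.BLOCK);
            SequenceFile.Writer writer = SequenceFile.createWriter(
                fs, conf, out, Text.class, IntWritable.class,
                SequenceFile.CompressionType.BLOCK); // compression chosen at creation
            writer.append(new Text("key"), new IntWritable(1));
            writer.close();
        }
    }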
 

Deprecated Constructors
org.apache.hadoop.mapred.FileSplit(Path, long, long, JobConf)
            
org.apache.hadoop.fs.FSDataOutputStream(OutputStream)
           
org.apache.hadoop.mapred.JobProfile(String, String, String, String, String)
          Use JobProfile(String, JobID, String, String, String) instead.
org.apache.hadoop.io.SetFile.Writer(FileSystem, String, Class)
          Pass a Configuration too.
org.apache.hadoop.streaming.StreamJob(String[], boolean)
          Use StreamJob() with ToolRunner, or set the Configuration using StreamJob.setConf(Configuration) and run with StreamJob.run(String[]); see the sketch below.
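
A minimal launcher sketch following the note above, assuming StreamJob implements Tool in this release (its ToolRunner mention suggests it does).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.streaming.StreamJob;
    import org.apache.hadoop.util.ToolRunner;

    public class StreamLauncher {
        public static void main(String[] args) throws Exception {
            // Before (deprecated): new StreamJob(args, true).go();
            int exitCode = ToolRunner.run(new Configuration(), new StreamJob(), args);
            System.exit(exitCode);
        }
    }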
 



Copyright © 2009 The Apache Software Foundation