Deprecated Methods
org.apache.hadoop.conf.Configuration.addDeprecation(String, String[])
use Configuration.addDeprecation(String key, String newKey) instead |
org.apache.hadoop.conf.Configuration.addDeprecation(String, String[], String)
use Configuration.addDeprecation(String key, String newKey,
String customMessage) instead |
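Both overloads migrate to the single-new-key forms. A minimal sketch, using placeholder key names (not real Hadoop configuration keys):

    import org.apache.hadoop.conf.Configuration;

    public class DeprecationMigration {
      public static void main(String[] args) {
        // One call per mapping replaces the String[] overload
        // ("old.key" / "new.key" are illustrative names only).
        Configuration.addDeprecation("old.key", "new.key");
        Configuration.addDeprecation("old.noisy.key", "new.noisy.key",
            "old.noisy.key is deprecated; set new.noisy.key instead");

        Configuration conf = new Configuration();
        conf.set("old.key", "42");               // logs a deprecation warning
        System.out.println(conf.get("new.key")); // resolves through the new key
      }
    }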
org.apache.hadoop.filecache.DistributedCache.addLocalArchives(Configuration, String)
|
org.apache.hadoop.filecache.DistributedCache.addLocalFiles(Configuration, String)
|
org.apache.hadoop.mapred.JobClient.cancelDelegationToken(Token)
Use Token.cancel(org.apache.hadoop.conf.Configuration) instead |
org.apache.hadoop.mapreduce.Cluster.cancelDelegationToken(Token)
Use Token.cancel(org.apache.hadoop.conf.Configuration) instead |
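Both cancelDelegationToken entries (and the renewDelegationToken entries further down) migrate to methods on the token itself. A minimal sketch, assuming a delegation token obtained elsewhere:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.token.Token;

    class TokenLifecycle {
      // Replaces JobClient/Cluster.cancelDelegationToken and
      // JobClient/Cluster.renewDelegationToken.
      static void renewThenCancel(Token<?> token, Configuration conf) throws Exception {
        long nextExpiry = token.renew(conf);  // contacts the issuing service
        token.cancel(conf);                   // invalidates the token
      }
    }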
org.apache.hadoop.mapred.FileOutputCommitter.cleanupJob(JobContext)
|
org.apache.hadoop.mapred.OutputCommitter.cleanupJob(JobContext)
Use OutputCommitter.commitJob(JobContext) or
OutputCommitter.abortJob(JobContext, int) instead. |
org.apache.hadoop.mapred.OutputCommitter.cleanupJob(JobContext)
Use OutputCommitter.commitJob(org.apache.hadoop.mapreduce.JobContext)
or OutputCommitter.abortJob(org.apache.hadoop.mapreduce.JobContext, org.apache.hadoop.mapreduce.JobStatus.State)
instead. |
org.apache.hadoop.mapreduce.OutputCommitter.cleanupJob(JobContext)
Use OutputCommitter.commitJob(JobContext) and
OutputCommitter.abortJob(JobContext, JobStatus.State) instead. |
org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter.cleanupJob(JobContext)
|
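The cleanupJob entries above all migrate the same way: move success-path work into commitJob and failure-path work into abortJob. A sketch against the org.apache.hadoop.mapreduce API:

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.JobContext;
    import org.apache.hadoop.mapreduce.JobStatus;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;

    public class MigratedCommitter extends FileOutputCommitter {
      public MigratedCommitter(Path output, TaskAttemptContext context) throws IOException {
        super(output, context);
      }

      @Override
      public void commitJob(JobContext context) throws IOException {
        super.commitJob(context);
        // former cleanupJob() logic for successful jobs goes here
      }

      @Override
      public void abortJob(JobContext context, JobStatus.State state) throws IOException {
        super.abortJob(context, state);
        // former cleanupJob() logic for failed or killed jobs goes here
      }
    }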
org.apache.hadoop.io.WritableUtils.cloneInto(Writable, Writable)
use ReflectionUtils.cloneInto instead. |
org.apache.hadoop.util.ReflectionUtils.cloneWritableInto(Writable, Writable)
|
org.apache.hadoop.mapred.Counters.Counter.contentEquals(Counters.Counter)
|
org.apache.hadoop.filecache.DistributedCache.createAllSymlink(Configuration, File, File)
Internal to MapReduce framework. Use DistributedCacheManager
instead. |
org.apache.hadoop.fs.FileSystem.createNonRecursive(Path, boolean, int, short, long, Progressable)
API only for 0.20-append |
org.apache.hadoop.fs.FileSystem.createNonRecursive(Path, FsPermission, boolean, int, short, long, Progressable)
API only for 0.20-append |
org.apache.hadoop.fs.RawLocalFileSystem.createNonRecursive(Path, FsPermission, EnumSet, int, short, long, Progressable)
|
org.apache.hadoop.fs.FilterFileSystem.createNonRecursive(Path, FsPermission, EnumSet, int, short, long, Progressable)
|
org.apache.hadoop.fs.FileSystem.createNonRecursive(Path, FsPermission, EnumSet, int, short, long, Progressable)
API only for 0.20-append |
org.apache.hadoop.mapred.lib.CombineFileInputFormat.createPool(JobConf, List)
Use CombineFileInputFormat.createPool(List). |
org.apache.hadoop.mapred.lib.CombineFileInputFormat.createPool(JobConf, PathFilter...)
Use CombineFileInputFormat.createPool(PathFilter...). |
org.apache.hadoop.yarn.client.RMProxy.createRMProxy(Configuration, Class, InetSocketAddress)
This method is no longer used by YARN internally. To create a proxy
to the ResourceManager, use ClientRMProxy#createRMProxy or
ServerRMProxy#createRMProxy. |
org.apache.hadoop.mapreduce.Job.createSymlink()
|
org.apache.hadoop.mapreduce.lib.db.DBRecordReader.createValue()
|
org.apache.hadoop.io.SequenceFile.createWriter(Configuration, FSDataOutputStream, Class, Class, SequenceFile.CompressionType, CompressionCodec)
Use SequenceFile.createWriter(Configuration, Writer.Option...)
instead. |
org.apache.hadoop.io.SequenceFile.createWriter(Configuration, FSDataOutputStream, Class, Class, SequenceFile.CompressionType, CompressionCodec, SequenceFile.Metadata)
Use SequenceFile.createWriter(Configuration, Writer.Option...)
instead. |
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class)
Use SequenceFile.createWriter(Configuration, Writer.Option...)
instead. |
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, int, short, long, boolean, SequenceFile.CompressionType, CompressionCodec, SequenceFile.Metadata)
|
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, int, short, long, SequenceFile.CompressionType, CompressionCodec, Progressable, SequenceFile.Metadata)
Use SequenceFile.createWriter(Configuration, Writer.Option...)
instead. |
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, SequenceFile.CompressionType)
Use SequenceFile.createWriter(Configuration, Writer.Option...)
instead. |
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, SequenceFile.CompressionType, CompressionCodec)
Use SequenceFile.createWriter(Configuration, Writer.Option...)
instead. |
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, SequenceFile.CompressionType, CompressionCodec, Progressable)
Use SequenceFile.createWriter(Configuration, Writer.Option...)
instead. |
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, SequenceFile.CompressionType, CompressionCodec, Progressable, SequenceFile.Metadata)
Use SequenceFile.createWriter(Configuration, Writer.Option...)
instead. |
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, SequenceFile.CompressionType, Progressable)
Use SequenceFile.createWriter(Configuration, Writer.Option...)
instead. |
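Every createWriter overload above maps onto the single option-based factory; Writer.Option values replace the positional parameters. A sketch with an illustrative path and key/value types:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.SequenceFile;
    import org.apache.hadoop.io.Text;

    class SeqFileWrite {
      static void write(Configuration conf) throws IOException {
        SequenceFile.Writer writer = SequenceFile.createWriter(conf,
            SequenceFile.Writer.file(new Path("/tmp/example.seq")),  // was FileSystem/Path args
            SequenceFile.Writer.keyClass(Text.class),
            SequenceFile.Writer.valueClass(IntWritable.class),
            SequenceFile.Writer.compression(SequenceFile.CompressionType.BLOCK));
        try {
          writer.append(new Text("key"), new IntWritable(1));
        } finally {
          writer.close();
        }
      }
    }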
org.apache.hadoop.fs.FileSystem.delete(Path)
Use FileSystem.delete(Path, boolean) instead. |
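The boolean is a recursive flag; FileUtil.fullyDelete(FileSystem, Path) below migrates to the same call. A minimal sketch:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class DeleteExample {
      static boolean deleteRecursively(Path p) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        // true deletes non-empty directories; false fails on them
        return fs.delete(p, true);
      }
    }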
org.apache.hadoop.mapred.JobConf.deleteLocalFiles()
|
org.apache.hadoop.mapred.Counters.findCounter(String, int, String)
use Counters.findCounter(String, String) instead |
org.apache.hadoop.fs.FileUtil.fullyDelete(FileSystem, Path)
Use FileSystem.delete(Path, boolean) |
org.apache.hadoop.io.BytesWritable.get()
Use BytesWritable.getBytes() instead. |
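Note that getBytes() returns the backing array, which may be longer than the valid data; pair it with getLength(), the replacement for getSize() listed further down. A sketch:

    import org.apache.hadoop.io.BytesWritable;

    class BytesExample {
      static byte[] copyValid(BytesWritable bw) {
        byte[] backing = bw.getBytes();  // replaces get()
        int valid = bw.getLength();      // replaces getSize()
        byte[] out = new byte[valid];
        System.arraycopy(backing, 0, out, 0, valid);
        return out;
      }
    }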
org.apache.hadoop.mapreduce.Cluster.getAllJobs()
Use Cluster.getAllJobStatuses() instead. |
org.apache.hadoop.fs.FileSystem.getBlockSize(Path)
Use getFileStatus() instead |
org.apache.hadoop.mapred.Counters.Group.getCounter(int, String)
use Counters.Group.findCounter(String) instead |
org.apache.hadoop.fs.FileSystem.getDefaultBlockSize()
use FileSystem.getDefaultBlockSize(Path) instead |
org.apache.hadoop.fs.FileSystem.getDefaultReplication()
use FileSystem.getDefaultReplication(Path) instead |
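Both replacements take a Path so the answer can vary per mounted filesystem (for example under viewfs). A sketch:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class DefaultsExample {
      static void printDefaults(Path p) throws IOException {
        FileSystem fs = p.getFileSystem(new Configuration());
        System.out.println(fs.getDefaultBlockSize(p));    // replaces getDefaultBlockSize()
        System.out.println(fs.getDefaultReplication(p));  // replaces getDefaultReplication()
      }
    }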
org.apache.hadoop.mapreduce.security.TokenCache.getDelegationToken(Credentials, String)
Use Credentials.getToken(org.apache.hadoop.io.Text) instead;
this method is included for compatibility with Hadoop-1. |
org.apache.hadoop.filecache.DistributedCache.getFileStatus(Configuration, URI)
|
org.apache.hadoop.mapred.ClusterStatus.getGraylistedTrackerNames()
|
org.apache.hadoop.mapred.ClusterStatus.getGraylistedTrackers()
|
org.apache.hadoop.mapreduce.Job.getInstance(Cluster)
Use Job.getInstance() |
org.apache.hadoop.mapreduce.Job.getInstance(Cluster, Configuration)
Use Job.getInstance(Configuration) |
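Both Cluster-taking factories simply drop the Cluster argument. A sketch:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    class JobExample {
      static Job newJob() throws IOException {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);  // replaces Job.getInstance(cluster, conf)
        job.setJobName("example");        // illustrative name
        return job;
      }
    }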
org.apache.hadoop.mapred.JobClient.getJob(String)
Applications should rather use JobClient.getJob(JobID). |
org.apache.hadoop.mapred.JobStatus.getJobId()
use getJobID instead |
org.apache.hadoop.mapred.RunningJob.getJobID()
This method is deprecated and will be removed. Applications should
rather use RunningJob.getID(). |
org.apache.hadoop.mapred.JobID.getJobIDsPattern(String, Integer)
|
org.apache.hadoop.mapred.ClusterStatus.getJobTrackerState()
|
org.apache.hadoop.fs.FileSystem.getLength(Path)
Use getFileStatus() instead |
org.apache.hadoop.mapreduce.JobContext.getLocalCacheArchives()
the array returned only includes the items that were
downloaded. There is no way to map this to what is returned by
JobContext.getCacheArchives(). |
org.apache.hadoop.mapreduce.JobContext.getLocalCacheFiles()
the array returned only includes the items that were
downloaded. There is no way to map this to what is returned by
JobContext.getCacheFiles(). |
org.apache.hadoop.mapred.JobClient.getMapTaskReports(String)
Applications should rather use JobClient.getMapTaskReports(JobID) |
org.apache.hadoop.mapred.ClusterStatus.getMaxMemory()
|
org.apache.hadoop.mapred.JobConf.getMaxPhysicalMemoryForTask()
this variable is deprecated and no longer in use. |
org.apache.hadoop.mapred.JobConf.getMaxVirtualMemoryForTask()
Use JobConf.getMemoryForMapTask() and
JobConf.getMemoryForReduceTask() |
org.apache.hadoop.fs.FileSystem.getName()
call FileSystem.getUri() instead. |
org.apache.hadoop.fs.FileSystem.getNamed(String, Configuration)
call FileSystem.get(URI, Configuration) instead. |
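A sketch of both replacements, using an illustrative NameNode URI:

    import java.io.IOException;
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    class UriExample {
      static URI connect() throws IOException {
        Configuration conf = new Configuration();
        // replaces getNamed("hdfs://namenode:8020", conf); host is a placeholder
        FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"), conf);
        return fs.getUri();  // replaces fs.getName()
      }
    }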
org.apache.hadoop.mapred.lib.TotalOrderPartitioner.getPartitionFile(JobConf)
Use
TotalOrderPartitioner.getPartitionFile(Configuration)
instead |
org.apache.hadoop.mapreduce.lib.db.DBRecordReader.getPos()
|
org.apache.hadoop.mapred.JobQueueInfo.getQueueState()
|
org.apache.hadoop.mapred.JobClient.getReduceTaskReports(String)
Applications should rather use JobClient.getReduceTaskReports(JobID) |
org.apache.hadoop.fs.FileSystem.getReplication(Path)
Use getFileStatus() instead |
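getBlockSize(Path), getLength(Path), and getReplication(Path) all fold into a single getFileStatus() call. A sketch:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class StatusExample {
      static void printStatus(FileSystem fs, Path p) throws IOException {
        FileStatus st = fs.getFileStatus(p);
        System.out.println(st.getLen());          // replaces fs.getLength(p)
        System.out.println(st.getBlockSize());    // replaces fs.getBlockSize(p)
        System.out.println(st.getReplication());  // replaces fs.getReplication(p)
      }
    }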
org.apache.hadoop.fs.FileSystem.getServerDefaults()
use FileSystem.getServerDefaults(Path) instead |
org.apache.hadoop.mapred.JobConf.getSessionId()
|
org.apache.hadoop.io.BytesWritable.getSize()
Use BytesWritable.getLength() instead. |
org.apache.hadoop.fs.FileSystem.getStatistics()
use FileSystem.getAllStatistics() instead |
org.apache.hadoop.mapreduce.JobContext.getSymlink()
|
org.apache.hadoop.mapred.TaskAttemptID.getTaskAttemptIDsPattern(String, Integer, Boolean, Integer, Integer)
|
org.apache.hadoop.mapred.TaskAttemptID.getTaskAttemptIDsPattern(String, Integer, TaskType, Integer, Integer)
|
org.apache.hadoop.mapred.TaskCompletionEvent.getTaskId()
use TaskCompletionEvent.getTaskAttemptId() instead. |
org.apache.hadoop.mapred.TaskID.getTaskIDsPattern(String, Integer, Boolean, Integer)
Use TaskID.getTaskIDsPattern(String, Integer, TaskType,
Integer) |
org.apache.hadoop.mapred.TaskID.getTaskIDsPattern(String, Integer, TaskType, Integer)
|
org.apache.hadoop.mapred.JobClient.getTaskOutputFilter()
|
org.apache.hadoop.filecache.DistributedCache.getTimestamp(Configuration, URI)
|
org.apache.hadoop.mapred.ClusterStatus.getUsedMemory()
|
org.apache.hadoop.fs.FileStatus.isDir()
Use FileStatus.isFile(),
FileStatus.isDirectory(), and FileStatus.isSymlink()
instead. |
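isDir() conflated "not a file" with "directory"; the replacements also distinguish symlinks. A sketch:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class KindExample {
      static String kindOf(FileSystem fs, Path p) throws IOException {
        // note: getFileStatus() follows symlinks; use getFileLinkStatus()
        // to observe a link itself on filesystems that support links
        FileStatus st = fs.getFileStatus(p);
        if (st.isDirectory()) return "directory";
        if (st.isSymlink())   return "symlink";
        if (st.isFile())      return "file";
        return "unknown";
      }
    }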
org.apache.hadoop.mapreduce.TaskAttemptID.isMap()
|
org.apache.hadoop.mapreduce.TaskID.isMap()
|
org.apache.hadoop.mapred.FileOutputCommitter.isRecoverySupported()
|
org.apache.hadoop.mapred.OutputCommitter.isRecoverySupported()
Use OutputCommitter.isRecoverySupported(JobContext) instead. |
org.apache.hadoop.mapreduce.OutputCommitter.isRecoverySupported()
Use OutputCommitter.isRecoverySupported(JobContext) instead. |
org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter.isRecoverySupported()
|
org.apache.hadoop.mapred.RunningJob.killTask(String, boolean)
Applications should rather use RunningJob.killTask(TaskAttemptID, boolean) |
org.apache.hadoop.mapreduce.security.TokenCache.loadTokens(String, Configuration)
Use Credentials.readTokenStorageFile(org.apache.hadoop.fs.Path, org.apache.hadoop.conf.Configuration) instead;
this method is included for compatibility with Hadoop-1. |
org.apache.hadoop.mapreduce.security.TokenCache.loadTokens(String, JobConf)
Use Credentials.readTokenStorageFile(org.apache.hadoop.fs.Path, org.apache.hadoop.conf.Configuration) instead;
this method is included for compatibility with Hadoop-1. |
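Both loadTokens overloads migrate to the static reader on Credentials. A sketch, with a placeholder token file path:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.security.Credentials;

    class TokenFileExample {
      static Credentials load(String fileName, Configuration conf) throws IOException {
        // replaces TokenCache.loadTokens(fileName, conf)
        return Credentials.readTokenStorageFile(new Path(fileName), conf);
      }
    }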
org.apache.hadoop.fs.Path.makeQualified(FileSystem)
|
org.apache.hadoop.mapreduce.lib.db.DBRecordReader.next(LongWritable, T)
Use DBRecordReader.nextKeyValue() |
org.apache.hadoop.fs.FileSystem.primitiveCreate(Path, FsPermission, EnumSet, int, short, long, Progressable, Options.ChecksumOpt)
|
org.apache.hadoop.fs.FileSystem.primitiveMkdir(Path, FsPermission)
|
org.apache.hadoop.fs.FileSystem.primitiveMkdir(Path, FsPermission, boolean)
|
org.apache.hadoop.mapred.TaskAttemptID.read(DataInput)
|
org.apache.hadoop.mapred.JobID.read(DataInput)
|
org.apache.hadoop.mapred.TaskID.read(DataInput)
|
org.apache.hadoop.fs.FileSystem.rename(Path, Path, Options.Rename...)
|
org.apache.hadoop.mapred.JobClient.renewDelegationToken(Token)
Use Token.renew(org.apache.hadoop.conf.Configuration) instead |
org.apache.hadoop.mapreduce.Cluster.renewDelegationToken(Token)
Use Token.renew(org.apache.hadoop.conf.Configuration) instead |
org.apache.hadoop.filecache.DistributedCache.setArchiveTimestamps(Configuration, String)
|
org.apache.hadoop.mapred.jobcontrol.Job.setAssignedJobID(JobID)
setAssignedJobID should not be called;
the JobID is set by the framework. |
org.apache.hadoop.mapreduce.Counter.setDisplayName(String)
A no-op by default. |
org.apache.hadoop.filecache.DistributedCache.setFileTimestamps(Configuration, String)
|
org.apache.hadoop.filecache.DistributedCache.setLocalArchives(Configuration, String)
|
org.apache.hadoop.filecache.DistributedCache.setLocalFiles(Configuration, String)
|
org.apache.hadoop.mapred.jobcontrol.Job.setMapredJobID(String)
|
org.apache.hadoop.mapred.JobConf.setMaxPhysicalMemoryForTask(long)
|
org.apache.hadoop.mapred.JobConf.setMaxVirtualMemoryForTask(long)
Use JobConf.setMemoryForMapTask(long mem) and
JobConf.setMemoryForReduceTask(long mem) |
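The single virtual-memory knob splits into per-phase settings, with values in megabytes. A sketch:

    import org.apache.hadoop.mapred.JobConf;

    class MemoryExample {
      static JobConf configureMemory() {
        JobConf conf = new JobConf();
        conf.setMemoryForMapTask(1536);     // MB; replaces setMaxVirtualMemoryForTask
        conf.setMemoryForReduceTask(3072);  // MB
        return conf;
      }
    }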
org.apache.hadoop.mapred.lib.TotalOrderPartitioner.setPartitionFile(JobConf, Path)
Use
TotalOrderPartitioner.setPartitionFile(Configuration, Path)
instead |
org.apache.hadoop.mapred.JobConf.setSessionId(String)
|
org.apache.hadoop.mapred.jobcontrol.Job.setState(int)
|
org.apache.hadoop.mapred.TaskCompletionEvent.setTaskId(String)
use TaskCompletionEvent.setTaskAttemptId(TaskAttemptID) instead. |
org.apache.hadoop.mapred.TaskCompletionEvent.setTaskID(TaskAttemptID)
use TaskCompletionEvent.setTaskAttemptId(TaskAttemptID) instead. |
org.apache.hadoop.mapred.JobClient.setTaskOutputFilter(JobClient.TaskStatusFilter)
|
org.apache.hadoop.mapred.Counters.size()
use AbstractCounters.countCounters() instead |
org.apache.hadoop.mapred.pipes.Submitter.submitJob(JobConf)
Use Submitter.runJob(JobConf) |
org.apache.hadoop.fs.Syncable.sync()
As of Hadoop 0.21.0, replaced by hflush() |
org.apache.hadoop.fs.FSDataOutputStream.sync()
|
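Both sync() entries map to hflush(); hsync() additionally forces data to disk where the underlying filesystem supports it. A sketch:

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class FlushExample {
      static void writeAndFlush(FileSystem fs, Path p, byte[] data) throws IOException {
        FSDataOutputStream out = fs.create(p);
        try {
          out.write(data);
          out.hflush();  // replaces sync(): make data visible to new readers
          out.hsync();   // optionally also persist to disk where supported
        } finally {
          out.close();
        }
      }
    }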