public class Hadoop20SShims extends HadoopShimsSecure
Modifier and Type | Class and Description |
---|---|
class |
Hadoop20SShims.Hadoop20SFileStatus |
class |
Hadoop20SShims.KerberosNameShim
Shim for KerberosName
|
class |
Hadoop20SShims.MiniDFSShim
MiniDFSShim.
|
class |
Hadoop20SShims.MiniMrShim
Shim for MiniMrCluster
|
HadoopShimsSecure.CombineFileInputFormatShim<K,V>, HadoopShimsSecure.CombineFileRecordReader<K,V>, HadoopShimsSecure.InputSplitShim
HadoopShims.ByteBufferPoolShim, HadoopShims.DirectCompressionType, HadoopShims.DirectDecompressorShim, HadoopShims.HCatHadoopShims, HadoopShims.HdfsEncryptionShim, HadoopShims.HdfsFileStatus, HadoopShims.JobTrackerState, HadoopShims.NoopHdfsEncryptionShim, HadoopShims.StoragePolicyShim, HadoopShims.StoragePolicyValue, HadoopShims.WebHCatJTShim, HadoopShims.ZeroCopyReaderShim
Constructor and Description |
---|
Hadoop20SShims() |
Modifier and Type | Method and Description |
---|---|
void |
addDelegationTokens(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.security.Credentials cred,
String uname)
Get Delegation token and add it to Credential.
|
HadoopShims.HdfsEncryptionShim |
createHdfsEncryptionShim(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.conf.Configuration conf)
Returns a new instance of the HdfsEncryption shim.
|
org.apache.hadoop.fs.FileSystem |
createProxyFileSystem(org.apache.hadoop.fs.FileSystem fs,
URI uri)
Create a proxy file system that can serve a given scheme/authority using some
other file system.
|
HadoopShims.CombineFileInputFormatShim |
getCombineFileInputFormat() |
org.apache.hadoop.conf.Configuration |
getConfiguration(org.apache.hadoop.mapreduce.JobContext context)
Get configuration from JobContext
|
org.apache.hadoop.fs.Path |
getCurrentTrashPath(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.fs.FileSystem fs) |
long |
getDefaultBlockSize(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path)
Get the default block size for the path.
|
short |
getDefaultReplication(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path)
Get the default replication for a path.
|
HadoopShims.DirectDecompressorShim |
getDirectDecompressor(HadoopShims.DirectCompressionType codec) |
HadoopShims.HdfsFileStatus |
getFullFileStatus(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path file)
For a given file, return a file status
|
Map<String,String> |
getHadoopConfNames() |
HadoopShims.HCatHadoopShims |
getHCatShim() |
org.apache.hadoop.mapred.JobConf |
getJobConf(org.apache.hadoop.mapred.JobContext context)
Get job conf from the old style JobContext.
|
String |
getJobLauncherHttpAddress(org.apache.hadoop.conf.Configuration conf)
All references to jobtracker/resource manager http address
in the configuration should be done through this shim
|
String |
getJobLauncherRpcAddress(org.apache.hadoop.conf.Configuration conf)
All retrieval of jobtracker/resource manager rpc address
in the configuration should be done through this shim
|
HadoopShims.JobTrackerState |
getJobTrackerState(org.apache.hadoop.mapred.ClusterStatus clusterStatus)
Convert the ClusterStatus to its Thrift equivalent: JobTrackerState.
|
Hadoop20SShims.KerberosNameShim |
getKerberosNameShim(String name)
Returns a shim to wrap KerberosName
|
org.apache.hadoop.fs.BlockLocation[] |
getLocations(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.FileStatus status)
For file status returned by listLocatedStatus, convert them into a list
of block locations.
|
TreeMap<Long,org.apache.hadoop.fs.BlockLocation> |
getLocationsWithOffset(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.FileStatus status)
For the block locations returned by getLocations() convert them into a TreeMap
|
Comparator<org.apache.hadoop.io.LongWritable> |
getLongComparator() |
void |
getMergedCredentials(org.apache.hadoop.mapred.JobConf jobConf) |
HadoopShims.MiniDFSShim |
getMiniDfs(org.apache.hadoop.conf.Configuration conf,
int numDataNodes,
boolean format,
String[] racks)
Returns a shim to wrap MiniDFSCluster.
|
Hadoop20SShims.MiniMrShim |
getMiniMrCluster(org.apache.hadoop.conf.Configuration conf,
int numberOfTaskTrackers,
String nameNode,
int numDir)
Returns a shim to wrap MiniMrCluster
|
Hadoop20SShims.MiniMrShim |
getMiniSparkCluster(org.apache.hadoop.conf.Configuration conf,
int numberOfTaskTrackers,
String nameNode,
int numDir) |
Hadoop20SShims.MiniMrShim |
getMiniTezCluster(org.apache.hadoop.conf.Configuration conf,
int numberOfTaskTrackers,
String nameNode,
int numDir) |
org.apache.hadoop.fs.FileSystem |
getNonCachedFileSystem(URI uri,
org.apache.hadoop.conf.Configuration conf) |
String |
getPassword(org.apache.hadoop.conf.Configuration conf,
String name)
Use password API (if available) to fetch credentials/password
|
org.apache.hadoop.fs.Path |
getPathWithoutSchemeAndAuthority(org.apache.hadoop.fs.Path path) |
HadoopShims.StoragePolicyShim |
getStoragePolicyShim(org.apache.hadoop.fs.FileSystem fs)
obtain a storage policy shim associated with the filesystem.
|
String |
getTaskAttemptLogUrl(org.apache.hadoop.mapred.JobConf conf,
String taskTrackerHttpAddress,
String taskAttemptId)
Constructs and Returns TaskAttempt Log Url
or null if the TaskLogServlet is not available
|
HadoopShims.WebHCatJTShim |
getWebHCatShim(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.security.UserGroupInformation ugi)
Provides a Hadoop JobTracker shim.
|
HadoopShims.ZeroCopyReaderShim |
getZeroCopyReader(org.apache.hadoop.fs.FSDataInputStream in,
HadoopShims.ByteBufferPoolShim pool)
Provides an HDFS ZeroCopyReader shim.
|
boolean |
hasStickyBit(org.apache.hadoop.fs.permission.FsPermission permission)
Check sticky bit in the permission
|
void |
hflush(org.apache.hadoop.fs.FSDataOutputStream stream)
Flush and make visible to other users the changes to the given stream.
|
boolean |
isDirectory(org.apache.hadoop.fs.FileStatus fileStatus)
Check whether file is directory.
|
boolean |
isLocalMode(org.apache.hadoop.conf.Configuration conf)
Check whether MR is configured to run in local-mode
|
List<org.apache.hadoop.fs.FileStatus> |
listLocatedStatus(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path,
org.apache.hadoop.fs.PathFilter filter)
Get the block locations for the given directory.
|
void |
mergeCredentials(org.apache.hadoop.mapred.JobConf dest,
org.apache.hadoop.mapred.JobConf src) |
boolean |
moveToAppropriateTrash(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path,
org.apache.hadoop.conf.Configuration conf)
Move the directory/file to trash.
|
org.apache.hadoop.mapreduce.JobContext |
newJobContext(org.apache.hadoop.mapreduce.Job job) |
org.apache.hadoop.mapreduce.TaskAttemptContext |
newTaskAttemptContext(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.util.Progressable progressable) |
org.apache.hadoop.mapreduce.TaskAttemptID |
newTaskAttemptID(org.apache.hadoop.mapreduce.JobID jobId,
boolean isMap,
int taskId,
int id) |
int |
readByteBuffer(org.apache.hadoop.fs.FSDataInputStream file,
ByteBuffer dest)
Reads data into ByteBuffer.
|
void |
refreshDefaultQueue(org.apache.hadoop.conf.Configuration conf,
String userName)
Reset the default fair scheduler queue mapping to end user.
|
boolean |
runDistCp(org.apache.hadoop.fs.Path src,
org.apache.hadoop.fs.Path dst,
org.apache.hadoop.conf.Configuration conf)
Copies a source dir/file to a destination by orchestrating the copy between hdfs nodes.
|
void |
setFullFileStatus(org.apache.hadoop.conf.Configuration conf,
HadoopShims.HdfsFileStatus sourceStatus,
org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path target)
For a given file, set a given file status.
|
void |
setJobLauncherRpcAddress(org.apache.hadoop.conf.Configuration conf,
String val)
All updates to jobtracker/resource manager rpc address
in the configuration should be done through this shim
|
void |
setTotalOrderPartitionFile(org.apache.hadoop.mapred.JobConf jobConf,
org.apache.hadoop.fs.Path partitionFile)
The method to set the partition file has a different signature between
hadoop versions.
|
void |
startPauseMonitor(org.apache.hadoop.conf.Configuration conf) |
boolean |
supportStickyBit()
check whether current hadoop supports sticky bit
|
boolean |
supportTrashFeature() |
checkFileAccess, run
public HadoopShims.CombineFileInputFormatShim getCombineFileInputFormat()
public String getTaskAttemptLogUrl(org.apache.hadoop.mapred.JobConf conf, String taskTrackerHttpAddress, String taskAttemptId) throws MalformedURLException
HadoopShims
MalformedURLException
public HadoopShims.JobTrackerState getJobTrackerState(org.apache.hadoop.mapred.ClusterStatus clusterStatus) throws Exception
HadoopShims
getJobTrackerState
in interface HadoopShims
getJobTrackerState
in class HadoopShimsSecure
Exception
- if no equivalent JobTrackerState exists
public org.apache.hadoop.mapreduce.TaskAttemptContext newTaskAttemptContext(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.util.Progressable progressable)
newTaskAttemptContext
in interface HadoopShims
newTaskAttemptContext
in class HadoopShimsSecure
public org.apache.hadoop.mapreduce.TaskAttemptID newTaskAttemptID(org.apache.hadoop.mapreduce.JobID jobId, boolean isMap, int taskId, int id)
public org.apache.hadoop.mapreduce.JobContext newJobContext(org.apache.hadoop.mapreduce.Job job)
newJobContext
in interface HadoopShims
newJobContext
in class HadoopShimsSecure
public void startPauseMonitor(org.apache.hadoop.conf.Configuration conf)
public boolean isLocalMode(org.apache.hadoop.conf.Configuration conf)
HadoopShims
isLocalMode
in interface HadoopShims
isLocalMode
in class HadoopShimsSecure
public String getJobLauncherRpcAddress(org.apache.hadoop.conf.Configuration conf)
HadoopShims
getJobLauncherRpcAddress
in interface HadoopShims
getJobLauncherRpcAddress
in class HadoopShimsSecure
public void setJobLauncherRpcAddress(org.apache.hadoop.conf.Configuration conf, String val)
HadoopShims
setJobLauncherRpcAddress
in interface HadoopShims
setJobLauncherRpcAddress
in class HadoopShimsSecure
public String getJobLauncherHttpAddress(org.apache.hadoop.conf.Configuration conf)
HadoopShims
getJobLauncherHttpAddress
in interface HadoopShims
getJobLauncherHttpAddress
in class HadoopShimsSecure
public boolean moveToAppropriateTrash(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path path, org.apache.hadoop.conf.Configuration conf) throws IOException
HadoopShims
moveToAppropriateTrash
in interface HadoopShims
moveToAppropriateTrash
in class HadoopShimsSecure
IOException
public long getDefaultBlockSize(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path path)
HadoopShims
getDefaultBlockSize
in interface HadoopShims
getDefaultBlockSize
in class HadoopShimsSecure
public short getDefaultReplication(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path path)
HadoopShims
getDefaultReplication
in interface HadoopShims
getDefaultReplication
in class HadoopShimsSecure
public void refreshDefaultQueue(org.apache.hadoop.conf.Configuration conf, String userName)
HadoopShims
userName
- end user name
public void setTotalOrderPartitionFile(org.apache.hadoop.mapred.JobConf jobConf, org.apache.hadoop.fs.Path partitionFile)
HadoopShims
public Comparator<org.apache.hadoop.io.LongWritable> getLongComparator()
public Hadoop20SShims.MiniMrShim getMiniMrCluster(org.apache.hadoop.conf.Configuration conf, int numberOfTaskTrackers, String nameNode, int numDir) throws IOException
IOException
public Hadoop20SShims.MiniMrShim getMiniTezCluster(org.apache.hadoop.conf.Configuration conf, int numberOfTaskTrackers, String nameNode, int numDir) throws IOException
IOException
public Hadoop20SShims.MiniMrShim getMiniSparkCluster(org.apache.hadoop.conf.Configuration conf, int numberOfTaskTrackers, String nameNode, int numDir) throws IOException
IOException
public HadoopShims.MiniDFSShim getMiniDfs(org.apache.hadoop.conf.Configuration conf, int numDataNodes, boolean format, String[] racks) throws IOException
HadoopShims
IOException
public HadoopShims.HCatHadoopShims getHCatShim()
public HadoopShims.WebHCatJTShim getWebHCatShim(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.security.UserGroupInformation ugi) throws IOException
HadoopShims
conf
- not null
IOException
public List<org.apache.hadoop.fs.FileStatus> listLocatedStatus(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path path, org.apache.hadoop.fs.PathFilter filter) throws IOException
HadoopShims
fs
- the file system
path
- the directory name to get the status and block locations
filter
- a filter that needs to accept the file (or null)
IOException
public org.apache.hadoop.fs.BlockLocation[] getLocations(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.FileStatus status) throws IOException
HadoopShims
fs
- the file system
status
- the file information
IOException
public TreeMap<Long,org.apache.hadoop.fs.BlockLocation> getLocationsWithOffset(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.FileStatus status) throws IOException
HadoopShims
fs
- the file system
status
- the file information
IOException
public void hflush(org.apache.hadoop.fs.FSDataOutputStream stream) throws IOException
HadoopShims
stream
- the stream to hflush.
IOException
public HadoopShims.HdfsFileStatus getFullFileStatus(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path file) throws IOException
HadoopShims
IOException
public void setFullFileStatus(org.apache.hadoop.conf.Configuration conf, HadoopShims.HdfsFileStatus sourceStatus, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path target) throws IOException
HadoopShims
IOException
public org.apache.hadoop.fs.FileSystem createProxyFileSystem(org.apache.hadoop.fs.FileSystem fs, URI uri)
HadoopShims
createProxyFileSystem
in interface HadoopShims
createProxyFileSystem
in class HadoopShimsSecure
public HadoopShims.ZeroCopyReaderShim getZeroCopyReader(org.apache.hadoop.fs.FSDataInputStream in, HadoopShims.ByteBufferPoolShim pool) throws IOException
HadoopShims
in
- FSDataInputStream to read from (where the cached/mmap buffers are tied to)
IOException
public HadoopShims.DirectDecompressorShim getDirectDecompressor(HadoopShims.DirectCompressionType codec)
public org.apache.hadoop.conf.Configuration getConfiguration(org.apache.hadoop.mapreduce.JobContext context)
HadoopShims
public org.apache.hadoop.mapred.JobConf getJobConf(org.apache.hadoop.mapred.JobContext context)
HadoopShims
context
- job context
public org.apache.hadoop.fs.FileSystem getNonCachedFileSystem(URI uri, org.apache.hadoop.conf.Configuration conf) throws IOException
getNonCachedFileSystem
in interface HadoopShims
getNonCachedFileSystem
in class HadoopShimsSecure
IOException
public void getMergedCredentials(org.apache.hadoop.mapred.JobConf jobConf) throws IOException
IOException
public void mergeCredentials(org.apache.hadoop.mapred.JobConf dest, org.apache.hadoop.mapred.JobConf src) throws IOException
IOException
public String getPassword(org.apache.hadoop.conf.Configuration conf, String name)
HadoopShims
public boolean supportStickyBit()
HadoopShims
public boolean hasStickyBit(org.apache.hadoop.fs.permission.FsPermission permission)
HadoopShims
public boolean supportTrashFeature()
public org.apache.hadoop.fs.Path getCurrentTrashPath(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.FileSystem fs)
public boolean isDirectory(org.apache.hadoop.fs.FileStatus fileStatus)
HadoopShims
public Hadoop20SShims.KerberosNameShim getKerberosNameShim(String name) throws IOException
IOException
public HadoopShims.StoragePolicyShim getStoragePolicyShim(org.apache.hadoop.fs.FileSystem fs)
HadoopShims
public boolean runDistCp(org.apache.hadoop.fs.Path src, org.apache.hadoop.fs.Path dst, org.apache.hadoop.conf.Configuration conf) throws IOException
HadoopShims
src
- Path to the source file or directory to copy
dst
- Path to the destination file or directory
conf
- The hadoop configuration object
IOException
public HadoopShims.HdfsEncryptionShim createHdfsEncryptionShim(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.conf.Configuration conf) throws IOException
HadoopShims
fs
- A FileSystem object to HDFS
conf
- A Configuration object
IOException
- If an error occurred while creating the instance.
public org.apache.hadoop.fs.Path getPathWithoutSchemeAndAuthority(org.apache.hadoop.fs.Path path)
public int readByteBuffer(org.apache.hadoop.fs.FSDataInputStream file, ByteBuffer dest) throws IOException
HadoopShims
file
- File.
dest
- Buffer.
IOException
public void addDelegationTokens(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.security.Credentials cred, String uname) throws IOException
HadoopShims
addDelegationTokens
in interface HadoopShims
addDelegationTokens
in class HadoopShimsSecure
fs
- FileSystem object to HDFS
cred
- Credentials object to add the token to.
uname
- user name.
IOException
- If an error occurred on adding the token.
Copyright © 2017 The Apache Software Foundation. All rights reserved.