public class Hadoop23Shims extends HadoopShimsSecure
Modifier and Type | Class and Description |
---|---|
class |
Hadoop23Shims.HdfsEncryptionShim |
class |
Hadoop23Shims.KerberosNameShim
Shim for KerberosName
|
class |
Hadoop23Shims.MiniDFSShim
MiniDFSShim.
|
class |
Hadoop23Shims.MiniMrShim
Shim for MiniMrCluster
|
class |
Hadoop23Shims.MiniSparkShim
Shim for MiniSparkOnYARNCluster
|
class |
Hadoop23Shims.MiniTezLocalShim |
class |
Hadoop23Shims.MiniTezShim
Shim for MiniTezCluster
|
static class |
Hadoop23Shims.StoragePolicyShim |
HadoopShimsSecure.CombineFileInputFormatShim<K,V>, HadoopShimsSecure.CombineFileRecordReader<K,V>, HadoopShimsSecure.InputSplitShim
HadoopShims.HCatHadoopShims, HadoopShims.HdfsFileStatusWithId, HadoopShims.JobTrackerState, HadoopShims.NoopHdfsEncryptionShim, HadoopShims.StoragePolicyValue, HadoopShims.WebHCatJTShim
Modifier and Type | Field and Description |
---|---|
protected static Method |
accessMethod |
protected static Method |
getPasswordMethod |
Constructor and Description |
---|
Hadoop23Shims() |
Modifier and Type | Method and Description |
---|---|
void |
addDelegationTokens(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.security.Credentials cred,
String uname)
Get Delegation token and add it to Credential.
|
void |
checkFileAccess(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.FileStatus stat,
org.apache.hadoop.fs.permission.FsAction action)
Check if the configured UGI has access to the path for the given file system action.
|
org.apache.hadoop.security.UserGroupInformation |
cloneUgi(org.apache.hadoop.security.UserGroupInformation baseUgi)
Clones the UGI and the Subject.
|
HadoopShims.HdfsEncryptionShim |
createHdfsEncryptionShim(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.conf.Configuration conf)
Returns a new instance of the HdfsEncryption shim.
|
org.apache.hadoop.fs.FileSystem |
createProxyFileSystem(org.apache.hadoop.fs.FileSystem fs,
URI uri)
Create a proxy file system that can serve a given scheme/authority using some
other file system.
|
HadoopShims.CombineFileInputFormatShim |
getCombineFileInputFormat() |
org.apache.hadoop.conf.Configuration |
getConfiguration(org.apache.hadoop.mapreduce.JobContext context)
Get configuration from JobContext
|
org.apache.hadoop.fs.Path |
getCurrentTrashPath(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.fs.FileSystem fs) |
long |
getDefaultBlockSize(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path)
Get the default block size for the path.
|
short |
getDefaultReplication(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path)
Get the default replication for a path.
|
long |
getFileId(org.apache.hadoop.fs.FileSystem fs,
String path)
Gets file ID.
|
HadoopShims.HCatHadoopShims |
getHCatShim() |
org.apache.hadoop.mapred.JobConf |
getJobConf(org.apache.hadoop.mapred.JobContext context)
Get job conf from the old style JobContext.
|
String |
getJobLauncherHttpAddress(org.apache.hadoop.conf.Configuration conf)
All references to jobtracker/resource manager http address
in the configuration should be done through this shim
|
String |
getJobLauncherRpcAddress(org.apache.hadoop.conf.Configuration conf)
All retrieval of jobtracker/resource manager rpc address
in the configuration should be done through this shim
|
HadoopShims.JobTrackerState |
getJobTrackerState(org.apache.hadoop.mapred.ClusterStatus clusterStatus)
Convert the ClusterStatus to its Thrift equivalent: JobTrackerState.
|
Hadoop23Shims.KerberosNameShim |
getKerberosNameShim(String name)
Returns a shim to wrap KerberosName
|
HadoopShims.MiniMrShim |
getLocalMiniTezCluster(org.apache.hadoop.conf.Configuration conf,
boolean usingLlap) |
org.apache.hadoop.fs.BlockLocation[] |
getLocations(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.FileStatus status)
For file status returned by listLocatedStatus, convert them into a list
of block locations.
|
TreeMap<Long,org.apache.hadoop.fs.BlockLocation> |
getLocationsWithOffset(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.FileStatus status)
For the block locations returned by getLocations() convert them into a Treemap
|
Comparator<org.apache.hadoop.io.LongWritable> |
getLongComparator() |
void |
getMergedCredentials(org.apache.hadoop.mapred.JobConf jobConf) |
HadoopShims.MiniDFSShim |
getMiniDfs(org.apache.hadoop.conf.Configuration conf,
int numDataNodes,
boolean format,
String[] racks)
Returns a shim to wrap MiniDFSCluster.
|
HadoopShims.MiniDFSShim |
getMiniDfs(org.apache.hadoop.conf.Configuration conf,
int numDataNodes,
boolean format,
String[] racks,
boolean isHA) |
Hadoop23Shims.MiniMrShim |
getMiniMrCluster(org.apache.hadoop.conf.Configuration conf,
int numberOfTaskTrackers,
String nameNode,
int numDir)
Returns a shim to wrap MiniMrCluster
|
Hadoop23Shims.MiniMrShim |
getMiniSparkCluster(org.apache.hadoop.conf.Configuration conf,
int numberOfTaskTrackers,
String nameNode,
int numDir)
Returns a shim to wrap MiniSparkOnYARNCluster
|
Hadoop23Shims.MiniMrShim |
getMiniTezCluster(org.apache.hadoop.conf.Configuration conf,
int numberOfTaskTrackers,
String nameNode,
boolean usingLlap)
Returns a shim to wrap MiniMrTez
|
org.apache.hadoop.fs.FileSystem |
getNonCachedFileSystem(URI uri,
org.apache.hadoop.conf.Configuration conf) |
String |
getPassword(org.apache.hadoop.conf.Configuration conf,
String name)
Use password API (if available) to fetch credentials/password
|
org.apache.hadoop.fs.Path |
getPathWithoutSchemeAndAuthority(org.apache.hadoop.fs.Path path) |
HadoopShims.StoragePolicyShim |
getStoragePolicyShim(org.apache.hadoop.fs.FileSystem fs)
obtain a storage policy shim associated with the filesystem.
|
String |
getTaskAttemptLogUrl(org.apache.hadoop.mapred.JobConf conf,
String taskTrackerHttpAddress,
String taskAttemptId)
Constructs and Returns TaskAttempt Logger Url
or null if the TaskLogServlet is not available
|
HadoopShims.WebHCatJTShim |
getWebHCatShim(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.security.UserGroupInformation ugi)
Provides a Hadoop JobTracker shim.
|
boolean |
hasStickyBit(org.apache.hadoop.fs.permission.FsPermission permission)
Check sticky bit in the permission
|
void |
hflush(org.apache.hadoop.fs.FSDataOutputStream stream)
Flush and make visible to other users the changes to the given stream.
|
boolean |
isDirectory(org.apache.hadoop.fs.FileStatus fileStatus)
Check whether file is directory.
|
static boolean |
isHdfsEncryptionSupported() |
boolean |
isLocalMode(org.apache.hadoop.conf.Configuration conf)
Check whether MR is configured to run in local-mode
|
List<HadoopShims.HdfsFileStatusWithId> |
listLocatedHdfsStatus(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path p,
org.apache.hadoop.fs.PathFilter filter) |
void |
mergeCredentials(org.apache.hadoop.mapred.JobConf dest,
org.apache.hadoop.mapred.JobConf src) |
org.apache.hadoop.mapreduce.JobContext |
newJobContext(org.apache.hadoop.mapreduce.Job job) |
org.apache.hadoop.mapreduce.TaskAttemptContext |
newTaskAttemptContext(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.util.Progressable progressable) |
org.apache.hadoop.mapreduce.TaskAttemptID |
newTaskAttemptID(org.apache.hadoop.mapreduce.JobID jobId,
boolean isMap,
int taskId,
int id) |
int |
readByteBuffer(org.apache.hadoop.fs.FSDataInputStream file,
ByteBuffer dest)
Reads data into ByteBuffer.
|
void |
refreshDefaultQueue(org.apache.hadoop.conf.Configuration conf,
String userName)
Load the fair scheduler queue for given user if available.
|
boolean |
runDistCp(org.apache.hadoop.fs.Path src,
org.apache.hadoop.fs.Path dst,
org.apache.hadoop.conf.Configuration conf)
Copies a source dir/file to a destination by orchestrating the copy between hdfs nodes.
|
void |
setJobLauncherRpcAddress(org.apache.hadoop.conf.Configuration conf,
String val)
All updates to jobtracker/resource manager rpc address
in the configuration should be done through this shim
|
void |
setTotalOrderPartitionFile(org.apache.hadoop.mapred.JobConf jobConf,
org.apache.hadoop.fs.Path partitionFile)
The method used to set the partition file has a different signature between
hadoop versions.
|
boolean |
supportStickyBit()
check whether current hadoop supports sticky bit
|
boolean |
supportTrashFeature() |
protected static final Method accessMethod
protected static final Method getPasswordMethod
public HadoopShims.CombineFileInputFormatShim getCombineFileInputFormat()
public String getTaskAttemptLogUrl(org.apache.hadoop.mapred.JobConf conf, String taskTrackerHttpAddress, String taskAttemptId) throws MalformedURLException
HadoopShims
MalformedURLException
public HadoopShims.JobTrackerState getJobTrackerState(org.apache.hadoop.mapred.ClusterStatus clusterStatus) throws Exception
HadoopShims
getJobTrackerState
in interface HadoopShims
getJobTrackerState
in class HadoopShimsSecure
Exception
- if no equivalent JobTrackerState exists
public org.apache.hadoop.mapreduce.TaskAttemptContext newTaskAttemptContext(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.util.Progressable progressable)
newTaskAttemptContext
in interface HadoopShims
newTaskAttemptContext
in class HadoopShimsSecure
public org.apache.hadoop.mapreduce.TaskAttemptID newTaskAttemptID(org.apache.hadoop.mapreduce.JobID jobId, boolean isMap, int taskId, int id)
public org.apache.hadoop.mapreduce.JobContext newJobContext(org.apache.hadoop.mapreduce.Job job)
newJobContext
in interface HadoopShims
newJobContext
in class HadoopShimsSecure
public boolean isLocalMode(org.apache.hadoop.conf.Configuration conf)
HadoopShims
isLocalMode
in interface HadoopShims
isLocalMode
in class HadoopShimsSecure
public String getJobLauncherRpcAddress(org.apache.hadoop.conf.Configuration conf)
HadoopShims
getJobLauncherRpcAddress
in interface HadoopShims
getJobLauncherRpcAddress
in class HadoopShimsSecure
public void setJobLauncherRpcAddress(org.apache.hadoop.conf.Configuration conf, String val)
HadoopShims
setJobLauncherRpcAddress
in interface HadoopShims
setJobLauncherRpcAddress
in class HadoopShimsSecure
public String getJobLauncherHttpAddress(org.apache.hadoop.conf.Configuration conf)
HadoopShims
getJobLauncherHttpAddress
in interface HadoopShims
getJobLauncherHttpAddress
in class HadoopShimsSecure
public long getDefaultBlockSize(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path path)
HadoopShims
getDefaultBlockSize
in interface HadoopShims
getDefaultBlockSize
in class HadoopShimsSecure
public short getDefaultReplication(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path path)
HadoopShims
getDefaultReplication
in interface HadoopShims
getDefaultReplication
in class HadoopShimsSecure
public void setTotalOrderPartitionFile(org.apache.hadoop.mapred.JobConf jobConf, org.apache.hadoop.fs.Path partitionFile)
HadoopShims
public Comparator<org.apache.hadoop.io.LongWritable> getLongComparator()
public void refreshDefaultQueue(org.apache.hadoop.conf.Configuration conf, String userName) throws IOException
userName
- end user name
IOException
public Hadoop23Shims.MiniMrShim getMiniMrCluster(org.apache.hadoop.conf.Configuration conf, int numberOfTaskTrackers, String nameNode, int numDir) throws IOException
IOException
public HadoopShims.MiniMrShim getLocalMiniTezCluster(org.apache.hadoop.conf.Configuration conf, boolean usingLlap)
public Hadoop23Shims.MiniMrShim getMiniTezCluster(org.apache.hadoop.conf.Configuration conf, int numberOfTaskTrackers, String nameNode, boolean usingLlap) throws IOException
IOException
public Hadoop23Shims.MiniMrShim getMiniSparkCluster(org.apache.hadoop.conf.Configuration conf, int numberOfTaskTrackers, String nameNode, int numDir) throws IOException
IOException
public HadoopShims.MiniDFSShim getMiniDfs(org.apache.hadoop.conf.Configuration conf, int numDataNodes, boolean format, String[] racks) throws IOException
HadoopShims
IOException
public HadoopShims.MiniDFSShim getMiniDfs(org.apache.hadoop.conf.Configuration conf, int numDataNodes, boolean format, String[] racks, boolean isHA) throws IOException
IOException
public HadoopShims.HCatHadoopShims getHCatShim()
public HadoopShims.WebHCatJTShim getWebHCatShim(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.security.UserGroupInformation ugi) throws IOException
HadoopShims
conf
- not null
IOException
public List<HadoopShims.HdfsFileStatusWithId> listLocatedHdfsStatus(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path p, org.apache.hadoop.fs.PathFilter filter) throws IOException
IOException
public org.apache.hadoop.fs.BlockLocation[] getLocations(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.FileStatus status) throws IOException
HadoopShims
fs
- the file system
status
- the file information
IOException
public TreeMap<Long,org.apache.hadoop.fs.BlockLocation> getLocationsWithOffset(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.FileStatus status) throws IOException
HadoopShims
fs
- the file system
status
- the file information
IOException
public void hflush(org.apache.hadoop.fs.FSDataOutputStream stream) throws IOException
HadoopShims
stream
- the stream to hflush.
IOException
public org.apache.hadoop.fs.FileSystem createProxyFileSystem(org.apache.hadoop.fs.FileSystem fs, URI uri)
HadoopShims
createProxyFileSystem
in interface HadoopShims
createProxyFileSystem
in class HadoopShimsSecure
public org.apache.hadoop.conf.Configuration getConfiguration(org.apache.hadoop.mapreduce.JobContext context)
HadoopShims
public org.apache.hadoop.mapred.JobConf getJobConf(org.apache.hadoop.mapred.JobContext context)
HadoopShims
context
- job context
public org.apache.hadoop.fs.FileSystem getNonCachedFileSystem(URI uri, org.apache.hadoop.conf.Configuration conf) throws IOException
getNonCachedFileSystem
in interface HadoopShims
getNonCachedFileSystem
in class HadoopShimsSecure
IOException
public void getMergedCredentials(org.apache.hadoop.mapred.JobConf jobConf) throws IOException
IOException
public void mergeCredentials(org.apache.hadoop.mapred.JobConf dest, org.apache.hadoop.mapred.JobConf src) throws IOException
IOException
public void checkFileAccess(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.FileStatus stat, org.apache.hadoop.fs.permission.FsAction action) throws IOException, AccessControlException, Exception
HadoopShims
checkFileAccess
in interface HadoopShims
checkFileAccess
in class HadoopShimsSecure
IOException
AccessControlException
Exception
public String getPassword(org.apache.hadoop.conf.Configuration conf, String name) throws IOException
HadoopShims
IOException
public boolean supportStickyBit()
HadoopShims
public boolean hasStickyBit(org.apache.hadoop.fs.permission.FsPermission permission)
HadoopShims
public boolean supportTrashFeature()
public org.apache.hadoop.fs.Path getCurrentTrashPath(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.FileSystem fs)
public Hadoop23Shims.KerberosNameShim getKerberosNameShim(String name) throws IOException
IOException
public boolean isDirectory(org.apache.hadoop.fs.FileStatus fileStatus)
HadoopShims
public HadoopShims.StoragePolicyShim getStoragePolicyShim(org.apache.hadoop.fs.FileSystem fs)
HadoopShims
public boolean runDistCp(org.apache.hadoop.fs.Path src, org.apache.hadoop.fs.Path dst, org.apache.hadoop.conf.Configuration conf) throws IOException
HadoopShims
src
- Path to the source file or directory to copy
dst
- Path to the destination file or directory
conf
- The hadoop configuration object
IOException
public static boolean isHdfsEncryptionSupported()
public HadoopShims.HdfsEncryptionShim createHdfsEncryptionShim(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.conf.Configuration conf) throws IOException
HadoopShims
fs
- A FileSystem object to HDFS
conf
- A Configuration object
IOException
- If an error occurred while creating the instance.
public org.apache.hadoop.fs.Path getPathWithoutSchemeAndAuthority(org.apache.hadoop.fs.Path path)
public int readByteBuffer(org.apache.hadoop.fs.FSDataInputStream file, ByteBuffer dest) throws IOException
HadoopShims
file
- File.
dest
- Buffer.
IOException
public void addDelegationTokens(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.security.Credentials cred, String uname) throws IOException
HadoopShims
addDelegationTokens
in interface HadoopShims
addDelegationTokens
in class HadoopShimsSecure
fs
- FileSystem object to HDFS
cred
- Credentials object to add the token to.
uname
- user name.
IOException
- If an error occurred on adding the token.
public long getFileId(org.apache.hadoop.fs.FileSystem fs, String path) throws IOException
HadoopShims
IOException
public org.apache.hadoop.security.UserGroupInformation cloneUgi(org.apache.hadoop.security.UserGroupInformation baseUgi) throws IOException
HadoopShims
IOException
Copyright © 2021 The Apache Software Foundation. All rights reserved.