public class Context extends Object
Modifier and Type | Class and Description |
---|---|
static class |
Context.DestClausePrefix |
static class |
Context.Operation
These ops require special handling in various places
(note that Insert into Acid table is in OTHER category)
|
Modifier and Type | Field and Description |
---|---|
protected String |
cboInfo |
protected boolean |
cboSucceeded |
protected String |
cmd |
protected ExplainConfiguration |
explainConfig |
protected List<HiveLock> |
hiveLocks |
protected HiveTxnManager |
hiveTxnManager |
protected int |
pathid |
protected int |
tryCount |
Constructor and Description |
---|
Context(org.apache.hadoop.conf.Configuration conf) |
Context(org.apache.hadoop.conf.Configuration conf,
String executionId)
Create a Context with a given executionId.
|
Modifier and Type | Method and Description |
---|---|
void |
addCS(String path,
org.apache.hadoop.fs.ContentSummary cs) |
Context.DestClausePrefix |
addDestNamePrefix(int pos,
Context.DestClausePrefix prefix)
Will make SemanticAnalyzer.Phase1Ctx#dest in subtree rooted at 'tree' use 'prefix'.
|
void |
addMaterializedTable(String cteName,
Table table) |
void |
addViewTokenRewriteStream(String viewFullyQualifiedName,
org.antlr.runtime.TokenRewriteStream tokenRewriteStream) |
void |
checkHeartbeaterLockException() |
void |
clear() |
static String |
generateExecutionId()
Generate a unique executionId.
|
String |
getCboInfo() |
String |
getCmd()
Find the original query command.
|
org.apache.hadoop.conf.Configuration |
getConf() |
org.apache.hadoop.fs.ContentSummary |
getCS(org.apache.hadoop.fs.Path path) |
org.apache.hadoop.fs.ContentSummary |
getCS(String path) |
Context.DestClausePrefix |
getDestNamePrefix(ASTNode curNode)
The prefix is always relative to a given ASTNode
|
ExplainConfiguration.AnalyzeState |
getExplainAnalyze() |
ExplainConfiguration |
getExplainConfig() |
boolean |
getExplainLogical()
Find whether the current query is a logical explain query
|
org.apache.hadoop.fs.Path |
getExternalTmpPath(org.apache.hadoop.fs.Path path)
Get a path to store tmp data destined for external Path.
|
org.apache.hadoop.fs.Path |
getExtTmpPathRelTo(org.apache.hadoop.fs.Path path)
This is similar to getExternalTmpPath() with difference being this method returns temp path
within passed in uri, whereas getExternalTmpPath() ignores passed in path and returns temp
path within /tmp
|
DbTxnManager.Heartbeater |
getHeartbeater() |
List<HiveLock> |
getHiveLocks() |
HiveTxnManager |
getHiveTxnManager() |
boolean |
getIsUpdateDeleteMerge() |
Map<LoadTableDesc,WriteEntity> |
getLoadTableOutputMap() |
org.apache.hadoop.fs.Path |
getLocalScratchDir(boolean mkdir)
Create a local scratch directory on demand and return it.
|
org.apache.hadoop.fs.Path |
getLocalTmpPath()
Get a tmp path on local host to store intermediate data.
|
Table |
getMaterializedTable(String cteName) |
org.apache.hadoop.fs.Path |
getMRScratchDir()
Create a map-reduce scratch directory on demand and return it.
|
org.apache.hadoop.fs.Path |
getMRTmpPath()
Get a path to store map-reduce intermediate data in.
|
org.apache.hadoop.fs.Path |
getMRTmpPath(URI uri) |
CompilationOpContext |
getOpContext() |
Map<WriteEntity,List<HiveLockObj>> |
getOutputLockObjects() |
Map<String,org.apache.hadoop.fs.ContentSummary> |
getPathToCS() |
org.apache.hadoop.fs.Path |
getResDir() |
org.apache.hadoop.fs.Path |
getResFile() |
AtomicInteger |
getSequencer() |
DataInput |
getStream() |
org.apache.hadoop.fs.Path |
getTempDirForPath(org.apache.hadoop.fs.Path path)
Create a temporary directory depending on the path specified.
|
org.apache.hadoop.fs.Path |
getTempDirForPath(org.apache.hadoop.fs.Path path,
boolean isFinalJob)
Create a temporary directory depending on the path specified.
|
org.antlr.runtime.TokenRewriteStream |
getTokenRewriteStream() |
int |
getTryCount() |
org.antlr.runtime.TokenRewriteStream |
getViewTokenRewriteStream(String viewFullyQualifiedName) |
boolean |
isCboSucceeded() |
boolean |
isExplainSkipExecution()
Find whether we should execute the current query due to explain
|
boolean |
isHDFSCleanup() |
boolean |
isLocalOnlyExecutionMode()
Does Hive want to run tasks entirely on the local machine
(where the query is being compiled)?
Today this translates into running hadoop jobs locally
|
boolean |
isMRTmpFileURI(String uriStr)
Check if path is for intermediate data
|
boolean |
isNeedLockMgr() |
boolean |
isSkipTableMasking() |
void |
removeMaterializedCTEs()
Remove any created directories for CTEs.
|
void |
removeScratchDir()
Remove any created scratch directories.
|
void |
resetOpContext() |
void |
resetStream() |
void |
restoreOriginalTracker() |
void |
setCboInfo(String cboInfo) |
void |
setCboSucceeded(boolean cboSucceeded) |
void |
setCmd(String cmd)
Set the original query command.
|
void |
setExplainConfig(ExplainConfiguration explainConfig) |
void |
setHDFSCleanup(boolean isHDFSCleanup) |
void |
setHeartbeater(DbTxnManager.Heartbeater heartbeater) |
void |
setHiveLocks(List<HiveLock> hiveLocks) |
void |
setHiveTxnManager(HiveTxnManager txnMgr) |
void |
setIsUpdateDeleteMerge(boolean isUpdate) |
void |
setNeedLockMgr(boolean needLockMgr) |
void |
setOperation(Context.Operation operation) |
void |
setOriginalTracker(String originalTracker) |
void |
setResDir(org.apache.hadoop.fs.Path resDir) |
void |
setResFile(org.apache.hadoop.fs.Path resFile) |
void |
setSkipTableMasking(boolean skipTableMasking) |
void |
setTokenRewriteStream(org.antlr.runtime.TokenRewriteStream tokenRewriteStream)
Set the token rewrite stream being used to parse the current top-level SQL
statement.
|
void |
setTryCount(int tryCount) |
protected int pathid
protected ExplainConfiguration explainConfig
protected String cboInfo
protected boolean cboSucceeded
protected String cmd
protected int tryCount
protected HiveTxnManager hiveTxnManager
public Context(org.apache.hadoop.conf.Configuration conf) throws IOException
IOException
public Context(org.apache.hadoop.conf.Configuration conf, String executionId)
public void setOperation(Context.Operation operation)
public Context.DestClausePrefix getDestNamePrefix(ASTNode curNode)
public Context.DestClausePrefix addDestNamePrefix(int pos, Context.DestClausePrefix prefix)
pos
- ordinal index of specific TOK_INSERT as child of TOK_QUERY
public Map<LoadTableDesc,WriteEntity> getLoadTableOutputMap()
public Map<WriteEntity,List<HiveLockObj>> getOutputLockObjects()
public boolean isExplainSkipExecution()
public boolean getExplainLogical()
public ExplainConfiguration.AnalyzeState getExplainAnalyze()
public void setCmd(String cmd)
cmd
- the original query command string
public String getCmd()
public org.apache.hadoop.fs.Path getLocalScratchDir(boolean mkdir)
public org.apache.hadoop.fs.Path getMRScratchDir()
public org.apache.hadoop.fs.Path getTempDirForPath(org.apache.hadoop.fs.Path path, boolean isFinalJob)
BlobStorageUtils.areOptimizationsEnabled(Configuration)
are both true, then return a path on
the blobstore.
- If path is on HDFS, then create a staging directory inside the path
path
- Path used to verify the Filesystem to use for temporary directory
isFinalJob
- true if the required Path
will be used for the final job (e.g. the final FSOP)
public org.apache.hadoop.fs.Path getTempDirForPath(org.apache.hadoop.fs.Path path)
path
- Path used to verify the Filesystem to use for temporary directory
public void removeScratchDir()
public void removeMaterializedCTEs()
public boolean isMRTmpFileURI(String uriStr)
public org.apache.hadoop.fs.Path getMRTmpPath(URI uri)
public org.apache.hadoop.fs.Path getMRTmpPath()
public org.apache.hadoop.fs.Path getLocalTmpPath()
public org.apache.hadoop.fs.Path getExternalTmpPath(org.apache.hadoop.fs.Path path)
path
- external Path to which the tmp data has to be eventually moved
public org.apache.hadoop.fs.Path getExtTmpPathRelTo(org.apache.hadoop.fs.Path path)
public org.apache.hadoop.fs.Path getResFile()
public void setResFile(org.apache.hadoop.fs.Path resFile)
resFile
- the resFile to set
public org.apache.hadoop.fs.Path getResDir()
public void setResDir(org.apache.hadoop.fs.Path resDir)
resDir
- the resDir to set
public void clear() throws IOException
IOException
public DataInput getStream()
public void resetStream()
public void setTokenRewriteStream(org.antlr.runtime.TokenRewriteStream tokenRewriteStream)
tokenRewriteStream
- the stream being used
public org.antlr.runtime.TokenRewriteStream getTokenRewriteStream()
public void addViewTokenRewriteStream(String viewFullyQualifiedName, org.antlr.runtime.TokenRewriteStream tokenRewriteStream)
public org.antlr.runtime.TokenRewriteStream getViewTokenRewriteStream(String viewFullyQualifiedName)
public static String generateExecutionId()
public boolean isLocalOnlyExecutionMode()
public HiveTxnManager getHiveTxnManager()
public void setHiveTxnManager(HiveTxnManager txnMgr)
public void setOriginalTracker(String originalTracker)
public void restoreOriginalTracker()
public void addCS(String path, org.apache.hadoop.fs.ContentSummary cs)
public org.apache.hadoop.fs.ContentSummary getCS(org.apache.hadoop.fs.Path path)
public org.apache.hadoop.fs.ContentSummary getCS(String path)
public org.apache.hadoop.conf.Configuration getConf()
public boolean isHDFSCleanup()
public void setHDFSCleanup(boolean isHDFSCleanup)
isHDFSCleanup
- the isHDFSCleanup to set
public boolean isNeedLockMgr()
public void setNeedLockMgr(boolean needLockMgr)
public int getTryCount()
public void setTryCount(int tryCount)
public String getCboInfo()
public void setCboInfo(String cboInfo)
public boolean isCboSucceeded()
public void setCboSucceeded(boolean cboSucceeded)
public AtomicInteger getSequencer()
public CompilationOpContext getOpContext()
public DbTxnManager.Heartbeater getHeartbeater()
public void setHeartbeater(DbTxnManager.Heartbeater heartbeater)
public void checkHeartbeaterLockException() throws LockException
LockException
public boolean isSkipTableMasking()
public void setSkipTableMasking(boolean skipTableMasking)
public ExplainConfiguration getExplainConfig()
public void setExplainConfig(ExplainConfiguration explainConfig)
public void resetOpContext()
public boolean getIsUpdateDeleteMerge()
public void setIsUpdateDeleteMerge(boolean isUpdate)
Copyright © 2021 The Apache Software Foundation. All rights reserved.