public class Context extends Object
| Modifier and Type | Class and Description |
|---|---|
static class |
Context.DestClausePrefix |
static class |
Context.Operation
These ops require special handling in various places
(note that Insert into Acid table is in OTHER category)
|
static class |
Context.RewritePolicy |
| Modifier and Type | Field and Description |
|---|---|
protected String |
calcitePlan |
protected String |
cboInfo |
protected boolean |
cboSucceeded |
protected String |
cmd |
protected ExplainConfiguration |
explainConfig |
static String |
EXT_PREFIX |
protected List<HiveLock> |
hiveLocks |
protected HiveTxnManager |
hiveTxnManager |
protected String |
optimizedSql |
protected int |
pathid |
| Modifier | Constructor and Description |
|---|---|
|
Context(org.apache.hadoop.conf.Configuration conf) |
protected |
Context(Context ctx) |
| Modifier and Type | Method and Description |
|---|---|
void |
addCS(String path,
org.apache.hadoop.fs.ContentSummary cs) |
Context.DestClausePrefix |
addDeleteOfUpdateDestNamePrefix(int pos,
Context.DestClausePrefix prefix) |
Context.DestClausePrefix |
addDestNamePrefix(int pos,
Context.DestClausePrefix prefix)
Will make SemanticAnalyzer.Phase1Ctx#dest in subtree rooted at 'tree' use 'prefix'.
|
void |
addMaterializedTable(String cteName,
Table table,
Statistics statistics) |
void |
addSubContext(Context context) |
void |
addViewTokenRewriteStream(String viewFullyQualifiedName,
org.antlr.runtime.TokenRewriteStream tokenRewriteStream) |
void |
checkHeartbeaterLockException() |
void |
clear() |
void |
clear(boolean deleteResultDir) |
boolean |
enableUnparse() |
static String |
generateExecutionId()
Generate a unique executionId.
|
String |
getCalcitePlan() |
String |
getCboInfo() |
String |
getCmd()
Find the original query command.
|
org.apache.hadoop.conf.Configuration |
getConf() |
org.apache.hadoop.fs.ContentSummary |
getCS(org.apache.hadoop.fs.Path path) |
org.apache.hadoop.fs.ContentSummary |
getCS(String path) |
Context.DestClausePrefix |
getDestNamePrefix(ASTNode curNode,
QB queryBlock)
The suffix is always relative to a given ASTNode.
|
String |
getExecutionId() |
int |
getExecutionIndex() |
ExplainConfiguration.AnalyzeState |
getExplainAnalyze() |
ExplainConfiguration |
getExplainConfig() |
boolean |
getExplainLogical()
Find whether the current query is a logical explain query
|
org.apache.hadoop.fs.Path |
getExternalTmpPath(org.apache.hadoop.fs.Path path)
Get a path to store tmp data destined for external Path.
|
org.apache.hadoop.fs.Path |
getExtTmpPathRelTo(org.apache.hadoop.fs.Path path)
This is similar to getExternalTmpPath() with difference being this method returns temp path
within passed in uri, whereas getExternalTmpPath() ignores passed in path and returns temp
path within /tmp
|
org.apache.hadoop.fs.Path |
getFsResultCacheDirs() |
Map<String,org.apache.hadoop.fs.Path> |
getFsScratchDirs() |
DbTxnManager.Heartbeater |
getHeartbeater() |
List<HiveLock> |
getHiveLocks() |
HiveTxnManager |
getHiveTxnManager() |
boolean |
getIsUpdateDeleteMerge() |
Map<LoadTableDesc,WriteEntity> |
getLoadTableOutputMap() |
org.apache.hadoop.fs.Path |
getLocalScratchDir(boolean mkdir)
Create a local scratch directory on demand and return it.
|
org.apache.hadoop.fs.Path |
getLocalTmpPath()
Get a tmp path on local host to store intermediate data.
|
org.apache.hadoop.fs.Path |
getLocation() |
Table |
getMaterializedTable(String cteName) |
Statistics |
getMaterializedTableStats(TableName tableName) |
String |
getMoveTaskId() |
org.apache.hadoop.fs.Path |
getMRScratchDir()
Create a map-reduce scratch directory on demand and return it.
|
org.apache.hadoop.fs.Path |
getMRScratchDir(boolean mkDir)
Create a map-reduce scratch directory on demand and return it.
|
org.apache.hadoop.fs.Path |
getMRTmpPath()
Get a path to store map-reduce intermediate data in.
|
org.apache.hadoop.fs.Path |
getMRTmpPath(boolean mkDir) |
org.apache.hadoop.fs.Path |
getMRTmpPath(URI uri) |
CompilationOpContext |
getOpContext() |
Context.Operation |
getOperation() |
String |
getOptimizedSql() |
Map<WriteEntity,List<HiveLockObj>> |
getOutputLockObjects() |
List<org.apache.commons.lang3.tuple.Pair<String,String>> |
getParsedTables() |
Map<String,org.apache.hadoop.fs.ContentSummary> |
getPathToCS() |
PlanMapper |
getPlanMapper() |
String |
getReplPolicy() |
org.apache.hadoop.fs.Path |
getResDir() |
org.apache.hadoop.fs.Path |
getResFile() |
AtomicInteger |
getSequencer() |
StatsSource |
getStatsSource() |
DataInput |
getStream() |
org.apache.hadoop.fs.Path |
getTempDirForFinalJobPath(org.apache.hadoop.fs.Path path)
Create a temporary directory depending on the path specified.
|
org.apache.hadoop.fs.Path |
getTempDirForInterimJobPath(org.apache.hadoop.fs.Path path)
Create a temporary directory depending on the path specified.
|
Table |
getTempTableForLoad() |
org.antlr.runtime.TokenRewriteStream |
getTokenRewriteStream() |
org.antlr.runtime.TokenRewriteStream |
getViewTokenRewriteStream(String viewFullyQualifiedName) |
WmContext |
getWmContext() |
boolean |
isCboSucceeded() |
boolean |
isDeleteBranchOfUpdate(String dest) |
boolean |
isExplainPlan() |
boolean |
isExplainSkipExecution()
Find whether we should execute the current query due to explain
|
boolean |
isHDFSCleanup() |
boolean |
isLoadingMaterializedView() |
boolean |
isLocalOnlyExecutionMode()
Does Hive wants to run tasks entirely on the local machine
(where the query is being compiled)?
Today this translates into running hadoop jobs locally
|
boolean |
isMRTmpFileURI(String uriStr)
Check if path is for intermediate data
|
boolean |
isNeedLockMgr() |
boolean |
isResultCacheDir(org.apache.hadoop.fs.Path destinationPath)
Check if the path is a result cache dir for this query.
|
boolean |
isScheduledQuery() |
boolean |
isSkipTableMasking() |
void |
removeMaterializedCTEs()
Remove any created directories for CTEs.
|
void |
removeResultCacheDir()
Remove any created scratch directories.
|
void |
removeScratchDir()
Remove any created scratch directories.
|
void |
resetOpContext() |
void |
resetStream() |
void |
restoreOriginalTracker() |
void |
setCalcitePlan(String calcitePlan) |
void |
setCboInfo(String cboInfo) |
void |
setCboSucceeded(boolean cboSucceeded) |
void |
setCmd(String cmd)
Set the original query command.
|
void |
setConf(HiveConf conf) |
void |
setEnableUnparse(boolean enableUnparse) |
void |
setExecutionIndex(int executionIndex) |
void |
setExplainConfig(ExplainConfiguration explainConfig) |
void |
setExplainPlan(boolean t) |
void |
setFsResultCacheDirs(org.apache.hadoop.fs.Path fsResultCacheDirs) |
void |
setHDFSCleanup(boolean isHDFSCleanup) |
void |
setHeartbeater(DbTxnManager.Heartbeater heartbeater) |
void |
setHiveLocks(List<HiveLock> hiveLocks) |
void |
setHiveTxnManager(HiveTxnManager txnMgr) |
void |
setIsLoadingMaterializedView(boolean isLoadingMaterializedView) |
void |
setIsUpdateDeleteMerge(boolean isUpdate) |
void |
setLocation(org.apache.hadoop.fs.Path location) |
void |
setNeedLockMgr(boolean needLockMgr) |
void |
setOpContext(CompilationOpContext opContext) |
void |
setOperation(Context.Operation operation) |
void |
setOperation(Context.Operation operation,
boolean splitUpdate) |
void |
setOptimizedSql(String newSql) |
void |
setOriginalTracker(String originalTracker) |
void |
setParsedTables(List<org.apache.commons.lang3.tuple.Pair<String,String>> parsedTables) |
void |
setPlanMapper(PlanMapper planMapper) |
void |
setReplPolicy(String replPolicy) |
void |
setResDir(org.apache.hadoop.fs.Path resDir) |
void |
setResFile(org.apache.hadoop.fs.Path resFile) |
void |
setScheduledQuery(boolean scheduledQuery) |
void |
setSkipTableMasking(boolean skipTableMasking) |
void |
setStatsSource(StatsSource statsSource) |
void |
setTempTableForLoad(Table tempTableForLoad) |
void |
setTokenRewriteStream(org.antlr.runtime.TokenRewriteStream tokenRewriteStream)
Set the token rewrite stream being used to parse the current top-level SQL
statement.
|
void |
setWmContext(WmContext wmContext) |
protected int pathid
protected ExplainConfiguration explainConfig
protected String cboInfo
protected boolean cboSucceeded
protected String optimizedSql
protected String calcitePlan
protected String cmd
protected HiveTxnManager hiveTxnManager
public static final String EXT_PREFIX
public Context(org.apache.hadoop.conf.Configuration conf)
protected Context(Context ctx)
public void setOperation(Context.Operation operation)
public void setOperation(Context.Operation operation, boolean splitUpdate)
public Context.Operation getOperation()
public WmContext getWmContext()
public void setWmContext(WmContext wmContext)
public void setReplPolicy(String replPolicy)
public String getReplPolicy()
public org.apache.hadoop.fs.Path getLocation()
public void setLocation(org.apache.hadoop.fs.Path location)
public Context.DestClausePrefix getDestNamePrefix(ASTNode curNode, QB queryBlock)
AcidUtils.Operation. See usages
of getDestNamePrefix(ASTNode, QB) and
SemanticAnalyzer.updating(String) and
SemanticAnalyzer.deleting(String).

public Context.DestClausePrefix addDestNamePrefix(int pos, Context.DestClausePrefix prefix)
pos - ordinal index of specific TOK_INSERT as child of TOK_QUERY

public Context.DestClausePrefix addDeleteOfUpdateDestNamePrefix(int pos, Context.DestClausePrefix prefix)
public void setFsResultCacheDirs(org.apache.hadoop.fs.Path fsResultCacheDirs)
public org.apache.hadoop.fs.Path getFsResultCacheDirs()
public Map<LoadTableDesc,WriteEntity> getLoadTableOutputMap()
public Map<WriteEntity,List<HiveLockObj>> getOutputLockObjects()
public boolean isExplainSkipExecution()
public boolean getExplainLogical()
public ExplainConfiguration.AnalyzeState getExplainAnalyze()
public void setCmd(String cmd)
cmd - the original query command string

public String getCmd()
public org.apache.hadoop.fs.Path getLocalScratchDir(boolean mkdir)
public org.apache.hadoop.fs.Path getMRScratchDir(boolean mkDir)
mkDir - flag to indicate if scratch dir is to be created or not

public org.apache.hadoop.fs.Path getMRScratchDir()
public org.apache.hadoop.fs.Path getTempDirForInterimJobPath(org.apache.hadoop.fs.Path path)
BlobStorageUtils.areOptimizationsEnabled(Configuration) are both true, then return a path on
the blobstore.
- If path is on HDFS, then create a staging directory inside the path.
path - Path used to verify the Filesystem to use for temporary directory

public org.apache.hadoop.fs.Path getTempDirForFinalJobPath(org.apache.hadoop.fs.Path path)
path - Path used to verify the Filesystem to use for temporary directory

public void removeResultCacheDir()
public void removeScratchDir()
public void removeMaterializedCTEs()
public boolean isMRTmpFileURI(String uriStr)
public boolean isResultCacheDir(org.apache.hadoop.fs.Path destinationPath)
destinationPath -

public org.apache.hadoop.fs.Path getMRTmpPath(URI uri)
public org.apache.hadoop.fs.Path getMRTmpPath(boolean mkDir)
public org.apache.hadoop.fs.Path getMRTmpPath()
public org.apache.hadoop.fs.Path getLocalTmpPath()
public org.apache.hadoop.fs.Path getExternalTmpPath(org.apache.hadoop.fs.Path path)
path - external Path to which the tmp data has to be eventually moved

public org.apache.hadoop.fs.Path getExtTmpPathRelTo(org.apache.hadoop.fs.Path path)
public String getMoveTaskId()
public org.apache.hadoop.fs.Path getResFile()
public void setResFile(org.apache.hadoop.fs.Path resFile)
resFile - the resFile to set

public org.apache.hadoop.fs.Path getResDir()
public void setResDir(org.apache.hadoop.fs.Path resDir)
resDir - the resDir to set

public void clear()
throws IOException
IOException

public void clear(boolean deleteResultDir)
throws IOException
IOException

public DataInput getStream()
public void resetStream()
public void setTokenRewriteStream(org.antlr.runtime.TokenRewriteStream tokenRewriteStream)
tokenRewriteStream - the stream being used

public org.antlr.runtime.TokenRewriteStream getTokenRewriteStream()
public void addViewTokenRewriteStream(String viewFullyQualifiedName, org.antlr.runtime.TokenRewriteStream tokenRewriteStream)
public org.antlr.runtime.TokenRewriteStream getViewTokenRewriteStream(String viewFullyQualifiedName)
public static String generateExecutionId()
public boolean isLocalOnlyExecutionMode()
public HiveTxnManager getHiveTxnManager()
public void setHiveTxnManager(HiveTxnManager txnMgr)
public void setOriginalTracker(String originalTracker)
public void restoreOriginalTracker()
public void addSubContext(Context context)
public void addCS(String path, org.apache.hadoop.fs.ContentSummary cs)
public org.apache.hadoop.fs.ContentSummary getCS(org.apache.hadoop.fs.Path path)
public org.apache.hadoop.fs.ContentSummary getCS(String path)
public org.apache.hadoop.conf.Configuration getConf()
public boolean isHDFSCleanup()
public void setHDFSCleanup(boolean isHDFSCleanup)
isHDFSCleanup - the isHDFSCleanup to set

public boolean isNeedLockMgr()
public void setNeedLockMgr(boolean needLockMgr)
public String getCboInfo()
public void setCboInfo(String cboInfo)
public String getOptimizedSql()
public void setOptimizedSql(String newSql)
public boolean isCboSucceeded()
public void setCboSucceeded(boolean cboSucceeded)
public String getCalcitePlan()
public void setCalcitePlan(String calcitePlan)
public void addMaterializedTable(String cteName, Table table, Statistics statistics)
public Statistics getMaterializedTableStats(TableName tableName)
public AtomicInteger getSequencer()
public CompilationOpContext getOpContext()
public void setOpContext(CompilationOpContext opContext)
public DbTxnManager.Heartbeater getHeartbeater()
public void setHeartbeater(DbTxnManager.Heartbeater heartbeater)
public void checkHeartbeaterLockException()
throws LockException
LockException

public boolean isSkipTableMasking()
public void setSkipTableMasking(boolean skipTableMasking)
public ExplainConfiguration getExplainConfig()
public boolean isExplainPlan()
public void setExplainPlan(boolean t)
public void setExplainConfig(ExplainConfiguration explainConfig)
public void resetOpContext()
public boolean getIsUpdateDeleteMerge()
public void setIsUpdateDeleteMerge(boolean isUpdate)
public boolean isLoadingMaterializedView()
public void setIsLoadingMaterializedView(boolean isLoadingMaterializedView)
public String getExecutionId()
public void setPlanMapper(PlanMapper planMapper)
public PlanMapper getPlanMapper()
public void setStatsSource(StatsSource statsSource)
public StatsSource getStatsSource()
public int getExecutionIndex()
public void setExecutionIndex(int executionIndex)
public void setConf(HiveConf conf)
public Table getTempTableForLoad()
public void setTempTableForLoad(Table tempTableForLoad)
public boolean enableUnparse()
public void setEnableUnparse(boolean enableUnparse)
public boolean isScheduledQuery()
public void setScheduledQuery(boolean scheduledQuery)
public void setParsedTables(List<org.apache.commons.lang3.tuple.Pair<String,String>> parsedTables)
public boolean isDeleteBranchOfUpdate(String dest)
Copyright © 2024 The Apache Software Foundation. All rights reserved.