public class Hive extends Object
Modifier and Type | Class and Description |
---|---|
static class |
Hive.SchemaException |
Modifier and Type | Method and Description |
---|---|
void |
abortTransactions(List<Long> txnids) |
void |
addCheckConstraint(List<SQLCheckConstraint> checkConstraints) |
void |
addDefaultConstraint(List<SQLDefaultConstraint> defaultConstraints) |
void |
addForeignKey(List<SQLForeignKey> foreignKeyCols) |
void |
addNotNullConstraint(List<SQLNotNullConstraint> notNullConstraintCols) |
void |
addPrimaryKey(List<SQLPrimaryKey> primaryKeyCols) |
void |
addUniqueConstraint(List<SQLUniqueConstraint> uniqueConstraintCols) |
void |
alterDatabase(String dbName,
Database db) |
void |
alterFunction(String dbName,
String funcName,
Function newFunction) |
void |
alterPartition(String tblName,
Partition newPart,
EnvironmentContext environmentContext)
Updates the existing partition metadata with the new metadata.
|
void |
alterPartition(String dbName,
String tblName,
Partition newPart,
EnvironmentContext environmentContext)
Updates the existing partition metadata with the new metadata.
|
void |
alterPartitions(String tblName,
List<Partition> newParts,
EnvironmentContext environmentContext)
Updates the existing table metadata with the new metadata.
|
WMFullResourcePlan |
alterResourcePlan(String rpName,
WMNullableResourcePlan resourcePlan,
boolean canActivateDisabled,
boolean isForceDeactivate,
boolean isReplace) |
void |
alterTable(String dbName,
String tblName,
Table newTbl,
boolean cascade,
EnvironmentContext environmentContext) |
void |
alterTable(String fullyQlfdTblName,
Table newTbl,
boolean cascade,
EnvironmentContext environmentContext) |
void |
alterTable(String fullyQlfdTblName,
Table newTbl,
EnvironmentContext environmentContext)
Updates the existing table metadata with the new metadata.
|
void |
alterTable(Table newTbl,
EnvironmentContext environmentContext) |
void |
alterWMPool(WMNullablePool pool,
String poolPath) |
void |
alterWMTrigger(WMTrigger trigger) |
void |
cacheFileMetadata(String dbName,
String tableName,
String partName,
boolean allParts) |
void |
cancelDelegationToken(String tokenStrForm) |
static void |
clearDestForSubDirSrc(HiveConf conf,
org.apache.hadoop.fs.Path dest,
org.apache.hadoop.fs.Path src,
boolean isSrcLocal) |
void |
clearFileMetadata(List<Long> fileIds) |
void |
clearMetaCallTiming() |
static void |
closeCurrent() |
void |
compact(String dbname,
String tableName,
String partName,
String compactType,
Map<String,String> tblproperties)
Deprecated.
|
CompactionResponse |
compact2(String dbname,
String tableName,
String partName,
String compactType,
Map<String,String> tblproperties)
Enqueue a compaction request.
|
static Partition |
convertAddSpecToMetaPartition(Table tbl,
AddPartitionDesc.OnePartitionDesc addSpec,
HiveConf conf) |
protected static void |
copyFiles(HiveConf conf,
org.apache.hadoop.fs.Path srcf,
org.apache.hadoop.fs.Path destf,
org.apache.hadoop.fs.FileSystem fs,
boolean isSrcLocal,
boolean isAcidIUD,
boolean isOverwrite,
List<org.apache.hadoop.fs.Path> newFiles,
boolean isBucketed,
boolean isFullAcidTable,
boolean isManaged)
Copy files.
|
void |
createDatabase(Database db)
Create a Database.
|
void |
createDatabase(Database db,
boolean ifNotExist)
Create a database
|
void |
createFunction(Function func) |
void |
createOrDropTriggerToPoolMapping(String resourcePlanName,
String triggerName,
String poolPath,
boolean shouldDrop) |
void |
createOrUpdateWMMapping(WMMapping mapping,
boolean isUpdate) |
Partition |
createPartition(Table tbl,
Map<String,String> partSpec)
Creates a partition.
|
List<Partition> |
createPartitions(AddPartitionDesc addPartitionDesc) |
void |
createResourcePlan(WMResourcePlan resourcePlan,
String copyFromName) |
void |
createRole(String roleName,
String ownerName) |
void |
createTable(String tableName,
List<String> columns,
List<String> partCols,
Class<? extends org.apache.hadoop.mapred.InputFormat> fileInputFormat,
Class<?> fileOutputFormat)
Creates a table metadata and the directory for the table data
|
void |
createTable(String tableName,
List<String> columns,
List<String> partCols,
Class<? extends org.apache.hadoop.mapred.InputFormat> fileInputFormat,
Class<?> fileOutputFormat,
int bucketCount,
List<String> bucketCols)
Creates a table metadata and the directory for the table data
|
void |
createTable(String tableName,
List<String> columns,
List<String> partCols,
Class<? extends org.apache.hadoop.mapred.InputFormat> fileInputFormat,
Class<?> fileOutputFormat,
int bucketCount,
List<String> bucketCols,
Map<String,String> parameters)
Create a table metadata and the directory for the table data
|
void |
createTable(Table tbl)
Creates the table with the give objects
|
void |
createTable(Table tbl,
boolean ifNotExists) |
void |
createTable(Table tbl,
boolean ifNotExists,
List<SQLPrimaryKey> primaryKeys,
List<SQLForeignKey> foreignKeys,
List<SQLUniqueConstraint> uniqueConstraints,
List<SQLNotNullConstraint> notNullConstraints,
List<SQLDefaultConstraint> defaultConstraints,
List<SQLCheckConstraint> checkConstraints)
Creates the table with the given objects.
|
void |
createWMPool(WMPool pool) |
void |
createWMTrigger(WMTrigger trigger) |
boolean |
databaseExists(String dbName)
Query metadata to see if a database with the given name already exists.
|
boolean |
deletePartitionColumnStatistics(String dbName,
String tableName,
String partName,
String colName) |
boolean |
deleteTableColumnStatistics(String dbName,
String tableName,
String colName) |
void |
dropConstraint(String dbName,
String tableName,
String constraintName) |
void |
dropDatabase(String name)
Drop a database.
|
void |
dropDatabase(String name,
boolean deleteData,
boolean ignoreUnknownDb)
Drop a database
|
void |
dropDatabase(String name,
boolean deleteData,
boolean ignoreUnknownDb,
boolean cascade)
Drop a database
|
void |
dropFunction(String dbName,
String funcName) |
boolean |
dropPartition(String tblName,
List<String> part_vals,
boolean deleteData) |
boolean |
dropPartition(String db_name,
String tbl_name,
List<String> part_vals,
boolean deleteData) |
boolean |
dropPartition(String dbName,
String tableName,
List<String> partVals,
PartitionDropOptions options) |
List<Partition> |
dropPartitions(String tblName,
List<DropTableDesc.PartSpec> partSpecs,
boolean deleteData,
boolean ifExists) |
List<Partition> |
dropPartitions(String tblName,
List<DropTableDesc.PartSpec> partSpecs,
PartitionDropOptions dropOptions) |
List<Partition> |
dropPartitions(String dbName,
String tblName,
List<DropTableDesc.PartSpec> partSpecs,
boolean deleteData,
boolean ifExists) |
List<Partition> |
dropPartitions(String dbName,
String tblName,
List<DropTableDesc.PartSpec> partSpecs,
PartitionDropOptions dropOptions) |
List<Partition> |
dropPartitions(Table table,
List<String> partDirNames,
boolean deleteData,
boolean ifExists)
drop the partitions specified as directory names associated with the table.
|
void |
dropResourcePlan(String rpName) |
void |
dropRole(String roleName) |
void |
dropTable(String tableName)
Drops table along with the data in it.
|
void |
dropTable(String tableName,
boolean ifPurge)
Drops table along with the data in it.
|
void |
dropTable(String dbName,
String tableName)
Drops table along with the data in it.
|
void |
dropTable(String dbName,
String tableName,
boolean deleteData,
boolean ignoreUnknownTab)
Drops the table.
|
void |
dropTable(String dbName,
String tableName,
boolean deleteData,
boolean ignoreUnknownTab,
boolean ifPurge)
Drops the table.
|
void |
dropWMMapping(WMMapping mapping) |
void |
dropWMPool(String resourcePlanName,
String poolPath) |
void |
dropWMTrigger(String rpName,
String triggerName) |
com.google.common.collect.ImmutableMap<String,Long> |
dumpAndClearMetaCallTiming(String phase) |
List<Partition> |
exchangeTablePartitions(Map<String,String> partitionSpecs,
String sourceDb,
String sourceTable,
String destDb,
String destinationTableName) |
PrincipalPrivilegeSet |
get_privilege_set(HiveObjectType objectType,
String db_name,
String table_name,
List<String> part_values,
String column_name,
String user_name,
List<String> group_names) |
static Hive |
get() |
static Hive |
get(boolean doRegisterAllFns) |
static Hive |
get(org.apache.hadoop.conf.Configuration c,
Class<?> clazz) |
static Hive |
get(HiveConf c)
Gets hive object for the current thread.
|
static Hive |
get(HiveConf c,
boolean needsRefresh)
get a connection to metastore.
|
WMFullResourcePlan |
getActiveResourcePlan() |
AggrStats |
getAggrColStatsFor(String dbName,
String tblName,
List<String> colNames,
List<String> partName) |
List<String> |
getAllDatabases()
Get all existing database names.
|
List<Function> |
getAllFunctions() |
List<Table> |
getAllMaterializedViewObjects(String dbName)
Get all materialized views for the specified database.
|
List<String> |
getAllMaterializedViews(String dbName)
Get all materialized view names for the specified database.
|
Set<Partition> |
getAllPartitionsOf(Table tbl)
Get all the partitions; unlike
getPartitions(Table) , does not include auth. |
List<WMResourcePlan> |
getAllResourcePlans() |
List<String> |
getAllRoleNames()
Get all existing role names.
|
List<Table> |
getAllTableObjects(String dbName)
Get all tables for the specified database.
|
List<String> |
getAllTables()
Get all table names for the current database.
|
List<String> |
getAllTables(String dbName)
Get all table names for the specified database.
|
List<org.apache.calcite.plan.RelOptMaterialization> |
getAllValidMaterializedViews(List<String> tablesUsed,
boolean forceMVContentsUpToDate)
Get the materialized views that have been enabled for rewriting from the
metastore.
|
List<SQLCheckConstraint> |
getCheckConstraintList(String dbName,
String tblName) |
CheckConstraint |
getCheckConstraints(String dbName,
String tblName) |
HiveConf |
getConf() |
Database |
getDatabase(String dbName)
Get the database by name.
|
Database |
getDatabase(String catName,
String dbName)
Get the database by name.
|
Database |
getDatabaseCurrent()
Get the Database object for current database
|
List<String> |
getDatabasesByPattern(String databasePattern)
Get all existing databases that match the given
pattern.
|
List<SQLDefaultConstraint> |
getDefaultConstraintList(String dbName,
String tblName) |
DefaultConstraint |
getDefaultConstraints(String dbName,
String tblName) |
String |
getDelegationToken(String owner,
String renewer) |
CheckConstraint |
getEnabledCheckConstraints(String dbName,
String tblName)
Get CHECK constraints associated with the table that are enabled
|
DefaultConstraint |
getEnabledDefaultConstraints(String dbName,
String tblName)
Get Default constraints associated with the table that are enabled
|
NotNullConstraint |
getEnabledNotNullConstraints(String dbName,
String tblName)
Get not null constraints associated with the table that are enabled/enforced.
|
static List<FieldSchema> |
getFieldsFromDeserializer(String name,
Deserializer serde) |
static List<FieldSchema> |
getFieldsFromDeserializerForMsStorage(Table tbl,
Deserializer deserializer) |
Iterable<Map.Entry<Long,ByteBuffer>> |
getFileMetadata(List<Long> fileIds) |
Iterable<Map.Entry<Long,MetadataPpdResult>> |
getFileMetadataByExpr(List<Long> fileIds,
ByteBuffer sarg,
boolean doGetFooters) |
List<SQLForeignKey> |
getForeignKeyList(String dbName,
String tblName) |
ForeignKeyInfo |
getForeignKeys(String dbName,
String tblName)
Get all foreign keys associated with the table.
|
Function |
getFunction(String dbName,
String funcName) |
List<String> |
getFunctions(String dbName,
String pattern) |
String |
getMetaConf(String propName) |
IMetaStoreClient |
getMSC() |
IMetaStoreClient |
getMSC(boolean allowEmbedded,
boolean forceCreate) |
List<SQLNotNullConstraint> |
getNotNullConstraintList(String dbName,
String tblName) |
NotNullConstraint |
getNotNullConstraints(String dbName,
String tblName)
Get all not null constraints associated with the table.
|
int |
getNumPartitionsByFilter(Table tbl,
String filter)
Get a number of Partitions by filter.
|
Partition |
getPartition(Table tbl,
Map<String,String> partSpec,
boolean forceCreate) |
Partition |
getPartition(Table tbl,
Map<String,String> partSpec,
boolean forceCreate,
String partPath,
boolean inheritTableSpecs)
Returns partition metadata
|
Map<String,List<ColumnStatisticsObj>> |
getPartitionColumnStatistics(String dbName,
String tableName,
List<String> partNames,
List<String> colNames) |
List<String> |
getPartitionNames(String tblName,
short max) |
List<String> |
getPartitionNames(String dbName,
String tblName,
Map<String,String> partSpec,
short max) |
List<String> |
getPartitionNames(String dbName,
String tblName,
short max) |
List<Partition> |
getPartitions(Table tbl)
get all the partitions that the table has
|
List<Partition> |
getPartitions(Table tbl,
Map<String,String> partialPartSpec)
get all the partitions of the table that matches the given partial
specification.
|
List<Partition> |
getPartitions(Table tbl,
Map<String,String> partialPartSpec,
short limit)
get all the partitions of the table that matches the given partial
specification.
|
boolean |
getPartitionsByExpr(Table tbl,
ExprNodeGenericFuncDesc expr,
HiveConf conf,
List<Partition> result)
Get a list of Partitions by expr.
|
List<Partition> |
getPartitionsByFilter(Table tbl,
String filter)
Get a list of Partitions by filter.
|
List<Partition> |
getPartitionsByNames(Table tbl,
List<String> partNames)
Get all partitions of the table that matches the list of given partition names.
|
List<Partition> |
getPartitionsByNames(Table tbl,
Map<String,String> partialPartSpec)
get all the partitions of the table that matches the given partial
specification.
|
List<SQLPrimaryKey> |
getPrimaryKeyList(String dbName,
String tblName) |
PrimaryKeyInfo |
getPrimaryKeys(String dbName,
String tblName)
Get all primary key columns associated with the table.
|
ForeignKeyInfo |
getReliableForeignKeys(String dbName,
String tblName)
Get foreign keys associated with the table that are available for optimization.
|
NotNullConstraint |
getReliableNotNullConstraints(String dbName,
String tblName)
Get not null constraints associated with the table that are available for optimization.
|
PrimaryKeyInfo |
getReliablePrimaryKeys(String dbName,
String tblName)
Get primary key columns associated with the table that are available for optimization.
|
UniqueConstraint |
getReliableUniqueConstraints(String dbName,
String tblName)
Get unique constraints associated with the table that are available for optimization.
|
WMFullResourcePlan |
getResourcePlan(String rpName) |
List<RolePrincipalGrant> |
getRoleGrantInfoForPrincipal(String principalName,
PrincipalType principalType) |
StorageHandlerInfo |
getStorageHandlerInfo(Table table) |
SynchronizedMetaStoreClient |
getSynchronizedMSC() |
Table |
getTable(String tableName)
Returns metadata for the table named tableName
|
Table |
getTable(String tableName,
boolean throwException)
Returns metadata for the table named tableName
|
Table |
getTable(String dbName,
String tableName)
Returns metadata of the table
|
Table |
getTable(String dbName,
String tableName,
boolean throwException)
Returns metadata of the table
|
List<ColumnStatisticsObj> |
getTableColumnStatistics(String dbName,
String tableName,
List<String> colNames) |
List<String> |
getTablesByPattern(String tablePattern)
Returns all existing tables from default database which match the given
pattern.
|
List<String> |
getTablesByPattern(String dbName,
String tablePattern)
Returns all existing tables from the specified database which match the given
pattern.
|
List<String> |
getTablesByType(String dbName,
String pattern,
TableType type)
Returns all existing tables of a type (VIRTUAL_VIEW|EXTERNAL_TABLE|MANAGED_TABLE) from the specified
database which match the given pattern.
|
List<String> |
getTablesForDb(String database,
String tablePattern)
Returns all existing tables from the given database which match the given
pattern.
|
List<SQLUniqueConstraint> |
getUniqueConstraintList(String dbName,
String tblName) |
UniqueConstraint |
getUniqueConstraints(String dbName,
String tblName)
Get all unique constraints associated with the table.
|
List<org.apache.calcite.plan.RelOptMaterialization> |
getValidMaterializedView(String dbName,
String materializedViewName,
List<String> tablesUsed,
boolean forceMVContentsUpToDate) |
static Hive |
getWithFastCheck(HiveConf c)
Same as
get(HiveConf) , except that it checks only the object identity of existing
MS client, assuming the relevant settings would be unchanged within the same conf object. |
static Hive |
getWithFastCheck(HiveConf c,
boolean doRegisterAllFns)
Same as
get(HiveConf) , except that it checks only the object identity of existing
MS client, assuming the relevant settings would be unchanged within the same conf object. |
static Hive |
getWithoutRegisterFns(HiveConf c)
Same as
get(HiveConf) , except that it does not register all functions. |
boolean |
grantPrivileges(PrivilegeBag privileges) |
boolean |
grantRole(String roleName,
String userName,
PrincipalType principalType,
String grantor,
PrincipalType grantorType,
boolean grantOption) |
static boolean |
isHadoop1() |
static void |
listNewFilesRecursively(org.apache.hadoop.fs.FileSystem destFs,
org.apache.hadoop.fs.Path dest,
List<org.apache.hadoop.fs.Path> newFiles) |
List<Role> |
listRoles(String userName,
PrincipalType principalType) |
Map<Map<String,String>,Partition> |
loadDynamicPartitions(org.apache.hadoop.fs.Path loadPath,
String tableName,
Map<String,String> partSpec,
LoadTableDesc.LoadFileType loadFileType,
int numDP,
int numLB,
boolean isAcid,
long writeId,
int stmtId,
boolean hasFollowingStatsTask,
AcidUtils.Operation operation,
boolean isInsertOverwrite)
Given a source directory name of the load path, load all dynamically generated partitions
into the specified table and return a list of strings that represent the dynamic partition
paths.
|
Partition |
loadPartition(org.apache.hadoop.fs.Path loadPath,
Table tbl,
Map<String,String> partSpec,
LoadTableDesc.LoadFileType loadFileType,
boolean inheritTableSpecs,
boolean isSkewedStoreAsSubdir,
boolean isSrcLocal,
boolean isAcidIUDoperation,
boolean hasFollowingStatsTask,
Long writeId,
int stmtId,
boolean isInsertOverwrite)
Load a directory into a Hive Table Partition - Alters existing content of
the partition with the contents of loadPath.
|
void |
loadTable(org.apache.hadoop.fs.Path loadPath,
String tableName,
LoadTableDesc.LoadFileType loadFileType,
boolean isSrcLocal,
boolean isSkewedStoreAsSubdir,
boolean isAcidIUDoperation,
boolean hasFollowingStatsTask,
Long writeId,
int stmtId,
boolean isInsertOverwrite)
Load a directory into a Hive Table.
|
static void |
moveAcidFiles(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.FileStatus[] stats,
org.apache.hadoop.fs.Path dst,
List<org.apache.hadoop.fs.Path> newFiles) |
static boolean |
moveFile(HiveConf conf,
org.apache.hadoop.fs.Path srcf,
org.apache.hadoop.fs.Path destf,
boolean replace,
boolean isSrcLocal,
boolean isManaged) |
Table |
newTable(String tableName) |
void |
putFileMetadata(List<Long> fileIds,
List<ByteBuffer> metadata) |
void |
recycleDirToCmPath(org.apache.hadoop.fs.Path dataPath,
boolean isPurge)
Recycles the files recursively from the input path to the cmroot directory either by copying or moving it.
|
void |
reloadFunctions() |
void |
renamePartition(Table tbl,
Map<String,String> oldPartSpec,
Partition newPart)
Rename a old partition to new partition
|
protected void |
replaceFiles(org.apache.hadoop.fs.Path tablePath,
org.apache.hadoop.fs.Path srcf,
org.apache.hadoop.fs.Path destf,
org.apache.hadoop.fs.Path oldPath,
HiveConf conf,
boolean isSrcLocal,
boolean purge,
List<org.apache.hadoop.fs.Path> newFiles,
org.apache.hadoop.fs.PathFilter deletePathFilter,
boolean isNeedRecycle,
boolean isManaged)
Replaces files in the partition with new data set specified by srcf.
|
boolean |
revokePrivileges(PrivilegeBag privileges,
boolean grantOption) |
boolean |
revokeRole(String roleName,
String userName,
PrincipalType principalType,
boolean grantOption) |
static void |
set(Hive hive) |
void |
setMetaConf(String propName,
String propValue) |
boolean |
setPartitionColumnStatistics(SetPartitionsStatsRequest request) |
ShowCompactResponse |
showCompactions() |
List<HiveObjectPrivilege> |
showPrivilegeGrant(HiveObjectType objectType,
String principalName,
PrincipalType principalType,
String dbName,
String tableName,
List<String> partValues,
String columnName) |
GetOpenTxnsInfoResponse |
showTransactions() |
static boolean |
trashFiles(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.FileStatus[] statuses,
org.apache.hadoop.conf.Configuration conf,
boolean purge)
Trashes or deletes all files under a directory.
|
void |
truncateTable(String dbDotTableName,
Map<String,String> partSpec)
Truncates the table/partition as per specifications.
|
void |
updateCreationMetadata(String dbName,
String tableName,
CreationMetadata cm) |
void |
validatePartitionNameCharacters(List<String> partVals) |
WMValidateResourcePlanResponse |
validateResourcePlan(String rpName) |
public void reloadFunctions() throws HiveException
HiveException
public static Hive get(org.apache.hadoop.conf.Configuration c, Class<?> clazz) throws HiveException
HiveException
public static Hive get(HiveConf c) throws HiveException
c - new Hive Configuration
Throws: HiveException
public static Hive getWithFastCheck(HiveConf c) throws HiveException
get(HiveConf)
, except that it checks only the object identity of the existing
MS client, assuming the relevant settings would be unchanged within the same conf object.
Throws: HiveException
public static Hive getWithFastCheck(HiveConf c, boolean doRegisterAllFns) throws HiveException
get(HiveConf)
, except that it checks only the object identity of the existing
MS client, assuming the relevant settings would be unchanged within the same conf object.
Throws: HiveException
public static Hive getWithoutRegisterFns(HiveConf c) throws HiveException
get(HiveConf)
, except that it does not register all functions.
Throws: HiveException
public static Hive get() throws HiveException
HiveException
public static Hive get(boolean doRegisterAllFns) throws HiveException
HiveException
public static Hive get(HiveConf c, boolean needsRefresh) throws HiveException
c - new conf
needsRefresh - if true then creates a new one
Throws: HiveException
public static void set(Hive hive)
public static void closeCurrent()
public void createDatabase(Database db, boolean ifNotExist) throws AlreadyExistsException, HiveException
db -
ifNotExist - if true, will ignore the AlreadyExistsException exception
Throws: AlreadyExistsException, HiveException
public void createDatabase(Database db) throws AlreadyExistsException, HiveException
db -
Throws: AlreadyExistsException, HiveException
public void dropDatabase(String name) throws HiveException, NoSuchObjectException
name -
Throws: NoSuchObjectException, HiveException
See Also: HiveMetaStoreClient.dropDatabase(java.lang.String)
public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb) throws HiveException, NoSuchObjectException
name -
deleteData -
ignoreUnknownDb - if true, will ignore NoSuchObjectException
Throws: HiveException, NoSuchObjectException
public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb, boolean cascade) throws HiveException, NoSuchObjectException
name -
deleteData -
ignoreUnknownDb - if true, will ignore NoSuchObjectException
cascade - if true, delete all tables on the DB if exists. Otherwise, the query
will fail if a table still exists.
Throws: HiveException, NoSuchObjectException
public void createTable(String tableName, List<String> columns, List<String> partCols, Class<? extends org.apache.hadoop.mapred.InputFormat> fileInputFormat, Class<?> fileOutputFormat) throws HiveException
tableName - name of the table
columns - list of fields of the table
partCols - partition keys of the table
fileInputFormat - Class of the input format of the table data file
fileOutputFormat - Class of the output format of the table data file
Throws: HiveException - thrown if the args are invalid or if the metadata or the data
directory couldn't be created
public void createTable(String tableName, List<String> columns, List<String> partCols, Class<? extends org.apache.hadoop.mapred.InputFormat> fileInputFormat, Class<?> fileOutputFormat, int bucketCount, List<String> bucketCols) throws HiveException
tableName - name of the table
columns - list of fields of the table
partCols - partition keys of the table
fileInputFormat - Class of the input format of the table data file
fileOutputFormat - Class of the output format of the table data file
bucketCount - number of buckets that each partition (or the table itself) should
be divided into
Throws: HiveException - thrown if the args are invalid or if the metadata or the data
directory couldn't be created
public void createTable(String tableName, List<String> columns, List<String> partCols, Class<? extends org.apache.hadoop.mapred.InputFormat> fileInputFormat, Class<?> fileOutputFormat, int bucketCount, List<String> bucketCols, Map<String,String> parameters) throws HiveException
tableName - table name
columns - list of fields of the table
partCols - partition keys of the table
fileInputFormat - Class of the input format of the table data file
fileOutputFormat - Class of the output format of the table data file
bucketCount - number of buckets that each partition (or the table itself) should be
divided into
bucketCols - Bucket columns
parameters - Parameters for the table
Throws: HiveException
public void alterTable(Table newTbl, EnvironmentContext environmentContext) throws HiveException
HiveException
public void alterTable(String fullyQlfdTblName, Table newTbl, EnvironmentContext environmentContext) throws HiveException
fullyQlfdTblName - name of the existing table
newTbl - new name of the table. could be the old name
Throws: InvalidOperationException - if the changes in metadata are not acceptable
org.apache.thrift.TException
HiveException
public void alterTable(String fullyQlfdTblName, Table newTbl, boolean cascade, EnvironmentContext environmentContext) throws HiveException
HiveException
public void alterTable(String dbName, String tblName, Table newTbl, boolean cascade, EnvironmentContext environmentContext) throws HiveException
HiveException
public void updateCreationMetadata(String dbName, String tableName, CreationMetadata cm) throws HiveException
HiveException
public void alterPartition(String tblName, Partition newPart, EnvironmentContext environmentContext) throws InvalidOperationException, HiveException
tblName - name of the existing table
newPart - new partition
Throws: InvalidOperationException - if the changes in metadata are not acceptable
org.apache.thrift.TException
HiveException
public void alterPartition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext) throws InvalidOperationException, HiveException
dbName - name of the existing table's database
tblName - name of the existing table
newPart - new partition
Throws: InvalidOperationException - if the changes in metadata are not acceptable
org.apache.thrift.TException
HiveException
public void alterPartitions(String tblName, List<Partition> newParts, EnvironmentContext environmentContext) throws InvalidOperationException, HiveException
tblName - name of the existing table
newParts - new partitions
Throws: InvalidOperationException - if the changes in metadata are not acceptable
org.apache.thrift.TException
HiveException
public void renamePartition(Table tbl, Map<String,String> oldPartSpec, Partition newPart) throws HiveException
tbl - existing table
oldPartSpec - spec of old partition
newPart - new partition
Throws: InvalidOperationException - if the changes in metadata are not acceptable
org.apache.thrift.TException
HiveException
public void alterDatabase(String dbName, Database db) throws HiveException
HiveException
public void createTable(Table tbl) throws HiveException
tbl
- a table objectHiveException
public void createTable(Table tbl, boolean ifNotExists, List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys, List<SQLUniqueConstraint> uniqueConstraints, List<SQLNotNullConstraint> notNullConstraints, List<SQLDefaultConstraint> defaultConstraints, List<SQLCheckConstraint> checkConstraints) throws HiveException
tbl - a table object
ifNotExists - if true, ignore AlreadyExistsException
primaryKeys - primary key columns associated with the table
foreignKeys - foreign key columns associated with the table
uniqueConstraints - UNIQUE constraints associated with the table
notNullConstraints - NOT NULL constraints associated with the table
defaultConstraints - DEFAULT constraints associated with the table
checkConstraints - CHECK constraints associated with the table
Throws: HiveException
public void createTable(Table tbl, boolean ifNotExists) throws HiveException
HiveException
public static List<FieldSchema> getFieldsFromDeserializerForMsStorage(Table tbl, Deserializer deserializer) throws SerDeException, MetaException
SerDeException
MetaException
public void dropTable(String tableName, boolean ifPurge) throws HiveException
tableName - table to drop
ifPurge - completely purge the table (skipping trash) while removing data from warehouse
Throws: HiveException - thrown if the drop fails
public void dropTable(String tableName) throws HiveException
tableName - table to drop
Throws: HiveException - thrown if the drop fails
public void dropTable(String dbName, String tableName) throws HiveException
dbName - database where the table lives
tableName - table to drop
Throws: HiveException - thrown if the drop fails
public void dropTable(String dbName, String tableName, boolean deleteData, boolean ignoreUnknownTab) throws HiveException
dbName -
tableName -
deleteData - deletes the underlying data along with metadata
ignoreUnknownTab - an exception is thrown if this is false and the table doesn't exist
Throws: HiveException
public void dropTable(String dbName, String tableName, boolean deleteData, boolean ignoreUnknownTab, boolean ifPurge) throws HiveException
dbName -
tableName -
deleteData - deletes the underlying data along with metadata
ignoreUnknownTab - an exception is thrown if this is false and the table doesn't exist
ifPurge - completely purge the table skipping trash while removing data from warehouse
Throws: HiveException
public void truncateTable(String dbDotTableName, Map<String,String> partSpec) throws HiveException
dbDotTableName - name of the table
Throws: HiveException
public HiveConf getConf()
public Table getTable(String tableName) throws HiveException
tableName - the name of the table
Throws: HiveException - if there's an internal error or if the
table doesn't exist
public Table getTable(String tableName, boolean throwException) throws HiveException
tableName - the name of the table
throwException - controls whether an exception is thrown or a null is returned
Throws: HiveException - if there's an internal error or if the
table doesn't exist
public Table getTable(String dbName, String tableName) throws HiveException
dbName - the name of the database
tableName - the name of the table
Throws: HiveException - if there's an internal error or if the table doesn't exist
public Table getTable(String dbName, String tableName, boolean throwException) throws HiveException
dbName - the name of the database
tableName - the name of the table
throwException - controls whether an exception is thrown or a null is returned
Throws: HiveException
public List<String> getAllTables() throws HiveException
HiveException
public List<String> getAllTables(String dbName) throws HiveException
dbName
- HiveException
public List<Table> getAllTableObjects(String dbName) throws HiveException
dbName
- HiveException
public List<String> getAllMaterializedViews(String dbName) throws HiveException
dbName
- HiveException
public List<Table> getAllMaterializedViewObjects(String dbName) throws HiveException
dbName
- HiveException
public List<String> getTablesByPattern(String tablePattern) throws HiveException
tablePattern - java regex pattern
Throws: HiveException
public List<String> getTablesByPattern(String dbName, String tablePattern) throws HiveException
dbName -
tablePattern -
Throws: HiveException
public List<String> getTablesForDb(String database, String tablePattern) throws HiveException
database
- the database name
tablePattern
- Java regex pattern
HiveException
public List<String> getTablesByType(String dbName, String pattern, TableType type) throws HiveException
dbName
- Database name to find the tables in. If null, uses the current database in this session.
pattern
- A pattern to match for the table names. If null, returns all names from this DB.
type
- The type of tables to return. VIRTUAL_VIEWS for views. If null, returns all tables and views.
HiveException
public List<org.apache.calcite.plan.RelOptMaterialization> getAllValidMaterializedViews(List<String> tablesUsed, boolean forceMVContentsUpToDate) throws HiveException
HiveException
public List<org.apache.calcite.plan.RelOptMaterialization> getValidMaterializedView(String dbName, String materializedViewName, List<String> tablesUsed, boolean forceMVContentsUpToDate) throws HiveException
HiveException
public List<String> getAllDatabases() throws HiveException
HiveException
public List<String> getDatabasesByPattern(String databasePattern) throws HiveException
databasePattern
- Java regex pattern
HiveException
public boolean grantPrivileges(PrivilegeBag privileges) throws HiveException
HiveException
public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) throws HiveException
privileges
- a bag of privileges
HiveException
public boolean databaseExists(String dbName) throws HiveException
dbName
- HiveException
public Database getDatabase(String dbName) throws HiveException
dbName
- the name of the database.
HiveException
public Database getDatabase(String catName, String dbName) throws HiveException
catName
- catalog name
dbName
- the name of the database.
HiveException
public Database getDatabaseCurrent() throws HiveException
HiveException
public Partition loadPartition(org.apache.hadoop.fs.Path loadPath, Table tbl, Map<String,String> partSpec, LoadTableDesc.LoadFileType loadFileType, boolean inheritTableSpecs, boolean isSkewedStoreAsSubdir, boolean isSrcLocal, boolean isAcidIUDoperation, boolean hasFollowingStatsTask, Long writeId, int stmtId, boolean isInsertOverwrite) throws HiveException
loadPath
- Directory containing files to load into Table
tbl
- name of table to be loaded.
partSpec
- defines which partition needs to be loaded
loadFileType
- if REPLACE_ALL - replace files in the table,
otherwise add files to table (KEEP_EXISTING, OVERWRITE_EXISTING)
inheritTableSpecs
- if true, on [re]creating the partition, take the
location/inputformat/outputformat/serde details from table spec
isSrcLocal
- If the source directory is LOCAL
isAcidIUDoperation
- true if this is an ACID Insert/Update/Delete operation
hasFollowingStatsTask
- true if there is a following task which updates the stats, so this method need not update.
writeId
- write ID allocated for the current load operation
stmtId
- statement ID of the current load statement
isInsertOverwrite
-
HiveException
public Map<Map<String,String>,Partition> loadDynamicPartitions(org.apache.hadoop.fs.Path loadPath, String tableName, Map<String,String> partSpec, LoadTableDesc.LoadFileType loadFileType, int numDP, int numLB, boolean isAcid, long writeId, int stmtId, boolean hasFollowingStatsTask, AcidUtils.Operation operation, boolean isInsertOverwrite) throws HiveException
loadPath
- tableName
- partSpec
- loadFileType
- numDP
- number of dynamic partitions
isAcid
- true if this is an ACID operation
writeId
- writeId, can be 0 unless isAcid == true
HiveException
public void loadTable(org.apache.hadoop.fs.Path loadPath, String tableName, LoadTableDesc.LoadFileType loadFileType, boolean isSrcLocal, boolean isSkewedStoreAsSubdir, boolean isAcidIUDoperation, boolean hasFollowingStatsTask, Long writeId, int stmtId, boolean isInsertOverwrite) throws HiveException
loadPath
- Directory containing files to load into Table
tableName
- name of table to be loaded.
loadFileType
- if REPLACE_ALL - replace files in the table,
otherwise add files to table (KEEP_EXISTING, OVERWRITE_EXISTING)
isSrcLocal
- If the source directory is LOCAL
isSkewedStoreAsSubdir
- if list bucketing enabled
hasFollowingStatsTask
- if there is any following stats task
isAcidIUDoperation
- true if this is an ACID based Insert [overwrite]/update/delete
writeId
- write ID allocated for the current load operation
stmtId
- statement ID of the current load statement
HiveException
public Partition createPartition(Table tbl, Map<String,String> partSpec) throws HiveException
tbl
- table for which partition needs to be created
partSpec
- partition keys and their values
HiveException
- if table doesn't exist or partition already exists
public List<Partition> createPartitions(AddPartitionDesc addPartitionDesc) throws HiveException
HiveException
public static Partition convertAddSpecToMetaPartition(Table tbl, AddPartitionDesc.OnePartitionDesc addSpec, HiveConf conf) throws HiveException
HiveException
public Partition getPartition(Table tbl, Map<String,String> partSpec, boolean forceCreate) throws HiveException
HiveException
public Partition getPartition(Table tbl, Map<String,String> partSpec, boolean forceCreate, String partPath, boolean inheritTableSpecs) throws HiveException
tbl
- the partition's table
partSpec
- partition keys and values
forceCreate
- if this is true and the partition doesn't exist then a partition is created
partPath
- the path where the partition data is located
inheritTableSpecs
- whether to copy over the table specs for if/of/serde
HiveException
public boolean dropPartition(String tblName, List<String> part_vals, boolean deleteData) throws HiveException
HiveException
public boolean dropPartition(String db_name, String tbl_name, List<String> part_vals, boolean deleteData) throws HiveException
HiveException
public boolean dropPartition(String dbName, String tableName, List<String> partVals, PartitionDropOptions options) throws HiveException
HiveException
public List<Partition> dropPartitions(Table table, List<String> partDirNames, boolean deleteData, boolean ifExists) throws HiveException
table
- object for which partition is needed
partDirNames
- partition directories that need to be dropped
deleteData
- whether data should be deleted from file system
ifExists
- check for existence before attempting delete
HiveException
public List<Partition> dropPartitions(String tblName, List<DropTableDesc.PartSpec> partSpecs, boolean deleteData, boolean ifExists) throws HiveException
HiveException
public List<Partition> dropPartitions(String dbName, String tblName, List<DropTableDesc.PartSpec> partSpecs, boolean deleteData, boolean ifExists) throws HiveException
HiveException
public List<Partition> dropPartitions(String tblName, List<DropTableDesc.PartSpec> partSpecs, PartitionDropOptions dropOptions) throws HiveException
HiveException
public List<Partition> dropPartitions(String dbName, String tblName, List<DropTableDesc.PartSpec> partSpecs, PartitionDropOptions dropOptions) throws HiveException
HiveException
public List<String> getPartitionNames(String tblName, short max) throws HiveException
HiveException
public List<String> getPartitionNames(String dbName, String tblName, short max) throws HiveException
HiveException
public List<String> getPartitionNames(String dbName, String tblName, Map<String,String> partSpec, short max) throws HiveException
HiveException
public List<Partition> getPartitions(Table tbl) throws HiveException
tbl
- object for which partition is needed
HiveException
public Set<Partition> getAllPartitionsOf(Table tbl) throws HiveException
getPartitions(Table)
, does not include auth.
tbl
- table for which partitions are needed
HiveException
public List<Partition> getPartitions(Table tbl, Map<String,String> partialPartSpec, short limit) throws HiveException
tbl
- object for which partition is needed. Must be partitioned.
limit
- number of partitions to return
HiveException
public List<Partition> getPartitions(Table tbl, Map<String,String> partialPartSpec) throws HiveException
tbl
- object for which partition is needed. Must be partitioned.
HiveException
public List<Partition> getPartitionsByNames(Table tbl, Map<String,String> partialPartSpec) throws HiveException
tbl
- object for which partition is needed. Must be partitioned.
partialPartSpec
- partial partition specification (some subpartitions can be empty).
HiveException
public List<Partition> getPartitionsByNames(Table tbl, List<String> partNames) throws HiveException
tbl
- object for which partition is needed. Must be partitioned.
partNames
- list of partition names
HiveException
public List<Partition> getPartitionsByFilter(Table tbl, String filter) throws HiveException, MetaException, NoSuchObjectException, org.apache.thrift.TException
tbl
- The table containing the partitions.
filter
- A string representing partition predicates.
HiveException
MetaException
NoSuchObjectException
org.apache.thrift.TException
public boolean getPartitionsByExpr(Table tbl, ExprNodeGenericFuncDesc expr, HiveConf conf, List<Partition> result) throws HiveException, org.apache.thrift.TException
tbl
- The table containing the partitions.
expr
- A serialized expression for partition predicates.
conf
- Hive config.
result
- the resulting list of partitions
HiveException
org.apache.thrift.TException
public int getNumPartitionsByFilter(Table tbl, String filter) throws HiveException, MetaException, NoSuchObjectException, org.apache.thrift.TException
tbl
- The table containing the partitions.
filter
- A string representing partition predicates.
HiveException
MetaException
NoSuchObjectException
org.apache.thrift.TException
public void validatePartitionNameCharacters(List<String> partVals) throws HiveException
HiveException
public void createRole(String roleName, String ownerName) throws HiveException
HiveException
public void dropRole(String roleName) throws HiveException
HiveException
public List<String> getAllRoleNames() throws HiveException
HiveException
public List<RolePrincipalGrant> getRoleGrantInfoForPrincipal(String principalName, PrincipalType principalType) throws HiveException
HiveException
public boolean grantRole(String roleName, String userName, PrincipalType principalType, String grantor, PrincipalType grantorType, boolean grantOption) throws HiveException
HiveException
public boolean revokeRole(String roleName, String userName, PrincipalType principalType, boolean grantOption) throws HiveException
HiveException
public List<Role> listRoles(String userName, PrincipalType principalType) throws HiveException
HiveException
public PrincipalPrivilegeSet get_privilege_set(HiveObjectType objectType, String db_name, String table_name, List<String> part_values, String column_name, String user_name, List<String> group_names) throws HiveException
objectType
- hive object type
db_name
- database name
table_name
- table name
part_values
- partition values
column_name
- column name
user_name
- user name
group_names
- group names
HiveException
public List<HiveObjectPrivilege> showPrivilegeGrant(HiveObjectType objectType, String principalName, PrincipalType principalType, String dbName, String tableName, List<String> partValues, String columnName) throws HiveException
objectType
- hive object type
principalName
-
principalType
-
dbName
-
tableName
-
partValues
-
columnName
-
HiveException
public static void clearDestForSubDirSrc(HiveConf conf, org.apache.hadoop.fs.Path dest, org.apache.hadoop.fs.Path src, boolean isSrcLocal) throws IOException
IOException
public static void listNewFilesRecursively(org.apache.hadoop.fs.FileSystem destFs, org.apache.hadoop.fs.Path dest, List<org.apache.hadoop.fs.Path> newFiles) throws HiveException
HiveException
public void recycleDirToCmPath(org.apache.hadoop.fs.Path dataPath, boolean isPurge) throws HiveException
dataPath
- Path of the data files to be recycled to cmroot
isPurge
- When set to true, files which need to be recycled are not moved to Trash
HiveException
public static boolean moveFile(HiveConf conf, org.apache.hadoop.fs.Path srcf, org.apache.hadoop.fs.Path destf, boolean replace, boolean isSrcLocal, boolean isManaged) throws HiveException
HiveException
protected static void copyFiles(HiveConf conf, org.apache.hadoop.fs.Path srcf, org.apache.hadoop.fs.Path destf, org.apache.hadoop.fs.FileSystem fs, boolean isSrcLocal, boolean isAcidIUD, boolean isOverwrite, List<org.apache.hadoop.fs.Path> newFiles, boolean isBucketed, boolean isFullAcidTable, boolean isManaged) throws HiveException
conf
- Configuration object
srcf
- source directory, if bucketed should contain bucket files
destf
- directory to move files into
fs
- Filesystem
isSrcLocal
- true if source is on local file system
isAcidIUD
- true if this is an ACID based Insert/Update/Delete
isOverwrite
- if true, then overwrite if the destination file exists, else add a duplicate copy
newFiles
- if this is non-null, a list of files that were created as a result of this
move will be returned.
isManaged
- if table is managed.
HiveException
public static void moveAcidFiles(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.FileStatus[] stats, org.apache.hadoop.fs.Path dst, List<org.apache.hadoop.fs.Path> newFiles) throws HiveException
HiveException
protected void replaceFiles(org.apache.hadoop.fs.Path tablePath, org.apache.hadoop.fs.Path srcf, org.apache.hadoop.fs.Path destf, org.apache.hadoop.fs.Path oldPath, HiveConf conf, boolean isSrcLocal, boolean purge, List<org.apache.hadoop.fs.Path> newFiles, org.apache.hadoop.fs.PathFilter deletePathFilter, boolean isNeedRecycle, boolean isManaged) throws HiveException
tablePath
- path of the table. Used to identify permission inheritance.
srcf
- Source directory to be renamed to tmppath. It should be a
leaf directory where the final data files reside. However it
could potentially contain subdirectories as well.
destf
- The directory where the final data needs to go
oldPath
- The directory of the old data location, which needs to be cleaned up. Most of the time it will be the same
as destf, unless it is across FileSystem boundaries.
purge
- When set to true, files which need to be deleted are not moved to Trash
isSrcLocal
- If the source directory is LOCAL
newFiles
- Output the list of new files replaced in the destination path
isManaged
- If the table is managed.
HiveException
public static boolean trashFiles(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.FileStatus[] statuses, org.apache.hadoop.conf.Configuration conf, boolean purge) throws IOException
fs
- FileSystem to use
statuses
- fileStatuses of files to be deleted
conf
- hive configuration
IOException
public static boolean isHadoop1()
public List<Partition> exchangeTablePartitions(Map<String,String> partitionSpecs, String sourceDb, String sourceTable, String destDb, String destinationTableName) throws HiveException
HiveException
@InterfaceAudience.LimitedPrivate(value="Hive") @InterfaceStability.Unstable public SynchronizedMetaStoreClient getSynchronizedMSC() throws MetaException
MetaException
@InterfaceAudience.LimitedPrivate(value="Hive") @InterfaceStability.Unstable public IMetaStoreClient getMSC() throws MetaException
MetaException
@InterfaceAudience.LimitedPrivate(value="Hive") @InterfaceStability.Unstable public IMetaStoreClient getMSC(boolean allowEmbedded, boolean forceCreate) throws MetaException
MetaException
public static List<FieldSchema> getFieldsFromDeserializer(String name, Deserializer serde) throws HiveException
HiveException
public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) throws HiveException
HiveException
public List<ColumnStatisticsObj> getTableColumnStatistics(String dbName, String tableName, List<String> colNames) throws HiveException
HiveException
public Map<String,List<ColumnStatisticsObj>> getPartitionColumnStatistics(String dbName, String tableName, List<String> partNames, List<String> colNames) throws HiveException
HiveException
public AggrStats getAggrColStatsFor(String dbName, String tblName, List<String> colNames, List<String> partName)
public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) throws HiveException
HiveException
public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName, String colName) throws HiveException
HiveException
public Table newTable(String tableName) throws HiveException
HiveException
public String getDelegationToken(String owner, String renewer) throws HiveException
HiveException
public void cancelDelegationToken(String tokenStrForm) throws HiveException
HiveException
@Deprecated public void compact(String dbname, String tableName, String partName, String compactType, Map<String,String> tblproperties) throws HiveException
compact2(String, String, String, String, Map)
HiveException
public CompactionResponse compact2(String dbname, String tableName, String partName, String compactType, Map<String,String> tblproperties) throws HiveException
dbname
- name of the database, if null default will be used.
tableName
- name of the table, cannot be null
partName
- name of the partition, if null table will be compacted (valid only for
non-partitioned tables).
compactType
- major or minor
tblproperties
- the list of tblproperties to overwrite for this compaction
HiveException
public ShowCompactResponse showCompactions() throws HiveException
HiveException
public GetOpenTxnsInfoResponse showTransactions() throws HiveException
HiveException
public void abortTransactions(List<Long> txnids) throws HiveException
HiveException
public void createFunction(Function func) throws HiveException
HiveException
public void alterFunction(String dbName, String funcName, Function newFunction) throws HiveException
HiveException
public void dropFunction(String dbName, String funcName) throws HiveException
HiveException
public Function getFunction(String dbName, String funcName) throws HiveException
HiveException
public List<Function> getAllFunctions() throws HiveException
HiveException
public List<String> getFunctions(String dbName, String pattern) throws HiveException
HiveException
public void setMetaConf(String propName, String propValue) throws HiveException
HiveException
public String getMetaConf(String propName) throws HiveException
HiveException
public void clearMetaCallTiming()
public com.google.common.collect.ImmutableMap<String,Long> dumpAndClearMetaCallTiming(String phase)
public Iterable<Map.Entry<Long,ByteBuffer>> getFileMetadata(List<Long> fileIds) throws HiveException
HiveException
public Iterable<Map.Entry<Long,MetadataPpdResult>> getFileMetadataByExpr(List<Long> fileIds, ByteBuffer sarg, boolean doGetFooters) throws HiveException
HiveException
public void clearFileMetadata(List<Long> fileIds) throws HiveException
HiveException
public void putFileMetadata(List<Long> fileIds, List<ByteBuffer> metadata) throws HiveException
HiveException
public void cacheFileMetadata(String dbName, String tableName, String partName, boolean allParts) throws HiveException
HiveException
public void dropConstraint(String dbName, String tableName, String constraintName) throws HiveException, NoSuchObjectException
HiveException
NoSuchObjectException
public List<SQLPrimaryKey> getPrimaryKeyList(String dbName, String tblName) throws HiveException, NoSuchObjectException
HiveException
NoSuchObjectException
public List<SQLForeignKey> getForeignKeyList(String dbName, String tblName) throws HiveException, NoSuchObjectException
HiveException
NoSuchObjectException
public List<SQLUniqueConstraint> getUniqueConstraintList(String dbName, String tblName) throws HiveException, NoSuchObjectException
HiveException
NoSuchObjectException
public List<SQLNotNullConstraint> getNotNullConstraintList(String dbName, String tblName) throws HiveException, NoSuchObjectException
HiveException
NoSuchObjectException
public List<SQLDefaultConstraint> getDefaultConstraintList(String dbName, String tblName) throws HiveException, NoSuchObjectException
HiveException
NoSuchObjectException
public List<SQLCheckConstraint> getCheckConstraintList(String dbName, String tblName) throws HiveException, NoSuchObjectException
HiveException
NoSuchObjectException
public PrimaryKeyInfo getPrimaryKeys(String dbName, String tblName) throws HiveException
dbName
- Database Name
tblName
- Table Name
HiveException
public PrimaryKeyInfo getReliablePrimaryKeys(String dbName, String tblName) throws HiveException
dbName
- Database Name
tblName
- Table Name
HiveException
public ForeignKeyInfo getForeignKeys(String dbName, String tblName) throws HiveException
dbName
- Database Name
tblName
- Table Name
HiveException
public ForeignKeyInfo getReliableForeignKeys(String dbName, String tblName) throws HiveException
dbName
- Database Name
tblName
- Table Name
HiveException
public UniqueConstraint getUniqueConstraints(String dbName, String tblName) throws HiveException
dbName
- Database Name
tblName
- Table Name
HiveException
public UniqueConstraint getReliableUniqueConstraints(String dbName, String tblName) throws HiveException
dbName
- Database Name
tblName
- Table Name
HiveException
public NotNullConstraint getNotNullConstraints(String dbName, String tblName) throws HiveException
dbName
- Database Name
tblName
- Table Name
HiveException
public NotNullConstraint getReliableNotNullConstraints(String dbName, String tblName) throws HiveException
dbName
- Database Name
tblName
- Table Name
HiveException
public NotNullConstraint getEnabledNotNullConstraints(String dbName, String tblName) throws HiveException
dbName
- Database Name
tblName
- Table Name
HiveException
public CheckConstraint getEnabledCheckConstraints(String dbName, String tblName) throws HiveException
dbName
- Database Name
tblName
- Table Name
HiveException
public DefaultConstraint getEnabledDefaultConstraints(String dbName, String tblName) throws HiveException
dbName
- Database Name
tblName
- Table Name
HiveException
public DefaultConstraint getDefaultConstraints(String dbName, String tblName) throws HiveException
HiveException
public CheckConstraint getCheckConstraints(String dbName, String tblName) throws HiveException
HiveException
public void addPrimaryKey(List<SQLPrimaryKey> primaryKeyCols) throws HiveException, NoSuchObjectException
HiveException
NoSuchObjectException
public void addForeignKey(List<SQLForeignKey> foreignKeyCols) throws HiveException, NoSuchObjectException
HiveException
NoSuchObjectException
public void addUniqueConstraint(List<SQLUniqueConstraint> uniqueConstraintCols) throws HiveException, NoSuchObjectException
HiveException
NoSuchObjectException
public void addNotNullConstraint(List<SQLNotNullConstraint> notNullConstraintCols) throws HiveException, NoSuchObjectException
HiveException
NoSuchObjectException
public void addDefaultConstraint(List<SQLDefaultConstraint> defaultConstraints) throws HiveException, NoSuchObjectException
HiveException
NoSuchObjectException
public void addCheckConstraint(List<SQLCheckConstraint> checkConstraints) throws HiveException, NoSuchObjectException
HiveException
NoSuchObjectException
public void createResourcePlan(WMResourcePlan resourcePlan, String copyFromName) throws HiveException
HiveException
public WMFullResourcePlan getResourcePlan(String rpName) throws HiveException
HiveException
public List<WMResourcePlan> getAllResourcePlans() throws HiveException
HiveException
public void dropResourcePlan(String rpName) throws HiveException
HiveException
public WMFullResourcePlan alterResourcePlan(String rpName, WMNullableResourcePlan resourcePlan, boolean canActivateDisabled, boolean isForceDeactivate, boolean isReplace) throws HiveException
HiveException
public WMFullResourcePlan getActiveResourcePlan() throws HiveException
HiveException
public WMValidateResourcePlanResponse validateResourcePlan(String rpName) throws HiveException
HiveException
public void createWMTrigger(WMTrigger trigger) throws HiveException
HiveException
public void alterWMTrigger(WMTrigger trigger) throws HiveException
HiveException
public void dropWMTrigger(String rpName, String triggerName) throws HiveException
HiveException
public void createWMPool(WMPool pool) throws HiveException
HiveException
public void alterWMPool(WMNullablePool pool, String poolPath) throws HiveException
HiveException
public void dropWMPool(String resourcePlanName, String poolPath) throws HiveException
HiveException
public void createOrUpdateWMMapping(WMMapping mapping, boolean isUpdate) throws HiveException
HiveException
public void dropWMMapping(WMMapping mapping) throws HiveException
HiveException
public void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerName, String poolPath, boolean shouldDrop) throws HiveException
HiveException
@Nullable public StorageHandlerInfo getStorageHandlerInfo(Table table) throws HiveException
HiveException
Copyright © 2022 The Apache Software Foundation. All rights reserved.