diff --git a/iceberg/iceberg-catalog/pom.xml b/iceberg/iceberg-catalog/pom.xml index 50058ddfb179..643acc49832d 100644 --- a/iceberg/iceberg-catalog/pom.xml +++ b/iceberg/iceberg-catalog/pom.xml @@ -114,5 +114,17 @@ + + org.apache.httpcomponents.client5 + httpclient5 + + + org.apache.httpcomponents.core5 + httpcore5 + + + org.apache.httpcomponents.core5 + httpcore5-h2 + diff --git a/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/CatalogUtils.java b/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/CatalogUtils.java new file mode 100644 index 000000000000..f2fbd5401798 --- /dev/null +++ b/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/CatalogUtils.java @@ -0,0 +1,226 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iceberg.hive; + +import java.util.Map; +import java.util.Optional; +import java.util.Properties; +import java.util.Set; +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.iceberg.BaseMetastoreTableOperations; +import org.apache.iceberg.CatalogProperties; +import org.apache.iceberg.CatalogUtil; +import org.apache.iceberg.catalog.TableIdentifier; +import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet; +import org.apache.iceberg.relocated.com.google.common.collect.Maps; + +public class CatalogUtils { + public static final String NAME = "name"; + public static final String LOCATION = "location"; + public static final String CATALOG_NAME = "iceberg.catalog"; + public static final String CATALOG_CONFIG_PREFIX = "iceberg.catalog."; + public static final String CATALOG_WAREHOUSE_TEMPLATE = "iceberg.catalog.%s.warehouse"; + public static final String CATALOG_IMPL_TEMPLATE = "iceberg.catalog.%s.catalog-impl"; + public static final String CATALOG_DEFAULT_CONFIG_PREFIX = "iceberg.catalog-default."; + public static final String ICEBERG_HADOOP_TABLE_NAME = "location_based_table"; + public static final String ICEBERG_DEFAULT_CATALOG_NAME = "default_iceberg"; + public static final String NO_CATALOG_TYPE = "no catalog"; + public static final Set PROPERTIES_TO_REMOVE = ImmutableSet.of( + // We don't want to push down the metadata location props to Iceberg from HMS, + // since the snapshot pointer in HMS would always be one step ahead + BaseMetastoreTableOperations.METADATA_LOCATION_PROP, + BaseMetastoreTableOperations.PREVIOUS_METADATA_LOCATION_PROP); + + private CatalogUtils() { + + } + + /** + * Calculates the properties we would like to send to the catalog. 
+ * + * @param hmsTable Table for which we are calculating the properties + * @return The properties we can provide for Iceberg functions + */ + public static Properties getCatalogProperties(org.apache.hadoop.hive.metastore.api.Table hmsTable) { + Properties properties = new Properties(); + properties.putAll(toIcebergProperties(hmsTable.getParameters())); + + if (properties.get(LOCATION) == null && hmsTable.getSd() != null && + hmsTable.getSd().getLocation() != null) { + properties.put(LOCATION, hmsTable.getSd().getLocation()); + } + + if (properties.get(NAME) == null) { + properties.put(NAME, TableIdentifier.of(hmsTable.getDbName(), + hmsTable.getTableName()).toString()); + } + + SerDeInfo serdeInfo = hmsTable.getSd().getSerdeInfo(); + if (serdeInfo != null) { + properties.putAll(toIcebergProperties(serdeInfo.getParameters())); + } + + // Remove HMS table parameters we don't want to propagate to Iceberg + PROPERTIES_TO_REMOVE.forEach(properties::remove); + + return properties; + } + + private static Properties toIcebergProperties(Map parameters) { + Properties properties = new Properties(); + parameters.entrySet().stream() + .filter(e -> e.getKey() != null && e.getValue() != null) + .forEach(e -> { + String icebergKey = HMSTablePropertyHelper.translateToIcebergProp(e.getKey()); + properties.put(icebergKey, e.getValue()); + }); + return properties; + } + + /** + * Collect all the catalog specific configuration from the global hive configuration. + * @param conf a Hadoop configuration + * @param catalogName name of the catalog + * @return complete map of catalog properties + */ + public static Map getCatalogProperties(Configuration conf, String catalogName) { + Map catalogProperties = Maps.newHashMap(); + String keyPrefix = CATALOG_CONFIG_PREFIX + catalogName; + conf.forEach(config -> { + if (config.getKey().startsWith(CatalogUtils.CATALOG_DEFAULT_CONFIG_PREFIX)) { + catalogProperties.putIfAbsent( + config.getKey().substring(CatalogUtils.CATALOG_DEFAULT_CONFIG_PREFIX.length()), + config.getValue()); + } else if (config.getKey().startsWith(keyPrefix)) { + catalogProperties.put( + config.getKey().substring(keyPrefix.length() + 1), + config.getValue()); + } + }); + + return catalogProperties; + } + + public static String getCatalogName(Configuration conf) { + return MetastoreConf.getVar(conf, MetastoreConf.ConfVars.CATALOG_DEFAULT); + } + + public static String getCatalogType(Configuration conf) { + return getCatalogType(conf, CatalogUtils.getCatalogName(conf)); + } + + public static boolean isHadoopTable(Configuration conf, Properties catalogProperties) { + String catalogName = catalogProperties.getProperty(CATALOG_NAME); + return ICEBERG_HADOOP_TABLE_NAME.equals(catalogName) || hadoopCatalog(conf, catalogProperties); + } + + public static boolean hadoopCatalog(Configuration conf, Properties props) { + return assertCatalogType(conf, props, CatalogUtil.ICEBERG_CATALOG_TYPE_HADOOP, CatalogUtil.ICEBERG_CATALOG_HADOOP); + } + + /** + * Get Hadoop config key of a catalog property based on catalog name + * @param catalogName catalog name + * @param catalogProperty catalog property, can be any custom property, + * a commonly used list of properties can be found + * at {@link org.apache.iceberg.CatalogProperties} + * @return Hadoop config key of a catalog property for the catalog name + */ + public static String catalogPropertyConfigKey(String catalogName, String catalogProperty) { + return String.format("%s%s.%s", CATALOG_CONFIG_PREFIX, catalogName, catalogProperty); + } + + /** + * Return the 
catalog type based on the catalog name. + * <p>
+ * See Catalogs documentation for catalog type resolution strategy. + * + * @param conf global hive configuration + * @param catalogName name of the catalog + * @return type of the catalog, can be null + */ + public static String getCatalogType(Configuration conf, String catalogName) { + if (!StringUtils.isEmpty(catalogName)) { + String catalogType = conf.get(catalogPropertyConfigKey( + catalogName, CatalogUtil.ICEBERG_CATALOG_TYPE)); + if (catalogName.equals(ICEBERG_HADOOP_TABLE_NAME)) { + return NO_CATALOG_TYPE; + } else { + return catalogType; + } + } else { + String catalogType = conf.get(CatalogUtil.ICEBERG_CATALOG_TYPE); + if (catalogType != null && catalogType.equals(LOCATION)) { + return NO_CATALOG_TYPE; + } else { + return catalogType; + } + } + } + + public static String getCatalogImpl(Configuration conf, String catalogName) { + return Optional.ofNullable(catalogName) + .filter(StringUtils::isNotEmpty) + .map(name -> String.format(CatalogUtils.CATALOG_IMPL_TEMPLATE, name)) + .map(conf::get) + .orElse(null); + } + + public static boolean assertCatalogType(Configuration conf, Properties props, String expectedType, + String expectedImpl) { + String catalogName = props.getProperty(CATALOG_NAME); + String catalogType = Optional.ofNullable(CatalogUtils.getCatalogType(conf, catalogName)) + .orElseGet(() -> CatalogUtils.getCatalogType(conf, ICEBERG_DEFAULT_CATALOG_NAME)); + + if (catalogType != null) { + return expectedType.equalsIgnoreCase(catalogType); + } + + String actualImpl = CatalogUtils.getCatalogProperties(conf, catalogName).get(CatalogProperties.CATALOG_IMPL); + + // Return true immediately if the strings are equal (this also handles both being null). + if (StringUtils.equals(expectedImpl, actualImpl)) { + return true; + } + + // If they are not equal, but one of them is null, they can't be subtypes. + if (expectedImpl == null || actualImpl == null) { + return false; + } + + // Now that we know both are non-null and not equal, check the class hierarchy. 
+ try { + return Class.forName(expectedImpl).isAssignableFrom(Class.forName(actualImpl)); + } catch (ClassNotFoundException e) { + throw new RuntimeException(String.format("Error checking if catalog %s is subtype of %s", + catalogName, expectedImpl), e); + } + } +} diff --git a/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HMSTablePropertyHelper.java b/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HMSTablePropertyHelper.java index 2eaaaee1272e..45253e1dd8d1 100644 --- a/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HMSTablePropertyHelper.java +++ b/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HMSTablePropertyHelper.java @@ -22,17 +22,20 @@ import java.util.Locale; import java.util.Map; import java.util.Optional; +import java.util.Properties; import java.util.Set; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hive.iceberg.com.fasterxml.jackson.core.JsonProcessingException; import org.apache.iceberg.BaseMetastoreTableOperations; +import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.PartitionSpecParser; import org.apache.iceberg.Schema; import org.apache.iceberg.SchemaParser; import org.apache.iceberg.Snapshot; import org.apache.iceberg.SnapshotSummary; +import org.apache.iceberg.SortOrder; import org.apache.iceberg.SortOrderParser; import org.apache.iceberg.TableMetadata; import org.apache.iceberg.TableProperties; @@ -42,6 +45,7 @@ import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap; import org.apache.iceberg.relocated.com.google.common.collect.Maps; import org.apache.iceberg.util.JsonUtil; +import org.apache.parquet.Strings; import org.apache.parquet.hadoop.ParquetOutputFormat; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -51,6 +55,7 @@ public class HMSTablePropertyHelper { private static final Logger LOG = LoggerFactory.getLogger(HMSTablePropertyHelper.class); public static final String HIVE_ICEBERG_STORAGE_HANDLER = "org.apache.iceberg.mr.hive.HiveIcebergStorageHandler"; + public static final String PARTITION_SPEC = "iceberg.mr.table.partition.spec"; private static final BiMap ICEBERG_TO_HMS_TRANSLATION = ImmutableBiMap.of( // gc.enabled in Iceberg and external.table.purge in Hive are meant to do the same things @@ -63,7 +68,7 @@ private HMSTablePropertyHelper() { /** * Provides key translation where necessary between Iceberg and HMS props. This translation is needed because some - * properties control the same behaviour but are named differently in Iceberg and Hive. Therefore changes to these + * properties control the same behaviour but are named differently in Iceberg and Hive. Therefore, changes to these * property pairs should be synchronized. * * Example: Deleting data files upon DROP TABLE is enabled using gc.enabled=true in Iceberg and @@ -131,6 +136,12 @@ public static void updateHmsTableForIcebergTable( tbl.setParameters(parameters); } + public static SortOrder getSortOrder(Properties props, Schema schema) { + String sortOrderJsonString = props.getProperty(TableProperties.DEFAULT_SORT_ORDER); + return Strings.isNullOrEmpty(sortOrderJsonString) ? 
SortOrder.unsorted() : SortOrderParser.fromJson(schema, + sortOrderJsonString); + } + private static void setCommonParameters( String newMetadataLocation, String uuid, @@ -143,8 +154,9 @@ private static void setCommonParameters( if (uuid != null) { parameters.put(TableProperties.UUID, uuid); } - - obsoleteProps.forEach(parameters::remove); + if (obsoleteProps != null) { + obsoleteProps.forEach(parameters::remove); + } parameters.put(BaseMetastoreTableOperations.TABLE_TYPE_PROP, tableType); parameters.put(BaseMetastoreTableOperations.METADATA_LOCATION_PROP, newMetadataLocation); @@ -158,7 +170,7 @@ private static void setCommonParameters( @VisibleForTesting static void setStorageHandler(Map parameters, boolean hiveEngineEnabled) { - // If needed set the 'storage_handler' property to enable query from Hive + // If needed, set the 'storage_handler' property to enable query from Hive if (hiveEngineEnabled) { parameters.put(hive_metastoreConstants.META_TABLE_STORAGE, HIVE_ICEBERG_STORAGE_HANDLER); } else { @@ -209,6 +221,16 @@ static void setPartitionSpec(TableMetadata metadata, Map paramet } } + public static PartitionSpec getPartitionSpec(Map props, Schema schema) { + String specJson = props.getOrDefault( + PARTITION_SPEC, + props.get(TableProperties.DEFAULT_PARTITION_SPEC) + ); + return Optional.ofNullable(specJson) + .map(spec -> PartitionSpecParser.fromJson(schema, spec)) + .orElseGet(PartitionSpec::unpartitioned); + } + @VisibleForTesting static void setSortOrder(TableMetadata metadata, Map parameters, long maxHiveTablePropertySize) { parameters.remove(TableProperties.DEFAULT_SORT_ORDER); diff --git a/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/MetastoreUtil.java b/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/MetastoreUtil.java index dd9fc44a5c85..1744b9b135cb 100644 --- a/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/MetastoreUtil.java +++ b/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/MetastoreUtil.java @@ -19,17 +19,39 @@ package org.apache.iceberg.hive; +import java.util.List; import java.util.Map; +import java.util.stream.Collectors; +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.EnvironmentContext; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.iceberg.BaseTable; +import org.apache.iceberg.CatalogUtil; +import org.apache.iceberg.Schema; +import org.apache.iceberg.TableMetadata; import org.apache.iceberg.common.DynMethods; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap; +import org.apache.iceberg.relocated.com.google.common.collect.Lists; import org.apache.iceberg.relocated.com.google.common.collect.Maps; import org.apache.thrift.TException; public class MetastoreUtil { + + public static final String DEFAULT_INPUT_FORMAT_CLASS = "org.apache.iceberg.mr.hive.HiveIcebergInputFormat"; + public static final String 
DEFAULT_OUTPUT_FORMAT_CLASS = "org.apache.iceberg.mr.hive.HiveIcebergOutputFormat"; + public static final String DEFAULT_SERDE_CLASS = "org.apache.iceberg.mr.hive.HiveIcebergSerDe"; + private static final DynMethods.UnboundMethod ALTER_TABLE = DynMethods.builder("alter_table") .impl( @@ -87,4 +109,60 @@ public static void alterTable( } } } + + public static List getPartitionKeys(org.apache.iceberg.Table table, int specId) { + Schema schema = table.specs().get(specId).schema(); + List hiveSchema = HiveSchemaUtil.convert(schema); + Map colNameToColType = hiveSchema.stream() + .collect(Collectors.toMap(FieldSchema::getName, FieldSchema::getType)); + return table.specs().get(specId).fields().stream() + .map(partField -> new FieldSchema( + schema.findColumnName(partField.sourceId()), + colNameToColType.get(schema.findColumnName(partField.sourceId())), + String.format("Transform: %s", partField.transform().toString())) + ) + .toList(); + } + + public static Table toHiveTable(org.apache.iceberg.Table table, Configuration conf) { + var result = new Table(); + TableName tableName = TableName.fromString(table.name(), MetaStoreUtils.getDefaultCatalog(conf), + Warehouse.DEFAULT_DATABASE_NAME); + result.setCatName(tableName.getCat()); + result.setDbName(tableName.getDb()); + result.setTableName(tableName.getTable()); + result.setTableType(TableType.EXTERNAL_TABLE.toString()); + result.setPartitionKeys(getPartitionKeys(table, table.spec().specId())); + TableMetadata metadata = ((BaseTable) table).operations().current(); + long maxHiveTablePropertySize = conf.getLong(HiveOperationsBase.HIVE_TABLE_PROPERTY_MAX_SIZE, + HiveOperationsBase.HIVE_TABLE_PROPERTY_MAX_SIZE_DEFAULT); + HMSTablePropertyHelper.updateHmsTableForIcebergTable(metadata.metadataFileLocation(), result, metadata, + null, true, maxHiveTablePropertySize, null); + String catalogType = CatalogUtils.getCatalogType(conf); + if (!StringUtils.isEmpty(catalogType) && !CatalogUtils.NO_CATALOG_TYPE.equals(catalogType)) { + result.getParameters().put(CatalogUtil.ICEBERG_CATALOG_TYPE, CatalogUtils.getCatalogType(conf)); + } + result.setSd(getHiveStorageDescriptor(table)); + return result; + } + + private static StorageDescriptor getHiveStorageDescriptor(org.apache.iceberg.Table table) { + var result = new StorageDescriptor(); + result.setCols(HiveSchemaUtil.convert(table.schema())); + result.setBucketCols(Lists.newArrayList()); + result.setNumBuckets(-1); + result.setSortCols(Lists.newArrayList()); + result.setInputFormat(DEFAULT_INPUT_FORMAT_CLASS); + result.setOutputFormat(DEFAULT_OUTPUT_FORMAT_CLASS); + result.setSerdeInfo(getHiveSerdeInfo()); + result.setLocation(table.location()); + result.setParameters(Maps.newHashMap()); + return result; + } + + private static SerDeInfo getHiveSerdeInfo() { + var result = new SerDeInfo("icebergSerde", DEFAULT_SERDE_CLASS, Maps.newHashMap()); + result.getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1"); // Default serialization format. + return result; + } } diff --git a/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/client/HiveRESTCatalogClient.java b/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/client/HiveRESTCatalogClient.java new file mode 100644 index 000000000000..c1ffaa4a666c --- /dev/null +++ b/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/client/HiveRESTCatalogClient.java @@ -0,0 +1,231 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iceberg.hive.client; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Properties; +import java.util.regex.Pattern; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.CreateTableRequest; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.DropDatabaseRequest; +import org.apache.hadoop.hive.metastore.api.EnvironmentContext; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.GetTableRequest; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.BaseMetaStoreClient; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.apache.iceberg.CatalogUtil; +import org.apache.iceberg.Schema; +import org.apache.iceberg.SortOrder; +import org.apache.iceberg.catalog.Namespace; +import org.apache.iceberg.catalog.TableIdentifier; +import org.apache.iceberg.exceptions.NoSuchTableException; +import org.apache.iceberg.hive.CatalogUtils; +import org.apache.iceberg.hive.HMSTablePropertyHelper; +import org.apache.iceberg.hive.HiveSchemaUtil; +import org.apache.iceberg.hive.MetastoreUtil; +import org.apache.iceberg.hive.RuntimeMetaException; +import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap; +import org.apache.iceberg.relocated.com.google.common.collect.Lists; +import org.apache.iceberg.relocated.com.google.common.collect.Maps; +import org.apache.iceberg.rest.RESTCatalog; +import org.apache.thrift.TException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class HiveRESTCatalogClient extends BaseMetaStoreClient { + + public static final String NAMESPACE_SEPARATOR = "."; + public static final String DB_OWNER = "owner"; + public static final String DB_OWNER_TYPE = "ownerType"; + + private static final Logger LOG = LoggerFactory.getLogger(HiveRESTCatalogClient.class); + + private RESTCatalog restCatalog; + + public HiveRESTCatalogClient(Configuration conf, boolean allowEmbedded) { + this(conf); + } + + public HiveRESTCatalogClient(Configuration conf) { + super(conf); + reconnect(); + } + + @Override + public void reconnect() { + close(); + String catName = MetaStoreUtils.getDefaultCatalog(conf); + Map properties = CatalogUtils.getCatalogProperties(conf, CatalogUtils.getCatalogName(conf)); + restCatalog = (RESTCatalog) CatalogUtil.buildIcebergCatalog(catName, properties, null); + } + + @Override + public void close() { + try { + if (restCatalog != null) { + restCatalog.close(); + } + } catch (IOException e) { + throw new 
RuntimeMetaException(e.getCause(), "Failed to close existing REST catalog"); + } + } + + @Override + public List getDatabases(String catName, String dbPattern) { + validateCurrentCatalog(catName); + // Convert the Hive glob pattern (e.g., "db*") to a valid Java regex ("db.*"). + String regex = dbPattern.replace("*", ".*"); + Pattern pattern = Pattern.compile(regex); + + return restCatalog.listNamespaces(Namespace.empty()).stream() + .map(Namespace::toString) + .filter(pattern.asPredicate()) + .toList(); + } + + @Override + public List getAllDatabases(String catName) { + return getDatabases(catName, "*"); + } + + @Override + public List getTables(String catName, String dbName, String tablePattern) { + validateCurrentCatalog(catName); + + // Convert the Hive glob pattern to a Java regex. + String regex = tablePattern.replace("*", ".*"); + Pattern pattern = Pattern.compile(regex); + + // List tables from the specific database (namespace) and filter them. + return restCatalog.listTables(Namespace.of(dbName)).stream() + .map(TableIdentifier::name) + .filter(pattern.asPredicate()) + .toList(); + } + + @Override + public List getAllTables(String catName, String dbName) { + return getTables(catName, dbName, "*"); + } + + @Override + public void dropTable(Table table, boolean deleteData, boolean ignoreUnknownTab, boolean ifPurge) throws TException { + restCatalog.dropTable(TableIdentifier.of(table.getDbName(), table.getTableName())); + } + + private void validateCurrentCatalog(String catName) { + if (!restCatalog.name().equals(catName)) { + throw new IllegalArgumentException( + String.format("Catalog name '%s' does not match the current catalog '%s'", catName, restCatalog.name())); + } + } + + @Override + public boolean tableExists(String catName, String dbName, String tableName) { + validateCurrentCatalog(catName); + return restCatalog.tableExists(TableIdentifier.of(dbName, tableName)); + } + + @Override + public Database getDatabase(String catName, String dbName) throws NoSuchObjectException { + validateCurrentCatalog(catName); + + return restCatalog.listNamespaces(Namespace.empty()).stream() + .filter(namespace -> namespace.levels()[0].equals(dbName)) + .map(namespace -> { + Database database = new Database(); + database.setName(String.join(NAMESPACE_SEPARATOR, namespace.levels())); + Map namespaceMetadata = restCatalog.loadNamespaceMetadata(Namespace.of(dbName)); + database.setLocationUri(namespaceMetadata.get(CatalogUtils.LOCATION)); + database.setCatalogName(restCatalog.name()); + database.setOwnerName(namespaceMetadata.get(DB_OWNER)); + try { + database.setOwnerType(PrincipalType.valueOf(namespaceMetadata.get(DB_OWNER_TYPE))); + } catch (Exception e) { + LOG.warn("Can not set ownerType: {}", namespaceMetadata.get(DB_OWNER_TYPE), e); + } + return database; + }).findFirst().orElseThrow(() -> + new NoSuchObjectException("Database " + dbName + " not found")); + } + + @Override + public Table getTable(GetTableRequest tableRequest) throws TException { + validateCurrentCatalog(tableRequest.getCatName()); + org.apache.iceberg.Table icebergTable; + try { + icebergTable = restCatalog.loadTable(TableIdentifier.of(tableRequest.getDbName(), + tableRequest.getTblName())); + } catch (NoSuchTableException exception) { + throw new NoSuchObjectException(); + } + return MetastoreUtil.toHiveTable(icebergTable, conf); + } + + @Override + public void createTable(CreateTableRequest request) throws TException { + Table table = request.getTable(); + List cols = Lists.newArrayList(table.getSd().getCols()); + if 
(table.isSetPartitionKeys() && !table.getPartitionKeys().isEmpty()) { + cols.addAll(table.getPartitionKeys()); + } + Properties catalogProperties = CatalogUtils.getCatalogProperties(table); + Schema schema = HiveSchemaUtil.convert(cols, true); + Map envCtxProps = Optional.ofNullable(request.getEnvContext()) + .map(EnvironmentContext::getProperties) + .orElse(Collections.emptyMap()); + org.apache.iceberg.PartitionSpec partitionSpec = + HMSTablePropertyHelper.getPartitionSpec(envCtxProps, schema); + SortOrder sortOrder = HMSTablePropertyHelper.getSortOrder(catalogProperties, schema); + + restCatalog.buildTable(TableIdentifier.of(table.getDbName(), table.getTableName()), schema) + .withPartitionSpec(partitionSpec) + .withLocation(catalogProperties.getProperty(CatalogUtils.LOCATION)) + .withSortOrder(sortOrder) + .withProperties(Maps.fromProperties(catalogProperties)) + .create(); + } + + @Override + public void createDatabase(Database db) { + validateCurrentCatalog(db.getCatalogName()); + Map props = ImmutableMap.of( + CatalogUtils.LOCATION, db.getLocationUri(), + DB_OWNER, db.getOwnerName(), + DB_OWNER_TYPE, db.getOwnerType().toString() + ); + restCatalog.createNamespace(Namespace.of(db.getName()), props); + } + + + @Override + public void dropDatabase(DropDatabaseRequest req) { + validateCurrentCatalog(req.getCatalogName()); + restCatalog.dropNamespace(Namespace.of(req.getName())); + } +} diff --git a/iceberg/iceberg-catalog/src/test/java/org/apache/iceberg/hive/client/TestHiveRESTCatalogClient.java b/iceberg/iceberg-catalog/src/test/java/org/apache/iceberg/hive/client/TestHiveRESTCatalogClient.java new file mode 100644 index 000000000000..f42e7e3775ee --- /dev/null +++ b/iceberg/iceberg-catalog/src/test/java/org/apache/iceberg/hive/client/TestHiveRESTCatalogClient.java @@ -0,0 +1,200 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iceberg.hive.client; + +import java.util.Arrays; +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.CreateTableRequest; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.EnvironmentContext; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.iceberg.BaseTable; +import org.apache.iceberg.CatalogUtil; +import org.apache.iceberg.PartitionSpec; +import org.apache.iceberg.PartitionSpecParser; +import org.apache.iceberg.Schema; +import org.apache.iceberg.TableMetadata; +import org.apache.iceberg.TableOperations; +import org.apache.iceberg.TableProperties; +import org.apache.iceberg.catalog.Catalog; +import org.apache.iceberg.catalog.Namespace; +import org.apache.iceberg.catalog.TableIdentifier; +import org.apache.iceberg.hive.HiveSchemaUtil; +import org.apache.iceberg.io.FileIO; +import org.apache.iceberg.io.LocationProvider; +import org.apache.iceberg.relocated.com.google.common.collect.Maps; +import org.apache.iceberg.rest.RESTCatalog; +import org.apache.thrift.TException; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.MockedStatic; +import org.mockito.Mockito; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; + +public class TestHiveRESTCatalogClient { + + private static HiveRESTCatalogClient spyHiveRESTCatalogClient; + private static RESTCatalog mockRestCatalog; + private static Catalog.TableBuilder mockTableBuilder; + private static MockedStatic mockCatalogUtil; + + private static final TableOperations ops = new TableOperations() { + @Override + public TableMetadata current() { + return TableMetadata.newTableMetadata(new Schema(), PartitionSpec.unpartitioned(), "location", + Maps.newHashMap()); + } + + @Override + public TableMetadata refresh() { + return null; + } + + @Override + public void commit(TableMetadata base, TableMetadata metadata) { + + } + + @Override + public FileIO io() { + return null; + } + + @Override + public String metadataFileLocation(String fileName) { + return null; + } + + @Override + public LocationProvider locationProvider() { + return null; + } + }; + + @BeforeAll + public static void before() throws MetaException { + Configuration configuration = new Configuration(); + configuration.set("iceberg.catalog", "ice01"); + configuration.set("iceberg.catalog.ice01.uri", "http://localhost"); + mockCatalogUtil = Mockito.mockStatic(CatalogUtil.class); + mockRestCatalog = Mockito.mock(RESTCatalog.class); + mockCatalogUtil.when(() -> CatalogUtil.buildIcebergCatalog(any(), any(), any())).thenReturn(mockRestCatalog); + spyHiveRESTCatalogClient = Mockito.spy(new HiveRESTCatalogClient(configuration)); + spyHiveRESTCatalogClient.reconnect(); + } + + @BeforeEach + public void resetMocks() { + Mockito.reset(mockRestCatalog); + + mockTableBuilder = Mockito.mock(Catalog.TableBuilder.class); + Mockito.when(mockTableBuilder.withPartitionSpec(any(PartitionSpec.class))).thenReturn(mockTableBuilder); + 
Mockito.when(mockTableBuilder.withLocation(any())).thenReturn(mockTableBuilder); + Mockito.when(mockTableBuilder.withSortOrder(any())).thenReturn(mockTableBuilder); + Mockito.when(mockTableBuilder.withProperties(any())).thenReturn(mockTableBuilder); + Mockito.doReturn(mockTableBuilder).when(mockRestCatalog).buildTable(any(), any()); + + Mockito.doReturn(new BaseTable(ops, "tableName")).when(mockRestCatalog).loadTable(any()); + Namespace namespace = Namespace.of("default"); + Mockito.doReturn(Collections.singletonList(namespace)).when(mockRestCatalog).listNamespaces(any()); + Mockito.doReturn("hive").when(mockRestCatalog).name(); + Mockito.doReturn(new BaseTable(ops, "tableName")).when(mockRestCatalog).createTable(any(), any(), any(), + any()); + } + + @AfterEach + public void after() { + + } + + @Test + public void testGetTable() throws TException { + spyHiveRESTCatalogClient.getTable("default", "tableName"); + Mockito.verify(mockRestCatalog).loadTable(TableIdentifier.of("default", "tableName")); + } + + @Test + public void testCreateTable() throws TException { + Table table = new Table(); + table.setTableName("tableName"); + table.setDbName("default"); + table.setSd(new StorageDescriptor()); + table.getSd().setCols(new LinkedList<>()); + table.setParameters(Maps.newHashMap()); + spyHiveRESTCatalogClient.createTable(table); + Mockito.verify(mockRestCatalog).buildTable(any(), any()); + } + + @Test + public void testCreatePartitionedTable() throws TException { + Table table = new Table(); + table.setTableName("tableName"); + table.setDbName("default"); + table.setParameters(Maps.newHashMap()); + + FieldSchema col1 = new FieldSchema("id", "string", ""); + FieldSchema col2 = new FieldSchema("city", "string", ""); + List cols = Arrays.asList(col1, col2); + + table.setSd(new StorageDescriptor()); + table.getSd().setCols(cols); + + Schema schema = HiveSchemaUtil.convert(cols, false); + PartitionSpec spec = PartitionSpec.builderFor(schema).identity("city").build(); + String specString = PartitionSpecParser.toJson(spec); + + CreateTableRequest request = new CreateTableRequest(table); + request.setEnvContext(new EnvironmentContext( + Map.ofEntries(Map.entry(TableProperties.DEFAULT_PARTITION_SPEC, specString)))); + spyHiveRESTCatalogClient.createTable(request); + + ArgumentCaptor captor = ArgumentCaptor.forClass(PartitionSpec.class); + // Verify buildTable was called, + Mockito.verify(mockRestCatalog).buildTable(any(), any()); + + // Verify that withPartitionSpec was called + Mockito.verify(mockTableBuilder).withPartitionSpec(captor.capture()); + + // Assert that the correct PartitionSpec was passed to .withPartitionSpec() + PartitionSpec capturedSpec = captor.getValue(); + assertThat(capturedSpec.isPartitioned()).isTrue(); + assertThat(capturedSpec.fields()).hasSize(1); + assertThat(capturedSpec.fields().getFirst().sourceId()).isEqualTo(schema.findField("city").fieldId()); + } + + @Test + public void testGetDatabase() throws TException { + Database aDefault = spyHiveRESTCatalogClient.getDatabase("default"); + assertThat(aDefault.getName()).isEqualTo("default"); + Mockito.verify(mockRestCatalog).listNamespaces(Namespace.empty()); + } +} diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/Catalogs.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/Catalogs.java index f65ed35af5c3..e431e0323a45 100644 --- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/Catalogs.java +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/Catalogs.java @@ 
-24,23 +24,21 @@ import java.util.Properties; import java.util.Set; import org.apache.hadoop.conf.Configuration; -import org.apache.iceberg.CatalogProperties; import org.apache.iceberg.CatalogUtil; import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.PartitionSpecParser; import org.apache.iceberg.Schema; import org.apache.iceberg.SchemaParser; import org.apache.iceberg.SortOrder; -import org.apache.iceberg.SortOrderParser; import org.apache.iceberg.Table; -import org.apache.iceberg.TableProperties; import org.apache.iceberg.catalog.Catalog; import org.apache.iceberg.catalog.TableIdentifier; import org.apache.iceberg.hadoop.HadoopTables; +import org.apache.iceberg.hive.CatalogUtils; +import org.apache.iceberg.hive.HMSTablePropertyHelper; import org.apache.iceberg.relocated.com.google.common.base.Preconditions; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet; import org.apache.iceberg.relocated.com.google.common.collect.Maps; -import org.apache.parquet.Strings; /** * Class for catalog resolution and accessing the common functions for {@link Catalog} API. @@ -73,6 +71,7 @@ public final class Catalogs { public static final String SNAPSHOT_REF = "snapshot_ref"; private static final String NO_CATALOG_TYPE = "no catalog"; + private static final Set PROPERTIES_TO_REMOVE = ImmutableSet.of(InputFormatConfig.TABLE_SCHEMA, InputFormatConfig.PARTITION_SPEC, LOCATION, NAME, InputFormatConfig.CATALOG_NAME); @@ -144,7 +143,7 @@ public static Table createTable(Configuration conf, Properties props) { Map map = filterIcebergTableProperties(props); Optional catalog = loadCatalog(conf, catalogName); - SortOrder sortOrder = getSortOrder(props, schema); + SortOrder sortOrder = HMSTablePropertyHelper.getSortOrder(props, schema); if (catalog.isPresent()) { String name = props.getProperty(NAME); Preconditions.checkNotNull(name, "Table identifier not set"); @@ -156,12 +155,6 @@ public static Table createTable(Configuration conf, Properties props) { return new HadoopTables(conf).create(schema, spec, sortOrder, map, location); } - private static SortOrder getSortOrder(Properties props, Schema schema) { - String sortOrderJsonString = props.getProperty(TableProperties.DEFAULT_SORT_ORDER); - return Strings.isNullOrEmpty(sortOrderJsonString) ? - SortOrder.unsorted() : SortOrderParser.fromJson(schema, sortOrderJsonString); - } - /** * Drops an Iceberg table using the catalog specified by the configuration. *
<p>
@@ -194,30 +187,7 @@ public static boolean dropTable(Configuration conf, Properties props) { * @return true if the Catalog is HiveCatalog */ public static boolean hiveCatalog(Configuration conf, Properties props) { - String catalogName = props.getProperty(InputFormatConfig.CATALOG_NAME); - String catalogType = getCatalogType(conf, catalogName); - if (catalogType != null) { - return CatalogUtil.ICEBERG_CATALOG_TYPE_HIVE.equalsIgnoreCase(catalogType); - } - catalogType = getCatalogType(conf, ICEBERG_DEFAULT_CATALOG_NAME); - if (catalogType != null) { - return CatalogUtil.ICEBERG_CATALOG_TYPE_HIVE.equalsIgnoreCase(catalogType); - } - return getCatalogProperties(conf, catalogName).get(CatalogProperties.CATALOG_IMPL) == null; - } - - public static boolean hadoopCatalog(Configuration conf, Properties props) { - String catalogName = props.getProperty(InputFormatConfig.CATALOG_NAME); - String catalogType = getCatalogType(conf, catalogName); - if (catalogType != null) { - return CatalogUtil.ICEBERG_CATALOG_TYPE_HADOOP.equalsIgnoreCase(catalogType); - } - catalogType = getCatalogType(conf, ICEBERG_DEFAULT_CATALOG_NAME); - if (catalogType != null) { - return CatalogUtil.ICEBERG_CATALOG_TYPE_HADOOP.equalsIgnoreCase(catalogType); - } - return CatalogUtil.ICEBERG_CATALOG_HADOOP.equals( - getCatalogProperties(conf, catalogName).get(CatalogProperties.CATALOG_IMPL)); + return CatalogUtils.assertCatalogType(conf, props, CatalogUtil.ICEBERG_CATALOG_TYPE_HIVE, null); } /** @@ -241,7 +211,7 @@ public static Table registerTable(Configuration conf, Properties props, String m return catalog.get().registerTable(TableIdentifier.parse(name), metadataLocation); } Preconditions.checkNotNull(location, "Table location not set"); - SortOrder sortOrder = getSortOrder(props, schema); + SortOrder sortOrder = HMSTablePropertyHelper.getSortOrder(props, schema); return new HadoopTables(conf).create(schema, spec, sortOrder, map, location); } @@ -259,65 +229,13 @@ public static void renameTable(Configuration conf, Properties props, TableIdenti } static Optional loadCatalog(Configuration conf, String catalogName) { - String catalogType = getCatalogType(conf, catalogName); + String catalogType = CatalogUtils.getCatalogType(conf, catalogName); if (NO_CATALOG_TYPE.equalsIgnoreCase(catalogType)) { return Optional.empty(); } else { String name = catalogName == null ? ICEBERG_DEFAULT_CATALOG_NAME : catalogName; return Optional.of(CatalogUtil.buildIcebergCatalog(name, - getCatalogProperties(conf, name), conf)); - } - } - - /** - * Collect all the catalog specific configuration from the global hive configuration. - * @param conf a Hadoop configuration - * @param catalogName name of the catalog - * @return complete map of catalog properties - */ - private static Map getCatalogProperties(Configuration conf, String catalogName) { - Map catalogProperties = Maps.newHashMap(); - String keyPrefix = InputFormatConfig.CATALOG_CONFIG_PREFIX + catalogName; - conf.forEach(config -> { - if (config.getKey().startsWith(InputFormatConfig.CATALOG_DEFAULT_CONFIG_PREFIX)) { - catalogProperties.putIfAbsent( - config.getKey().substring(InputFormatConfig.CATALOG_DEFAULT_CONFIG_PREFIX.length()), - config.getValue()); - } else if (config.getKey().startsWith(keyPrefix)) { - catalogProperties.put( - config.getKey().substring(keyPrefix.length() + 1), - config.getValue()); - } - }); - - return catalogProperties; - } - - /** - * Return the catalog type based on the catalog name. - *
<p>
- * See {@link Catalogs} documentation for catalog type resolution strategy. - * - * @param conf global hive configuration - * @param catalogName name of the catalog - * @return type of the catalog, can be null - */ - private static String getCatalogType(Configuration conf, String catalogName) { - if (catalogName != null) { - String catalogType = conf.get(InputFormatConfig.catalogPropertyConfigKey( - catalogName, CatalogUtil.ICEBERG_CATALOG_TYPE)); - if (catalogName.equals(ICEBERG_HADOOP_TABLE_NAME)) { - return NO_CATALOG_TYPE; - } else { - return catalogType; - } - } else { - String catalogType = conf.get(CatalogUtil.ICEBERG_CATALOG_TYPE); - if (catalogType != null && catalogType.equals(LOCATION)) { - return NO_CATALOG_TYPE; - } else { - return catalogType; - } + CatalogUtils.getCatalogProperties(conf, name), conf)); } } diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/InputFormatConfig.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/InputFormatConfig.java index 4898b4b8954f..823c7a37ba37 100644 --- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/InputFormatConfig.java +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/InputFormatConfig.java @@ -69,24 +69,14 @@ private InputFormatConfig() { public static final int COMMIT_TABLE_THREAD_POOL_SIZE_DEFAULT = 10; public static final String COMMIT_FILE_THREAD_POOL_SIZE = "iceberg.mr.commit.file.thread.pool.size"; public static final int COMMIT_FILE_THREAD_POOL_SIZE_DEFAULT = 10; - public static final String WRITE_TARGET_FILE_SIZE = "iceberg.mr.write.target.file.size"; public static final String CASE_SENSITIVE = "iceberg.mr.case.sensitive"; public static final boolean CASE_SENSITIVE_DEFAULT = true; public static final String CATALOG_NAME = "iceberg.catalog"; - public static final String HADOOP_CATALOG = "hadoop.catalog"; - public static final String HADOOP_TABLES = "hadoop.tables"; public static final String HIVE_CATALOG = "hive.catalog"; - public static final String ICEBERG_SNAPSHOTS_TABLE_SUFFIX = ".snapshots"; - public static final String SNAPSHOT_TABLE = "iceberg.snapshots.table"; - public static final String SNAPSHOT_TABLE_SUFFIX = "__snapshots"; public static final String CATALOG_CONFIG_PREFIX = "iceberg.catalog."; - public static final String CATALOG_TYPE_TEMPLATE = "iceberg.catalog.%s.type"; - public static final String CATALOG_WAREHOUSE_TEMPLATE = "iceberg.catalog.%s.warehouse"; - public static final String CATALOG_CLASS_TEMPLATE = "iceberg.catalog.%s.catalog-impl"; - public static final String CATALOG_DEFAULT_CONFIG_PREFIX = "iceberg.catalog-default."; public enum InMemoryDataModel { HIVE, @@ -215,18 +205,6 @@ public static boolean fetchVirtualColumns(Configuration conf) { return conf.getBoolean(InputFormatConfig.FETCH_VIRTUAL_COLUMNS, false); } - /** - * Get Hadoop config key of a catalog property based on catalog name - * @param catalogName catalog name - * @param catalogProperty catalog property, can be any custom property, - * a commonly used list of properties can be found - * at {@link org.apache.iceberg.CatalogProperties} - * @return Hadoop config key of a catalog property for the catalog name - */ - public static String catalogPropertyConfigKey(String catalogName, String catalogProperty) { - return String.format("%s%s.%s", CATALOG_CONFIG_PREFIX, catalogName, catalogProperty); - } - private static Schema schema(Configuration conf, String key) { String json = conf.get(key); return json == null ? 
null : SchemaParser.fromJson(json); diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/BaseHiveIcebergMetaHook.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/BaseHiveIcebergMetaHook.java new file mode 100644 index 000000000000..e160deebf418 --- /dev/null +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/BaseHiveIcebergMetaHook.java @@ -0,0 +1,433 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iceberg.mr.hive; + +import com.fasterxml.jackson.databind.ObjectMapper; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Properties; +import java.util.Set; +import java.util.stream.Collectors; +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.HiveMetaHook; +import org.apache.hadoop.hive.metastore.api.CreateTableRequest; +import org.apache.hadoop.hive.metastore.api.EnvironmentContext; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.ql.ddl.misc.sortoder.SortFieldDesc; +import org.apache.hadoop.hive.ql.ddl.misc.sortoder.SortFields; +import org.apache.hadoop.hive.ql.util.NullOrdering; +import org.apache.iceberg.BaseMetastoreTableOperations; +import org.apache.iceberg.BaseTable; +import org.apache.iceberg.NullOrder; +import org.apache.iceberg.PartitionSpec; +import org.apache.iceberg.PartitionSpecParser; +import org.apache.iceberg.Schema; +import org.apache.iceberg.SchemaParser; +import org.apache.iceberg.SortDirection; +import org.apache.iceberg.SortOrder; +import org.apache.iceberg.SortOrderParser; +import org.apache.iceberg.Table; +import org.apache.iceberg.TableProperties; +import org.apache.iceberg.catalog.TableIdentifier; +import org.apache.iceberg.exceptions.NoSuchTableException; +import org.apache.iceberg.exceptions.NotFoundException; +import org.apache.iceberg.hive.CatalogUtils; +import org.apache.iceberg.hive.HMSTablePropertyHelper; +import org.apache.iceberg.hive.HiveSchemaUtil; +import org.apache.iceberg.mr.Catalogs; +import org.apache.iceberg.mr.InputFormatConfig; +import org.apache.iceberg.relocated.com.google.common.base.Preconditions; +import org.apache.iceberg.relocated.com.google.common.base.Splitter; +import org.apache.iceberg.relocated.com.google.common.base.Strings; +import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList; +import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap; +import 
org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet; +import org.apache.iceberg.relocated.com.google.common.collect.Lists; +import org.apache.iceberg.types.Types; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import static org.apache.iceberg.RowLevelOperationMode.MERGE_ON_READ; + +public class BaseHiveIcebergMetaHook implements HiveMetaHook { + private static final Logger LOG = LoggerFactory.getLogger(BaseHiveIcebergMetaHook.class); + private static final ObjectMapper JSON_OBJECT_MAPPER = new ObjectMapper(); + public static final Map COMMON_HMS_PROPERTIES = ImmutableMap.of( + BaseMetastoreTableOperations.TABLE_TYPE_PROP, BaseMetastoreTableOperations.ICEBERG_TABLE_TYPE_VALUE.toUpperCase() + ); + private static final Set PARAMETERS_TO_REMOVE = ImmutableSet + .of(InputFormatConfig.TABLE_SCHEMA, Catalogs.LOCATION, Catalogs.NAME, InputFormatConfig.PARTITION_SPEC); + static final String ORC_FILES_ONLY = "iceberg.orc.files.only"; + + protected final Configuration conf; + protected Table icebergTable = null; + protected Properties catalogProperties; + protected boolean createHMSTableInHook = false; + + public enum FileFormat { + ORC("orc"), PARQUET("parquet"), AVRO("avro"); + + private final String label; + + FileFormat(String label) { + this.label = label; + } + + public String getLabel() { + return label; + } + } + + public BaseHiveIcebergMetaHook(Configuration conf) { + this.conf = conf; + } + + @Override + public void preCreateTable(org.apache.hadoop.hive.metastore.api.Table hmsTable) { + CreateTableRequest request = new CreateTableRequest(hmsTable); + preCreateTable(request); + } + + @Override + public void preCreateTable(CreateTableRequest request) { + org.apache.hadoop.hive.metastore.api.Table hmsTable = request.getTable(); + if (hmsTable.isTemporary()) { + throw new UnsupportedOperationException("Creation of temporary iceberg tables is not supported."); + } + this.catalogProperties = CatalogUtils.getCatalogProperties(hmsTable); + + // Set the table type even for non HiveCatalog based tables + hmsTable.getParameters().put(BaseMetastoreTableOperations.TABLE_TYPE_PROP, + BaseMetastoreTableOperations.ICEBERG_TABLE_TYPE_VALUE.toUpperCase()); + + if (!Catalogs.hiveCatalog(conf, catalogProperties)) { + if (Boolean.parseBoolean(this.catalogProperties.getProperty(hive_metastoreConstants.TABLE_IS_CTLT))) { + throw new UnsupportedOperationException("CTLT target table must be a HiveCatalog table."); + } + // For non-HiveCatalog tables too, we should set the input and output format + // so that the table can be read by other engines like Impala + hmsTable.getSd().setInputFormat(HiveIcebergInputFormat.class.getCanonicalName()); + hmsTable.getSd().setOutputFormat(HiveIcebergOutputFormat.class.getCanonicalName()); + + // If not using HiveCatalog check for existing table + try { + this.icebergTable = IcebergTableUtil.getTable(conf, catalogProperties, true); + + if (CatalogUtils.hadoopCatalog(conf, catalogProperties) && hmsTable.getSd() != null && + hmsTable.getSd().getLocation() == null) { + hmsTable.getSd().setLocation(icebergTable.location()); + } + Preconditions.checkArgument(catalogProperties.getProperty(InputFormatConfig.TABLE_SCHEMA) == null, + "Iceberg table already created - can not use provided schema"); + Preconditions.checkArgument(catalogProperties.getProperty(InputFormatConfig.PARTITION_SPEC) == null, + "Iceberg table already created - can not use provided partition specification"); + + LOG.info("Iceberg table already exists {}", icebergTable); + return; + } 
catch (NoSuchTableException nte) { + // If the table does not exist we will create it below + } + } + + // If the table does not exist collect data for table creation + // - InputFormatConfig.TABLE_SCHEMA, InputFormatConfig.PARTITION_SPEC takes precedence so the user can override the + // Iceberg schema and specification generated by the code + + Set identifierFields = Optional.ofNullable(request.getPrimaryKeys()) + .map(primaryKeys -> + primaryKeys.stream().map(SQLPrimaryKey::getColumn_name).collect(Collectors.toSet())) + .orElse(Collections.emptySet()); + + Schema schema = schema(catalogProperties, hmsTable, identifierFields); + PartitionSpec spec = spec(conf, schema, hmsTable); + + // If there are partition keys specified remove them from the HMS table and add them to the column list + if (hmsTable.isSetPartitionKeys()) { + hmsTable.getSd().getCols().addAll(hmsTable.getPartitionKeys()); + hmsTable.setPartitionKeysIsSet(false); + } + + catalogProperties.put(InputFormatConfig.TABLE_SCHEMA, SchemaParser.toJson(schema)); + String specString = PartitionSpecParser.toJson(spec); + catalogProperties.put(InputFormatConfig.PARTITION_SPEC, specString); + validateCatalogConfigsDefined(); + + if (request.getEnvContext() == null) { + request.setEnvContext(new EnvironmentContext()); + } + request.getEnvContext().putToProperties(TableProperties.DEFAULT_PARTITION_SPEC, specString); + setCommonHmsTablePropertiesForIceberg(hmsTable); + + if (hmsTable.getParameters().containsKey(BaseMetastoreTableOperations.METADATA_LOCATION_PROP)) { + createHMSTableInHook = true; + } + + assertFileFormat(catalogProperties.getProperty(TableProperties.DEFAULT_FILE_FORMAT)); + + // Set whether the format is ORC, to be used during vectorization. + setOrcOnlyFilesParam(hmsTable); + // Remove hive primary key columns from table request, as iceberg doesn't support hive primary key. + request.setPrimaryKeys(null); + setSortOrder(hmsTable, schema, catalogProperties); + } + + /** + * Method for verification that necessary catalog configs are defined in Session Conf. + * + *
<p>
If the catalog name is provided in 'iceberg.catalog' table property, + * and the name is not the default catalog and not hadoop catalog, checks that one of the two configs + * is defined in Session Conf: iceberg.catalog.catalogName.type + * or iceberg.catalog.catalogName.catalog-impl. See description in Catalogs.java for more details. + * + */ + private void validateCatalogConfigsDefined() { + String catalogName = catalogProperties.getProperty(InputFormatConfig.CATALOG_NAME); + if (!StringUtils.isEmpty(catalogName) && !Catalogs.ICEBERG_HADOOP_TABLE_NAME.equals(catalogName)) { + + boolean configsExist = !StringUtils.isEmpty(CatalogUtils.getCatalogType(conf, catalogName)) || + !StringUtils.isEmpty(CatalogUtils.getCatalogImpl(conf, catalogName)); + + Preconditions.checkArgument(configsExist, "Catalog type or impl must be set for catalog: %s", catalogName); + } + } + + private void setSortOrder(org.apache.hadoop.hive.metastore.api.Table hmsTable, Schema schema, + Properties properties) { + String sortOderJSONString = hmsTable.getParameters().get(TableProperties.DEFAULT_SORT_ORDER); + SortFields sortFields = null; + if (!Strings.isNullOrEmpty(sortOderJSONString)) { + try { + sortFields = JSON_OBJECT_MAPPER.reader().readValue(sortOderJSONString, SortFields.class); + } catch (Exception e) { + LOG.warn("Can not read write order json: {}", sortOderJSONString, e); + return; + } + if (sortFields != null && !sortFields.getSortFields().isEmpty()) { + SortOrder.Builder sortOderBuilder = SortOrder.builderFor(schema); + sortFields.getSortFields().forEach(fieldDesc -> { + NullOrder nullOrder = fieldDesc.getNullOrdering() == NullOrdering.NULLS_FIRST ? + NullOrder.NULLS_FIRST : NullOrder.NULLS_LAST; + SortDirection sortDirection = fieldDesc.getDirection() == SortFieldDesc.SortDirection.ASC ? + SortDirection.ASC : SortDirection.DESC; + sortOderBuilder.sortBy(fieldDesc.getColumnName(), sortDirection, nullOrder); + }); + properties.put(TableProperties.DEFAULT_SORT_ORDER, SortOrderParser.toJson(sortOderBuilder.build())); + } + } + } + + @Override + public void rollbackCreateTable(org.apache.hadoop.hive.metastore.api.Table hmsTable) { + // do nothing + } + + @Override + public void commitCreateTable(org.apache.hadoop.hive.metastore.api.Table hmsTable) { + // do nothing + } + + @Override + public void preDropTable(org.apache.hadoop.hive.metastore.api.Table hmsTable) { + // do nothing + } + + @Override + public void rollbackDropTable(org.apache.hadoop.hive.metastore.api.Table hmsTable) { + // do nothing + } + + @Override + public void commitDropTable(org.apache.hadoop.hive.metastore.api.Table hmsTable, boolean deleteData) { + // do nothing + } + + @Override + public boolean createHMSTableInHook() { + return createHMSTableInHook; + } + + private static void assertFileFormat(String format) { + if (format == null) { + return; + } + String lowerCaseFormat = format.toLowerCase(); + Preconditions.checkArgument(Arrays.stream(FileFormat.values()).anyMatch(v -> lowerCaseFormat.contains(v.label)), + String.format("Unsupported fileformat %s", format)); + } + + protected void setCommonHmsTablePropertiesForIceberg(org.apache.hadoop.hive.metastore.api.Table hmsTable) { + if (CatalogUtils.isHadoopTable(conf, catalogProperties)) { + String location = (hmsTable.getSd() != null) ? 
hmsTable.getSd().getLocation() : null; + if (location == null && CatalogUtils.hadoopCatalog(conf, catalogProperties)) { + location = IcebergTableUtil.defaultWarehouseLocation( + TableIdentifier.of(hmsTable.getDbName(), hmsTable.getTableName()), + conf, catalogProperties); + hmsTable.getSd().setLocation(location); + } + Preconditions.checkArgument(location != null, "Table location not set"); + } + + Map hmsParams = hmsTable.getParameters(); + COMMON_HMS_PROPERTIES.forEach(hmsParams::putIfAbsent); + + // Remove null values from hms table properties + hmsParams.entrySet().removeIf(e -> e.getKey() == null || e.getValue() == null); + + // Remove creation related properties + PARAMETERS_TO_REMOVE.forEach(hmsParams::remove); + + setWriteModeDefaults(null, hmsParams, null); + } + + protected Schema schema(Properties properties, org.apache.hadoop.hive.metastore.api.Table hmsTable, + Set identifierFields) { + boolean autoConversion = conf.getBoolean(InputFormatConfig.SCHEMA_AUTO_CONVERSION, false); + + if (properties.getProperty(InputFormatConfig.TABLE_SCHEMA) != null) { + return SchemaParser.fromJson(properties.getProperty(InputFormatConfig.TABLE_SCHEMA)); + } + List cols = Lists.newArrayList(hmsTable.getSd().getCols()); + if (hmsTable.isSetPartitionKeys() && !hmsTable.getPartitionKeys().isEmpty()) { + cols.addAll(hmsTable.getPartitionKeys()); + } + Schema schema = HiveSchemaUtil.convert(cols, autoConversion); + + return getSchemaWithIdentifierFields(schema, identifierFields); + } + + private Schema getSchemaWithIdentifierFields(Schema schema, Set identifierFields) { + if (identifierFields == null || identifierFields.isEmpty()) { + return schema; + } + Set identifierFieldIds = identifierFields.stream() + .map(column -> { + Types.NestedField field = schema.findField(column); + Preconditions.checkNotNull(field, + "Cannot find identifier field ID for the column %s in schema %s", column, schema); + return field.fieldId(); + }) + .collect(Collectors.toSet()); + + List cols = schema.columns().stream() + .map(column -> identifierFieldIds.contains(column.fieldId()) ? column.asRequired() : column) + .toList(); + + return new Schema(cols, identifierFieldIds); + } + + protected static PartitionSpec spec(Configuration configuration, Schema schema, + org.apache.hadoop.hive.metastore.api.Table hmsTable) { + + Preconditions.checkArgument(!hmsTable.isSetPartitionKeys() || hmsTable.getPartitionKeys().isEmpty(), + "We can only handle non-partitioned Hive tables. 
The Iceberg schema should be in " + + InputFormatConfig.PARTITION_SPEC + " or already converted to a partition transform "); + + PartitionSpec spec = IcebergTableUtil.spec(configuration, schema); + if (spec != null) { + Preconditions.checkArgument(hmsTable.getParameters().get(InputFormatConfig.PARTITION_SPEC) == null, + "Provide only one of the following: Hive partition transform specification, or the " + + InputFormatConfig.PARTITION_SPEC + " property"); + return spec; + } + + return HMSTablePropertyHelper.getPartitionSpec(hmsTable.getParameters(), schema); + } + + protected void setOrcOnlyFilesParam(org.apache.hadoop.hive.metastore.api.Table hmsTable) { + hmsTable.getParameters().put(ORC_FILES_ONLY, String.valueOf(isOrcOnlyFiles(hmsTable))); + } + + protected boolean isOrcOnlyFiles(org.apache.hadoop.hive.metastore.api.Table hmsTable) { + return !"FALSE".equalsIgnoreCase(hmsTable.getParameters().get(ORC_FILES_ONLY)) && + (hmsTable.getSd().getInputFormat() != null && + hmsTable.getSd().getInputFormat().toUpperCase().contains(org.apache.iceberg.FileFormat.ORC.name()) || + org.apache.iceberg.FileFormat.ORC.name() + .equalsIgnoreCase(hmsTable.getSd().getSerdeInfo().getParameters() + .get(TableProperties.DEFAULT_FILE_FORMAT)) || + org.apache.iceberg.FileFormat.ORC.name() + .equalsIgnoreCase(hmsTable.getParameters().get(TableProperties.DEFAULT_FILE_FORMAT))); + } + + protected void setWriteModeDefaults(Table icebergTbl, Map newProps, EnvironmentContext context) { + if ((icebergTbl == null || ((BaseTable) icebergTbl).operations().current().formatVersion() == 1) && + IcebergTableUtil.isV2TableOrAbove(newProps)) { + List writeModeList = ImmutableList.of( + TableProperties.DELETE_MODE, TableProperties.UPDATE_MODE, TableProperties.MERGE_MODE); + writeModeList.stream() + .filter(writeMode -> catalogProperties.get(writeMode) == null) + .forEach(writeMode -> { + catalogProperties.put(writeMode, MERGE_ON_READ.modeName()); + newProps.put(writeMode, MERGE_ON_READ.modeName()); + }); + + if (context != null) { + Splitter splitter = Splitter.on(PROPERTIES_SEPARATOR); + Map contextProperties = context.getProperties(); + if (contextProperties.containsKey(SET_PROPERTIES)) { + String propValue = context.getProperties().get(SET_PROPERTIES); + String writeModeStr = writeModeList.stream() + .filter(writeMode -> !splitter.splitToList(propValue).contains(writeMode)) + .collect(Collectors.joining("'")); + if (!writeModeStr.isEmpty()) { + contextProperties.put(SET_PROPERTIES, propValue + "'" + writeModeStr); + } + } + } + } + } + + @Override + public void postGetTable(org.apache.hadoop.hive.metastore.api.Table hmsTable) { + if (hmsTable != null) { + try { + Table tbl = IcebergTableUtil.getTable(conf, hmsTable); + String formatVersion = String.valueOf(((BaseTable) tbl).operations().current().formatVersion()); + hmsTable.getParameters().put(TableProperties.FORMAT_VERSION, formatVersion); + // Set the serde info + hmsTable.getSd().setInputFormat(HiveIcebergInputFormat.class.getName()); + hmsTable.getSd().setOutputFormat(HiveIcebergOutputFormat.class.getName()); + hmsTable.getSd().getSerdeInfo().setSerializationLib(HiveIcebergSerDe.class.getName()); + String storageHandler = hmsTable.getParameters().get(hive_metastoreConstants.META_TABLE_STORAGE); + // Check if META_TABLE_STORAGE is not present or is not an instance of ICEBERG_STORAGE_HANDLER + if (storageHandler == null || !isHiveIcebergStorageHandler(storageHandler)) { + hmsTable.getParameters() + .put(hive_metastoreConstants.META_TABLE_STORAGE, 
HMSTablePropertyHelper.HIVE_ICEBERG_STORAGE_HANDLER); + } + } catch (NoSuchTableException | NotFoundException ex) { + // If the table doesn't exist, ignore throwing exception from here + } + } + } + + private static boolean isHiveIcebergStorageHandler(String storageHandler) { + try { + Class storageHandlerClass = Class.forName(storageHandler); + return Class.forName(HIVE_ICEBERG_STORAGE_HANDLER).isAssignableFrom(storageHandlerClass); + } catch (ClassNotFoundException e) { + throw new RuntimeException("Error checking storage handler class", e); + } + } +} diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java index a7389f7f20f5..aec7ab7b03b3 100644 --- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java @@ -19,11 +19,9 @@ package org.apache.iceberg.mr.hive; -import com.fasterxml.jackson.databind.ObjectMapper; import java.io.IOException; import java.net.URLDecoder; import java.nio.charset.StandardCharsets; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.EnumSet; @@ -31,8 +29,6 @@ import java.util.Map; import java.util.Objects; import java.util.Optional; -import java.util.Properties; -import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; import org.apache.commons.collections.CollectionUtils; @@ -44,13 +40,11 @@ import org.apache.hadoop.hive.metastore.HiveMetaHook; import org.apache.hadoop.hive.metastore.PartitionDropOptions; import org.apache.hadoop.hive.metastore.Warehouse; -import org.apache.hadoop.hive.metastore.api.CreateTableRequest; import org.apache.hadoop.hive.metastore.api.DropPartitionsExpr; import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.RequestPartsSpec; -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; @@ -58,8 +52,6 @@ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.QueryState; -import org.apache.hadoop.hive.ql.ddl.misc.sortoder.SortFieldDesc; -import org.apache.hadoop.hive.ql.ddl.misc.sortoder.SortFields; import org.apache.hadoop.hive.ql.ddl.table.AlterTableType; import org.apache.hadoop.hive.ql.exec.SerializationUtilities; import org.apache.hadoop.hive.ql.io.AcidUtils; @@ -76,7 +68,6 @@ import org.apache.hadoop.hive.ql.plan.HiveOperation; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.session.SessionStateUtil; -import org.apache.hadoop.hive.ql.util.NullOrdering; import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo; @@ -92,7 +83,6 @@ import org.apache.iceberg.FileScanTask; import org.apache.iceberg.MetadataTableType; import org.apache.iceberg.MetadataTableUtils; -import org.apache.iceberg.NullOrder; import org.apache.iceberg.PartitionData; import 
org.apache.iceberg.PartitionField; import org.apache.iceberg.PartitionSpec; @@ -100,9 +90,6 @@ import org.apache.iceberg.PartitionsTable; import org.apache.iceberg.Schema; import org.apache.iceberg.SchemaParser; -import org.apache.iceberg.SortDirection; -import org.apache.iceberg.SortOrder; -import org.apache.iceberg.SortOrderParser; import org.apache.iceberg.Table; import org.apache.iceberg.TableMetadata; import org.apache.iceberg.TableMetadataParser; @@ -113,14 +100,13 @@ import org.apache.iceberg.UpdateSchema; import org.apache.iceberg.catalog.TableIdentifier; import org.apache.iceberg.exceptions.NoSuchTableException; -import org.apache.iceberg.exceptions.NotFoundException; import org.apache.iceberg.expressions.Expression; import org.apache.iceberg.expressions.Expressions; import org.apache.iceberg.expressions.ResidualEvaluator; import org.apache.iceberg.expressions.UnboundPredicate; import org.apache.iceberg.expressions.UnboundTerm; import org.apache.iceberg.hive.CachedClientPool; -import org.apache.iceberg.hive.HMSTablePropertyHelper; +import org.apache.iceberg.hive.CatalogUtils; import org.apache.iceberg.hive.HiveLock; import org.apache.iceberg.hive.HiveSchemaUtil; import org.apache.iceberg.hive.HiveTableOperations; @@ -135,10 +121,8 @@ import org.apache.iceberg.mr.InputFormatConfig; import org.apache.iceberg.relocated.com.google.common.base.Preconditions; import org.apache.iceberg.relocated.com.google.common.base.Splitter; -import org.apache.iceberg.relocated.com.google.common.base.Strings; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap; -import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet; import org.apache.iceberg.relocated.com.google.common.collect.Lists; import org.apache.iceberg.relocated.com.google.common.collect.Maps; import org.apache.iceberg.relocated.com.google.common.collect.Sets; @@ -151,21 +135,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.iceberg.RowLevelOperationMode.MERGE_ON_READ; - -public class HiveIcebergMetaHook implements HiveMetaHook { +public class HiveIcebergMetaHook extends BaseHiveIcebergMetaHook { private static final Logger LOG = LoggerFactory.getLogger(HiveIcebergMetaHook.class); - private static final ObjectMapper JSON_OBJECT_MAPPER = new ObjectMapper(); - public static final Map COMMON_HMS_PROPERTIES = ImmutableMap.of( - BaseMetastoreTableOperations.TABLE_TYPE_PROP, BaseMetastoreTableOperations.ICEBERG_TABLE_TYPE_VALUE.toUpperCase() - ); - private static final Set PARAMETERS_TO_REMOVE = ImmutableSet - .of(InputFormatConfig.TABLE_SCHEMA, Catalogs.LOCATION, Catalogs.NAME, InputFormatConfig.PARTITION_SPEC); - private static final Set PROPERTIES_TO_REMOVE = ImmutableSet - // We don't want to push down the metadata location props to Iceberg from HMS, - // since the snapshot pointer in HMS would always be one step ahead - .of(BaseMetastoreTableOperations.METADATA_LOCATION_PROP, - BaseMetastoreTableOperations.PREVIOUS_METADATA_LOCATION_PROP); static final EnumSet SUPPORTED_ALTER_OPS = EnumSet.of( AlterTableType.ADDCOLS, AlterTableType.REPLACE_COLUMNS, AlterTableType.RENAME_COLUMN, AlterTableType.DROP_COLUMN, AlterTableType.ADDPROPS, AlterTableType.DROPPROPS, AlterTableType.SETPARTITIONSPEC, @@ -180,14 +151,9 @@ public class HiveIcebergMetaHook implements HiveMetaHook { private static final List> EMPTY_FILTER = Lists.newArrayList(org.apache.commons.lang3.tuple.Pair.of(1, new byte[0])); static 
final String MIGRATED_TO_ICEBERG = "MIGRATED_TO_ICEBERG"; - static final String ORC_FILES_ONLY = "iceberg.orc.files.only"; - static final String DECIMAL64_VECTORIZATION = "iceberg.decimal64.vectorization"; static final String MANUAL_ICEBERG_METADATA_LOCATION_CHANGE = "MANUAL_ICEBERG_METADATA_LOCATION_CHANGE"; - private final Configuration conf; - private Table icebergTable = null; - private Properties catalogProperties; private boolean deleteIcebergTable; private FileIO deleteIo; private TableMetadata deleteMetadata; @@ -196,127 +162,10 @@ public class HiveIcebergMetaHook implements HiveMetaHook { private UpdateSchema updateSchema; private Transaction transaction; private AlterTableType currentAlterTableOp; - private boolean createHMSTableInHook = false; private HiveLock commitLock; - private enum FileFormat { - ORC("orc"), PARQUET("parquet"), AVRO("avro"); - - private final String label; - - FileFormat(String label) { - this.label = label; - } - } - public HiveIcebergMetaHook(Configuration conf) { - this.conf = conf; - } - - @Override - public void preCreateTable(org.apache.hadoop.hive.metastore.api.Table hmsTable) { - CreateTableRequest request = new CreateTableRequest(hmsTable); - preCreateTable(request); - } - @Override - public void preCreateTable(CreateTableRequest request) { - org.apache.hadoop.hive.metastore.api.Table hmsTable = request.getTable(); - if (hmsTable.isTemporary()) { - throw new UnsupportedOperationException("Creation of temporary iceberg tables is not supported."); - } - this.catalogProperties = getCatalogProperties(hmsTable); - - // Set the table type even for non HiveCatalog based tables - hmsTable.getParameters().put(BaseMetastoreTableOperations.TABLE_TYPE_PROP, - BaseMetastoreTableOperations.ICEBERG_TABLE_TYPE_VALUE.toUpperCase()); - - if (!Catalogs.hiveCatalog(conf, catalogProperties)) { - if (Boolean.parseBoolean(this.catalogProperties.getProperty(hive_metastoreConstants.TABLE_IS_CTLT))) { - throw new UnsupportedOperationException("CTLT target table must be a HiveCatalog table."); - } - // For non-HiveCatalog tables too, we should set the input and output format - // so that the table can be read by other engines like Impala - hmsTable.getSd().setInputFormat(HiveIcebergInputFormat.class.getCanonicalName()); - hmsTable.getSd().setOutputFormat(HiveIcebergOutputFormat.class.getCanonicalName()); - - // If not using HiveCatalog check for existing table - try { - this.icebergTable = IcebergTableUtil.getTable(conf, catalogProperties, true); - - if (Catalogs.hadoopCatalog(conf, catalogProperties) && hmsTable.getSd() != null && - hmsTable.getSd().getLocation() == null) { - hmsTable.getSd().setLocation(icebergTable.location()); - } - Preconditions.checkArgument(catalogProperties.getProperty(InputFormatConfig.TABLE_SCHEMA) == null, - "Iceberg table already created - can not use provided schema"); - Preconditions.checkArgument(catalogProperties.getProperty(InputFormatConfig.PARTITION_SPEC) == null, - "Iceberg table already created - can not use provided partition specification"); - - LOG.info("Iceberg table already exists {}", icebergTable); - return; - } catch (NoSuchTableException nte) { - // If the table does not exist we will create it below - } - } - - // If the table does not exist collect data for table creation - // - InputFormatConfig.TABLE_SCHEMA, InputFormatConfig.PARTITION_SPEC takes precedence so the user can override the - // Iceberg schema and specification generated by the code - - Set identifierFields = Optional.ofNullable(request.getPrimaryKeys()) - 
.map(primaryKeys -> - primaryKeys.stream().map(SQLPrimaryKey::getColumn_name).collect(Collectors.toSet())) - .orElse(Collections.emptySet()); - - Schema schema = schema(catalogProperties, hmsTable, identifierFields); - PartitionSpec spec = spec(conf, schema, hmsTable); - - // If there are partition keys specified remove them from the HMS table and add them to the column list - if (hmsTable.isSetPartitionKeys()) { - hmsTable.getSd().getCols().addAll(hmsTable.getPartitionKeys()); - hmsTable.setPartitionKeysIsSet(false); - } - - catalogProperties.put(InputFormatConfig.TABLE_SCHEMA, SchemaParser.toJson(schema)); - catalogProperties.put(InputFormatConfig.PARTITION_SPEC, PartitionSpecParser.toJson(spec)); - setCommonHmsTablePropertiesForIceberg(hmsTable); - - if (hmsTable.getParameters().containsKey(BaseMetastoreTableOperations.METADATA_LOCATION_PROP)) { - createHMSTableInHook = true; - } - - assertFileFormat(catalogProperties.getProperty(TableProperties.DEFAULT_FILE_FORMAT)); - - // Set whether the format is ORC, to be used during vectorization. - setOrcOnlyFilesParam(hmsTable); - // Remove hive primary key columns from table request, as iceberg doesn't support hive primary key. - request.setPrimaryKeys(null); - setSortOrder(hmsTable, schema, catalogProperties); - } - - private void setSortOrder(org.apache.hadoop.hive.metastore.api.Table hmsTable, Schema schema, - Properties properties) { - String sortOderJSONString = hmsTable.getParameters().get(TableProperties.DEFAULT_SORT_ORDER); - SortFields sortFields = null; - if (!Strings.isNullOrEmpty(sortOderJSONString)) { - try { - sortFields = JSON_OBJECT_MAPPER.reader().readValue(sortOderJSONString, SortFields.class); - } catch (Exception e) { - LOG.warn("Can not read write order json: {}", sortOderJSONString, e); - return; - } - if (sortFields != null && !sortFields.getSortFields().isEmpty()) { - SortOrder.Builder sortOderBuilder = SortOrder.builderFor(schema); - sortFields.getSortFields().forEach(fieldDesc -> { - NullOrder nullOrder = fieldDesc.getNullOrdering() == NullOrdering.NULLS_FIRST ? - NullOrder.NULLS_FIRST : NullOrder.NULLS_LAST; - SortDirection sortDirection = fieldDesc.getDirection() == SortFieldDesc.SortDirection.ASC ? 
- SortDirection.ASC : SortDirection.DESC; - sortOderBuilder.sortBy(fieldDesc.getColumnName(), sortDirection, nullOrder); - }); - properties.put(TableProperties.DEFAULT_SORT_ORDER, SortOrderParser.toJson(sortOderBuilder.build())); - } - } + super(conf); } @Override @@ -358,7 +207,7 @@ public void preDropTable(org.apache.hadoop.hive.metastore.api.Table hmsTable) { @Override public void preDropTable(org.apache.hadoop.hive.metastore.api.Table hmsTable, boolean deleteData) { - this.catalogProperties = getCatalogProperties(hmsTable); + this.catalogProperties = CatalogUtils.getCatalogProperties(hmsTable); this.deleteIcebergTable = hmsTable.getParameters() != null && "TRUE".equalsIgnoreCase(hmsTable.getParameters().get(InputFormatConfig.EXTERNAL_TABLE_PURGE)); @@ -408,7 +257,7 @@ public void commitDropTable(org.apache.hadoop.hive.metastore.api.Table hmsTable, @Override public void preAlterTable(org.apache.hadoop.hive.metastore.api.Table hmsTable, EnvironmentContext context) throws MetaException { - catalogProperties = getCatalogProperties(hmsTable); + catalogProperties = CatalogUtils.getCatalogProperties(hmsTable); setupAlterOperationType(hmsTable, context); if (AlterTableType.RENAME.equals(currentAlterTableOp)) { catalogProperties.put(Catalogs.NAME, TableIdentifier.of(context.getProperties().get(OLD_DB_NAME), @@ -637,7 +486,7 @@ public void commitAlterTable(org.apache.hadoop.hive.metastore.api.Table hmsTable } commitLock.unlock(); if (isTableMigration) { - catalogProperties = getCatalogProperties(hmsTable); + catalogProperties = CatalogUtils.getCatalogProperties(hmsTable); catalogProperties.put(InputFormatConfig.TABLE_SCHEMA, SchemaParser.toJson(preAlterTableProperties.schema)); catalogProperties.put(InputFormatConfig.PARTITION_SPEC, PartitionSpecParser.toJson(preAlterTableProperties.spec)); setFileFormat(preAlterTableProperties.format); @@ -678,7 +527,7 @@ public void rollbackAlterTable(org.apache.hadoop.hive.metastore.api.Table hmsTab LOG.debug("Initiating rollback for table {} at location {}", hmsTable.getTableName(), hmsTable.getSd().getLocation()); context.getProperties().put(INITIALIZE_ROLLBACK_MIGRATION, "true"); - this.catalogProperties = getCatalogProperties(hmsTable); + this.catalogProperties = CatalogUtils.getCatalogProperties(hmsTable); try { this.icebergTable = Catalogs.loadTable(conf, catalogProperties); } catch (NoSuchTableException nte) { @@ -703,7 +552,7 @@ public void rollbackAlterTable(org.apache.hadoop.hive.metastore.api.Table hmsTab public void preTruncateTable(org.apache.hadoop.hive.metastore.api.Table table, EnvironmentContext context, List partNames) throws MetaException { - this.catalogProperties = getCatalogProperties(table); + this.catalogProperties = CatalogUtils.getCatalogProperties(table); this.icebergTable = Catalogs.loadTable(conf, catalogProperties); Map partitionFieldMap = icebergTable.spec().fields().stream() .collect(Collectors.toMap(PartitionField::name, Function.identity())); @@ -796,149 +645,12 @@ private void setFileFormat(String format) { String lowerCaseFormat = format.toLowerCase(); for (FileFormat fileFormat : FileFormat.values()) { - if (lowerCaseFormat.contains(fileFormat.label)) { - catalogProperties.put(TableProperties.DEFAULT_FILE_FORMAT, fileFormat.label); + if (lowerCaseFormat.contains(fileFormat.getLabel())) { + catalogProperties.put(TableProperties.DEFAULT_FILE_FORMAT, fileFormat.getLabel()); } } } - private void assertFileFormat(String format) { - if (format == null) { - return; - } - String lowerCaseFormat = format.toLowerCase(); - 
Preconditions.checkArgument(Arrays.stream(FileFormat.values()).anyMatch(v -> lowerCaseFormat.contains(v.label)), - String.format("Unsupported fileformat %s", format)); - } - - private void setCommonHmsTablePropertiesForIceberg(org.apache.hadoop.hive.metastore.api.Table hmsTable) { - // If the table is not managed by Hive or Hadoop catalog, then the location should be set - if (!Catalogs.hiveCatalog(conf, catalogProperties)) { - String location = (hmsTable.getSd() != null) ? hmsTable.getSd().getLocation() : null; - if (location == null && Catalogs.hadoopCatalog(conf, catalogProperties)) { - location = IcebergTableUtil.defaultWarehouseLocation( - TableIdentifier.of(hmsTable.getDbName(), hmsTable.getTableName()), - conf, catalogProperties); - hmsTable.getSd().setLocation(location); - } - Preconditions.checkArgument(location != null, "Table location not set"); - } - - Map hmsParams = hmsTable.getParameters(); - COMMON_HMS_PROPERTIES.forEach(hmsParams::putIfAbsent); - - // Remove null values from hms table properties - hmsParams.entrySet().removeIf(e -> e.getKey() == null || e.getValue() == null); - - // Remove creation related properties - PARAMETERS_TO_REMOVE.forEach(hmsParams::remove); - - setWriteModeDefaults(null, hmsParams, null); - } - - /** - * Calculates the properties we would like to send to the catalog. - *
<ul>
 - * <li>The base of the properties is the properties stored at the Hive Metastore for the given table
 - * <li>We add the {@link Catalogs#LOCATION} as the table location
 - * <li>We add the {@link Catalogs#NAME} as TableIdentifier defined by the database name and table name
 - * <li>We add the serdeProperties of the HMS table
 - * <li>We remove some parameters that we don't want to push down to the Iceberg table props
 - * </ul>
- * @param hmsTable Table for which we are calculating the properties - * @return The properties we can provide for Iceberg functions, like {@link Catalogs} - */ - private static Properties getCatalogProperties(org.apache.hadoop.hive.metastore.api.Table hmsTable) { - Properties properties = new Properties(); - - hmsTable.getParameters().entrySet().stream().filter(e -> e.getKey() != null && e.getValue() != null).forEach(e -> { - // translate key names between HMS and Iceberg where needed - String icebergKey = HMSTablePropertyHelper.translateToIcebergProp(e.getKey()); - properties.put(icebergKey, e.getValue()); - }); - - if (properties.get(Catalogs.LOCATION) == null && - hmsTable.getSd() != null && hmsTable.getSd().getLocation() != null) { - properties.put(Catalogs.LOCATION, hmsTable.getSd().getLocation()); - } - - if (properties.get(Catalogs.NAME) == null) { - properties.put(Catalogs.NAME, TableIdentifier.of(hmsTable.getDbName(), hmsTable.getTableName()).toString()); - } - - SerDeInfo serdeInfo = hmsTable.getSd().getSerdeInfo(); - if (serdeInfo != null) { - serdeInfo.getParameters().entrySet().stream() - .filter(e -> e.getKey() != null && e.getValue() != null).forEach(e -> { - String icebergKey = HMSTablePropertyHelper.translateToIcebergProp(e.getKey()); - properties.put(icebergKey, e.getValue()); - }); - } - - // Remove HMS table parameters we don't want to propagate to Iceberg - PROPERTIES_TO_REMOVE.forEach(properties::remove); - - return properties; - } - - private Schema schema(Properties properties, org.apache.hadoop.hive.metastore.api.Table hmsTable, - Set identifierFields) { - boolean autoConversion = conf.getBoolean(InputFormatConfig.SCHEMA_AUTO_CONVERSION, false); - - if (properties.getProperty(InputFormatConfig.TABLE_SCHEMA) != null) { - return SchemaParser.fromJson(properties.getProperty(InputFormatConfig.TABLE_SCHEMA)); - } - List cols = Lists.newArrayList(hmsTable.getSd().getCols()); - if (hmsTable.isSetPartitionKeys() && !hmsTable.getPartitionKeys().isEmpty()) { - cols.addAll(hmsTable.getPartitionKeys()); - } - Schema schema = HiveSchemaUtil.convert(cols, autoConversion); - - return getSchemaWithIdentifierFields(schema, identifierFields); - } - - private Schema getSchemaWithIdentifierFields(Schema schema, Set identifierFields) { - if (identifierFields == null || identifierFields.isEmpty()) { - return schema; - } - Set identifierFieldIds = identifierFields.stream() - .map(column -> { - Types.NestedField field = schema.findField(column); - Preconditions.checkNotNull(field, - "Cannot find identifier field ID for the column %s in schema %s", column, schema); - return field.fieldId(); - }) - .collect(Collectors.toSet()); - - List cols = schema.columns().stream() - .map(column -> identifierFieldIds.contains(column.fieldId()) ? column.asRequired() : column) - .collect(Collectors.toList()); - - return new Schema(cols, identifierFieldIds); - } - - private static PartitionSpec spec(Configuration configuration, Schema schema, - org.apache.hadoop.hive.metastore.api.Table hmsTable) { - - Preconditions.checkArgument(!hmsTable.isSetPartitionKeys() || hmsTable.getPartitionKeys().isEmpty(), - "We can only handle non-partitioned Hive tables. 
The Iceberg schema should be in " + - InputFormatConfig.PARTITION_SPEC + " or already converted to a partition transform "); - - PartitionSpec spec = IcebergTableUtil.spec(configuration, schema); - if (spec != null) { - Preconditions.checkArgument(hmsTable.getParameters().get(InputFormatConfig.PARTITION_SPEC) == null, - "Provide only one of the following: Hive partition transform specification, or the " + - InputFormatConfig.PARTITION_SPEC + " property"); - return spec; - } - - if (hmsTable.getParameters().get(InputFormatConfig.PARTITION_SPEC) != null) { - return PartitionSpecParser.fromJson(schema, hmsTable.getParameters().get(InputFormatConfig.PARTITION_SPEC)); - } else { - return PartitionSpec.unpartitioned(); - } - } - private void handleAddColumns(org.apache.hadoop.hive.metastore.api.Table hmsTable) { Collection addedCols = HiveSchemaUtil.getSchemaDiff(hmsTable.getSd().getCols(), HiveSchemaUtil.convert(icebergTable.schema()), false) @@ -1090,84 +802,6 @@ private Type.PrimitiveType getPrimitiveTypeOrThrow(FieldSchema field) throws Met return (Type.PrimitiveType) newType; } - private void setOrcOnlyFilesParam(org.apache.hadoop.hive.metastore.api.Table hmsTable) { - if (isOrcOnlyFiles(hmsTable)) { - hmsTable.getParameters().put(ORC_FILES_ONLY, "true"); - } else { - hmsTable.getParameters().put(ORC_FILES_ONLY, "false"); - } - } - - private boolean isOrcOnlyFiles(org.apache.hadoop.hive.metastore.api.Table hmsTable) { - return !"FALSE".equalsIgnoreCase(hmsTable.getParameters().get(ORC_FILES_ONLY)) && - (hmsTable.getSd().getInputFormat() != null && - hmsTable.getSd().getInputFormat().toUpperCase().contains(org.apache.iceberg.FileFormat.ORC.name()) || - org.apache.iceberg.FileFormat.ORC.name() - .equalsIgnoreCase(hmsTable.getSd().getSerdeInfo().getParameters().get("write.format.default")) || - org.apache.iceberg.FileFormat.ORC.name() - .equalsIgnoreCase(hmsTable.getParameters().get("write.format.default"))); - } - - private void setWriteModeDefaults(Table icebergTbl, Map newProps, EnvironmentContext context) { - if ((icebergTbl == null || ((BaseTable) icebergTbl).operations().current().formatVersion() == 1) && - IcebergTableUtil.isV2TableOrAbove(newProps)) { - List writeModeList = ImmutableList.of( - TableProperties.DELETE_MODE, TableProperties.UPDATE_MODE, TableProperties.MERGE_MODE); - writeModeList.stream() - .filter(writeMode -> catalogProperties.get(writeMode) == null) - .forEach(writeMode -> { - catalogProperties.put(writeMode, MERGE_ON_READ.modeName()); - newProps.put(writeMode, MERGE_ON_READ.modeName()); - }); - - if (context != null) { - Splitter splitter = Splitter.on(PROPERTIES_SEPARATOR); - Map contextProperties = context.getProperties(); - if (contextProperties.containsKey(SET_PROPERTIES)) { - String propValue = context.getProperties().get(SET_PROPERTIES); - String writeModeStr = writeModeList.stream() - .filter(writeMode -> !splitter.splitToList(propValue).contains(writeMode)) - .collect(Collectors.joining("'")); - if (!writeModeStr.isEmpty()) { - contextProperties.put(SET_PROPERTIES, propValue + "'" + writeModeStr); - } - } - } - } - } - - @Override - public void postGetTable(org.apache.hadoop.hive.metastore.api.Table hmsTable) { - if (hmsTable != null) { - try { - Table tbl = IcebergTableUtil.getTable(conf, hmsTable); - String formatVersion = String.valueOf(((BaseTable) tbl).operations().current().formatVersion()); - hmsTable.getParameters().put(TableProperties.FORMAT_VERSION, formatVersion); - // Set the serde info - 
hmsTable.getSd().setInputFormat(HiveIcebergInputFormat.class.getName()); - hmsTable.getSd().setOutputFormat(HiveIcebergOutputFormat.class.getName()); - hmsTable.getSd().getSerdeInfo().setSerializationLib(HiveIcebergSerDe.class.getName()); - String storageHandler = hmsTable.getParameters().get(hive_metastoreConstants.META_TABLE_STORAGE); - // Check if META_TABLE_STORAGE is not present or is not an instance of ICEBERG_STORAGE_HANDLER - if (storageHandler == null || !isHiveIcebergStorageHandler(storageHandler)) { - hmsTable.getParameters() - .put(hive_metastoreConstants.META_TABLE_STORAGE, HMSTablePropertyHelper.HIVE_ICEBERG_STORAGE_HANDLER); - } - } catch (NoSuchTableException | NotFoundException ex) { - // If the table doesn't exist, ignore throwing exception from here - } - } - } - - private static boolean isHiveIcebergStorageHandler(String storageHandler) { - try { - Class storageHandlerClass = Class.forName(storageHandler); - return Class.forName(HIVE_ICEBERG_STORAGE_HANDLER).isAssignableFrom(storageHandlerClass); - } catch (ClassNotFoundException e) { - throw new RuntimeException("Error checking storage handler class", e); - } - } - @Override public void preDropPartitions(org.apache.hadoop.hive.metastore.api.Table hmsTable, EnvironmentContext context, diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java index 88aa9c449911..67b50c3f3489 100644 --- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java @@ -131,6 +131,7 @@ import org.apache.hadoop.mapred.OutputFormat; import org.apache.iceberg.BaseMetastoreTableOperations; import org.apache.iceberg.BaseTable; +import org.apache.iceberg.CatalogUtil; import org.apache.iceberg.DataFile; import org.apache.iceberg.DataOperations; import org.apache.iceberg.ExpireSnapshots; @@ -172,8 +173,10 @@ import org.apache.iceberg.expressions.ResidualEvaluator; import org.apache.iceberg.expressions.StrictMetricsEvaluator; import org.apache.iceberg.hadoop.ConfigProperties; +import org.apache.iceberg.hive.CatalogUtils; import org.apache.iceberg.hive.HiveSchemaUtil; import org.apache.iceberg.hive.HiveTableOperations; +import org.apache.iceberg.hive.MetastoreUtil; import org.apache.iceberg.io.CloseableIterable; import org.apache.iceberg.mr.Catalogs; import org.apache.iceberg.mr.InputFormatConfig; @@ -257,7 +260,13 @@ public Class getSerDeClass() { public HiveMetaHook getMetaHook() { // Make sure to always return a new instance here, as HiveIcebergMetaHook might hold state relevant for the // operation. 
- return new HiveIcebergMetaHook(conf); + String catalogType = CatalogUtils.getCatalogType(conf); + if (StringUtils.isEmpty(catalogType) || CatalogUtil.ICEBERG_CATALOG_TYPE_HIVE.equals(catalogType)) { + return new HiveIcebergMetaHook(conf); + } else { + conf.set(ConfigProperties.LOCK_HIVE_ENABLED, "false"); + return new BaseHiveIcebergMetaHook(conf); + } } @Override @@ -2114,7 +2123,7 @@ public List getPartitionKeys(org.apache.hadoop.hive.ql.metadata.Tab return Collections.emptyList(); } Table icebergTable = IcebergTableUtil.getTable(conf, hmsTable.getTTable()); - return IcebergTableUtil.getPartitionKeys(icebergTable, icebergTable.spec().specId()); + return MetastoreUtil.getPartitionKeys(icebergTable, icebergTable.spec().specId()); } @Override diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/IcebergTableUtil.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/IcebergTableUtil.java index 098928bb2043..11fc7a624238 100644 --- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/IcebergTableUtil.java +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/IcebergTableUtil.java @@ -44,7 +44,6 @@ import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.metastore.utils.TableFetcher; @@ -87,7 +86,7 @@ import org.apache.iceberg.expressions.Expression; import org.apache.iceberg.expressions.Expressions; import org.apache.iceberg.expressions.ResidualEvaluator; -import org.apache.iceberg.hive.HiveSchemaUtil; +import org.apache.iceberg.hive.CatalogUtils; import org.apache.iceberg.io.CloseableIterable; import org.apache.iceberg.mr.Catalogs; import org.apache.iceberg.mr.InputFormatConfig; @@ -106,7 +105,6 @@ import static org.apache.iceberg.RowLevelOperationMode.COPY_ON_WRITE; import static org.apache.iceberg.RowLevelOperationMode.MERGE_ON_READ; import static org.apache.iceberg.mr.InputFormatConfig.CATALOG_NAME; -import static org.apache.iceberg.mr.InputFormatConfig.CATALOG_WAREHOUSE_TEMPLATE; public class IcebergTableUtil { private static final Logger LOG = LoggerFactory.getLogger(IcebergTableUtil.class); @@ -501,20 +499,6 @@ public static Expression generateExpressionFromPartitionSpec(Table table, Map getPartitionKeys(Table table, int specId) { - Schema schema = table.specs().get(specId).schema(); - List hiveSchema = HiveSchemaUtil.convert(schema); - Map colNameToColType = hiveSchema.stream() - .collect(Collectors.toMap(FieldSchema::getName, FieldSchema::getType)); - return table.specs().get(specId).fields().stream() - .map(partField -> new FieldSchema( - schema.findColumnName(partField.sourceId()), - colNameToColType.get(schema.findColumnName(partField.sourceId())), - String.format("Transform: %s", partField.transform().toString())) - ) - .collect(Collectors.toList()); - } - public static List getPartitionFields(Table table, boolean latestSpecOnly) { return latestSpecOnly ? 
table.spec().fields() : table.specs().values().stream() @@ -646,7 +630,7 @@ public static String defaultWarehouseLocation(TableIdentifier tableIdentifier, Configuration conf, Properties catalogProperties) { StringBuilder sb = new StringBuilder(); String warehouseLocation = conf.get(String.format( - CATALOG_WAREHOUSE_TEMPLATE, catalogProperties.getProperty(CATALOG_NAME)) + CatalogUtils.CATALOG_WAREHOUSE_TEMPLATE, catalogProperties.getProperty(CATALOG_NAME)) ); sb.append(warehouseLocation).append('/'); for (String level : tableIdentifier.namespace().levels()) { diff --git a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/TestCatalogs.java b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/TestCatalogs.java index 42a6b0c77c6d..060ffa8fba82 100644 --- a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/TestCatalogs.java +++ b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/TestCatalogs.java @@ -35,6 +35,7 @@ import org.apache.iceberg.exceptions.NoSuchTableException; import org.apache.iceberg.hadoop.HadoopCatalog; import org.apache.iceberg.hadoop.HadoopTables; +import org.apache.iceberg.hive.CatalogUtils; import org.apache.iceberg.hive.HiveCatalog; import org.apache.iceberg.types.Types; import org.assertj.core.api.Assertions; @@ -209,7 +210,7 @@ public void testLoadCatalogDefault() { @Test public void testLoadCatalogHive() { String catalogName = "barCatalog"; - conf.set(InputFormatConfig.catalogPropertyConfigKey(catalogName, CatalogUtil.ICEBERG_CATALOG_TYPE), + conf.set(CatalogUtils.catalogPropertyConfigKey(catalogName, CatalogUtil.ICEBERG_CATALOG_TYPE), CatalogUtil.ICEBERG_CATALOG_TYPE_HIVE); Optional hiveCatalog = Catalogs.loadCatalog(conf, catalogName); Assert.assertTrue(hiveCatalog.isPresent()); @@ -222,9 +223,9 @@ public void testLoadCatalogHive() { @Test public void testLoadCatalogHadoop() { String catalogName = "barCatalog"; - conf.set(InputFormatConfig.catalogPropertyConfigKey(catalogName, CatalogUtil.ICEBERG_CATALOG_TYPE), + conf.set(CatalogUtils.catalogPropertyConfigKey(catalogName, CatalogUtil.ICEBERG_CATALOG_TYPE), CatalogUtil.ICEBERG_CATALOG_TYPE_HADOOP); - conf.set(InputFormatConfig.catalogPropertyConfigKey(catalogName, CatalogProperties.WAREHOUSE_LOCATION), + conf.set(CatalogUtils.catalogPropertyConfigKey(catalogName, CatalogProperties.WAREHOUSE_LOCATION), "/tmp/mylocation"); Optional hadoopCatalog = Catalogs.loadCatalog(conf, catalogName); Assert.assertTrue(hadoopCatalog.isPresent()); @@ -238,9 +239,9 @@ public void testLoadCatalogHadoop() { @Test public void testLoadCatalogCustom() { String catalogName = "barCatalog"; - conf.set(InputFormatConfig.catalogPropertyConfigKey(catalogName, CatalogProperties.CATALOG_IMPL), + conf.set(CatalogUtils.catalogPropertyConfigKey(catalogName, CatalogProperties.CATALOG_IMPL), CustomHadoopCatalog.class.getName()); - conf.set(InputFormatConfig.catalogPropertyConfigKey(catalogName, CatalogProperties.WAREHOUSE_LOCATION), + conf.set(CatalogUtils.catalogPropertyConfigKey(catalogName, CatalogProperties.WAREHOUSE_LOCATION), "/tmp/mylocation"); Optional customHadoopCatalog = Catalogs.loadCatalog(conf, catalogName); Assert.assertTrue(customHadoopCatalog.isPresent()); @@ -258,7 +259,7 @@ public void testLoadCatalogLocation() { @Test public void testLoadCatalogUnknown() { String catalogName = "barCatalog"; - conf.set(InputFormatConfig.catalogPropertyConfigKey(catalogName, CatalogUtil.ICEBERG_CATALOG_TYPE), "fooType"); + conf.set(CatalogUtils.catalogPropertyConfigKey(catalogName, 
CatalogUtil.ICEBERG_CATALOG_TYPE), "fooType"); Assertions.assertThatThrownBy(() -> Catalogs.loadCatalog(conf, catalogName)) .isInstanceOf(UnsupportedOperationException.class) @@ -269,7 +270,7 @@ public void testLoadCatalogUnknown() { public void testDefaultCatalogProperties() { String catalogProperty = "io.manifest.cache-enabled"; // Set global property - final String defaultCatalogProperty = InputFormatConfig.CATALOG_DEFAULT_CONFIG_PREFIX + catalogProperty; + final String defaultCatalogProperty = CatalogUtils.CATALOG_DEFAULT_CONFIG_PREFIX + catalogProperty; conf.setBoolean(defaultCatalogProperty, true); HiveCatalog defaultCatalog = (HiveCatalog) Catalogs.loadCatalog(conf, null).get(); Assert.assertEquals("true", defaultCatalog.properties().get(catalogProperty)); @@ -301,9 +302,9 @@ public CustomHadoopCatalog(Configuration conf, String warehouseLocation) { } private void setCustomCatalogProperties(String catalogName, String warehouseLocation) { - conf.set(InputFormatConfig.catalogPropertyConfigKey(catalogName, CatalogProperties.WAREHOUSE_LOCATION), + conf.set(CatalogUtils.catalogPropertyConfigKey(catalogName, CatalogProperties.WAREHOUSE_LOCATION), warehouseLocation); - conf.set(InputFormatConfig.catalogPropertyConfigKey(catalogName, CatalogProperties.CATALOG_IMPL), + conf.set(CatalogUtils.catalogPropertyConfigKey(catalogName, CatalogProperties.CATALOG_IMPL), CustomHadoopCatalog.class.getName()); conf.set(InputFormatConfig.CATALOG_NAME, catalogName); } diff --git a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/TestIcebergInputFormats.java b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/TestIcebergInputFormats.java index fddc0cca9b18..30f7bb087636 100644 --- a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/TestIcebergInputFormats.java +++ b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/TestIcebergInputFormats.java @@ -57,6 +57,7 @@ import org.apache.iceberg.expressions.Expressions; import org.apache.iceberg.hadoop.HadoopCatalog; import org.apache.iceberg.hadoop.HadoopTables; +import org.apache.iceberg.hive.CatalogUtils; import org.apache.iceberg.mr.hive.HiveIcebergInputFormat; import org.apache.iceberg.mr.mapred.Container; import org.apache.iceberg.mr.mapred.MapredIcebergInputFormat; @@ -372,9 +373,9 @@ public void testCustomCatalog() throws IOException { String warehouseLocation = temp.newFolder("hadoop_catalog").getAbsolutePath(); conf.set("warehouse.location", warehouseLocation); conf.set(InputFormatConfig.CATALOG_NAME, Catalogs.ICEBERG_DEFAULT_CATALOG_NAME); - conf.set(InputFormatConfig.catalogPropertyConfigKey(Catalogs.ICEBERG_DEFAULT_CATALOG_NAME, + conf.set(CatalogUtils.catalogPropertyConfigKey(Catalogs.ICEBERG_DEFAULT_CATALOG_NAME, CatalogUtil.ICEBERG_CATALOG_TYPE), CatalogUtil.ICEBERG_CATALOG_TYPE_HADOOP); - conf.set(InputFormatConfig.catalogPropertyConfigKey(Catalogs.ICEBERG_DEFAULT_CATALOG_NAME, + conf.set(CatalogUtils.catalogPropertyConfigKey(Catalogs.ICEBERG_DEFAULT_CATALOG_NAME, CatalogProperties.WAREHOUSE_LOCATION), warehouseLocation); Catalog catalog = new HadoopCatalog(conf, conf.get("warehouse.location")); diff --git a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerNoScan.java b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerNoScan.java index 732e6f9f94e0..09229d3b91ba 100644 --- a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerNoScan.java +++ 
b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerNoScan.java @@ -699,7 +699,8 @@ public void testCreateTableError() { "'='" + testTables.catalogName() + "')")); - if (testTableType != TestTables.TestTableType.HADOOP_CATALOG) { + if (testTableType != TestTables.TestTableType.HADOOP_CATALOG && + testTableType != TestTables.TestTableType.CUSTOM_CATALOG) { assertThatThrownBy .isInstanceOf(IllegalArgumentException.class) .hasMessageStartingWith("Failed to execute Hive query") diff --git a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestTables.java b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestTables.java index c66a3240978a..8e885f36ff72 100644 --- a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestTables.java +++ b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestTables.java @@ -49,6 +49,7 @@ import org.apache.iceberg.expressions.Expressions; import org.apache.iceberg.hadoop.HadoopCatalog; import org.apache.iceberg.hadoop.HadoopTables; +import org.apache.iceberg.hive.CatalogUtils; import org.apache.iceberg.hive.HiveCatalog; import org.apache.iceberg.hive.HiveVersion; import org.apache.iceberg.mr.Catalogs; @@ -542,10 +543,10 @@ static class CustomCatalogTestTables extends TestTables { @Override public Map properties() { return ImmutableMap.of( - InputFormatConfig.catalogPropertyConfigKey(catalog, CatalogProperties.CATALOG_IMPL), - TestCatalogs.CustomHadoopCatalog.class.getName(), - InputFormatConfig.catalogPropertyConfigKey(catalog, CatalogProperties.WAREHOUSE_LOCATION), - warehouseLocation + CatalogUtils.catalogPropertyConfigKey(catalog, CatalogProperties.CATALOG_IMPL), + TestCatalogs.CustomHadoopCatalog.class.getName(), + CatalogUtils.catalogPropertyConfigKey(catalog, CatalogProperties.WAREHOUSE_LOCATION), + warehouseLocation ); } @@ -573,10 +574,10 @@ static class HadoopCatalogTestTables extends TestTables { @Override public Map properties() { return ImmutableMap.of( - InputFormatConfig.catalogPropertyConfigKey(catalog, CatalogUtil.ICEBERG_CATALOG_TYPE), - CatalogUtil.ICEBERG_CATALOG_TYPE_HADOOP, - InputFormatConfig.catalogPropertyConfigKey(catalog, CatalogProperties.WAREHOUSE_LOCATION), - warehouseLocation + CatalogUtils.catalogPropertyConfigKey(catalog, CatalogUtil.ICEBERG_CATALOG_TYPE), + CatalogUtil.ICEBERG_CATALOG_TYPE_HADOOP, + CatalogUtils.catalogPropertyConfigKey(catalog, CatalogProperties.WAREHOUSE_LOCATION), + warehouseLocation ); } @@ -627,7 +628,7 @@ static class HiveTestTables extends TestTables { @Override public Map properties() { - return ImmutableMap.of(InputFormatConfig.catalogPropertyConfigKey(catalog, CatalogUtil.ICEBERG_CATALOG_TYPE), + return ImmutableMap.of(CatalogUtils.catalogPropertyConfigKey(catalog, CatalogUtil.ICEBERG_CATALOG_TYPE), CatalogUtil.ICEBERG_CATALOG_TYPE_HIVE); } diff --git a/iceberg/iceberg-handler/src/test/queries/positive/iceberg_rest_catalog.q b/iceberg/iceberg-handler/src/test/queries/positive/iceberg_rest_catalog.q new file mode 100644 index 000000000000..b87d9e0af671 --- /dev/null +++ b/iceberg/iceberg-handler/src/test/queries/positive/iceberg_rest_catalog.q @@ -0,0 +1,64 @@ +-- SORT_QUERY_RESULTS +-- Mask neededVirtualColumns due to non-strict order +--! qt:replace:/(\s+neededVirtualColumns:\s)(.*)/$1#Masked#/ +-- Mask random uuid +--! qt:replace:/(\s+'uuid'=')\S+('\s*)/$1#Masked#$2/ +-- Mask a random snapshot id +--! qt:replace:/(\s+current-snapshot-id\s+)\S+(\s*)/$1#Masked#/ +-- Mask added file size +--! 
qt:replace:/(\S\"added-files-size\\\":\\\")(\d+)(\\\")/$1#Masked#$3/ +-- Mask total file size +--! qt:replace:/(\S\"total-files-size\\\":\\\")(\d+)(\\\")/$1#Masked#$3/ +-- Mask current-snapshot-timestamp-ms +--! qt:replace:/(\s+current-snapshot-timestamp-ms\s+)\S+(\s*)/$1#Masked#$2/ +--! qt:replace:/(MAJOR\s+succeeded\s+)[a-zA-Z0-9\-\.\s+]+(\s+manual)/$1#Masked#$2/ +--! qt:replace:/(MAJOR\s+refused\s+)[a-zA-Z0-9\-\.\s+]+(\s+manual)/$1#Masked#$2/ +-- Mask compaction id as they will be allocated in parallel threads +--! qt:replace:/^[0-9]/#Masked#/ +-- Mask removed file size +--! qt:replace:/(\S\"removed-files-size\\\":\\\")(\d+)(\\\")/$1#Masked#$3/ +-- Mask iceberg version +--! qt:replace:/(\S\"iceberg-version\\\":\\\")(\w+\s\w+\s\d+\.\d+\.\d+\s\(\w+\s\w+\))(\\\")/$1#Masked#$3/ + +set metastore.client.impl=org.apache.iceberg.hive.client.HiveRESTCatalogClient; +set metastore.catalog.default=ice01; +set iceberg.catalog.ice01.type=rest; + +--! This config is set in the driver setup (see TestIcebergRESTCatalogLlapLocalCliDriver.java) +--! conf.set('iceberg.catalog.ice01.uri', ); + +create database ice_rest; +use ice_rest; + +--! Creating table without catalog name in table properties +create table ice_orc1 ( + first_name string, + last_name string, + dept_id bigint, + team_id bigint + ) +partitioned by (company_id bigint) +stored by iceberg stored as orc; + +--! Creating table with a valid catalog name in table properties +create table ice_orc2 ( + first_name string, + last_name string, + dept_id bigint, + team_id bigint + ) +partitioned by (company_id bigint) +stored by iceberg stored as orc +TBLPROPERTIES('format-version'='2', 'iceberg.catalog'='ice01'); + +--! Output should contain: 'type' = 'rest' +show create table ice_orc2; + +show tables; +drop table ice_orc1; +drop table ice_orc2; +show tables; + +show databases; +drop database ice_rest; +show databases; \ No newline at end of file diff --git a/iceberg/iceberg-handler/src/test/results/positive/llap/iceberg_rest_catalog.q.out b/iceberg/iceberg-handler/src/test/results/positive/llap/iceberg_rest_catalog.q.out new file mode 100644 index 000000000000..2622a47ae1cf --- /dev/null +++ b/iceberg/iceberg-handler/src/test/results/positive/llap/iceberg_rest_catalog.q.out @@ -0,0 +1,154 @@ +PREHOOK: query: create database ice_rest +PREHOOK: type: CREATEDATABASE +PREHOOK: Output: database:ice_rest +POSTHOOK: query: create database ice_rest +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Output: database:ice_rest +PREHOOK: query: use ice_rest +PREHOOK: type: SWITCHDATABASE +PREHOOK: Input: database:ice_rest +POSTHOOK: query: use ice_rest +POSTHOOK: type: SWITCHDATABASE +POSTHOOK: Input: database:ice_rest +PREHOOK: query: create table ice_orc1 ( + first_name string, + last_name string, + dept_id bigint, + team_id bigint + ) +partitioned by (company_id bigint) +stored by iceberg stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:ice_rest +PREHOOK: Output: ice_rest@ice_orc1 +POSTHOOK: query: create table ice_orc1 ( + first_name string, + last_name string, + dept_id bigint, + team_id bigint + ) +partitioned by (company_id bigint) +stored by iceberg stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:ice_rest +POSTHOOK: Output: ice_rest@ice_orc1 +PREHOOK: query: create table ice_orc2 ( + first_name string, + last_name string, + dept_id bigint, + team_id bigint + ) +partitioned by (company_id bigint) +stored by iceberg stored as orc +TBLPROPERTIES('format-version'='2', 'iceberg.catalog'='ice01') +PREHOOK: type: CREATETABLE 
+PREHOOK: Output: database:ice_rest +PREHOOK: Output: ice_rest@ice_orc2 +POSTHOOK: query: create table ice_orc2 ( + first_name string, + last_name string, + dept_id bigint, + team_id bigint + ) +partitioned by (company_id bigint) +stored by iceberg stored as orc +TBLPROPERTIES('format-version'='2', 'iceberg.catalog'='ice01') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:ice_rest +POSTHOOK: Output: ice_rest@ice_orc2 +PREHOOK: query: show create table ice_orc2 +PREHOOK: type: SHOW_CREATETABLE +PREHOOK: Input: ice_rest@ice_orc2 +POSTHOOK: query: show create table ice_orc2 +POSTHOOK: type: SHOW_CREATETABLE +POSTHOOK: Input: ice_rest@ice_orc2 +CREATE EXTERNAL TABLE `ice_orc2`( + `first_name` string, + `last_name` string, + `dept_id` bigint, + `team_id` bigint, + `company_id` bigint) +PARTITIONED BY ( + `company_id` bigint COMMENT 'Transform: identity') +PARTITIONED BY SPEC ( +`company_id`) +ROW FORMAT SERDE + 'org.apache.iceberg.mr.hive.HiveIcebergSerDe' +STORED BY + 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler' +WITH SERDEPROPERTIES ( + 'serialization.format'='1') +LOCATION +#### A masked pattern was here #### +TBLPROPERTIES ( + 'bucketing_version'='2', + 'current-schema'='{"type":"struct","schema-id":0,"fields":[{"id":1,"name":"first_name","required":false,"type":"string"},{"id":2,"name":"last_name","required":false,"type":"string"},{"id":3,"name":"dept_id","required":false,"type":"long"},{"id":4,"name":"team_id","required":false,"type":"long"},{"id":5,"name":"company_id","required":false,"type":"long"}]}', + 'default-partition-spec'='{"spec-id":0,"fields":[{"name":"company_id","transform":"identity","source-id":5,"field-id":1000}]}', + 'format-version'='2', + 'iceberg.catalog'='ice01', + 'iceberg.orc.files.only'='true', +#### A masked pattern was here #### + 'name'='ice_rest.ice_orc2', + 'parquet.compression'='zstd', + 'serialization.format'='1', + 'snapshot-count'='0', + 'table_type'='ICEBERG', + 'type'='rest', + 'uuid'='#Masked#', + 'write.delete.mode'='merge-on-read', + 'write.format.default'='orc', + 'write.merge.mode'='merge-on-read', + 'write.update.mode'='merge-on-read') +PREHOOK: query: show tables +PREHOOK: type: SHOWTABLES +PREHOOK: Input: database:ice_rest +POSTHOOK: query: show tables +POSTHOOK: type: SHOWTABLES +POSTHOOK: Input: database:ice_rest +ice_orc1 +ice_orc2 +PREHOOK: query: drop table ice_orc1 +PREHOOK: type: DROPTABLE +PREHOOK: Input: ice_rest@ice_orc1 +PREHOOK: Output: database:ice_rest +PREHOOK: Output: ice_rest@ice_orc1 +POSTHOOK: query: drop table ice_orc1 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: ice_rest@ice_orc1 +POSTHOOK: Output: database:ice_rest +POSTHOOK: Output: ice_rest@ice_orc1 +PREHOOK: query: drop table ice_orc2 +PREHOOK: type: DROPTABLE +PREHOOK: Input: ice_rest@ice_orc2 +PREHOOK: Output: database:ice_rest +PREHOOK: Output: ice_rest@ice_orc2 +POSTHOOK: query: drop table ice_orc2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: ice_rest@ice_orc2 +POSTHOOK: Output: database:ice_rest +POSTHOOK: Output: ice_rest@ice_orc2 +PREHOOK: query: show tables +PREHOOK: type: SHOWTABLES +PREHOOK: Input: database:ice_rest +POSTHOOK: query: show tables +POSTHOOK: type: SHOWTABLES +POSTHOOK: Input: database:ice_rest +PREHOOK: query: show databases +PREHOOK: type: SHOWDATABASES +POSTHOOK: query: show databases +POSTHOOK: type: SHOWDATABASES +default +ice_rest +PREHOOK: query: drop database ice_rest +PREHOOK: type: DROPDATABASE +PREHOOK: Input: database:ice_rest +PREHOOK: Output: database:ice_rest +POSTHOOK: query: drop database ice_rest +POSTHOOK: 
type: DROPDATABASE +POSTHOOK: Input: database:ice_rest +POSTHOOK: Output: database:ice_rest +PREHOOK: query: show databases +PREHOOK: type: SHOWDATABASES +POSTHOOK: query: show databases +POSTHOOK: type: SHOWDATABASES +default diff --git a/itests/hive-iceberg/pom.xml b/itests/hive-iceberg/pom.xml new file mode 100644 index 000000000000..170a6c89abd8 --- /dev/null +++ b/itests/hive-iceberg/pom.xml @@ -0,0 +1,155 @@ + + + + 4.0.0 + + org.apache.hive + hive-it + 4.2.0-SNAPSHOT + ../pom.xml + + hive-it-iceberg + jar + Hive Iceberg Integration - Unit Tests + + ../.. + UTF-8 + false + 1.9.1 + + + + org.apache.hive + hive-standalone-metastore-server + ${project.version} + + + org.apache.hive + hive-standalone-metastore-rest-catalog + ${project.version} + + + org.apache.hive + hive-standalone-metastore-rest-catalog + tests + ${project.version} + + + org.apache.hive + hive-standalone-metastore-common + ${project.version} + + + org.apache.hive + hive-iceberg-catalog + ${project.version} + + + org.apache.hive + hive-iceberg-handler + ${project.version} + test + + + org.apache.hive + hive-standalone-metastore-common + ${project.version} + tests + test + + + org.apache.hive + hive-standalone-metastore-server + ${project.version} + tests + test + + + org.apache.iceberg + iceberg-api + ${iceberg.version} + tests + test + + + org.apache.iceberg + iceberg-core + ${iceberg.version} + tests + test + + + org.apache.iceberg + iceberg-open-api + ${iceberg.version} + test-fixtures + test + + + org.junit.jupiter + junit-jupiter + ${junit.jupiter.version} + test + + + org.apache.hive + hive-exec + tests + test + ${project.version} + + + org.apache.hive + hive-it-util + test + + + + + + org.apache.maven.plugins + maven-antrun-plugin + + + setup-metastore-scripts + process-test-resources + + run + + + + + + + + + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + + test-jar + + + + + + + diff --git a/itests/hive-iceberg/src/test/java/org/apache/hive/TestHiveRESTCatalogClientIT.java b/itests/hive-iceberg/src/test/java/org/apache/hive/TestHiveRESTCatalogClientIT.java new file mode 100644 index 000000000000..b6386e0ec42f --- /dev/null +++ b/itests/hive-iceberg/src/test/java/org/apache/hive/TestHiveRESTCatalogClientIT.java @@ -0,0 +1,227 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hive; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.HiveMetaHook; +import org.apache.hadoop.hive.metastore.HiveMetaHookLoader; +import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.ServletSecurity; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat; +import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler; +import org.apache.hadoop.hive.ql.metadata.HiveUtils; +import org.apache.hadoop.hive.ql.stats.StatsUtils; +import org.apache.hadoop.mapred.TextInputFormat; +import org.apache.hadoop.util.StringUtils; +import org.apache.iceberg.CatalogUtil; +import org.apache.iceberg.PartitionSpec; +import org.apache.iceberg.PartitionSpecParser; +import org.apache.iceberg.Schema; +import org.apache.iceberg.TableProperties; +import org.apache.iceberg.hive.CatalogUtils; +import org.apache.iceberg.hive.HiveSchemaUtil; +import org.apache.iceberg.rest.extension.HiveRESTCatalogServerExtension; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; +import org.junit.jupiter.api.extension.RegisterExtension; +import java.util.Map; + +import java.util.Arrays; +import java.util.List; + +import static org.junit.jupiter.api.Assertions.assertThrows; + +/* + * This test is an integration test for the hive-iceberg REST Catalog client and HMS REST Catalog Server. + * It uses the HiveMetaStoreClient backed by hive-iceberg REST catalog adapter to connect to the HMS RESTCatalog Server. 
+ * The flow is as follows: + * Hive ql wrapper --> HiveMetaStoreClient --> HiveRESTCatalogClient --> HMS RESTCatalog Server --> HMS + */ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +public class TestHiveRESTCatalogClientIT { + + private static final String DB_NAME = "ice_db"; + private static final String TABLE_NAME = "ice_tbl"; + private static final String CATALOG_NAME = "ice01"; + private static final String HIVE_ICEBERG_STORAGE_HANDLER = "org.apache.iceberg.mr.hive.HiveIcebergStorageHandler"; + + private Configuration conf; + private HiveConf hiveConf; + private Hive hive; + + private IMetaStoreClient msClient; + + @RegisterExtension + private static final HiveRESTCatalogServerExtension REST_CATALOG_EXTENSION = + HiveRESTCatalogServerExtension.builder(ServletSecurity.AuthType.NONE) + .addMetaStoreSchemaClassName(ITestsSchemaInfo.class) + .build(); + + @BeforeAll + public void setup() throws Exception { + // Starting msClient with Iceberg REST Catalog client underneath + String restCatalogPrefix = String.format("%s%s.", CatalogUtils.CATALOG_CONFIG_PREFIX, CATALOG_NAME); + + conf = REST_CATALOG_EXTENSION.getConf(); + + MetastoreConf.setVar(conf, MetastoreConf.ConfVars.METASTORE_CLIENT_IMPL, + "org.apache.iceberg.hive.client.HiveRESTCatalogClient"); + conf.set(MetastoreConf.ConfVars.CATALOG_DEFAULT.getVarname(), CATALOG_NAME); + conf.set(restCatalogPrefix + "uri", REST_CATALOG_EXTENSION.getRestEndpoint()); + conf.set(restCatalogPrefix + "type", CatalogUtil.ICEBERG_CATALOG_TYPE_REST); + + HiveMetaHookLoader hookLoader = tbl -> { + HiveStorageHandler storageHandler; + try { + storageHandler = HiveUtils.getStorageHandler(conf, HIVE_ICEBERG_STORAGE_HANDLER); + } catch (HiveException e) { + throw new MetaException(e.getMessage()); + } + return storageHandler == null ? 
null : storageHandler.getMetaHook(); + }; + + msClient = new HiveMetaStoreClient(conf, hookLoader); + hiveConf = new HiveConf(conf, HiveConf.class); + hive = Hive.get(hiveConf); + } + + @AfterAll public void tearDown() { + if (msClient != null) { + msClient.close(); + } + } + + @Test + public void testIceberg() throws Exception { + + // --- Create Database --- + Database db = new Database(); + db.setCatalogName(CATALOG_NAME); + db.setName(DB_NAME); + db.setOwnerType(PrincipalType.USER); + db.setOwnerName(System.getProperty("user.name")); + String warehouseDir = MetastoreConf.get(conf, MetastoreConf.ConfVars.WAREHOUSE_EXTERNAL.getVarname()); + db.setLocationUri(warehouseDir + "/" + DB_NAME + ".db"); + hive.createDatabase(db, true); + + // --- Get Database --- + Database retrievedDB = hive.getDatabase(CATALOG_NAME, DB_NAME); + Assertions.assertEquals(DB_NAME, retrievedDB.getName()); + Assertions.assertEquals(CATALOG_NAME, retrievedDB.getCatalogName()); + + // --- Get Databases --- + List dbs = msClient.getDatabases(CATALOG_NAME, "ice_*"); + Assertions.assertEquals(1, dbs.size()); + Assertions.assertEquals(DB_NAME, dbs.get(0)); + + // --- Get All Databases --- + List allDbs = msClient.getAllDatabases(CATALOG_NAME); + Assertions.assertEquals(2, allDbs.size()); + Assertions.assertTrue(allDbs.contains("default")); + Assertions.assertTrue(allDbs.contains(DB_NAME)); + + // --- Create Table --- + org.apache.hadoop.hive.metastore.api.Table tTable = createPartitionedTable(msClient, + CATALOG_NAME, DB_NAME, TABLE_NAME, new java.util.HashMap<>()); + Assertions.assertNotNull(tTable); + Assertions.assertEquals(HiveMetaHook.ICEBERG, tTable.getParameters().get(HiveMetaHook.TABLE_TYPE)); + + // --- Create Table --- with an invalid catalog name in table parameters (should fail) + Map tableParameters = new java.util.HashMap<>(); + tableParameters.put(CatalogUtils.CATALOG_NAME, "some_missing_catalog"); + assertThrows(IllegalArgumentException.class, () -> + createPartitionedTable(msClient, CATALOG_NAME, DB_NAME, TABLE_NAME + "_2", tableParameters)); + + // --- tableExists --- + Assertions.assertTrue(msClient.tableExists(CATALOG_NAME, DB_NAME, TABLE_NAME)); + + // --- Get Table --- + org.apache.hadoop.hive.metastore.api.Table table = msClient.getTable(CATALOG_NAME, DB_NAME, TABLE_NAME); + Assertions.assertEquals(DB_NAME, table.getDbName()); + Assertions.assertEquals(TABLE_NAME, table.getTableName()); + Assertions.assertEquals(HIVE_ICEBERG_STORAGE_HANDLER, table.getParameters().get("storage_handler")); + Assertions.assertNotNull(table.getParameters().get(TableProperties.DEFAULT_PARTITION_SPEC)); + Assertions.assertEquals(1, table.getPartitionKeys().size()); + Assertions.assertEquals("city", table.getPartitionKeys().getFirst().getName()); + + // --- Get Tables --- + List tables = msClient.getTables(CATALOG_NAME, DB_NAME, "ice_*"); + Assertions.assertEquals(1, tables.size()); + Assertions.assertEquals(TABLE_NAME, tables.getFirst()); + + // --- Get All Tables --- + List allTables = msClient.getAllTables(CATALOG_NAME, DB_NAME); + Assertions.assertEquals(1, allTables.size()); + Assertions.assertEquals(TABLE_NAME, allTables.getFirst()); + + // --- Drop Table --- + msClient.dropTable(CATALOG_NAME, DB_NAME, TABLE_NAME); + Assertions.assertFalse(msClient.tableExists(CATALOG_NAME, DB_NAME, TABLE_NAME)); + + // --- Drop Database --- + msClient.dropDatabase(DB_NAME); + Assertions.assertFalse(msClient.getAllDatabases(CATALOG_NAME).contains(DB_NAME)); + } + + private static Table createPartitionedTable(IMetaStoreClient db, 
String catName, String dbName, String tableName, + Map tableParameters) throws Exception { + db.dropTable(catName, dbName, tableName); + Table table = new Table(); + table.setCatName(catName); + table.setDbName(dbName); + table.setTableName(tableName); + + FieldSchema col1 = new FieldSchema("key", "string", ""); + FieldSchema col2 = new FieldSchema("value", "int", ""); + FieldSchema col3 = new FieldSchema("city", "string", ""); + List cols = Arrays.asList(col1, col2, col3); + + StorageDescriptor sd = new StorageDescriptor(); + sd.setSerdeInfo(new SerDeInfo()); + sd.setInputFormat(TextInputFormat.class.getCanonicalName()); + sd.setOutputFormat(HiveIgnoreKeyTextOutputFormat.class.getCanonicalName()); + sd.setCols(cols); + sd.getSerdeInfo().setParameters(new java.util.HashMap<>()); + table.setSd(sd); + + Schema schema = HiveSchemaUtil.convert(cols, false); + PartitionSpec spec = PartitionSpec.builderFor(schema).identity("city").build(); + String specString = PartitionSpecParser.toJson(spec); + table.setParameters(new java.util.HashMap<>()); + table.getParameters().putAll(tableParameters); + table.getParameters().put(TableProperties.DEFAULT_PARTITION_SPEC, specString); + + db.createTable(table); + return db.getTable(catName, dbName, tableName); + } +} diff --git a/itests/pom.xml b/itests/pom.xml index 938f5267c9fa..2d8b04efc5de 100644 --- a/itests/pom.xml +++ b/itests/pom.xml @@ -44,6 +44,7 @@ qtest-druid qtest-kudu qtest-iceberg + hive-iceberg diff --git a/itests/qtest-iceberg/pom.xml b/itests/qtest-iceberg/pom.xml index 57ac18684931..6ac62e8fa0b3 100644 --- a/itests/qtest-iceberg/pom.xml +++ b/itests/qtest-iceberg/pom.xml @@ -20,13 +20,23 @@ ../pom.xml 4.0.0 - hive-it-iceberg + hive-it-iceberg-qfile jar Hive Integration - QFile Iceberg Tests ../.. + + org.apache.hive + hive-standalone-metastore-server + ${project.version} + + + io.dropwizard.metrics + metrics-core + ${dropwizard.version} + @@ -452,6 +462,19 @@ ${mockito-core.version} test + + org.apache.hive + hive-standalone-metastore-rest-catalog + ${project.version} + test + + + org.apache.hive + hive-standalone-metastore-rest-catalog + tests + ${project.version} + test + diff --git a/itests/qtest-iceberg/src/test/java/org/apache/hadoop/hive/cli/HiveRESTCatalogServerExtension.java b/itests/qtest-iceberg/src/test/java/org/apache/hadoop/hive/cli/HiveRESTCatalogServerExtension.java new file mode 100644 index 000000000000..bc39ab612ea4 --- /dev/null +++ b/itests/qtest-iceberg/src/test/java/org/apache/hadoop/hive/cli/HiveRESTCatalogServerExtension.java @@ -0,0 +1,86 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hadoop.hive.cli; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.MetaStoreSchemaInfo; +import org.apache.hadoop.hive.metastore.ServletSecurity.AuthType; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; +import org.apache.iceberg.rest.extension.RESTCatalogServer; +import org.junit.rules.ExternalResource; + +public class HiveRESTCatalogServerExtension extends ExternalResource { + private final Configuration conf; + private final RESTCatalogServer restCatalogServer; + + private HiveRESTCatalogServerExtension(AuthType authType, Class schemaInfoClass) { + this.conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setVar(conf, ConfVars.CATALOG_SERVLET_AUTH, authType.name()); + restCatalogServer = new RESTCatalogServer(); + if (schemaInfoClass != null) { + restCatalogServer.setSchemaInfoClass(schemaInfoClass); + } + } + + public Configuration getConf() { + return conf; + } + + @Override + protected void before() throws Throwable { + restCatalogServer.start(conf); + } + + @Override + protected void after() { + restCatalogServer.stop(); + } + + public String getRestEndpoint() { + return restCatalogServer.getRestEndpoint(); + } + + public static class Builder { + private final AuthType authType; + private Class metaStoreSchemaClass; + + private Builder(AuthType authType) { + this.authType = authType; + } + + public Builder addMetaStoreSchemaClassName(Class metaStoreSchemaClass) { + this.metaStoreSchemaClass = metaStoreSchemaClass; + return this; + } + + public HiveRESTCatalogServerExtension build() { + return new HiveRESTCatalogServerExtension(authType, metaStoreSchemaClass); + } + } + + public static Builder builder(AuthType authType) { + return new Builder(authType); + } + + public RESTCatalogServer getRestCatalogServer() { + return restCatalogServer; + } +} \ No newline at end of file diff --git a/itests/qtest-iceberg/src/test/java/org/apache/hadoop/hive/cli/TestIcebergRESTCatalogLlapLocalCliDriver.java b/itests/qtest-iceberg/src/test/java/org/apache/hadoop/hive/cli/TestIcebergRESTCatalogLlapLocalCliDriver.java new file mode 100644 index 000000000000..ef28f6257cb1 --- /dev/null +++ b/itests/qtest-iceberg/src/test/java/org/apache/hadoop/hive/cli/TestIcebergRESTCatalogLlapLocalCliDriver.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.cli; + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.cli.control.CliAdapter; +import org.apache.hadoop.hive.cli.control.CliConfigs; +import org.apache.hadoop.hive.metastore.ServletSecurity; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hive.ITestsSchemaInfo; +import org.apache.iceberg.CatalogUtil; +import org.apache.iceberg.hive.CatalogUtils; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TestRule; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; +import java.util.stream.Stream; + +@RunWith(Parameterized.class) +public class TestIcebergRESTCatalogLlapLocalCliDriver { + + private static final Logger LOG = LoggerFactory.getLogger( + org.apache.hadoop.hive.cli.TestIcebergRESTCatalogLlapLocalCliDriver.class); + private static final String CATALOG_NAME = "ice01"; + private static final CliAdapter adapter = new CliConfigs.TestIcebergRESTCatalogLlapLocalCliDriver().getCliAdapter(); + + private final String name; + private final File qfile; + + @ClassRule + public static final HiveRESTCatalogServerExtension REST_CATALOG_EXTENSION = + HiveRESTCatalogServerExtension.builder(ServletSecurity.AuthType.NONE) + .addMetaStoreSchemaClassName(ITestsSchemaInfo.class) + .build(); + + @ClassRule + public static final TestRule cliClassRule = adapter.buildClassRule(); + + @Rule + public final TestRule cliTestRule = adapter.buildTestRule(); + + @Parameters(name = "{0}") + public static List getParameters() throws Exception { + return adapter.getParameters(); + } + + public TestIcebergRESTCatalogLlapLocalCliDriver(String name, File qfile) { + this.name = name; + this.qfile = qfile; + } + + @Before + public void setupHiveConfig() { + String restCatalogPrefix = String.format("%s%s.", CatalogUtils.CATALOG_CONFIG_PREFIX, CATALOG_NAME); + + Configuration conf = SessionState.get().getConf(); + MetastoreConf.setVar(conf, MetastoreConf.ConfVars.METASTORE_CLIENT_IMPL, + "org.apache.iceberg.hive.client.HiveRESTCatalogClient"); + MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CATALOG_DEFAULT, CATALOG_NAME); + conf.set(restCatalogPrefix + "uri", REST_CATALOG_EXTENSION.getRestEndpoint()); + conf.set(restCatalogPrefix + "type", CatalogUtil.ICEBERG_CATALOG_TYPE_REST); + } + + @Before + public void cleanUpRestCatalogServerTmpDir() throws IOException { + try (Stream children = Files.list(REST_CATALOG_EXTENSION.getRestCatalogServer().getWarehouseDir())) { + children + .filter(path -> !path.getFileName().toString().equals("derby.log")) + .filter(path -> !path.getFileName().toString().equals("metastore_db")) + .forEach(path -> { + try { + if (Files.isDirectory(path)) { + FileUtils.deleteDirectory(path.toFile()); + } else { + Files.delete(path); + } + } catch (IOException e) { + LOG.error("Failed to delete path: {}", path, e); + } + }); + } + } + + @Test + public void testCliDriver() throws Exception { + adapter.runTest(name, qfile); + } +} \ No newline at end of file diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties 
index 3f5b804bba3c..d253a2f93aa4 100644 --- a/itests/src/test/resources/testconfiguration.properties +++ b/itests/src/test/resources/testconfiguration.properties @@ -455,6 +455,9 @@ iceberg.llap.query.compactor.files=\ iceberg_minor_compaction_partition_evolution.q,\ iceberg_minor_compaction_unpartitioned.q +iceberg.llap.query.rest.files=\ + iceberg_rest_catalog.q + iceberg.llap.only.query.files=\ hadoop_catalog_create_table.q,\ iceberg_bucket_map_join_1.q,\ diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java index 6558955788dd..85cad1d2a66d 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java @@ -644,6 +644,7 @@ public IcebergCliConfig() { setQueryDir("iceberg/iceberg-handler/src/test/queries/positive"); excludesFrom(testConfigProps, "iceberg.llap.only.query.files"); excludesFrom(testConfigProps, "iceberg.llap.query.compactor.files"); + excludesFrom(testConfigProps, "iceberg.llap.query.rest.files"); setResultsDir("iceberg/iceberg-handler/src/test/results/positive"); setLogDir("itests/qtest/target/qfile-results/iceberg-handler/positive"); @@ -697,6 +698,28 @@ public IcebergLlapLocalCliConfig() { } } + public static class TestIcebergRESTCatalogLlapLocalCliDriver extends AbstractCliConfig { + + public TestIcebergRESTCatalogLlapLocalCliDriver() { + super(CoreCliDriver.class); + try { + setQueryDir("iceberg/iceberg-handler/src/test/queries/positive"); + includesFrom(testConfigProps, "iceberg.llap.query.rest.files"); + + setResultsDir("iceberg/iceberg-handler/src/test/results/positive/llap"); + setLogDir("itests/qtest/target/qfile-results/iceberg-handler/positive"); + + setInitScript("q_test_init_tez.sql"); + setCleanupScript("q_test_cleanup_tez.sql"); + + setHiveConfDir("data/conf/iceberg/llap"); + setClusterType(MiniClusterType.LLAP_LOCAL); + } catch (Exception e) { + throw new RuntimeException("can't contruct cliconfig", e); + } + } + } + public static class IcebergLlapLocalCompactorCliConfig extends AbstractCliConfig { public IcebergLlapLocalCompactorCliConfig() { diff --git a/itests/util/src/main/java/org/apache/hive/ITestsSchemaInfo.java b/itests/util/src/main/java/org/apache/hive/ITestsSchemaInfo.java new file mode 100644 index 000000000000..c3b6d445ce14 --- /dev/null +++ b/itests/util/src/main/java/org/apache/hive/ITestsSchemaInfo.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hive; + +import org.apache.hadoop.hive.metastore.HiveMetaException; +import org.apache.hadoop.hive.metastore.MetaStoreSchemaInfo; + +import java.io.File; + +public class ITestsSchemaInfo extends MetaStoreSchemaInfo { + private static final String BASE_DIR = System.getProperty("basedir"); + + public ITestsSchemaInfo(String metastoreHome, String dbType) throws HiveMetaException { + super(metastoreHome, dbType); + } + + @Override + public String getMetaStoreScriptDir() { + return new File(BASE_DIR, "../../standalone-metastore/metastore-server/src/main/sql/derby").getAbsolutePath(); + } +} diff --git a/pom.xml b/pom.xml index 58aa3532d086..2be866c8d17e 100644 --- a/pom.xml +++ b/pom.xml @@ -152,6 +152,8 @@ 4.5.13 4.4.13 2.9.2 + 5.3.1 + 5.3.1 2.5.2 2.16.1 2.3.4 @@ -581,6 +583,21 @@ httpcore ${httpcomponents.core.version} + + org.apache.httpcomponents.client5 + httpclient5 + ${httpcomponents5.core.version} + + + org.apache.httpcomponents.core5 + httpcore5 + ${httpcomponents5.core.version} + + + org.apache.httpcomponents.core5 + httpcore5-h2 + ${httpcomponents5.core.version} + org.apache.velocity velocity-engine-core diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/create/CreateDatabaseOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/create/CreateDatabaseOperation.java index 221d2e710b46..0bccba094fed 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/create/CreateDatabaseOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/create/CreateDatabaseOperation.java @@ -19,15 +19,16 @@ package org.apache.hadoop.hive.ql.ddl.database.create; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.DatabaseType; import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.utils.StringUtils; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.ddl.DDLOperation; import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; -import org.apache.hadoop.hive.ql.ddl.database.desc.DescDatabaseDesc; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.session.SessionState; @@ -65,6 +66,10 @@ public int execute() throws HiveException { } else { // should never be here throw new HiveException("Unsupported database type " + database.getType() + " for " + database.getName()); } + String defaultCatalog = MetastoreConf.get(context.getConf(), MetastoreConf.ConfVars.CATALOG_DEFAULT.getVarname()); + if (!StringUtils.isEmpty(defaultCatalog) && !defaultCatalog.equals(Warehouse.DEFAULT_CATALOG_NAME)) { + database.setCatalogName(defaultCatalog); + } context.getDb().createDatabase(database, desc.getIfNotExists()); } catch (AlreadyExistsException ex) { //it would be better if AlreadyExistsException had an errorCode field.... 
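Note on the IMetaStoreClient.java changes that follow: a large number of previously abstract interface methods are converted into default methods that throw UnsupportedOperationException. The practical effect is that an alternative client implementation, such as the REST-catalog-backed client selected via metastore.client.impl in the tests above, only needs to override the operations it actually supports. The sketch below is illustrative only and is not part of this patch; CatalogClient and RestBackedClient are hypothetical stand-ins for the real IMetaStoreClient and its implementations.

    // Minimal sketch of the default-method pattern, assuming hypothetical types.
    import java.util.List;

    interface CatalogClient {
      // still mandatory for every implementation
      List<String> getAllDatabases();

      // optional operation, analogous to the new defaults in this patch
      default void createCatalog(String name) {
        throw new UnsupportedOperationException("client does not support creating catalogs");
      }
    }

    class RestBackedClient implements CatalogClient {
      @Override
      public List<String> getAllDatabases() {
        return List.of("default"); // stub result for the sketch
      }
      // createCatalog is intentionally not overridden; callers that invoke it
      // receive the UnsupportedOperationException from the default method.
    }

Under this pattern, adding further optional operations to the interface does not break existing implementations; they simply inherit the throwing default until they choose to support the call.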
diff --git a/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index 793cfffceda6..f13c7de29b12 100644 --- a/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.nio.ByteBuffer; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Map.Entry; @@ -50,20 +51,26 @@ public interface IMetaStoreClient extends AutoCloseable { * Returns whether current client is compatible with conf argument or not * @return */ - boolean isCompatibleWith(Configuration conf); + default boolean isCompatibleWith(Configuration configuration) { + return false; + } /** * Set added jars path info to MetaStoreClient. * @param addedJars the hive.added.jars.path. It is qualified paths separated by commas. */ - void setHiveAddedJars(String addedJars); + default void setHiveAddedJars(String addedJars) { + throw new UnsupportedOperationException("MetaStore client does not support setting added jars"); + } /** * Returns true if the current client is using an in process metastore (local metastore). * * @return */ - boolean isLocalMetaStore(); + default boolean isLocalMetaStore(){ + throw new UnsupportedOperationException("MetaStore client does not support checking if metastore is local"); + } /** * Tries to reconnect this MetaStoreClient to the MetaStore. @@ -79,12 +86,16 @@ public interface IMetaStoreClient extends AutoCloseable { /** * set meta variable which is open to end users */ - void setMetaConf(String key, String value) throws MetaException, TException; + default void setMetaConf(String key, String value) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support setting meta variables"); + } /** * get current meta variable */ - String getMetaConf(String key) throws MetaException, TException; + default String getMetaConf(String key) throws TException{ + return ""; + } /** * Create a new catalog. @@ -95,8 +106,10 @@ public interface IMetaStoreClient extends AutoCloseable { * create the directory for the catalog. * @throws TException general thrift exception. */ - void createCatalog(Catalog catalog) - throws AlreadyExistsException, InvalidObjectException, MetaException, TException; + default void createCatalog(Catalog catalog) + throws AlreadyExistsException, InvalidObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support creating catalogs"); + } /** * Alter an existing catalog. @@ -110,8 +123,10 @@ void createCatalog(Catalog catalog) * @throws MetaException usually indicates a database error * @throws TException general thrift exception */ - void alterCatalog(String catalogName, Catalog newCatalog) - throws NoSuchObjectException, InvalidObjectException, MetaException, TException; + default void alterCatalog(String catalogName, Catalog newCatalog) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support altering catalogs"); + } /** * Get a catalog object. 
@@ -121,7 +136,9 @@ void alterCatalog(String catalogName, Catalog newCatalog) * @throws MetaException something went wrong, usually in the database. * @throws TException general thrift exception. */ - Catalog getCatalog(String catName) throws NoSuchObjectException, MetaException, TException; + default Catalog getCatalog(String catName) throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support fetching catalogs"); + } /** * Get a list of all catalogs known to the system. @@ -129,7 +146,9 @@ void alterCatalog(String catalogName, Catalog newCatalog) * @throws MetaException something went wrong, usually in the database. * @throws TException general thrift exception. */ - List getCatalogs() throws MetaException, TException; + default List getCatalogs() throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support fetching catalogs"); + } /** * Drop a catalog. Catalogs must be empty to be dropped, there is no cascade for dropping a @@ -140,8 +159,10 @@ void alterCatalog(String catalogName, Catalog newCatalog) * @throws MetaException something went wrong, usually in the database. * @throws TException general thrift exception. */ - void dropCatalog(String catName) - throws NoSuchObjectException, InvalidOperationException, MetaException, TException; + default void dropCatalog(String catName) + throws NoSuchObjectException, InvalidOperationException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support dropping catalogs"); + } /** * Drop a catalog. Catalogs must be empty to be dropped, there is no cascade for dropping a @@ -150,7 +171,9 @@ void dropCatalog(String catName) * @param ifExists if true, do not throw an error if the catalog does not exist. * @throws TException general thrift exception. */ - void dropCatalog(String catName, boolean ifExists) throws TException; + default void dropCatalog(String catName, boolean ifExists) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support dropping catalogs"); + } /** * Get the names of all databases in the default catalog that match the given pattern. @@ -243,8 +266,10 @@ List getTables(String dbName, String tablePattern, TableType tableType) * @throws TException thrift transport error * @throws UnknownDBException indicated database does not exist. */ - List getTables(String catName, String dbName, String tablePattern, TableType tableType) - throws MetaException, TException, UnknownDBException; + default List getTables(String catName, String dbName, String tablePattern, TableType tableType) + throws MetaException, TException, UnknownDBException { + throw new UnsupportedOperationException("MetaStore client does not support fetching tables with table type"); + } /** * Retrieve all materialized views that have rewriting enabled. This will use the default catalog. @@ -253,8 +278,10 @@ List getTables(String catName, String dbName, String tablePattern, Table * @throws TException thrift transport error * @throws UnknownDBException no such database */ - List getAllMaterializedViewObjectsForRewriting() - throws MetaException, TException, UnknownDBException; + default List
<Table> getAllMaterializedViewObjectsForRewriting() + throws MetaException, TException, UnknownDBException { + throw new UnsupportedOperationException("MetaStore client does not support fetching materialized views"); + } /** * Get the names of all the tables along with extended table metadata @@ -267,8 +294,10 @@ List<Table>
getAllMaterializedViewObjectsForRewriting() * @throws MetaException Thrown if there is error on fetching from DBMS. * @throws TException Thrown if there is a thrift transport exception. */ - public List getTablesExt(String catName, String dbName, String tablePattern, int requestedFields, - int limit) throws MetaException, TException; + default List getTablesExt(String catName, String dbName, String tablePattern, int requestedFields, + int limit) throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support fetching extended table info"); + } /** * Get materialized views that have rewriting enabled. This will use the default catalog. @@ -278,8 +307,10 @@ public List getTablesExt(String catName, String dbName, Strin * @throws TException thrift transport error * @throws UnknownDBException no such database */ - List getMaterializedViewsForRewriting(String dbName) - throws MetaException, TException, UnknownDBException; + default List getMaterializedViewsForRewriting(String dbName) + throws MetaException, TException, UnknownDBException { + throw new UnsupportedOperationException("MetaStore client does not support fetching materialized views"); + } /** * Get materialized views that have rewriting enabled. @@ -290,8 +321,10 @@ List getMaterializedViewsForRewriting(String dbName) * @throws TException thrift transport error * @throws UnknownDBException no such database */ - List getMaterializedViewsForRewriting(String catName, String dbName) - throws MetaException, TException, UnknownDBException; + default List getMaterializedViewsForRewriting(String catName, String dbName) + throws MetaException, TException, UnknownDBException { + throw new UnsupportedOperationException("MetaStore client does not support fetching materialized views"); + } /** * Fetches just table name and comments. Useful when you need full table name @@ -305,8 +338,10 @@ List getMaterializedViewsForRewriting(String catName, String dbName) * @throws TException thrift transport error * @throws UnknownDBException No databases match the provided pattern. */ - List getTableMeta(String dbPatterns, String tablePatterns, List tableTypes) - throws MetaException, TException, UnknownDBException; + default List getTableMeta(String dbPatterns, String tablePatterns, List tableTypes) + throws MetaException, TException, UnknownDBException { + throw new UnsupportedOperationException("MetaStore client does not support fetching table metadata"); + } /** * Fetches just table name and comments. Useful when you need full table name @@ -321,9 +356,11 @@ List getTableMeta(String dbPatterns, String tablePatterns, List getTableMeta(String catName, String dbPatterns, String tablePatterns, + default List getTableMeta(String catName, String dbPatterns, String tablePatterns, List tableTypes) - throws MetaException, TException, UnknownDBException; + throws MetaException, TException, UnknownDBException { + throw new UnsupportedOperationException("MetaStore client does not support fetching table metadata"); + } /** * Get the names of all tables in the specified database. 
@@ -386,8 +423,10 @@ List getAllTables(String catName, String dbName) * @throws UnknownDBException no such database * @throws TException thrift transport error */ - List listTableNamesByFilter(String dbName, String filter, short maxTables) - throws TException, InvalidOperationException, UnknownDBException; + default List listTableNamesByFilter(String dbName, String filter, short maxTables) + throws TException, InvalidOperationException, UnknownDBException { + throw new UnsupportedOperationException("MetaStore client does not support listing table names by filter"); + } /** * Get a list of table names that match a filter. @@ -429,8 +468,10 @@ List listTableNamesByFilter(String dbName, String filter, short maxTable * @throws UnknownDBException no such database * @throws TException thrift transport error */ - List listTableNamesByFilter(String catName, String dbName, String filter, int maxTables) - throws TException, InvalidOperationException, UnknownDBException; + default List listTableNamesByFilter(String catName, String dbName, String filter, int maxTables) + throws TException, InvalidOperationException, UnknownDBException { + throw new UnsupportedOperationException("MetaStore client does not support listing table names by filter"); + } /** * Drop the table. @@ -566,18 +607,29 @@ default void dropTable(String catName, String dbName, String tableName) * @throws TException Thrift transport exception */ @Deprecated - void truncateTable(String dbName, String tableName, List partNames) throws MetaException, TException; + default void truncateTable(String dbName, String tableName, List partNames) throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support truncating tables"); + } - void truncateTable(TableName table, List partNames) throws TException; + default void truncateTable(TableName table, List partNames) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support truncating tables"); + } - void truncateTable(String dbName, String tableName, List partNames, - String validWriteIds, long writeId) throws TException; + default void truncateTable(String dbName, String tableName, List partNames, + String validWriteIds, long writeId) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support truncating tables with write ids"); + } - void truncateTable(String dbName, String tableName, List partNames, - String validWriteIds, long writeId, boolean deleteData) throws TException; - void truncateTable(String catName, String dbName, String tableName, String ref, List partNames, - String validWriteIds, long writeId, boolean deleteData, EnvironmentContext context) throws TException; + default void truncateTable(String dbName, String tableName, List partNames, + String validWriteIds, long writeId, boolean deleteData) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support truncating tables with write ids"); + } + + default void truncateTable(String catName, String dbName, String tableName, String ref, List partNames, + String validWriteIds, long writeId, boolean deleteData, EnvironmentContext context) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support truncating tables with write ids"); + } /** * Recycles the files recursively from the input path to the cmroot directory either by copying or moving it. 
@@ -586,7 +638,9 @@ void truncateTable(String catName, String dbName, String tableName, String ref, * isPurge flag when set to true files which needs to be recycled are not moved to Trash * @return Response which is currently void */ - CmRecycleResponse recycleDirToCmPath(CmRecycleRequest request) throws MetaException, TException; + default CmRecycleResponse recycleDirToCmPath(CmRecycleRequest request) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support recycling directories to cmroot"); + } /** * Check whether a table exists in the default catalog. @@ -742,8 +796,10 @@ Table getTable(String catName, String dbName, String tableName, * @throws MetaException * Any other errors */ - List
<Table> getTableObjectsByName(String dbName, List<String> tableNames) - throws MetaException, InvalidOperationException, UnknownDBException, TException; + default List<Table>
getTableObjectsByName(String dbName, List tableNames) + throws MetaException, InvalidOperationException, UnknownDBException, TException { + throw new UnsupportedOperationException("MetaStore client does not support fetching table objects by name"); + } /** * Get tables as objects (rather than just fetching their names). This is more expensive and @@ -765,8 +821,11 @@ List
<Table> getTableObjectsByName(String dbName, List<String> tableNames) * @throws MetaException * Any other errors */ - List<Table>
getTables(String catName, String dbName, List<String> tableNames, GetProjectionsSpec projectionsSpec) - throws MetaException, InvalidOperationException, UnknownDBException, TException; + default List<Table>
getTables(String catName, String dbName, List tableNames, + GetProjectionsSpec projectionsSpec) throws MetaException, InvalidOperationException, UnknownDBException, + TException { + throw new UnsupportedOperationException("MetaStore client does not support fetching tables"); + } /** * Get tables as objects (rather than just fetching their names). This is more expensive and * should only be used if you actually need all the information about the tables. @@ -788,26 +847,35 @@ List
getTables(String catName, String dbName, List tableNames, Ge * @throws MetaException * Any other errors */ - List
<Table> getTableObjectsByName(String catName, String dbName, List<String> tableNames) - throws MetaException, InvalidOperationException, UnknownDBException, TException; + default List<Table>
getTableObjectsByName(String catName, String dbName, List tableNames) + throws MetaException, InvalidOperationException, UnknownDBException, TException { + throw new UnsupportedOperationException("MetaStore client does not support fetching table objects by name"); + } /** * Returns the invalidation information for the materialized views given as input. */ - Materialization getMaterializationInvalidationInfo(CreationMetadata cm, String validTxnList) - throws MetaException, InvalidOperationException, UnknownDBException, TException; + default Materialization getMaterializationInvalidationInfo(CreationMetadata cm, String validTxnList) + throws TException { + throw new UnsupportedOperationException("MetaStore client does not support fetching materialization invalidation " + + "info"); + } /** * Updates the creation metadata for the materialized view. */ - void updateCreationMetadata(String dbName, String tableName, CreationMetadata cm) - throws MetaException, TException; + default void updateCreationMetadata(String dbName, String tableName, CreationMetadata cm) + throws TException { + throw new UnsupportedOperationException("MetaStore client does not support updating creation metadata"); + } /** * Updates the creation metadata for the materialized view. */ - void updateCreationMetadata(String catName, String dbName, String tableName, CreationMetadata cm) - throws MetaException, TException; + default void updateCreationMetadata(String catName, String dbName, String tableName, CreationMetadata cm) + throws TException { + throw new UnsupportedOperationException("MetaStore client does not support updating creation metadata"); + } /** /** @@ -822,8 +890,10 @@ void updateCreationMetadata(String catName, String dbName, String tableName, Cre * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ - Partition appendPartition(String dbName, String tableName, List partVals) - throws InvalidObjectException, AlreadyExistsException, MetaException, TException; + default Partition appendPartition(String dbName, String tableName, List partVals) + throws InvalidObjectException, AlreadyExistsException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support appending partitions"); + } /** * Add a partition to a table and get back the resulting Partition object. This creates an @@ -838,8 +908,10 @@ Partition appendPartition(String dbName, String tableName, List partVals * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ - Partition appendPartition(String catName, String dbName, String tableName, List partVals) - throws InvalidObjectException, AlreadyExistsException, MetaException, TException; + default Partition appendPartition(String catName, String dbName, String tableName, List partVals) + throws InvalidObjectException, AlreadyExistsException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support appending partitions"); + } /** * Add a partition to a table and get back the resulting Partition object. 
This creates an @@ -853,8 +925,10 @@ Partition appendPartition(String catName, String dbName, String tableName, List< * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ - Partition appendPartition(String dbName, String tableName, String name) - throws InvalidObjectException, AlreadyExistsException, MetaException, TException; + default Partition appendPartition(String dbName, String tableName, String name) + throws InvalidObjectException, AlreadyExistsException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support appending partitions"); + } /** * Add a partition to a table and get back the resulting Partition object. This creates an @@ -869,8 +943,10 @@ Partition appendPartition(String dbName, String tableName, String name) * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ - Partition appendPartition(String catName, String dbName, String tableName, String name) - throws InvalidObjectException, AlreadyExistsException, MetaException, TException; + default Partition appendPartition(String catName, String dbName, String tableName, String name) + throws InvalidObjectException, AlreadyExistsException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support appending partitions"); + } /** * Add a partition to the table. @@ -887,8 +963,10 @@ Partition appendPartition(String catName, String dbName, String tableName, Strin * @throws TException * Thrift exception */ - Partition add_partition(Partition partition) - throws InvalidObjectException, AlreadyExistsException, MetaException, TException; + default Partition add_partition(Partition partition) + throws InvalidObjectException, AlreadyExistsException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support adding partitions"); + } /** * Add partitions to the table. @@ -904,8 +982,10 @@ Partition add_partition(Partition partition) * @throws TException * Thrift exception */ - int add_partitions(List partitions) - throws InvalidObjectException, AlreadyExistsException, MetaException, TException; + default int add_partitions(List partitions) + throws InvalidObjectException, AlreadyExistsException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support adding partitions"); + } /** * Add a partitions using a spec proxy. @@ -916,8 +996,10 @@ int add_partitions(List partitions) * @throws MetaException error accessing the RDBMS or storage. * @throws TException thrift transport error */ - int add_partitions_pspec(PartitionSpecProxy partitionSpec) - throws InvalidObjectException, AlreadyExistsException, MetaException, TException; + default int add_partitions_pspec(PartitionSpecProxy partitionSpec) + throws InvalidObjectException, AlreadyExistsException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support adding partitions using a spec proxy"); + } /** * Add partitions to the table. 
@@ -927,9 +1009,11 @@ int add_partitions_pspec(PartitionSpecProxy partitionSpec) * @param needResults Whether the results are needed * @return the partitions that were added, or null if !needResults */ - List add_partitions( + default List add_partitions( List partitions, boolean ifNotExists, boolean needResults) - throws InvalidObjectException, AlreadyExistsException, MetaException, TException; + throws TException { + throw new UnsupportedOperationException("MetaStore client does not support adding partitions"); + } /** * Get a partition. @@ -942,8 +1026,10 @@ List add_partitions( * @throws MetaException error access the RDBMS. * @throws TException thrift transport error */ - Partition getPartition(String dbName, String tblName, List partVals) - throws NoSuchObjectException, MetaException, TException; + default Partition getPartition(String dbName, String tblName, List partVals) + throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support getting partitions"); + } /** * Get a partition. @@ -953,8 +1039,10 @@ Partition getPartition(String dbName, String tblName, List partVals) * @throws MetaException error access the RDBMS. * @throws TException thrift transport error */ - GetPartitionResponse getPartitionRequest(GetPartitionRequest req) - throws NoSuchObjectException, MetaException, TException; + default GetPartitionResponse getPartitionRequest(GetPartitionRequest req) + throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support getting partitions"); + } /** * Get a partition. @@ -968,8 +1056,10 @@ GetPartitionResponse getPartitionRequest(GetPartitionRequest req) * @throws MetaException error access the RDBMS. * @throws TException thrift transport error */ - Partition getPartition(String catName, String dbName, String tblName, List partVals) - throws NoSuchObjectException, MetaException, TException; + default Partition getPartition(String catName, String dbName, String tblName, List partVals) + throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support getting partitions"); + } /** * Move a partition from one table to another @@ -984,10 +1074,12 @@ Partition getPartition(String catName, String dbName, String tblName, List partitionSpecs, + default Partition exchange_partition(Map partitionSpecs, String sourceDb, String sourceTable, String destdb, String destTableName) throws MetaException, NoSuchObjectException, - InvalidObjectException, TException; + InvalidObjectException, TException { + throw new UnsupportedOperationException("MetaStore client does not support exchanging partitions"); + } /** * Move a partition from one table to another @@ -1004,10 +1096,12 @@ Partition exchange_partition(Map partitionSpecs, * @throws InvalidObjectException error in partition specifications * @throws TException thrift transport error */ - Partition exchange_partition(Map partitionSpecs, String sourceCat, + default Partition exchange_partition(Map partitionSpecs, String sourceCat, String sourceDb, String sourceTable, String destCat, String destdb, String destTableName) throws MetaException, NoSuchObjectException, - InvalidObjectException, TException; + InvalidObjectException, TException { + throw new UnsupportedOperationException("MetaStore client does not support exchanging partitions"); + } /** * With the one partitionSpecs to exchange, multiple partitions could be exchanged. 
@@ -1024,10 +1118,12 @@ Partition exchange_partition(Map partitionSpecs, String sourceCa * @throws TException thrift transport error * @return the list of the new partitions */ - List exchange_partitions(Map partitionSpecs, + default List exchange_partitions(Map partitionSpecs, String sourceDb, String sourceTable, String destdb, String destTableName) throws MetaException, NoSuchObjectException, - InvalidObjectException, TException; + InvalidObjectException, TException { + throw new UnsupportedOperationException("MetaStore client does not support exchanging partitions"); + } /** * With the one partitionSpecs to exchange, multiple partitions could be exchanged. @@ -1046,10 +1142,12 @@ List exchange_partitions(Map partitionSpecs, * @throws TException thrift transport error * @return the list of the new partitions */ - List exchange_partitions(Map partitionSpecs, String sourceCat, + default List exchange_partitions(Map partitionSpecs, String sourceCat, String sourceDb, String sourceTable, String destCat, String destdb, String destTableName) - throws MetaException, NoSuchObjectException, InvalidObjectException, TException; + throws MetaException, NoSuchObjectException, InvalidObjectException, TException { + throw new UnsupportedOperationException("MetaStore client does not support exchanging partitions"); + } /** * Get a Partition by name. @@ -1060,8 +1158,10 @@ List exchange_partitions(Map partitionSpecs, String s * @throws MetaException error access the RDBMS. * @throws TException thrift transport error */ - Partition getPartition(String dbName, String tblName, String name) - throws MetaException, UnknownTableException, NoSuchObjectException, TException; + default Partition getPartition(String dbName, String tblName, String name) + throws MetaException, UnknownTableException, NoSuchObjectException, TException { + throw new UnsupportedOperationException("MetaStore client does not support getting partitions"); + } /** * Get a Partition by name. @@ -1073,8 +1173,10 @@ Partition getPartition(String dbName, String tblName, String name) * @throws MetaException error access the RDBMS. * @throws TException thrift transport error */ - Partition getPartition(String catName, String dbName, String tblName, String name) - throws MetaException, UnknownTableException, NoSuchObjectException, TException; + default Partition getPartition(String catName, String dbName, String tblName, String name) + throws MetaException, UnknownTableException, NoSuchObjectException, TException { + throw new UnsupportedOperationException("MetaStore client does not support getting partitions"); + } /** @@ -1090,9 +1192,11 @@ Partition getPartition(String catName, String dbName, String tblName, String nam * @throws NoSuchObjectException no such partition * @throws TException thrift transport error */ - Partition getPartitionWithAuthInfo(String dbName, String tableName, + default Partition getPartitionWithAuthInfo(String dbName, String tableName, List pvals, String userName, List groupNames) - throws MetaException, UnknownTableException, NoSuchObjectException, TException; + throws MetaException, UnknownTableException, NoSuchObjectException, TException { + throw new UnsupportedOperationException("MetaStore client does not support getting partitions with auth info"); + } /** * Get a Partition along with authorization information. 
@@ -1108,9 +1212,11 @@ Partition getPartitionWithAuthInfo(String dbName, String tableName, * @throws NoSuchObjectException no such partition * @throws TException thrift transport error */ - Partition getPartitionWithAuthInfo(String catName, String dbName, String tableName, + default Partition getPartitionWithAuthInfo(String catName, String dbName, String tableName, List pvals, String userName, List groupNames) - throws MetaException, UnknownTableException, NoSuchObjectException, TException; + throws MetaException, UnknownTableException, NoSuchObjectException, TException { + throw new UnsupportedOperationException("MetaStore client does not support getting partitions with auth info"); + } /** * Get a list of partittions for a table. @@ -1122,8 +1228,10 @@ Partition getPartitionWithAuthInfo(String catName, String dbName, String tableNa * @throws MetaException error accessing RDBMS. * @throws TException thrift transport error */ - List listPartitions(String db_name, String tbl_name, short max_parts) - throws NoSuchObjectException, MetaException, TException; + default List listPartitions(String db_name, String tbl_name, short max_parts) + throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support listing partitions"); + } /** * Get a list of partittions for a table. @@ -1136,8 +1244,10 @@ List listPartitions(String db_name, String tbl_name, short max_parts) * @throws MetaException error accessing RDBMS. * @throws TException thrift transport error */ - List listPartitions(String catName, String db_name, String tbl_name, int max_parts) - throws NoSuchObjectException, MetaException, TException; + default List listPartitions(String catName, String db_name, String tbl_name, int max_parts) + throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support listing partitions"); + } /** * Get a list of partitions from a table, returned in the form of PartitionSpecProxy @@ -1147,8 +1257,10 @@ List listPartitions(String catName, String db_name, String tbl_name, * @return a PartitionSpecProxy * @throws TException thrift transport error */ - PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, int maxParts) - throws TException; + default PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, int maxParts) + throws TException { + throw new UnsupportedOperationException("MetaStore client does not support listing partition specs"); + } /** * Get a list of partitions from a table, returned in the form of PartitionSpecProxy @@ -1159,8 +1271,10 @@ PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, int maxPa * @return a PartitionSpecProxy * @throws TException thrift transport error */ - PartitionSpecProxy listPartitionSpecs(String catName, String dbName, String tableName, - int maxParts) throws TException; + default PartitionSpecProxy listPartitionSpecs(String catName, String dbName, String tableName, int maxParts) + throws TException { + throw new UnsupportedOperationException("MetaStore client does not support listing partition specs"); + } /** * Get a list of partitions based on a (possibly partial) list of partition values. @@ -1174,8 +1288,10 @@ PartitionSpecProxy listPartitionSpecs(String catName, String dbName, String tabl * @throws MetaException error accessing the database or processing the partition values. * @throws TException thrift transport error. 
*/ - List listPartitions(String db_name, String tbl_name, - List part_vals, short max_parts) throws NoSuchObjectException, MetaException, TException; + default List listPartitions(String db_name, String tbl_name, + List part_vals, short max_parts) throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support listing partitions"); + } /** * Get a list of partitions based on a (possibly partial) list of partition values. @@ -1190,9 +1306,11 @@ List listPartitions(String db_name, String tbl_name, * @throws MetaException error accessing the database or processing the partition values. * @throws TException thrift transport error. */ - List listPartitions(String catName, String db_name, String tbl_name, + default List listPartitions(String catName, String db_name, String tbl_name, List part_vals, int max_parts) - throws NoSuchObjectException, MetaException, TException; + throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support listing partitions"); + } /** * List Names of partitions in a table. @@ -1204,8 +1322,10 @@ List listPartitions(String catName, String db_name, String tbl_name, * @throws MetaException Error accessing the RDBMS. * @throws TException thrift transport error */ - List listPartitionNames(String db_name, String tbl_name, - short max_parts) throws NoSuchObjectException, MetaException, TException; + default List listPartitionNames(String db_name, String tbl_name, + short max_parts) throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support listing partition names"); + } /** * List Names of partitions in a table. @@ -1215,8 +1335,10 @@ List listPartitionNames(String db_name, String tbl_name, * @throws MetaException Error accessing the RDBMS. * @throws TException thrift transport error */ - GetPartitionNamesPsResponse listPartitionNamesRequest(GetPartitionNamesPsRequest req) - throws NoSuchObjectException, MetaException, TException; + default GetPartitionNamesPsResponse listPartitionNamesRequest(GetPartitionNamesPsRequest req) + throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support listing partition names"); + } /** * List Names of partitions in a table. @@ -1229,8 +1351,10 @@ GetPartitionNamesPsResponse listPartitionNamesRequest(GetPartitionNamesPsRequest * @throws MetaException Error accessing the RDBMS. * @throws TException thrift transport error */ - List listPartitionNames(String catName, String db_name, String tbl_name, - int max_parts) throws NoSuchObjectException, MetaException, TException; + default List listPartitionNames(String catName, String db_name, String tbl_name, int max_parts) + throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support listing partition names"); + } /** * Get a list of partition names matching a partial specification of the partition values. @@ -1246,9 +1370,10 @@ List listPartitionNames(String catName, String db_name, String tbl_name, * @throws TException thrift transport error. * @throws NoSuchObjectException no such table. 
*/ - List listPartitionNames(String db_name, String tbl_name, - List part_vals, short max_parts) - throws MetaException, TException, NoSuchObjectException; + default List listPartitionNames(String db_name, String tbl_name, List part_vals, short max_parts) + throws MetaException, TException, NoSuchObjectException { + throw new UnsupportedOperationException("MetaStore client does not support listing partition names"); + } /** * Get a list of partition names matching a partial specification of the partition values. @@ -1265,9 +1390,10 @@ List listPartitionNames(String db_name, String tbl_name, * @throws TException thrift transport error. * @throws NoSuchObjectException no such table. */ - List listPartitionNames(String catName, String db_name, String tbl_name, - List part_vals, int max_parts) - throws MetaException, TException, NoSuchObjectException; + default List listPartitionNames(String catName, String db_name, String tbl_name, List part_vals, + int max_parts) throws MetaException, TException, NoSuchObjectException { + throw new UnsupportedOperationException("MetaStore client does not support listing partition names"); + } /** * Get a list of partition names matching the specified filter and return in order if specified. @@ -1277,8 +1403,10 @@ List listPartitionNames(String catName, String db_name, String tbl_name, * @throws TException thrift transport error. * @throws NoSuchObjectException no such table. */ - List listPartitionNames(PartitionsByExprRequest request) - throws MetaException, TException, NoSuchObjectException; + default List listPartitionNames(PartitionsByExprRequest request) + throws MetaException, TException, NoSuchObjectException { + throw new UnsupportedOperationException("MetaStore client does not support listing partition names"); + } /** * Get a list of partition values @@ -1288,8 +1416,10 @@ List listPartitionNames(PartitionsByExprRequest request) * @throws TException thrift transport error * @throws NoSuchObjectException no such table */ - PartitionValuesResponse listPartitionValues(PartitionValuesRequest request) - throws MetaException, TException, NoSuchObjectException; + default PartitionValuesResponse listPartitionValues(PartitionValuesRequest request) + throws MetaException, TException, NoSuchObjectException { + throw new UnsupportedOperationException("MetaStore client does not support listing partition values"); + } /** * Get number of partitions matching specified filter @@ -1303,8 +1433,10 @@ PartitionValuesResponse listPartitionValues(PartitionValuesRequest request) * @throws NoSuchObjectException no such table * @throws TException thrift transport error */ - int getNumPartitionsByFilter(String dbName, String tableName, - String filter) throws MetaException, NoSuchObjectException, TException; + default int getNumPartitionsByFilter(String dbName, String tableName, String filter) + throws MetaException, NoSuchObjectException, TException { + throw new UnsupportedOperationException("MetaStore client does not support getting number of partitions by filter"); + } /** * Get number of partitions matching specified filter @@ -1319,8 +1451,10 @@ int getNumPartitionsByFilter(String dbName, String tableName, * @throws NoSuchObjectException no such table * @throws TException thrift transport error */ - int getNumPartitionsByFilter(String catName, String dbName, String tableName, - String filter) throws MetaException, NoSuchObjectException, TException; + default int getNumPartitionsByFilter(String catName, String dbName, String tableName, String filter) + throws 
MetaException, NoSuchObjectException, TException { + throw new UnsupportedOperationException("MetaStore client does not support getting number of partitions by filter"); + } /** @@ -1337,8 +1471,10 @@ int getNumPartitionsByFilter(String catName, String dbName, String tableName, * @throws NoSuchObjectException No such table. * @throws TException thrift transport error */ - List listPartitionsByFilter(String db_name, String tbl_name, - String filter, short max_parts) throws MetaException, NoSuchObjectException, TException; + default List listPartitionsByFilter(String db_name, String tbl_name, String filter, short max_parts) + throws MetaException, NoSuchObjectException, TException { + throw new UnsupportedOperationException("MetaStore client does not support listing partitions by filter"); + } /** * Get list of partitions matching specified filter @@ -1355,9 +1491,11 @@ List listPartitionsByFilter(String db_name, String tbl_name, * @throws NoSuchObjectException No such table. * @throws TException thrift transport error */ - List listPartitionsByFilter(String catName, String db_name, String tbl_name, + default List listPartitionsByFilter(String catName, String db_name, String tbl_name, String filter, int max_parts) - throws MetaException, NoSuchObjectException, TException; + throws MetaException, NoSuchObjectException, TException { + throw new UnsupportedOperationException("MetaStore client does not support listing partitions by filter"); + } /** * Get a list of partitions in a PartitionSpec, using a filter to select which partitions to @@ -1371,9 +1509,10 @@ List listPartitionsByFilter(String catName, String db_name, String tb * @throws NoSuchObjectException No table matches the request * @throws TException thrift transport error */ - PartitionSpecProxy listPartitionSpecsByFilter(String db_name, String tbl_name, - String filter, int max_parts) - throws MetaException, NoSuchObjectException, TException; + default PartitionSpecProxy listPartitionSpecsByFilter(String db_name, String tbl_name, String filter, int max_parts) + throws MetaException, NoSuchObjectException, TException { + throw new UnsupportedOperationException("MetaStore client does not support listing partition specs by filter"); + } /** * Get a list of partitions in a PartitionSpec, using a filter to select which partitions to @@ -1388,9 +1527,10 @@ PartitionSpecProxy listPartitionSpecsByFilter(String db_name, String tbl_name, * @throws NoSuchObjectException No table matches the request * @throws TException thrift transport error */ - PartitionSpecProxy listPartitionSpecsByFilter(String catName, String db_name, String tbl_name, - String filter, int max_parts) - throws MetaException, NoSuchObjectException, TException; + default PartitionSpecProxy listPartitionSpecsByFilter(String catName, String db_name, String tbl_name, + String filter, int max_parts) throws MetaException, NoSuchObjectException, TException { + throw new UnsupportedOperationException("MetaStore client does not support listing partition specs by filter"); + } /** * Get list of {@link PartitionSpec} matching specified serialized expression. @@ -1398,8 +1538,9 @@ PartitionSpecProxy listPartitionSpecsByFilter(String catName, String db_name, St * @return whether the resulting list contains partitions which may or may not match the expr * @throws TException thrift transport error or error executing the filter. 
*/ - boolean listPartitionsSpecByExpr(PartitionsByExprRequest req, List result) - throws TException; + default boolean listPartitionsSpecByExpr(PartitionsByExprRequest req, List result) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support listing partition specs by expr"); + } /** * Get list of partitions matching specified serialized expression @@ -1414,9 +1555,11 @@ boolean listPartitionsSpecByExpr(PartitionsByExprRequest req, List result) - throws TException; + throws TException { + throw new UnsupportedOperationException("MetaStore client does not support listing partitions by expr"); + } /** * Get list of partitions matching specified serialized expression @@ -1432,9 +1575,10 @@ boolean listPartitionsByExpr(String db_name, String tbl_name, * @return whether the resulting list contains partitions which may or may not match the expr * @throws TException thrift transport error or error executing the filter. */ - boolean listPartitionsByExpr(String catName, String db_name, String tbl_name, byte[] expr, - String default_partition_name, int max_parts, List result) - throws TException; + default boolean listPartitionsByExpr(String catName, String db_name, String tbl_name, byte[] expr, + String default_partition_name, int max_parts, List result) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support listing partitions by expr"); + } /** * Get list of partitions matching specified serialized expression @@ -1442,7 +1586,9 @@ boolean listPartitionsByExpr(String catName, String db_name, String tbl_name, by * @return whether the resulting list contains partitions which may or may not match the expr * @throws TException thrift transport error or error executing the filter. */ - boolean listPartitionsByExpr(PartitionsByExprRequest req, List result) throws TException; + default boolean listPartitionsByExpr(PartitionsByExprRequest req, List result) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support listing partitions by expr"); + } /** * List partitions, fetching the authorization information along with the partitions. @@ -1456,9 +1602,11 @@ boolean listPartitionsByExpr(String catName, String db_name, String tbl_name, by * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ - List listPartitionsWithAuthInfo(String dbName, + default List listPartitionsWithAuthInfo(String dbName, String tableName, short maxParts, String userName, List groupNames) - throws MetaException, TException, NoSuchObjectException; + throws MetaException, TException, NoSuchObjectException { + throw new UnsupportedOperationException("MetaStore client does not support listing partitions with auth info"); + } /** * List partitions, fetching the authorization information along with the partitions.
@@ -1468,8 +1616,10 @@ List listPartitionsWithAuthInfo(String dbName, * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ - GetPartitionsPsWithAuthResponse listPartitionsWithAuthInfoRequest(GetPartitionsPsWithAuthRequest req) - throws MetaException, TException, NoSuchObjectException; + default GetPartitionsPsWithAuthResponse listPartitionsWithAuthInfoRequest(GetPartitionsPsWithAuthRequest req) + throws MetaException, TException, NoSuchObjectException { + throw new UnsupportedOperationException("MetaStore client does not support listing partitions with auth info"); + } /** * List partitions, fetching the authorization information along with the partitions. @@ -1484,9 +1634,11 @@ GetPartitionsPsWithAuthResponse listPartitionsWithAuthInfoRequest(GetPartitionsP * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ - List listPartitionsWithAuthInfo(String catName, String dbName, String tableName, + default List listPartitionsWithAuthInfo(String catName, String dbName, String tableName, int maxParts, String userName, List groupNames) - throws MetaException, TException, NoSuchObjectException; + throws MetaException, TException, NoSuchObjectException { + throw new UnsupportedOperationException("MetaStore client does not support listing partitions with auth info"); + } /** * Get partitions by a list of partition names. @@ -1500,8 +1652,10 @@ List listPartitionsWithAuthInfo(String catName, String dbName, String * @deprecated Use {@link #getPartitionsByNames(GetPartitionsByNamesRequest)} instead */ @Deprecated - List getPartitionsByNames(String db_name, String tbl_name, - List part_names) throws NoSuchObjectException, MetaException, TException; + default List getPartitionsByNames(String db_name, String tbl_name, + List part_names) throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support getting partitions by names"); + } /** * Get partitions by a list of partition names. @@ -1511,8 +1665,10 @@ List getPartitionsByNames(String db_name, String tbl_name, * @throws MetaException error accessing the RDBMS. * @throws TException thrift transport error */ - PartitionsResponse getPartitionsRequest(PartitionsRequest req) - throws NoSuchObjectException, MetaException, TException; + default PartitionsResponse getPartitionsRequest(PartitionsRequest req) + throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support getting partitions request"); + } /** * Get partitions by a list of partition names. @@ -1522,7 +1678,9 @@ PartitionsResponse getPartitionsRequest(PartitionsRequest req) * @throws MetaException error accessing the RDBMS. 
* @throws TException thrift transport error */ - GetPartitionsByNamesResult getPartitionsByNames(GetPartitionsByNamesRequest req) throws TException; + default GetPartitionsByNamesResult getPartitionsByNames(GetPartitionsByNamesRequest req) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting partitions by names"); + } /** * List partitions along with privilege information for a user or groups @@ -1537,9 +1695,11 @@ PartitionsResponse getPartitionsRequest(PartitionsRequest req) * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ - List listPartitionsWithAuthInfo(String dbName, + default List listPartitionsWithAuthInfo(String dbName, String tableName, List partialPvals, short maxParts, String userName, - List groupNames) throws MetaException, TException, NoSuchObjectException; + List groupNames) throws MetaException, TException, NoSuchObjectException { + throw new UnsupportedOperationException("MetaStore client does not support listing partitions with auth info"); + } /** * List partitions along with privilege information for a user or groups @@ -1554,10 +1714,11 @@ List listPartitionsWithAuthInfo(String dbName, * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ - List listPartitionsWithAuthInfo(String catName, String dbName, String tableName, - List partialPvals, int maxParts, String userName, - List groupNames) - throws MetaException, TException, NoSuchObjectException; + default List listPartitionsWithAuthInfo( + String catName, String dbName, String tableName, List partialPvals, int maxParts, String userName, + List groupNames) throws MetaException, TException, NoSuchObjectException { + throw new UnsupportedOperationException("MetaStore client does not support listing partitions with auth info"); + } /** * Mark an event as having occurred on a partition. @@ -1573,9 +1734,11 @@ List listPartitionsWithAuthInfo(String catName, String dbName, String * @throws UnknownPartitionException no such partition * @throws InvalidPartitionException partition partKVs is invalid */ - void markPartitionForEvent(String db_name, String tbl_name, Map partKVs, + default void markPartitionForEvent(String db_name, String tbl_name, Map partKVs, PartitionEventType eventType) throws MetaException, NoSuchObjectException, TException, - UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException; + UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException { + throw new UnsupportedOperationException("MetaStore client does not support marking partition for event"); + } /** * Mark an event as having occurred on a partition. 
@@ -1592,9 +1755,11 @@ void markPartitionForEvent(String db_name, String tbl_name, Map p * @throws UnknownPartitionException no such partition * @throws InvalidPartitionException partition partKVs is invalid */ - void markPartitionForEvent(String catName, String db_name, String tbl_name, Map partKVs, + default void markPartitionForEvent(String catName, String db_name, String tbl_name, Map partKVs, PartitionEventType eventType) throws MetaException, NoSuchObjectException, TException, - UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException; + UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException { + throw new UnsupportedOperationException("MetaStore client does not support marking partition for event"); + } /** * Determine whether a partition has been marked with a particular event type. @@ -1610,9 +1775,11 @@ void markPartitionForEvent(String catName, String db_name, String tbl_name, Map< * @throws UnknownPartitionException no such partition * @throws InvalidPartitionException partition partKVs is invalid */ - boolean isPartitionMarkedForEvent(String db_name, String tbl_name, Map partKVs, + default boolean isPartitionMarkedForEvent(String db_name, String tbl_name, Map partKVs, PartitionEventType eventType) throws MetaException, NoSuchObjectException, TException, - UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException; + UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException { + throw new UnsupportedOperationException("MetaStore client does not support checking if partition is marked for event"); + } /** * Determine whether a partition has been marked with a particular event type. @@ -1629,16 +1796,20 @@ boolean isPartitionMarkedForEvent(String db_name, String tbl_name, Map partKVs, + default boolean isPartitionMarkedForEvent(String catName, String db_name, String tbl_name, Map partKVs, PartitionEventType eventType) throws MetaException, NoSuchObjectException, TException, - UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException; + UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException { + throw new UnsupportedOperationException("MetaStore client does not support checking if partition is marked for event"); + } /** * @param partVals * @throws TException * @throws MetaException */ - void validatePartitionNameCharacters(List partVals) throws TException, MetaException; + default void validatePartitionNameCharacters(List partVals) throws TException, MetaException { + throw new UnsupportedOperationException("MetaStore client does not support validating partition name characters"); + } /** * Dry run that translates table @@ -1647,8 +1818,9 @@ boolean isPartitionMarkedForEvent(String catName, String db_name, String tbl_nam * * a table object * * @throws HiveException */ - public Table getTranslateTableDryrun(Table tbl) throws AlreadyExistsException, - InvalidObjectException, MetaException, NoSuchObjectException, TException; + default Table getTranslateTableDryrun(Table tbl) throws TException { + return new Table(); + } /** * @param tbl @@ -1683,8 +1855,10 @@ void createTable(CreateTableRequest request) throws AlreadyExistsException, * @throws MetaException something went wrong, usually in the RDBMS * @throws TException general thrift exception */ - void alter_table(String databaseName, String tblName, Table table) - throws InvalidOperationException, 
MetaException, TException; + default void alter_table(String databaseName, String tblName, Table table) + throws InvalidOperationException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support altering table"); + } /** * Alter a table. Equivalent to @@ -1718,17 +1892,20 @@ default void alter_table(String catName, String dbName, String tblName, Table ne * @throws MetaException something went wrong, usually in the RDBMS * @throws TException general thrift exception */ - void alter_table(String catName, String dbName, String tblName, Table newTable, + default void alter_table(String catName, String dbName, String tblName, Table newTable, EnvironmentContext envContext) - throws InvalidOperationException, MetaException, TException; + throws InvalidOperationException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support altering table"); + } /** * @deprecated Use alter_table_with_environmentContext instead of alter_table with cascade option * passed in EnvironmentContext using {@code StatsSetupConst.CASCADE} */ @Deprecated - void alter_table(String defaultDatabaseName, String tblName, Table table, - boolean cascade) throws InvalidOperationException, MetaException, TException; + default void alter_table(String defaultDatabaseName, String tblName, Table table, boolean cascade) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support altering table with cascade option"); + } /** * Alter a table. @@ -1743,13 +1920,14 @@ void alter_table(String defaultDatabaseName, String tblName, Table table, * @throws TException general thrift exception */ @Deprecated - void alter_table_with_environmentContext(String databaseName, String tblName, Table table, + default void alter_table_with_environmentContext(String databaseName, String tblName, Table table, EnvironmentContext environmentContext) throws InvalidOperationException, MetaException, - TException; + TException { + throw new UnsupportedOperationException("MetaStore client does not support altering table with environment context"); + } - void alter_table(String catName, String databaseName, String tblName, Table table, - EnvironmentContext environmentContext, String validWriteIdList) - throws InvalidOperationException, MetaException, TException; + default void alter_table(String catName, String databaseName, String tblName, Table table, + EnvironmentContext environmentContext, String validWriteIdList) throws TException {} /** * Create a new database. * @param db database object. If the catalog name is null it will be assumed to be @@ -1844,8 +2022,10 @@ default void dropDatabase(String catName, String dbName, boolean deleteData, boo * @throws MetaException something went wrong, usually in the RDBMS. * @throws TException general thrift error. */ - void alterDatabase(String name, Database db) - throws NoSuchObjectException, MetaException, TException; + default void alterDatabase(String name, Database db) + throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support altering database"); + } /** * Alter a database. @@ -1857,8 +2037,10 @@ void alterDatabase(String name, Database db) * @throws MetaException something went wrong, usually in the RDBMS. * @throws TException general thrift error. 
*/ - void alterDatabase(String catName, String dbName, Database newDb) - throws NoSuchObjectException, MetaException, TException; + default void alterDatabase(String catName, String dbName, Database newDb) + throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support altering database"); + } /** * Create a new dataconnector. @@ -1868,8 +2050,10 @@ void alterDatabase(String catName, String dbName, Database newDb) * @throws MetaException something went wrong, usually in the RDBMS * @throws TException general thrift error */ - void createDataConnector(DataConnector connector) - throws InvalidObjectException, AlreadyExistsException, MetaException, TException; + default void createDataConnector(DataConnector connector) + throws InvalidObjectException, AlreadyExistsException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support creating dataconnector"); + } /** * Drop a dataconnector. @@ -1881,8 +2065,10 @@ void createDataConnector(DataConnector connector) * @throws MetaException something went wrong, usually either in the RDMBS or in storage. * @throws TException general thrift error. */ - void dropDataConnector(String name, boolean ifNotExists, boolean checkReferences) - throws NoSuchObjectException, InvalidOperationException, MetaException, TException; + default void dropDataConnector(String name, boolean ifNotExists, boolean checkReferences) + throws NoSuchObjectException, InvalidOperationException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support dropping dataconnector"); + } /** * Alter a dataconnector. @@ -1892,8 +2078,10 @@ void dropDataConnector(String name, boolean ifNotExists, boolean checkReferences * @throws MetaException Operation could not be completed, usually in the RDBMS. * @throws TException thrift transport layer error. */ - void alterDataConnector(String name, DataConnector connector) - throws NoSuchObjectException, MetaException, TException; + default void alterDataConnector(String name, DataConnector connector) + throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support altering dataconnector"); + } /** * Get the dataconnector by name @@ -1901,8 +2089,10 @@ void alterDataConnector(String name, DataConnector connector) * @throws MetaException error complete the operation * @throws TException thrift transport error */ - DataConnector getDataConnector(String name) - throws MetaException, TException; + default DataConnector getDataConnector(String name) + throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support getting dataconnector by name"); + } /** * Get the names of all dataconnectors in the MetaStore. @@ -1910,7 +2100,9 @@ DataConnector getDataConnector(String name) * @throws MetaException error accessing RDBMS. * @throws TException thrift transport error */ - List getAllDataConnectorNames() throws MetaException, TException; + default List getAllDataConnectorNames() throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support getting all dataconnector names"); + } /** * Drop a partition. @@ -1924,9 +2116,11 @@ DataConnector getDataConnector(String name) * @throws MetaException error accessing the RDBMS or the storage. 
* @throws TException thrift transport error */ - boolean dropPartition(String db_name, String tbl_name, + default boolean dropPartition(String db_name, String tbl_name, List part_vals, boolean deleteData) throws NoSuchObjectException, - MetaException, TException; + MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support dropping partition"); + } /** * Drop a partition. @@ -1941,9 +2135,11 @@ boolean dropPartition(String db_name, String tbl_name, * @throws MetaException error accessing the RDBMS or the storage. * @throws TException thrift transport error */ - boolean dropPartition(String catName, String db_name, String tbl_name, + default boolean dropPartition(String catName, String db_name, String tbl_name, List part_vals, boolean deleteData) throws NoSuchObjectException, - MetaException, TException; + MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support dropping partition"); + } /** * Drop a partition with the option to purge the partition data directly, @@ -1957,9 +2153,11 @@ boolean dropPartition(String catName, String db_name, String tbl_name, * @throws MetaException error accessing the RDBMS or the storage. * @throws TException thrift transport error. */ - boolean dropPartition(String db_name, String tbl_name, List part_vals, + default boolean dropPartition(String db_name, String tbl_name, List part_vals, PartitionDropOptions options) - throws NoSuchObjectException, MetaException, TException; + throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support dropping partition"); + } /** * Drop a partition with the option to purge the partition data directly, @@ -1974,9 +2172,11 @@ boolean dropPartition(String db_name, String tbl_name, List part_vals, * @throws MetaException error accessing the RDBMS or the storage. * @throws TException thrift transport error. */ - boolean dropPartition(String catName, String db_name, String tbl_name, List part_vals, + default boolean dropPartition(String catName, String db_name, String tbl_name, List part_vals, PartitionDropOptions options) - throws NoSuchObjectException, MetaException, TException; + throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support dropping partition"); + } /** * Drop partitions based on an expression. @@ -1997,9 +2197,11 @@ boolean dropPartition(String catName, String db_name, String tbl_name, List dropPartitions(String dbName, String tblName, + default List dropPartitions(String dbName, String tblName, List> partExprs, boolean deleteData, - boolean ifExists) throws NoSuchObjectException, MetaException, TException; + boolean ifExists) throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support dropping partitions by expression"); + } /** * Drop partitions based on an expression. @@ -2030,11 +2232,18 @@ default List dropPartitions(String catName, String dbName, String tbl .deleteData(deleteData) .ifExists(ifExists)); } + @Deprecated + default List dropPartitions(String dbName, String tblName, + List> partExprs, boolean deleteData, + boolean ifExists, boolean needResults) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support dropping partitions by expression"); + } /** * Drop partitions based on an expression. 
* @deprecated since 4.1.0, will be removed in 5.0.0 * use {@link #dropPartitions(TableName, RequestPartsSpec, PartitionDropOptions, EnvironmentContext)} instead. + * (HIVE-28658 Add Iceberg REST Catalog client support) * @param catName catalog name. * @param dbName database name. * @param tblName table name. @@ -2078,10 +2287,12 @@ default List dropPartitions(String catName, String dbName, String tbl * @throws TException On failure */ @Deprecated - List dropPartitions(String dbName, String tblName, + default List dropPartitions(String dbName, String tblName, List> partExprs, PartitionDropOptions options) - throws NoSuchObjectException, MetaException, TException; + throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support dropping partitions by expression"); + } /** * Generalization of dropPartitions(), @@ -2098,19 +2309,23 @@ List dropPartitions(String dbName, String tblName, * @throws TException On failure */ @Deprecated - List dropPartitions(String catName, String dbName, String tblName, + default List dropPartitions(String catName, String dbName, String tblName, List> partExprs, PartitionDropOptions options) - throws NoSuchObjectException, MetaException, TException; + throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support dropping partitions by expression"); + } /** * @deprecated since 4.1.0, will be removed in 5.0.0 * use {@link #dropPartitions(TableName, RequestPartsSpec, PartitionDropOptions, EnvironmentContext)} instead. */ @Deprecated - List dropPartitions(String catName, String dbName, String tblName, + default List dropPartitions(String catName, String dbName, String tblName, List> partExprs, PartitionDropOptions options, EnvironmentContext context) - throws NoSuchObjectException, MetaException, TException; + throws TException { + throw new UnsupportedOperationException("MetaStore client does not support dropping partitions by expression"); + } /** * Drop partitions based on the request partitions specification. @@ -2121,9 +2336,11 @@ List dropPartitions(String catName, String dbName, String tblName, * @return List of Partitions dropped. * @throws TException thrift transport error. */ - List dropPartitions(TableName tableName, + default List dropPartitions(TableName tableName, RequestPartsSpec partsSpec, PartitionDropOptions options, EnvironmentContext context) - throws TException; + throws TException { + throw new UnsupportedOperationException("MetaStore client does not support dropping partition"); + } /** * Drop a partition. @@ -2136,9 +2353,11 @@ List dropPartitions(TableName tableName, * @throws MetaException error accessing the RDBMS or storage * @throws TException thrift transport error */ - boolean dropPartition(String db_name, String tbl_name, + default boolean dropPartition(String db_name, String tbl_name, String name, boolean deleteData) throws NoSuchObjectException, - MetaException, TException; + MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support dropping partition"); + } /** * Drop a partition. 
@@ -2152,9 +2371,11 @@ boolean dropPartition(String db_name, String tbl_name, * @throws MetaException error accessing the RDBMS or storage * @throws TException thrift transport error */ - boolean dropPartition(String catName, String db_name, String tbl_name, + default boolean dropPartition(String catName, String db_name, String tbl_name, String name, boolean deleteData) - throws NoSuchObjectException, MetaException, TException; + throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support dropping partition"); + } /** * updates a partition to new partition @@ -2172,8 +2393,10 @@ boolean dropPartition(String catName, String db_name, String tbl_name, * @throws TException * if error in communicating with metastore server */ - void alter_partition(String dbName, String tblName, Partition newPart) - throws InvalidOperationException, MetaException, TException; + default void alter_partition(String dbName, String tblName, Partition newPart) + throws InvalidOperationException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support altering partition"); + } /** * updates a partition to new partition @@ -2192,13 +2415,17 @@ void alter_partition(String dbName, String tblName, Partition newPart) * if error in communicating with metastore server */ @Deprecated - void alter_partition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext) - throws InvalidOperationException, MetaException, TException; + default void alter_partition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext) + throws InvalidOperationException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support altering partition"); + } - void alter_partition(String catName, String dbName, String tblName, Partition newPart, + default void alter_partition(String catName, String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext, String writeIdList) - throws InvalidOperationException, MetaException, TException; + throws TException { + throw new UnsupportedOperationException("MetaStore client does not support altering partition"); + } /** * updates a partition to new partition @@ -2216,9 +2443,11 @@ void alter_partition(String catName, String dbName, String tblName, Partition ne * @throws TException * if error in communicating with metastore server */ - void alter_partition(String catName, String dbName, String tblName, Partition newPart, + default void alter_partition(String catName, String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext) - throws InvalidOperationException, MetaException, TException; + throws InvalidOperationException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support altering partition"); + } /** * updates a list of partitions @@ -2237,8 +2466,10 @@ void alter_partition(String catName, String dbName, String tblName, Partition ne * if error in communicating with metastore server */ @Deprecated - void alter_partitions(String dbName, String tblName, List newParts) - throws InvalidOperationException, MetaException, TException; + default void alter_partitions(String dbName, String tblName, List newParts) + throws InvalidOperationException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support altering partitions"); + } /** * 
updates a list of partitions @@ -2258,14 +2489,16 @@ void alter_partitions(String dbName, String tblName, List newParts) * if error in communicating with metastore server */ @Deprecated - void alter_partitions(String dbName, String tblName, List newParts, - EnvironmentContext environmentContext) - throws InvalidOperationException, MetaException, TException; + default void alter_partitions(String dbName, String tblName, List newParts, + EnvironmentContext environmentContext) throws InvalidOperationException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support altering partitions"); + } - void alter_partitions(String dbName, String tblName, List newParts, + default void alter_partitions(String dbName, String tblName, List newParts, EnvironmentContext environmentContext, - String writeIdList, long writeId) - throws InvalidOperationException, MetaException, TException; + String writeIdList, long writeId) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support altering partitions"); + } /** * updates a list of partitions @@ -2284,10 +2517,12 @@ void alter_partitions(String dbName, String tblName, List newParts, * @throws TException * if error in communicating with metastore server */ - void alter_partitions(String catName, String dbName, String tblName, List newParts, + default void alter_partitions(String catName, String dbName, String tblName, List newParts, EnvironmentContext environmentContext, String writeIdList, long writeId) - throws InvalidOperationException, MetaException, TException; + throws InvalidOperationException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support altering partitions"); + } /** * rename a partition to a new partition @@ -2308,9 +2543,11 @@ void alter_partitions(String catName, String dbName, String tblName, List part_vals, + default void renamePartition(final String dbname, final String tableName, final List part_vals, final Partition newPart) - throws InvalidOperationException, MetaException, TException; + throws InvalidOperationException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support renaming partition"); + } /** * rename a partition to a new partition @@ -2336,9 +2573,11 @@ default void renamePartition(String catName, String dbname, String tableName, Li renamePartition(catName, dbname, tableName, part_vals, newPart, validWriteIds, 0, false); } - void renamePartition(String catName, String dbname, String tableName, List part_vals, + default void renamePartition(String catName, String dbname, String tableName, List part_vals, Partition newPart, String validWriteIds, long txnId, boolean makeCopy) - throws TException; + throws TException { + throw new UnsupportedOperationException("MetaStore client does not support renaming partition"); + } /** * Get schema for a table, excluding the partition columns. @@ -2350,9 +2589,11 @@ void renamePartition(String catName, String dbname, String tableName, List getFields(String db, String tableName) + default List getFields(String db, String tableName) throws MetaException, TException, UnknownTableException, - UnknownDBException; + UnknownDBException { + throw new UnsupportedOperationException("MetaStore client does not support getting fields for a table"); + } /** * Get schema for a table, excluding the partition columns. 
@@ -2365,9 +2606,11 @@ List getFields(String db, String tableName) * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ - List getFields(String catName, String db, String tableName) + default List getFields(String catName, String db, String tableName) throws MetaException, TException, UnknownTableException, - UnknownDBException; + UnknownDBException { + throw new UnsupportedOperationException("MetaStore client does not support getting fields for a table"); + } /** * Get schema for a table, excluding the partition columns. @@ -2378,9 +2621,11 @@ List getFields(String catName, String db, String tableName) * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ - GetFieldsResponse getFieldsRequest(GetFieldsRequest req) + default GetFieldsResponse getFieldsRequest(GetFieldsRequest req) throws MetaException, TException, UnknownTableException, - UnknownDBException; + UnknownDBException { + throw new UnsupportedOperationException("MetaStore client does not support getting fields for a table"); + } /** * Get schema for a table, including the partition columns. @@ -2392,9 +2637,11 @@ GetFieldsResponse getFieldsRequest(GetFieldsRequest req) * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ - List getSchema(String db, String tableName) + default List getSchema(String db, String tableName) throws MetaException, TException, UnknownTableException, - UnknownDBException; + UnknownDBException { + throw new UnsupportedOperationException("MetaStore client does not support getting schema for a table"); + } /** * Get schema for a table, including the partition columns. @@ -2407,9 +2654,11 @@ List getSchema(String db, String tableName) * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ - List getSchema(String catName, String db, String tableName) + default List getSchema(String catName, String db, String tableName) throws MetaException, TException, UnknownTableException, - UnknownDBException; + UnknownDBException { + throw new UnsupportedOperationException("MetaStore client does not support getting schema for a table"); + } /** * Get schema for a table, including the partition columns. 
@@ -2420,9 +2669,11 @@ List getSchema(String catName, String db, String tableName) * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ - GetSchemaResponse getSchemaRequest(GetSchemaRequest req) + default GetSchemaResponse getSchemaRequest(GetSchemaRequest req) throws MetaException, TException, UnknownTableException, - UnknownDBException; + UnknownDBException { + throw new UnsupportedOperationException("MetaStore client does not support getting schema for a table"); + } /** * @param name @@ -2433,8 +2684,10 @@ GetSchemaResponse getSchemaRequest(GetSchemaRequest req) * @throws TException * @throws ConfigValSecurityException */ - String getConfigValue(String name, String defaultValue) - throws TException, ConfigValSecurityException; + default String getConfigValue(String name, String defaultValue) + throws TException, ConfigValSecurityException { + return "50"; + } /** * @@ -2444,8 +2697,10 @@ String getConfigValue(String name, String defaultValue) * @throws MetaException * @throws TException */ - List partitionNameToVals(String name) - throws MetaException, TException; + default List partitionNameToVals(String name) + throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support converting partition name to values"); + } /** * * @param name @@ -2454,8 +2709,10 @@ List partitionNameToVals(String name) * @throws MetaException * @throws TException */ - Map partitionNameToSpec(String name) - throws MetaException, TException; + default Map partitionNameToSpec(String name) + throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support converting partition name to spec"); + } /** * Write table level column statistics to persistent store @@ -2467,9 +2724,11 @@ Map partitionNameToSpec(String name) * @throws TException * @throws InvalidInputException */ - boolean updateTableColumnStatistics(ColumnStatistics statsObj) + default boolean updateTableColumnStatistics(ColumnStatistics statsObj) throws NoSuchObjectException, InvalidObjectException, MetaException, TException, - InvalidInputException; + InvalidInputException { + throw new UnsupportedOperationException("MetaStore client does not support updating table column statistics"); +} /** * Write partition level column statistics to persistent store @@ -2481,9 +2740,11 @@ boolean updateTableColumnStatistics(ColumnStatistics statsObj) * @throws TException * @throws InvalidInputException */ - boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) + default boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) throws NoSuchObjectException, InvalidObjectException, MetaException, TException, - InvalidInputException; + InvalidInputException { + throw new UnsupportedOperationException("MetaStore client does not support updating partition column statistics"); + } /** * Get the column statistics for a set of columns in a table. 
This should only be used for @@ -2498,12 +2759,15 @@ boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ - List getTableColumnStatistics(String dbName, String tableName, - List colNames, String engine) throws NoSuchObjectException, MetaException, TException; + default List getTableColumnStatistics(String dbName, String tableName, + List colNames, String engine) throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support getting table column statistics"); + } - List getTableColumnStatistics(String dbName, String tableName, - List colNames, String engine, String validWriteIdList) - throws NoSuchObjectException, MetaException, TException; + default List getTableColumnStatistics(String dbName, String tableName, + List colNames, String engine, String validWriteIdList) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting table column statistics"); + } /** * Get the column statistics for a set of columns in a table. This should only be used for @@ -2519,12 +2783,15 @@ List getTableColumnStatistics(String dbName, String tableNa * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ - List getTableColumnStatistics(String catName, String dbName, String tableName, - List colNames, String engine) throws NoSuchObjectException, MetaException, TException; + default List getTableColumnStatistics(String catName, String dbName, String tableName, + List colNames, String engine) throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support getting table column statistics"); + } - List getTableColumnStatistics(String catName, String dbName, String tableName, - List colNames, String engine, String validWriteIdList) - throws NoSuchObjectException, MetaException, TException; + default List getTableColumnStatistics(String catName, String dbName, String tableName, + List colNames, String engine, String validWriteIdList) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting table column statistics"); + } /** * Get the column statistics for a set of columns in a partition. 
* @param dbName database name @@ -2538,14 +2805,16 @@ List getTableColumnStatistics(String catName, String dbName * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ - Map> getPartitionColumnStatistics(String dbName, + default Map> getPartitionColumnStatistics(String dbName, String tableName, List partNames, List colNames, String engine) - throws NoSuchObjectException, MetaException, TException; + throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support getting partition column statistics"); + } - Map> getPartitionColumnStatistics(String dbName, - String tableName, List partNames, List colNames, - String engine, String validWriteIdList) - throws NoSuchObjectException, MetaException, TException; + default Map> getPartitionColumnStatistics(String dbName, String tableName, + List partNames, List colNames, String engine, String validWriteIdList) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting partition column statistics"); + } /** * Get the column statistics for a set of columns in a partition. @@ -2561,16 +2830,18 @@ Map> getPartitionColumnStatistics(String dbNam * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ - Map> getPartitionColumnStatistics( + default Map> getPartitionColumnStatistics( String catName, String dbName, String tableName, List partNames, List colNames, - String engine) throws NoSuchObjectException, MetaException, TException; + String engine) throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support getting partition column statistics"); + } - Map> getPartitionColumnStatistics( + default Map> getPartitionColumnStatistics( String catName, String dbName, String tableName, List partNames, List colNames, - String engine, String validWriteIdList) - throws NoSuchObjectException, MetaException, TException; - + String engine, String validWriteIdList) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting partition column statistics"); + } /** * Delete partition level column statistics given dbName, tableName, partName and colName, or * all columns in a partition. 
@@ -2705,9 +2976,13 @@ default boolean deleteTableColumnStatistics(String catName, String dbName, Strin * @return boolean indicating the outcome of the operation * @throws TException thrift transport error */ - public boolean deleteColumnStatistics(DeleteColumnStatisticsRequest req) throws TException; + default boolean deleteColumnStatistics(DeleteColumnStatisticsRequest req) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support deleting column statistics"); + } - void updateTransactionalStatistics(UpdateTransactionalStatsRequest req) throws TException; + default void updateTransactionalStatistics(UpdateTransactionalStatsRequest req) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support updating transactional statistics"); + } /** * @param role @@ -2716,8 +2991,10 @@ default boolean deleteTableColumnStatistics(String catName, String dbName, Strin * @throws MetaException * @throws TException */ - boolean create_role(Role role) - throws MetaException, TException; + default boolean create_role(Role role) + throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support creating roles"); + } /** * @param role_name @@ -2727,7 +3004,9 @@ boolean create_role(Role role) * @throws MetaException * @throws TException */ - boolean drop_role(String role_name) throws MetaException, TException; + default boolean drop_role(String role_name) throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support dropping roles"); + } /** * list all role names @@ -2735,7 +3014,9 @@ boolean create_role(Role role) * @throws TException * @throws MetaException */ - List listRoleNames() throws MetaException, TException; + default List listRoleNames() throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support listing role names"); + } /** * @@ -2749,9 +3030,11 @@ boolean create_role(Role role) * @throws MetaException * @throws TException */ - boolean grant_role(String role_name, String user_name, + default boolean grant_role(String role_name, String user_name, PrincipalType principalType, String grantor, PrincipalType grantorType, - boolean grantOption) throws MetaException, TException; + boolean grantOption) throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support granting roles"); + } /** * @param role_name @@ -2764,8 +3047,10 @@ boolean grant_role(String role_name, String user_name, * @throws MetaException * @throws TException */ - boolean revoke_role(String role_name, String user_name, - PrincipalType principalType, boolean grantOption) throws MetaException, TException; + default boolean revoke_role(String role_name, String user_name, + PrincipalType principalType, boolean grantOption) throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support revoking roles"); + } /** * @@ -2775,8 +3060,10 @@ boolean revoke_role(String role_name, String user_name, * @throws MetaException * @throws TException */ - List list_roles(String principalName, PrincipalType principalType) - throws MetaException, TException; + default List list_roles(String principalName, PrincipalType principalType) + throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support listing roles"); + } /** * Return the privileges that the user, group have directly and 
indirectly through roles @@ -2788,9 +3075,11 @@ List list_roles(String principalName, PrincipalType principalType) * @throws MetaException * @throws TException */ - PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject, + default PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject, String user_name, List group_names) throws MetaException, - TException; + TException { + throw new UnsupportedOperationException("MetaStore client does not support getting privilege set"); + } /** * Return the privileges that this principal has directly over the object (not through roles). @@ -2801,9 +3090,11 @@ PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject, * @throws MetaException * @throws TException */ - List list_privileges(String principal_name, + default List list_privileges(String principal_name, PrincipalType principal_type, HiveObjectRef hiveObject) - throws MetaException, TException; + throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support listing privileges"); + } /** * @param privileges @@ -2811,8 +3102,10 @@ List list_privileges(String principal_name, * @throws MetaException * @throws TException */ - boolean grant_privileges(PrivilegeBag privileges) - throws MetaException, TException; + default boolean grant_privileges(PrivilegeBag privileges) + throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support granting privileges"); + } /** * @param privileges @@ -2820,8 +3113,10 @@ boolean grant_privileges(PrivilegeBag privileges) * @throws MetaException * @throws TException */ - boolean revoke_privileges(PrivilegeBag privileges, boolean grantOption) - throws MetaException, TException; + default boolean revoke_privileges(PrivilegeBag privileges, boolean grantOption) + throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support revoking privileges"); + } /** * @param authorizer @@ -2830,8 +3125,10 @@ boolean revoke_privileges(PrivilegeBag privileges, boolean grantOption) * @throws MetaException * @throws TException */ - boolean refresh_privileges(HiveObjectRef objToRefresh, String authorizer, PrivilegeBag grantPrivileges) - throws MetaException, TException; + default boolean refresh_privileges(HiveObjectRef objToRefresh, String authorizer, PrivilegeBag grantPrivileges) + throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support refreshing privileges"); + } /** * This is expected to be a no-op when in local mode, @@ -2842,8 +3139,10 @@ boolean refresh_privileges(HiveObjectRef objToRefresh, String authorizer, Privil * @throws MetaException * @throws TException */ - String getDelegationToken(String owner, String renewerKerberosPrincipalName) - throws MetaException, TException; + default String getDelegationToken(String owner, String renewerKerberosPrincipalName) + throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support getting delegation token"); + } /** * @param tokenStrForm @@ -2851,33 +3150,55 @@ String getDelegationToken(String owner, String renewerKerberosPrincipalName) * @throws MetaException * @throws TException */ - long renewDelegationToken(String tokenStrForm) throws MetaException, TException; + default long renewDelegationToken(String tokenStrForm) throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support renewing delegation token"); + 
} /** * @param tokenStrForm * @throws MetaException * @throws TException */ - void cancelDelegationToken(String tokenStrForm) throws MetaException, TException; + default void cancelDelegationToken(String tokenStrForm) throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support canceling delegation token"); + } - String getTokenStrForm() throws IOException; + default String getTokenStrForm() throws IOException { + throw new UnsupportedOperationException("MetaStore client does not support getting token string form"); + } - boolean addToken(String tokenIdentifier, String delegationToken) throws TException; + default boolean addToken(String tokenIdentifier, String delegationToken) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support adding tokens"); + } - boolean removeToken(String tokenIdentifier) throws TException; + default boolean removeToken(String tokenIdentifier) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support removing tokens"); + } - String getToken(String tokenIdentifier) throws TException; + default String getToken(String tokenIdentifier) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting tokens"); + } - List getAllTokenIdentifiers() throws TException; + default List getAllTokenIdentifiers() throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting all tokens"); + } - int addMasterKey(String key) throws MetaException, TException; + default int addMasterKey(String key) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support adding master keys"); + } - void updateMasterKey(Integer seqNo, String key) - throws NoSuchObjectException, MetaException, TException; + default void updateMasterKey(Integer seqNo, String key) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support updating master keys"); + } + + default boolean removeMasterKey(Integer keySeq) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support removing master keys"); + } - boolean removeMasterKey(Integer keySeq) throws TException; - String[] getMasterKeys() throws TException; + default String[] getMasterKeys() throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting master keys"); + } /** * Create a new function. @@ -2886,8 +3207,10 @@ void updateMasterKey(Integer seqNo, String key) * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ - void createFunction(Function func) - throws InvalidObjectException, MetaException, TException; + default void createFunction(Function func) + throws InvalidObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support creating functions"); + } /** * Alter a function. 
@@ -2898,8 +3221,10 @@ void createFunction(Function func) * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ - void alterFunction(String dbName, String funcName, Function newFunction) - throws InvalidObjectException, MetaException, TException; + default void alterFunction(String dbName, String funcName, Function newFunction) + throws InvalidObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support altering functions"); + } /** * Alter a function. @@ -2911,8 +3236,10 @@ void alterFunction(String dbName, String funcName, Function newFunction) * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ - void alterFunction(String catName, String dbName, String funcName, Function newFunction) - throws InvalidObjectException, MetaException, TException; + default void alterFunction(String catName, String dbName, String funcName, Function newFunction) + throws InvalidObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support altering functions"); + } /** * Drop a function. @@ -2924,8 +3251,10 @@ void alterFunction(String catName, String dbName, String funcName, Function newF * @throws InvalidInputException not sure when this is thrown * @throws TException thrift transport error */ - void dropFunction(String dbName, String funcName) throws MetaException, - NoSuchObjectException, InvalidObjectException, InvalidInputException, TException; + default void dropFunction(String dbName, String funcName) throws MetaException, + NoSuchObjectException, InvalidObjectException, InvalidInputException, TException { + throw new UnsupportedOperationException("MetaStore client does not support dropping functions"); + } /** * Drop a function. @@ -2938,8 +3267,10 @@ void dropFunction(String dbName, String funcName) throws MetaException, * @throws InvalidInputException not sure when this is thrown * @throws TException thrift transport error */ - void dropFunction(String catName, String dbName, String funcName) throws MetaException, - NoSuchObjectException, InvalidObjectException, InvalidInputException, TException; + default void dropFunction(String catName, String dbName, String funcName) throws MetaException, + NoSuchObjectException, InvalidObjectException, InvalidInputException, TException { + throw new UnsupportedOperationException("MetaStore client does not support dropping functions"); + } /** * Get a function. @@ -2948,8 +3279,10 @@ void dropFunction(String catName, String dbName, String funcName) throws MetaExc * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ - Function getFunction(String dbName, String funcName) - throws MetaException, TException; + default Function getFunction(String dbName, String funcName) + throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support getting functions"); + } /** * Get a function. 
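Editorial illustration, not part of the patch: a minimal register-and-fetch round trip for the function APIs above, assuming the usual thrift-generated setters on Function; the UDF class name is made up.

  // Sketch only: register a Java UDF in the default database and read it back.
  static Function registerUdf(IMetaStoreClient client) throws TException {
    Function fn = new Function();
    fn.setDbName("default");
    fn.setFunctionName("my_upper");
    fn.setClassName("com.example.udf.MyUpper");   // hypothetical UDF implementation
    fn.setOwnerName("hive");
    fn.setOwnerType(PrincipalType.USER);
    fn.setFunctionType(FunctionType.JAVA);
    client.createFunction(fn);
    return client.getFunction("default", "my_upper");
  }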
@@ -2959,8 +3292,10 @@ Function getFunction(String dbName, String funcName) * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ - Function getFunction(String catName, String dbName, String funcName) - throws MetaException, TException; + default Function getFunction(String catName, String dbName, String funcName) + throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support getting functions"); + } /** * Get all functions matching a pattern @@ -2970,16 +3305,20 @@ Function getFunction(String catName, String dbName, String funcName) * @throws TException thrift transport error */ @Deprecated - List getFunctions(String dbName, String pattern) - throws MetaException, TException; + default List getFunctions(String dbName, String pattern) + throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support getting functions"); + } /** * Get all functions matching a pattern * @param functionRequest function request. * @throws TException thrift transport error */ - GetFunctionsResponse getFunctionsRequest(GetFunctionsRequest functionRequest) - throws TException; + default GetFunctionsResponse getFunctionsRequest(GetFunctionsRequest functionRequest) + throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting functions"); + } /** * Get all functions matching a pattern * @param catName catalog name. @@ -2989,8 +3328,10 @@ GetFunctionsResponse getFunctionsRequest(GetFunctionsRequest functionRequest) * @throws TException thrift transport error */ @Deprecated - List getFunctions(String catName, String dbName, String pattern) - throws MetaException, TException; + default List getFunctions(String catName, String dbName, String pattern) + throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support getting functions"); + } /** * Get all functions in the default catalog. @@ -2998,16 +3339,22 @@ List getFunctions(String catName, String dbName, String pattern) * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ - GetAllFunctionsResponse getAllFunctions() throws MetaException, TException; + default GetAllFunctionsResponse getAllFunctions() throws MetaException, TException { + return new GetAllFunctionsResponse(); + } - GetOpenTxnsResponse getOpenTxns() throws TException ; + default GetOpenTxnsResponse getOpenTxns() throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting open transactions"); + } /** * Get a structure that details valid transactions. * @return list of valid transactions * @throws TException */ - ValidTxnList getValidTxns() throws TException; + default ValidTxnList getValidTxns() throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting valid transactions"); + } /** * Get a structure that details valid transactions. @@ -3016,7 +3363,9 @@ List getFunctions(String catName, String dbName, String pattern) * @return list of valid transactions and also valid write IDs for each input table. * @throws TException */ - ValidTxnList getValidTxns(long currentTxn) throws TException; + default ValidTxnList getValidTxns(long currentTxn) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting valid transactions"); + } /** * Get a structure that details valid transactions. 
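Editorial illustration, not part of the patch: how a reader typically consumes the ValidTxnList returned above, assuming the isTxnValid/getHighWatermark/writeToString accessors on ValidTxnList.

  // Sketch only: snapshot the transaction state once and consult it while planning a read.
  static boolean isVisible(IMetaStoreClient client, long txnIdOfInterest) throws TException {
    ValidTxnList validTxns = client.getValidTxns();
    String serialized = validTxns.writeToString();  // e.g. handed to tasks via the job conf
    long highWatermark = validTxns.getHighWatermark();
    return txnIdOfInterest <= highWatermark && validTxns.isTxnValid(txnIdOfInterest);
  }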
@@ -3026,7 +3375,9 @@ List getFunctions(String catName, String dbName, String pattern) * @return list of valid transactions and also valid write IDs for each input table. * @throws TException */ - ValidTxnList getValidTxns(long currentTxn, List excludeTxnTypes) throws TException; + default ValidTxnList getValidTxns(long currentTxn, List excludeTxnTypes) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting valid transactions"); + } /** * Get a structure that details valid write ids. @@ -3034,7 +3385,9 @@ List getFunctions(String catName, String dbName, String pattern) * @return list of valid write ids for the given table * @throws TException */ - ValidWriteIdList getValidWriteIds(String fullTableName) throws TException; + default ValidWriteIdList getValidWriteIds(String fullTableName) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting valid write ids"); + } /** * Get a structure that details valid write ids. @@ -3043,7 +3396,9 @@ List getFunctions(String catName, String dbName, String pattern) * @return list of valid write ids for the given table * @throws TException */ - ValidWriteIdList getValidWriteIds(String fullTableName, Long writeId) throws TException; + default ValidWriteIdList getValidWriteIds(String fullTableName, Long writeId) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting valid write ids"); + } /** * Get a structure that details valid write ids list for all tables read by current txn. @@ -3053,15 +3408,19 @@ List getFunctions(String catName, String dbName, String pattern) * @return list of valid write ids for the given list of tables. * @throws TException */ - List getValidWriteIds(List tablesList, String validTxnList) - throws TException; + default List getValidWriteIds(List tablesList, String validTxnList) + throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting valid write ids"); + } /** * Persists minOpenWriteId list to identify obsolete directories eligible for cleanup * @param txnId transaction identifier * @param writeIds list of minOpenWriteId */ - void addWriteIdsToMinHistory(long txnId, Map writeIds) throws TException; + default void addWriteIdsToMinHistory(long txnId, Map writeIds) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support adding write ids to min history"); + } /** * Initiate a transaction. @@ -3071,7 +3430,9 @@ List getValidWriteIds(List tablesList, String validT * @return transaction identifier * @throws TException */ - long openTxn(String user) throws TException; + default long openTxn(String user) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support opening transactions"); + } /** * Initiate a transaction with given type. @@ -3080,7 +3441,9 @@ List getValidWriteIds(List tablesList, String validT * @return transaction identifier * @throws TException */ - long openTxn(String user, TxnType txnType) throws TException; + default long openTxn(String user, TxnType txnType) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support opening transactions with type"); + } /** * Initiate a repl replayed or hive replication transaction (dump/load). 
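Editorial illustration, not part of the patch: the open/commit/rollback lifecycle composed from openTxn above and the commitTxn/rollbackTxn methods declared further down.

  // Sketch only: open a transaction for the given user, do the work, then commit or roll back.
  static void runInTxn(IMetaStoreClient client, String user) throws TException {
    long txnId = client.openTxn(user);
    try {
      // ... perform the writes associated with txnId ...
      client.commitTxn(txnId);
    } catch (TException e) {
      client.rollbackTxn(txnId);
      throw e;
    }
  }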
@@ -3095,7 +3458,11 @@ List getValidWriteIds(List tablesList, String validT * @return transaction identifiers * @throws TException */ - List replOpenTxn(String replPolicy, List srcTxnIds, String user, TxnType txnType) throws TException; + default List replOpenTxn(String replPolicy, List srcTxnIds, String user, TxnType txnType) + throws TException { + throw new UnsupportedOperationException("MetaStore client does not support opening repl replayed or hive " + + "replication transactions"); + } /** * Initiate a batch of transactions. It is not guaranteed that the @@ -3122,7 +3489,9 @@ List getValidWriteIds(List tablesList, String validT * optimistically assuming that the result matches the request. * @throws TException */ - OpenTxnsResponse openTxns(String user, int numTxns) throws TException; + default OpenTxnsResponse openTxns(String user, int numTxns) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support opening transactions in batch"); + } /** * Rollback a transaction. This will also unlock any locks associated with @@ -3133,7 +3502,9 @@ List getValidWriteIds(List tablesList, String validT * deleted. * @throws TException */ - void rollbackTxn(long txnid) throws NoSuchTxnException, TException; + default void rollbackTxn(long txnid) throws NoSuchTxnException, TException { + throw new UnsupportedOperationException("MetaStore client does not support rolling back transactions"); + } /** * Rollback a transaction. This will also unlock any locks associated with @@ -3145,7 +3516,9 @@ List getValidWriteIds(List tablesList, String validT * deleted. * @throws TException */ - void rollbackTxn(AbortTxnRequest abortTxnRequest) throws NoSuchTxnException, TException; + default void rollbackTxn(AbortTxnRequest abortTxnRequest) throws NoSuchTxnException, TException { + throw new UnsupportedOperationException("MetaStore client does not support rolling back transactions"); + } /** * Rollback a transaction. This will also unlock any locks associated with @@ -3161,10 +3534,15 @@ List getValidWriteIds(List tablesList, String validT * deleted. * @throws TException */ - void replRollbackTxn(long srcTxnid, String replPolicy, TxnType txnType) throws NoSuchTxnException, TException; + default void replRollbackTxn(long srcTxnid, String replPolicy, TxnType txnType) throws NoSuchTxnException, TException { + throw new UnsupportedOperationException("MetaStore client does not support rolling back transactions"); + } - ReplayedTxnsForPolicyResult getReplayedTxnsForPolicy(String replPolicy) throws TException; + default ReplayedTxnsForPolicyResult getReplayedTxnsForPolicy(String replPolicy) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting replayed transactions " + + "for policy"); + } /** * Commit a transaction. This will also unlock any locks associated with @@ -3177,11 +3555,13 @@ List getValidWriteIds(List tablesList, String validT * aborted. This can result from the transaction timing out. * @throws TException */ - void commitTxn(long txnid) - throws NoSuchTxnException, TxnAbortedException, TException; + default void commitTxn(long txnid) + throws NoSuchTxnException, TxnAbortedException, TException { + throw new UnsupportedOperationException("MetaStore client does not support committing transactions"); + } /** - * Like commitTxn but it will atomically store as well a key and a value. This + * Like commitTxn, but it will atomically store as well a key and a value. 
This * can be useful for example to know if the transaction corresponding to * txnid has been committed by later querying with DESCRIBE EXTENDED TABLE. * TABLE_PARAMS from the metastore must already have a row with the TBL_ID @@ -3202,9 +3582,11 @@ void commitTxn(long txnid) * tableId and key are found in TABLE_PARAMS while updating. * @throws TException */ - void commitTxnWithKeyValue(long txnid, long tableId, + default void commitTxnWithKeyValue(long txnid, long tableId, String key, String value) throws NoSuchTxnException, - TxnAbortedException, TException; + TxnAbortedException, TException { + throw new UnsupportedOperationException("MetaStore client does not support committing transactions with key/value"); + } /** * Commit a transaction. This will also unlock any locks associated with @@ -3218,14 +3600,18 @@ void commitTxnWithKeyValue(long txnid, long tableId, * aborted. This can result from the transaction timing out. * @throws TException */ - void commitTxn(CommitTxnRequest rqst) - throws NoSuchTxnException, TxnAbortedException, TException; + default void commitTxn(CommitTxnRequest rqst) + throws NoSuchTxnException, TxnAbortedException, TException { + throw new UnsupportedOperationException("MetaStore client does not support committing transactions with request"); + } /** * Abort a list of transactions. This is for use by "ABORT TRANSACTIONS" in the grammar. * @throws TException */ - void abortTxns(List txnids) throws TException; + default void abortTxns(List txnids) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support aborting transactions"); + } /** * Abort a list of transactions with additional information of @@ -3233,7 +3619,9 @@ void commitTxn(CommitTxnRequest rqst) * @param abortTxnsRequest Information containing txnIds and error codes * @throws TException */ - void abortTxns(AbortTxnsRequest abortTxnsRequest) throws TException; + default void abortTxns(AbortTxnsRequest abortTxnsRequest) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support aborting transactions with request"); + } /** * Allocate a per table write ID and associate it with the given transaction. @@ -3242,7 +3630,9 @@ void commitTxn(CommitTxnRequest rqst) * @param tableName table to which the write ID to be allocated * @throws TException */ - long allocateTableWriteId(long txnId, String dbName, String tableName) throws TException; + default long allocateTableWriteId(long txnId, String dbName, String tableName) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support allocating table write IDs"); + } /** * Allocate a per table write ID and associate it with the given transaction. @@ -3252,7 +3642,10 @@ void commitTxn(CommitTxnRequest rqst) * @param reallocate should we reallocate already mapped writeId (if true) or reuse (if false) * @throws TException */ - long allocateTableWriteId(long txnId, String dbName, String tableName, boolean reallocate) throws TException; + default long allocateTableWriteId(long txnId, String dbName, String tableName, boolean reallocate) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support allocating table write IDs " + + "with reallocate option"); + } /** * Replicate Table Write Ids state to mark aborted write ids and writeid high water mark. @@ -3262,8 +3655,10 @@ void commitTxn(CommitTxnRequest rqst) * @param partNames List of partitions being written. 
* @throws TException in case of failure to replicate the writeid state */ - void replTableWriteIdState(String validWriteIdList, String dbName, String tableName, List partNames) - throws TException; + default void replTableWriteIdState(String validWriteIdList, String dbName, String tableName, List partNames) + throws TException { + throw new UnsupportedOperationException("MetaStore client does not support replicating table write IDs state"); + } /** * Allocate a per table write ID and associate it with the given transaction. @@ -3272,7 +3667,10 @@ void replTableWriteIdState(String validWriteIdList, String dbName, String tableN * @param tableName table to which the write ID to be allocated * @throws TException */ - List allocateTableWriteIdsBatch(List txnIds, String dbName, String tableName) throws TException; + default List allocateTableWriteIdsBatch(List txnIds, String dbName, String tableName) + throws TException { + throw new UnsupportedOperationException("MetaStore client does not support allocating table write IDs in batch"); + } /** * Allocate a per table write ID and associate it with the given transaction. Used by replication load task. @@ -3282,8 +3680,11 @@ void replTableWriteIdState(String validWriteIdList, String dbName, String tableN * @param srcTxnToWriteIdList List of txn to write id map sent from the source cluster. * @throws TException */ - List replAllocateTableWriteIdsBatch(String dbName, String tableName, String replPolicy, - List srcTxnToWriteIdList) throws TException; + default List replAllocateTableWriteIdsBatch(String dbName, String tableName, String replPolicy, + List srcTxnToWriteIdList) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support replicating allocating table write " + + "IDs in batch"); + } /** * Get the maximum allocated writeId for the given table @@ -3292,7 +3693,9 @@ List replAllocateTableWriteIdsBatch(String dbName, String tableNam * @return the maximum allocated writeId * @throws TException */ - long getMaxAllocatedWriteId(String dbName, String tableName) throws TException; + default long getMaxAllocatedWriteId(String dbName, String tableName) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting maximum allocated write IDs"); + } /** * Seed an ACID table with the given writeId. If the table already contains writes it will fail. @@ -3301,7 +3704,9 @@ List replAllocateTableWriteIdsBatch(String dbName, String tableNam * @param seedWriteId the start value of writeId * @throws TException */ - void seedWriteId(String dbName, String tableName, long seedWriteId) throws TException; + default void seedWriteId(String dbName, String tableName, long seedWriteId) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support seeding write IDs"); + } /** * Seed or increment the global txnId to the given value. @@ -3309,7 +3714,9 @@ List replAllocateTableWriteIdsBatch(String dbName, String tableNam * @param seedTxnId The seed value for the next transactions * @throws TException */ - void seedTxnId(long seedTxnId) throws TException; + default void seedTxnId(long seedTxnId) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support seeding transaction IDs"); + } /** * Show the list of currently open transactions. 
This is for use by "show transactions" in the @@ -3318,7 +3725,9 @@ List replAllocateTableWriteIdsBatch(String dbName, String tableNam * @return List of currently opened transactions, included aborted ones. * @throws TException */ - GetOpenTxnsInfoResponse showTxns() throws TException; + default GetOpenTxnsInfoResponse showTxns() throws TException { + throw new UnsupportedOperationException("MetaStore client does not support showing transactions"); + } /** * Request a set of locks. All locks needed for a particular query, DML, @@ -3349,8 +3758,10 @@ List replAllocateTableWriteIdsBatch(String dbName, String tableNam * @throws TException */ @RetrySemantics.CannotRetry - LockResponse lock(LockRequest request) - throws NoSuchTxnException, TxnAbortedException, TException; + default LockResponse lock(LockRequest request) + throws NoSuchTxnException, TxnAbortedException, TException { + throw new UnsupportedOperationException("MetaStore client does not support locking"); + } /** * Check the status of a set of locks requested via a @@ -3373,9 +3784,11 @@ LockResponse lock(LockRequest request) * This can result from the lock timing out and being unlocked by the system. * @throws TException */ - LockResponse checkLock(long lockid) + default LockResponse checkLock(long lockid) throws NoSuchTxnException, TxnAbortedException, NoSuchLockException, - TException; + TException { + throw new UnsupportedOperationException("MetaStore client does not support checking locks"); + } /** * Unlock a set of locks. This can only be called when the locks are not @@ -3388,8 +3801,10 @@ LockResponse checkLock(long lockid) * transaction. * @throws TException */ - void unlock(long lockid) - throws NoSuchLockException, TxnOpenException, TException; + default void unlock(long lockid) + throws NoSuchLockException, TxnOpenException, TException { + throw new UnsupportedOperationException("MetaStore client does not support unlocking locks"); + } /** * Show all currently held and waiting locks. @@ -3397,7 +3812,9 @@ void unlock(long lockid) * @return List of currently held and waiting locks. * @throws TException */ - ShowLocksResponse showLocks(ShowLocksRequest showLocksRequest) throws TException; + default ShowLocksResponse showLocks(ShowLocksRequest showLocksRequest) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support showing locks"); + } /** * Send a heartbeat to indicate that the client holding these locks (if @@ -3419,9 +3836,11 @@ void unlock(long lockid) * This can result from the lock timing out and being unlocked by the system. * @throws TException */ - void heartbeat(long txnid, long lockid) + default void heartbeat(long txnid, long lockid) throws NoSuchLockException, NoSuchTxnException, TxnAbortedException, - TException; + TException { + throw new UnsupportedOperationException("MetaStore client does not support heartbeating"); + } /** * Send heartbeats for a range of transactions. This is for the streaming ingest client that @@ -3433,7 +3852,9 @@ void heartbeat(long txnid, long lockid) * have already been closed) and which were aborted. * @throws TException */ - HeartbeatTxnRangeResponse heartbeatTxnRange(long min, long max) throws TException; + default HeartbeatTxnRangeResponse heartbeatTxnRange(long min, long max) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support heartbeating a range of transactions"); + } /** * Send a request to compact a table or partition. 
This will not block until the compaction is @@ -3445,7 +3866,9 @@ void heartbeat(long txnid, long lockid) * a compaction request. * @throws TException */ - CompactionResponse compact2(CompactionRequest request) throws TException; + default CompactionResponse compact2(CompactionRequest request) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support compacting tables or partitions"); + } /** * Get a list of all compactions. @@ -3453,12 +3876,16 @@ void heartbeat(long txnid, long lockid) * in progress, and finished but waiting to clean the existing files. * @throws TException */ - ShowCompactResponse showCompactions() throws TException; + default ShowCompactResponse showCompactions() throws TException { + throw new UnsupportedOperationException("MetaStore client does not support showing compactions"); + } /** * Get a list of compactions for the given request object. */ - ShowCompactResponse showCompactions(ShowCompactRequest request) throws TException; + default ShowCompactResponse showCompactions(ShowCompactRequest request) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support showing compactions with request"); + } /** * Submit a request for performing cleanup of output directory. This is particularly @@ -3470,8 +3897,10 @@ void heartbeat(long txnid, long lockid) * @param txnId The transaction ID of the query. * @throws TException */ - boolean submitForCleanup(CompactionRequest rqst, long highestWriteId, - long txnId) throws TException; + default boolean submitForCleanup(CompactionRequest rqst, long highestWriteId, + long txnId) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support submitting for cleanup"); + } /** * Get one latest record of SUCCEEDED or READY_FOR_CLEANING compaction for a table/partition. @@ -3484,8 +3913,10 @@ boolean submitForCleanup(CompactionRequest rqst, long highestWriteId, * partition specified by the request. * @throws TException */ - GetLatestCommittedCompactionInfoResponse getLatestCommittedCompactionInfo(GetLatestCommittedCompactionInfoRequest request) - throws TException; + default GetLatestCommittedCompactionInfoResponse getLatestCommittedCompactionInfo(GetLatestCommittedCompactionInfoRequest request) + throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting latest committed compaction info"); + } /** * Send a list of partitions to the metastore to indicate which partitions were loaded @@ -3497,9 +3928,11 @@ GetLatestCommittedCompactionInfoResponse getLatestCommittedCompactionInfo(GetLat * @param partNames partition name, as constructed by Warehouse.makePartName * @throws TException */ - void addDynamicPartitions(long txnId, long writeId, String dbName, String tableName, List partNames, + default void addDynamicPartitions(long txnId, long writeId, String dbName, String tableName, List partNames, DataOperationType operationType) - throws TException; + throws TException { + throw new UnsupportedOperationException("MetaStore client does not support adding dynamic partitions"); + } /** * Performs the commit/rollback to the metadata storage for insert operator from external storage handler. 
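Editorial illustration, not part of the patch: submitting a major compaction through compact2 above; the three-argument CompactionRequest constructor and the accepted flag on CompactionResponse are assumptions about the thrift-generated classes.

  // Sketch only: request a major compaction of one partition and check whether it was enqueued.
  static boolean requestMajorCompaction(IMetaStoreClient client) throws TException {
    CompactionRequest rqst = new CompactionRequest("default", "orders", CompactionType.MAJOR);
    rqst.setPartitionname("ds=2024-01-01");            // assumed generated setter
    CompactionResponse resp = client.compact2(rqst);
    return resp.isAccepted();                          // false: already enqueued or running
  }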
@@ -3508,16 +3941,23 @@ void addDynamicPartitions(long txnId, long writeId, String dbName, String tableN * * @throws MetaException */ - void insertTable(Table table, boolean overwrite) throws MetaException; + default void insertTable(Table table, boolean overwrite) throws MetaException { + throw new UnsupportedOperationException("MetaStore client does not support inserting tables"); + } /** * Checks if there is a conflicting transaction * @param txnId * @return latest txnId in conflict */ - long getLatestTxnIdInConflict(long txnId) throws TException; + default long getLatestTxnIdInConflict(long txnId) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting latest transaction id " + + "in conflict"); + } - GetDatabaseObjectsResponse get_databases_req(GetDatabaseObjectsRequest request) throws TException; + default GetDatabaseObjectsResponse get_databases_req(GetDatabaseObjectsRequest request) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting database objects"); + } /** * A filter provided by the client that determines if a given notification event should be @@ -3546,8 +3986,10 @@ interface NotificationFilter { * @throws TException */ @InterfaceAudience.LimitedPrivate({"HCatalog"}) - NotificationEventResponse getNextNotification(long lastEventId, int maxEvents, - NotificationFilter filter) throws TException; + default NotificationEventResponse getNextNotification(long lastEventId, int maxEvents, + NotificationFilter filter) throws TException { + return new NotificationEventResponse(); + } /** * Get the next set of notifications from the database. @@ -3567,8 +4009,10 @@ NotificationEventResponse getNextNotification(long lastEventId, int maxEvents, * @throws TException */ @InterfaceAudience.LimitedPrivate({"HCatalog"}) - NotificationEventResponse getNextNotification(NotificationEventRequest request, - boolean allowGapsInEventIds, NotificationFilter filter) throws TException; + default NotificationEventResponse getNextNotification(NotificationEventRequest request, + boolean allowGapsInEventIds, NotificationFilter filter) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting next notification with request"); + } /** * Get the last used notification event id. @@ -3576,7 +4020,9 @@ NotificationEventResponse getNextNotification(NotificationEventRequest request, * @throws TException */ @InterfaceAudience.LimitedPrivate({"HCatalog"}) - CurrentNotificationEventId getCurrentNotificationEventId() throws TException; + default CurrentNotificationEventId getCurrentNotificationEventId() throws TException { + return new CurrentNotificationEventId(); + } /** * Get the number of events from given eventID for the input database. @@ -3584,8 +4030,10 @@ NotificationEventResponse getNextNotification(NotificationEventRequest request, * @throws TException */ @InterfaceAudience.LimitedPrivate({"HCatalog"}) - NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst) - throws TException; + default NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst) + throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting notification events count"); + } /** * Request that the metastore fire an event. 
Currently this is only supported for DML @@ -3596,7 +4044,9 @@ NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCou */ @InterfaceAudience.LimitedPrivate({"Apache Hive, HCatalog"}) - FireEventResponse fireListenerEvent(FireEventRequest request) throws TException; + default FireEventResponse fireListenerEvent(FireEventRequest request) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support firing events"); + } /** * Add a event related to write operations in an ACID table. @@ -3604,7 +4054,9 @@ NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCou * @throws TException */ @InterfaceAudience.LimitedPrivate({"Apache Hive, HCatalog"}) - void addWriteNotificationLog(WriteNotificationLogRequest rqst) throws TException; + default void addWriteNotificationLog(WriteNotificationLogRequest rqst) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support adding write notification log"); + } /** * Add a batch of event related to write operations in an ACID table. @@ -3612,7 +4064,9 @@ NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCou * @throws TException */ @InterfaceAudience.LimitedPrivate({"Apache Hive, HCatalog"}) - void addWriteNotificationLogInBatch(WriteNotificationLogBatchRequest rqst) throws TException; + default void addWriteNotificationLogInBatch(WriteNotificationLogBatchRequest rqst) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support adding write notification log in batch"); + } class IncompatibleMetastoreException extends MetaException { public IncompatibleMetastoreException(String message) { @@ -3629,8 +4083,11 @@ public IncompatibleMetastoreException(String message) { * @throws MetaException * @throws TException */ - GetPrincipalsInRoleResponse get_principals_in_role(GetPrincipalsInRoleRequest getPrincRoleReq) - throws MetaException, TException; + default GetPrincipalsInRoleResponse get_principals_in_role(GetPrincipalsInRoleRequest getPrincRoleReq) + throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support getting principals in role"); + } + /** * get all role-grants for roles that have been granted to given principal @@ -3641,8 +4098,10 @@ GetPrincipalsInRoleResponse get_principals_in_role(GetPrincipalsInRoleRequest ge * @throws MetaException * @throws TException */ - GetRoleGrantsForPrincipalResponse get_role_grants_for_principal( - GetRoleGrantsForPrincipalRequest getRolePrincReq) throws MetaException, TException; + default GetRoleGrantsForPrincipalResponse get_role_grants_for_principal( + GetRoleGrantsForPrincipalRequest getRolePrincReq) throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support getting role grants for principal"); + } /** * Get aggregated column stats for a set of partitions. 
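Editorial illustration, not part of the patch: incremental polling of the notification log via the methods above. A client left on the new defaults returns an empty response here, so the null guard matters; getEvents and getEventId are assumed to be the thrift-generated accessors.

  // Sketch only: read everything that happened after the last event we have seen.
  static long pollNotifications(IMetaStoreClient client, long lastSeenEventId) throws TException {
    NotificationEventResponse response = client.getNextNotification(lastSeenEventId, 100, null);
    if (response.getEvents() != null) {
      for (NotificationEvent event : response.getEvents()) {
        lastSeenEventId = Math.max(lastSeenEventId, event.getEventId());
        // ... replicate or react to the event ...
      }
    }
    return lastSeenEventId;
  }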
@@ -3656,12 +4115,18 @@ GetRoleGrantsForPrincipalResponse get_role_grants_for_principal( * @throws MetaException error accessing the RDBMS * @throws TException thrift transport exception */ - AggrStats getAggrColStatsFor(String dbName, String tblName, - List colNames, List partName, String engine) throws NoSuchObjectException, MetaException, TException; + default AggrStats getAggrColStatsFor(String dbName, String tblName, + List colNames, List partName, String engine) + throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support getting aggregated column stats " + + "for partitions"); + } - AggrStats getAggrColStatsFor(String dbName, String tblName, - List colNames, List partName, - String engine, String writeIdList) throws NoSuchObjectException, MetaException, TException; + default AggrStats getAggrColStatsFor(String dbName, String tblName, List colNames, + List partName, String engine, String writeIdList) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting aggregated column stats " + + "for partitions with writeIdList"); + } /** * Get aggregated column stats for a set of partitions. @@ -3676,15 +4141,19 @@ AggrStats getAggrColStatsFor(String dbName, String tblName, * @throws MetaException error accessing the RDBMS * @throws TException thrift transport exception */ - AggrStats getAggrColStatsFor(String catName, String dbName, String tblName, + default AggrStats getAggrColStatsFor(String catName, String dbName, String tblName, List colNames, List partNames, String engine) - throws NoSuchObjectException, MetaException, TException; + throws NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support getting aggregated column stats for " + + "partitions with catalog name"); + } - AggrStats getAggrColStatsFor(String catName, String dbName, String tblName, - List colNames, List partNames, - String engine, String writeIdList) - throws NoSuchObjectException, MetaException, TException; + default AggrStats getAggrColStatsFor(String catName, String dbName, String tblName, List colNames, + List partNames, String engine, String writeIdList) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting aggregated column stats for " + + "partitions with catalog name and writeIdList"); + } /** * Set table or partition column statistics. * @param request request object, contains all the table, partition, and statistics information @@ -3695,38 +4164,53 @@ AggrStats getAggrColStatsFor(String catName, String dbName, String tblName, * @throws TException thrift transport error. * @throws InvalidInputException the input is invalid (eg, a null table name) */ - boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) - throws NoSuchObjectException, InvalidObjectException, MetaException, TException, InvalidInputException; + default boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException, InvalidInputException { + throw new UnsupportedOperationException("MetaStore client does not support setting partition column statistics"); + } /** * Flush any catalog objects held by the metastore implementation. Note that this does not * flush statistics objects. This should be called at the beginning of each query. 
*/ - void flushCache(); + default void flushCache() {} /** * Gets file metadata, as cached by metastore, for respective file IDs. * The metadata that is not cached in metastore may be missing. */ - Iterable> getFileMetadata(List fileIds) throws TException; + default Iterable> getFileMetadata(List fileIds) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting file metadata"); + } - Iterable> getFileMetadataBySarg( - List fileIds, ByteBuffer sarg, boolean doGetFooters) throws TException; + default Iterable> getFileMetadataBySarg( + List fileIds, ByteBuffer sarg, boolean doGetFooters) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting file metadata by sarg"); + } /** * Cleares the file metadata cache for respective file IDs. */ - void clearFileMetadata(List fileIds) throws TException; + default void clearFileMetadata(List fileIds) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support clearing file metadata"); + } /** * Adds file metadata for respective file IDs to metadata cache in metastore. */ - void putFileMetadata(List fileIds, List metadata) throws TException; + default void putFileMetadata(List fileIds, List metadata) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support putting file metadata"); + } - boolean isSameConfObj(Configuration c); + default boolean isSameConfObj(Configuration c) { + throw new UnsupportedOperationException("MetaStore client does not support checking if the configuration object " + + "is the same"); + } - boolean cacheFileMetadata(String dbName, String tableName, String partName, - boolean allParts) throws TException; + default boolean cacheFileMetadata(String dbName, String tableName, String partName, + boolean allParts) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support caching file metadata"); + } /** * Get a primary key for a table. @@ -3736,8 +4220,10 @@ boolean cacheFileMetadata(String dbName, String tableName, String partName, * @throws NoSuchObjectException no primary key exists on this table, or maybe no such table * @throws TException thrift transport error */ - List getPrimaryKeys(PrimaryKeysRequest request) - throws MetaException, NoSuchObjectException, TException; + default List getPrimaryKeys(PrimaryKeysRequest request) + throws MetaException, NoSuchObjectException, TException { + throw new UnsupportedOperationException("MetaStore client does not support getting primary keys"); + } /** * Get a foreign key for a table. @@ -3747,8 +4233,10 @@ List getPrimaryKeys(PrimaryKeysRequest request) * @throws NoSuchObjectException no foreign key exists on this table, or maybe no such table * @throws TException thrift transport error */ - List getForeignKeys(ForeignKeysRequest request) throws MetaException, - NoSuchObjectException, TException; + default List getForeignKeys(ForeignKeysRequest request) throws MetaException, + NoSuchObjectException, TException { + throw new UnsupportedOperationException("MetaStore client does not support getting foreign keys"); + } /** * Get a unique constraint for a table. 
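Editorial illustration, not part of the patch: reading the declared primary key of a table through getPrimaryKeys above; PrimaryKeysRequest(dbName, tableName) and getColumn_name() are assumptions about the thrift-generated classes, and the table name is made up.

  // Sketch only: list the primary-key columns of default.orders in key_seq order.
  static List<String> primaryKeyColumns(IMetaStoreClient client) throws TException {
    List<String> columns = new ArrayList<>();
    for (SQLPrimaryKey col : client.getPrimaryKeys(new PrimaryKeysRequest("default", "orders"))) {
      columns.add(col.getColumn_name());               // assumed generated accessor
    }
    return columns;
  }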
@@ -3758,8 +4246,10 @@ List getForeignKeys(ForeignKeysRequest request) throws MetaExcept * @throws NoSuchObjectException no unique constraint on this table, or maybe no such table * @throws TException thrift transport error */ - List getUniqueConstraints(UniqueConstraintsRequest request) throws MetaException, - NoSuchObjectException, TException; + default List getUniqueConstraints(UniqueConstraintsRequest request) throws MetaException, + NoSuchObjectException, TException { + throw new UnsupportedOperationException("MetaStore client does not support getting unique constraints"); + } /** * Get a not null constraint for a table. @@ -3769,14 +4259,18 @@ List getUniqueConstraints(UniqueConstraintsRequest request) * @throws NoSuchObjectException no not null constraint on this table, or maybe no such table * @throws TException thrift transport error */ - List getNotNullConstraints(NotNullConstraintsRequest request) throws MetaException, - NoSuchObjectException, TException; + default List getNotNullConstraints(NotNullConstraintsRequest request) throws MetaException, + NoSuchObjectException, TException { + return Collections.emptyList(); + } - List getDefaultConstraints(DefaultConstraintsRequest request) throws MetaException, - NoSuchObjectException, TException; + default List getDefaultConstraints(DefaultConstraintsRequest request) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting default constraints"); + } - List getCheckConstraints(CheckConstraintsRequest request) throws MetaException, - NoSuchObjectException, TException; + default List getCheckConstraints(CheckConstraintsRequest request) throws TException { + return Collections.emptyList(); + } /** * Get all constraints of given table @@ -3786,17 +4280,21 @@ List getCheckConstraints(CheckConstraintsRequest request) th * @throws NoSuchObjectException * @throws TException */ - SQLAllTableConstraints getAllTableConstraints(AllTableConstraintsRequest request) - throws MetaException, NoSuchObjectException, TException; + default SQLAllTableConstraints getAllTableConstraints(AllTableConstraintsRequest request) + throws MetaException, NoSuchObjectException, TException { + return new SQLAllTableConstraints(); + } - void createTableWithConstraints( + default void createTableWithConstraints( org.apache.hadoop.hive.metastore.api.Table tTbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints, List defaultConstraints, List checkConstraints) - throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException; + throws TException { + throw new UnsupportedOperationException("MetaStore client does not support creating table with constraints"); + } /** * Drop a constraint. This can be used for primary keys, foreign keys, unique constraints, or @@ -3808,8 +4306,10 @@ void createTableWithConstraints( * @throws NoSuchObjectException no such constraint exists * @throws TException thrift transport error */ - void dropConstraint(String dbName, String tableName, String constraintName) - throws MetaException, NoSuchObjectException, TException; + default void dropConstraint(String dbName, String tableName, String constraintName) + throws MetaException, NoSuchObjectException, TException { + throw new UnsupportedOperationException("MetaStore client does not support dropping constraints"); + } /** * Drop a constraint. 
This can be used for primary keys, foreign keys, unique constraints, or @@ -3822,8 +4322,10 @@ void dropConstraint(String dbName, String tableName, String constraintName) * @throws NoSuchObjectException no such constraint exists * @throws TException thrift transport error */ - void dropConstraint(String catName, String dbName, String tableName, String constraintName) - throws MetaException, NoSuchObjectException, TException; + default void dropConstraint(String catName, String dbName, String tableName, String constraintName) + throws MetaException, NoSuchObjectException, TException { + throw new UnsupportedOperationException("MetaStore client does not support dropping constraints with catalog name"); + } /** @@ -3833,8 +4335,10 @@ void dropConstraint(String catName, String dbName, String tableName, String cons * @throws NoSuchObjectException no such table exists * @throws TException thrift transport error */ - void addPrimaryKey(List primaryKeyCols) throws - MetaException, NoSuchObjectException, TException; + default void addPrimaryKey(List primaryKeyCols) throws + MetaException, NoSuchObjectException, TException { + throw new UnsupportedOperationException("MetaStore client does not support adding primary keys"); + } /** * Add a foreign key @@ -3843,8 +4347,10 @@ void addPrimaryKey(List primaryKeyCols) throws * @throws NoSuchObjectException one of the tables in the foreign key does not exist. * @throws TException thrift transport error */ - void addForeignKey(List foreignKeyCols) throws - MetaException, NoSuchObjectException, TException; + default void addForeignKey(List foreignKeyCols) throws + MetaException, NoSuchObjectException, TException { + throw new UnsupportedOperationException("MetaStore client does not support adding foreign keys"); + } /** * Add a unique constraint @@ -3853,8 +4359,10 @@ void addForeignKey(List foreignKeyCols) throws * @throws NoSuchObjectException no such table * @throws TException thrift transport error */ - void addUniqueConstraint(List uniqueConstraintCols) throws - MetaException, NoSuchObjectException, TException; + default void addUniqueConstraint(List uniqueConstraintCols) throws + MetaException, NoSuchObjectException, TException { + throw new UnsupportedOperationException("MetaStore client does not support adding unique constraints"); + } /** * Add a not null constraint @@ -3864,14 +4372,18 @@ void addUniqueConstraint(List uniqueConstraintCols) throws * @throws NoSuchObjectException no such table * @throws TException thrift transport error */ - void addNotNullConstraint(List notNullConstraintCols) throws - MetaException, NoSuchObjectException, TException; + default void addNotNullConstraint(List notNullConstraintCols) throws + MetaException, NoSuchObjectException, TException { + throw new UnsupportedOperationException("MetaStore client does not support adding not null constraints"); + } - void addDefaultConstraint(List defaultConstraints) throws - MetaException, NoSuchObjectException, TException; + default void addDefaultConstraint(List defaultConstraints) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support adding default constraints"); + } - void addCheckConstraint(List checkConstraints) throws - MetaException, NoSuchObjectException, TException; + default void addCheckConstraint(List checkConstraints) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support adding check constraints"); + } /** * Gets the unique id of the backing database instance used for storing 
metadata @@ -3879,59 +4391,83 @@ void addCheckConstraint(List checkConstraints) throws * @throws MetaException if HMS is not able to fetch the UUID or if there are multiple UUIDs found in the database * @throws TException in case of Thrift errors */ - String getMetastoreDbUuid() throws MetaException, TException; + default String getMetastoreDbUuid() throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support getting metastore db uuid"); + } - void createResourcePlan(WMResourcePlan resourcePlan, String copyFromName) - throws InvalidObjectException, MetaException, TException; + default void createResourcePlan(WMResourcePlan resourcePlan, String copyFromName) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support creating resource plans"); + } - WMFullResourcePlan getResourcePlan(String resourcePlanName, String ns) - throws NoSuchObjectException, MetaException, TException; + default WMFullResourcePlan getResourcePlan(String resourcePlanName, String ns) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting resource plans"); + } - List getAllResourcePlans(String ns) - throws NoSuchObjectException, MetaException, TException; + default List getAllResourcePlans(String ns) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting all resource plans"); + } - void dropResourcePlan(String resourcePlanName, String ns) - throws NoSuchObjectException, MetaException, TException; + default void dropResourcePlan(String resourcePlanName, String ns) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support dropping resource plans"); + } - WMFullResourcePlan alterResourcePlan(String resourcePlanName, String ns, WMNullableResourcePlan resourcePlan, - boolean canActivateDisabled, boolean isForceDeactivate, boolean isReplace) - throws NoSuchObjectException, InvalidObjectException, MetaException, TException; + default WMFullResourcePlan alterResourcePlan(String resourcePlanName, String ns, WMNullableResourcePlan resourcePlan, + boolean canActivateDisabled, boolean isForceDeactivate, boolean isReplace) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support altering resource plans"); + } - WMFullResourcePlan getActiveResourcePlan(String ns) throws MetaException, TException; + default WMFullResourcePlan getActiveResourcePlan(String ns) throws TException { + return new WMFullResourcePlan(); + } - WMValidateResourcePlanResponse validateResourcePlan(String resourcePlanName, String ns) - throws NoSuchObjectException, InvalidObjectException, MetaException, TException; + default WMValidateResourcePlanResponse validateResourcePlan(String resourcePlanName, String ns) throws TException { + throw new UnsupportedOperationException("this method is not supported"); + } - void createWMTrigger(WMTrigger trigger) - throws InvalidObjectException, MetaException, TException; + default void createWMTrigger(WMTrigger trigger) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support creating WM triggers"); + } - void alterWMTrigger(WMTrigger trigger) - throws NoSuchObjectException, InvalidObjectException, MetaException, TException; + default void alterWMTrigger(WMTrigger trigger) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support altering WM triggers"); + } - void dropWMTrigger(String 
resourcePlanName, String triggerName, String ns) - throws NoSuchObjectException, MetaException, TException; + default void dropWMTrigger(String resourcePlanName, String triggerName, String ns) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support dropping WM triggers"); + } - List getTriggersForResourcePlan(String resourcePlan, String ns) - throws NoSuchObjectException, MetaException, TException; + default List getTriggersForResourcePlan(String resourcePlan, String ns) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting WM triggers for resource plans"); + } - void createWMPool(WMPool pool) - throws NoSuchObjectException, InvalidObjectException, MetaException, TException; + default void createWMPool(WMPool pool) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support creating WM pools"); + } - void alterWMPool(WMNullablePool pool, String poolPath) - throws NoSuchObjectException, InvalidObjectException, TException; + default void alterWMPool(WMNullablePool pool, String poolPath) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support altering WM pools"); + } - void dropWMPool(String resourcePlanName, String poolPath, String ns) - throws TException; + default void dropWMPool(String resourcePlanName, String poolPath, String ns) + throws TException { + throw new UnsupportedOperationException("MetaStore client does not support dropping WM pools"); + } - void createOrUpdateWMMapping(WMMapping mapping, boolean isUpdate) - throws TException; + default void createOrUpdateWMMapping(WMMapping mapping, boolean isUpdate) + throws TException { + throw new UnsupportedOperationException("MetaStore client does not support creating or updating WM mappings"); + } - void dropWMMapping(WMMapping mapping) - throws TException; + default void dropWMMapping(WMMapping mapping) + throws TException { + throw new UnsupportedOperationException("MetaStore client does not support dropping WM mappings"); + } - void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerName, - String poolPath, boolean shouldDrop, String ns) throws AlreadyExistsException, NoSuchObjectException, - InvalidObjectException, MetaException, TException; + default void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerName, + String poolPath, boolean shouldDrop, String ns) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support creating or dropping trigger " + + "to pool mappings"); + } /** * Create a new schema. This is really a schema container, as there will be specific versions @@ -3942,7 +4478,9 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - void createISchema(ISchema schema) throws TException; + default void createISchema(ISchema schema) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support creating schemas"); + } /** * Alter an existing schema. 
@@ -3954,7 +4492,9 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - void alterISchema(String catName, String dbName, String schemaName, ISchema newSchema) throws TException; + default void alterISchema(String catName, String dbName, String schemaName, ISchema newSchema) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support altering schemas"); + } /** * Fetch a schema. @@ -3966,7 +4506,9 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - ISchema getISchema(String catName, String dbName, String name) throws TException; + default ISchema getISchema(String catName, String dbName, String name) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting schemas"); + } /** * Drop an existing schema. If there are schema versions of this, this call will fail. @@ -3978,7 +4520,9 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - void dropISchema(String catName, String dbName, String name) throws TException; + default void dropISchema(String catName, String dbName, String name) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support dropping schemas"); + } /** * Add a new version to an existing schema. @@ -3988,7 +4532,9 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - void addSchemaVersion(SchemaVersion schemaVersion) throws TException; + default void addSchemaVersion(SchemaVersion schemaVersion) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support adding schema versions"); + } /** * Get a specific version of a schema. @@ -4000,7 +4546,10 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - SchemaVersion getSchemaVersion(String catName, String dbName, String schemaName, int version) throws TException; + default SchemaVersion getSchemaVersion(String catName, String dbName, String schemaName, int version) + throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting schema versions"); + } /** * Get the latest version of a schema. @@ -4013,7 +4562,9 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - SchemaVersion getSchemaLatestVersion(String catName, String dbName, String schemaName) throws TException; + default SchemaVersion getSchemaLatestVersion(String catName, String dbName, String schemaName) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting latest schema version"); + } /** * Get all the extant versions of a schema. 
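Editorial illustration, not part of the patch: fetching schema versions through the methods above, using the signatures exactly as declared; the catalog, database, and schema names are made up.

  // Sketch only: compare the latest registered version of a schema with the full history.
  static void inspectSchema(IMetaStoreClient client) throws TException {
    SchemaVersion latest = client.getSchemaLatestVersion("hive", "default", "customer_schema");
    List<SchemaVersion> all = client.getSchemaAllVersions("hive", "default", "customer_schema");
    // ... diff 'latest' against earlier entries in 'all' ...
  }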
@@ -4026,7 +4577,9 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - List getSchemaAllVersions(String catName, String dbName, String schemaName) throws TException; + default List getSchemaAllVersions(String catName, String dbName, String schemaName) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting all schema versions"); + } /** * Drop a version of a schema. Given that versions are supposed to be immutable you should @@ -4040,7 +4593,9 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - void dropSchemaVersion(String catName, String dbName, String schemaName, int version) throws TException; + default void dropSchemaVersion(String catName, String dbName, String schemaName, int version) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support dropping schema versions"); + } /** * Find all schema versions that have columns that match a query. @@ -4050,7 +4605,9 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - FindSchemasByColsResp getSchemaByCols(FindSchemasByColsRqst rqst) throws TException; + default FindSchemasByColsResp getSchemaByCols(FindSchemasByColsRqst rqst) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting schemas by columns"); + } /** * Map a schema version to a serde. This mapping is one-to-one, thus this will destroy any @@ -4065,7 +4622,10 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - void mapSchemaVersionToSerde(String catName, String dbName, String schemaName, int version, String serdeName) throws TException; + default void mapSchemaVersionToSerde(String catName, String dbName, String schemaName, int version, String serdeName) + throws TException { + throw new UnsupportedOperationException("MetaStore client does not support mapping schema versions to serdes"); + } /** * Set the state of a schema version. @@ -4079,7 +4639,10 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - void setSchemaVersionState(String catName, String dbName, String schemaName, int version, SchemaVersionState state) throws TException; + default void setSchemaVersionState(String catName, String dbName, String schemaName, int version, + SchemaVersionState state) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support setting schema version state"); + } /** * Add a serde. This is primarily intended for use with SchemaRegistry objects, since serdes @@ -4089,7 +4652,9 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - void addSerDe(SerDeInfo serDeInfo) throws TException; + default void addSerDe(SerDeInfo serDeInfo) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support adding serdes"); + } /** * Fetch a serde. 
This is primarily intended for use with SchemaRegistry objects, since serdes @@ -4100,7 +4665,9 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - SerDeInfo getSerDe(String serDeName) throws TException; + default SerDeInfo getSerDe(String serDeName) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting serdes"); + } /** * Acquire the materialization rebuild lock for a given view. We need to specify the fully @@ -4112,7 +4679,10 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @return the response from the metastore, where the lock id is equal to the txn id and * the status can be either ACQUIRED or NOT ACQUIRED */ - LockResponse lockMaterializationRebuild(String dbName, String tableName, long txnId) throws TException; + default LockResponse lockMaterializationRebuild(String dbName, String tableName, long txnId) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support acquiring materialization rebuild lock"); + } + /** * Method to refresh the acquisition of a given materialization rebuild lock. @@ -4121,13 +4691,20 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @param txnId transaction id for the rebuild * @return true if the lock could be renewed, false otherwise */ - boolean heartbeatLockMaterializationRebuild(String dbName, String tableName, long txnId) throws TException; + default boolean heartbeatLockMaterializationRebuild(String dbName, String tableName, long txnId) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support heartbeating materialization " + + "rebuild lock"); + } /** Adds a RuntimeStat for metastore persistence. */ - void addRuntimeStat(RuntimeStat stat) throws TException; + default void addRuntimeStat(RuntimeStat stat) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support adding runtime stats"); + } /** Reads runtime statistics. */ - List getRuntimeStats(int maxWeight, int maxCreateTime) throws TException; + default List getRuntimeStats(int maxWeight, int maxCreateTime) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support reading runtime stats"); + } /** * Generic Partition request API, providing different ways of filtering and controlling output. @@ -4145,7 +4722,9 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * Partition filter spec is the generalization of various types of partition filtering. * Partitions can be filtered by names, by values or by partition expressions. */ - GetPartitionsResponse getPartitionsWithSpecs(GetPartitionsRequest request) throws TException; + default GetPartitionsResponse getPartitionsWithSpecs(GetPartitionsRequest request) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting partitions with specs"); + } /** * Get the next compaction job to do. 
@@ -4154,7 +4733,9 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException * @throws TException */ - OptionalCompactionInfoStruct findNextCompact(FindNextCompactRequest rqst) throws MetaException, TException; + default OptionalCompactionInfoStruct findNextCompact(FindNextCompactRequest rqst) throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support finding next compaction"); + } /** * Set the compaction highest write id. @@ -4162,7 +4743,9 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @param txnId transaction id. * @throws TException */ - void updateCompactorState(CompactionInfoStruct cr, long txnId) throws TException; + default void updateCompactorState(CompactionInfoStruct cr, long txnId) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support updating compactor state"); + } /** * Get columns. @@ -4170,7 +4753,9 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @return * @throws TException */ - List findColumnsWithStats(CompactionInfoStruct cr) throws TException; + default List findColumnsWithStats(CompactionInfoStruct cr) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support finding columns with stats"); + } /** * Mark a finished compaction as cleaned. @@ -4178,7 +4763,9 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException * @throws TException */ - void markCleaned(CompactionInfoStruct cr) throws MetaException, TException; + default void markCleaned(CompactionInfoStruct cr) throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support marking compaction as cleaned"); + } /** * Mark a finished compaction as compacted. @@ -4186,7 +4773,9 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException * @throws TException */ - void markCompacted(CompactionInfoStruct cr) throws MetaException, TException; + default void markCompacted(CompactionInfoStruct cr) throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support marking compaction as compacted"); + } /** * Mark a finished compaction as failed. @@ -4194,7 +4783,9 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException * @throws TException */ - void markFailed(CompactionInfoStruct cr) throws MetaException, TException; + default void markFailed(CompactionInfoStruct cr) throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support marking compaction as failed"); + } /** * Mark a compaction as refused (to run). @@ -4202,7 +4793,9 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException * @throws TException */ - void markRefused(CompactionInfoStruct cr) throws MetaException, TException; + default void markRefused(CompactionInfoStruct cr) throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support marking compaction as refused"); + } /** * Create, update or delete one record in the compaction metrics cache. 
@@ -4221,7 +4814,9 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException * @throws TException */ - boolean updateCompactionMetricsData(CompactionMetricsDataStruct struct) throws MetaException, TException; + default boolean updateCompactionMetricsData(CompactionMetricsDataStruct struct) throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support updating compaction metrics data"); + } /** @@ -4230,7 +4825,9 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException * @throws TException */ - void removeCompactionMetricsData(CompactionMetricsDataRequest request) throws MetaException, TException; + default void removeCompactionMetricsData(CompactionMetricsDataRequest request) throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support removing compaction metrics data"); + } /** * Set the hadoop id for a compaction. * @param jobId mapreduce job id that will do the compaction. @@ -4238,72 +4835,108 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException * @throws TException */ - void setHadoopJobid(String jobId, long cqId) throws MetaException, TException; + default void setHadoopJobid(String jobId, long cqId) throws MetaException, TException { + throw new UnsupportedOperationException("MetaStore client does not support setting hadoop job id for compaction"); + } /** * Gets the version string of the metastore server which this client is connected to * * @return String representation of the version number of Metastore server (eg: 3.1.0-SNAPSHOT) */ - String getServerVersion() throws TException; + default String getServerVersion() throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting server version"); + } /** * Returns details about a scheduled query by name. * * @throws NoSuchObjectException if an object by the given name doesn't exist. */ - ScheduledQuery getScheduledQuery(ScheduledQueryKey scheduleKey) throws TException; + default ScheduledQuery getScheduledQuery(ScheduledQueryKey scheduleKey) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting scheduled query by key"); + } /** * Carries out maintenance of scheduled queries (insert/update/drop). */ - void scheduledQueryMaintenance(ScheduledQueryMaintenanceRequest request) throws MetaException, TException; + default void scheduledQueryMaintenance(ScheduledQueryMaintenanceRequest request) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support scheduled query maintenance"); + } /** * Checks whether a query is available for execution. * * @return optionally a scheduled query to be processed. */ - ScheduledQueryPollResponse scheduledQueryPoll(ScheduledQueryPollRequest request) throws MetaException, TException; + default ScheduledQueryPollResponse scheduledQueryPoll(ScheduledQueryPollRequest request) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support scheduled query poll"); + } /** * Registers the progress of a scheduled query being executed. 
*/ - void scheduledQueryProgress(ScheduledQueryProgressInfo info) throws TException; + default void scheduledQueryProgress(ScheduledQueryProgressInfo info) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support scheduled query progress"); + } /** * Adds replication metrics for the replication policies. * @param replicationMetricList * @throws MetaException */ - void addReplicationMetrics(ReplicationMetricList replicationMetricList) throws MetaException, TException; + default void addReplicationMetrics(ReplicationMetricList replicationMetricList) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support adding replication metrics"); + } - ReplicationMetricList getReplicationMetrics(GetReplicationMetricsRequest - replicationMetricsRequest) throws MetaException, TException; + default ReplicationMetricList getReplicationMetrics(GetReplicationMetricsRequest + replicationMetricsRequest) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting replication metrics"); + } - void createStoredProcedure(StoredProcedure proc) throws NoSuchObjectException, MetaException, TException; + default void createStoredProcedure(StoredProcedure proc) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support creating stored procedures"); + } - StoredProcedure getStoredProcedure(StoredProcedureRequest request) throws MetaException, NoSuchObjectException, TException; + default StoredProcedure getStoredProcedure(StoredProcedureRequest request) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting stored procedures"); + } - void dropStoredProcedure(StoredProcedureRequest request) throws MetaException, NoSuchObjectException, TException; + default void dropStoredProcedure(StoredProcedureRequest request) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support dropping stored procedures"); + } - List getAllStoredProcedures(ListStoredProcedureRequest request) throws MetaException, TException; + default List getAllStoredProcedures(ListStoredProcedureRequest request) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting all stored procedures"); + } - void addPackage(AddPackageRequest request) throws NoSuchObjectException, MetaException, TException; + default void addPackage(AddPackageRequest request) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support adding packages"); + } - Package findPackage(GetPackageRequest request) throws TException; + default Package findPackage(GetPackageRequest request) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support finding packages"); + } - List listPackages(ListPackageRequest request) throws TException; + default List listPackages(ListPackageRequest request) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support listing packages"); + } - void dropPackage(DropPackageRequest request) throws TException; + default void dropPackage(DropPackageRequest request) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support dropping packages"); + } /** * Get acid write events of a specific transaction. 
* @throws TException */ - List getAllWriteEventInfo(GetAllWriteEventInfoRequest request) throws TException; + default List getAllWriteEventInfo(GetAllWriteEventInfoRequest request) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting write events"); + } - AbortCompactResponse abortCompactions(AbortCompactionRequest request) throws TException; + default AbortCompactResponse abortCompactions(AbortCompactionRequest request) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support aborting compactions"); + } /** * Sets properties. @@ -4325,7 +4958,8 @@ default boolean setProperties(String nameSpace, Map properties) * @return a map keyed by property map path to maps keyed by property name mapped to property values * @throws TException */ - default Map> getProperties(String nameSpace, String mapPrefix, String mapPredicate, String... selection) throws TException { + default Map> getProperties(String nameSpace, String mapPrefix, + String mapPredicate, String... selection) throws TException { throw new UnsupportedOperationException(); } } diff --git a/standalone-metastore/metastore-rest-catalog/pom.xml b/standalone-metastore/metastore-rest-catalog/pom.xml index 7da62dfae3dd..edf41fdc17a7 100644 --- a/standalone-metastore/metastore-rest-catalog/pom.xml +++ b/standalone-metastore/metastore-rest-catalog/pom.xml @@ -258,6 +258,17 @@ maven-surefire-plugin ${surefire.version} + + org.apache.maven.plugins + maven-jar-plugin + + + + test-jar + + + + org.codehaus.mojo exec-maven-plugin diff --git a/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/HiveRESTCatalogServerExtension.java b/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/HiveRESTCatalogServerExtension.java index 6b9a3f751472..5b80d2276671 100644 --- a/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/HiveRESTCatalogServerExtension.java +++ b/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/HiveRESTCatalogServerExtension.java @@ -25,6 +25,7 @@ import java.util.stream.Stream; import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.MetaStoreSchemaInfo; import org.apache.hadoop.hive.metastore.ServletSecurity.AuthType; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; @@ -42,7 +43,7 @@ public class HiveRESTCatalogServerExtension implements BeforeAllCallback, Before private final JwksServer jwksServer; private final RESTCatalogServer restCatalogServer; - private HiveRESTCatalogServerExtension(AuthType authType) { + private HiveRESTCatalogServerExtension(AuthType authType, Class schemaInfoClass) { this.conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setVar(conf, ConfVars.CATALOG_SERVLET_AUTH, authType.name()); if (authType == AuthType.JWT) { @@ -54,6 +55,13 @@ private HiveRESTCatalogServerExtension(AuthType authType) { jwksServer = null; } restCatalogServer = new RESTCatalogServer(); + if (schemaInfoClass != null) { + restCatalogServer.setSchemaInfoClass(schemaInfoClass); + } + } + + public Configuration getConf() { + return conf; } @Override @@ -98,13 +106,19 @@ public String getRestEndpoint() { public static class Builder { private final AuthType authType; + private Class metaStoreSchemaClass; private Builder(AuthType authType) { 
this.authType = authType; } + + public Builder addMetaStoreSchemaClassName(Class metaStoreSchemaClass) { + this.metaStoreSchemaClass = metaStoreSchemaClass; + return this; + } public HiveRESTCatalogServerExtension build() { - return new HiveRESTCatalogServerExtension(authType); + return new HiveRESTCatalogServerExtension(authType, metaStoreSchemaClass); } } diff --git a/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/RESTCatalogServer.java b/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/RESTCatalogServer.java index 466cf4603bb2..7d2aac692db5 100644 --- a/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/RESTCatalogServer.java +++ b/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/RESTCatalogServer.java @@ -23,6 +23,7 @@ import java.util.UUID; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.MetaStoreSchemaInfo; import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; @@ -36,14 +37,19 @@ public class RESTCatalogServer { private Path warehouseDir; private int hmsPort = -1; private int restPort = -1; + private Class schemaInfoClass = RESTCatalogSchemaInfo.class; private static int createMetastoreServerWithRESTCatalog(int restPort, Configuration conf) throws Exception { MetastoreConf.setLongVar(conf, MetastoreConf.ConfVars.CATALOG_SERVLET_PORT, restPort); return MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf, true, false, false, false); } + + public void setSchemaInfoClass(Class schemaInfoClass) { + this.schemaInfoClass = schemaInfoClass; + } - void start(Configuration conf) throws Exception { + public void start(Configuration conf) throws Exception { MetaStoreTestUtils.setConfForStandloneMode(conf); // Avoid reusing the JVM-level caching across Hive Metastore servers @@ -60,8 +66,7 @@ void start(Configuration conf) throws Exception { MetastoreConf.setVar(conf, MetastoreConf.ConfVars.WAREHOUSE_EXTERNAL, externalPath); conf.set(HiveConf.ConfVars.HIVE_METASTORE_WAREHOUSE_EXTERNAL.varname, externalPath); - MetastoreConf.setVar(conf, MetastoreConf.ConfVars.SCHEMA_INFO_CLASS, - RESTCatalogSchemaInfo.class.getCanonicalName()); + MetastoreConf.setVar(conf, MetastoreConf.ConfVars.SCHEMA_INFO_CLASS, schemaInfoClass.getCanonicalName()); for (int i = 0; i < MetaStoreTestUtils.RETRY_COUNT; i++) { try { @@ -79,15 +84,15 @@ void start(Configuration conf) throws Exception { LOG.info("Starting HMS(port={}) with Iceberg REST Catalog(port={})", hmsPort, restPort); } - void stop() { + public void stop() { MetaStoreTestUtils.close(hmsPort); } - Path getWarehouseDir() { + public Path getWarehouseDir() { return warehouseDir; } - String getRestEndpoint() { + public String getRestEndpoint() { return String.format("http://localhost:%d/iceberg", restPort); } }
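Note on the interface changes above: converting these metastore-client methods to default implementations that throw UnsupportedOperationException moves the obligation from compile time to call time, so a thin client no longer has to stub out every operation it does not support, while callers that may be handed such a client can guard the optional calls. A minimal caller-side sketch, assuming the interface being modified is org.apache.hadoop.hive.metastore.IMetaStoreClient (the file header is not visible in these hunks); the class and package below are purely illustrative:

package org.apache.hadoop.hive.metastore.example; // hypothetical package, for illustration only

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.thrift.TException;

public final class ServerVersionProbe {

  private ServerVersionProbe() {
  }

  /**
   * Returns the metastore server version if the client supports it, or null when the
   * client only inherits the default getServerVersion() body introduced above.
   */
  public static String tryGetServerVersion(IMetaStoreClient client) throws TException {
    try {
      return client.getServerVersion();
    } catch (UnsupportedOperationException e) {
      // Minimal implementations that do not override the new default land here.
      return null;
    }
  }
}

The same pattern applies to the WM, schema, compaction, scheduled-query and package methods converted above.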
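The maven-jar-plugin test-jar execution added to the metastore-rest-catalog pom, together with the widened visibility of RESTCatalogServer and the new Builder hook on HiveRESTCatalogServerExtension, suggests these test fixtures are meant to be reusable from other modules with a pluggable MetaStoreSchemaInfo. A sketch of such a consumer test follows; the static builder(AuthType) factory and the AuthType.NONE value are assumptions (only the private Builder constructor and addMetaStoreSchemaClassName are visible in this diff), and passing the stock MetaStoreSchemaInfo is shown only as one plausible use of the new hook:

import org.apache.hadoop.hive.metastore.MetaStoreSchemaInfo;
import org.apache.hadoop.hive.metastore.ServletSecurity.AuthType;
import org.apache.iceberg.rest.extension.HiveRESTCatalogServerExtension;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;

class StockSchemaInfoRESTCatalogTest {

  // builder(AuthType) is assumed to be the public entry point to the private Builder;
  // addMetaStoreSchemaClassName(...) and build() are the methods shown in the diff above.
  @RegisterExtension
  static final HiveRESTCatalogServerExtension REST_CATALOG =
      HiveRESTCatalogServerExtension.builder(AuthType.NONE) // AuthType value assumed
          .addMetaStoreSchemaClassName(MetaStoreSchemaInfo.class)
          .build();

  @Test
  void restEndpointIsExposed() {
    // getRestEndpoint() and the new getConf() accessor are part of the extension's public surface.
    Assertions.assertNotNull(REST_CATALOG.getRestEndpoint());
    Assertions.assertNotNull(REST_CATALOG.getConf());
  }
}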
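Since start(Configuration), stop(), getWarehouseDir() and getRestEndpoint() are now public, RESTCatalogServer can also be driven directly, without going through the JUnit extension. A rough sketch under the same assumptions: a no-arg constructor is assumed (none is shown in the diff), and a consumer outside this module would need the test-jar published by the pom change above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.MetaStoreSchemaInfo;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.iceberg.rest.extension.RESTCatalogServer;

public final class RestCatalogServerDriver {

  public static void main(String[] args) throws Exception {
    RESTCatalogServer server = new RESTCatalogServer(); // implicit no-arg constructor assumed
    // Optional: swap the default RESTCatalogSchemaInfo for the stock schema info class.
    server.setSchemaInfoClass(MetaStoreSchemaInfo.class);

    Configuration conf = MetastoreConf.newMetastoreConf();
    server.start(conf);
    try {
      System.out.println("Iceberg REST catalog at " + server.getRestEndpoint());
      System.out.println("Warehouse dir: " + server.getWarehouseDir());
    } finally {
      server.stop();
    }
  }
}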