File: Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/package-info.java

/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Polaris connector classes.
*/
package com.netflix.metacat.connector.polaris;

File: Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/common/PolarisConnectorConsts.java

package com.netflix.metacat.connector.polaris.common;
/**
* Polaris connector consts.
*/
public final class PolarisConnectorConsts {
/**
* Max number of client-side retries for CockroachDB (CRDB) transactions.
*/
public static final int MAX_CRDB_TXN_RETRIES = 5;
/**
* Default Ctor.
*/
private PolarisConnectorConsts() {
}
}

File: Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/common/TransactionRetryAspect.java

package com.netflix.metacat.connector.polaris.common;
import com.google.common.base.Throwables;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.monitoring.Metrics;
import lombok.extern.slf4j.Slf4j;
import org.aspectj.lang.ProceedingJoinPoint;
import org.aspectj.lang.annotation.Around;
import org.aspectj.lang.annotation.Aspect;
import org.springframework.core.Ordered;
import org.springframework.retry.RetryException;
import org.springframework.retry.support.RetryTemplate;
import org.springframework.transaction.support.TransactionSynchronizationManager;
import java.sql.SQLException;
/**
* Aspect for client-side transaction retries.
*/
@Aspect
@Slf4j
public class TransactionRetryAspect implements Ordered {
private static final String SQLSTATE_RETRY_TRANSACTION = "40001";
private final RetryTemplate retryTemplate;
private final ConnectorContext connectorContext;
/**
* Constructor.
*
* @param retryTemplate retry template.
* @param connectorContext the connector context.
*/
public TransactionRetryAspect(final RetryTemplate retryTemplate,
final ConnectorContext connectorContext) {
this.retryTemplate = retryTemplate;
this.connectorContext = connectorContext;
}
/**
* Pointcut for transactional methods in Polaris persistence classes.
*
* @param pjp join point
* @return data results
* @throws Exception data exception
*/
@Around(value = "@annotation(org.springframework.transaction.annotation.Transactional)"
+ "&& within(com.netflix.metacat.connector.polaris.store..*)")
public Object retry(final ProceedingJoinPoint pjp) throws Exception {
return retryOnError(pjp);
}
private Object retryOnError(final ProceedingJoinPoint pjp) throws Exception {
return retryTemplate.<Object, Exception>execute(context -> {
try {
return pjp.proceed();
} catch (Throwable t) {
if (!TransactionSynchronizationManager.isActualTransactionActive() && isRetryError(t)) {
log.warn("Transaction failed with retry error: {}", t.getMessage());
connectorContext.getRegistry().counter(
Metrics.CounterTransactionRetryFailure.getMetricName()).increment();
throw new RetryException("TransactionRetryError", t);
}
throw new RuntimeException(t);
}
});
}
private boolean isRetryError(final Throwable t) {
for (Throwable ex : Throwables.getCausalChain(t)) {
if (ex instanceof SQLException && SQLSTATE_RETRY_TRANSACTION.equals(((SQLException) ex).getSQLState())) {
return true;
}
}
return false;
}
@Override
public int getOrder() {
return 99;
}
}
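
The aspect converts a CockroachDB serialization failure (SQLSTATE 40001 anywhere in the causal chain) into a RetryException, which is the one exception type that the RetryTemplate defined later in PolarisConnectorConfig retries. A minimal, self-contained sketch of that contract (illustrative only, not part of the connector):

import com.google.common.collect.ImmutableMap;
import org.springframework.retry.RetryException;
import org.springframework.retry.backoff.ExponentialBackOffPolicy;
import org.springframework.retry.policy.SimpleRetryPolicy;
import org.springframework.retry.support.RetryTemplate;

public class RetryContractSketch {
    public static void main(final String[] args) throws Exception {
        // Same policy shape as transactionRetryTemplate(): retry only
        // RetryException, at most 5 attempts, with exponential backoff.
        final RetryTemplate template = new RetryTemplate();
        template.setRetryPolicy(new SimpleRetryPolicy(
            5,
            new ImmutableMap.Builder<Class<? extends Throwable>, Boolean>()
                .put(RetryException.class, true)
                .build()));
        template.setBackOffPolicy(new ExponentialBackOffPolicy());

        final int[] attempts = {0};
        final String result = template.<String, Exception>execute(context -> {
            if (++attempts[0] < 3) {
                // This is what the aspect throws when it sees SQLSTATE 40001.
                throw new RetryException("TransactionRetryError");
            }
            return "committed on attempt " + attempts[0];
        });
        System.out.println(result); // committed on attempt 3
    }
}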

File: Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/common/PolarisUtils.java

package com.netflix.metacat.connector.polaris.common;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import org.apache.commons.lang3.StringUtils;
/**
* Polaris connector utils.
*/
public final class PolarisUtils {
/**
* Default metacat user.
*/
public static final String DEFAULT_METACAT_USER = "metacat_user";
/**
* Default Ctor.
*/
private PolarisUtils() {
}
/**
* Get the user name from the request context or
* a default one if missing.
* @param context The request context.
* @return the user name.
*/
public static String getUserOrDefault(final ConnectorRequestContext context) {
final String userName = context.getUserName();
return StringUtils.isNotBlank(userName) ? userName : DEFAULT_METACAT_USER;
}
}
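
A tiny usage sketch of the fallback behavior (hypothetical: assumes ConnectorRequestContext exposes a userName setter, which should be checked against the actual metacat API):

// final ConnectorRequestContext context = new ConnectorRequestContext();
// context.setUserName(null);              // hypothetical setter
// PolarisUtils.getUserOrDefault(context); // returns "metacat_user"
// context.setUserName("jsmith");
// PolarisUtils.getUserOrDefault(context); // returns "jsmith"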

File: Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/common/package-info.java

/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Polaris connector common classes.
*/
package com.netflix.metacat.connector.polaris.common;

File: Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/configs/PolarisPersistenceConfig.java

package com.netflix.metacat.connector.polaris.configs;
import com.netflix.metacat.connector.polaris.store.PolarisStoreConnector;
import com.netflix.metacat.connector.polaris.store.PolarisStoreService;
import com.netflix.metacat.connector.polaris.store.repos.PolarisDatabaseRepository;
import com.netflix.metacat.connector.polaris.store.repos.PolarisTableRepository;
import com.zaxxer.hikari.HikariDataSource;
import org.springframework.boot.autoconfigure.ImportAutoConfiguration;
import org.springframework.boot.autoconfigure.domain.EntityScan;
import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration;
import org.springframework.boot.autoconfigure.jdbc.DataSourceProperties;
import org.springframework.boot.autoconfigure.jdbc.DataSourceTransactionManagerAutoConfiguration;
import org.springframework.boot.autoconfigure.orm.jpa.HibernateJpaAutoConfiguration;
import org.springframework.boot.autoconfigure.transaction.TransactionAutoConfiguration;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.data.jpa.repository.config.EnableJpaAuditing;
import org.springframework.data.jpa.repository.config.EnableJpaRepositories;
import org.springframework.transaction.annotation.EnableTransactionManagement;
import javax.sql.DataSource;
/**
* The Polaris Store Persistence config.
*
*/
@Configuration
@EntityScan("com.netflix.metacat.connector.polaris.store.entities")
@EnableJpaRepositories("com.netflix.metacat.connector.polaris.store.repos")
@EnableJpaAuditing
@EnableTransactionManagement(proxyTargetClass = true)
@ImportAutoConfiguration({DataSourceAutoConfiguration.class,
DataSourceTransactionManagerAutoConfiguration.class, HibernateJpaAutoConfiguration.class,
TransactionAutoConfiguration.class})
public class PolarisPersistenceConfig {
/**
* Primary datasource. Since connectors can have data sources configured, polaris store JPA needs to be
* explicitly configured.
*
* @param dataSourceProperties datasource properties
* @return Datasource
*/
@Bean
@ConfigurationProperties(prefix = "spring.datasource.hikari")
public DataSource dataSource(final DataSourceProperties dataSourceProperties) {
return dataSourceProperties.initializeDataSourceBuilder().type(HikariDataSource.class).build();
}
/**
* Datasource properties.
*
* @return DataSourceProperties
*/
@Bean
@Primary
@ConfigurationProperties("spring.datasource")
public DataSourceProperties dataSourceProperties() {
return new DataSourceProperties();
}
/**
* Get an implementation of {@link PolarisStoreConnector}.
*
* @param repo - PolarisDatabaseRepository
* @param tblRepo - PolarisTableRepository
* @return PolarisStoreConnector
*/
@Bean
public PolarisStoreService polarisStoreService(
final PolarisDatabaseRepository repo, final PolarisTableRepository tblRepo) {
return new PolarisStoreConnector(repo, tblRepo);
}
}
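
Binding sketch: dataSourceProperties() is populated from the spring.datasource prefix, while dataSource() additionally applies Hikari-specific settings from spring.datasource.hikari. Illustrative property values only (CockroachDB speaks the PostgreSQL wire protocol, hence the postgresql JDBC URL; adjust for the actual deployment):

# application.properties (hypothetical values)
spring.datasource.url=jdbc:postgresql://localhost:26257/metacat
spring.datasource.username=metacat_app
spring.datasource.password=change-me
spring.datasource.hikari.maximum-pool-size=10
spring.datasource.hikari.connection-timeout=30000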

File: Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/configs/PolarisConnectorConfig.java

package com.netflix.metacat.connector.polaris.configs;
import com.google.common.collect.ImmutableMap;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.util.ThreadServiceManager;
import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableCriteria;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableCriteriaImpl;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableHandler;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableOpWrapper;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableOpsProxy;
import com.netflix.metacat.connector.polaris.PolarisConnectorDatabaseService;
import com.netflix.metacat.connector.polaris.PolarisConnectorPartitionService;
import com.netflix.metacat.connector.polaris.PolarisConnectorTableService;
import com.netflix.metacat.connector.polaris.common.PolarisConnectorConsts;
import com.netflix.metacat.connector.polaris.common.TransactionRetryAspect;
import com.netflix.metacat.connector.polaris.mappers.PolarisTableMapper;
import com.netflix.metacat.connector.polaris.store.PolarisStoreService;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.context.annotation.Bean;
import org.springframework.retry.RetryException;
import org.springframework.retry.backoff.ExponentialBackOffPolicy;
import org.springframework.retry.policy.SimpleRetryPolicy;
import org.springframework.retry.support.RetryTemplate;
/**
* Config for polaris connector.
*/
public class PolarisConnectorConfig {
/**
* Creates a new instance of a polaris connector partition service.
*
* @param icebergTableHandler iceberg table handler
* @param connectorContext connector context
* @param polarisTableService polaris table service
* @return PolarisConnectorPartitionService
*/
@Bean
public PolarisConnectorPartitionService polarisConnectorPartitionService(
final IcebergTableHandler icebergTableHandler,
final ConnectorContext connectorContext,
final PolarisConnectorTableService polarisTableService) {
return new PolarisConnectorPartitionService(connectorContext, icebergTableHandler, polarisTableService);
}
/**
* Create polaris connector database service.
*
* @param polarisStoreService polaris store service
* @param connectorContext connector context
* @return PolarisConnectorDatabaseService
*/
@Bean
@ConditionalOnMissingBean(PolarisConnectorDatabaseService.class)
public PolarisConnectorDatabaseService polarisDatabaseService(
final PolarisStoreService polarisStoreService,
final ConnectorContext connectorContext
) {
return new PolarisConnectorDatabaseService(polarisStoreService, connectorContext);
}
/**
* Create polaris connector table service.
*
* @param polarisStoreService polaris store service
* @param connectorConverter connector converter
* @param connectorDatabaseService polaris database service
* @param icebergTableHandler iceberg table handler
* @param polarisTableMapper polaris table mapper
* @param connectorContext connector context
* @return PolarisConnectorTableService
*/
@Bean
@ConditionalOnMissingBean(PolarisConnectorTableService.class)
public PolarisConnectorTableService polarisTableService(
final PolarisStoreService polarisStoreService,
final HiveConnectorInfoConverter connectorConverter,
final PolarisConnectorDatabaseService connectorDatabaseService,
final IcebergTableHandler icebergTableHandler,
final PolarisTableMapper polarisTableMapper,
final ConnectorContext connectorContext
) {
return new PolarisConnectorTableService(
polarisStoreService,
connectorContext.getCatalogName(),
connectorDatabaseService,
connectorConverter,
icebergTableHandler,
polarisTableMapper,
connectorContext
);
}
/**
* Create PolarisTableMapper.
* @param connectorContext server context
* @return PolarisTableMapper.
*/
@Bean
public PolarisTableMapper polarisTableMapper(final ConnectorContext connectorContext) {
return new PolarisTableMapper(connectorContext.getCatalogName());
}
/**
* Create iceberg table handler.
* @param connectorContext server context
* @param icebergTableCriteria iceberg table criteria
* @param icebergTableOpWrapper iceberg table operation
* @param icebergTableOpsProxy IcebergTableOps proxy
* @return IcebergTableHandler
*/
@Bean
public IcebergTableHandler icebergTableHandler(final ConnectorContext connectorContext,
final IcebergTableCriteria icebergTableCriteria,
final IcebergTableOpWrapper icebergTableOpWrapper,
final IcebergTableOpsProxy icebergTableOpsProxy) {
return new IcebergTableHandler(
connectorContext,
icebergTableCriteria,
icebergTableOpWrapper,
icebergTableOpsProxy);
}
/**
* Create iceberg table criteria.
* @param connectorContext server context
* @return IcebergTableCriteria
*/
@Bean
public IcebergTableCriteria icebergTableCriteria(final ConnectorContext connectorContext) {
return new IcebergTableCriteriaImpl(connectorContext);
}
/**
* Create iceberg table operation wrapper.
* @param connectorContext server context
* @param threadServiceManager executor service
* @return IcebergTableOpWrapper
*/
@Bean
public IcebergTableOpWrapper icebergTableOpWrapper(final ConnectorContext connectorContext,
final ThreadServiceManager threadServiceManager) {
return new IcebergTableOpWrapper(connectorContext, threadServiceManager);
}
/**
* Create thread service manager.
* @param connectorContext connector config
* @return ThreadServiceManager
*/
@Bean
public ThreadServiceManager threadServiceManager(final ConnectorContext connectorContext) {
return new ThreadServiceManager(connectorContext.getRegistry(), connectorContext.getConfig());
}
/**
* Create IcebergTableOps proxy.
* @return IcebergTableOpsProxy
*/
@Bean
public IcebergTableOpsProxy icebergTableOps() {
return new IcebergTableOpsProxy();
}
/**
* Retry template to use for transaction retries.
*
* @return The retry template bean.
*/
@Bean
public RetryTemplate transactionRetryTemplate() {
final RetryTemplate result = new RetryTemplate();
result.setRetryPolicy(new SimpleRetryPolicy(
PolarisConnectorConsts.MAX_CRDB_TXN_RETRIES,
new ImmutableMap.Builder<Class<? extends Throwable>, Boolean>()
.put(RetryException.class, true)
.build()));
result.setBackOffPolicy(new ExponentialBackOffPolicy());
return result;
}
/**
* Aspect advice for transaction retries.
*
* @param retryTemplate the transaction retry template.
* @param connectorContext the connector context.
* @return TransactionRetryAspect
*/
@Bean
public TransactionRetryAspect transactionRetryAspect(
@Qualifier("transactionRetryTemplate") final RetryTemplate retryTemplate,
final ConnectorContext connectorContext) {
return new TransactionRetryAspect(retryTemplate, connectorContext);
}
}

File: Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/configs/package-info.java

/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Polaris config classes.
*/
package com.netflix.metacat.connector.polaris.configs;

File: Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/mappers/InfoToEntityMapper.java

package com.netflix.metacat.connector.polaris.mappers;
/**
* Info to Entity mapper.
*
* @param <I> The info type to map from.
* @param <E> The entity type to map to.
*/
public interface InfoToEntityMapper<I, E> {
/**
* Maps an info object to an entity object.
*
* @param info The info object to map from.
* @return The result entity object.
*/
E toEntity(I info);
}

File: Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/mappers/EntityToInfoMapper.java

package com.netflix.metacat.connector.polaris.mappers;
/**
* Entity to Info Mapper.
*
* @param <E> The entity type to map from.
* @param <I> The info type to map to.
*/
public interface EntityToInfoMapper<E, I> {
/**
* Maps an Entity to the Info object.
*
* @param entity The entity to map from.
* @return The result info object.
*/
I toInfo(E entity);
}

File: Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/mappers/PolarisDatabaseMapper.java

package com.netflix.metacat.connector.polaris.mappers;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.model.DatabaseInfo;
import com.netflix.metacat.connector.polaris.store.entities.PolarisDatabaseEntity;
/**
* Database object mapper implementations.
*/
public class PolarisDatabaseMapper implements
EntityToInfoMapper<PolarisDatabaseEntity, DatabaseInfo>,
InfoToEntityMapper<DatabaseInfo, PolarisDatabaseEntity> {
// TODO: this can be reworked if PolarisDatabaseEntity starts tracking catalog name
private final String catalogName;
/**
* Constructor.
* @param catalogName the catalog name
*/
public PolarisDatabaseMapper(final String catalogName) {
this.catalogName = catalogName;
}
/**
* {@inheritDoc}.
*/
@Override
public DatabaseInfo toInfo(final PolarisDatabaseEntity entity) {
final DatabaseInfo databaseInfo = DatabaseInfo.builder()
.name(QualifiedName.ofDatabase(catalogName, entity.getDbName()))
.uri(entity.getLocation())
.build();
return databaseInfo;
}
/**
* {@inheritDoc}.
*/
@Override
public PolarisDatabaseEntity toEntity(final DatabaseInfo info) {
final PolarisDatabaseEntity databaseEntity = PolarisDatabaseEntity.builder()
.dbName(info.getName().getDatabaseName())
.location(info.getUri())
.build();
return databaseEntity;
}
}

File: Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/mappers/PolarisTableMapper.java

package com.netflix.metacat.connector.polaris.mappers;
import com.google.common.collect.ImmutableMap;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import com.netflix.metacat.common.server.connectors.model.AuditInfo;
import com.netflix.metacat.common.server.connectors.model.StorageInfo;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.connector.hive.sql.DirectSqlTable;
import com.netflix.metacat.connector.polaris.store.entities.PolarisTableEntity;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang.StringUtils;
import java.sql.Date;
import java.util.Map;
/**
* Table object mapper implementations.
*/
public class PolarisTableMapper implements
EntityToInfoMapper<PolarisTableEntity, TableInfo>,
InfoToEntityMapper<TableInfo, PolarisTableEntity> {
private static final String PARAMETER_SPARK_SQL_PROVIDER = "spark.sql.sources.provider";
private static final String PARAMETER_EXTERNAL = "EXTERNAL";
private static final String PARAMETER_METADATA_PREFIX = "/metadata/";
private final String catalogName;
/**
* Constructor.
* @param catalogName the catalog name
*/
public PolarisTableMapper(final String catalogName) {
this.catalogName = catalogName;
}
/**
* {@inheritDoc}.
*/
@Override
public TableInfo toInfo(final PolarisTableEntity entity) {
final int uriIndex = entity.getMetadataLocation().indexOf(PARAMETER_METADATA_PREFIX);
final TableInfo tableInfo = TableInfo.builder()
.name(QualifiedName.ofTable(catalogName, entity.getDbName(), entity.getTblName()))
.metadata(ImmutableMap.of(
DirectSqlTable.PARAM_METADATA_LOCATION, entity.getMetadataLocation(),
PARAMETER_EXTERNAL, "TRUE", PARAMETER_SPARK_SQL_PROVIDER, "iceberg",
DirectSqlTable.PARAM_TABLE_TYPE, DirectSqlTable.ICEBERG_TABLE_TYPE))
.serde(StorageInfo.builder().inputFormat("org.apache.hadoop.mapred.FileInputFormat")
.outputFormat("org.apache.hadoop.mapred.FileOutputFormat")
.serializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")
.uri(uriIndex > 0 ? entity.getMetadataLocation().substring(0, uriIndex) : "")
.build())
.auditInfo(AuditInfo.builder()
.createdBy(entity.getAudit().getCreatedBy())
.createdDate(Date.from(entity.getAudit().getCreatedDate()))
.lastModifiedBy(entity.getAudit().getLastModifiedBy())
.lastModifiedDate(Date.from(entity.getAudit().getLastModifiedDate()))
.build())
.build();
return tableInfo;
}
/**
* {@inheritDoc}.
*/
@Override
public PolarisTableEntity toEntity(final TableInfo info) {
final Map<String, String> metadata = info.getMetadata();
if (MapUtils.isEmpty(metadata)) {
final String message = String.format("No parameters defined for iceberg table %s", info.getName());
throw new InvalidMetaException(info.getName(), message, null);
}
final String location = metadata.get(DirectSqlTable.PARAM_METADATA_LOCATION);
if (StringUtils.isEmpty(location)) {
final String message = String.format("No metadata location defined for iceberg table %s", info.getName());
throw new InvalidMetaException(info.getName(), message, null);
}
final PolarisTableEntity tableEntity = PolarisTableEntity.builder()
.dbName(info.getName().getDatabaseName())
.tblName(info.getName().getTableName())
.metadataLocation(location)
.build();
return tableEntity;
}
}
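
The serde URI in toInfo is derived by truncating the metadata location at the "/metadata/" marker; when the marker is absent, the URI is left empty. A self-contained sketch with a hypothetical S3 path:

public class MetadataUriSketch {
    public static void main(final String[] args) {
        // Hypothetical Iceberg metadata location.
        final String metadataLocation =
            "s3://warehouse/db/tbl/metadata/00001-abc.metadata.json";
        // Same derivation as PolarisTableMapper#toInfo.
        final int uriIndex = metadataLocation.indexOf("/metadata/");
        final String uri = uriIndex > 0 ? metadataLocation.substring(0, uriIndex) : "";
        System.out.println(uri); // prints: s3://warehouse/db/tbl
    }
}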

File: Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/mappers/package-info.java

/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Polaris mapper classes.
*/
package com.netflix.metacat.connector.polaris.mappers;

File: Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/store/PolarisStoreService.java

package com.netflix.metacat.connector.polaris.store;
import com.netflix.metacat.connector.polaris.store.entities.PolarisDatabaseEntity;
import com.netflix.metacat.connector.polaris.store.entities.PolarisTableEntity;
import java.util.List;
import java.util.Optional;
/**
* Interface methods for Polaris Store CRUD access.
*/
public interface PolarisStoreService {
/**
* Creates a database entry.
* @param databaseName database name
* @param location the database location.
* @param createdBy user creating this database.
* @return Polaris Database entity.
*/
PolarisDatabaseEntity createDatabase(String databaseName, String location, String createdBy);
/**
* Fetches database entry.
* @param databaseName database name
* @return Polaris Database entity
*/
Optional<PolarisDatabaseEntity> getDatabase(String databaseName);
/**
* Deletes the database entry.
* @param dbName database name.
*/
void deleteDatabase(String dbName);
/**
* Fetches all database entities.
* @return Polaris Database entities
*/
List<PolarisDatabaseEntity> getAllDatabases();
/**
* Checks if database with the name exists.
* @param databaseName database name to look up.
* @return true, if database exists. false, otherwise.
*/
boolean databaseExists(String databaseName);
/**
* Updates existing database entity.
* @param databaseEntity databaseEntity to save.
* @return the saved database entity.
*/
PolarisDatabaseEntity saveDatabase(PolarisDatabaseEntity databaseEntity);
/**
* Creates a table entry.
* @param dbName database name
* @param tableName table name
* @param metadataLocation metadata location
* @param createdBy user creating this table.
* @return Polaris Table entity.
*/
PolarisTableEntity createTable(String dbName, String tableName, String metadataLocation, String createdBy);
/**
* Fetches table entry.
* @param dbName database name
* @param tableName table name
* @return Polaris Table entity
*/
Optional<PolarisTableEntity> getTable(String dbName, String tableName);
/**
* Fetch table entities for given database.
* @param databaseName database name
* @param tableNamePrefix table name prefix. can be empty.
* @return table entities in the database.
*/
List<PolarisTableEntity> getTableEntities(final String databaseName, final String tableNamePrefix);
/**
* Updates existing or creates new table entry.
* @param tableEntity tableEntity to save.
* @return The saved entity.
*/
PolarisTableEntity saveTable(PolarisTableEntity tableEntity);
/**
* Deletes the table entry.
* @param dbName database name.
* @param tableName table name.
*/
void deleteTable(String dbName, String tableName);
/**
* Checks if table with the name exists.
* @param databaseName database name of the table to be looked up.
* @param tableName table name to look up.
* @return true, if table exists. false, otherwise.
*/
boolean tableExists(String databaseName, String tableName);
/**
* Gets tables in the database and tableName prefix.
* @param databaseName database name
* @param tableNamePrefix table name prefix
* @return list of table names in the database with the table name prefix.
*/
List<String> getTables(String databaseName, String tableNamePrefix);
/**
* Do an atomic compare-and-swap to update the table's metadata location.
* @param databaseName database name of the table
* @param tableName table name
* @param expectedLocation expected current metadata-location of the table
* @param newLocation new metadata location of the table
* @param lastModifiedBy user updating the location.
* @return true, if update was successful. false, otherwise.
*/
boolean updateTableMetadataLocation(
String databaseName, String tableName,
String expectedLocation, String newLocation,
String lastModifiedBy);
}
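
updateTableMetadataLocation is the optimistic-commit primitive for Iceberg tables: the caller passes the metadata location it last read, and the swap succeeds only if that location is still current. A hedged sketch of the resulting commit loop, assuming store is a PolarisStoreService and writeNewMetadata is a hypothetical stand-in for producing the next metadata file:

// Hypothetical commit loop around the compare-and-swap.
boolean committed = false;
while (!committed) {
    final PolarisTableEntity current = store.getTable("db", "tbl")
        .orElseThrow(IllegalStateException::new);
    final String expected = current.getMetadataLocation();
    final String next = writeNewMetadata(expected); // hypothetical helper
    committed = store.updateTableMetadataLocation(
        "db", "tbl", expected, next, "metacat_user");
    // false means a concurrent writer won the race: re-read and retry.
}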

File: Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/store/PolarisStoreConnector.java

package com.netflix.metacat.connector.polaris.store;
import com.netflix.metacat.connector.polaris.store.entities.AuditEntity;
import com.netflix.metacat.connector.polaris.store.entities.PolarisDatabaseEntity;
import com.netflix.metacat.connector.polaris.store.entities.PolarisTableEntity;
import com.netflix.metacat.connector.polaris.store.repos.PolarisDatabaseRepository;
import com.netflix.metacat.connector.polaris.store.repos.PolarisTableRepository;
import lombok.RequiredArgsConstructor;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.domain.PageRequest;
import org.springframework.data.domain.Pageable;
import org.springframework.data.domain.Slice;
import org.springframework.data.domain.Sort;
import org.springframework.transaction.annotation.Propagation;
import org.springframework.transaction.annotation.Transactional;
import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
/**
* JPA-backed implementation of PolarisStoreService exposing CRUD operations for Polaris database and table entities.
*/
@Transactional(rollbackFor = Exception.class)
@RequiredArgsConstructor(onConstructor = @__(@Autowired))
public class PolarisStoreConnector implements PolarisStoreService {
private final PolarisDatabaseRepository dbRepo;
private final PolarisTableRepository tblRepo;
/**
* Creates entry for new database.
* @param databaseName database name
* @param location the database location.
* @param createdBy user creating this database.
* @return entity
*/
@Override
public PolarisDatabaseEntity createDatabase(final String databaseName,
final String location,
final String createdBy) {
final PolarisDatabaseEntity e = new PolarisDatabaseEntity(databaseName, location, createdBy);
return dbRepo.save(e);
}
/**
* Fetches database entry.
*
* @param databaseName database name
* @return Polaris Database entity
*/
@Override
public Optional<PolarisDatabaseEntity> getDatabase(final String databaseName) {
return dbRepo.findByDbName(databaseName);
}
/**
* Deletes the database entry.
*
* @param dbName database name.
*/
@Override
public void deleteDatabase(final String dbName) {
dbRepo.deleteByName(dbName);
}
/**
* Fetches all database entities.
*
* @return Polaris Database entities
*/
@Override
@Transactional(propagation = Propagation.SUPPORTS)
public List<PolarisDatabaseEntity> getAllDatabases() {
final int pageFetchSize = 1000;
final List<PolarisDatabaseEntity> retval = new ArrayList<>();
Pageable page = PageRequest.of(0, pageFetchSize);
boolean hasNext;
do {
final Slice<PolarisDatabaseEntity> dbs = dbRepo.getDatabases(page);
retval.addAll(dbs.toList());
hasNext = dbs.hasNext();
if (hasNext) {
page = dbs.nextPageable();
}
} while (hasNext);
return retval;
}
/**
* Checks if database with the name exists.
*
* @param databaseName database name to look up.
* @return true, if database exists. false, otherwise.
*/
@Override
public boolean databaseExists(final String databaseName) {
return dbRepo.existsByDbName(databaseName);
}
/**
* Updates existing database entity, or creates a new one if not present.
*
* @param databaseEntity databaseEntity to save.
* @return the saved database entity.
*/
@Override
public PolarisDatabaseEntity saveDatabase(final PolarisDatabaseEntity databaseEntity) {
return dbRepo.save(databaseEntity);
}
boolean databaseExistsById(final String dbId) {
return dbRepo.existsById(dbId);
}
/**
* Creates entry for new table.
* @param dbName database name
* @param tableName table name
* @param metadataLocation metadata location of the table.
* @param createdBy user creating this table.
* @return entity corresponding to created table entry
*/
@Override
public PolarisTableEntity createTable(final String dbName,
final String tableName,
final String metadataLocation,
final String createdBy) {
final AuditEntity auditEntity = AuditEntity.builder()
.createdBy(createdBy)
.lastModifiedBy(createdBy)
.build();
final PolarisTableEntity e = PolarisTableEntity.builder()
.audit(auditEntity)
.dbName(dbName)
.tblName(tableName)
.metadataLocation(metadataLocation)
.build();
return tblRepo.save(e);
}
/**
* Fetches table entry.
*
* @param dbName database name
* @param tableName table name
* @return Polaris Table entity
*/
@Override
public Optional<PolarisTableEntity> getTable(final String dbName, final String tableName) {
return tblRepo.findByDbNameAndTblName(dbName, tableName);
}
/**
* Fetch table entities for given database.
* @param databaseName database name
* @param tableNamePrefix table name prefix. can be empty.
* @return table entities in the database.
*/
@Override
@Transactional(propagation = Propagation.SUPPORTS)
public List<PolarisTableEntity> getTableEntities(final String databaseName, final String tableNamePrefix) {
final int pageFetchSize = 1000;
final List<PolarisTableEntity> retval = new ArrayList<>();
final String tblPrefix = tableNamePrefix == null ? "" : tableNamePrefix;
Pageable page = PageRequest.of(0, pageFetchSize, Sort.by("tblName").ascending());
Slice<PolarisTableEntity> tbls;
boolean hasNext;
do {
tbls = tblRepo.findAllTablesByDbNameAndTablePrefix(databaseName, tblPrefix, page);
retval.addAll(tbls.toList());
hasNext = tbls.hasNext();
if (hasNext) {
page = tbls.nextPageable();
}
} while (hasNext);
return retval;
}
/**
* Updates existing or creates new table entry.
*
* @param tableEntity tableEntity to save.
* @return The saved entity.
*/
@Override
public PolarisTableEntity saveTable(final PolarisTableEntity tableEntity) {
return tblRepo.save(tableEntity);
}
/**
* Deletes entry for table.
* @param dbName database name
* @param tableName table name
*/
@Override
public void deleteTable(final String dbName, final String tableName) {
tblRepo.deleteByName(dbName, tableName);
}
/**
* Checks if table with the name exists.
*
* @param databaseName database name of the table to be looked up.
* @param tableName table name to look up.
* @return true, if table exists. false, otherwise.
*/
@Override
public boolean tableExists(final String databaseName, final String tableName) {
return tblRepo.existsByDbNameAndTblName(databaseName, tableName);
}
boolean tableExistsById(final String tblId) {
return tblRepo.existsById(tblId);
}
/**
* Fetch table names for given database.
* @param databaseName database name
* @param tableNamePrefix table name prefix. can be empty.
* @return table names in the database.
*/
@Override
@Transactional(propagation = Propagation.SUPPORTS)
public List<String> getTables(final String databaseName, final String tableNamePrefix) {
final int pageFetchSize = 1000;
final List<String> retval = new ArrayList<>();
final String tblPrefix = tableNamePrefix == null ? "" : tableNamePrefix;
Pageable page = PageRequest.of(0, pageFetchSize, Sort.by("tblName").ascending());
Slice<String> tblNames = null;
boolean hasNext = true;
do {
tblNames = tblRepo.findAllByDbNameAndTablePrefix(databaseName, tblPrefix, page);
retval.addAll(tblNames.toList());
hasNext = tblNames.hasNext();
if (hasNext) {
page = tblNames.nextPageable();
}
} while (hasNext);
return retval;
}
/**
* Do an atomic compare-and-swap to update the table's metadata location.
*
* @param databaseName database name of the table
* @param tableName table name
* @param expectedLocation expected current metadata-location of the table
* @param newLocation new metadata location of the table
* @param lastModifiedBy user updating the location.
* @return true, if update was successful. false, otherwise.
*/
@Override
public boolean updateTableMetadataLocation(
final String databaseName, final String tableName,
final String expectedLocation, final String newLocation,
final String lastModifiedBy) {
final int updatedRowCount =
tblRepo.updateMetadataLocation(databaseName, tableName,
expectedLocation, newLocation, lastModifiedBy, Instant.now());
return updatedRowCount > 0;
}
}

File: Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/store/package-info.java

/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Polaris data classes.
*/
package com.netflix.metacat.connector.polaris.store;

File: Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/store/repos/PolarisDatabaseRepository.java

package com.netflix.metacat.connector.polaris.store.repos;
import com.netflix.metacat.connector.polaris.store.entities.PolarisDatabaseEntity;
import org.springframework.data.domain.Pageable;
import org.springframework.data.domain.Slice;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.data.jpa.repository.JpaSpecificationExecutor;
import org.springframework.data.jpa.repository.Modifying;
import org.springframework.data.jpa.repository.Query;
import org.springframework.data.repository.query.Param;
import org.springframework.stereotype.Repository;
import java.util.Optional;
/**
* JPA repository implementation for storing PolarisDatabaseEntity.
*/
@Repository
public interface PolarisDatabaseRepository extends JpaRepository<PolarisDatabaseEntity, String>,
JpaSpecificationExecutor {
/**
* Fetch database entry.
* @param dbName database name
* @return database entry, if found
*/
Optional<PolarisDatabaseEntity> findByDbName(@Param("dbName") final String dbName);
/**
* Check if database with that name exists.
* @param dbName database name to look up.
* @return true, if database exists. false, otherwise.
*/
boolean existsByDbName(@Param("dbName") final String dbName);
/**
* Delete database entry by name.
* @param dbName database name.
*/
@Modifying
@Query("DELETE FROM PolarisDatabaseEntity e WHERE e.dbName = :dbName")
void deleteByName(@Param("dbName") final String dbName);
/**
* Fetch databases.
* @param page pageable.
* @return database entities.
*/
@Query("SELECT e FROM PolarisDatabaseEntity e")
Slice<PolarisDatabaseEntity> getDatabases(Pageable page);
}

File: Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/store/repos/PolarisTableRepository.java

package com.netflix.metacat.connector.polaris.store.repos;
import com.netflix.metacat.connector.polaris.store.entities.PolarisTableEntity;
import org.springframework.data.domain.Pageable;
import org.springframework.data.domain.Slice;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.data.jpa.repository.JpaSpecificationExecutor;
import org.springframework.data.jpa.repository.Modifying;
import org.springframework.data.jpa.repository.Query;
import org.springframework.data.repository.query.Param;
import org.springframework.stereotype.Repository;
import org.springframework.transaction.annotation.Transactional;
import java.time.Instant;
import java.util.Optional;
/**
* JPA repository implementation for storing PolarisTableEntity.
*/
@Repository
public interface PolarisTableRepository extends JpaRepository<PolarisTableEntity, String>,
JpaSpecificationExecutor {
/**
* Delete table entry by name.
* @param dbName database name.
* @param tblName table name.
*/
@Modifying
@Query("DELETE FROM PolarisTableEntity e WHERE e.dbName = :dbName AND e.tblName = :tblName")
@Transactional
void deleteByName(
@Param("dbName") final String dbName,
@Param("tblName") final String tblName);
/**
* Fetch table names in database.
* @param dbName database name
* @param tableNamePrefix table name prefix. can be empty.
* @param page pageable.
* @return table names that belong to the database.
*/
@Query("SELECT e.tblName FROM PolarisTableEntity e WHERE e.dbName = :dbName AND e.tblName LIKE :tableNamePrefix%")
Slice<String> findAllByDbNameAndTablePrefix(
@Param("dbName") final String dbName,
@Param("tableNamePrefix") final String tableNamePrefix,
Pageable page);
/**
* Fetch table entry.
* @param dbName database name
* @param tblName table name
* @return optional table entry
*/
Optional<PolarisTableEntity> findByDbNameAndTblName(
@Param("dbName") final String dbName,
@Param("tblName") final String tblName);
/**
* Checks if table with the database name and table name exists.
* @param dbName database name of the table to be looked up.
* @param tblName table name to be looked up.
* @return true, if table exists. false, otherwise.
*/
boolean existsByDbNameAndTblName(
@Param("dbName") final String dbName,
@Param("tblName") final String tblName);
/**
* Fetch table entities in database.
* @param dbName database name
* @param tableNamePrefix table name prefix. can be empty.
* @param page pageable.
* @return table entities that belong to the database.
*/
@Query("SELECT e FROM PolarisTableEntity e WHERE e.dbName = :dbName AND e.tblName LIKE :tableNamePrefix%")
Slice<PolarisTableEntity> findAllTablesByDbNameAndTablePrefix(
@Param("dbName") final String dbName,
@Param("tableNamePrefix") final String tableNamePrefix,
Pageable page);
/**
* Do an atomic compare-and-swap on the metadata location of the table.
* @param dbName database name of the table
* @param tableName table name
* @param expectedLocation expected metadata location before the update is done.
* @param newLocation new metadata location of the table.
* @param lastModifiedBy user updating the location.
* @param lastModifiedDate timestamp for when the location was updated.
* @return number of rows that are updated.
*/
@Modifying(flushAutomatically = true, clearAutomatically = true)
@Query("UPDATE PolarisTableEntity t SET t.metadataLocation = :newLocation, "
+ "t.audit.lastModifiedBy = :lastModifiedBy, t.audit.lastModifiedDate = :lastModifiedDate, "
+ "t.previousMetadataLocation = t.metadataLocation, t.version = t.version + 1 "
+ "WHERE t.metadataLocation = :expectedLocation AND t.dbName = :dbName AND t.tblName = :tableName")
@Transactional
int updateMetadataLocation(
@Param("dbName") final String dbName,
@Param("tableName") final String tableName,
@Param("expectedLocation") final String expectedLocation,
@Param("newLocation") final String newLocation,
@Param("lastModifiedBy") final String lastModifiedBy,
@Param("lastModifiedDate") final Instant lastModifiedDate);
}

File: Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/store/repos/package-info.java

/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Polaris repo classes.
*/
package com.netflix.metacat.connector.polaris.store.repos;

File: Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/store/entities/PolarisTableEntity.java

package com.netflix.metacat.connector.polaris.store.entities;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.Setter;
import lombok.ToString;
import org.hibernate.annotations.GenericGenerator;
import org.springframework.data.jpa.domain.support.AuditingEntityListener;
import javax.persistence.Basic;
import javax.persistence.Column;
import javax.persistence.Embedded;
import javax.persistence.Entity;
import javax.persistence.EntityListeners;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.Table;
import javax.persistence.Version;
/**
* Entity class for Table object.
*/
@Getter
@AllArgsConstructor
@NoArgsConstructor
@Builder(toBuilder = true)
@EqualsAndHashCode
@Entity
@ToString(callSuper = true)
@Table(name = "TBLS")
@EntityListeners(AuditingEntityListener.class)
public class PolarisTableEntity {
@Version
private Long version;
@Basic
@Id
@GeneratedValue(generator = "uuid")
@GenericGenerator(name = "uuid", strategy = "uuid2")
@Column(name = "id", nullable = false, unique = true, updatable = false)
private String tblId;
@Basic
@Column(name = "db_name", nullable = false, updatable = false)
private String dbName;
@Basic
@Setter
@Column(name = "tbl_name", nullable = false)
private String tblName;
@Basic
@Setter
@Column(name = "previous_metadata_location", nullable = true, updatable = true)
private String previousMetadataLocation;
@Basic
@Setter
@Column(name = "metadata_location", nullable = true, updatable = true)
private String metadataLocation;
@Embedded
private AuditEntity audit;
/**
* Constructor for Polaris Table Entity.
*
* @param dbName database name
* @param tblName table name
* @param createdBy user that created this entity.
*/
public PolarisTableEntity(final String dbName,
final String tblName,
final String createdBy) {
this.dbName = dbName;
this.tblName = tblName;
this.audit = AuditEntity
.builder()
.createdBy(createdBy)
.lastModifiedBy(createdBy)
.build();
}
}
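
The @Version column adds JPA optimistic locking on top of the explicit compare-and-swap in the repository layer: if two sessions load the same row and both save, the second save fails instead of silently overwriting. Sketch of the sequence, per standard Spring Data JPA semantics:

// Hypothetical concurrent update; both copies loaded at version 0.
// session A: tblRepo.save(copyA); // succeeds, row version becomes 1
// session B: tblRepo.save(copyB); // throws ObjectOptimisticLockingFailureException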

File: Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/store/entities/PolarisDatabaseEntity.java

package com.netflix.metacat.connector.polaris.store.entities;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.ToString;
import org.hibernate.annotations.GenericGenerator;
import org.springframework.data.jpa.domain.support.AuditingEntityListener;
import javax.persistence.Basic;
import javax.persistence.Column;
import javax.persistence.Embedded;
import javax.persistence.Entity;
import javax.persistence.EntityListeners;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.Table;
import javax.persistence.Version;
/**
* Entity class for Database object.
*/
@Getter
@AllArgsConstructor
@NoArgsConstructor
@Builder(toBuilder = true)
@EqualsAndHashCode
@Entity
@ToString(callSuper = true)
@Table(name = "DBS")
@EntityListeners(AuditingEntityListener.class)
public class PolarisDatabaseEntity {
@Version
private Long version;
@Basic
@Id
@GeneratedValue(generator = "uuid")
@GenericGenerator(name = "uuid", strategy = "uuid2")
@Column(name = "id", nullable = false, unique = true, updatable = false)
private String dbId;
@Basic
@Column(name = "name", nullable = false, unique = true, updatable = false)
private String dbName;
@Basic
@Column(name = "location", updatable = false)
private String location;
@Embedded
private AuditEntity audit;
/**
* Constructor for Polaris Database Entity.
*
* @param dbName database name
* @param location database location.
* @param createdBy user that created this entity.
*/
public PolarisDatabaseEntity(final String dbName,
final String location,
final String createdBy) {
this.dbName = dbName;
this.location = location;
this.audit = AuditEntity
.builder()
.createdBy(createdBy)
.lastModifiedBy(createdBy)
.build();
}
}

File: Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/store/entities/AuditEntity.java

package com.netflix.metacat.connector.polaris.store.entities;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.Setter;
import lombok.ToString;
import org.springframework.data.annotation.CreatedDate;
import org.springframework.data.annotation.LastModifiedDate;
import javax.persistence.Basic;
import javax.persistence.Column;
import javax.persistence.Embeddable;
import java.time.Instant;
/**
* Embeddable audit entity.
*
* @author rveeramacheneni
*/
@Embeddable
@Getter
@Setter
@Builder
@AllArgsConstructor
@NoArgsConstructor
@EqualsAndHashCode
@ToString(of = {
"createdBy",
"lastModifiedBy",
"createdDate",
"lastModifiedDate"
})
public class AuditEntity {
@Basic
@Column(name = "created_by")
private String createdBy;
@Basic
@Column(name = "last_updated_by")
private String lastModifiedBy;
@Basic
@Column(name = "created_date", updatable = false, nullable = false)
@CreatedDate
private Instant createdDate;
@Basic
@Column(name = "last_updated_date", nullable = false)
@LastModifiedDate
private Instant lastModifiedDate;
}

File: Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/store/entities/package-info.java

/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Polaris entity classes.
*/
package com.netflix.metacat.connector.polaris.store.entities;

File: Create_ds/metacat/metacat-connector-snowflake/src/main/java/com/netflix/metacat/connector/snowflake/SnowflakeExceptionMapper.java

/*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.snowflake;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.DatabaseAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException;
import com.netflix.metacat.common.server.connectors.exception.TableAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.connector.jdbc.JdbcExceptionMapper;
import java.sql.SQLException;
/**
* Exception mapper for Snowflake SQLExceptions.
*
* @author amajumdar
* @see SQLException
* @see ConnectorException
* @since 1.2.0
*/
public class SnowflakeExceptionMapper implements JdbcExceptionMapper {
/**
* {@inheritDoc}
*/
@Override
public ConnectorException toConnectorException(
final SQLException se,
final QualifiedName name
) {
final int errorCode = se.getErrorCode();
switch (errorCode) {
case 2042: //database already exists
return new DatabaseAlreadyExistsException(name, se);
case 2002: //table already exists
return new TableAlreadyExistsException(name, se);
case 2043: //database does not exist
return new DatabaseNotFoundException(name, se);
case 2003: //table doesn't exist
return new TableNotFoundException(name, se);
default:
return new ConnectorException(se.getMessage(), se);
}
}
}
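
A small mapping sketch using a synthetic SQLException (vendor error code 2042 is one the switch above handles; the SQLSTATE string here is illustrative only):

import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.connector.snowflake.SnowflakeExceptionMapper;
import java.sql.SQLException;

public class SnowflakeMappingSketch {
    public static void main(final String[] args) {
        final SnowflakeExceptionMapper mapper = new SnowflakeExceptionMapper();
        // SQLException(reason, sqlState, vendorCode); 2042 = database already exists.
        final SQLException se =
            new SQLException("Database MYDB already exists.", "42710", 2042);
        final ConnectorException ce = mapper.toConnectorException(
            se, QualifiedName.ofDatabase("snowflake", "mydb"));
        System.out.println(ce.getClass().getSimpleName());
        // prints: DatabaseAlreadyExistsException
    }
}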

File: Create_ds/metacat/metacat-connector-snowflake/src/main/java/com/netflix/metacat/connector/snowflake/SnowflakeConnectorModule.java

/*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.snowflake;
import com.google.inject.AbstractModule;
import com.google.inject.Scopes;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.ConnectorPartitionService;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.ConnectorUtils;
import com.netflix.metacat.common.server.util.DataSourceManager;
import com.netflix.metacat.connector.jdbc.JdbcExceptionMapper;
import com.netflix.metacat.connector.jdbc.JdbcTypeConverter;
import com.netflix.metacat.connector.jdbc.services.JdbcConnectorDatabaseService;
import com.netflix.metacat.connector.jdbc.services.JdbcConnectorPartitionService;
import javax.sql.DataSource;
import java.util.Map;
/**
* Guice module for the Snowflake Connector.
*
* @author amajumdar
* @since 1.2.0
*/
public class SnowflakeConnectorModule extends AbstractModule {
private final String catalogShardName;
private final Map<String, String> configuration;
/**
* Constructor.
*
* @param catalogShardName unique catalog shard name
* @param configuration connector configuration
*
*/
public SnowflakeConnectorModule(
final String catalogShardName,
final Map<String, String> configuration
) {
this.catalogShardName = catalogShardName;
this.configuration = configuration;
}
/**
* {@inheritDoc}
*/
@Override
protected void configure() {
this.bind(DataSource.class).toInstance(DataSourceManager.get()
.load(this.catalogShardName, this.configuration).get(this.catalogShardName));
this.bind(JdbcTypeConverter.class).to(SnowflakeTypeConverter.class).in(Scopes.SINGLETON);
this.bind(JdbcExceptionMapper.class).to(SnowflakeExceptionMapper.class).in(Scopes.SINGLETON);
this.bind(ConnectorDatabaseService.class)
.to(ConnectorUtils.getDatabaseServiceClass(this.configuration, JdbcConnectorDatabaseService.class))
.in(Scopes.SINGLETON);
this.bind(ConnectorTableService.class)
.to(ConnectorUtils.getTableServiceClass(this.configuration, SnowflakeConnectorTableService.class))
.in(Scopes.SINGLETON);
this.bind(ConnectorPartitionService.class)
.to(ConnectorUtils.getPartitionServiceClass(this.configuration, JdbcConnectorPartitionService.class))
.in(Scopes.SINGLETON);
}
}

File: Create_ds/metacat/metacat-connector-snowflake/src/main/java/com/netflix/metacat/connector/snowflake/SnowflakeTypeConverter.java

/*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.snowflake;
import com.netflix.metacat.common.type.BaseType;
import com.netflix.metacat.common.type.CharType;
import com.netflix.metacat.common.type.DecimalType;
import com.netflix.metacat.common.type.Type;
import com.netflix.metacat.common.type.VarcharType;
import com.netflix.metacat.connector.jdbc.JdbcTypeConverter;
import lombok.extern.slf4j.Slf4j;
/**
* Type converter for Snowflake.
*
* @author amajumdar
* @since 1.2.0
*/
@Slf4j
public class SnowflakeTypeConverter extends JdbcTypeConverter {
static final int DEFAULT_CHARACTER_LENGTH = 256;
private static final String DEFAULT_CHARACTER_LENGTH_STRING = Integer.toString(DEFAULT_CHARACTER_LENGTH);
/**
* {@inheritDoc}
*
* @see <a href="https://docs.snowflake.net/manuals/sql-reference/data-types.html">Snowflake Types</a>
*/
@Override
public Type toMetacatType(final String type) {
final String lowerType = type.toLowerCase();
// Split up the possible type: TYPE[(size, magnitude)] EXTRA
final String[] splitType = this.splitType(lowerType);
switch (splitType[0]) {
case "smallint":
case "tinyint":
case "byteint":
return BaseType.SMALLINT;
case "int":
case "integer":
return BaseType.INT;
case "bigint":
return BaseType.BIGINT;
case "number":
case "decimal":
case "numeric":
return this.toMetacatDecimalType(splitType);
case "real":
case "float4":
return BaseType.FLOAT;
case "double":
case "double precision":
case "float8":
case "float":
return BaseType.DOUBLE;
case "varchar":
fixDataSizeIfIncorrect(splitType);
return this.toMetacatVarcharType(splitType);
case "text":
case "string":
                // "text" and "string" are essentially aliases for VARCHAR(256)
splitType[1] = DEFAULT_CHARACTER_LENGTH_STRING;
return this.toMetacatVarcharType(splitType);
case "character":
case "char":
fixDataSizeIfIncorrect(splitType);
return this.toMetacatCharType(splitType);
case "binary":
case "varbinary":
fixDataSizeIfIncorrect(splitType);
return this.toMetacatVarbinaryType(splitType);
case "timestamp":
case "datetime":
case "timestamp_ntz":
case "timestampntz":
case "timestamp without time zone":
return this.toMetacatTimestampType(splitType);
case "timestamp_tz":
case "timestamptz":
case "timestampltz":
case "timestamp_ltz":
case "timestamp with local time zone":
case "timestamp with time zone":
return BaseType.TIMESTAMP_WITH_TIME_ZONE;
case "date":
return BaseType.DATE;
case "boolean":
return BaseType.BOOLEAN;
default:
log.info("Unhandled or unknown Snowflake type {}", splitType[0]);
return BaseType.UNKNOWN;
}
}
private void fixDataSizeIfIncorrect(final String[] splitType) {
        //
        // Snowflake may report a missing or non-positive size for character/binary types;
        // fall back to the default length instead of failing the conversion.
        // TODO: Remove this workaround once the underlying metadata issue is fixed.
        //
if (splitType[1] == null || Integer.parseInt(splitType[1]) <= 0) {
splitType[1] = DEFAULT_CHARACTER_LENGTH_STRING;
}
}
/**
* {@inheritDoc}
*/
@Override
public String fromMetacatType(final Type type) {
switch (type.getTypeSignature().getBase()) {
case ARRAY:
throw new UnsupportedOperationException("Snowflake doesn't support array types");
case BIGINT:
return "NUMBER(38)";
case BOOLEAN:
return "BOOLEAN";
case CHAR:
if (!(type instanceof CharType)) {
throw new IllegalArgumentException("Expected CHAR type but was " + type.getClass().getName());
}
final CharType charType = (CharType) type;
return "CHAR(" + charType.getLength() + ")";
case DATE:
return "DATE";
case DECIMAL:
if (!(type instanceof DecimalType)) {
throw new IllegalArgumentException("Expected decimal type but was " + type.getClass().getName());
}
final DecimalType decimalType = (DecimalType) type;
return "DECIMAL(" + decimalType.getPrecision() + ", " + decimalType.getScale() + ")";
case DOUBLE:
case FLOAT:
return "DOUBLE PRECISION";
case INT:
return "INT";
case INTERVAL_DAY_TO_SECOND:
throw new UnsupportedOperationException("Snowflake doesn't support interval types");
case INTERVAL_YEAR_TO_MONTH:
throw new UnsupportedOperationException("Snowflake doesn't support interval types");
case JSON:
throw new UnsupportedOperationException("Snowflake doesn't support JSON types");
case MAP:
throw new UnsupportedOperationException("Snowflake doesn't support MAP types");
case ROW:
throw new UnsupportedOperationException("Snowflake doesn't support ROW types");
case SMALLINT:
return "SMALLINT";
case STRING:
return "STRING";
case TIME:
case TIME_WITH_TIME_ZONE:
return "TIME";
case TIMESTAMP:
return "TIMESTAMP";
case TIMESTAMP_WITH_TIME_ZONE:
return "TIMESTAMPTZ";
case TINYINT:
return "SMALLINT";
case UNKNOWN:
throw new IllegalArgumentException("Can't map an unknown type");
case VARBINARY:
return "VARBINARY";
case VARCHAR:
if (!(type instanceof VarcharType)) {
throw new IllegalArgumentException("Expected varchar type but was " + type.getClass().getName());
}
final VarcharType varcharType = (VarcharType) type;
return "VARCHAR(" + varcharType.getLength() + ")";
default:
throw new IllegalArgumentException("Unknown type " + type.getTypeSignature().getBase());
}
}
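    // Illustrative only: a hedged round-trip sketch. Exact parsing of the size/precision part is
    // delegated to the base JdbcTypeConverter, so treat the results below as expected examples.
    //
    //   final SnowflakeTypeConverter converter = new SnowflakeTypeConverter();
    //   final Type decimal = converter.toMetacatType("number(10,2)"); // Metacat decimal(10,2)
    //   converter.fromMetacatType(decimal);                           // "DECIMAL(10, 2)"
    //   converter.toMetacatType("text");                              // varchar(256) by default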
}
| 25 |
0 | Create_ds/metacat/metacat-connector-snowflake/src/main/java/com/netflix/metacat/connector | Create_ds/metacat/metacat-connector-snowflake/src/main/java/com/netflix/metacat/connector/snowflake/SnowflakeConnectorPlugin.java | /*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.snowflake;
import com.netflix.metacat.common.server.connectors.ConnectorFactory;
import com.netflix.metacat.common.server.connectors.ConnectorPlugin;
import com.netflix.metacat.common.server.connectors.ConnectorTypeConverter;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import lombok.NonNull;
import javax.annotation.Nonnull;
/**
* Snowflake Connector Plugin.
*
* @author amajumdar
* @since 1.2.0
*/
public class SnowflakeConnectorPlugin implements ConnectorPlugin {
private static final String CONNECTOR_TYPE = "snowflake";
private static final SnowflakeTypeConverter TYPE_CONVERTER = new SnowflakeTypeConverter();
/**
* {@inheritDoc}
*/
@Override
public String getType() {
return CONNECTOR_TYPE;
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorFactory create(@Nonnull @NonNull final ConnectorContext connectorContext) {
return new SnowflakeConnectorFactory(connectorContext.getCatalogName(),
connectorContext.getCatalogShardName(), connectorContext.getConfiguration());
}
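    // Illustrative only: the Metacat server normally discovers plugins by type and supplies the
    // ConnectorContext; the context construction is elided here as it is server-specific.
    //
    //   final SnowflakeConnectorPlugin plugin = new SnowflakeConnectorPlugin();
    //   final ConnectorFactory factory = plugin.create(connectorContext); // context assumed given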
/**
* {@inheritDoc}
*/
@Override
public ConnectorTypeConverter getTypeConverter() {
return TYPE_CONVERTER;
}
}
| 26 |
0 | Create_ds/metacat/metacat-connector-snowflake/src/main/java/com/netflix/metacat/connector | Create_ds/metacat/metacat-connector-snowflake/src/main/java/com/netflix/metacat/connector/snowflake/SnowflakeConnectorTableService.java | /*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.snowflake;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.model.AuditInfo;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.connector.jdbc.JdbcExceptionMapper;
import com.netflix.metacat.connector.jdbc.JdbcTypeConverter;
import com.netflix.metacat.connector.jdbc.services.JdbcConnectorTableService;
import com.netflix.metacat.connector.jdbc.services.JdbcConnectorUtils;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import javax.inject.Inject;
import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.List;
/**
* Snowflake table service implementation.
*
* @author amajumdar
* @since 1.2.0
*/
@Slf4j
public class SnowflakeConnectorTableService extends JdbcConnectorTableService {
private static final String COL_CREATED = "CREATED";
private static final String COL_LAST_ALTERED = "LAST_ALTERED";
private static final String SQL_GET_AUDIT_INFO
= "select created, last_altered from information_schema.tables"
+ " where table_schema=? and table_name=?";
private static final String JDBC_UNDERSCORE = "_";
private static final String JDBC_ESCAPE_UNDERSCORE = "\\_";
/**
* Constructor.
*
* @param dataSource the datasource to use to connect to the database
* @param typeConverter The type converter to use from the SQL type to Metacat canonical type
* @param exceptionMapper The exception mapper to use
*/
@Inject
public SnowflakeConnectorTableService(
final DataSource dataSource,
final JdbcTypeConverter typeConverter,
final JdbcExceptionMapper exceptionMapper
) {
super(dataSource, typeConverter, exceptionMapper);
}
/**
* {@inheritDoc}
*/
@Override
public void delete(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
super.delete(context, getSnowflakeName(name));
}
/**
     * Returns the Snowflake representation of the name, which is always uppercase.
*
* @param name qualified name
* @return qualified name
*/
private QualifiedName getSnowflakeName(final QualifiedName name) {
return name.cloneWithUpperCase();
}
/**
* Returns a normalized string that escapes JDBC special characters like "_".
*
* @param input object name.
* @return the normalized string.
*/
private static String getJdbcNormalizedSnowflakeName(final String input) {
if (!StringUtils.isBlank(input) && input.contains(JDBC_UNDERSCORE)) {
return StringUtils.replace(input, JDBC_UNDERSCORE, JDBC_ESCAPE_UNDERSCORE);
}
return input;
}
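    // Illustrative only: escaping keeps "_" from acting as a single-character JDBC wildcard in
    // DatabaseMetaData lookups, e.g. getJdbcNormalizedSnowflakeName("my_table") returns the
    // string my\_table (a literal backslash before the underscore).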
/**
* {@inheritDoc}
*/
@Override
public TableInfo get(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
return super.get(context, getSnowflakeName(name));
}
/**
* {@inheritDoc}
*/
@Override
public List<TableInfo> list(@Nonnull final ConnectorRequestContext context,
@Nonnull final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable) {
return super.list(context, getSnowflakeName(name), prefix, sort, pageable);
}
/**
* {@inheritDoc}
*/
@Override
public List<QualifiedName> listNames(@Nonnull final ConnectorRequestContext context,
@Nonnull final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable) {
        return super.listNames(context, getSnowflakeName(name), prefix, sort, pageable);
}
/**
* {@inheritDoc}
*/
@Override
public void rename(@Nonnull final ConnectorRequestContext context,
@Nonnull final QualifiedName oldName,
@Nonnull final QualifiedName newName) {
super.rename(context, getSnowflakeName(oldName), getSnowflakeName(newName));
}
@Override
protected Connection getConnection(@Nonnull @NonNull final String schema) throws SQLException {
final Connection connection = this.dataSource.getConnection();
connection.setSchema(connection.getCatalog());
return connection;
}
/**
* {@inheritDoc}
*/
@Override
public boolean exists(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
boolean result = false;
final QualifiedName sName = getSnowflakeName(name);
        try (Connection connection = this.dataSource.getConnection();
             ResultSet rs = getTables(connection, sName, sName, false)) {
            if (rs.next()) {
                result = true;
            }
} catch (final SQLException se) {
throw this.exceptionMapper.toConnectorException(se, name);
}
return result;
}
@Override
protected ResultSet getColumns(
@Nonnull @NonNull final Connection connection,
@Nonnull @NonNull final QualifiedName name
) throws SQLException {
try {
return connection.getMetaData().getColumns(
connection.getCatalog(),
getJdbcNormalizedSnowflakeName(name.getDatabaseName()),
getJdbcNormalizedSnowflakeName(name.getTableName()),
JdbcConnectorUtils.MULTI_CHARACTER_SEARCH
);
} catch (SQLException e) {
throw this.exceptionMapper.toConnectorException(e, name);
}
}
/**
* {@inheritDoc}
*/
@Override
protected void setTableInfoDetails(final Connection connection, final TableInfo tableInfo) {
final QualifiedName tableName = getSnowflakeName(tableInfo.getName());
try (
PreparedStatement statement = connection.prepareStatement(SQL_GET_AUDIT_INFO)
) {
statement.setString(1, tableName.getDatabaseName());
statement.setString(2, tableName.getTableName());
try (ResultSet resultSet = statement.executeQuery()) {
if (resultSet.next()) {
final AuditInfo auditInfo =
AuditInfo.builder().createdDate(resultSet.getDate(COL_CREATED))
.lastModifiedDate(resultSet.getDate(COL_LAST_ALTERED)).build();
tableInfo.setAudit(auditInfo);
}
}
        } catch (final Exception e) {
            log.info("Ignoring. Error getting the audit info for table {}", tableName, e);
}
}
/**
* {@inheritDoc}
*/
@Override
protected ResultSet getTables(
@Nonnull @NonNull final Connection connection,
@Nonnull @NonNull final QualifiedName name,
@Nullable final QualifiedName prefix
) throws SQLException {
return getTables(connection, name, prefix, true);
}
private ResultSet getTables(
@Nonnull @NonNull final Connection connection,
@Nonnull @NonNull final QualifiedName name,
@Nullable final QualifiedName prefix,
final boolean multiCharacterSearch
) throws SQLException {
final String schema = getJdbcNormalizedSnowflakeName(name.getDatabaseName());
final DatabaseMetaData metaData = connection.getMetaData();
return prefix == null || StringUtils.isEmpty(prefix.getTableName())
? metaData.getTables(connection.getCatalog(), schema, null, TABLE_TYPES)
: metaData
.getTables(
connection.getCatalog(),
schema,
multiCharacterSearch ? getJdbcNormalizedSnowflakeName(prefix.getTableName())
+ JdbcConnectorUtils.MULTI_CHARACTER_SEARCH
: getJdbcNormalizedSnowflakeName(prefix.getTableName()),
TABLE_TYPES
);
}
}
| 27 |
0 | Create_ds/metacat/metacat-connector-snowflake/src/main/java/com/netflix/metacat/connector | Create_ds/metacat/metacat-connector-snowflake/src/main/java/com/netflix/metacat/connector/snowflake/package-info.java | /*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Classes for the Snowflake Connector implementation.
*
* @author amajumdar
* @since 1.2.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.snowflake;
import javax.annotation.ParametersAreNonnullByDefault;
| 28 |
0 | Create_ds/metacat/metacat-connector-snowflake/src/main/java/com/netflix/metacat/connector | Create_ds/metacat/metacat-connector-snowflake/src/main/java/com/netflix/metacat/connector/snowflake/SnowflakeConnectorFactory.java | /*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.snowflake;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.server.connectors.DefaultConnectorFactory;
import java.util.Map;
/**
* Connector Factory for Snowflake.
*
* @author amajumdar
* @since 1.2.0
*/
class SnowflakeConnectorFactory extends DefaultConnectorFactory {
/**
* Constructor.
*
* @param name catalog name
* @param catalogShardName catalog shard name
* @param configuration catalog configuration
*/
SnowflakeConnectorFactory(
final String name,
final String catalogShardName,
final Map<String, String> configuration
) {
super(name, catalogShardName,
Lists.newArrayList(new SnowflakeConnectorModule(catalogShardName, configuration)));
}
}
| 29 |
0 | Create_ds/metacat/metacat-metadata-mysql/src/main/java/com/netflix/metacat/metadata | Create_ds/metacat/metacat-metadata-mysql/src/main/java/com/netflix/metacat/metadata/mysql/MySqlServiceUtil.java | /*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.metadata.mysql;
import com.google.common.collect.Sets;
import com.netflix.metacat.common.server.usermetadata.UserMetadataService;
import com.netflix.metacat.common.server.util.DataSourceManager;
import org.springframework.dao.EmptyResultDataAccessException;
import org.springframework.jdbc.core.JdbcTemplate;
import java.io.InputStream;
import java.net.URL;
import java.nio.file.FileSystems;
import java.util.Properties;
import java.util.Set;
/**
* MySqlServiceUtil.
*
* @author zhenl
* @since 1.1.0
*/
public final class MySqlServiceUtil {
private MySqlServiceUtil() {
}
/**
     * Returns the set of values from the "value" column returned by the given query.
     *
     * @param jdbcTemplate jdbc template
     * @param sql query sql
     * @param item query parameter
     * @return set of result values
*/
public static Set<String> getValues(final JdbcTemplate jdbcTemplate,
final String sql,
final Object item) {
try {
return jdbcTemplate.query(sql, rs -> {
final Set<String> result = Sets.newHashSet();
while (rs.next()) {
result.add(rs.getString("value"));
}
return result;
}, item);
} catch (EmptyResultDataAccessException e) {
return Sets.newHashSet();
}
}
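    // Illustrative only: a hedged usage sketch. The SQL and id below are hypothetical, and the
    // query must alias the projected column as "value" for the extractor above to read it.
    //
    //   final Set<String> tags = MySqlServiceUtil.getValues(jdbcTemplate,
    //       "select tags_string value from tag_item_tags where tag_item_id=?", 42L);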
/**
     * Loads the MySQL data source from the given user metadata config location.
     *
     * @param dataSourceManager data source manager to use
     * @param configLocation usermetadata config location
     * @throws Exception if the config location cannot be read or loaded
*/
public static void loadMySqlDataSource(final DataSourceManager dataSourceManager,
final String configLocation) throws Exception {
URL url = Thread.currentThread().getContextClassLoader().getResource(configLocation);
if (url == null) {
url = FileSystems.getDefault().getPath(configLocation).toUri().toURL();
}
final Properties connectionProperties = new Properties();
try (InputStream is = url.openStream()) {
connectionProperties.load(is);
} catch (Exception e) {
throw new Exception(String.format("Unable to read from user metadata config file %s", configLocation), e);
}
dataSourceManager.load(UserMetadataService.NAME_DATASOURCE, connectionProperties);
}
}
| 30 |
0 | Create_ds/metacat/metacat-metadata-mysql/src/main/java/com/netflix/metacat/metadata | Create_ds/metacat/metacat-metadata-mysql/src/main/java/com/netflix/metacat/metadata/mysql/MySqlLookupService.java | /*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.metadata.mysql;
import com.google.common.base.Joiner;
import com.google.common.collect.Sets;
import com.netflix.metacat.common.server.model.Lookup;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.usermetadata.LookupService;
import com.netflix.metacat.common.server.usermetadata.UserMetadataServiceException;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import lombok.extern.slf4j.Slf4j;
import org.springframework.dao.EmptyResultDataAccessException;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.SqlParameterValue;
import org.springframework.jdbc.support.GeneratedKeyHolder;
import org.springframework.jdbc.support.KeyHolder;
import org.springframework.transaction.annotation.Transactional;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.Types;
import java.util.Set;
import java.util.stream.Collectors;
/**
 * Lookup service implementation backed by MySQL.
*/
@Slf4j
@SuppressFBWarnings
@Transactional("metadataTxManager")
public class MySqlLookupService implements LookupService {
private static final String SQL_GET_LOOKUP =
"select id, name, type, created_by createdBy, last_updated_by lastUpdatedBy, date_created dateCreated,"
+ " last_updated lastUpdated from lookup where name=?";
private static final String SQL_INSERT_LOOKUP =
"insert into lookup( name, version, type, created_by, last_updated_by, date_created, last_updated)"
+ " values (?,0,?,?,?,now(),now())";
private static final String SQL_INSERT_LOOKUP_VALUES =
"insert into lookup_values( lookup_id, values_string) values (?,?)";
private static final String SQL_DELETE_LOOKUP_VALUES =
"delete from lookup_values where lookup_id=? and values_string in (%s)";
private static final String SQL_GET_LOOKUP_VALUES =
"select values_string value from lookup_values where lookup_id=?";
private static final String SQL_GET_LOOKUP_VALUES_BY_NAME =
"select lv.values_string value from lookup l, lookup_values lv where l.id=lv.lookup_id and l.name=?";
private static final String STRING_TYPE = "string";
private final Config config;
private JdbcTemplate jdbcTemplate;
/**
* Constructor.
*
* @param config config
* @param jdbcTemplate jdbc template
*/
public MySqlLookupService(final Config config, final JdbcTemplate jdbcTemplate) {
this.config = config;
this.jdbcTemplate = jdbcTemplate;
}
/**
* Returns the lookup for the given <code>name</code>.
*
* @param name lookup name
* @return lookup
*/
@Override
@Transactional(readOnly = true)
public Lookup get(final String name) {
try {
return jdbcTemplate.queryForObject(
SQL_GET_LOOKUP,
new Object[]{name}, new int[]{Types.VARCHAR},
(rs, rowNum) -> {
final Lookup lookup = new Lookup();
lookup.setId(rs.getLong("id"));
lookup.setName(rs.getString("name"));
lookup.setType(rs.getString("type"));
lookup.setCreatedBy(rs.getString("createdBy"));
lookup.setLastUpdated(rs.getDate("lastUpdated"));
lookup.setLastUpdatedBy(rs.getString("lastUpdatedBy"));
lookup.setDateCreated(rs.getDate("dateCreated"));
lookup.setValues(getValues(rs.getLong("id")));
return lookup;
});
} catch (EmptyResultDataAccessException e) {
return null;
} catch (Exception e) {
final String message = String.format("Failed to get the lookup for name %s", name);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
/**
     * Returns a single value of the lookup. If the lookup has multiple values, an arbitrary one
     * is returned.
     *
     * @param name lookup name
     * @return scalar lookup value, or null if none exists
*/
@Override
@Transactional(readOnly = true)
public String getValue(final String name) {
String result = null;
final Set<String> values = getValues(name);
if (values != null && values.size() > 0) {
result = values.iterator().next();
}
return result;
}
/**
     * Returns the set of values for the given lookup id.
     *
     * @param lookupId lookup id
     * @return set of lookup values
*/
@Override
@Transactional(readOnly = true)
public Set<String> getValues(final Long lookupId) {
try {
return MySqlServiceUtil.getValues(jdbcTemplate, SQL_GET_LOOKUP_VALUES, lookupId);
} catch (EmptyResultDataAccessException e) {
return Sets.newHashSet();
} catch (Exception e) {
final String message = String.format("Failed to get the lookup values for id %s", lookupId);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
/**
     * Returns the set of values for the given lookup name.
     *
     * @param name lookup name
     * @return set of lookup values
*/
@Override
@Transactional(readOnly = true)
public Set<String> getValues(final String name) {
try {
return MySqlServiceUtil.getValues(jdbcTemplate, SQL_GET_LOOKUP_VALUES_BY_NAME, name);
} catch (EmptyResultDataAccessException e) {
return Sets.newHashSet();
} catch (Exception e) {
final String message = String.format("Failed to get the lookup values for name %s", name);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
/**
     * Replaces the values of the lookup with the given set, inserting values that are new and
     * deleting values that are no longer present.
     *
     * @param name lookup name
     * @param values new set of values
     * @return the lookup with the given name
*/
@Override
public Lookup setValues(final String name, final Set<String> values) {
try {
final Lookup lookup = findOrCreateLookupByName(name);
final Set<String> inserts;
Set<String> deletes = Sets.newHashSet();
final Set<String> lookupValues = lookup.getValues();
if (lookupValues == null || lookupValues.isEmpty()) {
inserts = values;
} else {
inserts = Sets.difference(values, lookupValues).immutableCopy();
deletes = Sets.difference(lookupValues, values).immutableCopy();
}
lookup.setValues(values);
if (!inserts.isEmpty()) {
insertLookupValues(lookup.getId(), inserts);
}
if (!deletes.isEmpty()) {
deleteLookupValues(lookup.getId(), deletes);
}
return lookup;
} catch (Exception e) {
final String message = String.format("Failed to set the lookup values for name %s", name);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
private void insertLookupValues(final Long id, final Set<String> inserts) {
jdbcTemplate.batchUpdate(SQL_INSERT_LOOKUP_VALUES, inserts.stream().map(insert -> new Object[]{id, insert})
.collect(Collectors.toList()), new int[]{Types.BIGINT, Types.VARCHAR});
}
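    // Note: the delete below inlines the values into the SQL string via Joiner rather than binding
    // them as parameters, so values containing single quotes would not be escaped here.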
private void deleteLookupValues(final Long id, final Set<String> deletes) {
jdbcTemplate.update(
String.format(SQL_DELETE_LOOKUP_VALUES, "'" + Joiner.on("','").skipNulls().join(deletes) + "'"),
new SqlParameterValue(Types.BIGINT, id));
}
/**
     * Finds the lookup with the given name, creating it if it does not exist.
     *
     * @param name name to find or create
     * @return lookup object
* @throws SQLException sql exception
*/
private Lookup findOrCreateLookupByName(final String name) throws SQLException {
Lookup lookup = get(name);
if (lookup == null) {
final KeyHolder holder = new GeneratedKeyHolder();
jdbcTemplate.update(connection -> {
final PreparedStatement ps = connection.prepareStatement(SQL_INSERT_LOOKUP,
Statement.RETURN_GENERATED_KEYS);
ps.setString(1, name);
ps.setString(2, STRING_TYPE);
ps.setString(3, config.getLookupServiceUserAdmin());
ps.setString(4, config.getLookupServiceUserAdmin());
return ps;
}, holder);
final Long lookupId = holder.getKey().longValue();
lookup = new Lookup();
lookup.setName(name);
lookup.setId(lookupId);
}
return lookup;
}
/**
     * Adds the given values to the lookup, keeping any existing values.
     *
     * @param name lookup name
     * @param values values to add
     * @return the lookup with the given name
*/
@Override
public Lookup addValues(final String name, final Set<String> values) {
try {
final Lookup lookup = findOrCreateLookupByName(name);
final Set<String> inserts;
final Set<String> lookupValues = lookup.getValues();
if (lookupValues == null || lookupValues.isEmpty()) {
inserts = values;
lookup.setValues(values);
} else {
inserts = Sets.difference(values, lookupValues);
}
if (!inserts.isEmpty()) {
insertLookupValues(lookup.getId(), inserts);
}
return lookup;
} catch (Exception e) {
final String message = String.format("Failed to set the lookup values for name %s", name);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
/**
     * Sets the lookup to the single given value, replacing any existing values.
     *
     * @param name lookup name
     * @param value lookup value
     * @return the lookup with the given name
*/
@Override
public Lookup setValue(final String name, final String value) {
return setValues(name, Sets.newHashSet(value));
}
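    // Illustrative only: a hedged sketch of the typical lifecycle; the lookup name and values are
    // hypothetical.
    //
    //   lookupService.setValues("tag", Sets.newHashSet("pii", "audited")); // replaces all values
    //   lookupService.addValues("tag", Sets.newHashSet("deprecated"));     // appends new values
    //   lookupService.getValues("tag"); // -> {"pii", "audited", "deprecated"}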
}
| 31 |
0 | Create_ds/metacat/metacat-metadata-mysql/src/main/java/com/netflix/metacat/metadata | Create_ds/metacat/metacat-metadata-mysql/src/main/java/com/netflix/metacat/metadata/mysql/MySqlTagService.java | /*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.metadata.mysql;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.exception.MetacatBadRequestException;
import com.netflix.metacat.common.json.MetacatJson;
import com.netflix.metacat.common.server.model.Lookup;
import com.netflix.metacat.common.server.model.TagItem;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.usermetadata.LookupService;
import com.netflix.metacat.common.server.usermetadata.TagService;
import com.netflix.metacat.common.server.usermetadata.UserMetadataService;
import com.netflix.metacat.common.server.usermetadata.UserMetadataServiceException;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.springframework.dao.EmptyResultDataAccessException;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.SqlParameterValue;
import org.springframework.jdbc.support.GeneratedKeyHolder;
import org.springframework.jdbc.support.KeyHolder;
import org.springframework.transaction.annotation.Transactional;
import javax.annotation.Nullable;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.Types;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
* Tag service implementation.
*
* @author amajumdar
* @author zhenl
*/
@Slf4j
@SuppressFBWarnings
@Transactional("metadataTxManager")
public class MySqlTagService implements TagService {
/**
* Lookup name for tag.
*/
private static final String LOOKUP_NAME_TAG = "tag";
private static final String NAME_TAGS = "tags";
private static final String QUERY_LIST =
"select distinct i.name from tag_item i, tag_item_tags t where i.id=t.tag_item_id"
+ " and (1=? or t.tags_string in (%s) ) and (1=? or i.name like ?) and (1=? or i.name rlike ?)";
private static final String QUERY_SEARCH =
"select distinct i.name from tag_item i, tag_item_tags t where i.id=t.tag_item_id"
+ " and (1=? or t.tags_string %s ) and (1=? or i.name like ?)";
private static final String SQL_GET_TAG_ITEM =
"select id, name, created_by createdBy, last_updated_by lastUpdatedBy, date_created dateCreated,"
+ " last_updated lastUpdated from tag_item where name=?";
private static final String SQL_INSERT_TAG_ITEM =
"insert into tag_item( name, version, created_by, last_updated_by, date_created, last_updated)"
+ " values (?,0,?,?,now(),now())";
private static final String SQL_UPDATE_TAG_ITEM =
"update tag_item set name=?, last_updated=now() where name=?";
private static final String SQL_INSERT_TAG_ITEM_TAGS =
"insert into tag_item_tags( tag_item_id, tags_string) values (?,?)";
private static final String SQL_DELETE_TAG_ITEM =
"delete from tag_item where name=?";
private static final String SQL_DELETE_TAG_ITEM_TAGS_BY_NAME =
"delete from tag_item_tags where tag_item_id=(select id from tag_item where name=?)";
private static final String SQL_DELETE_TAG_ITEM_TAGS_BY_NAME_TAGS =
"delete from tag_item_tags where tag_item_id=(select id from tag_item where name=?) and tags_string in (%s)";
private static final String SQL_DELETE_TAG_ITEM_TAGS =
"delete from tag_item_tags where tag_item_id=(?) and tags_string in (%s)";
private static final String SQL_GET_TAG_ITEM_TAGS =
"select tags_string value from tag_item_tags where tag_item_id=?";
private static final String EMPTY_CLAUSE = "''";
private static final int MAX_TAGS_LIST_COUNT = 16;
private final Config config;
private final LookupService lookupService;
private final MetacatJson metacatJson;
private final UserMetadataService userMetadataService;
private JdbcTemplate jdbcTemplate;
/**
* Constructor.
*
* @param config config
* @param jdbcTemplate JDBC template
* @param lookupService lookup service
* @param metacatJson json util
* @param userMetadataService user metadata service
*/
public MySqlTagService(
final Config config,
final JdbcTemplate jdbcTemplate,
final LookupService lookupService,
final MetacatJson metacatJson,
final UserMetadataService userMetadataService
) {
this.config = Preconditions.checkNotNull(config, "config is required");
this.jdbcTemplate = jdbcTemplate;
this.lookupService = Preconditions.checkNotNull(lookupService, "lookupService is required");
this.metacatJson = Preconditions.checkNotNull(metacatJson, "metacatJson is required");
this.userMetadataService = Preconditions.checkNotNull(userMetadataService, "userMetadataService is required");
}
private Lookup addTags(final Set<String> tags) {
try {
return lookupService.addValues(LOOKUP_NAME_TAG, tags);
} catch (Exception e) {
final String message = String.format("Failed adding the tags %s", tags);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
/**
* Get the tag item.
*
* @param name name
* @return tag item
*/
public TagItem get(final QualifiedName name) {
return get(name.toString());
}
/**
* Returns the TagItem for the given <code>name</code>.
*
* @param name tag name
* @return TagItem
*/
@Transactional(readOnly = true)
public TagItem get(final String name) {
try {
return jdbcTemplate.queryForObject(
SQL_GET_TAG_ITEM,
new Object[]{name}, new int[]{Types.VARCHAR},
(rs, rowNum) -> {
final TagItem tagItem = new TagItem();
tagItem.setId(rs.getLong("id"));
tagItem.setName(rs.getString("name"));
tagItem.setCreatedBy(rs.getString("createdBy"));
tagItem.setLastUpdated(rs.getDate("lastUpdated"));
tagItem.setLastUpdatedBy(rs.getString("lastUpdatedBy"));
tagItem.setDateCreated(rs.getDate("dateCreated"));
tagItem.setValues(getValues(rs.getLong("id")));
return tagItem;
});
} catch (EmptyResultDataAccessException e) {
return null;
} catch (Exception e) {
final String message = String.format("Failed to get the tag for name %s", name);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
/**
     * Returns the set of tags for the given tag item id.
     *
     * @param tagItemId tag item id
     * @return set of tags
*/
private Set<String> getValues(final Long tagItemId) {
try {
return MySqlServiceUtil.getValues(jdbcTemplate, SQL_GET_TAG_ITEM_TAGS, tagItemId);
} catch (EmptyResultDataAccessException e) {
return Sets.newHashSet();
} catch (Exception e) {
final String message = String.format("Failed to get the tags for id %s", tagItemId);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
/**
     * Finds the tag item with the given name, creating it if it does not exist.
     *
     * @param name name to find or create
     * @return tag item
* @throws SQLException sql exception
*/
private TagItem findOrCreateTagItemByName(final String name) throws SQLException {
TagItem result = get(name);
if (result == null) {
final KeyHolder holder = new GeneratedKeyHolder();
jdbcTemplate.update(connection -> {
final PreparedStatement ps = connection.prepareStatement(SQL_INSERT_TAG_ITEM,
Statement.RETURN_GENERATED_KEYS);
ps.setString(1, name);
ps.setString(2, config.getTagServiceUserAdmin());
ps.setString(3, config.getTagServiceUserAdmin());
return ps;
}, holder);
final Long id = holder.getKey().longValue();
result = new TagItem();
result.setName(name);
result.setId(id);
}
return result;
}
@Override
public void renameTableTags(final QualifiedName name, final String newTableName) {
try {
final QualifiedName newName = QualifiedName.ofTable(name.getCatalogName(), name.getDatabaseName(),
newTableName);
if (get(newName) != null) {
delete(newName, false /*don't delete existing definition metadata with the new name*/);
}
jdbcTemplate.update(SQL_UPDATE_TAG_ITEM, new String[]{newName.toString(), name.toString()},
new int[]{Types.VARCHAR, Types.VARCHAR});
} catch (Exception e) {
final String message = String.format("Failed to rename item name %s", name);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
@Override
public void delete(final QualifiedName name, final boolean updateUserMetadata) {
try {
jdbcTemplate
.update(SQL_DELETE_TAG_ITEM_TAGS_BY_NAME, new SqlParameterValue(Types.VARCHAR, name.toString()));
jdbcTemplate.update(SQL_DELETE_TAG_ITEM, new SqlParameterValue(Types.VARCHAR, name.toString()));
if (updateUserMetadata) {
// Set the tags in user metadata
final Map<String, Set<String>> data = Maps.newHashMap();
data.put(NAME_TAGS, Sets.newHashSet());
userMetadataService
.saveDefinitionMetadata(name, "admin", Optional.of(metacatJson.toJsonObject(data)),
true);
}
} catch (Exception e) {
final String message = String.format("Failed to delete all tags for name %s", name);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
/**
     * Removes the given tags from the item with the given name.
     *
     * @param name qualified name of the item
     * @param tags tags to remove
* @param updateUserMetadata flag to update user metadata
*/
public void remove(final QualifiedName name, final Set<String> tags, final boolean updateUserMetadata) {
try {
final TagItem tagItem = get(name);
if (tagItem == null || tagItem.getValues().isEmpty()) {
log.info(String.format("No tags or tagItems found for table %s", name));
return;
}
final List<SqlParameterValue> params = Lists.newArrayList();
params.add(new SqlParameterValue(Types.VARCHAR, name.toString()));
jdbcTemplate.update(String.format(SQL_DELETE_TAG_ITEM_TAGS_BY_NAME_TAGS,
buildParametrizedInClause(tags, params, params.size())),
params.toArray());
if (updateUserMetadata) {
tagItem.getValues().removeAll(tags);
final Map<String, Set<String>> data = Maps.newHashMap();
data.put(NAME_TAGS, tagItem.getValues());
userMetadataService
.saveDefinitionMetadata(name, "admin", Optional.of(metacatJson.toJsonObject(data)),
true);
}
} catch (Exception e) {
final String message = String.format("Failed to remove tags for name %s", name);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
/**
* Returns the list of tags.
*
* @return list of tag names
*/
@Override
@Transactional(readOnly = true)
public Set<String> getTags() {
return lookupService.getValues(LOOKUP_NAME_TAG);
}
/**
* Returns the list of <code>QualifiedName</code> of items that are tagged by the
* given <code>includeTags</code> and do not contain the given <code>excludeTags</code>.
*
     * @param includeTags include only items that contain these tags
     * @param excludeTags exclude items that contain any of these tags
* @param sourceName catalog/source name
* @param databaseName database name
* @param tableName table name
* @param type metacat data category
* @return list of qualified names of the items
*/
@Override
@Transactional(readOnly = true)
public List<QualifiedName> list(
@Nullable final Set<String> includeTags,
@Nullable final Set<String> excludeTags,
@Nullable final String sourceName,
@Nullable final String databaseName,
@Nullable final String tableName,
@Nullable final QualifiedName.Type type
) {
Set<String> includedNames = Sets.newHashSet();
final Set<String> excludedNames = Sets.newHashSet();
final String wildCardName =
QualifiedName.qualifiedNameToWildCardQueryString(sourceName, databaseName, tableName);
final Set<String> localIncludes = includeTags != null ? includeTags : Sets.newHashSet();
validateRequestTagCount(localIncludes);
try {
includedNames.addAll(queryTaggedItems(wildCardName, type, localIncludes));
if (excludeTags != null && !excludeTags.isEmpty()) {
excludedNames.addAll(queryTaggedItems(wildCardName, type, excludeTags));
}
} catch (Exception e) {
final String message = String.format("Failed getting the list of qualified names for tags %s", includeTags);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
if (excludeTags != null && !excludeTags.isEmpty()) {
includedNames = Sets.difference(includedNames, excludedNames);
}
return includedNames.stream().map(s -> QualifiedName.fromString(s, false)).collect(Collectors.toList());
}
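    // Illustrative only: a hedged sketch of filtering tagged items; the catalog name and tags are
    // hypothetical. Any of the name/type filters may be null to skip that filter.
    //
    //   final List<QualifiedName> names = tagService.list(
    //       Sets.newHashSet("pii"), Sets.newHashSet("deprecated"), "prodhive", null, null, null);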
/**
* Returns the list of <code>QualifiedName</code> of items that have tags containing the given tag text.
*
* @param tag partial text of a tag
* @param sourceName source/catalog name
* @param databaseName database name
* @param tableName table name
* @return list of qualified names of the items
*/
@Override
@Transactional(readOnly = true)
public List<QualifiedName> search(
@Nullable final String tag,
@Nullable final String sourceName,
@Nullable final String databaseName,
@Nullable final String tableName
) {
final Set<String> result = Sets.newHashSet();
try {
final String wildCardName =
QualifiedName.qualifiedNameToWildCardQueryString(sourceName, databaseName, tableName);
//Includes
final String query = String.format(QUERY_SEARCH, "like ?");
final Object[] params = {tag == null ? 1 : 0, tag + "%", wildCardName == null ? 1 : 0, wildCardName};
result.addAll(jdbcTemplate.query(query, params,
new int[]{Types.INTEGER, Types.VARCHAR, Types.INTEGER, Types.VARCHAR},
(rs, rowNum) -> rs.getString("name")));
} catch (Exception e) {
final String message = String.format("Failed getting the list of qualified names for tag %s", tag);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
return result.stream().map(QualifiedName::fromString).collect(Collectors.toList());
}
/**
* Tags the given table with the given <code>tags</code>.
*
     * @param name resource name
     * @param tags set of tags
     * @param updateUserMetadata if true, also saves the tags in the definition metadata
     * @return the complete set of tags associated with the table
*/
@Override
public Set<String> setTags(final QualifiedName name, final Set<String> tags,
final boolean updateUserMetadata) {
addTags(tags);
try {
final TagItem tagItem = findOrCreateTagItemByName(name.toString());
final Set<String> inserts;
Set<String> deletes = Sets.newHashSet();
Set<String> values = tagItem.getValues();
if (values == null || values.isEmpty()) {
inserts = tags;
} else {
inserts = Sets.difference(tags, values).immutableCopy();
deletes = Sets.difference(values, tags).immutableCopy();
}
values = tags;
if (!inserts.isEmpty()) {
insertTagItemTags(tagItem.getId(), inserts);
}
if (!deletes.isEmpty()) {
removeTagItemTags(tagItem.getId(), deletes);
}
if (updateUserMetadata) {
// Set the tags in user metadata
final Map<String, Set<String>> data = Maps.newHashMap();
data.put(NAME_TAGS, values);
userMetadataService
.saveDefinitionMetadata(name, "admin", Optional.of(metacatJson.toJsonObject(data)),
true);
}
} catch (Exception e) {
final String message = String.format("Failed to remove tags for name %s", name);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
return tags;
}
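    // Illustrative only: a hedged sketch of tagging a table; the qualified name and tag are
    // hypothetical.
    //
    //   final QualifiedName table = QualifiedName.ofTable("prodhive", "mydb", "mytable");
    //   tagService.setTags(table, Sets.newHashSet("pii"), true); // also mirrors tags into definition metadata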
private void removeTagItemTags(final Long id, final Set<String> tags) {
final List<SqlParameterValue> params = Lists.newArrayList();
params.add(new SqlParameterValue(Types.BIGINT, id));
jdbcTemplate
.update(String.format(SQL_DELETE_TAG_ITEM_TAGS, buildParametrizedInClause(
tags,
params,
params.size()
)), params.toArray());
}
private void insertTagItemTags(final Long id, final Set<String> tags) {
jdbcTemplate.batchUpdate(SQL_INSERT_TAG_ITEM_TAGS, tags.stream().map(tag -> new Object[]{id, tag})
.collect(Collectors.toList()), new int[]{Types.BIGINT, Types.VARCHAR});
}
/**
* Removes the tags from the given resource.
*
* @param name qualified name
* @param deleteAll if true, will delete all tags associated with the given table
     * @param tags set of tags to be removed for the given table
     * @param updateUserMetadata if true, also updates the tags in the definition metadata
*/
@Override
public void removeTags(final QualifiedName name, final Boolean deleteAll,
final Set<String> tags, final boolean updateUserMetadata) {
if (deleteAll != null && deleteAll) {
delete(name, updateUserMetadata);
} else {
remove(name, tags, updateUserMetadata);
}
}
private List<String> queryTaggedItems(final String name,
final QualifiedName.Type type,
final Set<String> tags) {
final List<SqlParameterValue> sqlParams = Lists.newArrayList();
sqlParams.add(new SqlParameterValue(Types.INTEGER, tags.size() == 0 ? 1 : 0));
final String query = String.format(QUERY_LIST,
buildParametrizedInClause(tags, sqlParams, sqlParams.size()));
sqlParams.addAll(Stream.of(
new SqlParameterValue(Types.INTEGER, name == null ? 1 : 0),
new SqlParameterValue(Types.VARCHAR, name),
new SqlParameterValue(Types.INTEGER, type == null ? 1 : 0),
new SqlParameterValue(Types.VARCHAR, type == null ? ".*" : type.getRegexValue())
).collect(Collectors.toList()));
return jdbcTemplate.query(query,
sqlParams.toArray(),
(rs, rowNum) -> rs.getString("name"));
}
private static String buildParametrizedInClause(final Set<String> tags,
final List<SqlParameterValue> params,
final int index) {
final String tagList = tags.stream().filter(StringUtils::isNotBlank)
.map(v -> "?").collect(Collectors.joining(", "));
params.addAll(index, tags.stream().filter(StringUtils::isNotBlank)
.map(p -> new SqlParameterValue(Types.VARCHAR, p))
.collect(Collectors.toList()));
return StringUtils.isBlank(tagList) ? EMPTY_CLAUSE : tagList;
}
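    // Illustrative only: for tags {"a", "b"} this yields the clause "?, ?" and inserts two VARCHAR
    // parameter values into params; blank tags are skipped, and an empty set yields "''".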
private static void validateRequestTagCount(final Set<String> tags) {
final int totalTags = tags.size();
if (totalTags > MAX_TAGS_LIST_COUNT) {
throw new MetacatBadRequestException(String.format("Too many tags in request. Count %s", totalTags));
}
}
}
| 32 |
0 | Create_ds/metacat/metacat-metadata-mysql/src/main/java/com/netflix/metacat/metadata | Create_ds/metacat/metacat-metadata-mysql/src/main/java/com/netflix/metacat/metadata/mysql/MySqlUserMetadataConfig.java | /*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.metadata.mysql;
import com.netflix.metacat.common.json.MetacatJson;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.properties.MetacatProperties;
import com.netflix.metacat.common.server.usermetadata.MetadataInterceptor;
import com.netflix.metacat.common.server.usermetadata.LookupService;
import com.netflix.metacat.common.server.usermetadata.MetadataInterceptorImpl;
import com.netflix.metacat.common.server.usermetadata.TagService;
import com.netflix.metacat.common.server.usermetadata.UserMetadataService;
import com.netflix.metacat.common.server.util.DataSourceManager;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.datasource.DataSourceTransactionManager;
import javax.sql.DataSource;
/**
* MySql UserMetadata Config.
*
* @author zhenl
* @since 1.1.0
*/
@Configuration
@ConditionalOnProperty(value = "metacat.mysqlmetadataservice.enabled", havingValue = "true")
public class MySqlUserMetadataConfig {
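    // Illustrative only: this configuration is activated by the property named in the annotation
    // above, e.g. in application.properties:
    //
    //   metacat.mysqlmetadataservice.enabled=true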
/**
     * Business metadata interceptor (manager), applied when reading definition metadata.
     * @return business metadata interceptor
*/
@Bean
@ConditionalOnMissingBean(MetadataInterceptor.class)
public MetadataInterceptor businessMetadataManager(
) {
return new MetadataInterceptorImpl();
}
/**
* User Metadata service.
*
* @param jdbcTemplate JDBC template
* @param config System config to use
* @param metacatJson Json Utilities to use
* @param metadataInterceptor business metadata manager
* @return User metadata service based on MySql
*/
@Bean
public UserMetadataService userMetadataService(
@Qualifier("metadataJdbcTemplate") final JdbcTemplate jdbcTemplate,
final Config config,
final MetacatJson metacatJson,
final MetadataInterceptor metadataInterceptor
) {
return new MysqlUserMetadataService(jdbcTemplate, metacatJson, config, metadataInterceptor);
}
/**
* Lookup service.
*
* @param jdbcTemplate JDBC template
* @param config System configuration to use
* @return Lookup service backed by MySQL
*/
@Bean
public LookupService lookupService(
@Qualifier("metadataJdbcTemplate") final JdbcTemplate jdbcTemplate,
final Config config) {
return new MySqlLookupService(config, jdbcTemplate);
}
/**
* The tag service to use.
*
* @param jdbcTemplate JDBC template
* @param config System config to use
* @param metacatJson Json Utilities to use
* @param lookupService Look up service implementation to use
* @param userMetadataService User metadata service implementation to use
* @return The tag service implementation backed by MySQL
*/
@Bean
public TagService tagService(
@Qualifier("metadataJdbcTemplate") final JdbcTemplate jdbcTemplate,
final Config config,
final MetacatJson metacatJson,
final LookupService lookupService,
final UserMetadataService userMetadataService
) {
return new MySqlTagService(config, jdbcTemplate, lookupService, metacatJson, userMetadataService);
}
/**
     * MySQL metadata DataSource.
     *
     * @param dataSourceManager data source manager
     * @param metacatProperties metacat properties
     * @return data source
     * @throws Exception if the user metadata data source configuration cannot be loaded
*/
@Bean
public DataSource metadataDataSource(final DataSourceManager dataSourceManager,
final MetacatProperties metacatProperties) throws Exception {
MySqlServiceUtil.loadMySqlDataSource(dataSourceManager,
metacatProperties.getUsermetadata().getConfig().getLocation());
return dataSourceManager.get(UserMetadataService.NAME_DATASOURCE);
}
/**
     * MySQL metadata transaction manager.
*
* @param mySqlDataSource metadata data source
* @return metadata transaction manager
*/
@Bean
public DataSourceTransactionManager metadataTxManager(
@Qualifier("metadataDataSource") final DataSource mySqlDataSource) {
return new DataSourceTransactionManager(mySqlDataSource);
}
/**
     * MySQL metadata JDBC template.
*
* @param mySqlDataSource metadata data source
* @param config System config to use
* @return metadata JDBC template
*/
@Bean
public JdbcTemplate metadataJdbcTemplate(
@Qualifier("metadataDataSource") final DataSource mySqlDataSource,
final Config config) {
final JdbcTemplate result = new JdbcTemplate(mySqlDataSource);
result.setQueryTimeout(config.getMetadataQueryTimeout());
return result;
}
}
| 33 |
0 | Create_ds/metacat/metacat-metadata-mysql/src/main/java/com/netflix/metacat/metadata | Create_ds/metacat/metacat-metadata-mysql/src/main/java/com/netflix/metacat/metadata/mysql/MysqlUserMetadataService.java | /*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.metadata.mysql;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.base.Joiner;
import com.google.common.base.Strings;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.DefinitionMetadataDto;
import com.netflix.metacat.common.dto.HasDataMetadata;
import com.netflix.metacat.common.dto.HasDefinitionMetadata;
import com.netflix.metacat.common.dto.HasMetadata;
import com.netflix.metacat.common.exception.MetacatBadRequestException;
import com.netflix.metacat.common.json.MetacatJson;
import com.netflix.metacat.common.json.MetacatJsonException;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetadataException;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.usermetadata.BaseUserMetadataService;
import com.netflix.metacat.common.server.usermetadata.GetMetadataInterceptorParameters;
import com.netflix.metacat.common.server.usermetadata.MetadataInterceptor;
import com.netflix.metacat.common.server.usermetadata.UserMetadataServiceException;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import lombok.Data;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.ResultSetExtractor;
import org.springframework.jdbc.core.SqlParameterValue;
import org.springframework.transaction.annotation.Transactional;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.sql.Types;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
/**
* User metadata service.
* <p>
* Definition metadata (business metadata about the logical schema definition) is stored in two tables. Definition
* metadata about the partitions are stored in 'partition_definition_metadata' table. Definition metadata about the
* catalogs, databases and tables are stored in 'definition_metadata' table.
* <p>
 * Data metadata (metadata about the data stored in the location referred to by the schema) is stored in the
 * 'data_metadata' table.
*/
@Slf4j
@SuppressFBWarnings
@Transactional("metadataTxManager")
public class MysqlUserMetadataService extends BaseUserMetadataService {
private static final String NAME_OWNER = "owner";
private static final String NAME_USERID = "userId";
private static final List<String> DEFINITION_METADATA_SORT_BY_COLUMNS = Arrays.asList(
"id", "date_created", "created_by", "last_updated_by", "name", "last_updated");
private static final List<String> VALID_SORT_ORDER = Arrays.asList("ASC", "DESC");
private final MetacatJson metacatJson;
private final Config config;
private JdbcTemplate jdbcTemplate;
private final MetadataInterceptor metadataInterceptor;
/**
* Constructor.
*
* @param jdbcTemplate jdbc template
* @param metacatJson json utility
* @param config config
* @param metadataInterceptor metadata interceptor
*/
public MysqlUserMetadataService(
final JdbcTemplate jdbcTemplate,
final MetacatJson metacatJson,
final Config config,
final MetadataInterceptor metadataInterceptor
) {
this.metacatJson = metacatJson;
this.config = config;
this.jdbcTemplate = jdbcTemplate;
this.metadataInterceptor = metadataInterceptor;
}
@Override
public void saveMetadata(final String userId, final HasMetadata holder, final boolean merge) {
super.saveMetadata(userId, holder, merge);
}
@Override
public void populateMetadata(final HasMetadata holder, final ObjectNode definitionMetadata,
final ObjectNode dataMetadata) {
super.populateMetadata(holder, definitionMetadata, dataMetadata);
}
@Nonnull
@Override
@Transactional(readOnly = true)
public Optional<ObjectNode> getDefinitionMetadataWithInterceptor(
@Nonnull final QualifiedName name,
final GetMetadataInterceptorParameters getMetadataInterceptorParameters) {
        // getDefinitionMetadata does not apply the interceptor, so apply it explicitly on read here
final Optional<ObjectNode> retData = getDefinitionMetadata(name);
retData.ifPresent(objectNode ->
this.metadataInterceptor.onRead(this, name, objectNode, getMetadataInterceptorParameters));
return retData;
}
@Override
public void softDeleteDataMetadata(
final String user,
@Nonnull final List<String> uris
) {
try {
final List<List<String>> subLists = Lists.partition(uris, config.getUserMetadataMaxInClauseItems());
for (List<String> subUris : subLists) {
_softDeleteDataMetadata(user, subUris);
}
} catch (Exception e) {
            final String message = String.format("Failed to delete the data metadata for %s", uris);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
@Override
public void deleteDataMetadata(
@Nonnull final List<String> uris
) {
deleteDataMetadatasWithBatch(uris, true);
}
@Override
public void deleteDataMetadataDeletes(
@Nonnull final List<String> uris
) {
deleteDataMetadatasWithBatch(uris, false);
}
private void deleteDataMetadatasWithBatch(final List<String> uris, final boolean removeDataMetadata) {
try {
final List<List<String>> subLists = Lists.partition(uris, config.getUserMetadataMaxInClauseItems());
for (List<String> subUris : subLists) {
_deleteDataMetadata(subUris, removeDataMetadata);
}
} catch (Exception e) {
            final String message = String.format("Failed to delete the data metadata for %s", uris);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
@Override
public void deleteDefinitionMetadata(
@Nonnull final List<QualifiedName> names
) {
try {
final List<List<QualifiedName>> subLists =
Lists.partition(names, config.getUserMetadataMaxInClauseItems());
for (List<QualifiedName> subNames : subLists) {
_deleteDefinitionMetadata(subNames);
}
} catch (Exception e) {
            final String message = String.format("Failed to delete the definition metadata for %s", names);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
@Override
public void deleteStaleDefinitionMetadata(
@NonNull final String qualifiedNamePattern,
@NonNull final Date lastUpdated) {
if (qualifiedNamePattern == null || lastUpdated == null) {
return;
}
try {
jdbcTemplate.update(SQL.DELETE_DEFINITION_METADATA_STALE, new Object[]{qualifiedNamePattern, lastUpdated},
new int[]{Types.VARCHAR, Types.TIMESTAMP});
} catch (Exception e) {
final String message = String.format("Failed to delete stale definition metadata for pattern %s",
qualifiedNamePattern);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
@Override
public void deleteMetadata(final String userId, final List<HasMetadata> holders) {
try {
final List<List<HasMetadata>> subLists =
Lists.partition(holders, config.getUserMetadataMaxInClauseItems());
for (List<HasMetadata> hasMetadatas : subLists) {
final List<QualifiedName> names = hasMetadatas.stream()
.filter(m -> m instanceof HasDefinitionMetadata)
.map(m -> ((HasDefinitionMetadata) m).getDefinitionName())
.collect(Collectors.toList());
if (!names.isEmpty()) {
_deleteDefinitionMetadata(names);
}
if (config.canSoftDeleteDataMetadata()) {
final List<String> uris = hasMetadatas.stream()
.filter(m -> m instanceof HasDataMetadata && ((HasDataMetadata) m).isDataExternal())
.map(m -> ((HasDataMetadata) m).getDataUri()).collect(Collectors.toList());
if (!uris.isEmpty()) {
_softDeleteDataMetadata(userId, uris);
}
}
}
} catch (Exception e) {
log.error("Failed deleting metadatas", e);
throw new UserMetadataServiceException("Failed deleting metadatas", e);
}
}
/**
     * Deletes definition metadata for the given names, routing partition and non-partition names to their tables.
*
* @param names names to delete
*/
@SuppressWarnings("checkstyle:methodname")
private void _deleteDefinitionMetadata(
@Nullable final List<QualifiedName> names
) {
if (names != null && !names.isEmpty()) {
final SqlParameterValue[] aNames = names.stream().filter(name -> !name.isPartitionDefinition())
.map(n -> new SqlParameterValue(Types.VARCHAR, n))
.toArray(SqlParameterValue[]::new);
final SqlParameterValue[] aPartitionNames = names.stream().filter(QualifiedName::isPartitionDefinition)
.map(n -> new SqlParameterValue(Types.VARCHAR, n))
.toArray(SqlParameterValue[]::new);
if (aNames.length > 0) {
final List<String> paramVariables = Arrays.stream(aNames).map(s -> "?").collect(Collectors.toList());
jdbcTemplate.update(
String.format(SQL.DELETE_DEFINITION_METADATA, Joiner.on(",").skipNulls().join(paramVariables)),
(Object[]) aNames);
}
if (aPartitionNames.length > 0) {
final List<String> paramVariables =
Arrays.stream(aPartitionNames).map(s -> "?").collect(Collectors.toList());
jdbcTemplate.update(
String.format(SQL.DELETE_PARTITION_DEFINITION_METADATA,
Joiner.on(",").skipNulls().join(paramVariables)), (Object[]) aPartitionNames);
}
}
}
/**
     * Soft-deletes data metadata by recording the matching ids in the 'data_metadata_delete' table.
*
* @param userId user id
* @param uris uri list
*/
@SuppressWarnings("checkstyle:methodname")
private void _softDeleteDataMetadata(final String userId,
@Nullable final List<String> uris) {
if (uris != null && !uris.isEmpty()) {
final List<String> paramVariables = uris.stream().map(s -> "?").collect(Collectors.toList());
final String[] aUris = uris.toArray(new String[0]);
final String paramString = Joiner.on(",").skipNulls().join(paramVariables);
final List<Long> ids = jdbcTemplate
.query(String.format(SQL.GET_DATA_METADATA_IDS, paramString), aUris, (rs, rowNum) -> rs.getLong("id"));
if (!ids.isEmpty()) {
final List<String> idParamVariables = ids.stream().map(s -> "?").collect(Collectors.toList());
final Long[] aIds = ids.toArray(new Long[0]);
final String idParamString = Joiner.on(",").skipNulls().join(idParamVariables);
final List<Long> dupIds = jdbcTemplate
.query(String.format(SQL.GET_DATA_METADATA_DELETE_BY_IDS, idParamString), aIds,
(rs, rowNum) -> rs.getLong("id"));
if (!dupIds.isEmpty()) {
ids.removeAll(dupIds);
}
final List<Object[]> deleteDataMetadatas = Lists.newArrayList();
ids.forEach(id -> deleteDataMetadatas.add(new Object[]{id, userId}));
final int[] colTypes = {Types.BIGINT, Types.VARCHAR};
jdbcTemplate.batchUpdate(SQL.SOFT_DELETE_DATA_METADATA, deleteDataMetadatas, colTypes);
}
}
}
/**
     * Deletes the soft-delete markers for the given uris and, if requested, the data metadata rows themselves.
*
* @param uris uri list
     * @param removeDataMetadata flag to also remove the data metadata rows
*/
@SuppressWarnings("checkstyle:methodname")
private void _deleteDataMetadata(
@Nullable final List<String> uris,
final boolean removeDataMetadata
) {
if (uris != null && !uris.isEmpty()) {
final List<String> paramVariables = uris.stream().map(s -> "?").collect(Collectors.toList());
final String[] aUris = uris.toArray(new String[0]);
final String paramString = Joiner.on(",").skipNulls().join(paramVariables);
final List<Long> ids = jdbcTemplate
.query(String.format(SQL.GET_DATA_METADATA_IDS, paramString), aUris, (rs, rowNum) -> rs.getLong("id"));
if (!ids.isEmpty()) {
final List<String> idParamVariables = ids.stream().map(s -> "?").collect(Collectors.toList());
final SqlParameterValue[] aIds = ids.stream().map(id -> new SqlParameterValue(Types.BIGINT, id))
.toArray(SqlParameterValue[]::new);
final String idParamString = Joiner.on(",").skipNulls().join(idParamVariables);
jdbcTemplate.update(String.format(SQL.DELETE_DATA_METADATA_DELETE, idParamString), (Object[]) aIds);
if (removeDataMetadata) {
jdbcTemplate.update(String.format(SQL.DELETE_DATA_METADATA, idParamString), (Object[]) aIds);
}
}
}
}
@Nonnull
@Override
@Transactional(readOnly = true)
public Optional<ObjectNode> getDataMetadata(
@Nonnull final String uri) {
return getJsonForKey(SQL.GET_DATA_METADATA, uri);
}
@Nonnull
@Override
@Transactional(readOnly = true)
public Map<String, ObjectNode> getDataMetadataMap(
@Nonnull final List<String> uris) {
final Map<String, ObjectNode> result = Maps.newHashMap();
if (!uris.isEmpty()) {
final List<List<String>> parts = Lists.partition(uris, config.getUserMetadataMaxInClauseItems());
parts.forEach(keys -> result.putAll(_getMetadataMap(keys, SQL.GET_DATA_METADATAS)));
}
return result;
}
@Nonnull
@Override
@Transactional(readOnly = true)
public Optional<ObjectNode> getDefinitionMetadata(
@Nonnull final QualifiedName name) {
final Optional<ObjectNode> retData = getJsonForKey(
name.isPartitionDefinition() ? SQL.GET_PARTITION_DEFINITION_METADATA : SQL.GET_DEFINITION_METADATA,
name.toString());
return retData;
}
@Override
@Transactional(readOnly = true)
public List<QualifiedName> getDescendantDefinitionNames(@Nonnull final QualifiedName name) {
final List<String> result;
try {
result = jdbcTemplate
.query(SQL.GET_DESCENDANT_DEFINITION_NAMES, new Object[]{name.toString() + "/%"},
new int[]{Types.VARCHAR},
(rs, rowNum) -> rs.getString("name"));
} catch (Exception e) {
final String message = String.format("Failed to get descendant names for %s", name);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
return result.stream().map(QualifiedName::fromString).collect(Collectors.toList());
}
@Override
@Transactional(readOnly = true)
public List<String> getDescendantDataUris(@Nonnull final String uri) {
final List<String> result;
try {
result = jdbcTemplate.query(SQL.GET_DESCENDANT_DATA_URIS, new Object[]{uri + "/%"},
new int[]{Types.VARCHAR},
(rs, rowNum) -> rs.getString("uri"));
} catch (Exception e) {
final String message = String.format("Failed to get descendant uris for %s", uri);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
return result;
}
//TODO: For partition metadata, add interceptor if needed
@Nonnull
@Override
@Transactional(readOnly = true)
public Map<String, ObjectNode> getDefinitionMetadataMap(
@Nonnull final List<QualifiedName> names) {
//
// names can contain partition names and non-partition names. Since definition metadata is stored in two tables,
        // metadata needs to be retrieved from both tables.
//
final List<QualifiedName> oNames = names.stream().filter(name -> !name.isPartitionDefinition()).collect(
Collectors.toList());
final List<QualifiedName> partitionNames = names.stream().filter(QualifiedName::isPartitionDefinition).collect(
Collectors.toList());
final Map<String, ObjectNode> result = Maps.newHashMap();
if (!oNames.isEmpty()) {
result.putAll(_getNonPartitionDefinitionMetadataMap(oNames));
}
if (!partitionNames.isEmpty()) {
result.putAll(_getPartitionDefinitionMetadata(partitionNames));
}
return result;
}
@SuppressWarnings("checkstyle:methodname")
private Map<String, ObjectNode> _getNonPartitionDefinitionMetadataMap(final List<QualifiedName> names) {
final List<List<QualifiedName>> parts = Lists.partition(names, config.getUserMetadataMaxInClauseItems());
return parts.parallelStream()
.map(keys -> _getMetadataMap(keys, SQL.GET_DEFINITION_METADATAS))
.flatMap(it -> it.entrySet().stream())
.collect(Collectors.toConcurrentMap(it -> QualifiedName.fromString(it.getKey()).toString(),
Map.Entry::getValue));
}
@SuppressWarnings("checkstyle:methodname")
private Map<String, ObjectNode> _getPartitionDefinitionMetadata(final List<QualifiedName> names) {
final List<List<QualifiedName>> parts = Lists.partition(names, config.getUserMetadataMaxInClauseItems());
return parts.parallelStream()
.map(keys -> _getMetadataMap(keys, SQL.GET_PARTITION_DEFINITION_METADATAS))
.flatMap(it -> it.entrySet().stream())
.collect(Collectors.toConcurrentMap(it -> QualifiedName.fromString(it.getKey()).toString(),
Map.Entry::getValue));
}
/**
     * Gets a map from name/uri to metadata JSON for the given keys, using the given parameterized SQL template.
*
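     * <p>
     * For illustration (values assumed): with two keys the placeholder list expands so that
     * {@code String.format(SQL.GET_DEFINITION_METADATAS, "?,?")} yields
     * {@code select name,data from definition_metadata where name in (?,?)}.
     *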
* @param keys list of keys
* @param sql query string
* @return map of the metadata
*/
@SuppressWarnings("checkstyle:methodname")
private Map<String, ObjectNode> _getMetadataMap(@Nullable final List<?> keys, final String sql) {
final Map<String, ObjectNode> result = Maps.newHashMap();
if (keys == null || keys.isEmpty()) {
return result;
}
final List<String> paramVariables = keys.stream().map(s -> "?").collect(Collectors.toList());
final SqlParameterValue[] aKeys = keys.stream().map(o -> new SqlParameterValue(Types.VARCHAR, o.toString()))
.toArray(SqlParameterValue[]::new);
        final String query = String.format(sql, Joiner.on(",").join(paramVariables));
try {
final ResultSetExtractor<Void> handler = resultSet -> {
while (resultSet.next()) {
final String json = resultSet.getString("data");
final String name = resultSet.getString("name");
if (json != null) {
try {
result.put(name, metacatJson.parseJsonObject(json));
} catch (MetacatJsonException e) {
log.error("Invalid json '{}' for name '{}'", json, name);
throw new UserMetadataServiceException(
String.format("Invalid json %s for name %s", json, name), e);
}
}
}
return null;
};
jdbcTemplate.query(query, aKeys, handler);
} catch (Exception e) {
final String message = String.format("Failed to get data for %s", keys);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
return result;
}
/**
     * Gets the JSON value for the given key; keys are compared case-insensitively.
*
* @param query query string
* @param keyValue parameters
* @return result object node
*/
private Optional<ObjectNode> getJsonForKey(final String query, final String keyValue) {
try {
ResultSetExtractor<Optional<ObjectNode>> handler = rs -> {
final String json;
Optional<ObjectNode> result = Optional.empty();
while (rs.next()) {
final String key = rs.getString(1);
if (keyValue.equalsIgnoreCase(key)) {
json = rs.getString(2);
if (Strings.isNullOrEmpty(json)) {
return Optional.empty();
}
result = Optional.ofNullable(metacatJson.parseJsonObject(json));
break;
}
}
return result;
};
return jdbcTemplate.query(query, new String[]{keyValue}, new int[]{Types.VARCHAR}, handler);
} catch (MetacatJsonException e) {
final String message = String.format("Invalid json %s for name %s", e.getInputJson(), keyValue);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
} catch (Exception e) {
final String message = String.format("Failed to get data for %s", keyValue);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
/**
     * Executes the given update statement, binding the given values as VARCHAR parameters.
*
* @param query sql query string
* @param keyValues parameters
* @return number of updated rows
*/
private int executeUpdateForKey(final String query, final String... keyValues) {
try {
final SqlParameterValue[] values =
Arrays.stream(keyValues).map(keyValue -> new SqlParameterValue(Types.VARCHAR, keyValue))
.toArray(SqlParameterValue[]::new);
return jdbcTemplate.update(query, (Object[]) values);
} catch (Exception e) {
final String message = String.format("Failed to save data for %s", Arrays.toString(keyValues));
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
private void throwIfPartitionDefinitionMetadataDisabled() {
if (config.disablePartitionDefinitionMetadata()) {
throw new MetacatBadRequestException("Partition Definition metadata updates are disabled");
}
}
@Override
public void saveDataMetadata(
@Nonnull final String uri,
@Nonnull final String userId,
@Nonnull final Optional<ObjectNode> metadata, final boolean merge) {
final Optional<ObjectNode> existingData = getDataMetadata(uri);
final int count;
if (existingData.isPresent() && metadata.isPresent()) {
final ObjectNode merged = existingData.get();
if (merge) {
metacatJson.mergeIntoPrimary(merged, metadata.get());
}
count = executeUpdateForKey(SQL.UPDATE_DATA_METADATA, merged.toString(), userId, uri);
} else {
count = metadata.map(
jsonNodes -> executeUpdateForKey(SQL.INSERT_DATA_METADATA, jsonNodes.toString(), userId, userId, uri))
.orElse(1);
}
if (count != 1) {
            throw new IllegalStateException("Expected one row to be inserted or updated for " + uri);
}
}
@Override
public void saveDefinitionMetadata(
@Nonnull final QualifiedName name,
@Nonnull final String userId,
@Nonnull final Optional<ObjectNode> metadata, final boolean merge)
throws InvalidMetadataException {
final Optional<ObjectNode> existingData = getDefinitionMetadata(name);
final int count;
if (existingData.isPresent() && metadata.isPresent()) {
ObjectNode merged = existingData.get();
if (merge) {
metacatJson.mergeIntoPrimary(merged, metadata.get());
} else {
merged = metadata.get();
}
//apply interceptor to change the object node
this.metadataInterceptor.onWrite(this, name, merged);
String query;
if (name.isPartitionDefinition()) {
throwIfPartitionDefinitionMetadataDisabled();
query = SQL.UPDATE_PARTITION_DEFINITION_METADATA;
} else {
query = SQL.UPDATE_DEFINITION_METADATA;
}
count = executeUpdateForKey(
query,
merged.toString(),
userId,
name.toString());
} else {
//apply interceptor to change the object node
if (metadata.isPresent()) {
this.metadataInterceptor.onWrite(this, name, metadata.get());
}
String queryToExecute;
if (name.isPartitionDefinition()) {
throwIfPartitionDefinitionMetadataDisabled();
queryToExecute = SQL.INSERT_PARTITION_DEFINITION_METADATA;
} else {
queryToExecute = SQL.INSERT_DEFINITION_METADATA;
}
count = metadata.map(jsonNodes -> executeUpdateForKey(
queryToExecute,
jsonNodes.toString(),
userId,
userId,
name.toString()
)).orElse(1);
}
if (count != 1) {
            throw new IllegalStateException("Expected one row to be inserted or updated for " + name);
}
}
@Override
public int renameDataMetadataKey(
@Nonnull final String oldUri,
@Nonnull final String newUri) {
return executeUpdateForKey(SQL.RENAME_DATA_METADATA, newUri, oldUri);
}
@Override
public int renameDefinitionMetadataKey(
@Nonnull final QualifiedName oldName,
@Nonnull final QualifiedName newName) {
_deleteDefinitionMetadata(Lists.newArrayList(newName));
return executeUpdateForKey(SQL.RENAME_DEFINITION_METADATA, newName.toString(), oldName.toString());
}
@Override
public void saveMetadata(final String user, final List<? extends HasMetadata> metadatas, final boolean merge) {
try {
@SuppressWarnings("unchecked") final List<List<HasMetadata>> subLists = Lists.partition(
(List<HasMetadata>) metadatas,
config.getUserMetadataMaxInClauseItems()
);
for (List<HasMetadata> hasMetadatas : subLists) {
final List<String> uris = Lists.newArrayList();
final List<QualifiedName> names = Lists.newArrayList();
// Get the names and uris
final List<HasDefinitionMetadata> definitionMetadatas = Lists.newArrayList();
final List<HasDataMetadata> dataMetadatas = Lists.newArrayList();
hasMetadatas.forEach(hasMetadata -> {
if (hasMetadata instanceof HasDefinitionMetadata) {
final HasDefinitionMetadata oDef = (HasDefinitionMetadata) hasMetadata;
names.add(oDef.getDefinitionName());
if (oDef.getDefinitionMetadata() != null) {
definitionMetadatas.add(oDef);
}
}
if (hasMetadata instanceof HasDataMetadata) {
final HasDataMetadata oData = (HasDataMetadata) hasMetadata;
if (oData.isDataExternal() && oData.getDataMetadata() != null
&& oData.getDataMetadata().size() > 0) {
uris.add(oData.getDataUri());
dataMetadatas.add(oData);
}
}
});
if (!definitionMetadatas.isEmpty() || !dataMetadatas.isEmpty()) {
// Get the existing metadata based on the names and uris
final Map<String, ObjectNode> definitionMap = getDefinitionMetadataMap(names);
final Map<String, ObjectNode> dataMap = getDataMetadataMap(uris);
// Curate the list of existing and new metadatas
final List<Object[]> insertDefinitionMetadatas = Lists.newArrayList();
final List<Object[]> updateDefinitionMetadatas = Lists.newArrayList();
final List<Object[]> insertPartitionDefinitionMetadatas = Lists.newArrayList();
final List<Object[]> updatePartitionDefinitionMetadatas = Lists.newArrayList();
final List<Object[]> insertDataMetadatas = Lists.newArrayList();
final List<Object[]> updateDataMetadatas = Lists.newArrayList();
definitionMetadatas.forEach(oDef -> {
final QualifiedName qualifiedName = oDef.getDefinitionName();
if (qualifiedName != null && oDef.getDefinitionMetadata() != null
&& oDef.getDefinitionMetadata().size() != 0) {
final String name = qualifiedName.toString();
final ObjectNode oNode = definitionMap.get(name);
if (oNode == null) {
final Object[] o = new Object[]{
metacatJson.toJsonString(oDef.getDefinitionMetadata()), user, user, name, };
if (qualifiedName.isPartitionDefinition()) {
insertPartitionDefinitionMetadatas.add(o);
} else {
insertDefinitionMetadatas.add(o);
}
} else {
metacatJson.mergeIntoPrimary(oNode, oDef.getDefinitionMetadata());
final Object[] o = new Object[]{metacatJson.toJsonString(oNode), user, name};
if (qualifiedName.isPartitionDefinition()) {
updatePartitionDefinitionMetadatas.add(o);
} else {
updateDefinitionMetadatas.add(o);
}
}
}
});
dataMetadatas.forEach(oData -> {
final String uri = oData.getDataUri();
final ObjectNode oNode = dataMap.get(uri);
if (oData.getDataMetadata() != null && oData.getDataMetadata().size() != 0) {
if (oNode == null) {
insertDataMetadatas.add(
new Object[]{
metacatJson.toJsonString(oData.getDataMetadata()),
user,
user,
uri,
}
);
} else {
metacatJson.mergeIntoPrimary(oNode, oData.getDataMetadata());
updateDataMetadatas
.add(new Object[]{metacatJson.toJsonString(oNode), user, uri});
}
}
});
if (!insertDefinitionMetadatas.isEmpty()) {
jdbcTemplate.batchUpdate(SQL.INSERT_DEFINITION_METADATA, insertDefinitionMetadatas,
new int[]{Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR});
}
if (!updateDefinitionMetadatas.isEmpty()) {
jdbcTemplate.batchUpdate(SQL.UPDATE_DEFINITION_METADATA, updateDefinitionMetadatas,
new int[]{Types.VARCHAR, Types.VARCHAR, Types.VARCHAR});
}
if (!insertPartitionDefinitionMetadatas.isEmpty()) {
throwIfPartitionDefinitionMetadataDisabled();
jdbcTemplate.batchUpdate(SQL.INSERT_PARTITION_DEFINITION_METADATA,
insertPartitionDefinitionMetadatas,
new int[]{Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR});
}
if (!updatePartitionDefinitionMetadatas.isEmpty()) {
throwIfPartitionDefinitionMetadataDisabled();
jdbcTemplate.batchUpdate(SQL.UPDATE_PARTITION_DEFINITION_METADATA,
updatePartitionDefinitionMetadatas,
new int[]{Types.VARCHAR, Types.VARCHAR, Types.VARCHAR});
}
if (!insertDataMetadatas.isEmpty()) {
jdbcTemplate.batchUpdate(SQL.INSERT_DATA_METADATA, insertDataMetadatas,
new int[]{Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR});
}
if (!updateDataMetadatas.isEmpty()) {
jdbcTemplate.batchUpdate(SQL.UPDATE_DATA_METADATA, updateDataMetadatas,
new int[]{Types.VARCHAR, Types.VARCHAR, Types.VARCHAR});
}
}
}
} catch (Exception e) {
log.error("Failed to save metadata", e);
throw new UserMetadataServiceException("Failed to save metadata", e);
}
}
@Override
@Transactional(readOnly = true)
public List<DefinitionMetadataDto> searchDefinitionMetadata(
@Nullable final Set<String> propertyNames,
@Nullable final String type,
@Nullable final String name,
@Nullable final HasMetadata holder,
@Nullable final String sortBy,
@Nullable final String sortOrder,
@Nullable final Integer offset,
@Nullable final Integer limit
) {
final List<DefinitionMetadataDto> result = Lists.newArrayList();
final SearchMetadataQuery queryObj = new SearchMetadataQuery(SQL.SEARCH_DEFINITION_METADATAS)
.buildSearchMetadataQuery(
propertyNames,
type,
name,
sortBy,
sortOrder,
offset,
limit
);
try {
// Handler for reading the result set
final ResultSetExtractor<Void> handler = rs -> {
while (rs.next()) {
final String definitionName = rs.getString("name");
final String data = rs.getString("data");
final DefinitionMetadataDto definitionMetadataDto = new DefinitionMetadataDto();
definitionMetadataDto.setName(QualifiedName.fromString(definitionName));
definitionMetadataDto.setDefinitionMetadata(metacatJson.parseJsonObject(data));
result.add(definitionMetadataDto);
}
return null;
};
jdbcTemplate.query(queryObj.getSearchQuery().toString(), queryObj.getSearchParamList().toArray(), handler);
} catch (Exception e) {
log.error("Failed to search definition data", e);
throw new UserMetadataServiceException("Failed to search definition data", e);
}
return result;
}
@Override
@Transactional(readOnly = true)
public List<QualifiedName> searchByOwners(final Set<String> owners) {
final List<QualifiedName> result = Lists.newArrayList();
final StringBuilder query = new StringBuilder(SQL.SEARCH_DEFINITION_METADATA_NAMES);
final List<SqlParameterValue> paramList = Lists.newArrayList();
query.append(" where 1=0");
owners.forEach(s -> {
query.append(" or data like ?");
paramList.add(new SqlParameterValue(Types.VARCHAR, "%\"userId\":\"" + s.trim() + "\"%"));
});
final SqlParameterValue[] params = new SqlParameterValue[paramList.size()];
try {
// Handler for reading the result set
final ResultSetExtractor<Void> handler = rs -> {
while (rs.next()) {
final String definitionName = rs.getString("name");
result.add(QualifiedName.fromString(definitionName, false));
}
return null;
};
jdbcTemplate.query(query.toString(), paramList.toArray(params), handler);
} catch (Exception e) {
log.error("Failed to search by owners", e);
throw new UserMetadataServiceException("Failed to search by owners", e);
}
return result;
}
@Override
@Transactional(readOnly = true)
public List<String> getDeletedDataMetadataUris(final Date deletedPriorTo, final Integer offset,
final Integer limit) {
try {
return jdbcTemplate.query(String.format(SQL.GET_DELETED_DATA_METADATA_URI, offset, limit),
new Object[]{deletedPriorTo}, new int[]{Types.TIMESTAMP}, (rs, rowNum) -> rs.getString("uri"));
} catch (Exception e) {
final String message =
String.format("Failed to get deleted data metadata uris deleted prior to %s", deletedPriorTo);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
@Override
public void populateOwnerIfMissing(final HasDefinitionMetadata holder, final String owner) {
ObjectNode definitionMetadata = holder.getDefinitionMetadata();
if (definitionMetadata == null) {
definitionMetadata = metacatJson.emptyObjectNode();
holder.setDefinitionMetadata(definitionMetadata);
}
final ObjectNode ownerNode = definitionMetadata.with(NAME_OWNER);
final JsonNode userId = ownerNode.get(NAME_USERID);
if (userId == null || Strings.isNullOrEmpty(userId.textValue())) {
ownerNode.put(NAME_USERID, owner);
}
}
/**
     * Inner helper class for building the definition/business metadata search query.
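     * <p>
     * For example (arguments assumed purely for illustration), a search for tables having a given property could
     * build a query shaped like:
     * <pre>{@code
     * select name,data from definition_metadata where 1=1
     *     and name rlike ?  -- type regex for tables
     *     and data like ?   -- e.g. %"lifetime":%
     *     order by name ASC limit 0,25
     * }</pre>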
*/
@Data
class SearchMetadataQuery {
private StringBuilder searchQuery;
private List<SqlParameterValue> searchParamList = Lists.newArrayList();
SearchMetadataQuery(final String querySQL) {
this.searchQuery = new StringBuilder(querySQL);
}
SearchMetadataQuery buildSearchMetadataQuery(@Nullable final Set<String> propertyNames,
@Nullable final String type,
@Nullable final String name,
@Nullable final String sortByStr,
@Nullable final String sortOrderStr,
@Nullable final Integer offset,
@Nullable final Integer limit) {
String sortBy = null;
if (StringUtils.isNotBlank(sortByStr)) {
sortBy = sortByStr.trim().toLowerCase();
if (!DEFINITION_METADATA_SORT_BY_COLUMNS.contains(sortBy)) {
throw new IllegalArgumentException(String.format("Invalid sortBy column %s", sortBy));
}
}
String sortOrder = null;
if (StringUtils.isNotBlank(sortOrderStr)) {
sortOrder = sortOrderStr.trim().toUpperCase();
if (!VALID_SORT_ORDER.contains(sortOrder)) {
throw new IllegalArgumentException("Invalid sort order. Expected ASC or DESC");
}
}
if (type != null) {
String typeRegex = null;
switch (type) {
case "catalog":
typeRegex = "^[^/]*$";
break;
case "database":
typeRegex = "^[^/]*/[^/]*$";
break;
case "table":
typeRegex = "^[^/]*/[^/]*/[^/]*$";
break;
case "partition":
typeRegex = "^[^/]*/[^/]*/[^/]*/.*$";
break;
default:
}
if (typeRegex != null) {
this.searchQuery.append(" and name rlike ?");
this.searchParamList.add(new SqlParameterValue(Types.VARCHAR, typeRegex));
}
}
if (propertyNames != null && !propertyNames.isEmpty()) {
propertyNames.forEach(propertyName -> {
this.searchQuery.append(" and data like ?");
searchParamList.add(new SqlParameterValue(Types.VARCHAR, "%\"" + propertyName + "\":%"));
});
}
if (!Strings.isNullOrEmpty(name)) {
this.searchQuery.append(" and name like ?");
this.searchParamList.add(new SqlParameterValue(Types.VARCHAR, name));
}
if (!Strings.isNullOrEmpty(sortBy)) {
this.searchQuery.append(" order by ").append(sortBy);
if (!Strings.isNullOrEmpty(sortOrder)) {
this.searchQuery.append(" ").append(sortOrder);
}
}
if (limit != null) {
this.searchQuery.append(" limit ");
if (offset != null) {
this.searchQuery.append(offset).append(",");
}
this.searchQuery.append(limit);
}
return this;
}
}
protected static class SQL {
static final String SOFT_DELETE_DATA_METADATA =
"insert into data_metadata_delete(id, created_by,date_created) values (?,?, now())";
static final String GET_DATA_METADATA_IDS =
"select id from data_metadata where uri in (%s)";
static final String GET_DATA_METADATA_DELETE_BY_IDS =
"select id from data_metadata_delete where id in (%s)";
static final String DELETE_DATA_METADATA_DELETE =
"delete from data_metadata_delete where id in (%s)";
static final String DELETE_DATA_METADATA =
"delete from data_metadata where id in (%s)";
static final String DELETE_DEFINITION_METADATA =
"delete from definition_metadata where name in (%s)";
static final String DELETE_DEFINITION_METADATA_STALE =
"delete from definition_metadata where name like ? and last_updated < ?";
static final String DELETE_PARTITION_DEFINITION_METADATA =
"delete from partition_definition_metadata where name in (%s)";
static final String GET_DATA_METADATA =
"select uri name, data from data_metadata where uri=?";
static final String GET_DELETED_DATA_METADATA_URI =
"select uri from data_metadata_delete dmd join data_metadata dm on dmd.id=dm.id"
+ " where dmd.date_created < ? limit %d,%d";
static final String GET_DESCENDANT_DATA_URIS =
"select uri from data_metadata where uri like ?";
static final String GET_DESCENDANT_DEFINITION_NAMES =
"select name from partition_definition_metadata where name like ?";
static final String GET_DATA_METADATAS =
"select uri name,data from data_metadata where uri in (%s)";
static final String GET_DEFINITION_METADATA =
"select name, data from definition_metadata where name=?";
static final String GET_PARTITION_DEFINITION_METADATA =
"select name, data from partition_definition_metadata where name=?";
static final String GET_DEFINITION_METADATAS =
"select name,data from definition_metadata where name in (%s)";
static final String GET_PARTITION_DEFINITION_METADATAS =
"select name,data from partition_definition_metadata where name in (%s)";
static final String SEARCH_DEFINITION_METADATAS =
"select name,data from definition_metadata where 1=1";
static final String SEARCH_DEFINITION_METADATA_NAMES =
"select name from definition_metadata";
static final String INSERT_DATA_METADATA = "insert into data_metadata "
+ "(data, created_by, last_updated_by, date_created, last_updated, version, uri) values "
+ "(?, ?, ?, now(), now(), 0, ?)";
static final String INSERT_DEFINITION_METADATA = "insert into definition_metadata "
+ "(data, created_by, last_updated_by, date_created, last_updated, version, name) values "
+ "(?, ?, ?, now(), now(), 0, ?)";
static final String INSERT_PARTITION_DEFINITION_METADATA = "insert into partition_definition_metadata "
+ "(data, created_by, last_updated_by, date_created, last_updated, version, name) values "
+ "(?, ?, ?, now(), now(), 0, ?)";
static final String RENAME_DATA_METADATA = "update data_metadata set uri=? where uri=?";
static final String RENAME_DEFINITION_METADATA = "update definition_metadata set name=? where name=?";
static final String UPDATE_DATA_METADATA =
"update data_metadata set data=?, last_updated=now(), last_updated_by=? where uri=?";
static final String UPDATE_DEFINITION_METADATA =
"update definition_metadata set data=?, last_updated=now(), last_updated_by=? where name=?";
static final String UPDATE_PARTITION_DEFINITION_METADATA =
"update partition_definition_metadata set data=?, last_updated=now(), last_updated_by=? where name=?";
}
}
| 34 |
0 | Create_ds/metacat/metacat-metadata-mysql/src/main/java/com/netflix/metacat/metadata | Create_ds/metacat/metacat-metadata-mysql/src/main/java/com/netflix/metacat/metadata/mysql/package-info.java | /*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This package includes user metadata service classes.
*
* @author amajumdar
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.metadata.mysql;
import javax.annotation.ParametersAreNonnullByDefault;
| 35 |
0 | Create_ds/metacat/metacat-connector-redshift/src/main/java/com/netflix/metacat/connector | Create_ds/metacat/metacat-connector-redshift/src/main/java/com/netflix/metacat/connector/redshift/RedshiftConnectorFactory.java | /*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.redshift;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.server.connectors.DefaultConnectorFactory;
import java.util.Map;
/**
* Connector Factory for Redshift.
*
* @author tgianos
* @since 1.0.0
*/
class RedshiftConnectorFactory extends DefaultConnectorFactory {
/**
* Constructor.
*
* @param name catalog name
* @param catalogShardName catalog shard name
* @param configuration catalog configuration
*/
RedshiftConnectorFactory(
final String name,
final String catalogShardName,
final Map<String, String> configuration
) {
super(name, catalogShardName, Lists.newArrayList(new RedshiftConnectorModule(catalogShardName, configuration)));
}
}
| 36 |
0 | Create_ds/metacat/metacat-connector-redshift/src/main/java/com/netflix/metacat/connector | Create_ds/metacat/metacat-connector-redshift/src/main/java/com/netflix/metacat/connector/redshift/RedshiftConnectorPlugin.java | /*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.redshift;
import com.netflix.metacat.common.server.connectors.ConnectorFactory;
import com.netflix.metacat.common.server.connectors.ConnectorPlugin;
import com.netflix.metacat.common.server.connectors.ConnectorTypeConverter;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import lombok.NonNull;
import javax.annotation.Nonnull;
/**
* Redshift Connector Plugin.
*
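 * <p>
 * Illustrative use (the {@code connectorContext} instance is assumed to be supplied by the Metacat server):
 * <pre>{@code
 * ConnectorPlugin plugin = new RedshiftConnectorPlugin();
 * ConnectorFactory factory = plugin.create(connectorContext); // factory for the "redshift" connector type
 * }</pre>
 *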
* @author tgianos
* @since 1.0.0
*/
public class RedshiftConnectorPlugin implements ConnectorPlugin {
private static final String CONNECTOR_TYPE = "redshift";
private static final RedshiftTypeConverter TYPE_CONVERTER = new RedshiftTypeConverter();
/**
* {@inheritDoc}
*/
@Override
public String getType() {
return CONNECTOR_TYPE;
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorFactory create(@Nonnull @NonNull final ConnectorContext connectorContext) {
return new RedshiftConnectorFactory(connectorContext.getCatalogName(),
connectorContext.getCatalogShardName(), connectorContext.getConfiguration());
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorTypeConverter getTypeConverter() {
return TYPE_CONVERTER;
}
}
| 37 |
0 | Create_ds/metacat/metacat-connector-redshift/src/main/java/com/netflix/metacat/connector | Create_ds/metacat/metacat-connector-redshift/src/main/java/com/netflix/metacat/connector/redshift/RedshiftTypeConverter.java | /*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.redshift;
import com.netflix.metacat.common.type.BaseType;
import com.netflix.metacat.common.type.CharType;
import com.netflix.metacat.common.type.DecimalType;
import com.netflix.metacat.common.type.Type;
import com.netflix.metacat.common.type.VarcharType;
import com.netflix.metacat.connector.jdbc.JdbcTypeConverter;
import lombok.extern.slf4j.Slf4j;
/**
* Type converter for Redshift.
*
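 * <p>
 * A small usage sketch (illustrative values only):
 * <pre>{@code
 * RedshiftTypeConverter converter = new RedshiftTypeConverter();
 * Type metacatType = converter.toMetacatType("varchar(255)");        // Metacat VARCHAR(255)
 * String redshiftType = converter.fromMetacatType(BaseType.BOOLEAN); // "BOOLEAN"
 * }</pre>
 *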
* @author tgianos
* @since 1.0.0
*/
@Slf4j
public class RedshiftTypeConverter extends JdbcTypeConverter {
static final int DEFAULT_CHARACTER_LENGTH = 256;
private static final String DEFAULT_CHARACTER_LENGTH_STRING = Integer.toString(DEFAULT_CHARACTER_LENGTH);
/**
* {@inheritDoc}
*
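     * <p>
     * Examples (illustrative): {@code "int4"} maps to {@code BaseType.INT}; {@code "numeric(10,2)"} maps to a
     * decimal type with precision 10 and scale 2; {@code "text"} maps to a varchar of the default length (256).
     *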
* @see <a href="http://docs.aws.amazon.com/redshift/latest/dg/c_Supported_data_types.html">Redshift Types</a>
* @see <a href="http://docs.aws.amazon.com/redshift/latest/dg/c_unsupported-postgresql-datatypes.html">
* Unsupported PostgreSQL Types
* </a>
*/
@Override
public Type toMetacatType(final String type) {
// See: https://www.postgresql.org/docs/current/static/datatype.html
final String lowerType = type.toLowerCase();
// Split up the possible type: TYPE[(size, magnitude)] EXTRA
final String[] splitType = this.splitType(lowerType);
switch (splitType[0]) {
case "smallint":
case "int2":
return BaseType.SMALLINT;
case "int":
case "integer":
case "int4":
return BaseType.INT;
case "int8":
case "bigint":
case "oid":
return BaseType.BIGINT;
case "decimal":
case "numeric":
return this.toMetacatDecimalType(splitType);
case "real":
case "float4":
return BaseType.FLOAT;
case "double precision":
case "float8":
case "float":
return BaseType.DOUBLE;
case "character varying":
case "varchar":
case "nvarchar":
fixDataSizeIfIncorrect(splitType);
return this.toMetacatVarcharType(splitType);
case "text":
case "name":
                // text is effectively an alias for VARCHAR(256)
splitType[1] = DEFAULT_CHARACTER_LENGTH_STRING;
return this.toMetacatVarcharType(splitType);
case "character":
case "char":
case "nchar":
fixDataSizeIfIncorrect(splitType);
return this.toMetacatCharType(splitType);
case "bpchar":
                // bpchar defaults to a fixed length of 256 characters
splitType[1] = DEFAULT_CHARACTER_LENGTH_STRING;
return this.toMetacatCharType(splitType);
case "timestamp":
return this.toMetacatTimestampType(splitType);
case "timestampz":
return BaseType.TIMESTAMP_WITH_TIME_ZONE;
case "date":
return BaseType.DATE;
case "boolean":
case "bool":
return BaseType.BOOLEAN;
default:
// see: http://docs.aws.amazon.com/redshift/latest/dg/c_unsupported-postgresql-datatypes.html
log.info("Unhandled or unknown Redshift type {}", splitType[0]);
return BaseType.UNKNOWN;
}
}
private void fixDataSizeIfIncorrect(final String[] splitType) {
//
        // Hack: the reported size for character types can be missing or negative; fall back to the
        // default length (256) in that case.
        // TODO: Remove this hack once the underlying size reporting is fixed.
//
if (splitType[1] == null || Integer.parseInt(splitType[1]) <= 0) {
splitType[1] = DEFAULT_CHARACTER_LENGTH_STRING;
}
}
/**
* {@inheritDoc}
*/
@Override
public String fromMetacatType(final Type type) {
switch (type.getTypeSignature().getBase()) {
case ARRAY:
throw new UnsupportedOperationException("Redshift doesn't support array types");
case BIGINT:
return "BIGINT";
case BOOLEAN:
return "BOOLEAN";
case CHAR:
if (!(type instanceof CharType)) {
throw new IllegalArgumentException("Expected CHAR type but was " + type.getClass().getName());
}
final CharType charType = (CharType) type;
return "CHAR(" + charType.getLength() + ")";
case DATE:
return "DATE";
case DECIMAL:
if (!(type instanceof DecimalType)) {
throw new IllegalArgumentException("Expected decimal type but was " + type.getClass().getName());
}
final DecimalType decimalType = (DecimalType) type;
return "DECIMAL(" + decimalType.getPrecision() + ", " + decimalType.getScale() + ")";
case DOUBLE:
case FLOAT:
return "DOUBLE PRECISION";
case INT:
return "INT";
case INTERVAL_DAY_TO_SECOND:
throw new UnsupportedOperationException("Redshift doesn't support interval types");
case INTERVAL_YEAR_TO_MONTH:
throw new UnsupportedOperationException("Redshift doesn't support interval types");
case JSON:
throw new UnsupportedOperationException("Redshift doesn't support JSON types");
case MAP:
throw new UnsupportedOperationException("Redshift doesn't support MAP types");
case ROW:
throw new UnsupportedOperationException("Redshift doesn't support ROW types");
case SMALLINT:
return "SMALLINT";
case STRING:
throw new UnsupportedOperationException("Redshift doesn't support STRING types");
case TIME:
case TIME_WITH_TIME_ZONE:
throw new UnsupportedOperationException("Redshift doesn't support TIME types");
case TIMESTAMP:
return "TIMESTAMP";
case TIMESTAMP_WITH_TIME_ZONE:
return "TIMESTAMPZ";
case TINYINT:
                // NOTE: There is no tinyint type in Redshift, so use the slightly larger SMALLINT
return "SMALLINT";
case UNKNOWN:
throw new IllegalArgumentException("Can't map an unknown type");
case VARBINARY:
throw new UnsupportedOperationException("Redshift doesn't support VARBINARY types");
case VARCHAR:
if (!(type instanceof VarcharType)) {
throw new IllegalArgumentException("Expected varchar type but was " + type.getClass().getName());
}
final VarcharType varcharType = (VarcharType) type;
// NOTE: PostgreSQL lets you store up to 1GB in a varchar field which is about the same as TEXT
return "VARCHAR(" + varcharType.getLength() + ")";
default:
throw new IllegalArgumentException("Unknown type " + type.getTypeSignature().getBase());
}
}
}
| 38 |