Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/CatalogTraversalAction.java
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services;
import com.netflix.metacat.common.dto.CatalogDto;
import com.netflix.metacat.common.dto.DatabaseDto;
import com.netflix.metacat.common.dto.TableDto;
import java.util.List;
import java.util.Optional;
/**
* Action interface that will be called on catalog traversal.
*/
public interface CatalogTraversalAction {
/**
* Called when the catalog traversal starts.
*
* @param context traversal context
*/
default void init(CatalogTraversal.Context context) { }
/**
* Called when the catalog traversal processes catalogs.
*
* @param context traversal context
* @param catalogs list of catalogs
*/
default void applyCatalogs(CatalogTraversal.Context context, List<CatalogDto> catalogs) { }
/**
* Called when the catalog traversal processes databases.
*
* @param context traversal context
* @param databases list of databases
*/
default void applyDatabases(CatalogTraversal.Context context, List<DatabaseDto> databases) { }
/**
* Called when the catalog traversal processes tables.
*
* @param context traversal context
* @param tables list of tables
*/
default void applyTables(CatalogTraversal.Context context, List<Optional<TableDto>> tables) { }
/**
* Called when the catalog traversal ends.
*
* @param context traversal context
*/
default void done(CatalogTraversal.Context context) { }
}
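// Hedged example: a minimal CatalogTraversalAction implementation that counts the
// tables seen during a traversal. The class name and counting behavior are
// illustrative only and not part of Metacat; registration with a CatalogTraversal
// instance is assumed to happen elsewhere.
package com.netflix.metacat.main.services;
import com.netflix.metacat.common.dto.TableDto;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicLong;
/**
 * Example action that counts tables observed by a catalog traversal.
 */
public class TableCountingTraversalAction implements CatalogTraversalAction {
    private final AtomicLong tableCount = new AtomicLong();
    @Override
    public void init(final CatalogTraversal.Context context) {
        tableCount.set(0);
    }
    @Override
    public void applyTables(final CatalogTraversal.Context context,
                            final List<Optional<TableDto>> tables) {
        // Tables that failed to load arrive as Optional.empty(); skip them.
        tableCount.addAndGet(tables.stream().filter(Optional::isPresent).count());
    }
    @Override
    public void done(final CatalogTraversal.Context context) {
        // A real action would likely publish this to a metrics registry instead.
        System.out.println("Traversal finished; tables seen: " + tableCount.get());
    }
}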
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/ConnectorTableServiceProxy.java
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services.impl;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.server.converter.ConverterUtil;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import com.netflix.metacat.main.manager.ConnectorManager;
import com.netflix.metacat.main.services.GetTableNamesServiceParameters;
import com.netflix.metacat.main.services.GetTableServiceParameters;
import lombok.extern.slf4j.Slf4j;
import org.springframework.cache.annotation.CacheConfig;
import org.springframework.cache.annotation.CacheEvict;
import org.springframework.cache.annotation.Cacheable;
import org.springframework.cache.annotation.Caching;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
* Handles calls to the connector table service.
*/
@Slf4j
@CacheConfig(cacheNames = "metacat")
public class ConnectorTableServiceProxy {
private final ConnectorManager connectorManager;
private final ConverterUtil converterUtil;
/**
* Constructor.
*
* @param connectorManager connector manager
* @param converterUtil utility to convert to/from Dto to connector resources
*/
public ConnectorTableServiceProxy(
final ConnectorManager connectorManager,
final ConverterUtil converterUtil
) {
this.connectorManager = connectorManager;
this.converterUtil = converterUtil;
}
/**
* Calls the connector table service create method.
* @param name table name
* @param tableInfo table object
*/
public void create(final QualifiedName name, final TableInfo tableInfo) {
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
final ConnectorTableService service = connectorManager.getTableService(name);
final ConnectorRequestContext connectorRequestContext = converterUtil.toConnectorContext(metacatRequestContext);
service.create(connectorRequestContext, tableInfo);
}
/**
* Calls the connector table service delete method.
* @param name table name
*/
@Caching(evict = {
@CacheEvict(key = "'table.' + #name", beforeInvocation = true),
@CacheEvict(key = "'table.includeInfoDetails.' + #name", beforeInvocation = true),
@CacheEvict(key = "'table.metadataLocationOnly.' + #name", beforeInvocation = true)
})
public void delete(final QualifiedName name) {
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
final ConnectorTableService service = connectorManager.getTableService(name);
log.info("Drop table {}", name);
final ConnectorRequestContext connectorRequestContext = converterUtil.toConnectorContext(metacatRequestContext);
service.delete(connectorRequestContext, name);
}
/**
* Returns an info object that's populated only with the metadata location. Fetches from the cache if
* useCache is set to true.
* @param name the table name
* @param getTableServiceParameters the table service parameters
* @param useCache true, if the location can be retrieved from the cache
* @return The table info object with the metadata location.
*/
@Cacheable(key = "'table.metadataLocationOnly.' + #name", condition = "#useCache")
public TableInfo getWithMetadataLocationOnly(final QualifiedName name,
final GetTableServiceParameters getTableServiceParameters,
final boolean useCache) {
return getInternal(name, getTableServiceParameters);
}
/**
* Returns an info object that's populated with info details. Fetches from the cache if useCache is set to true.
* @param name the table name
* @param getTableServiceParameters the table service parameters
* @param useCache true, if the location can be retrieved from the cache
* @return The table info object
*/
@Cacheable(key = "'table.includeInfoDetails.' + #name", condition = "#useCache")
public TableInfo getWithInfoDetails(final QualifiedName name,
final GetTableServiceParameters getTableServiceParameters,
final boolean useCache) {
return getInternal(name, getTableServiceParameters);
}
/**
* Returns the table if <code>useCache</code> is true and the object exists in the cache. If <code>useCache</code>
* is false or the object does not exist in the cache, it is retrieved from the store.
* @param name table name
* @param getTableServiceParameters get table parameters
* @param useCache true, if table can be retrieved from cache
* @return table info
*/
@Cacheable(key = "'table.' + #name", condition = "#useCache")
public TableInfo get(final QualifiedName name,
final GetTableServiceParameters getTableServiceParameters,
final boolean useCache) {
return getInternal(name, getTableServiceParameters);
}
/**
* Internal get implementation.
* @param name The table name.
* @param getTableServiceParameters get table parameters.
* @return The tableinfo instance.
*/
private TableInfo getInternal(final QualifiedName name,
final GetTableServiceParameters getTableServiceParameters) {
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
final ConnectorRequestContext connectorRequestContext = converterUtil.toConnectorContext(metacatRequestContext);
connectorRequestContext.setIncludeMetadataLocationOnly(
getTableServiceParameters.isIncludeMetadataLocationOnly());
connectorRequestContext.setIncludeMetadata(getTableServiceParameters.isIncludeMetadataFromConnector());
final ConnectorTableService service = connectorManager.getTableService(name);
return service.get(connectorRequestContext, name);
}
/**
* Calls the connector table service rename method.
* @param oldName old table name
* @param newName new table name
* @param isMView true, if the object is a view
*/
@Caching(evict = {
@CacheEvict(key = "'table.' + #oldName", beforeInvocation = true),
@CacheEvict(key = "'table.includeInfoDetails.' + #oldName", beforeInvocation = true),
@CacheEvict(key = "'table.metadataLocationOnly.' + #oldName", beforeInvocation = true)
})
public void rename(
final QualifiedName oldName,
final QualifiedName newName,
final boolean isMView
) {
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
final ConnectorTableService service = connectorManager.getTableService(oldName);
try {
log.info("Renaming {} {} to {}", isMView ? "view" : "table", oldName, newName);
final ConnectorRequestContext connectorRequestContext
= converterUtil.toConnectorContext(metacatRequestContext);
service.rename(connectorRequestContext, oldName, newName);
} catch (UnsupportedOperationException ignored) {
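// Ignore if the connector does not support the rename operation.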
}
}
/**
* Calls the connector table service update method.
* @param name table name
* @param tableInfo table object
* @return true if errors after this should be ignored.
*/
@Caching(evict = {
@CacheEvict(key = "'table.' + #name", beforeInvocation = true),
@CacheEvict(key = "'table.includeInfoDetails.' + #name", beforeInvocation = true),
@CacheEvict(key = "'table.metadataLocationOnly.' + #name", beforeInvocation = true)
})
public boolean update(final QualifiedName name, final TableInfo tableInfo) {
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
final ConnectorTableService service = connectorManager.getTableService(name);
boolean result = false;
try {
log.info("Updating table {}", name);
final ConnectorRequestContext connectorRequestContext
= converterUtil.toConnectorContext(metacatRequestContext);
service.update(connectorRequestContext, tableInfo);
result = connectorRequestContext.isIgnoreErrorsAfterUpdate();
} catch (UnsupportedOperationException ignored) {
//Ignore if the operation is not supported, so that we can at least go ahead and save the user metadata.
log.debug("Catalog {} does not support the table update operation.", name.getCatalogName());
}
return result;
}
/**
* Calls the connector table service getTableNames method.
* @param uri location
* @param prefixSearch if false, the method looks for an exact match of the uri
* @return list of table names
*/
public List<QualifiedName> getQualifiedNames(final String uri, final boolean prefixSearch) {
final List<QualifiedName> result = Lists.newArrayList();
connectorManager.getTableServices().forEach(service -> {
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
final ConnectorRequestContext connectorRequestContext
= converterUtil.toConnectorContext(metacatRequestContext);
try {
final Map<String, List<QualifiedName>> names =
service.getTableNames(connectorRequestContext, Lists.newArrayList(uri), prefixSearch);
final List<QualifiedName> qualifiedNames = names.values().stream().flatMap(Collection::stream)
.collect(Collectors.toList());
result.addAll(qualifiedNames);
} catch (final UnsupportedOperationException uoe) {
log.debug("Table service doesn't support getting table names by URI. Skipping");
}
});
return result;
}
/**
* Calls the connector table service getTableNames method.
* @param uris list of locations
* @param prefixSearch if false, the method looks for an exact match of the uris
* @return list of table names
*/
public Map<String, List<QualifiedName>> getQualifiedNames(final List<String> uris, final boolean prefixSearch) {
final Map<String, List<QualifiedName>> result = Maps.newHashMap();
connectorManager.getTableServices().forEach(service -> {
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
final ConnectorRequestContext connectorRequestContext
= converterUtil.toConnectorContext(metacatRequestContext);
try {
final Map<String, List<QualifiedName>> names =
service.getTableNames(connectorRequestContext, uris, prefixSearch);
names.forEach((uri, qNames) -> {
final List<QualifiedName> existingNames = result.get(uri);
if (existingNames == null) {
result.put(uri, qNames);
} else {
existingNames.addAll(qNames);
}
});
} catch (final UnsupportedOperationException uoe) {
log.debug("Table service doesn't support getting table names by URI. Skipping");
}
});
return result;
}
/**
* Calls the connector table service exists method.
* @param name table name
* @return true, if the object exists.
*/
public boolean exists(final QualifiedName name) {
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
final ConnectorTableService service = connectorManager.getTableService(name);
final ConnectorRequestContext connectorRequestContext = converterUtil.toConnectorContext(metacatRequestContext);
return service.exists(connectorRequestContext, name);
}
/**
* Returns a filtered list of table names.
* @param name catalog name
* @param parameters service parameters
* @return list of table names
*/
public List<QualifiedName> getQualifiedNames(final QualifiedName name,
final GetTableNamesServiceParameters parameters) {
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
final ConnectorTableService service = connectorManager.getTableService(name);
final ConnectorRequestContext connectorRequestContext = converterUtil.toConnectorContext(metacatRequestContext);
return service.getTableNames(connectorRequestContext, name, parameters.getFilter(), parameters.getLimit());
}
}
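// Hedged usage sketch: how the proxy's cache keys line up. Each table can have up to
// three cached TableInfo variants ("table.", "table.includeInfoDetails." and
// "table.metadataLocationOnly." prefixed to the name), and delete/rename/update evict
// all three before hitting the connector. Note the @Cacheable/@CacheEvict annotations
// only take effect when the proxy is a Spring-managed bean with caching enabled; the
// class and method below are illustrative only.
package com.netflix.metacat.main.services.impl;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.main.services.GetTableServiceParameters;
public class ConnectorTableServiceProxyUsage {
    public TableInfo fetchPossiblyCached(final ConnectorTableServiceProxy proxy,
                                         final QualifiedName name) {
        // With useCache=true the first call populates the "table." + name entry in the
        // "metacat" cache; later calls are served from it until a mutation evicts it.
        return proxy.get(name,
            GetTableServiceParameters.builder().includeInfo(true).build(),
            true);
    }
}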
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/DefaultOwnerValidationService.java
package com.netflix.metacat.main.services.impl;
import com.google.common.collect.ImmutableSet;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import com.netflix.metacat.main.services.OwnerValidationService;
import com.netflix.spectator.api.Registry;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.springframework.web.context.request.RequestAttributes;
import org.springframework.web.context.request.RequestContextHolder;
import org.springframework.web.context.request.ServletRequestAttributes;
import javax.annotation.Nullable;
import javax.servlet.http.HttpServletRequest;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
* A default implementation of the ownership validation service that checks users against
* a known list of invalid user ids.
*/
@Slf4j
@RequiredArgsConstructor
public class DefaultOwnerValidationService implements OwnerValidationService {
private static final Set<String> KNOWN_INVALID_OWNERS = ImmutableSet.of(
"root", "metacat", "metacat-thrift-interface");
private static final Set<String> KNOWN_INVALID_OWNER_GROUPS = ImmutableSet.of(
"root", "metacat", "metacat-thrift-interface");
private final Registry registry;
@Override
public List<String> extractPotentialOwners(@NonNull final TableDto dto) {
return Stream.of(
dto.getTableOwner().orElse(null),
MetacatContextManager.getContext().getUserName(),
dto.getSerde().getOwner()
).filter(Objects::nonNull).collect(Collectors.toList());
}
@Override
public List<String> extractPotentialOwnerGroups(@NonNull final TableDto dto) {
return Collections.singletonList(dto.getTableOwnerGroup().orElse(null));
}
@Override
public boolean isUserValid(@Nullable final String user) {
return !isKnownInvalidUser(user);
}
@Override
public boolean isGroupValid(@Nullable final String groupName) {
return !isKnownInvalidGroup(groupName);
}
@Override
public void enforceOwnerValidation(@NonNull final String operationName,
@NonNull final QualifiedName tableName,
@NonNull final TableDto tableDto) {
final String tableOwner = tableDto.getTableOwner().orElse(null);
final String tableOwnerGroup = tableDto.getTableOwnerGroup().orElse(null);
final MetacatRequestContext context = MetacatContextManager.getContext();
final Map<String, String> requestHeaders = getHttpHeaders();
final boolean tableOwnerValid = isUserValid(tableOwner) || isGroupValid(tableOwnerGroup);
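// Note: this default implementation only records metrics and logs for invalid
// owners; it does not reject the operation. Subclasses may choose to throw instead.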
logOwnershipDiagnosticDetails(
operationName, tableName, tableDto,
context, tableOwnerValid, requestHeaders);
}
/**
* Checks if the user is from a known list of invalid users. Subclasses can use
* this method before attempting to check against remote services to save on latency.
*
* @param userId the user id
* @return true if the user id is a known invalid user, else false
*/
protected boolean isKnownInvalidUser(@Nullable final String userId) {
return StringUtils.isBlank(userId) || knownInvalidOwners().contains(userId);
}
/**
* Checks if the group is from a known list of invalid groups. Subclasses can use
* this method before attempting to check against remote services to save on latency.
*
* @param groupName the group name
* @return true if the group is a known invalid group, else false
*/
protected boolean isKnownInvalidGroup(@Nullable final String groupName) {
return StringUtils.isBlank(groupName) || knownInvalidOwnerGroups().contains(groupName);
}
/**
* Returns set of known invalid users. Subclasses can override to provide
* a list fetched from a dynamic source.
*
* @return set of known invalid users
*/
protected Set<String> knownInvalidOwners() {
return KNOWN_INVALID_OWNERS;
}
/**
* Returns set of known invalid owner groups. Subclasses can override to provide
* a list fetched from a dynamic source.
*
* @return set of known invalid groups
*/
protected Set<String> knownInvalidOwnerGroups() {
return KNOWN_INVALID_OWNER_GROUPS;
}
/**
* Logs diagnostic data for debugging invalid owners. Subclasses can use this to log
* diagnostic data when owners are found to be invalid.
*/
protected void logOwnershipDiagnosticDetails(final String operationName,
final QualifiedName name,
final TableDto tableDto,
final MetacatRequestContext context,
final boolean tableOwnerValid,
final Map<String, String> requestHeaders) {
try {
if (!tableOwnerValid) {
registry.counter(
"metacat.table.owner.invalid",
"operation", operationName,
"scheme", String.valueOf(context.getScheme()),
"catalogAndDb", name.getCatalogName() + "_" + name.getDatabaseName()
).increment();
log.info("Operation: {}, invalid owner: {}, group: {}. name: {}, dto: {}, context: {}, headers: {}",
operationName,
tableDto.getTableOwner().orElse("<null>"),
tableDto.getTableOwnerGroup().orElse("<null>"),
name, tableDto, context, requestHeaders);
}
} catch (final Exception ex) {
log.warn("Error when logging diagnostic data for invalid owner for operation: {}, name: {}, table: {}",
operationName, name, tableDto, ex);
}
}
/**
* Returns all the Http headers for the current request. Subclasses can use it to
* log diagnostic data.
*
* @return the Http headers
*/
protected Map<String, String> getHttpHeaders() {
final Map<String, String> requestHeaders = new HashMap<>();
final RequestAttributes requestAttributes = RequestContextHolder.getRequestAttributes();
if (requestAttributes instanceof ServletRequestAttributes) {
final ServletRequestAttributes servletRequestAttributes = (ServletRequestAttributes) requestAttributes;
final HttpServletRequest servletRequest = servletRequestAttributes.getRequest();
if (servletRequest != null) {
final Enumeration<String> headerNames = servletRequest.getHeaderNames();
if (headerNames != null) {
while (headerNames.hasMoreElements()) {
final String header = headerNames.nextElement();
requestHeaders.put(header, servletRequest.getHeader(header));
}
}
}
}
return requestHeaders;
}
}
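// Hedged example of extending DefaultOwnerValidationService with an additional
// blocked owner id; the "svc-batch" entry and class name are illustrative, and the
// subclass is assumed to be wired in as the OwnerValidationService bean elsewhere.
package com.netflix.metacat.main.services.impl;
import com.google.common.collect.ImmutableSet;
import com.netflix.spectator.api.Registry;
import java.util.Set;
public class CustomOwnerValidationService extends DefaultOwnerValidationService {
    private static final Set<String> INVALID_OWNERS =
        ImmutableSet.of("root", "metacat", "metacat-thrift-interface", "svc-batch");
    public CustomOwnerValidationService(final Registry registry) {
        super(registry);
    }
    @Override
    protected Set<String> knownInvalidOwners() {
        // Could alternatively be fetched from a dynamic configuration source.
        return INVALID_OWNERS;
    }
}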
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/CatalogServiceImpl.java
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services.impl;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.CatalogDto;
import com.netflix.metacat.common.dto.CatalogMappingDto;
import com.netflix.metacat.common.dto.CreateCatalogDto;
import com.netflix.metacat.common.exception.MetacatNotFoundException;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.model.CatalogInfo;
import com.netflix.metacat.common.server.connectors.model.ClusterInfo;
import com.netflix.metacat.common.server.converter.ConverterUtil;
import com.netflix.metacat.common.server.events.MetacatEventBus;
import com.netflix.metacat.common.server.events.MetacatUpdateDatabasePostEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateDatabasePreEvent;
import com.netflix.metacat.common.server.usermetadata.UserMetadataService;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import com.netflix.metacat.main.manager.ConnectorManager;
import com.netflix.metacat.main.services.CatalogService;
import com.netflix.metacat.common.server.spi.MetacatCatalogConfig;
import com.netflix.metacat.main.services.GetCatalogServiceParameters;
import javax.annotation.Nonnull;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
/**
* Catalog service implementation.
*/
public class CatalogServiceImpl implements CatalogService {
private final ConnectorManager connectorManager;
private final UserMetadataService userMetadataService;
private final MetacatEventBus eventBus;
private final ConverterUtil converterUtil;
/**
* Constructor.
*
* @param connectorManager connector manager
* @param userMetadataService user metadata service
* @param eventBus Internal event bus
* @param converterUtil utility to convert to/from Dto to connector resources
*/
public CatalogServiceImpl(
final ConnectorManager connectorManager,
final UserMetadataService userMetadataService,
final MetacatEventBus eventBus,
final ConverterUtil converterUtil
) {
this.connectorManager = connectorManager;
this.userMetadataService = userMetadataService;
this.eventBus = eventBus;
this.converterUtil = converterUtil;
}
/**
* {@inheritDoc}
*/
@Nonnull
@Override
public CatalogDto get(final QualifiedName name, final GetCatalogServiceParameters getCatalogServiceParameters) {
final Set<MetacatCatalogConfig> configs = connectorManager.getCatalogConfigs(name.getCatalogName());
final CatalogDto result = new CatalogDto();
result.setName(name);
// Prepare the connector context
final ConnectorRequestContext context = converterUtil.toConnectorContext(MetacatContextManager.getContext());
context.setIncludeMetadata(getCatalogServiceParameters.isIncludeMetadataFromConnector());
final List<String> databases = Lists.newArrayList();
configs.forEach(config -> {
QualifiedName qName = name;
if (config.getSchemaWhitelist().isEmpty()) {
result.setType(config.getType());
} else {
qName = QualifiedName.ofDatabase(name.getCatalogName(), config.getSchemaWhitelist().get(0));
}
if (getCatalogServiceParameters.isIncludeDatabaseNames()) {
databases.addAll(
connectorManager.getDatabaseService(qName).listNames(context, name, null, null, null)
.stream().map(QualifiedName::getDatabaseName)
.filter(s -> config.getSchemaBlacklist().isEmpty() || !config.getSchemaBlacklist().contains(s))
.filter(s -> config.getSchemaWhitelist().isEmpty() || config.getSchemaWhitelist().contains(s))
.sorted(String.CASE_INSENSITIVE_ORDER)
.collect(Collectors.toList())
);
}
if (config.isProxy()) {
final CatalogInfo catalogInfo =
connectorManager.getCatalogService(name).get(context, name);
final ClusterInfo clusterInfo = catalogInfo.getClusterInfo();
result.setCluster(converterUtil.toClusterDto(clusterInfo));
result.setType(clusterInfo.getType());
result.setMetadata(catalogInfo.getMetadata());
} else {
result.setCluster(converterUtil.toClusterDto(config.getClusterInfo()));
}
});
result.setDatabases(databases);
if (getCatalogServiceParameters.isIncludeUserMetadata()) {
userMetadataService.populateMetadata(result, false);
}
return result;
}
/**
* {@inheritDoc}
*/
@Nonnull
@Override
public CatalogDto get(final QualifiedName name) {
return get(name, GetCatalogServiceParameters.builder().includeDatabaseNames(true)
.includeUserMetadata(true).build());
}
/**
* {@inheritDoc}
*/
@Nonnull
@Override
public List<CatalogMappingDto> getCatalogNames() {
final Set<CatalogInfo> catalogs = connectorManager.getCatalogs();
if (catalogs.isEmpty()) {
throw new MetacatNotFoundException("Unable to locate any catalogs");
}
return catalogs.stream()
.map(catalog -> new CatalogMappingDto(catalog.getName().getCatalogName(),
catalog.getClusterInfo().getType(), converterUtil.toClusterDto(catalog.getClusterInfo())))
.distinct()
.collect(Collectors.toList());
}
/**
* {@inheritDoc}
*/
@Override
public void update(final QualifiedName name, final CreateCatalogDto createCatalogDto) {
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
eventBus.post(new MetacatUpdateDatabasePreEvent(name, metacatRequestContext, this));
connectorManager.getCatalogConfigs(name.getCatalogName());
userMetadataService.saveMetadata(metacatRequestContext.getUserName(), createCatalogDto, true);
eventBus.post(new MetacatUpdateDatabasePostEvent(name, metacatRequestContext, this));
}
}
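// Hedged usage sketch of CatalogService.get with explicit parameters: fetching a
// catalog without database names or user metadata for a cheap type lookup.
// QualifiedName.ofCatalog is assumed by analogy with the ofDatabase/ofTable factory
// methods used above, and the "prodhive" catalog name is illustrative.
package com.netflix.metacat.main.services.impl;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.CatalogDto;
import com.netflix.metacat.main.services.CatalogService;
import com.netflix.metacat.main.services.GetCatalogServiceParameters;
public class CatalogLookupExample {
    public String catalogType(final CatalogService catalogService) {
        final CatalogDto dto = catalogService.get(
            QualifiedName.ofCatalog("prodhive"),
            GetCatalogServiceParameters.builder()
                .includeDatabaseNames(false)
                .includeUserMetadata(false)
                .build());
        return dto.getType();
    }
}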
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/PartitionServiceImpl.java
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services.impl;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.base.Strings;
import com.google.common.base.Throwables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.GetPartitionsRequestDto;
import com.netflix.metacat.common.dto.HasMetadata;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.common.dto.PartitionsSaveRequestDto;
import com.netflix.metacat.common.dto.PartitionsSaveResponseDto;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.server.connectors.ConnectorPartitionService;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.exception.TableMigrationInProgressException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionListRequest;
import com.netflix.metacat.common.server.connectors.model.PartitionsSaveResponse;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.server.converter.ConverterUtil;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePartitionPostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePartitionPreEvent;
import com.netflix.metacat.common.server.events.MetacatEventBus;
import com.netflix.metacat.common.server.events.MetacatSaveTablePartitionMetadataOnlyPostEvent;
import com.netflix.metacat.common.server.events.MetacatSaveTablePartitionMetadataOnlyPreEvent;
import com.netflix.metacat.common.server.events.MetacatSaveTablePartitionPostEvent;
import com.netflix.metacat.common.server.events.MetacatSaveTablePartitionPreEvent;
import com.netflix.metacat.common.server.monitoring.Metrics;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.usermetadata.UserMetadataService;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import com.netflix.metacat.common.server.util.MetacatUtils;
import com.netflix.metacat.common.server.util.ThreadServiceManager;
import com.netflix.metacat.main.manager.ConnectorManager;
import com.netflix.metacat.main.services.CatalogService;
import com.netflix.metacat.main.services.GetTableServiceParameters;
import com.netflix.metacat.main.services.PartitionService;
import com.netflix.metacat.main.services.TableService;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Registry;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
/**
* Partition service.
*/
@Slf4j
public class PartitionServiceImpl implements PartitionService {
private final CatalogService catalogService;
private final ConnectorManager connectorManager;
private final TableService tableService;
private final UserMetadataService userMetadataService;
private final ThreadServiceManager threadServiceManager;
private final Config config;
private final MetacatEventBus eventBus;
private final ConverterUtil converterUtil;
private final Registry registry;
private final Id partitionAddDistSummary;
private final Id partitionMetadataOnlyAddDistSummary;
private final Id partitionGetDistSummary;
private final Id partitionDeleteDistSummary;
/**
* Constructor.
*
* @param catalogService catalog service
* @param connectorManager connector manager
* @param tableService table service
* @param userMetadataService user metadata service
* @param threadServiceManager thread manager
* @param config configurations
* @param eventBus Internal event bus
* @param converterUtil utility to convert to/from Dto to connector resources
* @param registry registry handle
*/
public PartitionServiceImpl(
final CatalogService catalogService,
final ConnectorManager connectorManager,
final TableService tableService,
final UserMetadataService userMetadataService,
final ThreadServiceManager threadServiceManager,
final Config config,
final MetacatEventBus eventBus,
final ConverterUtil converterUtil,
final Registry registry
) {
this.catalogService = catalogService;
this.connectorManager = connectorManager;
this.tableService = tableService;
this.userMetadataService = userMetadataService;
this.threadServiceManager = threadServiceManager;
this.config = config;
this.eventBus = eventBus;
this.converterUtil = converterUtil;
this.registry = registry;
this.partitionAddDistSummary =
registry.createId(Metrics.DistributionSummaryAddPartitions.getMetricName());
this.partitionMetadataOnlyAddDistSummary =
registry.createId(Metrics.DistributionSummaryMetadataOnlyAddPartitions.getMetricName());
this.partitionGetDistSummary =
registry.createId(Metrics.DistributionSummaryGetPartitions.getMetricName());
this.partitionDeleteDistSummary =
registry.createId(Metrics.DistributionSummaryDeletePartitions.getMetricName());
}
/**
* {@inheritDoc}
*/
@Override
public List<PartitionDto> list(
final QualifiedName name,
@Nullable final Sort sort,
@Nullable final Pageable pageable,
final boolean includeUserDefinitionMetadata,
final boolean includeUserDataMetadata,
@Nullable final GetPartitionsRequestDto getPartitionsRequestDto
) {
// The conversion handles a null getPartitionsRequestDto; table info is passed to getPartitions below.
final PartitionListRequest partitionListRequest =
converterUtil.toPartitionListRequest(getPartitionsRequestDto, pageable, sort);
final String filterExpression = partitionListRequest.getFilter();
final List<String> partitionNames = partitionListRequest.getPartitionNames();
if (Strings.isNullOrEmpty(filterExpression)
&& (pageable == null || !pageable.isPageable())
&& (partitionNames == null || partitionNames.isEmpty())
&& config.getNamesToThrowErrorOnListPartitionsWithNoFilter().contains(name)) {
throw new IllegalArgumentException(String.format("No filter or limit specified for table %s", name));
}
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
final ConnectorPartitionService service = connectorManager.getPartitionService(name);
final ConnectorRequestContext connectorRequestContext = converterUtil.toConnectorContext(metacatRequestContext);
final List<PartitionInfo> resultInfo = service
.getPartitions(connectorRequestContext, name, partitionListRequest, this.getTableInfo(name));
List<PartitionDto> result = Lists.newArrayList();
if (resultInfo != null && !resultInfo.isEmpty()) {
result = resultInfo.stream().map(converterUtil::toPartitionDto).collect(Collectors.toList());
final List<QualifiedName> names = Lists.newArrayList();
final List<String> uris = Lists.newArrayList();
final Map<String, ObjectNode> prePopulatedMap = new HashMap<>();
resultInfo.stream().filter(partitionInfo -> partitionInfo.getDataMetrics() != null)
.forEach(partitionInfo ->
prePopulatedMap.put(partitionInfo.getName().toString(), partitionInfo.getDataMetrics()));
result.forEach(partitionDto -> {
names.add(partitionDto.getName());
if (partitionDto.isDataExternal()) {
uris.add(partitionDto.getDataUri());
}
});
registry.distributionSummary(
this.partitionGetDistSummary.withTags(name.parts())).record(result.size());
log.info("Got {} partitions for {} using filter: {} and partition names: {}",
result.size(), name, filterExpression,
partitionNames);
if (includeUserDefinitionMetadata || includeUserDataMetadata) {
final List<ListenableFuture<Map<String, ObjectNode>>> futures = Lists.newArrayList();
futures.add(threadServiceManager.getExecutor().submit(() -> includeUserDefinitionMetadata
? userMetadataService.getDefinitionMetadataMap(names)
: Maps.newHashMap()));
futures.add(threadServiceManager.getExecutor().submit(() -> includeUserDataMetadata
? userMetadataService.getDataMetadataMap(uris)
: Maps.newHashMap()));
try {
final List<Map<String, ObjectNode>> metadataResults = Futures.successfulAsList(futures)
.get(1, TimeUnit.HOURS);
final Map<String, ObjectNode> definitionMetadataMap = metadataResults.get(0);
final Map<String, ObjectNode> dataMetadataMap = metadataResults.get(1);
result.forEach(partitionDto -> userMetadataService.populateMetadata(partitionDto,
definitionMetadataMap.get(partitionDto.getName().toString()),
prePopulatedMap.containsKey(partitionDto.getName().toString())
? prePopulatedMap.get(partitionDto.getName().toString()) //using the prepopulated datametric
: dataMetadataMap.get(partitionDto.getDataUri())));
} catch (Exception e) {
Throwables.propagate(e);
}
}
}
return result;
}
/**
* {@inheritDoc}
*/
@Override
public Integer count(final QualifiedName name) {
Integer result = 0;
if (tableService.exists(name)) {
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
final ConnectorPartitionService service = connectorManager.getPartitionService(name);
final ConnectorRequestContext connectorRequestContext
= converterUtil.toConnectorContext(metacatRequestContext);
result = service.getPartitionCount(connectorRequestContext, name, this.getTableInfo(name));
}
return result;
}
/**
* {@inheritDoc}
*/
@Override
public PartitionsSaveResponseDto save(final QualifiedName name, final PartitionsSaveRequestDto dto) {
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
final ConnectorPartitionService service = connectorManager.getPartitionService(name);
final List<PartitionDto> partitionDtos = dto.getPartitions();
// If no partitions are passed, then return
if (partitionDtos == null || partitionDtos.isEmpty()) {
return new PartitionsSaveResponseDto();
}
if (!tableService.exists(name)) {
throw new TableNotFoundException(name);
}
// Fetch tableDto only if no update on tags configs exist.
if (MetacatUtils.configHasDoNotModifyForIcebergMigrationTag(config.getNoTableUpdateOnTags())) {
final TableDto tableDto = getTableDto(name);
if (MetacatUtils.hasDoNotModifyForIcebergMigrationTag(tableDto, config.getNoTableUpdateOnTags())) {
throw new TableMigrationInProgressException(
MetacatUtils.getIcebergMigrationExceptionMsg("PartitionUpdate", name.getTableName()));
}
}
// Optimization for metadata-only updates (e.g. squirrel), assuming only valid partitions are requested.
if (dto.getSaveMetadataOnly()) {
return savePartitionMetadataOnly(metacatRequestContext, dto, name, partitionDtos);
} else {
return updatePartitions(service, metacatRequestContext, dto, name, partitionDtos);
}
}
/**
* Optimization for metadata only updates.
*
* @param metacatRequestContext request context
* @param dto savePartition dto
* @param name qualified name
* @param partitionDtos partition dtos
* @return empty save partition response dto
*/
private PartitionsSaveResponseDto savePartitionMetadataOnly(
final MetacatRequestContext metacatRequestContext,
final PartitionsSaveRequestDto dto,
final QualifiedName name, final List<PartitionDto> partitionDtos) {
validateAdds(name, partitionDtos.size());
registry.distributionSummary(
this.partitionMetadataOnlyAddDistSummary.withTags(name.parts())).record(partitionDtos.size());
eventBus.post(
new MetacatSaveTablePartitionMetadataOnlyPreEvent(name, metacatRequestContext, this, dto));
// Save metadata
log.info("Saving metadata only for partitions for {}", name);
userMetadataService.saveMetadata(metacatRequestContext.getUserName(), partitionDtos, true);
eventBus.post(
new MetacatSaveTablePartitionMetadataOnlyPostEvent(
name, metacatRequestContext, this, partitionDtos, new PartitionsSaveResponseDto()));
// An empty saveResponseDto is returned as an optimization,
// since the client (squirrel) only checks the response code.
return converterUtil.toPartitionsSaveResponseDto(new PartitionsSaveResponse());
}
/**
* Add, delete, update partitions.
*
* @param service partition service
* @param metacatRequestContext metacat request context
* @param dto partition save request dto
* @param name qualified name
* @param partitionDtos partitions dto
* @return partition save response dto
*/
private PartitionsSaveResponseDto updatePartitions(
final ConnectorPartitionService service,
final MetacatRequestContext metacatRequestContext,
final PartitionsSaveRequestDto dto,
final QualifiedName name, final List<PartitionDto> partitionDtos) {
final ConnectorRequestContext connectorRequestContext = converterUtil.toConnectorContext(metacatRequestContext);
List<HasMetadata> deletePartitions = Lists.newArrayList();
List<PartitionDto> deletePartitionDtos = Lists.newArrayList();
validate(name, dto);
registry.distributionSummary(
this.partitionAddDistSummary.withTags(name.parts())).record(partitionDtos.size());
final List<String> partitionIdsForDeletes = dto.getPartitionIdsForDeletes();
if (partitionIdsForDeletes != null && !partitionIdsForDeletes.isEmpty()) {
eventBus.post(new MetacatDeleteTablePartitionPreEvent(name, metacatRequestContext, this, dto));
registry.distributionSummary(
this.partitionDeleteDistSummary.withTags(name.parts())).record(partitionIdsForDeletes.size());
final GetPartitionsRequestDto requestDto =
new GetPartitionsRequestDto(null, partitionIdsForDeletes, false, true);
final List<PartitionInfo> deletePartitionInfos = service.getPartitions(connectorRequestContext, name,
converterUtil.toPartitionListRequest(requestDto, null, null), this.getTableInfo(name));
if (deletePartitionInfos != null) {
deletePartitionDtos = deletePartitionInfos.stream()
.map(converterUtil::toPartitionDto).collect(Collectors.toList());
deletePartitions = new ArrayList<>(deletePartitionDtos);
}
}
// Save all the new and updated partitions
eventBus.post(new MetacatSaveTablePartitionPreEvent(name, metacatRequestContext, this, dto));
log.info("Saving partitions for {} ({})", name, partitionDtos.size());
final PartitionsSaveResponseDto result = converterUtil.toPartitionsSaveResponseDto(
service.savePartitions(connectorRequestContext, name, converterUtil.toPartitionsSaveRequest(dto)));
// Save metadata
log.info("Saving user metadata for partitions for {}", name);
// delete metadata
if (!deletePartitions.isEmpty()) {
log.info("Deleting user metadata for partitions with names {} for {}", partitionIdsForDeletes, name);
deleteMetadatas(metacatRequestContext.getUserName(), deletePartitions);
}
final long start = registry.clock().wallTime();
userMetadataService.saveMetadata(metacatRequestContext.getUserName(), partitionDtos, true);
final long duration = registry.clock().wallTime() - start;
log.info("Time taken to save user metadata for table {} is {} ms", name, duration);
registry.timer(registry.createId(Metrics.TimerSavePartitionMetadata.getMetricName()).withTags(name.parts()))
.record(duration, TimeUnit.MILLISECONDS);
//publish the delete and save in order
//TODO: create MetacatUpdateTablePartitionEvents, only publish one partitionUpdateEvent here.
if (partitionIdsForDeletes != null && !partitionIdsForDeletes.isEmpty()) {
eventBus.post(
new MetacatDeleteTablePartitionPostEvent(name,
metacatRequestContext, this, deletePartitionDtos));
}
eventBus.post(
new MetacatSaveTablePartitionPostEvent(name, metacatRequestContext, this, partitionDtos, result));
return result;
}
private void validate(final QualifiedName name, final PartitionsSaveRequestDto dto) {
validateDeletes(name, dto.getPartitionIdsForDeletes() != null ? dto.getPartitionIdsForDeletes().size() : 0);
validateAdds(name, dto.getPartitions() != null ? dto.getPartitions().size() : 0);
}
private void validateDeletes(final QualifiedName name, final int noOfDeletes) {
if (noOfDeletes > config.getMaxDeletedPartitionsThreshold()) {
final String message =
String.format("Number of partitions to be deleted for table %s exceeded the threshold %d",
name, config.getMaxDeletedPartitionsThreshold());
log.warn(message);
throw new IllegalArgumentException(message);
}
}
private void validateAdds(final QualifiedName name, final int noOfAdds) {
if (noOfAdds > config.getMaxAddedPartitionsThreshold()) {
final String message =
String.format("Number of partitions to be added/updated for table %s exceeded the threshold %d",
name, config.getMaxAddedPartitionsThreshold());
log.warn(message);
throw new IllegalArgumentException(message);
}
}
/**
* {@inheritDoc}
*/
@Override
public void delete(final QualifiedName name, final List<String> partitionIds) {
validateDeletes(name, partitionIds != null ? partitionIds.size() : 0);
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
registry.distributionSummary(
this.partitionDeleteDistSummary.withTags(name.parts())).record(partitionIds.size());
if (!tableService.exists(name)) {
throw new TableNotFoundException(name);
}
// Fetch tableDto only if no update on tags configs exist.
if (MetacatUtils.configHasDoNotModifyForIcebergMigrationTag(config.getNoTableDeleteOnTags())) {
final TableDto tableDto = getTableDto(name);
if (MetacatUtils.hasDoNotModifyForIcebergMigrationTag(tableDto, config.getNoTableDeleteOnTags())) {
throw new TableMigrationInProgressException(
MetacatUtils.getIcebergMigrationExceptionMsg("PartitionDelete", name.getTableName()));
}
}
if (!partitionIds.isEmpty()) {
final PartitionsSaveRequestDto dto = new PartitionsSaveRequestDto();
dto.setPartitionIdsForDeletes(partitionIds);
eventBus.post(new MetacatDeleteTablePartitionPreEvent(name, metacatRequestContext, this, dto));
final ConnectorPartitionService service = connectorManager.getPartitionService(name);
// Get the partitions before calling delete
final GetPartitionsRequestDto requestDto = new GetPartitionsRequestDto(null, partitionIds, false, true);
final ConnectorRequestContext connectorRequestContext
= converterUtil.toConnectorContext(metacatRequestContext);
final List<PartitionInfo> partitionInfos = service.getPartitions(connectorRequestContext, name,
converterUtil.toPartitionListRequest(requestDto, null, null), this.getTableInfo(name));
List<HasMetadata> partitions = Lists.newArrayList();
List<PartitionDto> partitionDtos = Lists.newArrayList();
if (partitionInfos != null) {
partitionDtos = partitionInfos.stream().map(converterUtil::toPartitionDto).collect(Collectors.toList());
partitions = new ArrayList<>(partitionDtos);
}
log.info("Deleting partitions with names {} for {}", partitionIds, name);
service.deletePartitions(connectorRequestContext, name, partitionIds, this.getTableInfo(name));
// delete metadata
log.info("Deleting user metadata for partitions with names {} for {}", partitionIds, name);
if (!partitions.isEmpty()) {
deleteMetadatas(metacatRequestContext.getUserName(), partitions);
}
eventBus.post(
new MetacatDeleteTablePartitionPostEvent(name, metacatRequestContext, this, partitionDtos)
);
}
}
private void deleteMetadatas(final String userId, final List<HasMetadata> partitions) {
// Spawning off since this is a time consuming task
threadServiceManager.getExecutor().submit(() -> userMetadataService.deleteMetadata(userId, partitions));
}
/**
* {@inheritDoc}
*/
@Override
public List<QualifiedName> getQualifiedNames(final String uri, final boolean prefixSearch) {
return getQualifiedNames(Lists.newArrayList(uri), prefixSearch).values().stream().flatMap(Collection::stream)
.collect(Collectors.toList());
}
/**
* {@inheritDoc}
*/
@Override
public Map<String, List<QualifiedName>> getQualifiedNames(final List<String> uris, final boolean prefixSearch) {
final Map<String, List<QualifiedName>> result = Maps.newConcurrentMap();
final List<ListenableFuture<Void>> futures = Lists.newArrayList();
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
connectorManager.getPartitionServices().forEach(service -> {
futures.add(threadServiceManager.getExecutor().submit(() -> {
final ConnectorRequestContext connectorRequestContext
= converterUtil.toConnectorContext(metacatRequestContext);
try {
final Map<String, List<QualifiedName>> partitionNames = service
.getPartitionNames(connectorRequestContext, uris, prefixSearch);
partitionNames.forEach((uri, subPartitionNames) -> {
final List<QualifiedName> existingPartitionNames = result.get(uri);
if (existingPartitionNames == null) {
result.put(uri, subPartitionNames);
} else {
existingPartitionNames.addAll(subPartitionNames);
}
});
} catch (final UnsupportedOperationException uoe) {
log.debug("Partition service doesn't support getPartitionNames. Ignoring.");
}
return null;
}));
});
try {
Futures.allAsList(futures).get(1, TimeUnit.HOURS);
} catch (Exception e) {
Throwables.propagate(e);
}
return result;
}
/**
* {@inheritDoc}
*/
@Override
public List<String> getPartitionKeys(
final QualifiedName name,
@Nullable final Sort sort,
@Nullable final Pageable pageable,
@Nullable final GetPartitionsRequestDto getPartitionsRequestDto
) {
List<String> result = Lists.newArrayList();
if (tableService.exists(name)) {
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
final ConnectorPartitionService service = connectorManager.getPartitionService(name);
final ConnectorRequestContext connectorRequestContext
= converterUtil.toConnectorContext(metacatRequestContext);
try {
result = service.getPartitionKeys(
connectorRequestContext,
name,
converterUtil.toPartitionListRequest(getPartitionsRequestDto, pageable, sort),
this.getTableInfo(name)
);
} catch (final UnsupportedOperationException uoe) {
log.debug("Catalog {} doesn't support getPartitionKeys. Ignoring.", name.getCatalogName());
}
}
return result;
}
/**
* {@inheritDoc}
*/
@Override
public List<String> getPartitionUris(
final QualifiedName name,
@Nullable final Sort sort,
@Nullable final Pageable pageable,
@Nullable final GetPartitionsRequestDto getPartitionsRequestDto
) {
List<String> result = Lists.newArrayList();
if (tableService.exists(name)) {
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
final ConnectorPartitionService service = connectorManager.getPartitionService(name);
final ConnectorRequestContext connectorRequestContext
= converterUtil.toConnectorContext(metacatRequestContext);
try {
result = service.getPartitionUris(connectorRequestContext, name,
converterUtil.toPartitionListRequest(
getPartitionsRequestDto, pageable, sort), this.getTableInfo(name));
} catch (final UnsupportedOperationException uoe) {
log.info("Catalog {} doesn't support getPartitionUris. Ignoring.", name.getCatalogName());
}
}
return result;
}
/**
* {@inheritDoc}
*/
@Override
public PartitionDto create(final QualifiedName name, final PartitionDto partitionDto) {
final PartitionsSaveRequestDto dto = new PartitionsSaveRequestDto();
dto.setCheckIfExists(false);
dto.setPartitions(Lists.newArrayList(partitionDto));
save(name, dto);
return partitionDto;
}
/**
* {@inheritDoc}
*/
@Override
public void update(final QualifiedName name, final PartitionDto partitionDto) {
final PartitionsSaveRequestDto dto = new PartitionsSaveRequestDto();
dto.setPartitions(Lists.newArrayList(partitionDto));
save(name, dto);
}
/**
* {@inheritDoc}
*/
@Override
public PartitionDto updateAndReturn(final QualifiedName name, final PartitionDto dto) {
update(name, dto);
return dto;
}
/**
* {@inheritDoc}
*/
@Override
public void delete(final QualifiedName name) {
final QualifiedName tableName = QualifiedName
.ofTable(name.getCatalogName(), name.getDatabaseName(), name.getTableName());
delete(tableName, Lists.newArrayList(name.getPartitionName()));
}
/**
* {@inheritDoc}
*/
@Override
public PartitionDto get(final QualifiedName name) {
PartitionDto result = null;
final QualifiedName tableName = QualifiedName
.ofTable(name.getCatalogName(), name.getDatabaseName(), name.getTableName());
final List<PartitionDto> dtos =
list(tableName, null, null, true, true,
new GetPartitionsRequestDto(null, Lists.newArrayList(name.getPartitionName()), true, true));
if (!dtos.isEmpty()) {
result = dtos.get(0);
}
return result;
}
/**
* {@inheritDoc}
*/
@Override
public boolean exists(final QualifiedName name) {
return get(name) != null;
}
private TableDto getTableDto(final QualifiedName name) {
return this.tableService.get(name,
GetTableServiceParameters.builder().includeInfo(true)
.useCache(true).build()).orElseThrow(() -> new TableNotFoundException(name));
}
private TableInfo getTableInfo(final QualifiedName name) {
return converterUtil.fromTableDto(getTableDto(name));
}
}
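// Hedged usage sketch: adding a single partition through PartitionService.save. As
// the implementation above shows, saves beyond config.getMaxAddedPartitionsThreshold()
// are rejected, and setting saveMetadataOnly on the request would skip the connector
// write entirely. Table and class names are illustrative.
package com.netflix.metacat.main.services.impl;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.common.dto.PartitionsSaveRequestDto;
import com.netflix.metacat.common.dto.PartitionsSaveResponseDto;
import com.netflix.metacat.main.services.PartitionService;
public class PartitionSaveExample {
    public PartitionsSaveResponseDto addPartition(final PartitionService partitionService,
                                                  final PartitionDto partition) {
        final QualifiedName table = QualifiedName.ofTable("prodhive", "mydb", "mytable");
        final PartitionsSaveRequestDto request = new PartitionsSaveRequestDto();
        request.setPartitions(Lists.newArrayList(partition));
        request.setCheckIfExists(false);
        return partitionService.save(table, request);
    }
}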
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/MViewServiceImpl.java
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services.impl;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.base.MoreObjects;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.NameDateDto;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.GetPartitionsRequestDto;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.common.dto.PartitionsSaveRequestDto;
import com.netflix.metacat.common.dto.PartitionsSaveResponseDto;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.dto.StorageDto;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.exception.NotFoundException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.common.server.converter.ConverterUtil;
import com.netflix.metacat.common.server.events.MetacatCreateMViewPostEvent;
import com.netflix.metacat.common.server.events.MetacatCreateMViewPreEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteMViewPartitionPostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteMViewPartitionPreEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteMViewPostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteMViewPreEvent;
import com.netflix.metacat.common.server.events.MetacatEventBus;
import com.netflix.metacat.common.server.events.MetacatSaveMViewPartitionPostEvent;
import com.netflix.metacat.common.server.events.MetacatSaveMViewPartitionPreEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateMViewPostEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateMViewPreEvent;
import com.netflix.metacat.common.server.usermetadata.GetMetadataInterceptorParameters;
import com.netflix.metacat.main.manager.ConnectorManager;
import com.netflix.metacat.main.services.GetTableServiceParameters;
import com.netflix.metacat.main.services.MViewService;
import com.netflix.metacat.main.services.MetacatServiceHelper;
import com.netflix.metacat.main.services.PartitionService;
import com.netflix.metacat.main.services.TableService;
import com.netflix.metacat.common.server.usermetadata.UserMetadataService;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.Nullable;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Function;
import java.util.stream.Collectors;
/**
* Metacat view service.
*/
@Slf4j
public class MViewServiceImpl implements MViewService {
/**
* Hive database name where views are stored.
*/
private static final String VIEW_DB_NAME = "franklinviews";
private final ConnectorManager connectorManager;
private final TableService tableService;
private final PartitionService partitionService;
private final UserMetadataService userMetadataService;
private final MetacatEventBus eventBus;
private final ConverterUtil converterUtil;
/**
* Constructor.
*
* @param connectorManager connector manager
* @param tableService table service
* @param partitionService partition service
* @param userMetadataService user metadata interceptor service
* @param eventBus Internal event bus
* @param converterUtil utility to convert to/from Dto to connector resources
*/
public MViewServiceImpl(
final ConnectorManager connectorManager,
final TableService tableService,
final PartitionService partitionService,
final UserMetadataService userMetadataService,
final MetacatEventBus eventBus,
final ConverterUtil converterUtil
) {
this.connectorManager = connectorManager;
this.tableService = tableService;
this.partitionService = partitionService;
this.userMetadataService = userMetadataService;
this.eventBus = eventBus;
this.converterUtil = converterUtil;
}
/**
     * Creates the materialized view using the schema of the given table.
     * Assumes that the "franklinviews" database already exists in the given catalog.
*/
@Override
public TableDto create(final QualifiedName name) {
return createAndSnapshotPartitions(name, false, null);
}
/**
     * Creates the materialized view using the schema of the given table.
     * Assumes that the "franklinviews" database already exists in the given catalog.
*/
@Override
public TableDto createAndSnapshotPartitions(final QualifiedName name,
final boolean snapshot,
@Nullable final String filter) {
final TableDto result;
// Get the table
log.info("Get the table {}", name);
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
eventBus.post(new MetacatCreateMViewPreEvent(name, metacatRequestContext, this, snapshot, filter));
final Optional<TableDto> oTable = tableService.get(name,
GetTableServiceParameters.builder()
.includeDataMetadata(false)
.includeDefinitionMetadata(false)
.disableOnReadMetadataIntercetor(true) //turn off for optimization
.includeInfo(true)
.build());
if (oTable.isPresent()) {
final TableDto table = oTable.get();
final String viewName = createViewName(name);
final QualifiedName targetName = QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, viewName);
// Get the view table if it exists
log.info("Check if the view table {} exists.", targetName);
Optional<TableDto> oViewTable = Optional.empty();
try {
//read the original view back
oViewTable = tableService.get(targetName,
GetTableServiceParameters.builder()
.includeDataMetadata(false)
.includeDefinitionMetadata(false)
.disableOnReadMetadataIntercetor(false)
.includeInfo(true)
.build());
} catch (NotFoundException ignored) {
}
if (!oViewTable.isPresent()) {
log.info("Creating view {}.", targetName);
//
// Fix issue where an iceberg table is used to create the mView and fails with invalid field type.
// The issue is caused by a mismatch with field source types.
// The check for iceberg table is needed to not disrupt the previous logic for other table types.
//
if (MetacatServiceHelper.isIcebergTable(table)) {
table.getFields().forEach(f -> f.setSource_type(null));
}
result = tableService.copy(table, targetName);
} else {
result = oViewTable.get();
}
if (snapshot) {
snapshotPartitions(name, filter);
}
eventBus.post(
new MetacatCreateMViewPostEvent(name, metacatRequestContext, this, result, snapshot, filter)
);
} else {
throw new TableNotFoundException(name);
}
return result;
}
/**
* {@inheritDoc}
*/
@Override
public TableDto create(final QualifiedName name, final TableDto dto) {
// Ignore the dto passed
return create(name);
}
/**
* {@inheritDoc}
*/
@Override
public TableDto deleteAndReturn(final QualifiedName name) {
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
eventBus.post(new MetacatDeleteMViewPreEvent(name, metacatRequestContext, this));
final QualifiedName viewQName =
QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name));
log.info("Deleting view {}.", viewQName);
final TableDto deletedDto = tableService.deleteAndReturn(viewQName, true);
eventBus.post(new MetacatDeleteMViewPostEvent(name, metacatRequestContext, this, deletedDto));
return deletedDto;
}
/**
* {@inheritDoc}
*/
@Override
public void delete(final QualifiedName name) {
final QualifiedName viewQName =
QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name));
tableService.deleteAndReturn(viewQName, true);
}
/**
* {@inheritDoc}
*/
@Override
public void update(final QualifiedName name, final TableDto tableDto) {
updateAndReturn(name, tableDto);
}
/**
* {@inheritDoc}
*/
@Override
public TableDto updateAndReturn(final QualifiedName name, final TableDto tableDto) {
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
eventBus.post(new MetacatUpdateMViewPreEvent(name, metacatRequestContext, this, tableDto));
final QualifiedName viewQName =
QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name));
log.info("Updating view {}.", viewQName);
tableService.update(viewQName, tableDto);
final TableDto updatedDto = getOpt(name, GetTableServiceParameters.builder()
.includeInfo(true)
.includeDefinitionMetadata(false)
.includeDataMetadata(false)
.disableOnReadMetadataIntercetor(false)
.build()).orElseThrow(() -> new IllegalStateException("should exist"));
eventBus.post(new MetacatUpdateMViewPostEvent(name, metacatRequestContext, this, updatedDto));
return updatedDto;
}
/**
* {@inheritDoc}
*/
@Override
public TableDto get(final QualifiedName name) {
final QualifiedName viewQName =
QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name));
return tableService.get(viewQName);
}
/**
* {@inheritDoc}
*/
@Override
public Optional<TableDto> getOpt(final QualifiedName name, final GetTableServiceParameters tableParameters) {
final QualifiedName viewQName =
QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name));
final Optional<TableDto> result = tableService.get(viewQName, tableParameters);
//
// User definition metadata of the underlying table is returned
//
if (result.isPresent()) {
final TableDto table = result.get();
table.setName(name);
final QualifiedName tableName = QualifiedName
.ofTable(name.getCatalogName(), name.getDatabaseName(), name.getTableName());
final Optional<ObjectNode> definitionMetadata =
userMetadataService.getDefinitionMetadataWithInterceptor(tableName,
GetMetadataInterceptorParameters.builder().hasMetadata(table).build());
definitionMetadata.ifPresent(
jsonNodes -> userMetadataService.populateMetadata(table, jsonNodes, null));
}
return result;
}
/**
* {@inheritDoc}
*/
@Override
public void snapshotPartitions(final QualifiedName name, @Nullable final String filter) {
final List<PartitionDto> partitionDtos =
partitionService.list(name, null, null, false, false,
new GetPartitionsRequestDto(filter, null, true, true));
if (partitionDtos != null && !partitionDtos.isEmpty()) {
log.info("Snapshot partitions({}) for view {}.", partitionDtos.size(), name);
final PartitionsSaveRequestDto dto = new PartitionsSaveRequestDto();
dto.setPartitions(partitionDtos);
savePartitions(name, dto, false);
}
}
/**
* {@inheritDoc}
*/
@Override
public PartitionsSaveResponseDto savePartitions(
final QualifiedName name,
final PartitionsSaveRequestDto dto,
final boolean merge
) {
final PartitionsSaveResponseDto result;
final List<PartitionDto> partitionDtos = dto.getPartitions();
if (partitionDtos == null || partitionDtos.isEmpty()) {
return new PartitionsSaveResponseDto();
}
final QualifiedName viewQName =
QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name));
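        // Repoint the incoming partition names at the backing view table before saving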
partitionDtos.forEach(partitionDto ->
partitionDto.setName(QualifiedName
.ofPartition(viewQName.getCatalogName(), viewQName.getDatabaseName(), viewQName.getTableName(),
partitionDto.getName().getPartitionName())));
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
eventBus.post(new MetacatSaveMViewPartitionPreEvent(name, metacatRequestContext, this, dto));
final List<String> partitionIdsForDeletes = dto.getPartitionIdsForDeletes();
if (partitionIdsForDeletes != null && !partitionIdsForDeletes.isEmpty()) {
eventBus.post(new MetacatDeleteMViewPartitionPreEvent(name, metacatRequestContext, this, dto));
}
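        // On merge, fold storage details from existing partitions of the same name into the incoming
        // ones so that previously saved serde information is not lost (see mergePartition below)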
if (merge) {
final List<String> partitionNames = partitionDtos.stream().map(
partitionDto -> partitionDto.getName().getPartitionName()).collect(Collectors.toList());
final List<PartitionDto> existingPartitions =
partitionService.list(viewQName, null, null, false, false,
new GetPartitionsRequestDto(null, partitionNames, false, true));
final Map<String, PartitionDto> existingPartitionsMap = existingPartitions.stream()
.collect(Collectors
.toMap(partitionDto -> partitionDto.getName().getPartitionName(), Function.identity()));
final List<PartitionDto> mergedPartitions = partitionDtos.stream()
.map(partitionDto -> {
final String partitionName = partitionDto.getName().getPartitionName();
final PartitionDto existingPartition = existingPartitionsMap.get(partitionName);
return mergePartition(partitionDto, existingPartition);
}).collect(Collectors.toList());
dto.setPartitions(mergedPartitions);
result = partitionService.save(viewQName, dto);
} else {
result = partitionService.save(viewQName, dto);
}
eventBus.post(
new MetacatSaveMViewPartitionPostEvent(name, metacatRequestContext, this, dto.getPartitions())
);
if (partitionIdsForDeletes != null && !partitionIdsForDeletes.isEmpty()) {
eventBus.post(
new MetacatDeleteMViewPartitionPostEvent(name, metacatRequestContext, this, partitionIdsForDeletes)
);
}
return result;
}
private PartitionDto mergePartition(final PartitionDto partitionDto,
@Nullable final PartitionDto existingPartition) {
if (existingPartition != null) {
final StorageDto existingSerde = existingPartition.getSerde();
if (existingSerde != null) {
StorageDto serde = partitionDto.getSerde();
if (serde == null) {
serde = new StorageDto();
partitionDto.setSerde(serde);
}
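            // Reuse the existing storage details only when the uri is unchanged or not supplied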
if (serde.getUri() == null || serde.getUri().equals(existingSerde.getUri())) {
serde.setUri(existingSerde.getUri());
if (serde.getInputFormat() == null) {
serde.setInputFormat(existingSerde.getInputFormat());
}
if (serde.getOutputFormat() == null) {
serde.setOutputFormat(existingSerde.getOutputFormat());
}
if (serde.getSerializationLib() == null) {
serde.setSerializationLib(existingSerde.getSerializationLib());
}
}
}
}
return partitionDto;
}
/**
* {@inheritDoc}
*/
@Override
public void deletePartitions(final QualifiedName name, final List<String> partitionIds) {
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
final PartitionsSaveRequestDto dto = new PartitionsSaveRequestDto();
dto.setPartitionIdsForDeletes(partitionIds);
eventBus.post(new MetacatDeleteMViewPartitionPreEvent(name, metacatRequestContext, this, dto));
final QualifiedName viewQName =
QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name));
partitionService.delete(viewQName, partitionIds);
eventBus.post(
new MetacatDeleteMViewPartitionPostEvent(name, metacatRequestContext, this, partitionIds)
);
}
/**
* {@inheritDoc}
*/
@Override
public List<PartitionDto> listPartitions(
final QualifiedName name,
@Nullable final Sort sort,
@Nullable final Pageable pageable,
final boolean includeUserMetadata,
@Nullable final GetPartitionsRequestDto getPartitionsRequestDto
) {
final QualifiedName viewQName =
QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name));
return partitionService.list(viewQName, sort, pageable, includeUserMetadata, includeUserMetadata,
getPartitionsRequestDto);
}
/**
* {@inheritDoc}
*/
@Override
public List<String> getPartitionKeys(
final QualifiedName name,
@Nullable final Sort sort,
@Nullable final Pageable pageable,
@Nullable final GetPartitionsRequestDto getPartitionsRequestDto
) {
final QualifiedName viewQName =
QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name));
return partitionService.getPartitionKeys(viewQName, sort, pageable, getPartitionsRequestDto);
}
/**
* {@inheritDoc}
*/
@Override
public List<String> getPartitionUris(final QualifiedName name,
@Nullable final Sort sort,
@Nullable final Pageable pageable,
@Nullable final GetPartitionsRequestDto getPartitionsRequestDto) {
final QualifiedName viewQName =
QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name));
return partitionService.getPartitionUris(viewQName, sort, pageable, getPartitionsRequestDto);
}
/**
* {@inheritDoc}
*/
@Override
public Integer partitionCount(final QualifiedName name) {
final QualifiedName viewQName =
QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name));
return partitionService.count(viewQName);
}
/**
* {@inheritDoc}
*/
@Override
public List<NameDateDto> list(final QualifiedName name) {
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
final QualifiedName viewDbName = QualifiedName.ofDatabase(name.getCatalogName(), VIEW_DB_NAME);
final ConnectorTableService service = connectorManager.getTableService(viewDbName);
List<QualifiedName> tableNames = Lists.newArrayList();
try {
final ConnectorRequestContext connectorRequestContext
= converterUtil.toConnectorContext(metacatRequestContext);
tableNames = service.listNames(connectorRequestContext, viewDbName, null, null, null);
} catch (Exception ignored) {
// ignore. Return an empty list if database 'franklinviews' does not exist
}
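        // With only a catalog given, return every view; otherwise filter the backing table names by
        // the "<dbName>_<tableName>_" prefix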
if (!name.isDatabaseDefinition() && name.isCatalogDefinition()) {
return tableNames.stream()
.map(viewName -> {
final NameDateDto dto = new NameDateDto();
dto.setName(viewName);
return dto;
})
.collect(Collectors.toList());
} else {
final String prefix = String.format("%s_%s_", name.getDatabaseName(),
MoreObjects.firstNonNull(name.getTableName(), ""));
return tableNames.stream()
.filter(qualifiedTableName -> qualifiedTableName.getTableName().startsWith(prefix))
.map(qualifiedTableName -> {
final NameDateDto dto = new NameDateDto();
dto.setName(QualifiedName
.ofView(qualifiedTableName.getCatalogName(), name.getDatabaseName(), name.getTableName(),
qualifiedTableName.getTableName().substring(prefix.length())));
return dto;
})
.collect(Collectors.toList());
}
}
/**
* {@inheritDoc}
*/
@Override
public void saveMetadata(final QualifiedName name,
final ObjectNode definitionMetadata,
final ObjectNode dataMetadata) {
final QualifiedName viewQName =
QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name));
tableService.saveMetadata(viewQName, definitionMetadata, dataMetadata);
}
/**
* {@inheritDoc}
*/
@Override
public void rename(final QualifiedName name, final QualifiedName newViewName) {
final QualifiedName oldViewQName =
QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name));
final QualifiedName newViewQName = QualifiedName
.ofTable(newViewName.getCatalogName(), VIEW_DB_NAME, createViewName(newViewName));
tableService.rename(oldViewQName, newViewQName, true);
}
/**
* {@inheritDoc}
*/
@Override
public boolean exists(final QualifiedName name) {
final QualifiedName viewQName =
QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name));
return tableService.exists(viewQName);
}
/**
* The view is going to be represented by a table in a special db in Franklin. As such there must be
* a conversion from view id -> view table id like so:
* [dbName]_[tableName]_[viewName]
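     * e.g. (hypothetical names) a view "v1" on table "db1/t1" becomes the table "db1_t1_v1".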
*/
private String createViewName(final QualifiedName name) {
return String.format("%s_%s_%s", name.getDatabaseName(), name.getTableName(), name.getViewName());
}
}
| 105 |
0 | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/DatabaseServiceImpl.java | /*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services.impl;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.DatabaseDto;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException;
import com.netflix.metacat.common.server.connectors.exception.NotFoundException;
import com.netflix.metacat.common.server.converter.ConverterUtil;
import com.netflix.metacat.common.server.events.MetacatCreateDatabasePostEvent;
import com.netflix.metacat.common.server.events.MetacatCreateDatabasePreEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteDatabasePostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteDatabasePreEvent;
import com.netflix.metacat.common.server.events.MetacatEventBus;
import com.netflix.metacat.common.server.events.MetacatUpdateDatabasePostEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateDatabasePreEvent;
import com.netflix.metacat.common.server.spi.MetacatCatalogConfig;
import com.netflix.metacat.common.server.usermetadata.AuthorizationService;
import com.netflix.metacat.common.server.usermetadata.MetacatOperation;
import com.netflix.metacat.common.server.usermetadata.UserMetadataService;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import com.netflix.metacat.main.manager.ConnectorManager;
import com.netflix.metacat.main.services.DatabaseService;
import com.netflix.metacat.main.services.GetDatabaseServiceParameters;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import lombok.extern.slf4j.Slf4j;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
* Database service implementation.
*/
@Slf4j
public class DatabaseServiceImpl implements DatabaseService {
private final ConnectorManager connectorManager;
private final UserMetadataService userMetadataService;
private final MetacatEventBus eventBus;
private final ConverterUtil converterUtil;
private final AuthorizationService authorizationService;
/**
* Constructor.
*
* @param connectorManager connector manager
* @param userMetadataService user metadata service
* @param eventBus internal event bus
* @param converterUtil utility to convert to/from Dto to connector resources
* @param authorizationService authorization service
*/
public DatabaseServiceImpl(
final ConnectorManager connectorManager,
final UserMetadataService userMetadataService,
final MetacatEventBus eventBus,
final ConverterUtil converterUtil,
final AuthorizationService authorizationService
) {
this.connectorManager = connectorManager;
this.userMetadataService = userMetadataService;
this.eventBus = eventBus;
this.converterUtil = converterUtil;
this.authorizationService = authorizationService;
}
/**
* {@inheritDoc}
*/
@Override
public DatabaseDto create(final QualifiedName name, final DatabaseDto dto) {
validate(name);
log.info("Creating schema {}", name);
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
eventBus.post(new MetacatCreateDatabasePreEvent(name, metacatRequestContext, this));
final ConnectorRequestContext connectorRequestContext = converterUtil.toConnectorContext(metacatRequestContext);
connectorManager.getDatabaseService(name).create(connectorRequestContext,
converterUtil.fromDatabaseDto(dto));
if (dto.getDefinitionMetadata() != null) {
log.info("Saving user metadata for schema {}", name);
userMetadataService.saveDefinitionMetadata(name, metacatRequestContext.getUserName(),
Optional.of(dto.getDefinitionMetadata()), true);
}
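        // Read the database back so the returned dto reflects what the connector actually persisted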
final DatabaseDto createdDto = get(name,
GetDatabaseServiceParameters.builder()
.disableOnReadMetadataIntercetor(true)
.includeUserMetadata(dto.getDefinitionMetadata() != null)
.includeTableNames(true)
.build());
eventBus.post(new MetacatCreateDatabasePostEvent(name, metacatRequestContext, this, createdDto));
return createdDto;
}
/**
* {@inheritDoc}
*/
@Override
public void update(final QualifiedName name, final DatabaseDto dto) {
validate(name);
log.info("Updating schema {}", name);
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
eventBus.post(new MetacatUpdateDatabasePreEvent(name, metacatRequestContext, this));
try {
final ConnectorRequestContext connectorRequestContext
= converterUtil.toConnectorContext(metacatRequestContext);
connectorManager.getDatabaseService(name)
.update(connectorRequestContext, converterUtil.fromDatabaseDto(dto));
} catch (UnsupportedOperationException ignored) {
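            // Not all connectors support database updates; ignore so the definition metadata below still saves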
}
if (dto.getDefinitionMetadata() != null) {
log.info("Saving user metadata for schema {}", name);
userMetadataService.saveDefinitionMetadata(name, metacatRequestContext.getUserName(),
Optional.of(dto.getDefinitionMetadata()), true);
}
eventBus.post(new MetacatUpdateDatabasePostEvent(name, metacatRequestContext, this));
}
/**
* {@inheritDoc}
*/
@Override
public DatabaseDto updateAndReturn(final QualifiedName name, final DatabaseDto dto) {
update(name, dto);
return get(name);
}
/**
* {@inheritDoc}
*/
@Override
public void delete(final QualifiedName name) {
validate(name);
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
this.authorizationService.checkPermission(metacatRequestContext.getUserName(),
name, MetacatOperation.DELETE);
log.info("Dropping schema {}", name);
final DatabaseDto dto = get(name, GetDatabaseServiceParameters.builder()
.disableOnReadMetadataIntercetor(false)
.includeUserMetadata(true)
.includeTableNames(true)
.build());
eventBus.post(new MetacatDeleteDatabasePreEvent(name, metacatRequestContext, this, dto));
final ConnectorRequestContext connectorRequestContext = converterUtil.toConnectorContext(metacatRequestContext);
connectorManager.getDatabaseService(name).delete(connectorRequestContext, name);
// Delete definition metadata if it exists
if (userMetadataService.getDefinitionMetadata(name).isPresent()) {
log.info("Deleting user metadata for schema {}", name);
userMetadataService.deleteDefinitionMetadata(ImmutableList.of(name));
}
eventBus.post(new MetacatDeleteDatabasePostEvent(name, metacatRequestContext, this, dto));
}
/**
* {@inheritDoc}
*/
@Override
public DatabaseDto get(final QualifiedName name) {
return get(name,
GetDatabaseServiceParameters.builder()
.includeUserMetadata(true)
.includeTableNames(true)
.disableOnReadMetadataIntercetor(false)
.build());
}
/**
* {@inheritDoc}
*/
@Override
public DatabaseDto get(final QualifiedName name, final GetDatabaseServiceParameters getDatabaseServiceParameters) {
validate(name);
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
final MetacatCatalogConfig config = connectorManager.getCatalogConfig(name);
final ConnectorDatabaseService service = connectorManager.getDatabaseService(name);
final ConnectorTableService tableService = connectorManager.getTableService(name);
// Prepare the connector request
final ConnectorRequestContext connectorRequestContext = converterUtil.toConnectorContext(metacatRequestContext);
connectorRequestContext.setIncludeMetadata(getDatabaseServiceParameters.isIncludeMetadataFromConnector());
final DatabaseDto dto = converterUtil.toDatabaseDto(service.get(connectorRequestContext, name));
dto.setType(config.getType());
if (getDatabaseServiceParameters.isIncludeTableNames()) {
final List<QualifiedName> tableNames = tableService
.listNames(connectorRequestContext, name, null, null, null);
List<QualifiedName> viewNames = Collections.emptyList();
if (config.isIncludeViewsWithTables()) {
// TODO JdbcMetadata returns ImmutableList.of() for views. We should change it to fetch views.
try {
viewNames = service.listViewNames(connectorRequestContext, name);
} catch (UnsupportedOperationException ignored) {
}
}
// Check to see if schema exists
if (tableNames.isEmpty() && viewNames.isEmpty() && !exists(name)) {
throw new DatabaseNotFoundException(name);
}
dto.setTables(
Stream.concat(tableNames.stream(), viewNames.stream())
.map(QualifiedName::getTableName)
.sorted(String.CASE_INSENSITIVE_ORDER)
.collect(Collectors.toList())
);
}
if (getDatabaseServiceParameters.isIncludeUserMetadata()) {
log.info("Populate user metadata for schema {}", name);
userMetadataService.populateMetadata(dto,
getDatabaseServiceParameters.isDisableOnReadMetadataIntercetor());
}
return dto;
}
/**
* {@inheritDoc}
*/
@SuppressFBWarnings
@Override
public boolean exists(final QualifiedName name) {
boolean result = false;
try {
result = get(name, GetDatabaseServiceParameters.builder()
.disableOnReadMetadataIntercetor(true).includeTableNames(false)
.includeUserMetadata(false).build()) != null;
} catch (NotFoundException ignored) {
            // name does not exist.
}
return result;
}
private void validate(final QualifiedName name) {
Preconditions.checkNotNull(name, "name cannot be null");
Preconditions.checkState(name.isDatabaseDefinition(), "name %s is not for a database", name);
}
}
| 106 |
0 | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/TableServiceImpl.java | /*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services.impl;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.base.Throwables;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.DatabaseDto;
import com.netflix.metacat.common.dto.StorageDto;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.exception.MetacatBadRequestException;
import com.netflix.metacat.common.exception.MetacatNotSupportedException;
import com.netflix.metacat.common.json.MetacatJson;
import com.netflix.metacat.common.server.connectors.exception.NotFoundException;
import com.netflix.metacat.common.server.connectors.exception.TableMigrationInProgressException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.server.converter.ConverterUtil;
import com.netflix.metacat.common.server.events.MetacatCreateTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatCreateTablePreEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePreEvent;
import com.netflix.metacat.common.server.events.MetacatEventBus;
import com.netflix.metacat.common.server.events.MetacatRenameTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatRenameTablePreEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateIcebergTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateTablePreEvent;
import com.netflix.metacat.common.server.monitoring.Metrics;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.spi.MetacatCatalogConfig;
import com.netflix.metacat.common.server.usermetadata.AuthorizationService;
import com.netflix.metacat.common.server.usermetadata.GetMetadataInterceptorParameters;
import com.netflix.metacat.common.server.usermetadata.MetacatOperation;
import com.netflix.metacat.common.server.usermetadata.TagService;
import com.netflix.metacat.common.server.usermetadata.UserMetadataService;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import com.netflix.metacat.common.server.util.MetacatUtils;
import com.netflix.metacat.main.manager.ConnectorManager;
import com.netflix.metacat.main.services.DatabaseService;
import com.netflix.metacat.main.services.GetTableNamesServiceParameters;
import com.netflix.metacat.main.services.GetTableServiceParameters;
import com.netflix.metacat.main.services.OwnerValidationService;
import com.netflix.metacat.main.services.TableService;
import com.netflix.spectator.api.Registry;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.Nullable;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.TimeUnit;
/**
* Table service implementation.
*/
@Slf4j
@RequiredArgsConstructor
public class TableServiceImpl implements TableService {
private final ConnectorManager connectorManager;
private final ConnectorTableServiceProxy connectorTableServiceProxy;
private final DatabaseService databaseService;
private final TagService tagService;
private final UserMetadataService userMetadataService;
private final MetacatJson metacatJson;
private final MetacatEventBus eventBus;
private final Registry registry;
private final Config config;
private final ConverterUtil converterUtil;
private final AuthorizationService authorizationService;
private final OwnerValidationService ownerValidationService;
/**
* {@inheritDoc}
*/
@Override
public TableDto create(final QualifiedName name, final TableDto tableDto) {
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
validate(name);
this.authorizationService.checkPermission(metacatRequestContext.getUserName(),
tableDto.getName(), MetacatOperation.CREATE);
setDefaultAttributes(tableDto);
ownerValidationService.enforceOwnerValidation("createTable", name, tableDto);
log.info("Creating table {}", name);
eventBus.post(new MetacatCreateTablePreEvent(name, metacatRequestContext, this, tableDto));
connectorTableServiceProxy.create(name, converterUtil.fromTableDto(tableDto));
if (tableDto.getDataMetadata() != null || tableDto.getDefinitionMetadata() != null) {
log.info("Saving user metadata for table {}", name);
final long start = registry.clock().wallTime();
userMetadataService.saveMetadata(metacatRequestContext.getUserName(), tableDto, true);
final long duration = registry.clock().wallTime() - start;
log.info("Time taken to save user metadata for table {} is {} ms", name, duration);
registry.timer(registry.createId(Metrics.TimerSaveTableMetadata.getMetricName()).withTags(name.parts()))
.record(duration, TimeUnit.MILLISECONDS);
tag(name, tableDto.getDefinitionMetadata());
}
TableDto dto = tableDto;
try {
dto = get(name, GetTableServiceParameters.builder()
.disableOnReadMetadataIntercetor(false)
.includeInfo(true)
.includeDataMetadata(true)
.includeDefinitionMetadata(true)
.build()).orElse(tableDto);
} catch (Exception e) {
handleExceptionOnCreate(name, "getTable", e);
}
try {
eventBus.post(new MetacatCreateTablePostEvent(name, metacatRequestContext, this, dto));
} catch (Exception e) {
handleExceptionOnCreate(name, "postEvent", e);
}
return dto;
}
private void setDefaultAttributes(final TableDto tableDto) {
setDefaultSerdeIfNull(tableDto);
setDefaultDefinitionMetadataIfNull(tableDto);
setOwnerIfNull(tableDto);
setOwnerGroupIfAvailable(tableDto);
}
private void setDefaultDefinitionMetadataIfNull(final TableDto tableDto) {
ObjectNode definitionMetadata = tableDto.getDefinitionMetadata();
if (definitionMetadata == null) {
definitionMetadata = metacatJson.emptyObjectNode();
tableDto.setDefinitionMetadata(definitionMetadata);
}
}
private void setDefaultSerdeIfNull(final TableDto tableDto) {
StorageDto serde = tableDto.getSerde();
if (serde == null) {
serde = new StorageDto();
tableDto.setSerde(serde);
}
}
/**
* Sets the owner of the table. The order of priority of selecting the owner is:
* <pre>
* 1. Explicitly set in the table dto
* 2. Username from the request headers
* 3. Owner set in the serde
* </pre>
*
* @param tableDto the table DTO
*/
private void setOwnerIfNull(final TableDto tableDto) {
final List<String> potentialOwners = ownerValidationService.extractPotentialOwners(tableDto);
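        // Prefer the first candidate that passes owner validation; otherwise fall back to the first
        // non-null candidate so that an owner is still recorded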
final String validOwner = potentialOwners.stream()
.filter(this::isOwnerValid)
.findFirst()
.orElse(null);
if (validOwner != null) {
updateTableOwner(tableDto, validOwner);
} else {
potentialOwners.stream()
.filter(Objects::nonNull)
.findFirst()
.ifPresent(nonNullOwner -> updateTableOwner(tableDto, nonNullOwner));
}
}
private void setOwnerGroupIfAvailable(final TableDto tableDto) {
final List<String> potentialOwnerGroups = ownerValidationService.extractPotentialOwnerGroups(tableDto);
potentialOwnerGroups.stream()
.filter(this::isOwnerGroupValid)
.findFirst()
.ifPresent(validOwnerGroup -> updateTableOwnerGroup(tableDto, validOwnerGroup));
}
void updateTableOwner(final TableDto tableDto, final String userId) {
final ObjectNode ownerNode = tableDto.getDefinitionMetadata().with("owner");
ownerNode.put("userId", userId);
}
void updateTableOwnerGroup(final TableDto tableDto, final String groupName) {
final ObjectNode ownerNode = tableDto.getDefinitionMetadata().with("owner");
ownerNode.put("google_group", groupName);
}
private boolean isOwnerValid(@Nullable final String userId) {
return ownerValidationService.isUserValid(userId);
}
private boolean isOwnerGroupValid(@Nullable final String groupName) {
return ownerValidationService.isGroupValid(groupName);
}
@SuppressFBWarnings
private void tag(final QualifiedName name, final ObjectNode definitionMetadata) {
final Set<String> tags = MetacatUtils.getTableTags(definitionMetadata);
if (!tags.isEmpty()) {
log.info("Setting tags {} for table {}", tags, name);
final Set<String> result = tagService.setTags(name, tags, false);
}
}
/**
* {@inheritDoc}
*/
@Override
public TableDto deleteAndReturn(final QualifiedName name, final boolean isMView) {
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
validate(name);
this.authorizationService.checkPermission(metacatRequestContext.getUserName(),
name, MetacatOperation.DELETE);
eventBus.post(new MetacatDeleteTablePreEvent(name, metacatRequestContext, this));
TableDto tableDto = new TableDto();
tableDto.setName(name);
try {
final Optional<TableDto> oTable = get(name,
GetTableServiceParameters.builder()
.includeInfo(true)
.disableOnReadMetadataIntercetor(false)
.includeDefinitionMetadata(true)
.includeDataMetadata(true)
.build());
tableDto = oTable.orElse(tableDto);
} catch (Exception e) {
handleException(name, true, "deleteAndReturn_get", e);
}
// Fail if the table is tagged not to be deleted.
if (hasTags(tableDto, config.getNoTableDeleteOnTags())) {
if (MetacatUtils.hasDoNotModifyForIcebergMigrationTag(tableDto, config.getNoTableDeleteOnTags())) {
throw new TableMigrationInProgressException(
MetacatUtils.getIcebergMigrationExceptionMsg("Delete", name.toString()));
} else {
throw new IllegalArgumentException(
String.format("Table %s cannot be deleted because it is tagged with %s.", name,
config.getNoTableDeleteOnTags()));
}
}
// Try to delete the table even if get above fails
try {
connectorTableServiceProxy.delete(name);
            // If this is a common view, its storage table, if present,
            // should also be deleted.
if (MetacatUtils.isCommonView(tableDto.getMetadata())
&& config.deleteCommonViewStorageTable()) {
final Optional<String> storageTableName = MetacatUtils
.getCommonViewStorageTable(tableDto.getMetadata());
if (storageTableName.isPresent()) {
final QualifiedName qualifiedStorageTableName = QualifiedName.ofTable(name.getCatalogName(),
name.getDatabaseName(), storageTableName.get());
deleteCommonViewStorageTable(name, qualifiedStorageTableName);
}
}
} catch (NotFoundException ignored) {
log.debug("NotFoundException ignored for table {}", name);
}
if (canDeleteMetadata(name)) {
// Delete the metadata. Type doesn't matter since we discard the result
log.info("Deleting user metadata for table {}", name);
userMetadataService.deleteMetadata(metacatRequestContext.getUserName(), Lists.newArrayList(tableDto));
log.info("Deleting tags for table {}", name);
tagService.delete(name, false);
} else {
if (config.canSoftDeleteDataMetadata() && tableDto.isDataExternal()) {
userMetadataService.softDeleteDataMetadata(metacatRequestContext.getUserName(),
Lists.newArrayList(tableDto.getDataUri()));
}
}
eventBus.post(new MetacatDeleteTablePostEvent(name, metacatRequestContext, this, tableDto, isMView));
return tableDto;
}
private boolean hasTags(@Nullable final TableDto tableDto, final Set<String> hasTags) {
if (!hasTags.isEmpty() && tableDto != null) {
final Set<String> tags = MetacatUtils.getTableTags(tableDto.getDefinitionMetadata());
if (!tags.isEmpty()) {
for (String t: hasTags) {
if (tags.contains(t)) {
return true;
}
}
}
}
return false;
}
/**
     * Returns true
     * 1. If the system is configured to delete definition metadata.
     * 2. If the system is not configured to, but the table is enabled for deletion either explicitly
     * or through its database/catalog configuration.
*
* @param tableName table name
* @return whether or not to delete definition metadata
*/
private boolean canDeleteMetadata(final QualifiedName tableName) {
return config.canDeleteTableDefinitionMetadata() || isEnabledForTableDefinitionMetadataDelete(tableName);
}
/**
     * Returns true if tableName is enabled for definition metadata delete either explicitly or if the
     * table's database/catalog is configured to be.
*
* @param tableName table name
* @return whether or not to delete definition metadata
*/
private boolean isEnabledForTableDefinitionMetadataDelete(final QualifiedName tableName) {
final Set<QualifiedName> enableDeleteForQualifiedNames = config.getNamesEnabledForDefinitionMetadataDelete();
return enableDeleteForQualifiedNames.contains(tableName)
|| enableDeleteForQualifiedNames.contains(
QualifiedName.ofDatabase(tableName.getCatalogName(), tableName.getDatabaseName()))
|| enableDeleteForQualifiedNames.contains(QualifiedName.ofCatalog(tableName.getCatalogName()));
}
/**
* {@inheritDoc}
*/
@Override
public Optional<TableDto> get(final QualifiedName name, final GetTableServiceParameters getTableServiceParameters) {
validate(name);
TableDto tableInternal = null;
final TableDto table;
final MetacatCatalogConfig catalogConfig = connectorManager.getCatalogConfig(name);
if (getTableServiceParameters.isIncludeInfo()
|| (getTableServiceParameters.isIncludeDefinitionMetadata() && catalogConfig.isInterceptorEnabled()
&& !getTableServiceParameters.isDisableOnReadMetadataIntercetor())) {
try {
final boolean useCache = getTableServiceParameters.isUseCache() && config.isCacheEnabled()
&& catalogConfig.isCacheEnabled();
tableInternal = converterUtil.toTableDto(
getFromTableServiceProxy(name, getTableServiceParameters, useCache));
} catch (NotFoundException ignored) {
return Optional.empty();
}
table = tableInternal;
} else {
table = new TableDto();
table.setName(name);
}
if (getTableServiceParameters.isIncludeDefinitionMetadata()) {
final Optional<ObjectNode> definitionMetadata =
(getTableServiceParameters.isDisableOnReadMetadataIntercetor())
? userMetadataService.getDefinitionMetadata(name)
: userMetadataService.getDefinitionMetadataWithInterceptor(name,
GetMetadataInterceptorParameters.builder().hasMetadata(tableInternal).build());
definitionMetadata.ifPresent(table::setDefinitionMetadata);
}
if (getTableServiceParameters.isIncludeDataMetadata() && catalogConfig.isHasDataExternal()) {
TableDto dto = table;
if (tableInternal == null && !getTableServiceParameters.isIncludeInfo()) {
try {
final boolean useCache = getTableServiceParameters.isUseCache() && config.isCacheEnabled();
dto = converterUtil.toTableDto(
getFromTableServiceProxy(name, getTableServiceParameters, useCache));
} catch (NotFoundException ignored) {
}
}
if (dto != null && dto.getSerde() != null) {
final Optional<ObjectNode> dataMetadata =
userMetadataService.getDataMetadata(dto.getSerde().getUri());
dataMetadata.ifPresent(table::setDataMetadata);
}
}
return Optional.of(table);
}
/**
* {@inheritDoc}
*/
@Override
public void rename(
final QualifiedName oldName,
final QualifiedName newName,
final boolean isMView
) {
validate(oldName);
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
this.authorizationService.checkPermission(metacatRequestContext.getUserName(),
oldName, MetacatOperation.RENAME);
final TableDto oldTable = get(oldName, GetTableServiceParameters.builder()
.includeInfo(true)
.disableOnReadMetadataIntercetor(false)
.includeDefinitionMetadata(true)
.includeDataMetadata(true)
.build()).orElseThrow(() -> new TableNotFoundException(oldName));
// Fail if the table is tagged not to be renamed.
if (hasTags(oldTable, config.getNoTableRenameOnTags())) {
if (MetacatUtils.hasDoNotModifyForIcebergMigrationTag(oldTable, config.getNoTableRenameOnTags())) {
throw new TableMigrationInProgressException(
MetacatUtils.getIcebergMigrationExceptionMsg("Rename", oldName.toString()));
} else {
throw new IllegalArgumentException(
String.format("Table %s cannot be renamed because it is tagged with %s.", oldName,
config.getNoTableRenameOnTags()));
}
}
if (oldTable != null) {
            // Ignore if the operation is not supported, so that we can at least go ahead and save the user metadata
eventBus.post(new MetacatRenameTablePreEvent(oldName, metacatRequestContext, this, newName));
connectorTableServiceProxy.rename(oldName, newName, isMView);
userMetadataService.renameDefinitionMetadataKey(oldName, newName);
tagService.renameTableTags(oldName, newName.getTableName());
final TableDto dto = get(newName, GetTableServiceParameters.builder()
.includeInfo(true)
.disableOnReadMetadataIntercetor(false)
.includeDefinitionMetadata(true)
.includeDataMetadata(true)
.build()).orElseThrow(() -> new IllegalStateException("should exist"));
eventBus.post(
new MetacatRenameTablePostEvent(oldName, metacatRequestContext, this, oldTable, dto, isMView));
}
}
/**
* {@inheritDoc}
*/
@Override
public void update(final QualifiedName name, final TableDto tableDto) {
updateAndReturn(name, tableDto);
}
/**
* {@inheritDoc}
*/
@Override
public TableDto updateAndReturn(final QualifiedName name, final TableDto tableDto) {
validate(name);
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
final TableDto oldTable = get(name, GetTableServiceParameters.builder()
.disableOnReadMetadataIntercetor(false)
.includeInfo(true)
.includeDataMetadata(true)
.includeDefinitionMetadata(true)
.build()).orElseThrow(() -> new TableNotFoundException(name));
eventBus.post(new MetacatUpdateTablePreEvent(name, metacatRequestContext, this, oldTable, tableDto));
if (MetacatUtils.hasDoNotModifyForIcebergMigrationTag(oldTable, config.getNoTableUpdateOnTags())) {
throw new TableMigrationInProgressException(
MetacatUtils.getIcebergMigrationExceptionMsg("Updates", name.toString()));
}
//
// Check if the table schema info is provided. If provided, we should continue calling the update on the table
// schema. Uri may exist in the serde when updating data metadata for a table.
//
boolean ignoreErrorsAfterUpdate = false;
if (isTableInfoProvided(tableDto, oldTable)) {
ignoreErrorsAfterUpdate = connectorTableServiceProxy.update(name, converterUtil.fromTableDto(tableDto));
}
// we do ownership validation and enforcement only if table owner is set in the dto
// because if it is null, we do not update the owner in the existing metadata record
if (tableDto.getTableOwner().isPresent()) {
// only if the owner is different from the previous, we run the enforcement
// for backwards compatibility
if (!tableDto.getTableOwner().get().equals(oldTable.getTableOwner().orElse(null))) {
ownerValidationService.enforceOwnerValidation("updateTable", name, tableDto);
}
}
try {
// Merge in metadata if the user sent any
if (tableDto.getDataMetadata() != null || tableDto.getDefinitionMetadata() != null) {
log.info("Saving user metadata for table {}", name);
final long start = registry.clock().wallTime();
userMetadataService.saveMetadata(metacatRequestContext.getUserName(), tableDto, true);
final long duration = registry.clock().wallTime() - start;
log.info("Time taken to save user metadata for table {} is {} ms", name, duration);
registry.timer(registry.createId(Metrics.TimerSaveTableMetadata.getMetricName()).withTags(name.parts()))
.record(duration, TimeUnit.MILLISECONDS);
}
} catch (Exception e) {
handleException(name, ignoreErrorsAfterUpdate, "saveMetadata", e);
}
// ignoreErrorsAfterUpdate is currently set only for iceberg tables
if (config.isUpdateIcebergTableAsyncPostEventEnabled() && ignoreErrorsAfterUpdate) {
eventBus.post(new MetacatUpdateIcebergTablePostEvent(name,
metacatRequestContext, this, oldTable, tableDto));
return tableDto;
} else {
TableDto updatedDto = tableDto;
try {
updatedDto = get(name,
GetTableServiceParameters.builder()
.disableOnReadMetadataIntercetor(false)
.includeInfo(true)
.includeDataMetadata(true)
.includeDefinitionMetadata(true)
.build()).orElse(tableDto);
} catch (Exception e) {
handleException(name, ignoreErrorsAfterUpdate, "getTable", e);
}
try {
eventBus.post(new MetacatUpdateTablePostEvent(name, metacatRequestContext, this, oldTable,
updatedDto, updatedDto != tableDto));
} catch (Exception e) {
handleException(name, ignoreErrorsAfterUpdate, "postEvent", e);
}
return updatedDto;
}
}
/**
* Throws exception if the provided <code>ignoreErrorsAfterUpdate</code> is false. If true, it will swallow the
* exception and log it.
*
*/
private void handleException(final QualifiedName name,
final boolean ignoreErrorsAfterUpdate,
final String request,
final Exception ex) {
if (ignoreErrorsAfterUpdate) {
log.warn("Failed {} for table {}. Error: {}", request, name, ex.getMessage());
registry.counter(registry.createId(
Metrics.CounterTableUpdateIgnoredException.getMetricName()).withTags(name.parts())
.withTag("request", request)).increment();
} else {
throw Throwables.propagate(ex);
}
}
/**
* Swallow the exception and log it.
*
*/
private void handleExceptionOnCreate(final QualifiedName name,
final String request,
final Exception ex) {
log.warn("Failed {} for create table {}. Error: {}", request, name, ex.getMessage());
registry.counter(registry.createId(
Metrics.CounterTableCreateIgnoredException.getMetricName()).withTags(name.parts())
.withTag("request", request)).increment();
}
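    // Returns true only when the dto carries actual schema/serde/metadata changes, in which case the
    // connector update must run (as opposed to a metadata-only update)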
@VisibleForTesting
private boolean isTableInfoProvided(final TableDto tableDto, final TableDto oldTableDto) {
boolean result = false;
if ((tableDto.getFields() != null && !tableDto.getFields().isEmpty())
|| isSerdeInfoProvided(tableDto, oldTableDto)
|| (tableDto.getMetadata() != null && !tableDto.getMetadata().isEmpty())
|| tableDto.getAudit() != null) {
result = true;
}
return result;
}
private boolean isSerdeInfoProvided(final TableDto tableDto, final TableDto oldTableDto) {
boolean result = false;
final StorageDto serde = tableDto.getSerde();
if (serde == null) {
result = false;
} else {
final StorageDto oldSerde = oldTableDto.getSerde();
final String oldUri = oldSerde != null ? oldSerde.getUri() : null;
if (serde.getInputFormat() != null
|| serde.getOutputFormat() != null
|| serde.getOwner() != null
|| serde.getParameters() != null
|| serde.getSerdeInfoParameters() != null
|| serde.getSerializationLib() != null
|| (serde.getUri() != null && !Objects.equals(serde.getUri(), oldUri))) {
result = true;
}
}
return result;
}
/**
* {@inheritDoc}
*/
@Override
public void delete(final QualifiedName name) {
deleteAndReturn(name, false);
}
/**
* {@inheritDoc}
*/
@Override
public TableDto get(final QualifiedName name) {
        // This method is used for a different purpose; the internal calls need to be changed.
final Optional<TableDto> dto = get(name, GetTableServiceParameters.builder()
.includeInfo(true)
.includeDefinitionMetadata(true)
.includeDataMetadata(true)
.disableOnReadMetadataIntercetor(false)
.build());
return dto.orElse(null);
}
/**
* {@inheritDoc}
*/
@Override
public TableDto copy(final QualifiedName sourceName, final QualifiedName targetName) {
        // Source and target must be in the same catalog
if (!sourceName.getCatalogName().equals(targetName.getCatalogName())) {
throw new MetacatNotSupportedException("Cannot copy a table from a different source");
}
        // Error out when the source table does not exist
final Optional<TableDto> oTable = get(sourceName,
GetTableServiceParameters.builder()
.includeInfo(true)
.disableOnReadMetadataIntercetor(true)
.includeDataMetadata(false)
.includeDefinitionMetadata(false)
.build());
if (!oTable.isPresent()) {
throw new TableNotFoundException(sourceName);
}
// Error out when target table already exists
final Optional<TableDto> oTargetTable = get(targetName,
GetTableServiceParameters.builder()
.disableOnReadMetadataIntercetor(true)
.includeInfo(true)
.includeDataMetadata(false)
.includeDefinitionMetadata(false)
.build());
if (oTargetTable.isPresent()) {
throw new TableNotFoundException(targetName);
}
return copy(oTable.get(), targetName);
}
/**
* {@inheritDoc}
*/
@Override
public TableDto copy(final TableDto tableDto, final QualifiedName targetName) {
final QualifiedName databaseName =
QualifiedName.ofDatabase(targetName.getCatalogName(), targetName.getDatabaseName());
if (!databaseService.exists(databaseName)) {
final DatabaseDto databaseDto = new DatabaseDto();
databaseDto.setName(databaseName);
databaseService.create(databaseName, databaseDto);
}
final TableDto targetTableDto = new TableDto();
targetTableDto.setName(targetName);
targetTableDto.setFields(tableDto.getFields());
targetTableDto.setPartition_keys(tableDto.getPartition_keys());
final StorageDto storageDto = tableDto.getSerde();
if (storageDto != null) {
final StorageDto targetStorageDto = new StorageDto();
targetStorageDto.setInputFormat(storageDto.getInputFormat());
targetStorageDto.setOwner(storageDto.getOwner());
targetStorageDto.setOutputFormat(storageDto.getOutputFormat());
targetStorageDto.setParameters(storageDto.getParameters());
targetStorageDto.setUri(storageDto.getUri());
targetStorageDto.setSerializationLib(storageDto.getSerializationLib());
targetTableDto.setSerde(targetStorageDto);
}
create(targetName, targetTableDto);
return targetTableDto;
}
/**
* {@inheritDoc}
*/
@Override
public void saveMetadata(final QualifiedName name, final ObjectNode definitionMetadata,
final ObjectNode dataMetadata) {
validate(name);
final Optional<TableDto> tableDtoOptional = get(name, GetTableServiceParameters.builder().includeInfo(true)
.disableOnReadMetadataIntercetor(true)
.includeDefinitionMetadata(false)
.includeDataMetadata(false)
.build());
if (tableDtoOptional.isPresent()) {
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
final TableDto tableDto = tableDtoOptional.get();
tableDto.setDefinitionMetadata(definitionMetadata); //override the previous one
tableDto.setDataMetadata(dataMetadata);
log.info("Saving user metadata for table {}", name);
userMetadataService.saveMetadata(metacatRequestContext.getUserName(), tableDto, true);
tag(name, tableDto.getDefinitionMetadata());
}
}
/**
* {@inheritDoc}
*/
@Override
public List<QualifiedName> getQualifiedNames(final String uri, final boolean prefixSearch) {
return connectorTableServiceProxy.getQualifiedNames(uri, prefixSearch);
}
/**
* {@inheritDoc}
*/
@Override
public Map<String, List<QualifiedName>> getQualifiedNames(final List<String> uris, final boolean prefixSearch) {
return connectorTableServiceProxy.getQualifiedNames(uris, prefixSearch);
}
@Override
public List<QualifiedName> getQualifiedNames(final QualifiedName name,
final GetTableNamesServiceParameters parameters) {
if (Strings.isNullOrEmpty(parameters.getFilter())) {
throw new MetacatBadRequestException("Filter expression cannot be empty");
}
return connectorTableServiceProxy.getQualifiedNames(name, parameters);
}
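    // Routes the read to one of three connector fetch modes: metadata location only, table info plus
    // connector-level metadata, or plain table info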
private TableInfo getFromTableServiceProxy(final QualifiedName name,
final GetTableServiceParameters getTableServiceParameters,
final boolean useCache) {
return getTableServiceParameters.isIncludeMetadataLocationOnly()
? connectorTableServiceProxy.getWithMetadataLocationOnly(name, getTableServiceParameters, useCache)
: (getTableServiceParameters.isIncludeMetadataFromConnector()
? connectorTableServiceProxy.getWithInfoDetails(name, getTableServiceParameters, useCache)
: connectorTableServiceProxy.get(name, getTableServiceParameters, useCache));
}
private void deleteCommonViewStorageTable(final QualifiedName viewName,
final QualifiedName storageTableName) {
try {
log.warn("Deleting storage table: {} belonging to common view: {}",
storageTableName, viewName);
deleteAndReturn(storageTableName, false);
} catch (Exception e) {
// For now only register failures to drop
handleException(storageTableName, true, "deleteCommonViewStorageTable", e);
}
}
/**
* {@inheritDoc}
*/
@Override
public boolean exists(final QualifiedName name) {
return connectorTableServiceProxy.exists(name);
}
private void validate(final QualifiedName name) {
Preconditions.checkNotNull(name, "name cannot be null");
        Preconditions.checkArgument(name.isTableDefinition(), "Definition %s does not refer to a table", name);
}
}
| 107 |
0 | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/package-info.java | /*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* This package includes metacat service implementation classes.
*
* @author amajumdar
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.main.services.impl;
import javax.annotation.ParametersAreNonnullByDefault;
| 108 |
0 | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/init/MetacatThriftInitService.java | package com.netflix.metacat.main.services.init;
import com.google.common.base.Throwables;
import com.netflix.metacat.main.services.MetacatThriftService;
import lombok.Getter;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.experimental.Accessors;
import lombok.extern.slf4j.Slf4j;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* Inits the thrift service.
*/
@Slf4j
@Getter
@Accessors(fluent = true)
@RequiredArgsConstructor
public class MetacatThriftInitService {
@NonNull
private final MetacatThriftService metacatThriftService;
@NonNull
private final MetacatCoreInitService coreInitService;
// Initial values are false
private final AtomicBoolean thriftStarted = new AtomicBoolean();
/**
* Metacat service shutdown.
*/
public void stop() {
log.info("Metacat application is stopped. Stopping services.");
try {
this.metacatThriftService.stop();
this.thriftStarted.set(false);
this.coreInitService.stop();
} catch (final Exception e) {
            // Just log it; since we're shutting down anyway, it shouldn't matter to propagate it
log.error("Unable to properly shutdown services due to {}", e.getMessage(), e);
}
log.info("Finished stopping services.");
}
/**
* Metacat service initialization.
*/
public void start() {
log.info("Metacat application starting. Starting internal services...");
try {
// TODO: Rather than doing this statically why don't we have things that need to be started implement
// some interface/order?
this.coreInitService.start();
this.metacatThriftService.start();
this.thriftStarted.set(true);
} catch (final Exception e) {
log.error("Unable to initialize services due to {}", e.getMessage(), e);
Throwables.propagate(e);
}
log.info("Finished starting internal services.");
}
}
| 109 |
0 | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/init/MetacatCoreInitService.java | package com.netflix.metacat.main.services.init;
import com.netflix.metacat.common.server.util.ThreadServiceManager;
import com.netflix.metacat.main.manager.CatalogManager;
import com.netflix.metacat.main.manager.ConnectorManager;
import com.netflix.metacat.main.manager.PluginManager;
import lombok.Getter;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.experimental.Accessors;
import lombok.extern.slf4j.Slf4j;
import org.springframework.context.ApplicationContext;
/**
* Inits the core catalog related dependencies.
*/
@Slf4j
@Getter
@Accessors(fluent = true)
@RequiredArgsConstructor
public class MetacatCoreInitService {
@NonNull
private final PluginManager pluginManager;
@NonNull
private final CatalogManager catalogManager;
@NonNull
private final ConnectorManager connectorManager;
@NonNull
private final ThreadServiceManager threadServiceManager;
@NonNull
private final ApplicationContext applicationContext;
/**
* Metacat service shutdown.
*/
public void stop() {
log.info("Metacat application is stopped. Stopping services.");
this.connectorManager.stop();
this.threadServiceManager.stop();
}
/**
* Metacat service initialization.
*
* @throws Exception if an error occurs during initialization.
*/
public void start() throws Exception {
log.info("Metacat application starting. Starting internal services...");
this.pluginManager.loadPlugins();
this.catalogManager.loadCatalogs(applicationContext);
}
}
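
// Usage sketch (hypothetical call site, not part of the original source):
//
//   final MetacatCoreInitService core = new MetacatCoreInitService(
//       pluginManager, catalogManager, connectorManager, threadServiceManager, context);
//   core.start();  // loads plugins, then catalogs from the application context
//   ...
//   core.stop();   // stops connectors and the thread service manager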
| 110 |
0 | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/init/package-info.java | /*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat initialization services.
*/
@javax.annotation.ParametersAreNonnullByDefault
package com.netflix.metacat.main.services.init;
| 111 |
0 | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/health/MetacatHealthIndicator.java | package com.netflix.metacat.main.services.health;
import com.netflix.metacat.main.services.init.MetacatCoreInitService;
import com.netflix.metacat.main.services.init.MetacatThriftInitService;
import lombok.RequiredArgsConstructor;
import org.apache.thrift.transport.TSocket;
import org.springframework.boot.actuate.health.Health;
import org.springframework.boot.actuate.health.HealthIndicator;
/**
* Metacat health indicator.
*/
@RequiredArgsConstructor
public class MetacatHealthIndicator implements HealthIndicator {
protected static final String PLUGIN_KEY = "pluginsLoaded";
protected static final String CATALOG_KEY = "catalogsLoaded";
protected static final String THRIFT_KEY = "thriftStarted";
private final MetacatCoreInitService coreInitService;
private final MetacatThriftInitService thriftInitService;
/**
* {@inheritDoc}
*/
@Override
public Health health() {
final boolean plugins = coreInitService.pluginManager().arePluginsLoaded();
final boolean catalogs = coreInitService.catalogManager().areCatalogsLoaded();
final boolean thrift = thriftInitService.thriftStarted().get()
&& thriftInitService.metacatThriftService()
.getCatalogThriftServices().parallelStream().map(c -> {
TSocket transport = null;
try {
transport = new TSocket("localhost", c.getPortNumber(), 100);
transport.open();
} catch (Exception e) {
return false;
} finally {
if (transport != null && transport.isOpen()) {
transport.close();
}
}
return true;
                }).reduce(Boolean.TRUE, Boolean::logicalAnd); // all ports must be reachable; Boolean::equals would wrongly yield true when two probes fail
final Health.Builder builder = plugins && catalogs && thrift ? Health.up() : Health.outOfService();
builder.withDetail(PLUGIN_KEY, plugins);
builder.withDetail(CATALOG_KEY, catalogs);
builder.withDetail(THRIFT_KEY, thrift);
return builder.build();
}
}
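
// Illustrative output sketch (assumed shape; the exact JSON depends on the
// Spring Boot Actuator version in use): a healthy instance reports roughly
//
//   {"status": "UP",
//    "details": {"pluginsLoaded": true, "catalogsLoaded": true, "thriftStarted": true}}
//
// and "OUT_OF_SERVICE" with the failing detail set to false otherwise.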
| 112 |
0 | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/health/package-info.java | /*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat service health related classes.
*/
@javax.annotation.ParametersAreNonnullByDefault
package com.netflix.metacat.main.services.health;
| 113 |
0 | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/search/ElasticSearchEventHandlers.java | /*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services.search;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.dto.DatabaseDto;
import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.json.MetacatJsonLocator;
import com.netflix.metacat.common.server.events.AsyncListener;
import com.netflix.metacat.common.server.events.MetacatCreateDatabasePostEvent;
import com.netflix.metacat.common.server.events.MetacatCreateTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteDatabasePostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePartitionPostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatRenameTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatSaveTablePartitionPostEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateTablePostEvent;
import com.netflix.metacat.common.server.monitoring.Metrics;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Timer;
import lombok.extern.slf4j.Slf4j;
import org.springframework.context.event.EventListener;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
/**
* Event handlers for elastic search indexing.
*/
@Slf4j
@AsyncListener
public class ElasticSearchEventHandlers {
private final ElasticSearchUtil es;
private final MetacatJsonLocator metacatJsonLocator;
private final Config config;
private final Timer databaseCreateEventsDelayTimer;
private final Timer databaseCreateTimer;
private final Timer tableCreateEventsDelayTimer;
private final Timer tableCreateTimer;
private final Timer databaseDeleteEventsDelayTimer;
private final Timer databaseDeleteTimer;
private final Timer tableDeleteEventsDelayTimer;
private final Timer tableDeleteTimer;
private final Timer partitionDeleteEventsDelayTimer;
private final Timer partitionDeleteTimer;
private final Timer tableRenameEventsDelayTimer;
private final Timer tableRenameTimer;
private final Timer tableUpdateEventsDelayTimer;
private final Timer tableUpdateTimer;
private final Timer partitionSaveEventsDelayTimer;
private final Timer partitionSaveTimer;
/**
* Constructor.
*
* @param es elastic search util
* @param registry registry to spectator
* @param config configurations
*/
public ElasticSearchEventHandlers(final ElasticSearchUtil es,
final Registry registry,
final Config config) {
this.es = es;
this.metacatJsonLocator = new MetacatJsonLocator();
this.config = config;
this.databaseCreateEventsDelayTimer = registry.timer(Metrics.TimerElasticSearchEventsDelay.getMetricName(),
Metrics.TagEventsType.getMetricName(), "database.create");
this.databaseCreateTimer = registry.timer(Metrics.TimerElasticSearchDatabaseCreate.getMetricName());
this.tableCreateEventsDelayTimer = registry.timer(Metrics.TimerElasticSearchEventsDelay.getMetricName(),
Metrics.TagEventsType.getMetricName(), "table.create");
this.tableCreateTimer = registry.timer(Metrics.TimerElasticSearchTableCreate.getMetricName());
this.databaseDeleteEventsDelayTimer = registry.timer(Metrics.TimerElasticSearchEventsDelay.getMetricName(),
Metrics.TagEventsType.getMetricName(), "database.delete");
this.databaseDeleteTimer = registry.timer(Metrics.TimerElasticSearchDatabaseDelete.getMetricName());
this.tableDeleteEventsDelayTimer = registry.timer(Metrics.TimerElasticSearchEventsDelay.getMetricName(),
Metrics.TagEventsType.getMetricName(), "table.delete");
this.tableDeleteTimer = registry.timer(Metrics.TimerElasticSearchTableDelete.getMetricName());
this.partitionDeleteEventsDelayTimer = registry.timer(Metrics.TimerElasticSearchEventsDelay.getMetricName(),
Metrics.TagEventsType.getMetricName(), "partition.delete");
this.partitionDeleteTimer = registry.timer(Metrics.TimerElasticSearchPartitionDelete.getMetricName());
this.tableRenameEventsDelayTimer = registry.timer(Metrics.TimerElasticSearchEventsDelay.getMetricName(),
Metrics.TagEventsType.getMetricName(), "table.rename");
this.tableRenameTimer = registry.timer(Metrics.TimerElasticSearchTableRename.getMetricName());
this.tableUpdateEventsDelayTimer = registry.timer(Metrics.TimerElasticSearchEventsDelay.getMetricName(),
Metrics.TagEventsType.getMetricName(), "table.update");
this.tableUpdateTimer = registry.timer(Metrics.TimerElasticSearchTableUpdate.getMetricName());
this.partitionSaveEventsDelayTimer = registry.timer(Metrics.TimerElasticSearchEventsDelay.getMetricName(),
Metrics.TagEventsType.getMetricName(), "partition.save");
this.partitionSaveTimer = registry.timer(Metrics.TimerElasticSearchPartitionSave.getMetricName());
}
/**
* Subscriber.
*
* @param event event
*/
@EventListener
public void metacatCreateDatabasePostEventHandler(final MetacatCreateDatabasePostEvent event) {
log.debug("Received CreateDatabaseEvent {}", event);
this.databaseCreateEventsDelayTimer
.record(System.currentTimeMillis() - event.getRequestContext().getTimestamp(), TimeUnit.MILLISECONDS);
this.databaseCreateTimer.record(() -> {
final DatabaseDto dto = event.getDatabase();
final ElasticSearchDoc doc = new ElasticSearchDoc(dto.getName().toString(), dto,
event.getRequestContext().getUserName(), false);
es.save(ElasticSearchDoc.Type.database.name(), doc.getId(), doc);
});
}
/**
* Subscriber.
*
* @param event event
*/
@EventListener
public void metacatCreateTablePostEventHandler(final MetacatCreateTablePostEvent event) {
log.debug("Received CreateTableEvent {}", event);
this.tableCreateEventsDelayTimer
.record(System.currentTimeMillis() - event.getRequestContext().getTimestamp(), TimeUnit.MILLISECONDS);
this.tableCreateTimer.record(() -> {
final TableDto dto = event.getTable();
final ElasticSearchDoc doc = new ElasticSearchDoc(dto.getName().toString(), dto,
event.getRequestContext().getUserName(), false);
es.save(ElasticSearchDoc.Type.table.name(), doc.getId(), doc);
});
}
/**
* Subscriber.
*
* @param event event
*/
@EventListener
public void metacatDeleteDatabasePostEventHandler(final MetacatDeleteDatabasePostEvent event) {
log.debug("Received DeleteDatabaseEvent {}", event);
this.databaseDeleteEventsDelayTimer
.record(System.currentTimeMillis() - event.getRequestContext().getTimestamp(), TimeUnit.MILLISECONDS);
this.databaseDeleteTimer.record(() -> {
final DatabaseDto dto = event.getDatabase();
es.softDelete(ElasticSearchDoc.Type.database.name(), dto.getName().toString(), event.getRequestContext());
});
}
/**
* Subscriber.
*
* @param event event
*/
@EventListener
public void metacatDeleteTablePostEventHandler(final MetacatDeleteTablePostEvent event) {
log.debug("Received DeleteTableEvent {}", event);
this.tableDeleteEventsDelayTimer
.record(System.currentTimeMillis() - event.getRequestContext().getTimestamp(), TimeUnit.MILLISECONDS);
this.tableDeleteTimer.record(() -> {
final TableDto dto = event.getTable();
es.softDelete(ElasticSearchDoc.Type.table.name(), dto.getName().toString(), event.getRequestContext());
if (config.isElasticSearchPublishPartitionEnabled()) {
try {
final List<String> partitionIdsToBeDeleted =
es.getIdsByQualifiedName(ElasticSearchDoc.Type.partition.name(), dto.getName());
es.delete(ElasticSearchDoc.Type.partition.name(), partitionIdsToBeDeleted);
} catch (Exception e) {
log.warn("Failed deleting the partitions for the dropped table/view:{}", dto.getName());
}
}
});
}
/**
* Subscriber.
*
* @param event event
*/
@EventListener
public void metacatDeleteTablePartitionPostEventHandler(final MetacatDeleteTablePartitionPostEvent event) {
log.debug("Received DeleteTablePartitionEvent {}", event);
this.partitionDeleteEventsDelayTimer
.record(System.currentTimeMillis() - event.getRequestContext().getTimestamp(), TimeUnit.MILLISECONDS);
if (config.isElasticSearchPublishPartitionEnabled()) {
this.partitionDeleteTimer.record(() -> {
final List<String> partitionIds = event.getPartitionIds();
final List<String> esPartitionIds = partitionIds.stream()
.map(partitionId -> event.getName().toString() + "/" + partitionId).collect(Collectors.toList());
es.softDelete(ElasticSearchDoc.Type.partition.name(), esPartitionIds, event.getRequestContext());
});
}
}
/**
* Subscriber.
*
* @param event event
*/
@EventListener
public void metacatRenameTablePostEventHandler(final MetacatRenameTablePostEvent event) {
log.debug("Received RenameTableEvent {}", event);
this.tableRenameEventsDelayTimer
.record(System.currentTimeMillis() - event.getRequestContext().getTimestamp(), TimeUnit.MILLISECONDS);
this.tableRenameTimer.record(() -> {
es.delete(ElasticSearchDoc.Type.table.name(), event.getName().toString());
final TableDto dto = event.getCurrentTable();
final ElasticSearchDoc doc = new ElasticSearchDoc(dto.getName().toString(), dto,
event.getRequestContext().getUserName(), false);
es.save(ElasticSearchDoc.Type.table.name(), doc.getId(), doc);
});
}
/**
* Subscriber.
*
* @param event event
*/
@EventListener
public void metacatUpdateTablePostEventHandler(final MetacatUpdateTablePostEvent event) {
log.debug("Received UpdateTableEvent {}", event);
this.tableUpdateEventsDelayTimer
.record(System.currentTimeMillis() - event.getRequestContext().getTimestamp(), TimeUnit.MILLISECONDS);
this.tableUpdateTimer.record(() -> {
final TableDto dto = event.getCurrentTable();
final ElasticSearchDoc doc = new ElasticSearchDoc(dto.getName().toString(), dto,
event.getRequestContext().getUserName(), false);
final ElasticSearchDoc oldDoc = es.get(ElasticSearchDoc.Type.table.name(), doc.getId());
es.save(ElasticSearchDoc.Type.table.name(), doc.getId(), doc);
if (config.isElasticSearchUpdateTablesWithSameUriEnabled()
&& (oldDoc == null || oldDoc.getDto() == null
|| !Objects.equals(((TableDto) oldDoc.getDto()).getDataMetadata(), dto.getDataMetadata()))) {
updateEntitiesWithSameUri(ElasticSearchDoc.Type.table.name(),
dto, event.getRequestContext().getUserName());
}
});
}
private void updateEntitiesWithSameUri(final String metadataType, final TableDto dto,
final String userName) {
if (dto.isDataExternal()) {
final List<String> ids = es.getTableIdsByUri(metadataType, dto.getDataUri())
.stream().filter(s -> !s.equals(dto.getName().toString())).collect(Collectors.toList());
if (!ids.isEmpty()) {
log.info("ElasticSearch table updates({}) with same uri {} (Table:{})",
ids.size(), dto.getDataUri(), dto.getName());
final ObjectNode node = metacatJsonLocator.emptyObjectNode();
node.set(ElasticSearchDoc.Field.DATA_METADATA, dto.getDataMetadata());
node.put(ElasticSearchDoc.Field.USER, userName);
node.put(ElasticSearchDoc.Field.TIMESTAMP, java.time.Instant.now().toEpochMilli());
es.updates(ElasticSearchDoc.Type.table.name(), ids, node);
}
}
}
/**
* Subscriber.
*
* @param event event
*/
@EventListener
public void metacatSaveTablePartitionPostEventHandler(final MetacatSaveTablePartitionPostEvent event) {
log.debug("Received SaveTablePartitionEvent {}", event);
this.partitionSaveEventsDelayTimer
.record(System.currentTimeMillis() - event.getRequestContext().getTimestamp(), TimeUnit.MILLISECONDS);
if (config.isElasticSearchPublishPartitionEnabled()) {
this.partitionSaveTimer.record(() -> {
final List<PartitionDto> partitionDtos = event.getPartitions();
final MetacatRequestContext context = event.getRequestContext();
final List<ElasticSearchDoc> docs = partitionDtos.stream()
.map(dto -> new ElasticSearchDoc(dto.getName().toString(), dto, context.getUserName(), false))
.collect(Collectors.toList());
es.save(ElasticSearchDoc.Type.partition.name(), docs);
});
}
}
}
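
// Usage sketch (hypothetical call site): these handlers are invoked through the
// event bus, and @AsyncListener moves the indexing work off the request thread.
// The post-event constructor below mirrors the one used elsewhere in this module:
//
//   eventBus.post(new MetacatDeleteTablePostEvent(name, requestContext, source, tableDto));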
| 114 |
0 | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/search/ElasticSearchUtil.java | /*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services.search;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.TableDto;
import org.joda.time.Instant;
import javax.annotation.Nullable;
import java.util.List;
/**
* Utility class for index, update, delete metacat doc from elastic search.
*/
public interface ElasticSearchUtil {
/**
* Delete the records for the given type.
*
* @param metacatRequestContext context
* @param type doc type
* @param softDelete if true, marks the doc for deletion
*/
void delete(MetacatRequestContext metacatRequestContext, String type,
boolean softDelete);
/**
* Delete index documents.
*
* @param type index type
* @param ids entity ids
*/
void delete(String type, List<String> ids);
/**
* Delete index document.
*
* @param type index type
* @param id entity id
*/
void delete(String type, String id);
/**
* Gets the document for the given type and id.
*
* @param type doc type
* @param id doc id
* @return doc
*/
ElasticSearchDoc get(String type, String id);
/**
* Gets the document for the given type and id.
*
* @param type doc type
* @param id doc id
* @param index the es index
* @return doc
*/
ElasticSearchDoc get(String type, String id, String index);
/**
     * Gets the document ids matching the given qualified name.
     *
     * @param type doc type
     * @param qualifiedName qualified name to match
     * @return list of document ids
*/
List<String> getIdsByQualifiedName(String type, QualifiedName qualifiedName);
/**
* Search the names by names and by the given marker.
*
* @param type type
* @param qualifiedNames names
* @param marker marker
* @param excludeQualifiedNames exclude names
* @param valueType dto type
* @param <T> dto type
* @return dto
*/
<T> List<T> getQualifiedNamesByMarkerByNames(String type, List<QualifiedName> qualifiedNames,
Instant marker,
List<QualifiedName> excludeQualifiedNames,
Class<T> valueType);
/**
* List table names.
*
* @param type doc type
* @param qualifiedNames names
* @param excludeQualifiedNames exclude names
* @return list of table names
*/
List<String> getTableIdsByCatalogs(String type, List<QualifiedName> qualifiedNames,
List<QualifiedName> excludeQualifiedNames);
/**
* List table names by uri.
*
* @param type doc type
* @param dataUri uri
* @return list of table names
*/
List<String> getTableIdsByUri(String type, String dataUri);
/**
     * Logs the message in the elastic search index.
*
* @param method method
* @param type type
* @param name name
* @param data data
* @param logMessage message
* @param ex exception
* @param error is an error
*/
void log(String method, String type, String name, @Nullable String data,
String logMessage, @Nullable Exception ex, boolean error);
/**
* Elastic search index refresh.
*/
void refresh();
/**
* Bulk save of the entities.
*
* @param type index type
* @param docs metacat documents
*/
void save(String type, List<ElasticSearchDoc> docs);
/**
* Save of a single entity.
*
* @param type index type
* @param id id of the entity
* @param doc metacat documents
*/
void save(String type, String id, ElasticSearchDoc doc);
/**
* Full text search.
*
* @param searchString search text
* @return list of table info
*/
List<TableDto> simpleSearch(String searchString);
/**
* Marks the documents as deleted.
*
* @param type index type
* @param ids list of entity ids
* @param metacatRequestContext context containing the user name
*/
void softDelete(String type, List<String> ids,
MetacatRequestContext metacatRequestContext);
/**
* Marks the document as deleted.
*
* @param type index type
* @param id entity id
* @param metacatRequestContext context containing the user name
*/
void softDelete(String type, String id, MetacatRequestContext metacatRequestContext);
/**
* Creates JSON from elasticSearchdoc object.
*
* @param elasticSearchDoc elastic search doc.
* @return Json String
*/
String toJsonString(ElasticSearchDoc elasticSearchDoc);
/**
* Updates the documents with partial updates with the given fields.
*
* @param type index type
* @param ids list of entity ids
* @param node Object node to update the doc
*/
void updates(String type, List<String> ids, ObjectNode node);
}
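
// Usage sketch (hypothetical ids): softDelete marks documents with the
// "deleted_" flag instead of removing them, so later traversals can reconcile:
//
//   esUtil.softDelete(ElasticSearchDoc.Type.table.name(),
//       Lists.newArrayList("prodhive/db/t"), requestContext);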
| 115 |
0 | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/search/ElasticSearchUtilImpl.java | /*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services.search;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.github.rholder.retry.RetryException;
import com.github.rholder.retry.Retryer;
import com.github.rholder.retry.RetryerBuilder;
import com.github.rholder.retry.StopStrategies;
import com.github.rholder.retry.WaitStrategies;
import com.google.common.base.Strings;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.json.MetacatJson;
import com.netflix.metacat.common.server.monitoring.Metrics;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.spectator.api.Registry;
import lombok.extern.slf4j.Slf4j;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;
import org.elasticsearch.client.transport.NoNodeAvailableException;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.CancellableThreads;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.transport.ReceiveTimeoutTransportException;
import org.elasticsearch.transport.TransportException;
import org.joda.time.Instant;
import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
/**
* Utility class for index, update, delete metacat doc from elastic search.
*/
@Slf4j
public class ElasticSearchUtilImpl implements ElasticSearchUtil {
private static final Retryer<Void> RETRY_ES_PUBLISH = RetryerBuilder.<Void>newBuilder()
.retryIfExceptionOfType(FailedNodeException.class)
.retryIfExceptionOfType(NodeClosedException.class)
.retryIfExceptionOfType(NoNodeAvailableException.class)
.retryIfExceptionOfType(ReceiveTimeoutTransportException.class)
.retryIfExceptionOfType(TransportException.class)
.retryIfExceptionOfType(ElasticsearchTimeoutException.class)
.retryIfExceptionOfType(EsRejectedExecutionException.class)
.retryIfExceptionOfType(CancellableThreads.ExecutionCancelledException.class)
.withWaitStrategy(WaitStrategies.incrementingWait(10, TimeUnit.MILLISECONDS, 30, TimeUnit.MILLISECONDS))
.withStopStrategy(StopStrategies.stopAfterAttempt(3))
.build();
private static final int NO_OF_CONFLICT_RETRIES = 3;
private final Client client;
private final String esIndex;
private final Config config;
private final MetacatJson metacatJson;
private XContentType contentType = Requests.INDEX_CONTENT_TYPE;
private final Registry registry;
private final TimeValue esCallTimeout;
private final TimeValue esBulkCallTimeout;
/**
* Constructor.
*
* @param client elastic search client
* @param config config
* @param metacatJson json utility
* @param registry spectator registry
*/
public ElasticSearchUtilImpl(
@Nullable final Client client,
final Config config,
final MetacatJson metacatJson,
final Registry registry) {
this.config = config;
this.client = client;
this.metacatJson = metacatJson;
this.esIndex = config.getEsIndex();
this.registry = registry;
this.esCallTimeout = TimeValue.timeValueSeconds(config.getElasticSearchCallTimeout());
this.esBulkCallTimeout = TimeValue.timeValueSeconds(config.getElasticSearchBulkCallTimeout());
}
/**
* {@inheritDoc}
*/
@Override
public void delete(final String type, final String id) {
try {
RETRY_ES_PUBLISH.call(() -> {
client.prepareDelete(esIndex, type, id).execute().actionGet(esCallTimeout);
return null;
});
} catch (Exception e) {
handleException("ElasticSearchUtil.delete", type, id, e,
Metrics.CounterElasticSearchDelete.getMetricName());
}
}
private void handleException(final String request,
final String type,
final String id,
final Exception exception,
final String metricName) {
log.error("Failed {} metadata of type {} with id {}. {}", request, type, id, exception);
String exceptionName = exception.getClass().getSimpleName();
if (exception instanceof RetryException) {
final Throwable error = ((RetryException) exception).getLastFailedAttempt().getExceptionCause();
if (error != null) {
exceptionName = error.getClass().getSimpleName();
}
}
final Map<String, String> tags = ImmutableMap
.<String, String>builder().put("status", "failure").put("name", id).put("exception", exceptionName).build();
registry.counter(registry.createId(metricName).withTags(tags)).increment();
log(request, type, id, null, exception.getMessage(), exception, true);
}
private void handleException(final String request,
final String type,
final List<String> ids,
final Exception exception,
final String metricName) {
log.error("Failed {} metadata of type {} with ids {}. {}", request, type, ids, exception);
String exceptionName = exception.getClass().getSimpleName();
if (exception instanceof RetryException) {
final Throwable error = ((RetryException) exception).getLastFailedAttempt().getExceptionCause();
if (error != null) {
exceptionName = error.getClass().getSimpleName();
}
}
final Map<String, String> tags = ImmutableMap
.<String, String>builder().put("status", "failure").put("exception", exceptionName).build();
registry.counter(registry.createId(metricName).withTags(tags)).increment();
log(request, type, ids.toString(), null, exception.getMessage(), exception, true);
}
/**
* {@inheritDoc}
*/
@Override
public void delete(final String type, final List<String> ids) {
if (ids != null && !ids.isEmpty()) {
final List<List<String>> partitionedIds = Lists.partition(ids, 10000);
partitionedIds.forEach(subIds -> hardDeleteDoc(type, subIds));
}
}
/**
* {@inheritDoc}
*/
@Override
public void softDelete(final String type, final String id, final MetacatRequestContext metacatRequestContext) {
try {
RETRY_ES_PUBLISH.call(() -> {
final XContentBuilder builder = XContentFactory.contentBuilder(contentType);
builder.startObject().field(ElasticSearchDoc.Field.DELETED, true)
.field(ElasticSearchDoc.Field.TIMESTAMP, java.time.Instant.now().toEpochMilli())
.field(ElasticSearchDoc.Field.USER,
metacatRequestContext.getUserName()).endObject();
client.prepareUpdate(esIndex, type, id)
.setRetryOnConflict(NO_OF_CONFLICT_RETRIES).setDoc(builder).get(esCallTimeout);
ensureMigrationByCopy(type, Collections.singletonList(id));
return null;
});
} catch (Exception e) {
handleException("ElasticSearchUtil.softDelete", type, id, e,
Metrics.CounterElasticSearchDelete.getMetricName());
}
}
/**
* {@inheritDoc}
*/
@Override
public void softDelete(final String type, final List<String> ids,
final MetacatRequestContext metacatRequestContext) {
if (ids != null && !ids.isEmpty()) {
final List<List<String>> partitionedIds = Lists.partition(ids, 100);
partitionedIds.forEach(subIds -> softDeleteDoc(type, subIds, metacatRequestContext));
partitionedIds.forEach(subIds -> ensureMigrationByCopy(type, subIds));
}
}
/**
* {@inheritDoc}
*/
@Override
public void updates(final String type, final List<String> ids, final ObjectNode node) {
if (ids != null && !ids.isEmpty()) {
final List<List<String>> partitionedIds = Lists.partition(ids, 100);
partitionedIds.forEach(subIds -> updateDocs(type, subIds, node));
partitionedIds.forEach(subIds -> ensureMigrationByCopy(type, subIds));
}
}
private void updateDocs(final String type, final List<String> ids, final ObjectNode node) {
try {
RETRY_ES_PUBLISH.call(() -> {
final BulkRequestBuilder bulkRequest = client.prepareBulk();
ids.forEach(id -> {
bulkRequest.add(client.prepareUpdate(esIndex, type, id)
.setRetryOnConflict(NO_OF_CONFLICT_RETRIES)
.setDoc(metacatJson.toJsonAsBytes(node), XContentType.JSON));
});
final BulkResponse bulkResponse = bulkRequest.execute().actionGet(esBulkCallTimeout);
if (bulkResponse.hasFailures()) {
for (BulkItemResponse item : bulkResponse.getItems()) {
if (item.isFailed()) {
handleException("ElasticSearchUtil.updateDocs.item", type, item.getId(),
item.getFailure().getCause(), Metrics.CounterElasticSearchUpdate.getMetricName());
}
}
}
return null;
});
} catch (Exception e) {
handleException("ElasticSearchUtil.updatDocs", type, ids, e,
Metrics.CounterElasticSearchBulkUpdate.getMetricName());
}
}
/**
* {@inheritDoc}
*/
@Override
public void save(final String type, final String id, final ElasticSearchDoc doc) {
saveToIndex(type, id, doc, esIndex);
ensureMigrationByCopy(type, Collections.singletonList(id));
}
/**
* {@inheritDoc}
*/
@Override
public void save(final String type, final List<ElasticSearchDoc> docs) {
if (docs != null && !docs.isEmpty()) {
final List<List<ElasticSearchDoc>> partitionedDocs = Lists.partition(docs, 100);
partitionedDocs.forEach(subDocs -> bulkSaveToIndex(type, subDocs, esIndex));
partitionedDocs.forEach(subDocs -> ensureMigrationBySave(type, subDocs));
}
}
/**
* {@inheritDoc}
*/
@Override
public String toJsonString(final ElasticSearchDoc elasticSearchDoc) {
final String result = metacatJson.toJsonString(toJsonObject(elasticSearchDoc));
return result.replace("{}", "null");
}
private ObjectNode toJsonObject(final ElasticSearchDoc elasticSearchDoc) {
final ObjectNode oMetadata = metacatJson.toJsonObject(elasticSearchDoc.getDto());
//add the searchable definition metadata
elasticSearchDoc.addSearchableDefinitionMetadata(oMetadata);
//Adding the timestamp explicitly
oMetadata.put(ElasticSearchDoc.Field.TIMESTAMP, elasticSearchDoc.getTimestamp());
//True if this entity has been deleted
oMetadata.put(ElasticSearchDoc.Field.DELETED, elasticSearchDoc.isDeleted());
        //The user who last updated this entity
oMetadata.put(ElasticSearchDoc.Field.USER, elasticSearchDoc.getUser());
if (elasticSearchDoc.getRefreshMarker() != null) {
oMetadata.put(ElasticSearchDoc.Field.REFRESH_MARKER, elasticSearchDoc.getRefreshMarker());
}
return oMetadata;
}
/**
* {@inheritDoc}
*/
@Override
public List<String> getTableIdsByUri(final String type, final String dataUri) {
List<String> ids = Lists.newArrayList();
// Run the query and get the response.
if (dataUri != null) {
final SearchRequestBuilder request = client.prepareSearch(esIndex)
.setTypes(type)
.setSearchType(SearchType.QUERY_THEN_FETCH)
.setQuery(QueryBuilders.termQuery("serde.uri", dataUri))
.setSize(Integer.MAX_VALUE)
.setFetchSource(false);
final SearchResponse response = request.execute().actionGet(esCallTimeout);
if (response.getHits().getHits().length != 0) {
ids = getIds(response);
}
}
return ids;
}
/**
* {@inheritDoc}
*/
@Override
public List<String> getTableIdsByCatalogs(final String type, final List<QualifiedName> qualifiedNames,
final List<QualifiedName> excludeQualifiedNames) {
List<String> ids = Lists.newArrayList();
final QueryBuilder queryBuilder = QueryBuilders.boolQuery()
.must(QueryBuilders.termsQuery("name.qualifiedName.tree", qualifiedNames))
.must(QueryBuilders.termQuery("deleted_", false))
.mustNot(QueryBuilders.termsQuery("name.qualifiedName.tree", excludeQualifiedNames));
// Run the query and get the response.
final SearchRequestBuilder request = client.prepareSearch(esIndex)
.setTypes(type)
.setSearchType(SearchType.QUERY_THEN_FETCH)
.setQuery(queryBuilder)
            .setSize(Integer.MAX_VALUE) // TODO May break if too many tables are returned; switch to the scroll API
.setFetchSource(false);
final SearchResponse response = request.execute().actionGet(esCallTimeout);
if (response.getHits().getHits().length != 0) {
ids = getIds(response);
}
return ids;
}
/**
* {@inheritDoc}
*/
@Override
public List<String> getIdsByQualifiedName(final String type, final QualifiedName qualifiedName) {
List<String> result = Lists.newArrayList();
// Run the query and get the response.
final QueryBuilder queryBuilder = QueryBuilders.boolQuery()
.must(QueryBuilders.termQuery("name.qualifiedName.tree", qualifiedName))
.must(QueryBuilders.termQuery("deleted_", false));
final SearchRequestBuilder request = client.prepareSearch(esIndex)
.setTypes(type)
.setSearchType(SearchType.QUERY_THEN_FETCH)
.setQuery(queryBuilder)
.setSize(Integer.MAX_VALUE)
.setFetchSource(false);
final SearchResponse response = request.execute().actionGet(esCallTimeout);
if (response.getHits().getHits().length != 0) {
result = getIds(response);
}
return result;
}
/**
* {@inheritDoc}
*/
@Override
public <T> List<T> getQualifiedNamesByMarkerByNames(final String type,
final List<QualifiedName> qualifiedNames,
final Instant marker,
final List<QualifiedName> excludeQualifiedNames,
final Class<T> valueType) {
final List<T> result = Lists.newArrayList();
final List<String> names = qualifiedNames.stream().map(QualifiedName::toString).collect(Collectors.toList());
final List<String> excludeNames = excludeQualifiedNames.stream().map(QualifiedName::toString)
.collect(Collectors.toList());
//
// Run the query and get the response.
final QueryBuilder queryBuilder = QueryBuilders.boolQuery()
.must(QueryBuilders.termsQuery("name.qualifiedName.tree", names))
.must(QueryBuilders.termQuery("deleted_", false))
.must(QueryBuilders.rangeQuery(ElasticSearchDoc.Field.TIMESTAMP).lte(marker.getMillis()))
.mustNot(QueryBuilders.termsQuery("name.qualifiedName.tree", excludeNames))
.mustNot(QueryBuilders.termQuery("refreshMarker_", marker.toString()));
final SearchRequestBuilder request = client.prepareSearch(esIndex)
.setTypes(type)
.setSearchType(SearchType.QUERY_THEN_FETCH)
.setQuery(queryBuilder)
.setSize(Integer.MAX_VALUE);
final SearchResponse response = request.execute().actionGet(esCallTimeout);
if (response.getHits().getHits().length != 0) {
result.addAll(parseResponse(response, valueType));
}
return result;
}
/**
* {@inheritDoc}
*/
@Override
public void refresh() {
client.admin().indices().refresh(new RefreshRequest(esIndex)).actionGet();
}
/**
* {@inheritDoc}
*/
@Override
public ElasticSearchDoc get(final String type, final String id) {
return get(type, id, esIndex);
}
/**
* {@inheritDoc}
*/
@Override
public ElasticSearchDoc get(final String type, final String id, final String index) {
ElasticSearchDoc result = null;
final GetResponse response = client.prepareGet(index, type, id).execute().actionGet(esCallTimeout);
if (response.isExists()) {
result = parse(response);
}
return result;
}
/**
* {@inheritDoc}
*/
@Override
public void delete(final MetacatRequestContext metacatRequestContext, final String type,
final boolean softDelete) {
SearchResponse response = client.prepareSearch(esIndex)
.setSearchType(SearchType.QUERY_THEN_FETCH)
.setScroll(new TimeValue(config.getElasticSearchScrollTimeout()))
.setSize(config.getElasticSearchScrollFetchSize())
.setQuery(QueryBuilders.termQuery("_type", type))
.setFetchSource(false)
.execute()
.actionGet(esCallTimeout);
while (true) {
response = client.prepareSearchScroll(response.getScrollId())
.setScroll(new TimeValue(config.getElasticSearchScrollTimeout())).execute().actionGet(esCallTimeout);
//Break condition: No hits are returned
if (response.getHits().getHits().length == 0) {
break;
}
final List<String> ids = getIds(response);
if (softDelete) {
softDelete(type, ids, metacatRequestContext);
} else {
delete(type, ids);
}
}
}
/**
* {@inheritDoc}
*/
@Override
public void log(final String method, final String type, final String name, @Nullable final String data,
final String logMessage, @Nullable final Exception ex, final boolean error) {
log(method, type, name, data, logMessage, ex, error, esIndex);
}
/**
* Log the message in elastic search.
*
* @param method method
* @param type type
* @param name name
* @param data data
* @param logMessage message
* @param ex exception
* @param error is an error
* @param index es index
*/
private void log(final String method, final String type, final String name, @Nullable final String data,
final String logMessage, @Nullable final Exception ex, final boolean error, final String index) {
if (config.isElasticSearchPublishMetacatLogEnabled()) {
try {
final Map<String, Object> source = Maps.newHashMap();
source.put("method", method);
source.put("qname", name);
source.put("type", type);
source.put("data", data);
source.put("error", error);
source.put("message", logMessage);
source.put("details", Throwables.getStackTraceAsString(ex));
client.prepareIndex(index, "metacat-log").setSource(source).execute().actionGet(esCallTimeout);
} catch (Exception e) {
registry.counter(registry.createId(Metrics.CounterElasticSearchLog.getMetricName())
.withTags(Metrics.tagStatusFailureMap)).increment();
log.warn("Failed saving the log message in elastic search for index{} method {}, name {}. Message: {}",
index, method, name, e.getMessage());
}
}
}
/**
* {@inheritDoc}
*/
@Override
public List<TableDto> simpleSearch(final String searchString) {
final List<TableDto> result = Lists.newArrayList();
final SearchResponse response = client.prepareSearch(esIndex)
.setTypes(ElasticSearchDoc.Type.table.name())
.setSearchType(SearchType.QUERY_THEN_FETCH)
.setQuery(QueryBuilders.termQuery("_all", searchString))
.setSize(Integer.MAX_VALUE)
.execute()
.actionGet(esCallTimeout);
if (response.getHits().getHits().length != 0) {
result.addAll(parseResponse(response, TableDto.class));
}
return result;
}
/**
* Permanently delete index documents.
*
* @param type index type
* @param ids entity ids
*/
private void hardDeleteDoc(final String type, final List<String> ids) {
try {
RETRY_ES_PUBLISH.call(() -> {
final BulkRequestBuilder bulkRequest = client.prepareBulk();
ids.forEach(id -> bulkRequest.add(client.prepareDelete(esIndex, type, id)));
final BulkResponse bulkResponse = bulkRequest.execute().actionGet(esBulkCallTimeout);
log.info("Deleting metadata of type {} with count {}", type, ids.size());
if (bulkResponse.hasFailures()) {
for (BulkItemResponse item : bulkResponse.getItems()) {
if (item.isFailed()) {
handleException("ElasticSearchUtil.bulkDelete.item", type, item.getId(),
item.getFailure().getCause(), Metrics.CounterElasticSearchDelete.getMetricName());
}
}
}
return null;
});
} catch (Exception e) {
handleException("ElasticSearchUtil.bulkDelete", type, ids, e,
Metrics.CounterElasticSearchBulkDelete.getMetricName());
}
}
/**
* Get class from elastic search doc type.
*
* @param type type in string
* @return object class
*/
private Class getClass(final String type) {
return ElasticSearchDoc.Type.valueOf(type).getClazz();
}
private ElasticSearchDoc parse(final GetResponse response) {
ElasticSearchDoc result = null;
if (response.isExists()) {
final Map<String, Object> responseMap = response.getSourceAsMap();
final String user = (String) responseMap.get(ElasticSearchDoc.Field.USER);
final boolean deleted = (boolean) responseMap.get(ElasticSearchDoc.Field.DELETED);
final long timestamp = (long) responseMap.get(ElasticSearchDoc.Field.TIMESTAMP);
@SuppressWarnings("unchecked") final Object dto = metacatJson.parseJsonValue(
response.getSourceAsBytes(),
getClass(response.getType())
);
result = new ElasticSearchDoc(response.getId(), dto, user, deleted, timestamp);
}
return result;
}
/*
* Read the documents from source index then copy to merge index
* @param type index type
* @param ids list of doc ids
*/
private void copyDocToMergeIndex(final String type, final List<String> ids) {
final List<ElasticSearchDoc> docs = new ArrayList<>();
ids.forEach(id -> {
final ElasticSearchDoc doc = get(type, id);
if (doc != null) {
docs.add(doc);
}
});
bulkSaveToIndex(type, docs, config.getMergeEsIndex());
}
    /*
     * Check if in migration mode; if so, copy the docs to the merge index
     * @param type index type
     * @param ids list of doc ids
     */
private void ensureMigrationByCopy(final String type, final List<String> ids) {
if (!Strings.isNullOrEmpty(config.getMergeEsIndex())) {
copyDocToMergeIndex(type, ids);
}
}
    /*
     * Check if in migration mode; if so, save the docs to the merge index
     * @param type index type
     * @param docs list of metacat documents
     */
private void ensureMigrationBySave(final String type, final List<ElasticSearchDoc> docs) {
if (!Strings.isNullOrEmpty(config.getMergeEsIndex())) {
log.info("Bulk save to mergeEsIndex = {}", config.getMergeEsIndex());
bulkSaveToIndex(type, docs, config.getMergeEsIndex());
}
}
/* Use elasticSearch bulk API to mark the documents as deleted
* @param type index type
* @param ids list of entity ids
* @param metacatRequestContext context containing the user name
*/
private void softDeleteDoc(
final String type,
final List<String> ids,
final MetacatRequestContext metacatRequestContext) {
try {
RETRY_ES_PUBLISH.call(() -> {
final BulkRequestBuilder bulkRequest = client.prepareBulk();
final XContentBuilder builder = XContentFactory.contentBuilder(contentType);
builder.startObject().field(ElasticSearchDoc.Field.DELETED, true)
.field(ElasticSearchDoc.Field.TIMESTAMP, java.time.Instant.now().toEpochMilli())
.field(ElasticSearchDoc.Field.USER, metacatRequestContext.getUserName()).endObject();
ids.forEach(id -> bulkRequest.add(client.prepareUpdate(esIndex, type, id)
.setRetryOnConflict(NO_OF_CONFLICT_RETRIES).setDoc(builder)));
final BulkResponse bulkResponse = bulkRequest.execute().actionGet(esBulkCallTimeout);
if (bulkResponse.hasFailures()) {
for (BulkItemResponse item : bulkResponse.getItems()) {
if (item.isFailed()) {
handleException("ElasticSearchUtil.bulkSoftDelete.item", type, item.getId(),
item.getFailure().getCause(), Metrics.CounterElasticSearchDelete.getMetricName());
}
}
}
return null;
});
} catch (Exception e) {
handleException("ElasticSearchUtil.bulkSoftDelete", type, ids, e,
Metrics.CounterElasticSearchBulkDelete.getMetricName());
}
}
/**
* Save of a single entity to an index.
*
* @param type index type
* @param id id of the entity
* @param doc source string of the entity
* @param index the index name
*/
private void saveToIndex(final String type, final String id, final ElasticSearchDoc doc, final String index) {
try {
RETRY_ES_PUBLISH.call(() -> {
final IndexRequestBuilder indexRequestBuilder = prepareIndexRequest(index, type, doc);
if (indexRequestBuilder != null) {
indexRequestBuilder.execute().actionGet(esCallTimeout);
}
return null;
});
} catch (Exception e) {
handleException("ElasticSearchUtil.saveToIndex", type, id, e,
Metrics.CounterElasticSearchSave.getMetricName());
}
}
private static List<String> getIds(final SearchResponse response) {
final List<String> ret = Lists.newArrayList();
for (SearchHit hit : response.getHits().getHits()) {
ret.add(hit.getId());
}
return ret;
}
private <T> List<T> parseResponse(final SearchResponse response, final Class<T> valueType) {
final List<T> ret = Lists.newArrayList();
for (SearchHit hit : response.getHits().getHits()) {
try {
ret.add(metacatJson.parseJsonValue(hit.getSourceAsString(), valueType));
} catch (Exception e) {
throw Throwables.propagate(e);
}
}
return ret;
}
/**
* Bulk save of the entities.
*
* @param type index type
* @param docs metacat documents
*/
private void bulkSaveToIndex(final String type, final List<ElasticSearchDoc> docs, final String index) {
if (docs != null && !docs.isEmpty()) {
try {
RETRY_ES_PUBLISH.call(() -> {
final BulkRequestBuilder bulkRequest = client.prepareBulk();
for (ElasticSearchDoc doc : docs) {
final IndexRequestBuilder indexRequestBuilder = prepareIndexRequest(index, type, doc);
if (indexRequestBuilder != null) {
bulkRequest.add(indexRequestBuilder);
}
}
if (bulkRequest.numberOfActions() > 0) {
final BulkResponse bulkResponse = bulkRequest.execute().actionGet(esBulkCallTimeout);
log.info("Bulk saving metadata of index {} type {} with size {}.",
index, type, docs.size());
if (bulkResponse.hasFailures()) {
for (BulkItemResponse item : bulkResponse.getItems()) {
if (item.isFailed()) {
handleException("ElasticSearchUtil.bulkSaveToIndex.index", type, item.getId(),
item.getFailure().getCause(), Metrics.CounterElasticSearchSave.getMetricName());
}
}
}
}
return null;
});
} catch (Exception e) {
final List<String> docIds = docs.stream().map(ElasticSearchDoc::getId).collect(Collectors.toList());
handleException("ElasticSearchUtil.bulkSaveToIndex", type, docIds, e,
Metrics.CounterElasticSearchBulkSave.getMetricName());
}
}
}
IndexRequestBuilder prepareIndexRequest(final String index,
final String type,
final ElasticSearchDoc doc) {
return client.prepareIndex(index, type, doc.getId()).setSource(toJsonString(doc), XContentType.JSON);
}
}
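
// Test-oriented usage sketch (hypothetical values): because Elasticsearch is
// near-real-time, callers that need read-your-writes semantics (e.g. integration
// tests) refresh the index explicitly after saving:
//
//   esUtil.save(ElasticSearchDoc.Type.table.name(), doc.getId(), doc);
//   esUtil.refresh();  // force newly indexed docs to become searchable
//   assert esUtil.get(ElasticSearchDoc.Type.table.name(), doc.getId()) != null;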
| 116 |
0 | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/search/ElasticSearchCatalogTraversalAction.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services.search;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.DatabaseDto;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatEventBus;
import com.netflix.metacat.common.server.monitoring.Metrics;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.usermetadata.TagService;
import com.netflix.metacat.common.server.usermetadata.UserMetadataService;
import com.netflix.metacat.main.services.CatalogTraversal;
import com.netflix.metacat.main.services.CatalogTraversalAction;
import com.netflix.metacat.main.services.DatabaseService;
import com.netflix.metacat.main.services.TableService;
import com.netflix.spectator.api.Registry;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.Nonnull;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;
/**
* This class does a refresh of all the metadata entities from original data sources to elastic search.
*
* @author amajumdar
*/
@Slf4j
public class ElasticSearchCatalogTraversalAction implements CatalogTraversalAction {
private final Config config;
private final DatabaseService databaseService;
private final TableService tableService;
private final ElasticSearchUtil elasticSearchUtil;
private final UserMetadataService userMetadataService;
private final TagService tagService;
private final MetacatEventBus eventBus;
private final Registry registry;
/**
* Constructor.
*
* @param config System config
* @param eventBus Event bus
* @param databaseService Database service
* @param tableService Table service
* @param userMetadataService User metadata service
* @param tagService Tag service
* @param registry registry of spectator
* @param elasticSearchUtil ElasticSearch client wrapper
*/
public ElasticSearchCatalogTraversalAction(
@Nonnull @NonNull final Config config,
@Nonnull @NonNull final MetacatEventBus eventBus,
@Nonnull @NonNull final DatabaseService databaseService,
@Nonnull @NonNull final TableService tableService,
@Nonnull @NonNull final UserMetadataService userMetadataService,
@Nonnull @NonNull final TagService tagService,
@Nonnull @NonNull final ElasticSearchUtil elasticSearchUtil,
@Nonnull @NonNull final Registry registry
) {
this.config = config;
this.eventBus = eventBus;
this.databaseService = databaseService;
this.tableService = tableService;
this.userMetadataService = userMetadataService;
this.tagService = tagService;
this.elasticSearchUtil = elasticSearchUtil;
this.registry = registry;
}
@Override
public void done(final CatalogTraversal.Context context) {
deleteUnmarkedEntities(context);
}
private void deleteUnmarkedEntities(final CatalogTraversal.Context context) {
log.info("Start: Delete unmarked entities");
//
// get unmarked qualified names
// check if it not exists
// delete
//
elasticSearchUtil.refresh();
        final MetacatRequestContext requestContext = MetacatRequestContext.builder()
            .userName("admin")
            .clientAppName("metacat-refresh")
            .apiUri("esRefresh")
            .scheme("internal")
            .build();
final List<DatabaseDto> unmarkedDatabaseDtos = elasticSearchUtil
.getQualifiedNamesByMarkerByNames("database", context.getQNames(), context.getStartInstant(),
context.getExcludeQNames(),
DatabaseDto.class);
if (!unmarkedDatabaseDtos.isEmpty()) {
if (unmarkedDatabaseDtos.size() <= config.getElasticSearchThresholdUnmarkedDatabasesDelete()) {
log.info("Traversal Done: Start: Delete unmarked databases({})", unmarkedDatabaseDtos.size());
final List<String> unmarkedDatabaseNames = Lists.newArrayList();
final List<DatabaseDto> deleteDatabaseDtos = unmarkedDatabaseDtos.stream().filter(databaseDto -> {
boolean result = false;
try {
unmarkedDatabaseNames.add(databaseDto.getName().toString());
result = !databaseService.exists(databaseDto.getName());
} catch (Exception e) {
log.warn("Ignoring exception during deleteUnmarkedEntities for {}. Message: {}",
databaseDto.getName(), e.getMessage());
}
return result;
}).collect(Collectors.toList());
log.info("Unmarked databases({}): {}", unmarkedDatabaseNames.size(), unmarkedDatabaseNames);
log.info("Deleting databases({})", deleteDatabaseDtos.size());
if (!deleteDatabaseDtos.isEmpty()) {
final List<QualifiedName> deleteDatabaseQualifiedNames = deleteDatabaseDtos.stream()
.map(DatabaseDto::getName)
.collect(Collectors.toList());
final List<String> deleteDatabaseNames = deleteDatabaseQualifiedNames.stream().map(
QualifiedName::toString).collect(Collectors.toList());
log.info("Deleting databases({}): {}", deleteDatabaseNames.size(), deleteDatabaseNames);
userMetadataService.deleteDefinitionMetadata(deleteDatabaseQualifiedNames);
elasticSearchUtil.softDelete("database", deleteDatabaseNames, requestContext);
}
log.info("End: Delete unmarked databases({})", unmarkedDatabaseDtos.size());
} else {
log.info("Count of unmarked databases({}) is more than the threshold {}", unmarkedDatabaseDtos.size(),
config.getElasticSearchThresholdUnmarkedDatabasesDelete());
registry.counter(
registry.createId(Metrics.CounterElasticSearchUnmarkedDatabaseThreshholdReached.getMetricName()))
.increment();
}
}
final List<TableDto> unmarkedTableDtos = elasticSearchUtil
.getQualifiedNamesByMarkerByNames("table",
context.getQNames(), context.getStartInstant(), context.getExcludeQNames(), TableDto.class);
if (!unmarkedTableDtos.isEmpty()) {
if (unmarkedTableDtos.size() <= config.getElasticSearchThresholdUnmarkedTablesDelete()) {
log.info("Start: Delete unmarked tables({})", unmarkedTableDtos.size());
final List<String> unmarkedTableNames = Lists.newArrayList();
final List<TableDto> deleteTableDtos = unmarkedTableDtos.stream().filter(tableDto -> {
boolean result = false;
try {
unmarkedTableNames.add(tableDto.getName().toString());
result = !tableService.exists(tableDto.getName());
} catch (Exception e) {
log.warn("Ignoring exception during deleteUnmarkedEntities for {}. Message: {}",
tableDto.getName(), e.getMessage());
}
return result;
}).collect(Collectors.toList());
log.info("Unmarked tables({}): {}", unmarkedTableNames.size(), unmarkedTableNames);
log.info("Deleting tables({})", deleteTableDtos.size());
if (!deleteTableDtos.isEmpty()) {
final List<String> deleteTableNames = deleteTableDtos.stream().map(
dto -> dto.getName().toString()).collect(Collectors.toList());
log.info("Deleting tables({}): {}", deleteTableNames.size(), deleteTableNames);
userMetadataService.deleteMetadata("admin", Lists.newArrayList(deleteTableDtos));
// Publish event. Elasticsearch event handler will take care of updating the index already
// TODO: Re-evaluate events vs. direct calls for these types of situations like in Genie
deleteTableDtos.forEach(
tableDto -> {
tagService.delete(tableDto.getName(), false);
this.eventBus.post(
new MetacatDeleteTablePostEvent(tableDto.getName(), requestContext, this, tableDto)
);
}
);
}
log.info("Traversal Done: End: Delete unmarked tables({})", unmarkedTableDtos.size());
} else {
log.info("Count of unmarked tables({}) is more than the threshold {}", unmarkedTableDtos.size(),
config.getElasticSearchThresholdUnmarkedTablesDelete());
registry.counter(
registry.createId(Metrics.CounterElasticSearchUnmarkedTableThreshholdReached.getMetricName()))
.increment();
}
}
log.info("End: Delete unmarked entities");
}
/**
* Save all databases to index it in elastic search.
*
* @param context traversal context
* @param dtos database dtos
*/
@Override
public void applyDatabases(final CatalogTraversal.Context context, final List<DatabaseDto> dtos) {
final List<ElasticSearchDoc> docs = dtos.stream()
.filter(Objects::nonNull)
.map(dto -> new ElasticSearchDoc(dto.getName().toString(), dto, "admin", false, context.getRunId()))
.collect(Collectors.toList());
elasticSearchUtil.save(ElasticSearchDoc.Type.database.name(), docs);
}
/**
* Save all tables to index them in elastic search.
*
* @param context traversal context
* @param dtos table dtos
*/
@Override
public void applyTables(final CatalogTraversal.Context context, final List<Optional<TableDto>> dtos) {
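// Index each present table, attributing the doc to the table's creator when audit
// info is available and falling back to "admin" otherwise.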
final List<ElasticSearchDoc> docs = dtos.stream().filter(dto -> dto != null && dto.isPresent()).map(
tableDtoOptional -> {
final TableDto dto = tableDtoOptional.get();
final String userName = dto.getAudit() != null ? dto.getAudit().getCreatedBy() : "admin";
return new ElasticSearchDoc(dto.getName().toString(), dto, userName, false, context.getRunId());
}).collect(Collectors.toList());
elasticSearchUtil.save(ElasticSearchDoc.Type.table.name(), docs);
}
}
| 117 |
0 | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/search/ElasticSearchRefresh.java | /*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services.search;
import com.google.common.base.Functions;
import com.google.common.base.Splitter;
import com.google.common.base.Strings;
import com.google.common.base.Throwables;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.CatalogDto;
import com.netflix.metacat.common.dto.CatalogMappingDto;
import com.netflix.metacat.common.dto.DatabaseDto;
import com.netflix.metacat.common.dto.GetPartitionsRequestDto;
import com.netflix.metacat.common.dto.HasMetadata;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.dto.SortOrder;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatEventBus;
import com.netflix.metacat.common.server.monitoring.Metrics;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.usermetadata.TagService;
import com.netflix.metacat.common.server.usermetadata.UserMetadataService;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import com.netflix.metacat.main.services.CatalogService;
import com.netflix.metacat.main.services.DatabaseService;
import com.netflix.metacat.main.services.GetDatabaseServiceParameters;
import com.netflix.metacat.main.services.GetTableServiceParameters;
import com.netflix.metacat.main.services.PartitionService;
import com.netflix.metacat.main.services.TableService;
import com.netflix.spectator.api.Registry;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import org.joda.time.Instant;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.stream.Collectors;
/**
* This class does a refresh of all the metadata entities from original data sources to elastic search.
*
* @author amajumdar
*/
@Slf4j
@Deprecated
public class ElasticSearchRefresh {
private static final Predicate<Object> NOT_NULL = Objects::nonNull;
private static AtomicBoolean isElasticSearchMetacatRefreshAlreadyRunning = new AtomicBoolean(false);
private final CatalogService catalogService;
private final Config config;
private final DatabaseService databaseService;
private final TableService tableService;
private final PartitionService partitionService;
private final ElasticSearchUtil elasticSearchUtil;
private final UserMetadataService userMetadataService;
private final TagService tagService;
private final MetacatEventBus eventBus;
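// Marker for the current refresh run: every document saved during the run is stamped with
// this value, so docs still carrying an older marker afterwards are treated as stale.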
private Instant refreshMarker;
private String refreshMarkerText;
private Registry registry;
// Fixed thread pool
private ListeningExecutorService service;
private ListeningExecutorService esService;
private ExecutorService defaultService;
/**
* Constructor.
*
* @param config System config
* @param eventBus Event bus
* @param catalogService Catalog service
* @param databaseService Database service
* @param tableService Table service
* @param partitionService Partition service
* @param userMetadataService User metadata service
* @param tagService Tag service
* @param registry registry of spectator
* @param elasticSearchUtil ElasticSearch client wrapper
*/
public ElasticSearchRefresh(
@Nonnull @NonNull final Config config,
@Nonnull @NonNull final MetacatEventBus eventBus,
@Nonnull @NonNull final CatalogService catalogService,
@Nonnull @NonNull final DatabaseService databaseService,
@Nonnull @NonNull final TableService tableService,
@Nonnull @NonNull final PartitionService partitionService,
@Nonnull @NonNull final UserMetadataService userMetadataService,
@Nonnull @NonNull final TagService tagService,
@Nonnull @NonNull final ElasticSearchUtil elasticSearchUtil,
@Nonnull @NonNull final Registry registry
) {
this.config = config;
this.eventBus = eventBus;
this.catalogService = catalogService;
this.databaseService = databaseService;
this.tableService = tableService;
this.partitionService = partitionService;
this.userMetadataService = userMetadataService;
this.tagService = tagService;
this.elasticSearchUtil = elasticSearchUtil;
this.registry = registry;
}
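// Bounded pool whose rejection handler blocks the submitting thread until queue space
// frees up, providing simple backpressure instead of dropping or failing tasks.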
private static ExecutorService newFixedThreadPool(
final int nThreads,
final String threadFactoryName,
final int queueSize
) {
return new ThreadPoolExecutor(nThreads, nThreads,
0L, TimeUnit.MILLISECONDS,
new LinkedBlockingQueue<>(queueSize),
new ThreadFactoryBuilder()
.setNameFormat(threadFactoryName)
.build(),
(r, executor) -> {
// this will block if the queue is full
try {
executor.getQueue().put(r);
} catch (InterruptedException e) {
throw Throwables.propagate(e);
}
});
}
/**
* Does a sweep across all catalogs to refresh the same data in elastic search.
*/
public void process() {
final List<String> catalogNames = getCatalogNamesToRefresh();
final List<QualifiedName> qNames = catalogNames.stream()
.map(QualifiedName::ofCatalog).collect(Collectors.toList());
_process(qNames, () -> _processCatalogs(catalogNames), "process", true, 1000);
}
/**
* Does a sweep across given catalogs to refresh the same data in elastic search.
*
* @param catalogNames catalog names
*/
public void processCatalogs(final List<String> catalogNames) {
final List<QualifiedName> qNames = catalogNames.stream()
.map(QualifiedName::ofCatalog).collect(Collectors.toList());
_process(qNames, () -> _processCatalogs(catalogNames), "processCatalogs", true, 1000);
}
/**
* Does a sweep across given catalog and databases to refresh the same data in elastic search.
*
* @param catalogName catalog
* @param databaseNames database names
*/
public void processDatabases(final String catalogName, final List<String> databaseNames) {
final List<QualifiedName> qNames = databaseNames.stream()
.map(s -> QualifiedName.ofDatabase(catalogName, s)).collect(Collectors.toList());
_process(qNames, () -> _processDatabases(QualifiedName.ofCatalog(catalogName), qNames), "processDatabases",
true, 1000);
}
/**
* Does a sweep across all catalogs to refresh the same data in elastic search.
*
* @param names qualified names
*/
public void processPartitions(final List<QualifiedName> names) {
List<QualifiedName> qNames = names;
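// When no names are supplied, fall back to the catalogs configured for partition refresh.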
if (qNames == null || qNames.isEmpty()) {
final List<String> catalogNames = Splitter.on(',').omitEmptyStrings().trimResults()
.splitToList(config.getElasticSearchRefreshPartitionsIncludeCatalogs());
qNames = catalogNames.stream()
.map(QualifiedName::ofCatalog).collect(Collectors.toList());
}
final List<QualifiedName> qualifiedNames = qNames;
_process(qualifiedNames, () -> _processPartitions(qualifiedNames), "processPartitions", false, 500);
}
@SuppressWarnings("checkstyle:methodname")
private ListenableFuture<Void> _processPartitions(final List<QualifiedName> qNames) {
final List<QualifiedName> excludeQualifiedNames = config.getElasticSearchRefreshExcludeQualifiedNames();
final List<String> tables =
elasticSearchUtil.getTableIdsByCatalogs(ElasticSearchDoc.Type.table.name(),
qNames, excludeQualifiedNames);
final List<ListenableFuture<ListenableFuture<Void>>> futures = tables.stream().map(s -> service.submit(() -> {
final QualifiedName tableName = QualifiedName.fromString(s, false);
final List<ListenableFuture<Void>> indexFutures = Lists.newArrayList();
int offset = 0;
int count;
final Sort sort;
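// s3/aegisthus catalogs page by "id"; other catalogs page by "part_id" (assumed to be
// the stable sort column each connector type exposes).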
if ("s3".equals(tableName.getCatalogName()) || "aegisthus".equals(tableName.getCatalogName())) {
sort = new Sort("id", SortOrder.ASC);
} else {
sort = new Sort("part_id", SortOrder.ASC);
}
final Pageable pageable = new Pageable(10000, offset);
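// Page through the partitions 10000 at a time; each page is split into 1000-doc
// batches for indexing, and the loop ends on the first short page.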
do {
final List<PartitionDto> partitionDtos =
partitionService.list(tableName, sort, pageable, true, true,
new GetPartitionsRequestDto(null, null, true, true));
count = partitionDtos.size();
if (!partitionDtos.isEmpty()) {
final List<List<PartitionDto>> partitionedPartitionDtos = Lists.partition(partitionDtos, 1000);
partitionedPartitionDtos.forEach(
subPartitionsDtos -> indexFutures.add(indexPartitionDtos(tableName, subPartitionsDtos)));
offset = offset + count;
pageable.setOffset(offset);
}
} while (count == 10000);
return Futures.transform(Futures.successfulAsList(indexFutures),
Functions.constant((Void) null), defaultService);
})).collect(Collectors.toList());
final ListenableFuture<Void> processPartitionsFuture = Futures.transformAsync(Futures.successfulAsList(futures),
input -> {
final List<ListenableFuture<Void>> inputFuturesWithoutNulls = input.stream().filter(NOT_NULL)
.collect(Collectors.toList());
return Futures.transform(Futures.successfulAsList(inputFuturesWithoutNulls),
Functions.constant(null), defaultService);
}, defaultService);
return Futures.transformAsync(processPartitionsFuture, input -> {
elasticSearchUtil.refresh();
final List<ListenableFuture<Void>> cleanUpFutures = tables.stream()
.map(s -> service
.submit(() -> partitionsCleanUp(QualifiedName.fromString(s, false), excludeQualifiedNames)))
.collect(Collectors.toList());
return Futures.transform(Futures.successfulAsList(cleanUpFutures),
Functions.constant(null), defaultService);
}, defaultService);
}
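// Deletes partition docs that were not re-marked during this run and whose partition
// keys no longer exist in the source catalog, along with their user metadata.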
private Void partitionsCleanUp(final QualifiedName tableName, final List<QualifiedName> excludeQualifiedNames) {
final List<PartitionDto> unmarkedPartitionDtos = elasticSearchUtil.getQualifiedNamesByMarkerByNames(
ElasticSearchDoc.Type.partition.name(),
Lists.newArrayList(tableName), refreshMarker, excludeQualifiedNames, PartitionDto.class);
if (!unmarkedPartitionDtos.isEmpty()) {
log.info("Start deleting unmarked partitions({}) for table {}",
unmarkedPartitionDtos.size(), tableName);
try {
final List<String> unmarkedPartitionNames = unmarkedPartitionDtos.stream()
.map(p -> p.getDefinitionName().getPartitionName()).collect(Collectors.toList());
final Set<String> existingUnmarkedPartitionNames = Sets.newHashSet(
partitionService.getPartitionKeys(tableName, null, null,
new GetPartitionsRequestDto(null, unmarkedPartitionNames, false, true)));
final List<String> partitionIds = unmarkedPartitionDtos.stream()
.filter(p -> !existingUnmarkedPartitionNames.contains(
p.getDefinitionName().getPartitionName()))
.map(p -> p.getDefinitionName().toString()).collect(Collectors.toList());
if (!partitionIds.isEmpty()) {
log.info("Deleting unused partitions({}) for table {}:{}",
partitionIds.size(), tableName, partitionIds);
elasticSearchUtil.delete(ElasticSearchDoc.Type.partition.name(), partitionIds);
final List<HasMetadata> deletePartitionDtos = unmarkedPartitionDtos.stream()
.filter(
p -> !existingUnmarkedPartitionNames.contains(
p.getDefinitionName().getPartitionName()))
.collect(Collectors.toList());
userMetadataService.deleteMetadata("admin", deletePartitionDtos);
}
} catch (Exception e) {
log.warn("Failed deleting the unmarked partitions for table {}", tableName);
}
log.info("End deleting unmarked partitions for table {}", tableName);
}
return null;
}
@SuppressWarnings("checkstyle:methodname")
private void _process(final List<QualifiedName> qNames, final Supplier<ListenableFuture<Void>> supplier,
final String requestName, final boolean delete, final int queueSize) {
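// Only one full refresh may run at a time; the CAS below turns concurrent invocations
// into counted no-ops instead of queueing them.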
if (isElasticSearchMetacatRefreshAlreadyRunning.compareAndSet(false, true)) {
final long start = registry.clock().wallTime();
try {
log.info("Start: Full refresh of metacat index in elastic search. Processing {} ...", qNames);
final MetacatRequestContext context = MetacatRequestContext.builder()
.userName("admin")
.clientAppName("elasticSearchRefresher")
.apiUri("esRefresh")
.scheme("internal")
.build();
MetacatContextManager.setContext(context);
refreshMarker = Instant.now();
refreshMarkerText = refreshMarker.toString();
service = MoreExecutors
.listeningDecorator(newFixedThreadPool(10, "elasticsearch-refresher-%d", queueSize));
esService = MoreExecutors
.listeningDecorator(newFixedThreadPool(5, "elasticsearch-refresher-es-%d", queueSize));
defaultService = Executors.newSingleThreadExecutor();
supplier.get().get(24, TimeUnit.HOURS);
log.info("End: Full refresh of metacat index in elastic search");
if (delete) {
deleteUnmarkedEntities(qNames, config.getElasticSearchRefreshExcludeQualifiedNames());
}
} catch (Exception e) {
log.error("Full refresh of metacat index failed", e);
registry.counter(registry.createId(Metrics.CounterElasticSearchRefresh.getMetricName())
.withTags(Metrics.tagStatusFailureMap)).increment();
} finally {
try {
shutdown(service);
shutdown(esService);
shutdown(defaultService);
} finally {
isElasticSearchMetacatRefreshAlreadyRunning.set(false);
final long duration = registry.clock().wallTime() - start;
this.registry.timer(Metrics.TimerElasticSearchRefresh.getMetricName()
+ "." + requestName).record(duration, TimeUnit.MILLISECONDS);
log.info("### Time taken to complete {} is {} ms", requestName, duration);
}
}
} else {
log.info("Full refresh of metacat index is already running.");
registry.counter(registry.createId(Metrics.CounterElasticSearchRefreshAlreadyRunning.getMetricName()))
.increment();
}
}
private void shutdown(@Nullable final ExecutorService executorService) {
if (executorService != null) {
executorService.shutdown();
try {
// Wait a while for existing tasks to terminate
if (!executorService.awaitTermination(60, TimeUnit.SECONDS)) {
executorService.shutdownNow(); // Cancel currently executing tasks
// Wait a while for tasks to respond to being cancelled
if (!executorService.awaitTermination(60, TimeUnit.SECONDS)) {
log.warn("Thread pool for metacat refresh did not terminate");
}
}
} catch (InterruptedException ie) {
// (Re-)Cancel if current thread also interrupted
executorService.shutdownNow();
// Preserve interrupt status
Thread.currentThread().interrupt();
}
}
}
private void deleteUnmarkedEntities(final List<QualifiedName> qNames,
final List<QualifiedName> excludeQualifiedNames) {
log.info("Start: Delete unmarked entities");
//
// get unmarked qualified names
// check if the entity no longer exists
// delete
//
elasticSearchUtil.refresh();
final MetacatRequestContext context = MetacatRequestContext.builder()
.userName("admin")
.clientAppName("metacat-refresh")
.apiUri("esRefresh")
.scheme("internal")
.build();
final List<DatabaseDto> unmarkedDatabaseDtos = elasticSearchUtil
.getQualifiedNamesByMarkerByNames("database", qNames, refreshMarker, excludeQualifiedNames,
DatabaseDto.class);
if (!unmarkedDatabaseDtos.isEmpty()) {
if (unmarkedDatabaseDtos.size() <= config.getElasticSearchThresholdUnmarkedDatabasesDelete()) {
log.info("Start: Delete unmarked databases({})", unmarkedDatabaseDtos.size());
final List<String> unmarkedDatabaseNames = Lists.newArrayList();
final List<DatabaseDto> deleteDatabaseDtos = unmarkedDatabaseDtos.stream().filter(databaseDto -> {
boolean result = false;
try {
unmarkedDatabaseNames.add(databaseDto.getName().toString());
final DatabaseDto dto = databaseService.get(databaseDto.getName(),
GetDatabaseServiceParameters.builder()
.includeUserMetadata(false)
.includeTableNames(false)
.disableOnReadMetadataIntercetor(false)
.build());
if (dto == null) {
result = true;
}
} catch (DatabaseNotFoundException de) {
result = true;
} catch (Exception e) {
log.warn("Ignoring exception during deleteUnmarkedEntities for {}. Message: {}",
databaseDto.getName(), e.getMessage());
}
return result;
}).collect(Collectors.toList());
log.info("Unmarked databases({}): {}", unmarkedDatabaseNames.size(), unmarkedDatabaseNames);
log.info("Deleting databases({})", deleteDatabaseDtos.size());
if (!deleteDatabaseDtos.isEmpty()) {
final List<QualifiedName> deleteDatabaseQualifiedNames = deleteDatabaseDtos.stream()
.map(DatabaseDto::getName)
.collect(Collectors.toList());
final List<String> deleteDatabaseNames = deleteDatabaseQualifiedNames.stream().map(
QualifiedName::toString).collect(Collectors.toList());
log.info("Deleting databases({}): {}", deleteDatabaseNames.size(), deleteDatabaseNames);
userMetadataService.deleteDefinitionMetadata(deleteDatabaseQualifiedNames);
elasticSearchUtil.softDelete("database", deleteDatabaseNames, context);
}
log.info("End: Delete unmarked databases({})", unmarkedDatabaseDtos.size());
} else {
log.info("Count of unmarked databases({}) is more than the threshold {}", unmarkedDatabaseDtos.size(),
config.getElasticSearchThresholdUnmarkedDatabasesDelete());
registry.counter(
registry.createId(Metrics.CounterElasticSearchUnmarkedDatabaseThreshholdReached.getMetricName()))
.increment();
}
}
final List<TableDto> unmarkedTableDtos = elasticSearchUtil
.getQualifiedNamesByMarkerByNames("table",
qNames, refreshMarker, excludeQualifiedNames, TableDto.class);
if (!unmarkedTableDtos.isEmpty()) {
if (unmarkedTableDtos.size() <= config.getElasticSearchThresholdUnmarkedTablesDelete()) {
log.info("Start: Delete unmarked tables({})", unmarkedTableDtos.size());
final List<String> unmarkedTableNames = Lists.newArrayList();
final List<TableDto> deleteTableDtos = unmarkedTableDtos.stream().filter(tableDto -> {
boolean result = false;
try {
unmarkedTableNames.add(tableDto.getName().toString());
final Optional<TableDto> dto = tableService.get(tableDto.getName(),
GetTableServiceParameters.builder()
.includeDataMetadata(false)
.disableOnReadMetadataIntercetor(false)
.includeInfo(true)
.includeDefinitionMetadata(false)
.build());
if (!dto.isPresent()) {
result = true;
}
} catch (Exception e) {
log.warn("Ignoring exception during deleteUnmarkedEntities for {}. Message: {}",
tableDto.getName(), e.getMessage());
}
return result;
}).collect(Collectors.toList());
log.info("Unmarked tables({}): {}", unmarkedTableNames.size(), unmarkedTableNames);
log.info("Deleting tables({})", deleteTableDtos.size());
if (!deleteTableDtos.isEmpty()) {
final List<String> deleteTableNames = deleteTableDtos.stream().map(
dto -> dto.getName().toString()).collect(Collectors.toList());
log.info("Deleting tables({}): {}", deleteTableNames.size(), deleteTableNames);
userMetadataService.deleteMetadata("admin", Lists.newArrayList(deleteTableDtos));
// Publish event. Elasticsearch event handler will take care of updating the index already
// TODO: Re-evaluate events vs. direct calls for these types of situations like in Genie
deleteTableDtos.forEach(
tableDto -> {
tagService.delete(tableDto.getName(), false);
this.eventBus.post(
new MetacatDeleteTablePostEvent(tableDto.getName(), context, this, tableDto)
);
}
);
}
log.info("End: Delete unmarked tables({})", unmarkedTableDtos.size());
} else {
log.info("Count of unmarked tables({}) is more than the threshold {}", unmarkedTableDtos.size(),
config.getElasticSearchThresholdUnmarkedTablesDelete());
registry.counter(
registry.createId(Metrics.CounterElasticSearchUnmarkedTableThreshholdReached.getMetricName()))
.increment();
}
}
log.info("End: Delete unmarked entities");
}
@SuppressWarnings("checkstyle:methodname")
private ListenableFuture<Void> _processCatalogs(final List<String> catalogNames) {
log.info("Start: Full refresh of catalogs: {}", catalogNames);
final List<ListenableFuture<CatalogDto>> getCatalogFutures = catalogNames.stream()
.map(catalogName -> service.submit(() -> {
CatalogDto result = null;
try {
result = getCatalog(catalogName);
} catch (Exception e) {
log.error("Failed to retrieve catalog: {}", catalogName);
elasticSearchUtil.log("ElasticSearchRefresh.getCatalog",
ElasticSearchDoc.Type.catalog.name(), catalogName, null,
e.getMessage(), e, true);
}
return result;
}))
.collect(Collectors.toList());
return Futures.transformAsync(Futures.successfulAsList(getCatalogFutures),
input -> {
final List<ListenableFuture<Void>> processCatalogFutures = input.stream().filter(NOT_NULL).map(
catalogDto -> {
final List<QualifiedName> databaseNames = getDatabaseNamesToRefresh(catalogDto);
return _processDatabases(catalogDto.getName(), databaseNames);
}).filter(NOT_NULL).collect(Collectors.toList());
return Futures.transform(Futures.successfulAsList(processCatalogFutures),
Functions.constant(null), defaultService);
}, defaultService);
}
private List<QualifiedName> getDatabaseNamesToRefresh(final CatalogDto catalogDto) {
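// The configured include list (if any) takes precedence over the catalog's own database
// list; the exclude list is then applied to either result.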
List<QualifiedName> result = null;
if (!config.getElasticSearchRefreshIncludeDatabases().isEmpty()) {
result = config.getElasticSearchRefreshIncludeDatabases().stream()
.filter(q -> catalogDto.getName().getCatalogName().equals(q.getCatalogName()))
.collect(Collectors.toList());
} else {
result = catalogDto.getDatabases().stream()
.map(n -> QualifiedName.ofDatabase(catalogDto.getName().getCatalogName(), n))
.collect(Collectors.toList());
}
if (!config.getElasticSearchRefreshExcludeQualifiedNames().isEmpty()) {
result.removeAll(config.getElasticSearchRefreshExcludeQualifiedNames());
}
return result;
}
private List<String> getCatalogNamesToRefresh() {
List<String> result = null;
if (!Strings.isNullOrEmpty(config.getElasticSearchRefreshIncludeCatalogs())) {
result = Splitter.on(',').omitEmptyStrings().trimResults()
.splitToList(config.getElasticSearchRefreshIncludeCatalogs());
} else {
result = getCatalogNames();
}
return result;
}
/**
* Process the list of databases.
*
* @param catalogName catalog name
* @param databaseNames database names
* @return future
*/
@SuppressWarnings("checkstyle:methodname")
private ListenableFuture<Void> _processDatabases(final QualifiedName catalogName,
final List<QualifiedName> databaseNames) {
ListenableFuture<Void> resultFuture = null;
log.info("Full refresh of catalog {} for databases({}): {}", catalogName, databaseNames.size(), databaseNames);
final List<ListenableFuture<DatabaseDto>> getDatabaseFutures = databaseNames.stream()
.map(databaseName -> service.submit(() -> {
DatabaseDto result = null;
try {
result = getDatabase(databaseName);
} catch (Exception e) {
log.error("Failed to retrieve database: {}", databaseName);
elasticSearchUtil.log("ElasticSearchRefresh.getDatabase",
ElasticSearchDoc.Type.database.name(),
databaseName.toString(), null, e.getMessage(), e, true);
}
return result;
}))
.collect(Collectors.toList());
if (getDatabaseFutures != null && !getDatabaseFutures.isEmpty()) {
resultFuture = Futures.transformAsync(Futures.successfulAsList(getDatabaseFutures),
input -> {
final ListenableFuture<Void> processDatabaseFuture = indexDatabaseDtos(catalogName, input);
final List<ListenableFuture<Void>> processDatabaseFutures = input.stream().filter(NOT_NULL)
.map(databaseDto -> {
final List<QualifiedName> tableNames = databaseDto.getTables().stream()
.map(s -> QualifiedName.ofTable(databaseDto.getName().getCatalogName(),
databaseDto.getName().getDatabaseName(), s))
.collect(Collectors.toList());
log.info("Full refresh of database {} for tables({}): {}",
databaseDto.getName(),
databaseDto.getTables().size(), databaseDto.getTables());
return processTables(databaseDto.getName(), tableNames);
}).filter(NOT_NULL).collect(Collectors.toList());
processDatabaseFutures.add(processDatabaseFuture);
return Futures.transform(Futures.successfulAsList(processDatabaseFutures),
Functions.constant(null), defaultService);
}, defaultService);
}
return resultFuture;
}
/**
* Save all databases to index them in elastic search.
*
* @param catalogName catalog name
* @param dtos database dtos
* @return future
*/
private ListenableFuture<Void> indexDatabaseDtos(final QualifiedName catalogName, final List<DatabaseDto> dtos) {
return esService.submit(() -> {
final List<ElasticSearchDoc> docs = dtos.stream()
.filter(Objects::nonNull)
.map(dto -> new ElasticSearchDoc(dto.getName().toString(), dto, "admin", false, refreshMarkerText))
.collect(Collectors.toList());
log.info("Saving databases for catalog: {}", catalogName);
elasticSearchUtil.save(ElasticSearchDoc.Type.database.name(), docs);
return null;
});
}
/**
* Process the list of tables in batches.
*
* @param databaseName database name
* @param tableNames table names
* @return A future containing the tasks
*/
private ListenableFuture<Void> processTables(final QualifiedName databaseName,
final List<QualifiedName> tableNames) {
final List<List<QualifiedName>> tableNamesBatches = Lists.partition(tableNames, 500);
final List<ListenableFuture<Void>> processTablesBatchFutures = tableNamesBatches.stream().map(
subTableNames -> _processTables(databaseName, subTableNames)).collect(Collectors.toList());
return Futures.transform(Futures.successfulAsList(processTablesBatchFutures),
Functions.constant(null), defaultService);
}
@SuppressWarnings("checkstyle:methodname")
private ListenableFuture<Void> _processTables(final QualifiedName databaseName,
final List<QualifiedName> tableNames) {
final List<ListenableFuture<Optional<TableDto>>> getTableFutures = tableNames.stream()
.map(tableName -> service.submit(() -> {
Optional<TableDto> result = Optional.empty();
try {
result = getTable(tableName);
} catch (Exception e) {
log.error("Failed to retrieve table: {}", tableName);
elasticSearchUtil.log("ElasticSearchRefresh.getTable",
ElasticSearchDoc.Type.table.name(),
tableName.toString(), null, e.getMessage(), e, true);
}
return result;
}))
.collect(Collectors.toList());
return Futures.transformAsync(Futures.successfulAsList(getTableFutures),
input -> indexTableDtos(databaseName, input), defaultService);
}
/**
* Save all tables to index them in elastic search.
*
* @param databaseName database name
* @param dtos table dtos
* @return future
*/
private ListenableFuture<Void> indexTableDtos(final QualifiedName databaseName,
final List<Optional<TableDto>> dtos) {
return esService.submit(() -> {
final List<ElasticSearchDoc> docs = dtos.stream().filter(dto -> dto != null && dto.isPresent()).map(
tableDtoOptional -> {
final TableDto dto = tableDtoOptional.get();
final String userName = dto.getAudit() != null ? dto.getAudit().getCreatedBy() : "admin";
return new ElasticSearchDoc(dto.getName().toString(), dto, userName, false, refreshMarkerText);
}).collect(Collectors.toList());
log.info("Saving tables for database: {}", databaseName);
elasticSearchUtil.save(ElasticSearchDoc.Type.table.name(), docs);
return null;
});
}
/**
* Save all partitions to index them in elastic search.
*
* @param tableName table name
* @param dtos partition dtos
* @return future
*/
private ListenableFuture<Void> indexPartitionDtos(final QualifiedName tableName, final List<PartitionDto> dtos) {
return esService.submit(() -> {
final List<ElasticSearchDoc> docs = dtos.stream().filter(Objects::nonNull).map(
dto -> {
final String userName = dto.getAudit() != null ? dto.getAudit().getCreatedBy() : "admin";
return new ElasticSearchDoc(dto.getName().toString(), dto, userName, false, refreshMarkerText);
}).collect(Collectors.toList());
log.info("Saving partitions for tableName: {}", tableName);
elasticSearchUtil.save(ElasticSearchDoc.Type.partition.name(), docs);
return null;
});
}
protected List<String> getCatalogNames() {
return catalogService.getCatalogNames().stream().map(CatalogMappingDto::getCatalogName).collect(
Collectors.toList());
}
protected CatalogDto getCatalog(final String catalogName) {
return catalogService.get(QualifiedName.ofCatalog(catalogName));
}
protected DatabaseDto getDatabase(final QualifiedName databaseName) {
return databaseService.get(databaseName,
GetDatabaseServiceParameters.builder()
.disableOnReadMetadataIntercetor(false)
.includeTableNames(true)
.includeUserMetadata(true)
.build());
}
protected Optional<TableDto> getTable(final QualifiedName tableName) {
return tableService.get(tableName, GetTableServiceParameters.builder()
.disableOnReadMetadataIntercetor(false)
.includeInfo(true)
.includeDefinitionMetadata(true)
.includeDataMetadata(true)
.build());
}
}
| 118 |
0 | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/search/ElasticSearchDocConstants.java | /*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services.search;
/**
* ElasticSearchDocConstants.
*
* @author zhenl
*/
final class ElasticSearchDocConstants {
/**
* DEFINITION_METADATA.
*/
static final String DEFINITION_METADATA = "definitionMetadata";
/**
* DEFINITION_METADATA_OWNER.
*/
static final String DEFINITION_METADATA_OWNER = "owner";
/**
* DEFINITION_METADATA_TAGS.
*/
static final String DEFINITION_METADATA_TAGS = "tags";
/**
* DEFINITION_METADATA_DATA_HYGIENE.
*/
static final String DEFINITION_METADATA_DATA_HYGIENE = "data_hygiene";
/**
* DEFINITION_METADATA_LIFETIME.
*/
static final String DEFINITION_METADATA_LIFETIME = "lifetime";
/**
* DEFINITION_METADATA_EXTENDED_SCHEMA.
*/
static final String DEFINITION_METADATA_EXTENDED_SCHEMA = "extendedSchema";
/**
* DEFINITION_METADATA_DATA_DEPENDENCY.
*/
static final String DEFINITION_METADATA_DATA_DEPENDENCY = "data_dependency";
/**
* DEFINITION_METADATA_TABLE_COST.
*/
static final String DEFINITION_METADATA_TABLE_COST = "table_cost";
/**
* DEFINITION_METADATA_LIFECYCLE.
*/
static final String DEFINITION_METADATA_LIFECYCLE = "lifecycle";
/**
* DEFINITION_METADATA_AUDIENCE.
*/
static final String DEFINITION_METADATA_AUDIENCE = "audience";
/**
* DEFINITION_METADATA_MODEL.
*/
static final String DEFINITION_METADATA_MODEL = "model";
//TODO: remove after the data are fixed and copied to subjectAreas
/**
* DEFINITION_METADATA_SUBJECT_AREA.
*/
static final String DEFINITION_METADATA_SUBJECT_AREA = "subject_area";
/**
* DEFINITION_METADATA_SUBJECT_AREAS.
*/
static final String DEFINITION_METADATA_SUBJECT_AREAS = "subjectAreas";
/**
* DEFINITION_METADATA_DATA_CATEGORY.
*/
static final String DEFINITION_METADATA_DATA_CATEGORY = "data_category";
/**
* DEFINITION_METADATA_JOB.
*/
static final String DEFINITION_METADATA_JOB = "job";
/**
* DEFINITION_METADATA_TABLE_DESCRIPTION.
*/
static final String DEFINITION_METADATA_TABLE_DESCRIPTION = "table_description";
/**
* DEFINITION_DATA_MANAGEMENT.
*/
static final String DEFINITION_DATA_MANAGEMENT = "data_management";
private ElasticSearchDocConstants() {
}
}
| 119 |
0 | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/search/package-info.java | /*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This package includes elastic search integration classes.
*
* @author amajumdar
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.main.services.search;
import javax.annotation.ParametersAreNonnullByDefault;
| 120 |
0 | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/search/ElasticSearchDoc.java | /*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services.search;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.netflix.metacat.common.dto.CatalogDto;
import com.netflix.metacat.common.dto.DatabaseDto;
import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.common.dto.TableDto;
import lombok.Getter;
import java.time.Instant;
/**
* Document that gets stored in elastic search.
*
* @author amajumdar
*/
@Getter
public class ElasticSearchDoc {
/**
* Definition Metadata pull out fields.
*/
private static final String[] DEFINITION_METADATA_FIELDS = {
ElasticSearchDocConstants.DEFINITION_METADATA_OWNER,
ElasticSearchDocConstants.DEFINITION_METADATA_TAGS,
ElasticSearchDocConstants.DEFINITION_METADATA_DATA_HYGIENE,
ElasticSearchDocConstants.DEFINITION_METADATA_LIFETIME,
ElasticSearchDocConstants.DEFINITION_METADATA_EXTENDED_SCHEMA,
ElasticSearchDocConstants.DEFINITION_METADATA_DATA_DEPENDENCY,
ElasticSearchDocConstants.DEFINITION_METADATA_TABLE_COST,
ElasticSearchDocConstants.DEFINITION_METADATA_LIFECYCLE,
ElasticSearchDocConstants.DEFINITION_METADATA_AUDIENCE,
ElasticSearchDocConstants.DEFINITION_METADATA_MODEL,
ElasticSearchDocConstants.DEFINITION_METADATA_SUBJECT_AREA, //TODO: remove after the data is moved
ElasticSearchDocConstants.DEFINITION_METADATA_SUBJECT_AREAS,
ElasticSearchDocConstants.DEFINITION_METADATA_DATA_CATEGORY,
ElasticSearchDocConstants.DEFINITION_METADATA_JOB,
ElasticSearchDocConstants.DEFINITION_METADATA_TABLE_DESCRIPTION,
ElasticSearchDocConstants.DEFINITION_DATA_MANAGEMENT,
};
private String id;
private Object dto;
private Long timestamp;
private String user;
private boolean deleted;
private String refreshMarker;
/**
* Constructor.
*
* @param id doc id
* @param dto dto
* @param user user name
* @param deleted is it marked deleted
*/
public ElasticSearchDoc(final String id,
final Object dto,
final String user,
final boolean deleted) {
this.id = id;
this.dto = dto;
this.user = user;
this.deleted = deleted;
this.timestamp = Instant.now().toEpochMilli();
}
/**
* Constructor.
*
* @param id doc id
* @param dto dto
* @param user user name
* @param deleted is it marked deleted
* @param timestamp timestamp of the doc
*/
public ElasticSearchDoc(final String id,
final Object dto,
final String user,
final boolean deleted,
final long timestamp) {
this.id = id;
this.dto = dto;
this.user = user;
this.deleted = deleted;
this.timestamp = timestamp;
}
/**
* Constructor.
*
* @param id doc id
* @param dto dto
* @param user user name
* @param deleted is it marked deleted
* @param refreshMarker refresh marker
*/
public ElasticSearchDoc(final String id,
final Object dto,
final String user,
final boolean deleted,
final String refreshMarker) {
this.id = id;
this.dto = dto;
this.user = user;
this.deleted = deleted;
this.refreshMarker = refreshMarker;
this.timestamp = Instant.now().toEpochMilli();
}
/**
* addSearchableDefinitionMetadata.
*
* @param objectNode object node
*/
public void addSearchableDefinitionMetadata(final ObjectNode objectNode) {
final JsonNode jsonNode = objectNode.get(ElasticSearchDocConstants.DEFINITION_METADATA);
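// Copy only the whitelisted definition-metadata fields into a flat node used for
// search, leaving the full definitionMetadata blob untouched.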
final ObjectNode node = JsonNodeFactory.instance.objectNode();
for (final String tag : DEFINITION_METADATA_FIELDS) {
node.set(tag, jsonNode.get(tag));
}
objectNode.set(Field.SEARCHABLE_DEFINITION_METADATA, node);
}
/**
* Document types.
*/
public enum Type {
/**
* Document types.
*/
catalog(CatalogDto.class), database(DatabaseDto.class), table(TableDto.class),
/**
* Document types.
*/
mview(TableDto.class), partition(PartitionDto.class);
private Class clazz;
Type(final Class clazz) {
this.clazz = clazz;
}
public Class getClazz() {
return clazz;
}
}
/**
* Document context attributes.
*/
protected static class Field {
public static final String USER = "user_";
public static final String DELETED = "deleted_";
public static final String REFRESH_MARKER = "refreshMarker_";
public static final String SEARCHABLE_DEFINITION_METADATA = "searchableDefinitionMetadata";
public static final String TIMESTAMP = "timestamp";
public static final String DATA_METADATA = "dataMetadata";
}
}
| 121 |
0 | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/notifications/NotificationService.java | /*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.services.notifications;
import com.netflix.metacat.common.server.events.MetacatCreateTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePartitionPostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatRenameTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatSaveTablePartitionMetadataOnlyPostEvent;
import com.netflix.metacat.common.server.events.MetacatSaveTablePartitionPostEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateTablePostEvent;
/**
* Interface for services which will provide external notifications based on internal events. The structure and
* destinations of the notifications are left up to the implementation.
*
* @author tgianos
* @since 0.1.47
*/
public interface NotificationService {
/**
* Publish information about partitions being added.
*
* @param event The event passed within the JVM after a partition has been successfully added
*/
void notifyOfPartitionAddition(MetacatSaveTablePartitionPostEvent event);
/**
* Publish information about partition metadata save only.
*
* @param event The event passed within the JVM after a partition has been successfully added
*/
void notifyOfPartitionMetdataDataSaveOnly(MetacatSaveTablePartitionMetadataOnlyPostEvent event);
/**
* Publish information about partitions being deleted.
*
* @param event The event passed within the JVM after a partition has been successfully deleted
*/
void notifyOfPartitionDeletion(MetacatDeleteTablePartitionPostEvent event);
/**
* Publish information about a table being created.
*
* @param event The event passed within the JVM after a table has been successfully created
*/
void notifyOfTableCreation(MetacatCreateTablePostEvent event);
/**
* Publish information about a table being deleted.
*
* @param event The event passed within the JVM after a table has been successfully deleted
*/
void notifyOfTableDeletion(MetacatDeleteTablePostEvent event);
/**
* Publish information about a table being renamed.
*
* @param event The event passed within the JVM after a table has been successfully renamed
*/
void notifyOfTableRename(MetacatRenameTablePostEvent event);
/**
* Publish information about a table being updated.
*
* @param event The event passed within the JVM after a table has been successfully updated
*/
void notifyOfTableUpdate(MetacatUpdateTablePostEvent event);
}
| 122 |
0 | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/notifications/DefaultNotificationServiceImpl.java | /*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.services.notifications;
import com.netflix.metacat.common.server.events.MetacatCreateTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePartitionPostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatRenameTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatSaveTablePartitionMetadataOnlyPostEvent;
import com.netflix.metacat.common.server.events.MetacatSaveTablePartitionPostEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateTablePostEvent;
import lombok.extern.slf4j.Slf4j;
/**
* This is a default implementation of the NotificationService interface. It doesn't really do anything other than
* log the event that would have generated some sort of external notification in a real instance. This class exists
* primarily to handle returns from providers when the "plugin" isn't enabled instead of returning null which is
* prohibited by the Provider interface definition.
*
* @author tgianos
* @since 0.1.47
*/
@Slf4j
public class DefaultNotificationServiceImpl implements NotificationService {
/**
* {@inheritDoc}
*/
@Override
public void notifyOfPartitionAddition(
final MetacatSaveTablePartitionPostEvent event) {
log.debug(event.toString());
}
/**
* {@inheritDoc}
*/
@Override
public void notifyOfPartitionDeletion(final MetacatDeleteTablePartitionPostEvent event) {
log.debug(event.toString());
}
/**
* {@inheritDoc}
*/
@Override
public void notifyOfPartitionMetdataDataSaveOnly(final MetacatSaveTablePartitionMetadataOnlyPostEvent event) {
log.debug(event.toString());
}
/**
* {@inheritDoc}
*/
@Override
public void notifyOfTableCreation(final MetacatCreateTablePostEvent event) {
log.debug(event.toString());
}
/**
* {@inheritDoc}
*/
@Override
public void notifyOfTableDeletion(final MetacatDeleteTablePostEvent event) {
log.debug(event.toString());
}
/**
* {@inheritDoc}
*/
@Override
public void notifyOfTableRename(final MetacatRenameTablePostEvent event) {
log.debug(event.toString());
}
/**
* {@inheritDoc}
*/
@Override
public void notifyOfTableUpdate(final MetacatUpdateTablePostEvent event) {
log.debug(event.toString());
}
}
| 123 |
0 | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/notifications/package-info.java | /*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Classes related to sending notifications out of Metacat via implementations of the service interface.
*
* @author tgianos
* @since 0.1.46
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.main.services.notifications;
import javax.annotation.ParametersAreNonnullByDefault;
| 124 |
0 | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/notifications | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/notifications/sns/SNSNotificationPartitionAddMsg.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.services.notifications.sns;
/**
* Enum class for partition add payload message.
*
* @author zhenl
* @since 1.2.0
*/
public enum SNSNotificationPartitionAddMsg {
/**
* Attached Valid Partition Key.
*/
ATTACHED_VALID_PARITITION_KEY,
/**
* Invalid Partition Key Format.
*/
INVALID_PARTITION_KEY_FORMAT,
/**
* All Future Partition Keys.
*/
ALL_FUTURE_PARTITION_KEYS,
/**
* Empty Delete Column.
*/
EMPTY_DELETE_COLUMN,
/**
* No Candidate Partition Keys.
*/
NO_CANDIDATE_PARTITION_KEYS,
/**
* Missing Metadata Info For Partition Key.
*/
MISSING_METADATA_INFO_FOR_PARTITION_KEY,
/**
* Failure of Getting Latest Partition Key.
*/
FAILURE_OF_GET_LATEST_PARTITION_KEY,
/**
* Partition Key Not Enabled.
*/
PARTITION_KEY_UNABLED
}
| 125 |
0 | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/notifications | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/notifications/sns/SNSNotificationMetric.java | /*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.services.notifications.sns;
import com.google.common.base.Throwables;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.notifications.sns.SNSMessage;
import com.netflix.metacat.common.server.monitoring.Metrics;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Timer;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.Nullable;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
/**
* SNS Notification Metric.
*
* @author zhenl
* @since 1.1.0
*/
@Slf4j
@Getter
public class SNSNotificationMetric {
private final Registry registry;
private final HashMap<String, Counter> counterHashMap = new HashMap<>();
/**
* Constructor.
*
* @param registry The registry handle of spectator
*/
public SNSNotificationMetric(
final Registry registry
) {
this.registry = registry;
this.counterHashMap.put(Metrics.CounterSNSNotificationTablePartitionAdd.getMetricName(),
registry.counter(registry.createId(Metrics.CounterSNSNotificationTablePartitionAdd.getMetricName())
.withTags(Metrics.tagStatusSuccessMap)));
this.counterHashMap.put(Metrics.CounterSNSNotificationTablePartitionDelete.getMetricName(),
registry.counter(registry.createId(Metrics.CounterSNSNotificationTablePartitionDelete.getMetricName())
.withTags(Metrics.tagStatusSuccessMap)));
this.counterHashMap.put(Metrics.CounterSNSNotificationPartitionDelete.getMetricName(),
registry.counter(registry.createId(Metrics.CounterSNSNotificationPartitionDelete.getMetricName())
.withTags(Metrics.tagStatusSuccessMap)));
this.counterHashMap.put(Metrics.CounterSNSNotificationTableCreate.getMetricName(),
registry.counter(registry.createId(Metrics.CounterSNSNotificationTableCreate.getMetricName())
.withTags(Metrics.tagStatusSuccessMap)));
this.counterHashMap.put(Metrics.CounterSNSNotificationTableDelete.getMetricName(),
registry.counter(registry.createId(Metrics.CounterSNSNotificationTableDelete.getMetricName())
.withTags(Metrics.tagStatusSuccessMap)));
this.counterHashMap.put(Metrics.CounterSNSNotificationTableRename.getMetricName(),
registry.counter(registry.createId(Metrics.CounterSNSNotificationTableRename.getMetricName())
.withTags(Metrics.tagStatusSuccessMap)));
this.counterHashMap.put(Metrics.CounterSNSNotificationTableUpdate.getMetricName(),
registry.counter(registry.createId(Metrics.CounterSNSNotificationTableUpdate.getMetricName())
.withTags(Metrics.tagStatusSuccessMap)));
this.counterHashMap.put(Metrics.CounterSNSNotificationPublishMessageSizeExceeded.getMetricName(),
registry.counter(
registry.createId(Metrics.CounterSNSNotificationPublishMessageSizeExceeded.getMetricName())));
this.counterHashMap.put(Metrics.CounterSNSNotificationPartitionAdd.getMetricName(),
registry.counter(
registry.createId(Metrics.CounterSNSNotificationPartitionAdd.getMetricName())));
this.counterHashMap.put(Metrics.CounterSNSNotificationPublishFallback.getMetricName(),
registry.counter(
registry.createId(Metrics.CounterSNSNotificationPublishFallback.getMetricName())));
}
void counterIncrement(final String counterKey) {
if (counterHashMap.containsKey(counterKey)) {
this.counterHashMap.get(counterKey).increment();
} else {
log.error("SNS Notification does not suport counter for {}", counterKey);
}
}
void handleException(
final QualifiedName name,
final String message,
final String counterKey,
@Nullable final SNSMessage payload,
final Exception e
) {
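// Log the failure, tag a failure counter with the qualified-name parts, then rethrow
// (via Throwables.propagate) so the caller still observes the original exception.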
log.error("{} with payload: {}", message, payload, e);
final Map<String, String> tags = new HashMap<>(name.parts());
tags.putAll(Metrics.tagStatusFailureMap);
this.registry.counter(this.registry.createId(counterKey).withTags(tags)).increment();
Throwables.propagate(e);
}
void recordTime(final SNSMessage<?> message, final String timeName) {
final Timer timer = this.registry.timer(
timeName,
Metrics.TagEventsType.getMetricName(),
message.getClass().getName()
);
timer.record(this.registry.clock().wallTime() - message.getTimestamp(), TimeUnit.MILLISECONDS);
}
void recordPartitionLatestDeleteColumn(final QualifiedName name,
@Nullable final String latestDeleteColumn,
final String message) {
final Map<String, String> tags = new HashMap<>(name.parts());
if (latestDeleteColumn != null) {
tags.put("latestDeleteColumn", latestDeleteColumn);
}
tags.put("message", message);
this.registry.counter(
this.registry.createId(Metrics.CounterSNSNotificationPartitionLatestDeleteColumnAdd.getMetricName())
.withTags(tags)).increment();
}
}
| 126 |
0 | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/notifications | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/notifications/sns/SNSNotificationServiceImpl.java | /*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.services.notifications.sns;
import com.amazonaws.services.sns.AmazonSNS;
import com.amazonaws.services.sns.model.PublishResult;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.github.fge.jsonpatch.JsonPatch;
import com.github.fge.jsonpatch.diff.JsonDiff;
import com.google.common.base.Throwables;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.dto.notifications.sns.SNSMessage;
import com.netflix.metacat.common.dto.notifications.sns.SNSMessageType;
import com.netflix.metacat.common.dto.notifications.sns.messages.AddPartitionMessage;
import com.netflix.metacat.common.dto.notifications.sns.messages.CreateTableMessage;
import com.netflix.metacat.common.dto.notifications.sns.messages.DeletePartitionMessage;
import com.netflix.metacat.common.dto.notifications.sns.messages.DeleteTableMessage;
import com.netflix.metacat.common.dto.notifications.sns.messages.RenameTableMessage;
import com.netflix.metacat.common.dto.notifications.sns.messages.UpdateOrRenameTableMessageBase;
import com.netflix.metacat.common.dto.notifications.sns.messages.UpdateTableMessage;
import com.netflix.metacat.common.dto.notifications.sns.messages.UpdateTablePartitionsMessage;
import com.netflix.metacat.common.dto.notifications.sns.payloads.TablePartitionsUpdatePayload;
import com.netflix.metacat.common.dto.notifications.sns.payloads.UpdatePayload;
import com.netflix.metacat.common.server.events.MetacatCreateTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePartitionPostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatRenameTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatSaveTablePartitionMetadataOnlyPostEvent;
import com.netflix.metacat.common.server.events.MetacatSaveTablePartitionPostEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateTablePostEvent;
import com.netflix.metacat.common.server.monitoring.Metrics;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.main.configs.SNSNotificationsConfig;
import com.netflix.metacat.main.services.notifications.NotificationService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.context.event.EventListener;
import javax.annotation.Nullable;
import javax.validation.constraints.Size;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* Implementation of the NotificationService using Amazon SNS.
*
* @author tgianos
* @since 0.1.47
*/
@Slf4j
public class SNSNotificationServiceImpl implements NotificationService {
private final AtomicBoolean isClientPoolDown;
private AmazonSNS client;
private final String tableTopicArn;
private final String partitionTopicArn;
private final ObjectMapper mapper;
private final Config config;
private SNSNotificationMetric notificationMetric;
private SNSNotificationServiceUtil snsNotificationServiceUtil;
/**
* Constructor.
*
* @param client The SNS client to use to publish notifications
* @param tableTopicArn The topic to publish table related notifications to
* @param partitionTopicArn The topic to publish partition related notifications to
* @param mapper The object mapper to use to convert objects to JSON strings
* @param config The system config
* @param notificationMetric The SNS notification metric
* @param snsNotificationServiceUtil The SNS notification service util
*/
public SNSNotificationServiceImpl(
final AmazonSNS client,
@Size(min = 1) final String tableTopicArn,
@Size(min = 1) final String partitionTopicArn,
final ObjectMapper mapper,
final Config config,
final SNSNotificationMetric notificationMetric,
final SNSNotificationServiceUtil snsNotificationServiceUtil
) {
this.client = client;
this.tableTopicArn = tableTopicArn;
this.partitionTopicArn = partitionTopicArn;
this.mapper = mapper;
this.config = config;
this.notificationMetric = notificationMetric;
this.snsNotificationServiceUtil = snsNotificationServiceUtil;
this.isClientPoolDown = new AtomicBoolean();
}
/**
* {@inheritDoc}
*/
@Override
@EventListener
public void notifyOfPartitionAddition(final MetacatSaveTablePartitionPostEvent event) {
log.debug("Received SaveTablePartitionPostEvent {}", event);
final String name = event.getName().toString();
final long timestamp = event.getRequestContext().getTimestamp();
final String requestId = event.getRequestContext().getId();
// Publish a global message stating how many partitions were updated for the table to the table topic
final TablePartitionsUpdatePayload partitionsUpdatePayload;
if (this.config.isSnsNotificationAttachPartitionIdsEnabled() && event.getPartitions() != null) {
partitionsUpdatePayload = this.snsNotificationServiceUtil.
createTablePartitionsUpdatePayload(event.getPartitions(), event);
} else {
partitionsUpdatePayload = new TablePartitionsUpdatePayload(null, event.getPartitions().size(), 0,
SNSNotificationPartitionAddMsg.PARTITION_KEY_UNABLED.name(),
SNSNotificationServiceUtil.getPartitionNameListFromDtos(event.getPartitions()));
}
final UpdateTablePartitionsMessage tableMessage = new UpdateTablePartitionsMessage(
UUID.randomUUID().toString(),
timestamp,
requestId,
name,
partitionsUpdatePayload);
this.publishNotification(this.tableTopicArn, this.config.getFallbackSnsTopicTableArn(),
tableMessage, event.getName(),
"Unable to publish table partition add notification",
Metrics.CounterSNSNotificationTablePartitionAdd.getMetricName());
//publish the delete column key metric after publishing message
if (this.config.isSnsNotificationAttachPartitionIdsEnabled()) {
this.notificationMetric.recordPartitionLatestDeleteColumn(
event.getName(), partitionsUpdatePayload.getLatestDeleteColumnValue(),
partitionsUpdatePayload.getMessage());
}
if (config.isSnsNotificationTopicPartitionEnabled()) {
            for (final PartitionDto partition : event.getPartitions()) {
                final AddPartitionMessage message = new AddPartitionMessage(
UUID.randomUUID().toString(),
timestamp,
requestId,
name,
partition
);
this.publishNotification(this.partitionTopicArn, this.config.getFallbackSnsTopicPartitionArn(),
message, event.getName(),
"Unable to publish partition creation notification",
Metrics.CounterSNSNotificationPartitionAdd.getMetricName());
log.debug("Published create partition message {} on {}", message, this.partitionTopicArn);
}
}
}
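    // Rough shape of what this publishes (a sketch; exact JSON field names depend on the
    // Jackson serialization of SNSMessage and the payload types):
    //  - table topic: one UpdateTablePartitionsMessage carrying a TablePartitionsUpdatePayload
    //    (partition counts, the latest delete column value, and the partition name list).
    //  - partition topic (when enabled): one AddPartitionMessage per partition in the event.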
/**
* {@inheritDoc}
*/
@Override
@EventListener
public void notifyOfPartitionMetdataDataSaveOnly(final MetacatSaveTablePartitionMetadataOnlyPostEvent event) {
log.debug("Received SaveTablePartitionMetadataOnlyPostEvent {}", event);
}
/**
* {@inheritDoc}
*/
@Override
@EventListener
public void notifyOfPartitionDeletion(final MetacatDeleteTablePartitionPostEvent event) {
log.debug("Received DeleteTablePartition event {}", event);
final String name = event.getName().toString();
final long timestamp = event.getRequestContext().getTimestamp();
final String requestId = event.getRequestContext().getId();
final TablePartitionsUpdatePayload partitionsUpdatePayload;
partitionsUpdatePayload = new TablePartitionsUpdatePayload(null, 0, event.getPartitions().size(),
SNSNotificationPartitionAddMsg.PARTITION_KEY_UNABLED.name(),
SNSNotificationServiceUtil.getPartitionNameListFromDtos(event.getPartitions()));
final UpdateTablePartitionsMessage tableMessage = new UpdateTablePartitionsMessage(
UUID.randomUUID().toString(),
timestamp,
requestId,
name,
partitionsUpdatePayload
);
this.publishNotification(this.tableTopicArn, this.config.getFallbackSnsTopicTableArn(),
tableMessage, event.getName(),
"Unable to publish table partition delete notification",
Metrics.CounterSNSNotificationTablePartitionDelete.getMetricName());
        if (config.isSnsNotificationTopicPartitionEnabled()) {
            for (final String partitionId : event.getPartitionIds()) {
                final DeletePartitionMessage message = new DeletePartitionMessage(
UUID.randomUUID().toString(),
timestamp,
requestId,
name,
partitionId
);
this.publishNotification(this.partitionTopicArn, this.config.getFallbackSnsTopicPartitionArn(),
message, event.getName(),
"Unable to publish partition deletion notification",
Metrics.CounterSNSNotificationPartitionDelete.getMetricName());
log.debug("Published delete partition message {} on {}", message, this.partitionTopicArn);
}
}
}
/**
* {@inheritDoc}
*/
@Override
@EventListener
public void notifyOfTableCreation(final MetacatCreateTablePostEvent event) {
log.debug("Received CreateTableEvent {}", event);
final CreateTableMessage message = new CreateTableMessage(
UUID.randomUUID().toString(),
event.getRequestContext().getTimestamp(),
event.getRequestContext().getId(),
event.getName().toString(),
event.getTable()
);
this.publishNotification(this.tableTopicArn, this.config.getFallbackSnsTopicTableArn(),
message, event.getName(),
"Unable to publish create table notification",
Metrics.CounterSNSNotificationTableCreate.getMetricName());
}
/**
* {@inheritDoc}
*/
@Override
@EventListener
public void notifyOfTableDeletion(final MetacatDeleteTablePostEvent event) {
log.debug("Received DeleteTableEvent {}", event);
final DeleteTableMessage message = new DeleteTableMessage(
UUID.randomUUID().toString(),
event.getRequestContext().getTimestamp(),
event.getRequestContext().getId(),
event.getName().toString(),
event.getTable()
);
this.publishNotification(this.tableTopicArn, this.config.getFallbackSnsTopicTableArn(),
message, event.getName(),
"Unable to publish delete table notification",
Metrics.CounterSNSNotificationTableDelete.getMetricName());
}
/**
* {@inheritDoc}
*/
@Override
@EventListener
public void notifyOfTableRename(final MetacatRenameTablePostEvent event) {
log.debug("Received RenameTableEvent {}", event);
        final RenameTableMessage message = (RenameTableMessage) this.createUpdateOrRenameTableMessage(
UUID.randomUUID().toString(),
event.getRequestContext().getTimestamp(),
event.getRequestContext().getId(),
event.getName(),
event.getOldTable(),
event.getCurrentTable(),
"Unable to create json patch for rename table notification",
Metrics.CounterSNSNotificationTableRename.getMetricName(),
SNSMessageType.TABLE_RENAME
);
this.publishNotification(this.tableTopicArn, this.config.getFallbackSnsTopicTableArn(),
message, event.getName(),
"Unable to publish rename table notification",
Metrics.CounterSNSNotificationTableRename.getMetricName());
}
/**
* {@inheritDoc}
*/
@Override
@EventListener
public void notifyOfTableUpdate(final MetacatUpdateTablePostEvent event) {
log.debug("Received UpdateTableEvent {}", event);
final SNSMessage<?> message;
final long timestamp = event.getRequestContext().getTimestamp();
final String requestId = event.getRequestContext().getId();
final QualifiedName name = event.getName();
final TableDto oldTable = event.getOldTable();
final TableDto currentTable = event.getCurrentTable();
if (event.isLatestCurrentTable()) {
            message = this.createUpdateOrRenameTableMessage(
UUID.randomUUID().toString(),
timestamp,
requestId,
name,
oldTable,
currentTable,
"Unable to create json patch for update table notification",
Metrics.CounterSNSNotificationTableUpdate.getMetricName(),
SNSMessageType.TABLE_UPDATE
);
} else {
            // Send a null payload if we failed to get the latest version
            // of the current table. This signals consumers to call back
            // to Metacat for the current table state.
message = new SNSMessage<Void>(
UUID.randomUUID().toString(),
timestamp,
requestId,
SNSMessageType.TABLE_UPDATE,
name.toString(),
null);
}
this.publishNotification(this.tableTopicArn, this.config.getFallbackSnsTopicTableArn(),
message, event.getName(),
"Unable to publish update table notification",
Metrics.CounterSNSNotificationTableUpdate.getMetricName());
}
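    // Consumer-side note (a sketch, not part of this service): a TABLE_UPDATE message that
    // arrives with a null payload means the event could not carry the latest table state;
    // consumers should call back to Metacat to re-fetch the table instead of applying a patch.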
    private UpdateOrRenameTableMessageBase createUpdateOrRenameTableMessage(
final String id,
final long timestamp,
final String requestId,
final QualifiedName name,
final TableDto oldTable,
final TableDto currentTable,
final String exceptionMessage,
final String metricName,
final SNSMessageType messageType
) {
try {
final JsonPatch patch = JsonDiff.asJsonPatch(
this.mapper.valueToTree(oldTable),
this.mapper.valueToTree(currentTable)
);
if (messageType == SNSMessageType.TABLE_UPDATE) {
return new UpdateTableMessage(
id,
timestamp,
requestId,
name.toString(),
new UpdatePayload<>(oldTable, patch)
);
} else {
return new RenameTableMessage(
id,
timestamp,
requestId,
name.toString(),
new UpdatePayload<>(oldTable, patch)
);
}
} catch (final Exception e) {
this.notificationMetric.handleException(
name,
exceptionMessage,
metricName,
null,
e
);
}
return null;
}
private void publishNotification(
final String arn,
@Nullable final String fallbackArn,
final SNSMessage<?> message,
final QualifiedName name,
final String errorMessage,
final String counterKey
) {
this.notificationMetric.recordTime(message, Metrics.TimerNotificationsBeforePublishDelay.getMetricName());
try {
//
// Publish the event to original SNS topic. If we receive an error from SNS, we will then try publishing
// to the fallback topic.
//
try {
publishNotification(arn, message, counterKey);
} catch (final Exception exception) {
if (fallbackArn != null) {
                    log.info("Publishing message to fallback topic {} because of error {}",
fallbackArn, exception.getMessage());
notificationMetric.counterIncrement(
Metrics.CounterSNSNotificationPublishFallback.getMetricName());
publishNotification(fallbackArn, message, counterKey);
} else {
throw exception;
}
}
} catch (Exception e) {
notificationMetric.handleException(name, errorMessage, counterKey, message, e);
}
}
private void publishNotification(
final String arn,
final SNSMessage<?> message,
final String counterKey
) throws Exception {
PublishResult result = null;
try {
result = publishNotification(arn, this.mapper.writeValueAsString(message));
} catch (Exception exception) {
log.error("SNS Publish message failed.", exception);
notificationMetric.counterIncrement(
Metrics.CounterSNSNotificationPublishMessageSizeExceeded.getMetricName());
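            // Assume the failure was due to exceeding the SNS message size limit and retry with
            // a payload-free message so consumers still learn that the entity changed.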
final SNSMessage<Void> voidMessage = new SNSMessage<>(message.getId(),
message.getTimestamp(), message.getRequestId(), message.getType(), message.getName(),
null);
result = publishNotification(arn, this.mapper.writeValueAsString(voidMessage));
}
log.info("Successfully published message to topic {} with id {}", arn, result.getMessageId());
log.debug("Successfully published message {} to topic {} with id {}", message, arn, result.getMessageId());
notificationMetric.counterIncrement(counterKey);
notificationMetric.recordTime(message, Metrics.TimerNotificationsPublishDelay.getMetricName());
}
private PublishResult publishNotification(final String arn, final String message) {
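        // While the client pool is flagged as down, serialize publishes behind the same monitor
        // used by reinitializeClient() so no publish races a client that is mid-restart.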
if (isClientPoolDown.get()) {
synchronized (this) {
return publishNotificationWithNoCheck(arn, message);
}
} else {
return publishNotificationWithNoCheck(arn, message);
}
}
private PublishResult publishNotificationWithNoCheck(final String arn, final String message) {
try {
return this.client.publish(arn, message);
} catch (Exception e) {
//
// SNS Http client pool once shutdown cannot be recovered. Hence we are shutting down the SNS client
// and recreating a new instance.
//
return Throwables.getCausalChain(e).stream()
.filter(ex -> ex instanceof IllegalStateException
&& ex.getMessage().contains("Connection pool shut down"))
.findFirst()
.map(ex -> {
if (isClientPoolDown.compareAndSet(false, true)) {
reinitializeClient();
}
return publishNotification(arn, message);
}).orElseThrow(() -> Throwables.propagate(e));
}
}
private synchronized void reinitializeClient() {
if (isClientPoolDown.get()) {
log.warn("SNS HTTP connection pool is down. It will be restarted.");
try {
this.client.shutdown();
} catch (Exception exception) {
log.warn("Failed shutting down SNS client.", exception);
}
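            // Recreate the client directly via the config factory; the shut-down client's
            // HTTP connection pool cannot be revived, so a fresh instance is required.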
this.client = new SNSNotificationsConfig().amazonSNS();
isClientPoolDown.set(false);
}
}
}
| 127 |
0 | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/notifications | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/notifications/sns/SNSNotificationServiceUtil.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.services.notifications.sns;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.annotations.VisibleForTesting;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.common.dto.notifications.sns.payloads.TablePartitionsUpdatePayload;
import com.netflix.metacat.common.server.events.MetacatEvent;
import com.netflix.metacat.common.server.partition.util.PartitionUtil;
import com.netflix.metacat.common.server.usermetadata.UserMetadataService;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang.StringUtils;
import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.time.Instant;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.TimeZone;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
/**
* The util class for SNS Notification service.
*
* @author zhenl
* @since 1.2.0
*/
@Slf4j
public final class SNSNotificationServiceUtil {
private static final String PARTITION_COLUMN_DATA_TYPE_PATH = "/data_dependency/partition_column_date_type";
private static final String DELETION_COLUMN_PATH = "/data_hygiene/delete_column";
private static final Set<String> PST_TIME = new HashSet<String>(Arrays.asList("region", "pacific"));
//Timestamp in seconds: 1522257960 or 1367992474.293378
//Timestamp in milliseconds: 1522257960000 or 1367992474000.293378
//ISO basic date format: 20180101
private static final Pattern TIMESTAMP_FORMAT = Pattern.compile("^(?<time>\\d{10})(?:\\d{3})?(?:\\.\\d+)?$");
private static final Pattern ISO_BASIC = Pattern.compile("^\\d{8}$");
private static final int PARTITIONS_UPDATED_LIST_MAX_SIZE = 1000;
    private final UserMetadataService userMetadataService;
private final DateFormat simpleDateFormatRegional = new SimpleDateFormat("yyyyMMdd");
private final DateFormat simpleDateFormatUTC = new SimpleDateFormat("yyyyMMdd");
/**
* SNS Notification Service Util constructor.
*
* @param userMetadataService user metadata service
*/
public SNSNotificationServiceUtil(
final UserMetadataService userMetadataService
) {
this.userMetadataService = userMetadataService;
this.simpleDateFormatRegional.setTimeZone(TimeZone.getTimeZone("America/Los_Angeles"));
this.simpleDateFormatUTC.setTimeZone(TimeZone.getTimeZone("UTC"));
}
/**
     * Create the table partition add payload.
     * The logic below primarily calculates the latest deletion column value in a batch of
     * partitions. The latest delete column value must:
     * (1) have a valid timestamp/date format
     * (2) be the latest timestamp among the delete column values
     * (3) be less than or equal to today (UTC now)
*
* @param partitionDtos partition DTOs
* @param event Metacat event
* @return TablePartitionsUpdatePayload
*/
public TablePartitionsUpdatePayload createTablePartitionsUpdatePayload(
final List<PartitionDto> partitionDtos,
final MetacatEvent event) {
final List<String> deleteColumnValues;
String latestDeleteColumnValue = null;
String message;
try {
final Optional<ObjectNode> objectNode = this.userMetadataService.getDefinitionMetadata(
QualifiedName.ofTable(event.getName().getCatalogName(), event.getName().getDatabaseName(),
event.getName().getTableName()));
//Mark as missing metadata if any of delete column or partition column data type is missing
if (objectNode.isPresent()
&& !objectNode.get().at(DELETION_COLUMN_PATH).isMissingNode()
&& !objectNode.get().at(PARTITION_COLUMN_DATA_TYPE_PATH).isMissingNode()) {
final String deleteColumn = objectNode.get().at(DELETION_COLUMN_PATH).textValue();
//Mark with message empty delete column and return
if (StringUtils.isEmpty(deleteColumn)) {
return new TablePartitionsUpdatePayload(
null,
partitionDtos.size(),
0,
SNSNotificationPartitionAddMsg.EMPTY_DELETE_COLUMN.name(),
getPartitionNameListFromDtos(partitionDtos)
);
}
deleteColumnValues = getSortedDeletionPartitionKeys(partitionDtos, deleteColumn);
//Calculate the latest partition key from candidates
if (deleteColumnValues != null && !deleteColumnValues.isEmpty()) {
message = SNSNotificationPartitionAddMsg.ALL_FUTURE_PARTITION_KEYS.name();
//using utc now as today
final long nowSecond = Instant.now().getEpochSecond();
final boolean regional = PST_TIME.contains(
objectNode.get().at(PARTITION_COLUMN_DATA_TYPE_PATH).textValue());
//convert the value to utc then compare
for (String val : deleteColumnValues) {
try {
final Long timestamp = getTimeStamp(val, regional);
if (timestamp <= nowSecond) {
latestDeleteColumnValue = deleteColumn + "=" + val; //the delete column with value
message = SNSNotificationPartitionAddMsg.ATTACHED_VALID_PARITITION_KEY.name();
break;
}
} catch (ParseException ex) {
message = SNSNotificationPartitionAddMsg.INVALID_PARTITION_KEY_FORMAT.name();
log.debug("Failure of getting latest key due to invalid timestamp format {} {}:{}",
event.getName().getTableName(), deleteColumn, val);
break;
}
}
} else {
message = SNSNotificationPartitionAddMsg.NO_CANDIDATE_PARTITION_KEYS.name();
}
} else {
message = SNSNotificationPartitionAddMsg.MISSING_METADATA_INFO_FOR_PARTITION_KEY.name();
}
} catch (Exception ex) {
message = SNSNotificationPartitionAddMsg.FAILURE_OF_GET_LATEST_PARTITION_KEY.name();
            log.error("Failure of createTablePartitionsUpdatePayload: {}", ex.getMessage(), ex);
}
return new TablePartitionsUpdatePayload(
latestDeleteColumnValue,
partitionDtos.size(),
0,
message,
getPartitionNameListFromDtos(partitionDtos)
);
}
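    // Worked example (illustrative values only): given partitions named "dateint=20380101" and
    // "dateint=20180101", a delete_column of "dateint" and a regional partition_column_date_type
    // (e.g. "pacific"), the future key 20380101 is skipped and the payload carries
    // latestDeleteColumnValue "dateint=20180101" with message ATTACHED_VALID_PARITITION_KEY.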
/**
     * Get the deletion column values in descending order.
*
* @param partitionDtos partition DTOs
* @param deleteColumn delete column name
     * @return deletion column values in descending order
*/
@VisibleForTesting
    static List<String> getSortedDeletionPartitionKeys(final List<PartitionDto> partitionDtos,
final String deleteColumn) {
return partitionDtos.stream()
.map(x -> PartitionUtil.getPartitionKeyValues(x.getName().toString()).get(deleteColumn))
.filter(Objects::nonNull)
.sorted(Comparator.reverseOrder())
.collect(Collectors.toList());
}
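    // For example (illustrative): partitions named "dateint=20180102/h=0" and "dateint=20180101/h=0"
    // with deleteColumn "dateint" yield ["20180102", "20180101"].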
/**
     * Get the partition name list from a list of partitionDtos. If the input contains more than
     * PARTITIONS_UPDATED_LIST_MAX_SIZE elements, an empty list is returned instead, which serves
     * as a signal that the complete list could not be included.
*
* @param partitionDtos partition DTOs
* @return list of partition ids from the input list
*/
protected static List<String> getPartitionNameListFromDtos(final List<PartitionDto> partitionDtos) {
if (partitionDtos.size() > PARTITIONS_UPDATED_LIST_MAX_SIZE) {
            // an empty list signals that the complete partition list could not be included
return Collections.emptyList();
}
return partitionDtos.stream()
.map(dto -> dto.getName().getPartitionName())
.collect(Collectors.toList());
}
/**
     * Convert a string to a timestamp.
     * Three formats are accepted for now: epoch seconds, epoch milliseconds, and the ISO basic
     * date format (yyyyMMdd).
*
* @param timeStr time in string
* @param regional in pst
* @return timestamp
* @throws ParseException parsing error
*/
public Long getTimeStamp(final String timeStr, final boolean regional) throws ParseException {
final Matcher m = TIMESTAMP_FORMAT.matcher(timeStr);
if (m.find()) {
return Long.parseLong(m.group("time"));
}
if (ISO_BASIC.matcher(timeStr).matches()) {
if (regional) {
return this.simpleDateFormatRegional.parse(timeStr).toInstant().getEpochSecond();
} else {
return this.simpleDateFormatUTC.parse(timeStr).toInstant().getEpochSecond();
}
}
throw new ParseException("Unknown format", 0);
}
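    // Illustrative behavior (assuming the formats documented above):
    //   getTimeStamp("1522257960", false)    -> 1522257960L (epoch seconds)
    //   getTimeStamp("1522257960000", false) -> 1522257960L (millis; the regex keeps the first 10 digits)
    //   getTimeStamp("20180101", false)      -> epoch seconds of 2018-01-01 00:00:00 UTC
    //   getTimeStamp("2018-01-01", false)    -> throws ParseException("Unknown format")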
}
| 128 |
0 | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/notifications | Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/notifications/sns/package-info.java | /*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Classes related to setting up and sending SNS Notifications.
*
* @author tgianos
* @since 0.1.47
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.main.services.notifications.sns;
import javax.annotation.ParametersAreNonnullByDefault;
| 129 |
0 | Create_ds/metacat/metacat-metadata/src/functionalTest/java/com/netflix/metacat | Create_ds/metacat/metacat-metadata/src/functionalTest/java/com/netflix/metacat/metadata/package-info.java | /*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat Metadata functional test classes.
*/
package com.netflix.metacat.metadata;
| 130 |
0 | Create_ds/metacat/metacat-metadata/src/functionalTest/java/com/netflix/metacat/metadata | Create_ds/metacat/metacat-metadata/src/functionalTest/java/com/netflix/metacat/metadata/store/package-info.java | /*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat Metadata functional test classes.
*/
package com.netflix.metacat.metadata.store;
| 131 |
0 | Create_ds/metacat/metacat-metadata/src/functionalTest/java/com/netflix/metacat/metadata/store | Create_ds/metacat/metacat-metadata/src/functionalTest/java/com/netflix/metacat/metadata/store/data/package-info.java | /*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat Metadata test classes.
*/
package com.netflix.metacat.metadata.store.data;
| 132 |
0 | Create_ds/metacat/metacat-metadata/src/functionalTest/java/com/netflix/metacat/metadata/store/data | Create_ds/metacat/metacat-metadata/src/functionalTest/java/com/netflix/metacat/metadata/store/data/repositories/CrdbDataMetadataRepositoryTests.java | //CHECKSTYLE:OFF
package com.netflix.metacat.metadata.store.data.repositories;
import com.netflix.metacat.metadata.store.configs.UserMetadataStoreConfig;
import org.junit.jupiter.api.extension.ExtendWith;
import org.springframework.boot.test.autoconfigure.orm.jpa.AutoConfigureDataJpa;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ActiveProfiles;
import org.springframework.test.context.junit.jupiter.SpringExtension;
import org.springframework.transaction.annotation.Transactional;
@ExtendWith(SpringExtension.class)
@SpringBootTest(classes = {UserMetadataStoreConfig.class})
@ActiveProfiles(profiles = {"usermetadata-crdb"})
@Transactional
@AutoConfigureDataJpa
public class CrdbDataMetadataRepositoryTests extends DataMetadataRepositoryTests {
}
| 133 |
0 | Create_ds/metacat/metacat-metadata/src/functionalTest/java/com/netflix/metacat/metadata/store/data | Create_ds/metacat/metacat-metadata/src/functionalTest/java/com/netflix/metacat/metadata/store/data/repositories/CrdbDefinitionMetadataRepositoryTests.java | //CHECKSTYLE:OFF
package com.netflix.metacat.metadata.store.data.repositories;
import com.netflix.metacat.metadata.store.configs.UserMetadataStoreConfig;
import org.junit.jupiter.api.extension.ExtendWith;
import org.springframework.boot.test.autoconfigure.orm.jpa.AutoConfigureDataJpa;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ActiveProfiles;
import org.springframework.test.context.junit.jupiter.SpringExtension;
import org.springframework.transaction.annotation.Transactional;
@ExtendWith(SpringExtension.class)
@SpringBootTest(classes = {UserMetadataStoreConfig.class})
@ActiveProfiles(profiles = {"usermetadata-crdb"})
@Transactional
@AutoConfigureDataJpa
public class CrdbDefinitionMetadataRepositoryTests extends DefinitionMetadataRepositoryTests {
}
| 134 |
0 | Create_ds/metacat/metacat-metadata/src/functionalTest/java/com/netflix/metacat/metadata/store/data | Create_ds/metacat/metacat-metadata/src/functionalTest/java/com/netflix/metacat/metadata/store/data/repositories/package-info.java | /*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat Metadata repository test classes.
*/
package com.netflix.metacat.metadata.store.data.repositories;
| 135 |
0 | Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat | Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat/metadata/package-info.java | /*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat metadata test classes.
*/
package com.netflix.metacat.metadata;
| 136 |
0 | Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat/metadata | Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat/metadata/util/EntityTestUtil.java | //CHECKSTYLE:OFF
package com.netflix.metacat.metadata.util;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.metadata.store.data.entities.AuditEntity;
import com.netflix.metacat.metadata.store.data.entities.DataMetadataEntity;
import com.netflix.metacat.metadata.store.data.entities.DefinitionMetadataEntity;
import java.time.Instant;
public class EntityTestUtil {
public static ObjectMapper objectMapper = new ObjectMapper()
.findAndRegisterModules()
.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
.setSerializationInclusion(JsonInclude.Include.ALWAYS);
public static DataMetadataEntity createDataMetadataEntity() {
return createDataMetadataEntity("s3://iceberg/bucket");
}
public static DataMetadataEntity createDataMetadataEntity(String uri) {
return DataMetadataEntity.builder()
.uri(uri)
.data(createTestObjectNode())
.audit(createAuditEntity())
.build();
}
public static DefinitionMetadataEntity createDefinitionMetadataEntity() {
return createDefinitionMetadataEntity(QualifiedName.fromString("prodhive/foo/bar"));
}
public static DefinitionMetadataEntity createDefinitionMetadataEntity(QualifiedName name) {
return DefinitionMetadataEntity.builder()
.name(name)
.data(createTestObjectNode())
.audit(createAuditEntity())
.build();
}
public static AuditEntity createAuditEntity() {
return AuditEntity.builder()
.createdBy("metacat_user")
.lastModifiedBy("metacat_user")
.createdDate(Instant.now())
.lastModifiedDate(Instant.now())
.build();
}
public static ObjectNode createTestObjectNode() {
return objectMapper.createObjectNode().put("size", "50");
}
}
| 137 |
0 | Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat/metadata | Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat/metadata/util/package-info.java | /*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat metadata utility classes.
*/
package com.netflix.metacat.metadata.util;
| 138 |
0 | Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat/metadata | Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat/metadata/store/package-info.java | /*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat metadata test classes.
*/
package com.netflix.metacat.metadata.store;
| 139 |
0 | Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat/metadata/store | Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat/metadata/store/data/package-info.java | /*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat metadata test classes.
*/
package com.netflix.metacat.metadata.store.data;
| 140 |
0 | Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat/metadata/store/data | Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat/metadata/store/data/repositories/DataMetadataRepositoryTests.java | //CHECKSTYLE:OFF
package com.netflix.metacat.metadata.store.data.repositories;
import com.netflix.metacat.metadata.store.configs.UserMetadataStoreConfig;
import com.netflix.metacat.metadata.store.data.entities.DataMetadataEntity;
import com.netflix.metacat.metadata.util.EntityTestUtil;
import lombok.extern.slf4j.Slf4j;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.autoconfigure.orm.jpa.AutoConfigureDataJpa;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ActiveProfiles;
import org.springframework.test.context.junit.jupiter.SpringExtension;
import org.springframework.util.Assert;
import javax.transaction.Transactional;
import java.util.Optional;
/**
 * Test data metadata repository APIs.
*/
@ExtendWith(SpringExtension.class)
@SpringBootTest(classes = {UserMetadataStoreConfig.class})
@ActiveProfiles(profiles = {"usermetadata-h2db"})
@Transactional
@AutoConfigureDataJpa
@Slf4j
public class DataMetadataRepositoryTests {
@Autowired
public DataMetadataRepository dataMetadataRepository;
@Test
public void testCreateAndGet() {
DataMetadataEntity metadataEntity =
dataMetadataRepository.save(EntityTestUtil.createDataMetadataEntity());
// get the entity back
DataMetadataEntity savedEntity = dataMetadataRepository.getOne(metadataEntity.getId());
Assert.isTrue(savedEntity.equals(metadataEntity), "Retrieved entity should be the same");
String testUri = savedEntity.getUri();
log.info("Found test metadata entity Uri: {} and Id: {}",
testUri, savedEntity.getId());
// soft delete the entity
savedEntity.setDeleted(true);
dataMetadataRepository.saveAndFlush(savedEntity);
Optional<DataMetadataEntity> entity =
dataMetadataRepository.findByUri(testUri);
Assert.isTrue(entity.isPresent() && entity.get().isDeleted(),
"Entity should be soft-deleted");
// delete the entity
dataMetadataRepository.delete(savedEntity);
Assert.isTrue(!dataMetadataRepository.findByUri(testUri).isPresent(),
"Entity should be deleted");
}
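    // A minimal additional sketch (assuming the same H2-backed repository wiring): a lookup
    // of a uri that was never saved should yield an empty Optional.
    @Test
    public void testFindMissingUriReturnsEmpty() {
        Assert.isTrue(!dataMetadataRepository.findByUri("s3://iceberg/does-not-exist").isPresent(),
                "Lookup of an unknown uri should be empty");
    }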
}
| 141 |
0 | Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat/metadata/store/data | Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat/metadata/store/data/repositories/DefinitionMetadataRepositoryTests.java | //CHECKSTYLE:OFF
package com.netflix.metacat.metadata.store.data.repositories;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.metadata.store.configs.UserMetadataStoreConfig;
import com.netflix.metacat.metadata.store.data.entities.DefinitionMetadataEntity;
import com.netflix.metacat.metadata.util.EntityTestUtil;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.autoconfigure.orm.jpa.AutoConfigureDataJpa;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ActiveProfiles;
import org.springframework.test.context.junit.jupiter.SpringExtension;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.util.Assert;
import java.util.Optional;
/**
 * Test definition metadata repository APIs.
*/
@ExtendWith(SpringExtension.class)
@SpringBootTest(classes = {UserMetadataStoreConfig.class})
@ActiveProfiles(profiles = {"usermetadata-h2db"})
@Transactional
@AutoConfigureDataJpa
public class DefinitionMetadataRepositoryTests {
@Autowired
private DefinitionMetadataRepository definitionMetadataRepository;
@Test
public void testCreateAndGet() {
QualifiedName testQName = QualifiedName.fromString("prodhive/foo/bar");
DefinitionMetadataEntity metadataEntity =
definitionMetadataRepository.save(EntityTestUtil.createDefinitionMetadataEntity(testQName));
// get the entity back
DefinitionMetadataEntity savedEntity = definitionMetadataRepository.getOne(metadataEntity.getId());
Assert.isTrue(savedEntity.equals(metadataEntity), "Retrieved entity should be the same");
// soft delete the entity
savedEntity.setDeleted(true);
definitionMetadataRepository.saveAndFlush(savedEntity);
Optional<DefinitionMetadataEntity> entity =
definitionMetadataRepository.findByName(testQName);
Assert.isTrue(entity.isPresent() && entity.get().isDeleted(),
"Entity should be soft-deleted");
// delete the entity
definitionMetadataRepository.delete(savedEntity);
Assert.isTrue(!definitionMetadataRepository.findByName(testQName).isPresent(),
"Entity should be deleted");
}
}
| 142 |
0 | Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat/metadata/store/data | Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat/metadata/store/data/repositories/package-info.java | /*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat metadata repository test classes.
*/
package com.netflix.metacat.metadata.store.data.repositories;
| 143 |
0 | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/UserMetadataServiceImpl.java | package com.netflix.metacat.metadata;
import com.netflix.metacat.common.json.MetacatJson;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.usermetadata.BaseUserMetadataService;
import com.netflix.metacat.common.server.usermetadata.MetadataInterceptor;
import com.netflix.metacat.metadata.store.UserMetadataStoreService;
import org.springframework.beans.factory.annotation.Autowired;
/**
* The Hibernate-based User metadata service implementation.
*
* @author rveeramacheneni
*/
public class UserMetadataServiceImpl extends BaseUserMetadataService {
private final UserMetadataStoreService userMetadataStoreService;
private final MetacatJson metacatJson;
private final Config config;
private final MetadataInterceptor metadataInterceptor;
/**
* Ctor.
*
* @param userMetadataStoreService The User metadata store service.
* @param metacatJson The Metacat jackson JSON mapper.
* @param config The config.
* @param metadataInterceptor The metadata interceptor.
*/
@Autowired
public UserMetadataServiceImpl(final UserMetadataStoreService userMetadataStoreService,
final MetacatJson metacatJson,
final Config config,
final MetadataInterceptor metadataInterceptor) {
this.userMetadataStoreService = userMetadataStoreService;
this.metacatJson = metacatJson;
this.config = config;
this.metadataInterceptor = metadataInterceptor;
}
}
| 144 |
0 | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/package-info.java | /*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat User metadata classes.
*/
package com.netflix.metacat.metadata;
| 145 |
0 | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/UserMetadataStoreService.java | package com.netflix.metacat.metadata.store;
import com.netflix.metacat.metadata.store.data.repositories.DataMetadataRepository;
import com.netflix.metacat.metadata.store.data.repositories.DefinitionMetadataRepository;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
/**
* Storage interface for user metadata entity operations.
*
* @author rveeramacheneni
*/
@Slf4j
public class UserMetadataStoreService {
private final DefinitionMetadataRepository definitionMetadataRepository;
private final DataMetadataRepository dataMetadataRepository;
/**
* Ctor.
*
* @param definitionMetadataRepository The definition metadata repository.
* @param dataMetadataRepository The data metadata repository.
*/
@Autowired
public UserMetadataStoreService(@NonNull final DefinitionMetadataRepository definitionMetadataRepository,
@NonNull final DataMetadataRepository dataMetadataRepository) {
this.definitionMetadataRepository = definitionMetadataRepository;
this.dataMetadataRepository = dataMetadataRepository;
}
}
| 146 |
0 | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/package-info.java | /*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat Metadata storage-related classes.
*/
package com.netflix.metacat.metadata.store;
| 147 |
0 | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/configs/package-info.java | /*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* User metadata store config classes.
*/
package com.netflix.metacat.metadata.store.configs;
| 148 |
0 | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/configs/UserMetadataStoreConfig.java | package com.netflix.metacat.metadata.store.configs;
import com.netflix.metacat.common.json.MetacatJson;
import com.netflix.metacat.common.json.MetacatJsonLocator;
import com.netflix.metacat.metadata.store.UserMetadataStoreService;
import com.netflix.metacat.metadata.store.data.repositories.DataMetadataRepository;
import com.netflix.metacat.metadata.store.data.repositories.DefinitionMetadataRepository;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.autoconfigure.domain.EntityScan;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.jpa.repository.config.EnableJpaRepositories;
/**
* The user metadata store config.
*
* @author rveeramacheneni
*/
@Configuration
@EntityScan("com.netflix.metacat.metadata.store.data.*")
@EnableJpaRepositories("com.netflix.metacat.metadata.store.data.*")
public class UserMetadataStoreConfig {
/**
* The user metadata store service.
*
* @param definitionMetadataRepository The definition metadata repository.
* @param dataMetadataRepository The data metadata repository.
* @return the constructed bean.
*/
@Bean
public UserMetadataStoreService userMetadataStoreService(
final DefinitionMetadataRepository definitionMetadataRepository,
final DataMetadataRepository dataMetadataRepository) {
return new UserMetadataStoreService(definitionMetadataRepository, dataMetadataRepository);
}
/**
* Store metacat JSON Handler.
*
* @return The JSON handler
*/
@Bean
@ConditionalOnMissingBean(MetacatJson.class)
public MetacatJson metacatJson() {
return new MetacatJsonLocator();
}
}
| 149 |
0 | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data/converters/QualifiedNameConverter.java | package com.netflix.metacat.metadata.store.data.converters;
import com.netflix.metacat.common.QualifiedName;
import lombok.extern.slf4j.Slf4j;
import javax.persistence.AttributeConverter;
import javax.persistence.Converter;
/**
* The attribute converter for the QualifiedName type.
*
* @author rveeramacheneni
*/
@Slf4j
@Converter(autoApply = true)
@SuppressWarnings("PMD")
public class QualifiedNameConverter implements AttributeConverter<QualifiedName, String> {
@Override
public String convertToDatabaseColumn(final QualifiedName attribute) {
return attribute == null ? null : attribute.toString();
}
@Override
public QualifiedName convertToEntityAttribute(final String dbData) {
return dbData == null ? null : QualifiedName.fromString(dbData);
}
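    // Round-trip sketch: QualifiedName.fromString("prodhive/foo/bar") is stored as the string
    // "prodhive/foo/bar" and re-materialized via QualifiedName.fromString() on read.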
}
| 150 |
0 | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data/converters/package-info.java | /*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat Metadata converter classes.
*/
package com.netflix.metacat.metadata.store.data.converters;
| 151 |
0 | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data/converters/ObjectNodeConverter.java | package com.netflix.metacat.metadata.store.data.converters;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.netflix.metacat.common.json.MetacatJson;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import javax.persistence.AttributeConverter;
import javax.persistence.Converter;
/**
* Attribute converter for Jackson ObjectNode type.
*
* @author rveeramacheneni
*/
@Slf4j
@Converter(autoApply = true)
@SuppressWarnings("PMD")
public class ObjectNodeConverter implements AttributeConverter<ObjectNode, String> {
private final MetacatJson metacatJson;
/**
* Ctor.
*
* @param metacatJson the Jackson object mapper.
*/
public ObjectNodeConverter(@NonNull final MetacatJson metacatJson) {
this.metacatJson = metacatJson;
}
@Override
public String convertToDatabaseColumn(final ObjectNode attribute) {
return attribute == null ? null : attribute.toString();
}
@Override
public ObjectNode convertToEntityAttribute(final String dbData) {
return dbData == null ? null : metacatJson.parseJsonObject(dbData);
}
}
| 152 |
0 | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data/repositories/DefinitionMetadataRepository.java | package com.netflix.metacat.metadata.store.data.repositories;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.metadata.store.data.entities.DefinitionMetadataEntity;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.stereotype.Repository;
import java.util.Optional;
/**
* The DefinitionMetadata entity repository.
*/
@Repository
public interface DefinitionMetadataRepository extends JpaRepository<DefinitionMetadataEntity, String> {
/**
* Find a definition metadata entity using the given QualifiedName.
*
* @param name The QualifiedName of the entity.
* @return The definition metadata entity.
*/
Optional<DefinitionMetadataEntity> findByName(QualifiedName name);
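    // Note: Spring Data derives this query from the method name; because QualifiedName is
    // persisted via QualifiedNameConverter, the lookup compares the stored string form of the name.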
}
| 153 |
0 | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data/repositories/DataMetadataRepository.java | package com.netflix.metacat.metadata.store.data.repositories;
import com.netflix.metacat.metadata.store.data.entities.DataMetadataEntity;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.stereotype.Repository;
import java.util.Optional;
/**
* The data metadata entity repository.
*
* @author rveeramacheneni
*/
@Repository
public interface DataMetadataRepository extends JpaRepository<DataMetadataEntity, String> {
/**
* Find a data metadata entity using the given uri.
*
* @param uri The uri of the entity.
* @return The data metadata entity.
*/
Optional<DataMetadataEntity> findByUri(String uri);
}
| 154 |
0 | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data/repositories/package-info.java | /*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat Metadata repository classes.
*/
package com.netflix.metacat.metadata.store.data.repositories;
| 155 |
0 | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data/entities/BaseUserMetadataEntity.java | package com.netflix.metacat.metadata.store.data.entities;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.netflix.metacat.metadata.store.data.converters.ObjectNodeConverter;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.Setter;
import lombok.ToString;
import lombok.experimental.SuperBuilder;
import org.hibernate.annotations.ColumnDefault;
import javax.persistence.Basic;
import javax.persistence.Column;
import javax.persistence.Convert;
import javax.persistence.MappedSuperclass;
/**
* Represents a basic user metadata entity.
*
* @author rveeramacheneni
*/
@MappedSuperclass
@Getter
@Setter
@SuperBuilder
@NoArgsConstructor
@AllArgsConstructor
@ToString(callSuper = true, of = {
"isDeleted"
})
@SuppressWarnings("PMD")
public class BaseUserMetadataEntity extends BaseEntity {
@Basic
@Column(name = "is_deleted", nullable = false)
@ColumnDefault("false")
protected boolean isDeleted;
@Basic
@Column(name = "data", columnDefinition = "jsonb")
@Convert(converter = ObjectNodeConverter.class)
protected ObjectNode data;
}
| 156 |
0 | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data/entities/BaseEntity.java | package com.netflix.metacat.metadata.store.data.entities;
import lombok.AccessLevel;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.Setter;
import lombok.ToString;
import lombok.experimental.SuperBuilder;
import org.hibernate.annotations.GenericGenerator;
import javax.persistence.Basic;
import javax.persistence.Column;
import javax.persistence.Embedded;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.MappedSuperclass;
import javax.persistence.Version;
/**
* Represents a basic metadata entity.
*
* @author rveeramacheneni
*/
@MappedSuperclass
@Getter
@Setter
@SuperBuilder
@NoArgsConstructor
@AllArgsConstructor
@EqualsAndHashCode(of = "id")
@ToString(of = {
"id",
"version",
"audit"
})
@SuppressWarnings("PMD")
public abstract class BaseEntity {
@Basic
@Id
@GeneratedValue(generator = "uuid")
@GenericGenerator(name = "uuid", strategy = "uuid2")
@Column(name = "id", nullable = false, unique = true, updatable = false)
@Setter(AccessLevel.NONE)
protected String id;
@Version
@Column(name = "version")
@Setter(AccessLevel.NONE)
protected Long version;
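    // @Version enables JPA optimistic locking: conflicting concurrent updates of the same row
    // fail with an optimistic-lock exception instead of silently overwriting each other.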
@Embedded
@Builder.Default
protected AuditEntity audit = new AuditEntity();
}
| 157 |
0 | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data/entities/AuditEntity.java | package com.netflix.metacat.metadata.store.data.entities;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.Setter;
import lombok.ToString;
import org.springframework.data.annotation.CreatedBy;
import org.springframework.data.annotation.CreatedDate;
import org.springframework.data.annotation.LastModifiedBy;
import org.springframework.data.annotation.LastModifiedDate;
import javax.persistence.Basic;
import javax.persistence.Column;
import javax.persistence.Embeddable;
import java.time.Instant;
/**
* Embeddable entity with audit fields.
*
* @author rveeramacheneni
*/
@Embeddable
@Getter
@Setter
@Builder
@AllArgsConstructor
@NoArgsConstructor
@ToString(of = {
"createdBy",
"lastModifiedBy",
"createdDate",
"lastModifiedDate"
})
public class AuditEntity {
@Basic
@Column(name = "created_by", nullable = false)
@CreatedBy
protected String createdBy;
@Basic
@Column(name = "last_updated_by")
@LastModifiedBy
protected String lastModifiedBy;
@Basic
@Column(name = "created_date", updatable = false)
@CreatedDate
protected Instant createdDate;
@Basic
@Column(name = "last_updated_date")
@LastModifiedDate
protected Instant lastModifiedDate;
}
| 158 |
0 | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data/entities/DefinitionMetadataEntity.java | package com.netflix.metacat.metadata.store.data.entities;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.metadata.store.data.converters.QualifiedNameConverter;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.Setter;
import lombok.ToString;
import lombok.experimental.SuperBuilder;
import javax.persistence.Basic;
import javax.persistence.Column;
import javax.persistence.Convert;
import javax.persistence.Entity;
import javax.persistence.Table;
/**
* The definition metadata entity.
*
* @author rveeramacheneni
*/
@Entity
@Getter
@Setter
@SuperBuilder
@NoArgsConstructor
@AllArgsConstructor
@ToString(of = {
"name"
})
@Table(name = "definition_metadata")
@SuppressWarnings("PMD")
public class DefinitionMetadataEntity extends BaseUserMetadataEntity {
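    // The fully-qualified resource name this metadata is attached to, persisted as a
    // string via QualifiedNameConverter and constrained to be unique per entity.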
@Basic
@Column(name = "name", nullable = false, unique = true)
@Convert(converter = QualifiedNameConverter.class)
private QualifiedName name;
}
| 159 |
0 | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data/entities/DataMetadataEntity.java | package com.netflix.metacat.metadata.store.data.entities;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.Setter;
import lombok.ToString;
import lombok.experimental.SuperBuilder;
import javax.persistence.Basic;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.Table;
/**
* Represents a data metadata entity.
*
* @author rveeramacheneni
*/
@Entity
@Getter
@Setter
@SuperBuilder
@NoArgsConstructor
@AllArgsConstructor
@ToString(of = {
"uri"
})
@Table(name = "data_metadata")
public class DataMetadataEntity extends BaseUserMetadataEntity {
@Basic
@Column(name = "uri", nullable = false, unique = true)
private String uri;
}
| 160 |
0 | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data | Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data/entities/package-info.java | /*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat Metadata entity classes.
*/
package com.netflix.metacat.metadata.store.data.entities;
| 161 |
0 | Create_ds/mantis-api/src/test/java/io/mantisrx | Create_ds/mantis-api/src/test/java/io/mantisrx/api/UtilTest.java | package io.mantisrx.api;
import org.junit.Test;
import static org.junit.Assert.assertArrayEquals;
public class UtilTest {
@Test
public void testGetTagList() {
String[] tags = Util.getTaglist("/jobconnectbyname/rx-sps-tracker?clientId=testClientId", "testTargetId", "us-east-1");
assertArrayEquals(new String[]{
"clientId", "testClientId",
"SessionId", "testTargetId",
"urlPath", "/jobconnectbyname/rx-sps-tracker",
"region", "us-east-1"}, tags);
tags = Util.getTaglist("/jobconnectbyname/rx-sps-tracker?clientId=testClientId&MantisApiTag=tag1:value1", "testTargetId", "us-east-1");
assertArrayEquals(new String[]{
"tag1", "value1",
"clientId", "testClientId",
"SessionId", "testTargetId",
"urlPath", "/jobconnectbyname/rx-sps-tracker",
"region", "us-east-1"}, tags);
tags = Util.getTaglist("/jobconnectbyname/rx-sps-tracker?clientId=testClientId&MantisApiTag=tag1:value1&MantisApiTag=clientId:testClientId2", "testTargetId", "us-east-1");
assertArrayEquals(new String[]{
"tag1", "value1",
"clientId", "testClientId2",
"SessionId", "testTargetId",
"urlPath", "/jobconnectbyname/rx-sps-tracker",
"region", "us-east-1"}, tags);
}
}
| 162 |
0 | Create_ds/mantis-api/src/test/java/io/mantisrx/api | Create_ds/mantis-api/src/test/java/io/mantisrx/api/tunnel/CrossRegionHandlerTest.java | package io.mantisrx.api.tunnel;
import com.google.common.collect.ImmutableList;
import io.mantisrx.api.push.ConnectionBroker;
import junit.framework.TestCase;
import org.junit.Test;
import rx.Scheduler;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
public class CrossRegionHandlerTest extends TestCase {
@Test
public void testParseUriRegion() {
CrossRegionHandler regionHandler = spy(new CrossRegionHandler(ImmutableList.of(), mock(MantisCrossRegionalClient.class), mock(ConnectionBroker.class), mock(Scheduler.class)));
doReturn(ImmutableList.of("us-east-1", "eu-west-1")).when(regionHandler).getTunnelRegions();
assertEquals(ImmutableList.of("us-east-1"), regionHandler.parseRegionsInUri("/region/us-east-1/foobar"));
assertEquals(ImmutableList.of("us-east-2"), regionHandler.parseRegionsInUri("/region/us-east-2/foobar"));
assertEquals(ImmutableList.of("us-east-1", "eu-west-1"), regionHandler.parseRegionsInUri("/region/all/foobar"));
assertEquals(ImmutableList.of("us-east-1", "eu-west-1"), regionHandler.parseRegionsInUri("/region/ALL/foobar"));
doReturn(ImmutableList.of("us-east-1", "eu-west-1", "us-west-2")).when(regionHandler).getTunnelRegions();
assertEquals(ImmutableList.of("us-east-1", "eu-west-1", "us-west-2"), regionHandler.parseRegionsInUri("/region/ALL/foobar"));
assertEquals(ImmutableList.of("us-east-1", "us-east-2"), regionHandler.parseRegionsInUri("/region/us-east-1,us-east-2/foobar"));
assertEquals(ImmutableList.of("us-east-1", "us-west-2"), regionHandler.parseRegionsInUri("/region/us-east-1,us-west-2/foobar"));
}
}
| 163 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx | Create_ds/mantis-api/src/main/java/io/mantisrx/api/Bootstrap.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api;
import com.google.inject.Injector;
import com.netflix.config.ConfigurationManager;
import com.netflix.governator.InjectorBuilder;
import com.netflix.zuul.netty.server.BaseServerStartup;
import com.netflix.zuul.netty.server.Server;
/**
* Bootstrap
*
* Author: Arthur Gonigberg
* Date: November 20, 2017
*/
public class Bootstrap {
public static void main(String[] args) {
String propertiesFile = null;
if (args.length >= 2 && "-p".equals(args[0])) {
propertiesFile = args[1];
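            // ConfigurationManager.loadCascadedPropertiesFromResources expects the config
            // name without its ".properties" suffix, so strip the suffix if present.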
            if (propertiesFile.endsWith(".properties")) {
                propertiesFile = propertiesFile.substring(0, propertiesFile.length() - ".properties".length());
            }
}
new Bootstrap().start(propertiesFile);
}
public void start(String configName) {
System.out.println("Mantis API: starting up.");
long startTime = System.currentTimeMillis();
int exitCode = 0;
Server server = null;
try {
ConfigurationManager.loadCascadedPropertiesFromResources(configName);
Injector injector = InjectorBuilder.fromModule(new MantisAPIModule()).createInjector();
BaseServerStartup serverStartup = injector.getInstance(BaseServerStartup.class);
server = serverStartup.server();
long startupDuration = System.currentTimeMillis() - startTime;
System.out.println("Mantis API: finished startup. Duration = " + startupDuration + " ms");
server.start();
server.awaitTermination();
}
catch (Throwable t) {
t.printStackTrace();
System.err.println("###############");
System.err.println("Mantis API: initialization failed. Forcing shutdown now.");
System.err.println("###############");
exitCode = 1;
}
finally {
// server shutdown
if (server != null) server.stop();
System.exit(exitCode);
}
}
}
| 164 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx | Create_ds/mantis-api/src/main/java/io/mantisrx/api/Util.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api;
import com.google.common.base.Strings;
import io.netty.handler.codec.http.QueryStringDecoder;
import lombok.experimental.UtilityClass;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.slf4j.Logger;
import rx.Observable;
import rx.functions.Func1;
import java.util.*;
import java.util.concurrent.TimeUnit;
import static io.mantisrx.api.Constants.*;
@UtilityClass
@Slf4j
public class Util {
private static final int defaultNumRetries = 2;
public static boolean startsWithAnyOf(final String target, List<String> prefixes) {
for (String prefix : prefixes) {
if (target.startsWith(prefix)) {
return true;
}
}
return false;
}
//
// Regions
//
public static String getLocalRegion() {
return System.getenv("EC2_REGION");
}
//
// Query Params
//
public static String[] getTaglist(String uri, String id) {
return getTaglist(uri, id, null);
}
public static String[] getTaglist(String uri, String id, String region) {
QueryStringDecoder queryStringDecoder = new QueryStringDecoder(uri);
Map<String, List<String>> queryParameters = queryStringDecoder.parameters();
boolean isClientIdSet = false;
final List<String> tags = new LinkedList<>();
if (queryParameters != null) {
List<String> tagVals = queryParameters.get(TagsParamName);
if (tagVals != null) {
for (String s : tagVals) {
StringTokenizer tokenizer = new StringTokenizer(s, TagNameValDelimiter);
if (tokenizer.countTokens() == 2) {
String s1 = tokenizer.nextToken();
String s2 = tokenizer.nextToken();
if (s1 != null && !s1.isEmpty() && s2 != null && !s2.isEmpty()) {
tags.add(s1);
tags.add(s2);
if (ClientIdTagName.equals(s1)) {
isClientIdSet = true;
}
}
}
}
}
tagVals = queryParameters.get(ClientIdTagName);
if (!isClientIdSet && tagVals != null && !tagVals.isEmpty()) {
tags.add(ClientIdTagName);
tags.add(tagVals.get(0));
}
}
tags.add("SessionId");
tags.add(id);
tags.add("urlPath");
tags.add(queryStringDecoder.path());
if (!Strings.isNullOrEmpty(region)) {
tags.add("region");
tags.add(region);
}
return tags.toArray(new String[]{});
}
//
// Retries
//
public static Func1<Observable<? extends Throwable>, Observable<?>> getRetryFunc(final Logger logger, String name) {
return getRetryFunc(logger, name, defaultNumRetries);
}
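    // Hypothetical usage: resubscribe with exponential backoff (2, 4, 8, ... seconds
    // between attempts), giving up after 5 retries:
    //
    //   observable.retryWhen(Util.getRetryFunc(log, "sinkConnection", 5));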
public static Func1<Observable<? extends Throwable>, Observable<?>> getRetryFunc(final Logger logger, String name, final int retries) {
final int limit = retries == Integer.MAX_VALUE ? retries : retries + 1;
return attempts -> attempts
.zipWith(Observable.range(1, limit), (t1, integer) -> {
logger.warn("Caught exception connecting for {}.", name, t1);
return new ImmutablePair<Throwable, Integer>(t1, integer);
})
.flatMap(pair -> {
Throwable t = pair.left;
int retryIter = pair.right;
long delay = Math.round(Math.pow(2, retryIter));
if (retryIter > retries) {
logger.error("Exceeded maximum retries ({}) for {} with exception: {}", retries, name, t.getMessage(), t);
return Observable.error(new Exception("Timeout after " + retries + " retries"));
}
logger.info("Retrying connection to {} after sleeping for {} seconds.", name, delay, t);
return Observable.timer(delay, TimeUnit.SECONDS);
});
}
}
| 165 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx | Create_ds/mantis-api/src/main/java/io/mantisrx/api/MantisAPIModule.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api;
import com.google.inject.Scopes;
import com.google.inject.util.Modules;
import com.netflix.appinfo.EurekaInstanceConfig;
import com.netflix.appinfo.providers.MyDataCenterInstanceConfigProvider;
import com.netflix.discovery.guice.EurekaModule;
import com.netflix.zuul.*;
import com.netflix.zuul.filters.FilterRegistry;
import com.netflix.zuul.filters.MutableFilterRegistry;
import com.netflix.zuul.groovy.GroovyCompiler;
import com.netflix.zuul.groovy.GroovyFileFilter;
import io.mantisrx.api.services.AppStreamDiscoveryService;
import io.mantisrx.api.services.AppStreamStore;
import io.mantisrx.api.services.ConfigurationBasedAppStreamStore;
import io.mantisrx.server.core.Configurations;
import io.mantisrx.server.core.CoreConfiguration;
import io.mantisrx.server.master.client.HighAvailabilityServices;
import io.mantisrx.server.master.client.HighAvailabilityServicesUtil;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import com.google.inject.AbstractModule;
import com.google.inject.Provides;
import com.google.inject.Singleton;
import com.google.inject.name.Named;
import com.netflix.config.ConfigurationManager;
import com.netflix.netty.common.accesslog.AccessLogPublisher;
import com.netflix.netty.common.status.ServerStatusManager;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.patterns.ThreadPoolMonitor;
import com.netflix.zuul.context.SessionContextDecorator;
import com.netflix.zuul.context.ZuulSessionContextDecorator;
import com.netflix.zuul.init.ZuulFiltersModule;
import com.netflix.zuul.netty.server.BaseServerStartup;
import com.netflix.zuul.netty.server.ClientRequestReceiver;
import com.netflix.zuul.origins.BasicNettyOriginManager;
import com.netflix.zuul.origins.OriginManager;
import io.mantisrx.api.services.artifacts.ArtifactManager;
import io.mantisrx.api.services.artifacts.InMemoryArtifactManager;
import com.netflix.zuul.stats.BasicRequestMetricsPublisher;
import com.netflix.zuul.stats.RequestMetricsPublisher;
import io.mantisrx.api.tunnel.MantisCrossRegionalClient;
import io.mantisrx.api.tunnel.NoOpCrossRegionalClient;
import io.mantisrx.client.MantisClient;
import io.mantisrx.server.worker.client.WorkerMetricsClient;
import io.mantisrx.shaded.org.apache.curator.framework.listen.Listenable;
import io.mantisrx.shaded.org.apache.curator.framework.listen.StandardListenerManager;
import org.apache.commons.configuration.AbstractConfiguration;
import rx.Scheduler;
import rx.schedulers.Schedulers;
import java.io.FilenameFilter;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.*;
public class MantisAPIModule extends AbstractModule {
@Override
protected void configure() {
bind(AbstractConfiguration.class).toInstance(ConfigurationManager.getConfigInstance());
bind(BaseServerStartup.class).to(MantisServerStartup.class);
// use provided basic netty origin manager
bind(OriginManager.class).to(BasicNettyOriginManager.class);
// zuul filter loading
bind(DynamicCodeCompiler.class).to(GroovyCompiler.class);
bind(FilenameFilter.class).to(GroovyFileFilter.class);
install(Modules.override(new EurekaModule()).with(new AbstractModule() {
@Override
protected void configure() {
bind(EurekaInstanceConfig.class).toProvider(MyDataCenterInstanceConfigProvider.class).in(Scopes.SINGLETON);
}
}));
install(new ZuulFiltersModule());
bind(FilterLoader.class).to(DynamicFilterLoader.class);
bind(FilterRegistry.class).to(MutableFilterRegistry.class);
bind(FilterFileManager.class).asEagerSingleton();
// general server bindings
bind(ServerStatusManager.class); // health/discovery status
bind(SessionContextDecorator.class).to(ZuulSessionContextDecorator.class); // decorate new sessions when requests come in
bind(Registry.class).to(DefaultRegistry.class); // atlas metrics registry
bind(RequestCompleteHandler.class).to(BasicRequestCompleteHandler.class); // metrics post-request completion
bind(RequestMetricsPublisher.class).to(BasicRequestMetricsPublisher.class); // timings publisher
// access logger, including request ID generator
bind(AccessLogPublisher.class).toInstance(new AccessLogPublisher("ACCESS",
(channel, httpRequest) -> ClientRequestReceiver.getRequestFromChannel(channel).getContext().getUUID()));
bind(ArtifactManager.class).to(InMemoryArtifactManager.class);
bind(MantisCrossRegionalClient.class).to(NoOpCrossRegionalClient.class);
bind(ObjectMapper.class).toInstance(new ObjectMapper()
.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false));
}
@Provides
@Singleton
HighAvailabilityServices provideHighAvailabilityServices(AbstractConfiguration configuration) {
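        // Copy every "mantis.*" key out of the Archaius configuration into a Properties
        // object, the format the core configuration factory expects.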
Properties props = new Properties();
configuration.getKeys("mantis").forEachRemaining(key -> {
props.put(key, configuration.getString(key));
});
return HighAvailabilityServicesUtil.createHAServices(
Configurations.frmProperties(props, CoreConfiguration.class));
}
@Provides @Singleton MantisClient provideMantisClient(AbstractConfiguration configuration) {
Properties props = new Properties();
configuration.getKeys("mantis").forEachRemaining(key -> {
props.put(key, configuration.getString(key));
});
return new MantisClient(props);
}
@Provides
@Singleton
@Named("io-scheduler")
Scheduler provideIoScheduler(Registry registry) {
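        // Note: with an unbounded LinkedBlockingQueue the pool never grows past its
        // core size of 16; the max of 128 only takes effect with a bounded queue.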
ThreadPoolExecutor executor = new ThreadPoolExecutor(16, 128, 60,
TimeUnit.SECONDS, new LinkedBlockingQueue<>());
ThreadPoolMonitor.attach(registry, executor, "io-thread-pool");
return Schedulers.from(executor);
}
@Provides
@Singleton
ConfigurationBasedAppStreamStore.ConfigSource provideConfigSource(AbstractConfiguration configuration) {
return new ConfigurationBasedAppStreamStore.ConfigSource() {
@Override
public Listenable<ConfigurationBasedAppStreamStore.ConfigurationChangeListener> getListenable() {
return StandardListenerManager.standard();
}
@Override
public String get() {
return String.join(",", configuration.getStringArray("mreAppJobClusterMap"));
}
};
}
@Provides
@Singleton
AppStreamDiscoveryService provideAppStreamDiscoveryService(MantisClient mantisClient,
@Named("io-scheduler") Scheduler ioScheduler,
ConfigurationBasedAppStreamStore.ConfigSource configSource) {
AppStreamStore appStreamStore = new ConfigurationBasedAppStreamStore(configSource);
return new AppStreamDiscoveryService(mantisClient, ioScheduler, appStreamStore);
}
@Provides @Singleton
WorkerMetricsClient provideWorkerMetricsClient(AbstractConfiguration configuration) {
Properties props = new Properties();
configuration.getKeys("mantis").forEachRemaining(key -> {
props.put(key, configuration.getString(key));
});
return new WorkerMetricsClient(props);
}
@Provides
@Singleton
@Named("push-prefixes")
List<String> providePushPrefixes() {
List<String> pushPrefixes = new ArrayList<>(20);
pushPrefixes.add("/jobconnectbyid");
pushPrefixes.add("/api/v1/jobconnectbyid");
pushPrefixes.add("/jobconnectbyname");
pushPrefixes.add("/api/v1/jobconnectbyname");
pushPrefixes.add("/jobsubmitandconnect");
pushPrefixes.add("/api/v1/jobsubmitandconnect");
pushPrefixes.add("/jobClusters/discoveryInfoStream");
pushPrefixes.add("/jobstatus");
pushPrefixes.add("/api/v1/jobstatus");
pushPrefixes.add("/api/v1/jobs/schedulingInfo/");
pushPrefixes.add("/api/v1/metrics");
return pushPrefixes;
}
}
| 166 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx | Create_ds/mantis-api/src/main/java/io/mantisrx/api/MantisServerStartup.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api;
import com.google.inject.name.Named;
import com.netflix.appinfo.ApplicationInfoManager;
import com.netflix.config.DynamicIntProperty;
import com.netflix.discovery.EurekaClient;
import com.netflix.netty.common.accesslog.AccessLogPublisher;
import com.netflix.netty.common.channel.config.ChannelConfig;
import com.netflix.netty.common.channel.config.CommonChannelConfigKeys;
import com.netflix.netty.common.metrics.EventLoopGroupMetrics;
import com.netflix.netty.common.proxyprotocol.StripUntrustedProxyHeadersHandler;
import com.netflix.netty.common.status.ServerStatusManager;
import com.netflix.spectator.api.Registry;
import com.netflix.zuul.FilterLoader;
import com.netflix.zuul.FilterUsageNotifier;
import com.netflix.zuul.RequestCompleteHandler;
import com.netflix.zuul.context.SessionContextDecorator;
import com.netflix.zuul.netty.server.BaseServerStartup;
import com.netflix.zuul.netty.server.DirectMemoryMonitor;
import com.netflix.zuul.netty.server.NamedSocketAddress;
import io.mantisrx.api.initializers.MantisApiServerChannelInitializer;
import io.mantisrx.api.push.ConnectionBroker;
import io.mantisrx.api.tunnel.MantisCrossRegionalClient;
import io.mantisrx.server.master.client.HighAvailabilityServices;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.group.ChannelGroup;
import org.apache.commons.configuration.AbstractConfiguration;
import rx.Scheduler;
import javax.inject.Inject;
import javax.inject.Singleton;
import java.net.InetSocketAddress;
import java.util.*;
@Singleton
public class MantisServerStartup extends BaseServerStartup {
private final HighAvailabilityServices highAvailabilityServices;
private final MantisCrossRegionalClient mantisCrossRegionalClient;
private final ConnectionBroker connectionBroker;
private final Scheduler scheduler;
private final List<String> pushPrefixes;
@Inject
public MantisServerStartup(ServerStatusManager serverStatusManager, FilterLoader filterLoader,
SessionContextDecorator sessionCtxDecorator, FilterUsageNotifier usageNotifier,
RequestCompleteHandler reqCompleteHandler, Registry registry,
DirectMemoryMonitor directMemoryMonitor, EventLoopGroupMetrics eventLoopGroupMetrics,
EurekaClient discoveryClient, ApplicationInfoManager applicationInfoManager,
AccessLogPublisher accessLogPublisher,
AbstractConfiguration configurationManager,
HighAvailabilityServices highAvailabilityServices,
MantisCrossRegionalClient mantisCrossRegionalClient,
ConnectionBroker connectionBroker,
@Named("io-scheduler") Scheduler scheduler,
@Named("push-prefixes") List<String> pushPrefixes
) {
super(serverStatusManager, filterLoader, sessionCtxDecorator, usageNotifier, reqCompleteHandler, registry,
directMemoryMonitor, eventLoopGroupMetrics, discoveryClient, applicationInfoManager,
accessLogPublisher);
this.highAvailabilityServices = highAvailabilityServices;
this.mantisCrossRegionalClient = mantisCrossRegionalClient;
this.connectionBroker = connectionBroker;
this.scheduler = scheduler;
this.pushPrefixes = pushPrefixes;
// Mantis Master Listener
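        // On each leader change, repoint the Ribbon "api" VIP at the new master so
        // proxied requests always follow the current leader.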
highAvailabilityServices
.getMasterMonitor()
.getMasterObservable()
.filter(x -> x != null)
.forEach(masterDescription -> {
LOG.info("Received new Mantis Master: " + masterDescription);
configurationManager.setProperty("api.ribbon.listOfServers",
masterDescription.getHostIP() + ":" + masterDescription.getApiPort());
});
}
@Override
protected Map<NamedSocketAddress, ChannelInitializer<?>> chooseAddrsAndChannels(ChannelGroup clientChannels) {
Map<NamedSocketAddress, ChannelInitializer<?>> addrsToChannels = new HashMap<>();
String mainPortName = "main";
int port = new DynamicIntProperty("zuul.server.port.main", 7001).get();
NamedSocketAddress sockAddr = new NamedSocketAddress(mainPortName, new InetSocketAddress(port));
ChannelConfig channelConfig = defaultChannelConfig(mainPortName);
ChannelConfig channelDependencies = defaultChannelDependencies(mainPortName);
/* These settings may need to be tweaked depending if you're running behind an ELB HTTP listener, TCP listener,
* or directly on the internet.
*/
channelConfig.set(CommonChannelConfigKeys.allowProxyHeadersWhen,
StripUntrustedProxyHeadersHandler.AllowWhen.ALWAYS);
channelConfig.set(CommonChannelConfigKeys.preferProxyProtocolForClientIp, false);
channelConfig.set(CommonChannelConfigKeys.isSSlFromIntermediary, false);
channelConfig.set(CommonChannelConfigKeys.withProxyProtocol, false);
addrsToChannels.put(
sockAddr,
new MantisApiServerChannelInitializer(
String.valueOf(port), channelConfig, channelDependencies, clientChannels, pushPrefixes,
highAvailabilityServices, mantisCrossRegionalClient, connectionBroker,
scheduler, false));
logAddrConfigured(sockAddr);
return Collections.unmodifiableMap(addrsToChannels);
}
}
| 167 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx | Create_ds/mantis-api/src/main/java/io/mantisrx/api/MantisConfigurationBasedServerList.java | /**
* Copyright 2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api;
import com.google.common.base.Strings;
import com.google.common.collect.Lists;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.loadbalancer.ConfigurationBasedServerList;
import com.netflix.loadbalancer.Server;
import com.netflix.niws.loadbalancer.DiscoveryEnabledServer;
import java.util.List;
public class MantisConfigurationBasedServerList extends ConfigurationBasedServerList {
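    // Derives servers from a comma-separated Ribbon config value such as
    // "api.ribbon.listOfServers=host1:7101,host2:7101" (the property MantisServerStartup
    // rewrites on leader changes). Each host:port is wrapped in a DiscoveryEnabledServer
    // backed by a synthetic InstanceInfo so it can be treated like a Eureka server.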
@Override
protected List<Server> derive(String value) {
List<Server> list = Lists.newArrayList();
if (!Strings.isNullOrEmpty(value)) {
for (String s : value.split(",")) {
Server server = new Server(s.trim());
InstanceInfo instanceInfo =
InstanceInfo.Builder.newBuilder()
.setAppName("mantismasterv2")
.setIPAddr(server.getHost())
.setPort(server.getPort())
.build();
list.add(new DiscoveryEnabledServer(instanceInfo, false, true));
}
}
return list;
}
}
| 168 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx | Create_ds/mantis-api/src/main/java/io/mantisrx/api/Constants.java | package io.mantisrx.api;
import lombok.experimental.UtilityClass;
@UtilityClass
public class Constants {
public static final String numMessagesCounterName = "numSinkMessages";
public static final String numDroppedMessagesCounterName = "numDroppedSinkMessages";
public static final String numBytesCounterName = "numSinkBytes";
public static final String numDroppedBytesCounterName = "numDroppedSinkBytes";
public static final String drainTriggeredCounterName = "drainTriggered";
public static final String numIncomingMessagesCounterName = "numIncomingMessages";
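    // Server-Sent Events framing: each event is written as "data: <payload>\r\n\r\n".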
public static final String SSE_DATA_SUFFIX = "\r\n\r\n";
public static final String SSE_DATA_PREFIX = "data: ";
public static final long TunnelPingIntervalSecs = 12;
public static final String TunnelPingMessage = "MantisApiTunnelPing";
public static final String TunnelPingParamName = "MantisApiTunnelPingEnabled";
public static final String OriginRegionTagName = "originRegion";
public static final String ClientIdTagName = "clientId";
public static final String TagsParamName = "MantisApiTag";
public static final String TagNameValDelimiter = ":";
public static final String metaErrorMsgHeader = "mantis.meta.error.message";
public static final String metaOriginName = "mantis.meta.origin";
public static final String numRemoteBytesCounterName = "numRemoteSinkBytes";
public static final String numRemoteMessagesCounterName = "numRemoteMessages";
public static final String numSseErrorsCounterName = "numSseErrors";
public static final String DUMMY_TIMER_DATA = "DUMMY_TIMER_DATA";
public static final String MANTISAPI_CACHED_HEADER = "x-nflx-mantisapi-cached";
}
| 169 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx/api | Create_ds/mantis-api/src/main/java/io/mantisrx/api/filters/MasterCacheLoader.java | package io.mantisrx.api.filters;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.inject.Inject;
import com.netflix.config.DynamicBooleanProperty;
import com.netflix.config.DynamicIntProperty;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Spectator;
import com.netflix.spectator.api.patterns.PolledMeter;
import com.netflix.zuul.filters.http.HttpOutboundSyncFilter;
import com.netflix.zuul.message.http.HttpResponseMessage;
import io.mantisrx.api.Constants;
import lombok.extern.slf4j.Slf4j;
import java.util.concurrent.TimeUnit;
@Slf4j
public class MasterCacheLoader extends HttpOutboundSyncFilter {
@Override
public boolean needsBodyBuffered(HttpResponseMessage message) {
return true;
}
private static DynamicBooleanProperty cacheEnabled = new DynamicBooleanProperty("mantisapi.cache.enabled", false);
private static DynamicIntProperty cacheSize = new DynamicIntProperty("mantisapi.cache.size", 1000);
private static DynamicIntProperty cacheDurationSeconds = new DynamicIntProperty("mantisapi.cache.seconds", 1);
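    // Note: the size and TTL properties are read once when this class is initialized;
    // changing them at runtime does not rebuild the cache instance.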
    public static final Cache<String, String> masterCache = CacheBuilder.newBuilder()
            .maximumSize(cacheSize.get())
            .expireAfterWrite(cacheDurationSeconds.get(), TimeUnit.SECONDS)
            .recordStats() // without recordStats(), Cache#stats() always reports zeros
            .build();
    @Inject
    public MasterCacheLoader(Registry registry) {
        // The cache size is a gauge rather than a monotonic counter.
        PolledMeter.using(registry)
                .withName("mantis.api.cache.size")
                .withTag(new BasicTag("id", "api"))
                .monitorValue(masterCache, Cache::size);
        // CacheStats is an immutable snapshot, so poll the live cache for fresh
        // stats instead of capturing a single snapshot at construction time.
        PolledMeter.using(registry)
                .withName("mantis.api.cache.hitCount")
                .withTag(new BasicTag("id", "api"))
                .monitorMonotonicCounter(masterCache, cache -> cache.stats().hitCount());
        PolledMeter.using(registry)
                .withName("mantis.api.cache.missCount")
                .withTag(new BasicTag("id", "api"))
                .monitorMonotonicCounter(masterCache, cache -> cache.stats().missCount());
}
@Override
public HttpResponseMessage apply(HttpResponseMessage input) {
String key = input.getInboundRequest().getPathAndQuery();
String responseBody = input.getBodyAsText();
if (null != responseBody && cacheEnabled.get()) {
masterCache.put(key, responseBody);
}
return input;
}
@Override
public int filterOrder() {
return 999;
}
@Override
public boolean shouldFilter(HttpResponseMessage msg) {
return msg.getOutboundRequest().getContext().getRouteVIP() != null
&& msg.getOutboundRequest().getContext().getRouteVIP().equalsIgnoreCase("api")
&& msg.getInboundRequest().getMethod().equalsIgnoreCase("get")
&& msg.getHeaders().getAll(Constants.MANTISAPI_CACHED_HEADER).size() == 0; // Set by the MasterCacheHitChecker, ensures we aren't re-caching.
}
}
| 170 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx/api | Create_ds/mantis-api/src/main/java/io/mantisrx/api/filters/Artifacts.java | package io.mantisrx.api.filters;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Strings;
import com.netflix.zuul.filters.http.HttpSyncEndpoint;
import com.netflix.zuul.message.http.HttpHeaderNames;
import com.netflix.zuul.message.http.HttpRequestMessage;
import com.netflix.zuul.message.http.HttpResponseMessage;
import com.netflix.zuul.message.http.HttpResponseMessageImpl;
import io.mantisrx.api.proto.Artifact;
import io.mantisrx.api.services.artifacts.ArtifactManager;
import io.netty.handler.codec.http.HttpHeaderValues;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.vavr.control.Try;
import lombok.extern.slf4j.Slf4j;
import javax.inject.Inject;
import java.util.List;
import java.util.Optional;
@Slf4j
public class Artifacts extends HttpSyncEndpoint {
private final ArtifactManager artifactManager;
private final ObjectMapper objectMapper;
public static final String PATH_SPEC = "/api/v1/artifacts";
@Override
public boolean needsBodyBuffered(HttpRequestMessage input) {
        return input.getMethod().equalsIgnoreCase("post");
}
@Inject
public Artifacts(ArtifactManager artifactManager, ObjectMapper objectMapper) {
this.artifactManager = artifactManager;
this.objectMapper = objectMapper;
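        // Seed placeholder artifacts so the listing endpoint has entries to serve;
        // these look like demo/test fixtures rather than real job artifacts.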
artifactManager.putArtifact(new Artifact("mantis.json", 0, new byte[0]));
artifactManager.putArtifact(new Artifact("mantis.zip", 0, new byte[0]));
}
@Override
public HttpResponseMessage apply(HttpRequestMessage request) {
        if (request.getMethod().equalsIgnoreCase("get")) {
String fileName = request.getPath().replaceFirst("^" + PATH_SPEC + "/?", "");
if (Strings.isNullOrEmpty(fileName)) {
List<String> files = artifactManager
.getArtifacts();
Try<String> serialized = Try.of(() -> objectMapper.writeValueAsString(files));
return serialized.map(body -> {
HttpResponseMessage response = new HttpResponseMessageImpl(request.getContext(), request, 200);
response.getHeaders().set(HttpHeaderNames.CONTENT_TYPE.toString(), HttpHeaderValues.APPLICATION_JSON.toString());
response.setBodyAsText(body);
return response;
}).getOrElseGet(t -> {
HttpResponseMessage response = new HttpResponseMessageImpl(request.getContext(), request, 500);
response.getHeaders().set(HttpHeaderNames.CONTENT_TYPE.toString(), HttpHeaderValues.TEXT_PLAIN.toString());
response.setBodyAsText(t.getMessage());
return response;
});
} else {
Optional<Artifact> artifact = artifactManager.getArtifact(fileName);
return artifact.map(art -> {
HttpResponseMessage response = new HttpResponseMessageImpl(request.getContext(), request,
HttpResponseStatus.OK.code());
response.setBody(art.getContent());
response.getHeaders().set(HttpHeaderNames.CONTENT_TYPE,
fileName.endsWith("json")
? HttpHeaderValues.APPLICATION_JSON.toString()
: HttpHeaderValues.APPLICATION_OCTET_STREAM.toString());
response.getHeaders().set("Content-Disposition",
String.format("attachment; filename=\"%s\"", fileName));
return response;
}).orElseGet(() -> {
HttpResponseMessage response = new HttpResponseMessageImpl(request.getContext(), request,
HttpResponseStatus.NOT_FOUND.code());
response.setBody(new byte[]{});
return response;
});
}
        } else if (request.getMethod().equalsIgnoreCase("post")) {
byte[] body = request.getBody();
artifactManager.putArtifact(new Artifact("testing.json", body.length, body));
HttpResponseMessage response = new HttpResponseMessageImpl(request.getContext(), request,
HttpResponseStatus.OK.code());
return response;
}
HttpResponseMessage response = new HttpResponseMessageImpl(request.getContext(), request, HttpResponseStatus.METHOD_NOT_ALLOWED.code());
response.setBodyAsText(HttpResponseStatus.METHOD_NOT_ALLOWED.reasonPhrase());
response.getHeaders().set(HttpHeaderNames.CONTENT_TYPE.toString(), HttpHeaderValues.TEXT_PLAIN.toString());
return response;
}
}
| 171 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx/api | Create_ds/mantis-api/src/main/java/io/mantisrx/api/filters/Routes.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.filters;
import com.netflix.zuul.context.SessionContext;
import com.netflix.zuul.filters.http.HttpInboundSyncFilter;
import com.netflix.zuul.message.http.HttpRequestMessage;
import com.netflix.zuul.netty.filter.ZuulEndPointRunner;
import lombok.extern.slf4j.Slf4j;
@Slf4j
public class Routes extends HttpInboundSyncFilter {
@Override
public int filterOrder() {
return 0;
}
@Override
public boolean shouldFilter(HttpRequestMessage httpRequestMessage) {
return true;
}
@Override
public HttpRequestMessage apply(HttpRequestMessage request) {
SessionContext context = request.getContext();
String path = request.getPath();
if (request.getMethod().toLowerCase().equals("options")) {
context.setEndpoint(Options.class.getCanonicalName());
} else if (path.equalsIgnoreCase("/healthcheck")) {
context.setEndpoint(Healthcheck.class.getCanonicalName());
} else if (path.equalsIgnoreCase("/favicon.ico")) {
context.setEndpoint(Favicon.class.getCanonicalName());
} else if (path.startsWith(Artifacts.PATH_SPEC)) {
context.setEndpoint(Artifacts.class.getCanonicalName());
} else if (path.equalsIgnoreCase("/api/v1/mantis/publish/streamDiscovery")) {
context.setEndpoint(AppStreamDiscovery.class.getCanonicalName());
} else if (path.startsWith("/jobClusters/discoveryInfo")) {
String jobCluster = request.getPath().replaceFirst(JobDiscoveryInfoCacheHitChecker.PATH_SPEC + "/", "");
String newUrl = "/api/v1/jobClusters/" + jobCluster + "/latestJobDiscoveryInfo";
request.setPath(newUrl);
context.setEndpoint(ZuulEndPointRunner.PROXY_ENDPOINT_FILTER_NAME);
context.setRouteVIP("api");
} else if (path.equalsIgnoreCase("/api/v1/mql/parse")) {
context.setEndpoint(MQLParser.class.getCanonicalName());
} else if (path.equals(MREAppStreamToJobClusterMapping.PATH_SPEC)) {
context.setEndpoint(MREAppStreamToJobClusterMapping.class.getCanonicalName());
} else {
context.setEndpoint(ZuulEndPointRunner.PROXY_ENDPOINT_FILTER_NAME);
context.setRouteVIP("api");
}
return request;
}
}
| 172 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx/api | Create_ds/mantis-api/src/main/java/io/mantisrx/api/filters/MetricsReporting.java | package io.mantisrx.api.filters;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Timer;
import com.netflix.zuul.filters.http.HttpOutboundSyncFilter;
import com.netflix.zuul.message.http.HttpResponseMessage;
import com.netflix.zuul.netty.SpectatorUtils;
import io.vavr.Tuple;
import io.vavr.Tuple2;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
public class MetricsReporting extends HttpOutboundSyncFilter {
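    // Timers and counters are memoized per (path, status-class) pair so each request
    // reuses an existing meter instead of re-creating one in the Spectator registry.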
private static final ConcurrentHashMap<Tuple2<String, String>, Timer> timerCache = new ConcurrentHashMap<>(500);
private static final ConcurrentHashMap<Tuple2<String, String>, Counter> counterCache = new ConcurrentHashMap<>(500);
@Override
public HttpResponseMessage apply(HttpResponseMessage input) {
String path = input.getInboundRequest().getPath();
String status = statusCodeToStringRepresentation(input.getStatus());
        // Record latency; Zuul no longer records total request time itself.
timerCache.computeIfAbsent(Tuple.of(path, status),
tuple -> SpectatorUtils.newTimer("latency", path,"status", status))
.record(input.getContext().getOriginReportedDuration(), TimeUnit.NANOSECONDS);
// Record Request
counterCache.computeIfAbsent(Tuple.of(path, status),
tuple -> SpectatorUtils.newCounter("requests", path, "status", status))
.increment();
return input;
}
private String statusCodeToStringRepresentation(Integer statusCode) {
return (statusCode / 100) + "xx";
}
@Override
public int filterOrder() {
return -100;
}
@Override
public boolean shouldFilter(HttpResponseMessage msg) {
return true;
}
}
| 173 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx/api | Create_ds/mantis-api/src/main/java/io/mantisrx/api/filters/Healthcheck.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.filters;
import com.netflix.zuul.filters.http.HttpSyncEndpoint;
import com.netflix.zuul.message.http.HttpRequestMessage;
import com.netflix.zuul.message.http.HttpResponseMessage;
import com.netflix.zuul.message.http.HttpResponseMessageImpl;
import com.netflix.zuul.stats.status.StatusCategoryUtils;
import com.netflix.zuul.stats.status.ZuulStatusCategory;
public class Healthcheck extends HttpSyncEndpoint {
@Override
public HttpResponseMessage apply(HttpRequestMessage request) {
HttpResponseMessage resp = new HttpResponseMessageImpl(request.getContext(), request, 200);
resp.setBodyAsText("mantisapi healthy");
StatusCategoryUtils.setStatusCategory(request.getContext(), ZuulStatusCategory.SUCCESS);
return resp;
}
}
| 174 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx/api | Create_ds/mantis-api/src/main/java/io/mantisrx/api/filters/Favicon.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.filters;
import com.netflix.zuul.filters.http.HttpSyncEndpoint;
import com.netflix.zuul.message.http.HttpRequestMessage;
import com.netflix.zuul.message.http.HttpResponseMessage;
import com.netflix.zuul.message.http.HttpResponseMessageImpl;
import com.netflix.zuul.stats.status.StatusCategoryUtils;
import com.netflix.zuul.stats.status.ZuulStatusCategory;
/**
* Returns an empty 200 response to prevent 404s on Favicon.
*/
public class Favicon extends HttpSyncEndpoint {
@Override
public HttpResponseMessage apply(HttpRequestMessage request) {
HttpResponseMessage resp = new HttpResponseMessageImpl(request.getContext(), request, 200);
resp.setBody(new byte[0]);
StatusCategoryUtils.setStatusCategory(request.getContext(), ZuulStatusCategory.SUCCESS);
return resp;
}
}
| 175 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx/api | Create_ds/mantis-api/src/main/java/io/mantisrx/api/filters/MREAppStreamToJobClusterMapping.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.filters;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.discovery.proto.AppJobClustersMap;
import com.netflix.zuul.filters.http.HttpSyncEndpoint;
import com.netflix.zuul.message.http.HttpHeaderNames;
import com.netflix.zuul.message.http.HttpRequestMessage;
import com.netflix.zuul.message.http.HttpResponseMessage;
import com.netflix.zuul.message.http.HttpResponseMessageImpl;
import io.mantisrx.api.services.AppStreamDiscoveryService;
import io.mantisrx.shaded.com.google.common.base.Preconditions;
import io.netty.handler.codec.http.HttpHeaderValues;
import io.vavr.control.Try;
import java.util.List;
import javax.inject.Inject;
public class MREAppStreamToJobClusterMapping extends HttpSyncEndpoint {
private final AppStreamDiscoveryService appStreamDiscoveryService;
private final ObjectMapper objectMapper;
private static final String APPNAME_QUERY_PARAM = "app";
public static final String PATH_SPEC = "/api/v1/mantis/publish/streamJobClusterMap";
@Inject
public MREAppStreamToJobClusterMapping(AppStreamDiscoveryService appStreamDiscoveryService,
ObjectMapper objectMapper) {
Preconditions.checkArgument(appStreamDiscoveryService != null, "appStreamDiscoveryService cannot be null");
this.appStreamDiscoveryService = appStreamDiscoveryService;
Preconditions.checkArgument(objectMapper != null, "objectMapper cannot be null");
this.objectMapper = objectMapper;
}
@Override
public HttpResponseMessage apply(HttpRequestMessage request) {
List<String> apps = request.getQueryParams().get(APPNAME_QUERY_PARAM);
Try<AppJobClustersMap> payloadTry = Try.ofCallable(() -> appStreamDiscoveryService.getAppJobClustersMap(apps));
Try<String> serialized = payloadTry.flatMap(payload -> Try.of(() -> objectMapper.writeValueAsString(payload)));
return serialized.map(body -> {
HttpResponseMessage resp = new HttpResponseMessageImpl(request.getContext(), request, 200);
resp.setBodyAsText(body);
resp.getHeaders().set(HttpHeaderNames.CONTENT_TYPE.toString(), HttpHeaderValues.APPLICATION_JSON.toString());
return resp;
        }).getOrElseGet(t -> {
            // Failures (lookup or serialization) should surface as a 500, not a 200.
            HttpResponseMessage resp = new HttpResponseMessageImpl(request.getContext(), request, 500);
resp.setBodyAsText(t.getMessage());
resp.getHeaders().set(HttpHeaderNames.CONTENT_TYPE.toString(), HttpHeaderValues.TEXT_PLAIN.toString());
return resp;
});
}
}
| 176 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx/api | Create_ds/mantis-api/src/main/java/io/mantisrx/api/filters/AppStreamDiscovery.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.filters;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import com.google.inject.Inject;
import com.netflix.zuul.filters.http.HttpSyncEndpoint;
import com.netflix.zuul.message.http.HttpHeaderNames;
import com.netflix.zuul.message.http.HttpRequestMessage;
import com.netflix.zuul.message.http.HttpResponseMessage;
import com.netflix.zuul.message.http.HttpResponseMessageImpl;
import io.mantisrx.api.proto.AppDiscoveryMap;
import io.mantisrx.api.services.AppStreamDiscoveryService;
import com.netflix.zuul.stats.status.StatusCategoryUtils;
import com.netflix.zuul.stats.status.ZuulStatusCategory;
import io.netty.handler.codec.http.HttpHeaderValues;
import io.vavr.control.Either;
import io.vavr.control.Try;
import lombok.extern.slf4j.Slf4j;
import java.util.List;
import java.util.function.Function;
@Slf4j
public class AppStreamDiscovery extends HttpSyncEndpoint {
private final AppStreamDiscoveryService appStreamDiscoveryService;
private final ObjectMapper objectMapper;
private static final String APPNAME_QUERY_PARAM = "app";
@Inject
public AppStreamDiscovery(AppStreamDiscoveryService appStreamDiscoveryService,
ObjectMapper objectMapper) {
this.appStreamDiscoveryService = appStreamDiscoveryService;
this.objectMapper = objectMapper;
}
@Override
public HttpResponseMessage apply(HttpRequestMessage request) {
List<String> apps = request.getQueryParams().get(APPNAME_QUERY_PARAM);
Either<String, AppDiscoveryMap> result = appStreamDiscoveryService.getAppDiscoveryMap(apps);
return result.bimap(errorMessage -> {
HttpResponseMessage resp = new HttpResponseMessageImpl(request.getContext(), request, 500);
resp.setBodyAsText(errorMessage);
StatusCategoryUtils.setStatusCategory(request.getContext(), ZuulStatusCategory.FAILURE_LOCAL);
return resp;
}, appDiscoveryMap -> {
Try<String> serialized = Try.of(() -> objectMapper.writeValueAsString(appDiscoveryMap));
if (serialized.isSuccess()) {
StatusCategoryUtils.setStatusCategory(request.getContext(), ZuulStatusCategory.SUCCESS);
HttpResponseMessage resp = new HttpResponseMessageImpl(request.getContext(), request, 200);
resp.getHeaders().set(HttpHeaderNames.CONTENT_TYPE.toString(), HttpHeaderValues.APPLICATION_JSON.toString());
resp.setBodyAsText(serialized.get());
return resp;
} else {
StatusCategoryUtils.setStatusCategory(request.getContext(), ZuulStatusCategory.FAILURE_LOCAL);
HttpResponseMessage resp = new HttpResponseMessageImpl(request.getContext(), request, 500);
resp.getHeaders().set(HttpHeaderNames.CONTENT_TYPE.toString(), HttpHeaderValues.TEXT_PLAIN.toString());
                resp.setBodyAsText(serialized.getCause().getMessage());
return resp;
}
}).getOrElseGet(Function.identity());
}
}
| 177 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx/api | Create_ds/mantis-api/src/main/java/io/mantisrx/api/filters/MasterCacheHitChecker.java | package io.mantisrx.api.filters;
import com.google.inject.Inject;
import com.google.inject.name.Named;
import com.netflix.config.DynamicBooleanProperty;
import com.netflix.spectator.api.Counter;
import com.netflix.zuul.filters.http.HttpInboundSyncFilter;
import com.netflix.zuul.message.http.HttpHeaderNames;
import com.netflix.zuul.message.http.HttpRequestMessage;
import com.netflix.zuul.message.http.HttpResponseMessage;
import com.netflix.zuul.message.http.HttpResponseMessageImpl;
import com.netflix.zuul.netty.SpectatorUtils;
import io.mantisrx.api.Constants;
import io.mantisrx.api.Util;
import io.netty.handler.codec.http.HttpHeaderValues;
import lombok.extern.slf4j.Slf4j;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
@Slf4j
public class MasterCacheHitChecker extends HttpInboundSyncFilter {
private static DynamicBooleanProperty cacheEnabled = new DynamicBooleanProperty("mantisapi.cache.enabled", false);
private static final ConcurrentHashMap<String, Counter> cacheHitCounters = new ConcurrentHashMap<>(500);
private static final ConcurrentHashMap<String, Counter> cacheMissCounters = new ConcurrentHashMap<>(500);
private static final String CACHE_HIT_COUNTER_NAME = "mantis.api.cache.count";
private final List<String> pushPrefixes;
@Inject
public MasterCacheHitChecker(@Named("push-prefixes") List<String> pushPrefixes) {
super();
this.pushPrefixes = pushPrefixes;
}
@Override
public HttpRequestMessage apply(HttpRequestMessage request) {
if(cacheEnabled.get()) {
String key = request.getPathAndQuery();
String bodyText = MasterCacheLoader.masterCache.getIfPresent(key);
if (bodyText != null) { // Cache Hit
HttpResponseMessage response = new HttpResponseMessageImpl(request.getContext(), request, 200);
response.setBodyAsText(bodyText);
response.getHeaders().set(HttpHeaderNames.CONTENT_TYPE.toString(), HttpHeaderValues.APPLICATION_JSON.toString());
response.getHeaders().set(Constants.MANTISAPI_CACHED_HEADER, "true");
request.getContext().setStaticResponse(response);
cacheHitCounters.computeIfAbsent(key,
k -> SpectatorUtils.newCounter(CACHE_HIT_COUNTER_NAME, "api", "endpoint", k, "class", "hit"))
.increment();
} else { // Cache Miss
cacheMissCounters.computeIfAbsent(key,
k -> SpectatorUtils.newCounter(CACHE_HIT_COUNTER_NAME, "api", "endpoint", k, "class", "miss"))
.increment();
}
}
return request;
}
@Override
public int filterOrder() {
return 0;
}
@Override
public boolean shouldFilter(HttpRequestMessage msg) {
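        // Only GETs against the master API are cacheable; push/streaming prefixes are
        // excluded because their responses are long-lived streams, not documents.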
String key = msg.getPathAndQuery();
return msg.getMethod().equalsIgnoreCase("get")
&& key.startsWith("/api")
&& !Util.startsWithAnyOf(key, pushPrefixes);
}
}
| 178 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx/api | Create_ds/mantis-api/src/main/java/io/mantisrx/api/filters/Options.java | package io.mantisrx.api.filters;
import com.netflix.zuul.filters.http.HttpSyncEndpoint;
import com.netflix.zuul.message.http.HttpRequestMessage;
import com.netflix.zuul.message.http.HttpResponseMessage;
import com.netflix.zuul.message.http.HttpResponseMessageImpl;
import com.netflix.zuul.stats.status.StatusCategoryUtils;
import com.netflix.zuul.stats.status.ZuulStatusCategory;
import io.netty.handler.codec.http.HttpResponseStatus;
public class Options extends HttpSyncEndpoint {
@Override
public HttpResponseMessage apply(HttpRequestMessage request) {
HttpResponseMessage resp = new HttpResponseMessageImpl(request.getContext(), request, HttpResponseStatus.OK.code());
resp.setBodyAsText("");
StatusCategoryUtils.setStatusCategory(request.getContext(), ZuulStatusCategory.SUCCESS);
return resp;
}
}
| 179 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx/api | Create_ds/mantis-api/src/main/java/io/mantisrx/api/filters/OutboundHeaders.java | package io.mantisrx.api.filters;
import com.netflix.zuul.filters.http.HttpOutboundSyncFilter;
import com.netflix.zuul.message.HeaderName;
import com.netflix.zuul.message.http.HttpResponseMessage;
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.util.AsciiString;
public class OutboundHeaders extends HttpOutboundSyncFilter {
@Override
public boolean shouldFilter(HttpResponseMessage msg) {
return true;
}
@Override
public HttpResponseMessage apply(HttpResponseMessage resp) {
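        // Apply permissive CORS headers: the allow-origin header is always overwritten,
        // while the remaining headers are only added if the origin did not set them.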
upsert(resp, HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
addHeaderIfMissing(resp, HttpHeaderNames.ACCESS_CONTROL_ALLOW_HEADERS,
"Origin, X-Requested-With, Accept, Content-Type, Cache-Control");
addHeaderIfMissing(resp, HttpHeaderNames.ACCESS_CONTROL_ALLOW_METHODS,
"GET, OPTIONS, PUT, POST, DELETE, CONNECT");
addHeaderIfMissing(resp, HttpHeaderNames.ACCESS_CONTROL_ALLOW_CREDENTIALS, "true");
return resp;
}
private void upsert(HttpResponseMessage resp, AsciiString name, String value) {
resp.getHeaders().remove(new HeaderName(name.toString()));
resp.getHeaders().add(new HeaderName(name.toString()), value);
}
private void addHeaderIfMissing(HttpResponseMessage resp, AsciiString name, String value) {
if (resp.getHeaders().getAll(name.toString()).size() == 0) {
resp.getHeaders().add(name.toString(), value);
}
}
@Override
public int filterOrder() {
return 0;
}
}
| 180 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx/api | Create_ds/mantis-api/src/main/java/io/mantisrx/api/filters/JobDiscoveryInfoCacheHitChecker.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.filters;
import com.google.common.base.Strings;
import com.netflix.config.DynamicBooleanProperty;
import com.netflix.zuul.filters.http.HttpInboundSyncFilter;
import com.netflix.zuul.message.http.HttpHeaderNames;
import com.netflix.zuul.message.http.HttpRequestMessage;
import com.netflix.zuul.message.http.HttpResponseMessage;
import com.netflix.zuul.message.http.HttpResponseMessageImpl;
import io.mantisrx.api.Constants;
import io.mantisrx.api.services.JobDiscoveryService;
import io.netty.handler.codec.http.HttpHeaderValues;
import lombok.extern.slf4j.Slf4j;
@Slf4j
public class JobDiscoveryInfoCacheHitChecker extends HttpInboundSyncFilter {
public static final String PATH_SPEC = "/jobClusters/discoveryInfo";
private static DynamicBooleanProperty cacheEnabled = new DynamicBooleanProperty("mantisapi.cache.enabled", false);
@Override
public int filterOrder() {
return -1;
}
@Override
public boolean shouldFilter(HttpRequestMessage httpRequestMessage) {
String jobCluster = httpRequestMessage.getPath().replaceFirst(PATH_SPEC + "/", "");
return httpRequestMessage.getPath().startsWith(PATH_SPEC)
&& JobDiscoveryService.jobDiscoveryInfoCache.getIfPresent(jobCluster) != null;
}
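// apply() re-checks the dynamic cacheEnabled flag, so caching can be disabled at runtime even when an entry exists.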
@Override
public HttpRequestMessage apply(HttpRequestMessage request) {
String jobCluster = request.getPath().replaceFirst(PATH_SPEC + "/", "");
HttpResponseMessage resp = new HttpResponseMessageImpl(request.getContext(), request, 200);
String bodyText = JobDiscoveryService.jobDiscoveryInfoCache.getIfPresent(jobCluster);
if (cacheEnabled.get() && !Strings.isNullOrEmpty(bodyText)) {
log.info("Serving cached job discovery info for {}.", jobCluster);
resp.setBodyAsText(bodyText);
resp.getHeaders().set(HttpHeaderNames.CONTENT_TYPE.toString(), HttpHeaderValues.APPLICATION_JSON.toString());
resp.getHeaders().set(Constants.MANTISAPI_CACHED_HEADER, "true");
request.getContext().setStaticResponse(resp);
}
return request;
}
}
| 181 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx/api | Create_ds/mantis-api/src/main/java/io/mantisrx/api/filters/JobDiscoveryCacheLoader.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.filters;
import com.netflix.config.DynamicBooleanProperty;
import com.netflix.zuul.filters.http.HttpOutboundSyncFilter;
import com.netflix.zuul.message.http.HttpResponseMessage;
import io.mantisrx.api.Constants;
import io.mantisrx.api.services.JobDiscoveryService;
import lombok.extern.slf4j.Slf4j;
@Slf4j
public class JobDiscoveryCacheLoader extends HttpOutboundSyncFilter {
private static DynamicBooleanProperty cacheEnabled = new DynamicBooleanProperty("mantisapi.cache.enabled", false);
@Override
public boolean needsBodyBuffered(HttpResponseMessage message) {
return true;
}
@Override
public int filterOrder() {
return 999; // Runs late in the outbound chain; the exact order is not significant.
}
@Override
public boolean shouldFilter(HttpResponseMessage response) {
return response.getOutboundRequest().getPath().matches("^/api/v1/jobClusters/.*/latestJobDiscoveryInfo$")
&& response.getHeaders().getAll(Constants.MANTISAPI_CACHED_HEADER).isEmpty()
&& cacheEnabled.get();
}
@Override
public HttpResponseMessage apply(HttpResponseMessage response) {
String jobCluster = response.getOutboundRequest().getPath()
.replaceFirst("^/api/v1/jobClusters/", "")
.replaceFirst("/latestJobDiscoveryInfo$", "");
String responseBody = response.getBodyAsText();
if (null != responseBody) {
log.info("Caching latest job discovery info for {}.", jobCluster);
JobDiscoveryService.jobDiscoveryInfoCache.put(jobCluster, responseBody);
}
return response;
}
}
| 182 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx/api | Create_ds/mantis-api/src/main/java/io/mantisrx/api/filters/MQLParser.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.filters;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.zuul.filters.http.HttpSyncEndpoint;
import com.netflix.zuul.message.http.HttpRequestMessage;
import com.netflix.zuul.message.http.HttpResponseMessage;
import com.netflix.zuul.message.http.HttpResponseMessageImpl;
import io.mantisrx.mql.shaded.clojure.java.api.Clojure;
import io.mantisrx.mql.shaded.clojure.lang.IFn;
import lombok.Value;
import lombok.extern.slf4j.Slf4j;
import java.nio.charset.Charset;
@Slf4j
public class MQLParser extends HttpSyncEndpoint {
private static IFn require = Clojure.var("io.mantisrx.mql.shaded.clojure.core", "require");
static {
require.invoke(Clojure.read("io.mantisrx.mql.core"));
require.invoke(Clojure.read("io.mantisrx.mql.jvm.interfaces.server"));
require.invoke(Clojure.read("io.mantisrx.mql.jvm.interfaces.core"));
}
private static IFn parses = Clojure.var("io.mantisrx.mql.jvm.interfaces.core", "parses?");
private static IFn getParseError = Clojure.var("io.mantisrx.mql.jvm.interfaces.core", "get-parse-error");
private static final ObjectMapper objectMapper = new ObjectMapper();
public @Value class MQLParseResult {
private boolean success;
private String criterion;
private String message;
}
@Override
public HttpResponseMessage apply(HttpRequestMessage input) {
String query = input.getQueryParams().getFirst("criterion");
boolean parses = parses(query);
String parseError = getParseError(query);
MQLParseResult result = new MQLParseResult(parses, query, parses ? "" : parseError);
try {
HttpResponseMessage response = new HttpResponseMessageImpl(input.getContext(), input, 200);
response.setBody(objectMapper.writeValueAsBytes(result));
return response;
} catch (JsonProcessingException ex) {
HttpResponseMessage response = new HttpResponseMessageImpl(input.getContext(), input, 500);
response.setBody(getErrorResponse(ex.getMessage()).getBytes(Charset.defaultCharset()));
return response;
}
}
/**
 * A predicate indicating whether the MQL parser considers the given query valid.
 * @param query A String representing the MQL query.
 * @return A boolean indicating whether the query parses successfully.
 */
public static Boolean parses(String query) {
return (Boolean) parses.invoke(query);
}
/**
 * A convenience function for determining what went wrong when a call to
 * {@link #parses(String)} returns false.
 * @param query A String representing the MQL query.
 * @return A String describing the parse error for an MQL query, or null if no parse error occurred.
 */
public static String getParseError(String query) {
return (String) getParseError.invoke(query);
}
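// Usage sketch (the query strings below are illustrative; the actual grammar lives in io.mantisrx.mql):
//   MQLParser.parses("select * from stream")  -> true,  getParseError(...) -> null
//   MQLParser.parses("select * frm stream")   -> false, getParseError(...) -> a description of the failure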
private String getErrorResponse(String exceptionMessage) {
StringBuilder sb = new StringBuilder(50);
sb.append("{\"success\": false, \"messages\": \"");
sb.append(exceptionMessage);
sb.append("\"}");
return sb.toString();
}
}
| 183 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx/api | Create_ds/mantis-api/src/main/java/io/mantisrx/api/proto/Artifact.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.proto;
import java.util.Objects;
public class Artifact {
private long sizeInBytes;
private String fileName;
private byte[] content;
public Artifact(String fileName, long sizeInBytes, byte[] content) {
Objects.requireNonNull(fileName, "File name cannot be null");
Objects.requireNonNull(content, "Content cannot be null");
this.fileName = fileName;
this.sizeInBytes = sizeInBytes;
this.content = content;
}
public long getSizeInBytes() {
return this.sizeInBytes;
}
public byte[] getContent() {
return content;
}
public String getFileName() {
return fileName;
}
}
| 184 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx/api | Create_ds/mantis-api/src/main/java/io/mantisrx/api/proto/AppDiscoveryMap.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.proto;
import io.mantisrx.server.core.JobSchedulingInfo;
import java.util.HashMap;
import java.util.Map;
public class AppDiscoveryMap {
public final String version;
public final Long timestamp;
public final Map<String, Map<String, JobSchedulingInfo>> mappings = new HashMap<>();
public AppDiscoveryMap(String version, Long timestamp) {
this.version = version;
this.timestamp = timestamp;
}
public void addMapping(String app, String stream, JobSchedulingInfo schedulingInfo) {
mappings.computeIfAbsent(app, k -> new HashMap<>()).put(stream, schedulingInfo);
}
}
| 185 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx/api | Create_ds/mantis-api/src/main/java/io/mantisrx/api/tunnel/MantisCrossRegionalClient.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.tunnel;
import io.netty.buffer.ByteBuf;
import mantis.io.reactivex.netty.protocol.http.client.HttpClient;
import mantis.io.reactivex.netty.protocol.http.sse.ServerSentEvent;
public interface MantisCrossRegionalClient {
HttpClient<ByteBuf, ServerSentEvent> getSecureSseClient(String region);
HttpClient<String, ByteBuf> getSecureRestClient(String region);
}
| 186 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx/api | Create_ds/mantis-api/src/main/java/io/mantisrx/api/tunnel/RegionData.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.tunnel;
import lombok.Value;
public @Value class RegionData {
private final String region;
private final boolean success;
private final String data;
private final int responseCode;
}
| 187 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx/api | Create_ds/mantis-api/src/main/java/io/mantisrx/api/tunnel/NoOpCrossRegionalClient.java | package io.mantisrx.api.tunnel;
import io.netty.buffer.ByteBuf;
import mantis.io.reactivex.netty.protocol.http.client.HttpClient;
import mantis.io.reactivex.netty.protocol.http.sse.ServerSentEvent;
public class NoOpCrossRegionalClient implements MantisCrossRegionalClient {
@Override
public HttpClient<ByteBuf, ServerSentEvent> getSecureSseClient(String region) {
throw new UnsupportedOperationException();
}
@Override
public HttpClient<String, ByteBuf> getSecureRestClient(String region) {
throw new UnsupportedOperationException();
}
}
| 188 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx/api | Create_ds/mantis-api/src/main/java/io/mantisrx/api/tunnel/CrossRegionHandler.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.tunnel;
import com.google.common.annotations.VisibleForTesting;
import com.netflix.config.DynamicIntProperty;
import com.netflix.config.DynamicStringProperty;
import com.netflix.spectator.api.Counter;
import com.netflix.zuul.netty.SpectatorUtils;
import io.mantisrx.api.Constants;
import io.mantisrx.api.Util;
import io.mantisrx.api.push.ConnectionBroker;
import io.mantisrx.api.push.PushConnectionDetails;
import io.mantisrx.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http.DefaultFullHttpResponse;
import io.netty.handler.codec.http.DefaultHttpHeaders;
import io.netty.handler.codec.http.DefaultHttpResponse;
import io.netty.handler.codec.http.FullHttpRequest;
import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.handler.codec.http.HttpHeaderValues;
import io.netty.handler.codec.http.HttpHeaders;
import io.netty.handler.codec.http.HttpMethod;
import io.netty.handler.codec.http.HttpResponse;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http.HttpUtil;
import io.netty.handler.codec.http.HttpVersion;
import io.netty.handler.codec.http.QueryStringDecoder;
import io.netty.handler.codec.http.QueryStringEncoder;
import lombok.extern.slf4j.Slf4j;
import mantis.io.reactivex.netty.channel.StringTransformer;
import mantis.io.reactivex.netty.protocol.http.client.HttpClient;
import mantis.io.reactivex.netty.protocol.http.client.HttpClientRequest;
import mantis.io.reactivex.netty.protocol.http.client.HttpClientResponse;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import rx.Observable;
import rx.Scheduler;
import rx.Subscription;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
import static io.mantisrx.api.Constants.OriginRegionTagName;
import static io.mantisrx.api.Constants.TagNameValDelimiter;
import static io.mantisrx.api.Constants.TagsParamName;
import static io.mantisrx.api.Constants.TunnelPingMessage;
import static io.mantisrx.api.Constants.TunnelPingParamName;
import static io.mantisrx.api.Util.getLocalRegion;
@Slf4j
public class CrossRegionHandler extends SimpleChannelInboundHandler<FullHttpRequest> {
private final List<String> pushPrefixes;
private final MantisCrossRegionalClient mantisCrossRegionalClient;
private final ConnectionBroker connectionBroker;
private final Scheduler scheduler;
private Subscription subscription = null;
private String uriForLogging = null;
private ScheduledExecutorService scheduledExecutorService = new ScheduledThreadPoolExecutor(1,
new ThreadFactoryBuilder().setNameFormat("cross-region-handler-drainer-%d").build());
private ScheduledFuture drainFuture;
private final DynamicIntProperty queueCapacity = new DynamicIntProperty("io.mantisrx.api.push.queueCapacity", 1000);
private final DynamicIntProperty writeIntervalMillis = new DynamicIntProperty("io.mantisrx.api.push.writeIntervalMillis", 50);
private final DynamicStringProperty tunnelRegionsProperty = new DynamicStringProperty("io.mantisrx.api.tunnel.regions", Util.getLocalRegion());
public CrossRegionHandler(
List<String> pushPrefixes,
MantisCrossRegionalClient mantisCrossRegionalClient,
ConnectionBroker connectionBroker,
Scheduler scheduler) {
super(true);
this.pushPrefixes = pushPrefixes;
this.mantisCrossRegionalClient = mantisCrossRegionalClient;
this.connectionBroker = connectionBroker;
this.scheduler = scheduler;
}
@Override
protected void channelRead0(ChannelHandlerContext ctx, FullHttpRequest request) throws Exception {
uriForLogging = request.uri();
if (HttpUtil.is100ContinueExpected(request)) {
send100Continue(ctx);
}
if (isCrossRegionStreamingPath(request.uri())) {
handleRemoteSse(ctx, request);
} else { // REST
if (request.method() == HttpMethod.HEAD) {
handleHead(ctx, request);
} else if (request.method() == HttpMethod.GET) {
handleRestGet(ctx, request);
} else if (request.method() == HttpMethod.POST) {
handleRestPost(ctx, request);
} else {
ctx.fireChannelRead(request.retain());
}
}
}
//
// REST Implementations
//
private void handleHead(ChannelHandlerContext ctx, FullHttpRequest request) {
HttpHeaders headers = new DefaultHttpHeaders();
headers.add(HttpHeaderNames.CONTENT_TYPE, HttpHeaderValues.APPLICATION_JSON);
headers.add(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
headers.add(HttpHeaderNames.ACCESS_CONTROL_ALLOW_HEADERS,
"Origin, X-Requested-With, Accept, Content-Type, Cache-Control");
headers.add(HttpHeaderNames.ACCESS_CONTROL_ALLOW_METHODS,
"GET, OPTIONS, PUT, POST, DELETE, CONNECT");
HttpResponse response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1,
HttpResponseStatus.OK,
Unpooled.copiedBuffer("", Charset.defaultCharset()),
headers,
new DefaultHttpHeaders());
ctx.writeAndFlush(response)
.addListener(__ -> ctx.close());
}
@VisibleForTesting
List<String> getTunnelRegions() {
return parseRegionCsv(tunnelRegionsProperty.get());
}
private static List<String> parseRegionCsv(String regionCsv) {
return Arrays.stream(regionCsv.split(","))
.map(String::trim)
.map(String::toLowerCase)
.collect(Collectors.toList());
}
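// e.g. "us-east-1, US-West-2" -> ["us-east-1", "us-west-2"]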
@VisibleForTesting
List<String> parseRegionsInUri(String uri) {
final String regionString = getRegion(uri);
if (isAllRegion(regionString)) {
return getTunnelRegions();
} else if (regionString.contains(",")) {
return parseRegionCsv(regionString);
} else {
return Collections.singletonList(regionString);
}
}
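// e.g. "/region/all/..." expands to every region in getTunnelRegions(), while
// "/region/us-east-1,eu-west-1/..." yields exactly the two listed regions.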
private void handleRestGet(ChannelHandlerContext ctx, FullHttpRequest request) {
List<String> regions = parseRegionsInUri(request.uri());
String uri = getTail(request.uri());
log.info("Relaying GET URI {} to {} (original uri {}).", uri, regions, request.uri());
Observable.from(regions)
.flatMap(region -> {
final AtomicReference<Throwable> ref = new AtomicReference<>();
HttpClientRequest<String> rq = HttpClientRequest.create(HttpMethod.GET, uri);
return Observable
.create((Observable.OnSubscribe<HttpClient<String, ByteBuf>>) subscriber ->
subscriber.onNext(mantisCrossRegionalClient.getSecureRestClient(region)))
.flatMap(client -> {
ref.set(null);
return client.submit(rq)
.flatMap(resp -> {
final int code = resp.getStatus().code();
if (code >= 500) {
throw new RuntimeException(resp.getStatus().toString());
}
return responseToRegionData(region, resp);
})
.onErrorReturn(t -> {
log.warn("Error getting response from remote master: " + t.getMessage());
ref.set(t);
return new RegionData(region, false, t.getMessage(), 0);
});
})
.map(data -> {
final Throwable t = ref.get();
if (t != null)
throw new RuntimeException(t);
return data;
})
.retryWhen(Util.getRetryFunc(log, uri + " in " + region))
.take(1)
.onErrorReturn(t -> new RegionData(region, false, t.getMessage(), 0));
})
.reduce(new ArrayList<RegionData>(3), (regionDatas, regionData) -> {
regionDatas.add(regionData);
return regionDatas;
})
.observeOn(scheduler)
.subscribeOn(scheduler)
.take(1)
.subscribe(result -> writeDataAndCloseChannel(ctx, result));
}
private void handleRestPost(ChannelHandlerContext ctx, FullHttpRequest request) {
String uri = getTail(request.uri());
List<String> regions = parseRegionsInUri(request.uri());
log.info("Relaying POST URI {} to {} (original uri {}).", uri, regions, request.uri());
final AtomicReference<Throwable> ref = new AtomicReference<>();
String content = request.content().toString(Charset.defaultCharset());
Observable.from(regions)
.flatMap(region -> {
HttpClientRequest<String> rq = HttpClientRequest.create(HttpMethod.POST, uri);
rq.withRawContent(content, StringTransformer.DEFAULT_INSTANCE);
return Observable
.create((Observable.OnSubscribe<HttpClient<String, ByteBuf>>) subscriber ->
subscriber.onNext(mantisCrossRegionalClient.getSecureRestClient(region)))
.flatMap(client -> client.submit(rq)
.flatMap(resp -> {
final int code = resp.getStatus().code();
if (code >= 500) {
throw new RuntimeException(resp.getStatus().toString() + " in " + region);
}
return responseToRegionData(region, resp);
})
.onErrorReturn(t -> {
log.warn("Error getting response from remote master: " + t.getMessage());
ref.set(t);
return new RegionData(region, false, t.getMessage(), 0);
}))
.map(data -> {
final Throwable t = ref.get();
if (t != null)
throw new RuntimeException(t);
return data;
})
.retryWhen(Util.getRetryFunc(log, uri + " in " + region))
.take(1)
.onErrorReturn(t -> new RegionData(region, false, t.getMessage(), 0));
})
.reduce(new ArrayList<RegionData>(), (regionDatas, regionData) -> {
regionDatas.add(regionData);
return regionDatas;
})
.observeOn(scheduler)
.subscribeOn(scheduler)
.take(1)
.subscribe(result -> writeDataAndCloseChannel(ctx, result));
}
private void handleRemoteSse(ChannelHandlerContext ctx, FullHttpRequest request) {
HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1,
HttpResponseStatus.OK);
HttpHeaders headers = response.headers();
headers.add(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
headers.add(HttpHeaderNames.ACCESS_CONTROL_ALLOW_HEADERS, "Origin, X-Requested-With, Accept, Content-Type, Cache-Control");
headers.set(HttpHeaderNames.CONTENT_TYPE, "text/event-stream");
headers.set(HttpHeaderNames.CACHE_CONTROL, "no-cache, no-store, max-age=0, must-revalidate");
headers.set(HttpHeaderNames.PRAGMA, HttpHeaderValues.NO_CACHE);
headers.set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED);
response.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE);
ctx.writeAndFlush(response);
final boolean sendThroughTunnelPings = hasTunnelPingParam(request.uri());
final String uri = uriWithTunnelParamsAdded(getTail(request.uri()));
List<String> regions = parseRegionsInUri(request.uri());
log.info("Initiating remote SSE connection to {} in {} (original URI: {}).", uri, regions, request.uri());
PushConnectionDetails pcd = PushConnectionDetails.from(uri, regions);
String[] tags = Util.getTaglist(request.uri(), pcd.target, getRegion(request.uri()));
Counter numDroppedBytesCounter = SpectatorUtils.newCounter(Constants.numDroppedBytesCounterName, pcd.target, tags);
Counter numDroppedMessagesCounter = SpectatorUtils.newCounter(Constants.numDroppedMessagesCounterName, pcd.target, tags);
Counter numMessagesCounter = SpectatorUtils.newCounter(Constants.numMessagesCounterName, pcd.target, tags);
Counter numBytesCounter = SpectatorUtils.newCounter(Constants.numBytesCounterName, pcd.target, tags);
Counter drainTriggeredCounter = SpectatorUtils.newCounter(Constants.drainTriggeredCounterName, pcd.target, tags);
Counter numIncomingMessagesCounter = SpectatorUtils.newCounter(Constants.numIncomingMessagesCounterName, pcd.target, tags);
BlockingQueue<String> queue = new LinkedBlockingQueue<String>(queueCapacity.get());
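// Drain loop: periodically flush queued events to the channel in batches rather than writing per message.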
drainFuture = scheduledExecutorService.scheduleAtFixedRate(() -> {
try {
if (queue.size() > 0 && ctx.channel().isWritable()) {
drainTriggeredCounter.increment();
final List<String> items = new ArrayList<>(queue.size());
synchronized (queue) {
queue.drainTo(items);
}
for (String data : items) {
ctx.write(Unpooled.copiedBuffer(data, StandardCharsets.UTF_8));
numMessagesCounter.increment();
numBytesCounter.increment(data.length());
}
ctx.flush();
}
} catch (Exception ex) {
log.error("Error writing to channel", ex);
}
}, writeIntervalMillis.get(), writeIntervalMillis.get(), TimeUnit.MILLISECONDS);
subscription = connectionBroker.connect(pcd)
.filter(event -> !event.equalsIgnoreCase(TunnelPingMessage) || sendThroughTunnelPings)
.doOnNext(event -> {
numIncomingMessagesCounter.increment();
if (!Constants.DUMMY_TIMER_DATA.equals(event)) {
String data = Constants.SSE_DATA_PREFIX + event + Constants.SSE_DATA_SUFFIX;
boolean offer = false;
synchronized (queue) {
offer = queue.offer(data);
}
if (!offer) {
numDroppedBytesCounter.increment(data.length());
numDroppedMessagesCounter.increment();
}
}
})
.subscribe();
}
@Override
public void channelUnregistered(ChannelHandlerContext ctx) throws Exception {
log.info("Channel {} is unregistered. URI: {}", ctx.channel(), uriForLogging);
unsubscribeIfSubscribed();
super.channelUnregistered(ctx);
}
@Override
public void channelInactive(ChannelHandlerContext ctx) throws Exception {
log.info("Channel {} is inactive. URI: {}", ctx.channel(), uriForLogging);
unsubscribeIfSubscribed();
super.channelInactive(ctx);
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
log.warn("Exception caught by channel {}. URI: {}", ctx.channel(), uriForLogging, cause);
unsubscribeIfSubscribed();
// This is the tail of handlers. We should close the channel between the server and the client,
// essentially causing the client to disconnect and terminate.
ctx.close();
}
/** Unsubscribe if it's subscribed. */
private void unsubscribeIfSubscribed() {
if (subscription != null && !subscription.isUnsubscribed()) {
log.info("SSE unsubscribing subscription with URI: {}", uriForLogging);
subscription.unsubscribe();
}
if (drainFuture != null) {
drainFuture.cancel(false);
}
if (scheduledExecutorService != null) {
scheduledExecutorService.shutdown();
}
}
private boolean hasTunnelPingParam(String uri) {
return uri != null && uri.contains(TunnelPingParamName);
}
private Observable<RegionData> responseToRegionData(String region, HttpClientResponse<ByteBuf> resp) {
final int code = resp.getStatus().code();
return resp.getContent()
.collect(Unpooled::buffer,
ByteBuf::writeBytes)
.map(byteBuf -> new RegionData(region, true,
byteBuf.toString(StandardCharsets.UTF_8), code)
)
.onErrorReturn(t -> new RegionData(region, false, t.getMessage(), code));
}
private void writeDataAndCloseChannel(ChannelHandlerContext ctx, ArrayList<RegionData> result) {
try {
String serialized = responseToString(result);
ByteBuf content = Unpooled.copiedBuffer(serialized, Charset.defaultCharset());
HttpResponse response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1,
        HttpResponseStatus.OK,
        content);
HttpHeaders headers = response.headers();
headers.add(HttpHeaderNames.CONTENT_TYPE, HttpHeaderValues.APPLICATION_JSON + "; charset=utf-8");
headers.add(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
headers.add(HttpHeaderNames.ACCESS_CONTROL_ALLOW_HEADERS,
        "Origin, X-Requested-With, Accept, Content-Type, Cache-Control");
headers.add(HttpHeaderNames.ACCESS_CONTROL_ALLOW_METHODS,
        "GET, OPTIONS, PUT, POST, DELETE, CONNECT");
// Content-Length must be the encoded byte count, not the character count.
headers.add(HttpHeaderNames.CONTENT_LENGTH, content.readableBytes());
ctx.writeAndFlush(response)
.addListener(__ -> ctx.close());
} catch (Exception ex) {
log.error("Error serializing cross regional response: {}", ex.getMessage(), ex);
}
}
private String uriWithTunnelParamsAdded(String uri) {
QueryStringDecoder queryStringDecoder = new QueryStringDecoder(uri);
QueryStringEncoder queryStringEncoder = new QueryStringEncoder(queryStringDecoder.path());
queryStringDecoder.parameters().forEach((key, value) -> value.forEach(val -> queryStringEncoder.addParam(key, val)));
queryStringEncoder.addParam(TunnelPingParamName, "true");
queryStringEncoder.addParam(TagsParamName, OriginRegionTagName + TagNameValDelimiter + getLocalRegion());
return queryStringEncoder.toString();
}
private static void send100Continue(ChannelHandlerContext ctx) {
FullHttpResponse response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1,
HttpResponseStatus.CONTINUE);
ctx.writeAndFlush(response);
}
private boolean isCrossRegionStreamingPath(String uri) {
return Util.startsWithAnyOf(getTail(uri), this.pushPrefixes);
}
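// Strips the leading "/region/<region>/" segment, e.g. "/region/us-east-1/api/v1/jobs" -> "/api/v1/jobs".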
private static String getTail(String uri) {
return uri.replaceFirst("^/region/.*?/", "/");
}
/**
* Fetches a region from a URI if it contains one, returns garbage if not.
*
* @param uri The uri from which to fetch the region.
* @return The region embedded in the URI, always lower case.
* */
private static String getRegion(String uri) {
return uri.replaceFirst("^/region/", "")
.replaceFirst("/.*$", "")
.trim()
.toLowerCase();
}
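// e.g. getRegion("/region/US-East-1/api/v1/jobs") -> "us-east-1"; without a region prefix the result is meaningless.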
/**
 * Checks for the special {@code all} region string, which connects to all regions
 * specified by {@link CrossRegionHandler#getTunnelRegions()}.
 */
private static boolean isAllRegion(String region) {
return region != null && region.trim().equalsIgnoreCase("all");
}
private static String responseToString(List<RegionData> dataList) {
StringBuilder sb = new StringBuilder("[");
boolean first = true;
for (RegionData data : dataList) {
if (first)
first = false;
else {
sb.append(",");
}
if (data.isSuccess()) {
String outputData = getForceWrappedJson(data.getData(), data.getRegion(), data.getResponseCode(), null);
sb.append(outputData);
} else {
sb.append(getForceWrappedJson("", data.getRegion(), data.getResponseCode(), data.getData()));
}
}
sb.append("]");
return sb.toString();
}
private final static String regKey = "mantis.meta.origin";
private final static String errKey = "mantis.meta.errorString";
private final static String codeKey = "mantis.meta.origin.response.code";
public static String getWrappedJson(String data, String region, String err) {
return getWrappedJsonIntl(data, region, err, 0, false);
}
public static String getForceWrappedJson(String data, String region, int code, String err) {
return getWrappedJsonIntl(data, region, err, code, true);
}
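// Annotates the payload with origin metadata. If the data is already a JSON object it is annotated
// in place; a JSON array or plain string is wrapped under a "response" key, but only when forceJson
// is set, otherwise it is returned untouched.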
private static String getWrappedJsonIntl(String data, String region, String err, int code, boolean forceJson) {
try {
JSONObject o = new JSONObject(data);
o.put(regKey, region);
if (err != null && !err.isEmpty())
o.put(errKey, err);
if (code > 0)
o.put(codeKey, "" + code);
return o.toString();
} catch (JSONException e) {
try {
JSONArray a = new JSONArray(data);
if (!forceJson)
return data;
JSONObject o = new JSONObject();
o.put(regKey, region);
if (err != null && !err.isEmpty())
o.put(errKey, err);
if (code > 0)
o.put(codeKey, "" + code);
o.accumulate("response", a);
return o.toString();
} catch (JSONException ae) {
if (!forceJson)
return data;
JSONObject o = new JSONObject();
o.put(regKey, region);
if (err != null && !err.isEmpty())
o.put(errKey, err);
if (code > 0)
o.put(codeKey, "" + code);
o.put("response", data);
return o.toString();
}
}
}
}
| 189 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx/api | Create_ds/mantis-api/src/main/java/io/mantisrx/api/push/MantisWebSocketFrameHandler.java | package io.mantisrx.api.push;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.ArrayList;
import java.util.List;
import com.netflix.config.DynamicIntProperty;
import com.netflix.spectator.api.Counter;
import com.netflix.zuul.netty.SpectatorUtils;
import io.mantisrx.api.Constants;
import io.mantisrx.api.Util;
import io.mantisrx.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http.websocketx.TextWebSocketFrame;
import io.netty.handler.codec.http.websocketx.WebSocketServerProtocolHandler;
import io.netty.util.ReferenceCountUtil;
import lombok.extern.slf4j.Slf4j;
import rx.Subscription;
@Slf4j
public class MantisWebSocketFrameHandler extends SimpleChannelInboundHandler<TextWebSocketFrame> {
private final ConnectionBroker connectionBroker;
private final DynamicIntProperty queueCapacity = new DynamicIntProperty("io.mantisrx.api.push.queueCapacity", 1000);
private final DynamicIntProperty writeIntervalMillis = new DynamicIntProperty("io.mantisrx.api.push.writeIntervalMillis", 50);
private Subscription subscription;
private String uri;
private ScheduledExecutorService scheduledExecutorService = new ScheduledThreadPoolExecutor(1,
new ThreadFactoryBuilder().setNameFormat("websocket-handler-drainer-%d").build());
private ScheduledFuture drainFuture;
public MantisWebSocketFrameHandler(ConnectionBroker broker) {
super(true);
this.connectionBroker = broker;
}
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
if (evt.getClass() == WebSocketServerProtocolHandler.HandshakeComplete.class) {
WebSocketServerProtocolHandler.HandshakeComplete complete = (WebSocketServerProtocolHandler.HandshakeComplete) evt;
uri = complete.requestUri();
final PushConnectionDetails pcd = PushConnectionDetails.from(uri);
log.info("Request to URI '{}' is a WebSSocket upgrade, removing the SSE handler", uri);
if (ctx.pipeline().get(MantisSSEHandler.class) != null) {
ctx.pipeline().remove(MantisSSEHandler.class);
}
final String[] tags = Util.getTaglist(uri, pcd.target);
Counter numDroppedBytesCounter = SpectatorUtils.newCounter(Constants.numDroppedBytesCounterName, pcd.target, tags);
Counter numDroppedMessagesCounter = SpectatorUtils.newCounter(Constants.numDroppedMessagesCounterName, pcd.target, tags);
Counter numMessagesCounter = SpectatorUtils.newCounter(Constants.numMessagesCounterName, pcd.target, tags);
Counter numBytesCounter = SpectatorUtils.newCounter(Constants.numBytesCounterName, pcd.target, tags);
Counter drainTriggeredCounter = SpectatorUtils.newCounter(Constants.drainTriggeredCounterName, pcd.target, tags);
Counter numIncomingMessagesCounter = SpectatorUtils.newCounter(Constants.numIncomingMessagesCounterName, pcd.target, tags);
BlockingQueue<String> queue = new LinkedBlockingQueue<>(queueCapacity.get());
drainFuture = scheduledExecutorService.scheduleAtFixedRate(() -> {
try {
if (queue.size() > 0 && ctx.channel().isWritable()) {
drainTriggeredCounter.increment();
final List<String> items = new ArrayList<>(queue.size());
synchronized (queue) {
queue.drainTo(items);
}
for (String data : items) {
ctx.write(new TextWebSocketFrame(data));
numMessagesCounter.increment();
numBytesCounter.increment(data.length());
}
ctx.flush();
}
} catch (Exception ex) {
log.error("Error writing to channel", ex);
}
}, writeIntervalMillis.get(), writeIntervalMillis.get(), TimeUnit.MILLISECONDS);
this.subscription = this.connectionBroker.connect(pcd)
.doOnNext(event -> {
numIncomingMessagesCounter.increment();
if (!Constants.DUMMY_TIMER_DATA.equals(event)) {
boolean offer = false;
synchronized (queue) {
offer = queue.offer(event);
}
if (!offer) {
numDroppedBytesCounter.increment(event.length());
numDroppedMessagesCounter.increment();
}
}
})
.subscribe();
} else {
ReferenceCountUtil.retain(evt);
super.userEventTriggered(ctx, evt);
}
}
@Override
public void channelUnregistered(ChannelHandlerContext ctx) throws Exception {
log.info("Channel {} is unregistered. URI: {}", ctx.channel(), uri);
unsubscribeIfSubscribed();
super.channelUnregistered(ctx);
}
@Override
public void channelInactive(ChannelHandlerContext ctx) throws Exception {
log.info("Channel {} is inactive. URI: {}", ctx.channel(), uri);
unsubscribeIfSubscribed();
super.channelInactive(ctx);
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
log.warn("Exception caught by channel {}. URI: {}", ctx.channel(), uri, cause);
unsubscribeIfSubscribed();
// This is the tail of handlers. We should close the channel between the server and the client,
// essentially causing the client to disconnect and terminate.
ctx.close();
}
@Override
protected void channelRead0(ChannelHandlerContext ctx, TextWebSocketFrame msg) {
// No op.
}
/** Unsubscribe if it's subscribed. */
private void unsubscribeIfSubscribed() {
if (subscription != null && !subscription.isUnsubscribed()) {
log.info("WebSocket unsubscribing subscription with URI: {}", uri);
subscription.unsubscribe();
}
if (drainFuture != null) {
drainFuture.cancel(false);
}
if (scheduledExecutorService != null) {
scheduledExecutorService.shutdown();
}
}
}
| 190 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx/api | Create_ds/mantis-api/src/main/java/io/mantisrx/api/push/ConnectionBroker.java | package io.mantisrx.api.push;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.google.inject.name.Named;
import com.netflix.spectator.api.Counter;
import com.netflix.zuul.netty.SpectatorUtils;
import io.mantisrx.api.Constants;
import io.mantisrx.api.Util;
import io.mantisrx.api.services.JobDiscoveryService;
import io.mantisrx.api.tunnel.MantisCrossRegionalClient;
import io.mantisrx.client.MantisClient;
import io.mantisrx.client.SinkConnectionFunc;
import io.mantisrx.client.SseSinkConnectionFunction;
import io.mantisrx.common.MantisServerSentEvent;
import io.mantisrx.runtime.parameter.SinkParameters;
import io.mantisrx.server.worker.client.MetricsClient;
import io.mantisrx.server.worker.client.SseWorkerConnectionFunction;
import io.mantisrx.server.worker.client.WorkerConnectionsStatus;
import io.mantisrx.server.worker.client.WorkerMetricsClient;
import io.vavr.control.Try;
import lombok.extern.slf4j.Slf4j;
import mantis.io.reactivex.netty.protocol.http.client.HttpClientRequest;
import mantis.io.reactivex.netty.protocol.http.client.HttpClientResponse;
import mantis.io.reactivex.netty.protocol.http.sse.ServerSentEvent;
import rx.Observable;
import rx.Observer;
import rx.Scheduler;
import rx.functions.Action1;
import rx.schedulers.Schedulers;
import java.util.List;
import java.util.Map;
import java.util.WeakHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import static io.mantisrx.api.Constants.TunnelPingMessage;
import static io.mantisrx.api.Util.getLocalRegion;
@Slf4j
@Singleton
public class ConnectionBroker {
private final MantisClient mantisClient;
private final MantisCrossRegionalClient mantisCrossRegionalClient;
private final WorkerMetricsClient workerMetricsClient;
private final JobDiscoveryService jobDiscoveryService;
private final Scheduler scheduler;
private final ObjectMapper objectMapper;
private final Map<PushConnectionDetails, Observable<String>> connectionCache = new WeakHashMap<>();
@Inject
public ConnectionBroker(MantisClient mantisClient,
MantisCrossRegionalClient mantisCrossRegionalClient,
WorkerMetricsClient workerMetricsClient,
@Named("io-scheduler") Scheduler scheduler,
ObjectMapper objectMapper) {
this.mantisClient = mantisClient;
this.mantisCrossRegionalClient = mantisCrossRegionalClient;
this.workerMetricsClient = workerMetricsClient;
this.jobDiscoveryService = JobDiscoveryService.getInstance(mantisClient, scheduler);
this.scheduler = scheduler;
this.objectMapper = objectMapper;
}
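// Job status, scheduling-info, and discovery streams are cached in connectionCache and replayed to
// late subscribers; sink (connect-by-name/id) and metrics connections are built fresh on each call.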
public Observable<String> connect(PushConnectionDetails details) {
if (!connectionCache.containsKey(details)) {
switch (details.type) {
case CONNECT_BY_NAME:
return getConnectByNameFor(details)
.subscribeOn(scheduler)
.doOnUnsubscribe(() -> {
log.info("Purging {} from cache.", details);
connectionCache.remove(details);
})
.doOnCompleted(() -> {
log.info("Purging {} from cache.", details);
connectionCache.remove(details);
})
.share();
case CONNECT_BY_ID:
return getConnectByIdFor(details)
.subscribeOn(scheduler)
.doOnUnsubscribe(() -> {
log.info("Purging {} from cache.", details);
connectionCache.remove(details);
})
.doOnCompleted(() -> {
log.info("Purging {} from cache.", details);
connectionCache.remove(details);
})
.share();
case METRICS:
return getWorkerMetrics(details)
.subscribeOn(scheduler)
.doOnUnsubscribe(() -> {
log.info("Purging {} from cache.", details);
connectionCache.remove(details);
})
.doOnCompleted(() -> {
log.info("Purging {} from cache.", details);
connectionCache.remove(details);
});
case JOB_STATUS:
connectionCache.put(details,
mantisClient
.getJobStatusObservable(details.target)
.subscribeOn(scheduler)
.doOnCompleted(() -> {
log.info("Purging {} from cache.", details);
connectionCache.remove(details);
})
.doOnUnsubscribe(() -> {
log.info("Purging {} from cache.", details);
connectionCache.remove(details);
})
.replay(25)
.autoConnect());
break;
case JOB_SCHEDULING_INFO:
connectionCache.put(details,
mantisClient.getSchedulingChanges(details.target)
.subscribeOn(scheduler)
.map(changes -> Try.of(() -> objectMapper.writeValueAsString(changes)).getOrElse("Error"))
.doOnCompleted(() -> {
log.info("Purging {} from cache.", details);
connectionCache.remove(details);
})
.doOnUnsubscribe(() -> {
log.info("Purging {} from cache.", details);
connectionCache.remove(details);
})
.replay(1)
.autoConnect());
break;
case JOB_CLUSTER_DISCOVERY:
connectionCache.put(details,
jobDiscoveryService.jobDiscoveryInfoStream(jobDiscoveryService.key(JobDiscoveryService.LookupType.JOB_CLUSTER, details.target))
.subscribeOn(scheduler)
.map(jdi -> Try.of(() -> objectMapper.writeValueAsString(jdi)).getOrElse("Error"))
.doOnCompleted(() -> {
log.info("Purging {} from cache.", details);
connectionCache.remove(details);
})
.doOnUnsubscribe(() -> {
log.info("Purging {} from cache.", details);
connectionCache.remove(details);
})
.replay(1)
.autoConnect());
break;
}
log.info("Caching connection for: {}", details);
}
return connectionCache.get(details);
}
//
// Helpers
//
private Observable<String> getConnectByNameFor(PushConnectionDetails details) {
return details.regions.isEmpty()
? getResults(false, this.mantisClient, details.target, details.getSinkparameters())
.flatMap(m -> m)
.map(MantisServerSentEvent::getEventAsString)
: getRemoteDataObservable(details.getUri(), details.target, details.getRegions().asJava());
}
private Observable<String> getConnectByIdFor(PushConnectionDetails details) {
return details.getRegions().isEmpty()
? getResults(true, this.mantisClient, details.target, details.getSinkparameters())
.flatMap(m -> m)
.map(MantisServerSentEvent::getEventAsString)
: getRemoteDataObservable(details.getUri(), details.target, details.getRegions().asJava());
}
private static SinkConnectionFunc<MantisServerSentEvent> getSseConnFunc(final String target, SinkParameters sinkParameters) {
return new SseSinkConnectionFunction(true,
t -> log.warn("Reconnecting to sink of job " + target + " after error: " + t.getMessage()),
sinkParameters);
}
private static Observable<Observable<MantisServerSentEvent>> getResults(boolean isJobId, MantisClient mantisClient,
final String target, SinkParameters sinkParameters) {
final AtomicBoolean hasError = new AtomicBoolean();
return isJobId ?
mantisClient.getSinkClientByJobId(target, getSseConnFunc(target, sinkParameters), null).getResults() :
mantisClient.getSinkClientByJobName(target, getSseConnFunc(target, sinkParameters), null)
.switchMap(serverSentEventSinkClient -> {
if (serverSentEventSinkClient.hasError()) {
hasError.set(true);
return Observable.error(new Exception(serverSentEventSinkClient.getError()));
}
return serverSentEventSinkClient.getResults();
})
.takeWhile(o -> !hasError.get());
}
//
// Tunnel
//
private Observable<String> getRemoteDataObservable(String uri, String target, List<String> regions) {
return Observable.from(regions)
.flatMap(region -> {
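// Rewrites the opening brace of each JSON event so the event is tagged with its origin region.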
final String originReplacement = "\\{\"" + Constants.metaOriginName + "\": \"" + region + "\", ";
if (region.equalsIgnoreCase(getLocalRegion())) {
return this.connect(PushConnectionDetails.from(uri))
.map(datum -> datum.replaceFirst("^\\{", originReplacement));
} else {
log.info("Connecting to remote region {} at {}.", region, uri);
return mantisCrossRegionalClient.getSecureSseClient(region)
.submit(HttpClientRequest.createGet(uri))
.retryWhen(Util.getRetryFunc(log, uri + " in " + region))
.doOnError(throwable -> log.warn(
"Error getting response from remote SSE server for uri {} in region {}: {}",
uri, region, throwable.getMessage(), throwable)
).flatMap(remoteResponse -> {
if (remoteResponse.getStatus().code() != 200) {
log.warn("Unexpected response from remote sink for uri {} region {}: {}", uri, region, remoteResponse.getStatus().reasonPhrase());
String err = remoteResponse.getHeaders().get(Constants.metaErrorMsgHeader);
if (err == null || err.isEmpty())
err = remoteResponse.getStatus().reasonPhrase();
return Observable.<MantisServerSentEvent>error(new Exception(err))
.map(datum -> datum.getEventAsString());
}
return clientResponseToObservable(remoteResponse, target, region, uri)
.map(datum -> datum.replaceFirst("^\\{", originReplacement))
.doOnError(t -> log.error(t.getMessage()));
})
.subscribeOn(scheduler)
.observeOn(scheduler)
.doOnError(t -> log.warn("Error streaming in remote data ({}). Will retry: {}", region, t.getMessage(), t))
.doOnCompleted(() -> log.info(String.format("remote sink connection complete for uri %s, region=%s", uri, region)));
}
})
.observeOn(scheduler)
.subscribeOn(scheduler)
.doOnError(t -> log.error("Error in flatMapped cross-regional observable for {}", uri, t));
}
private Observable<String> clientResponseToObservable(HttpClientResponse<ServerSentEvent> response, String target, String
region, String uri) {
Counter numRemoteBytes = SpectatorUtils.newCounter(Constants.numRemoteBytesCounterName, target, "region", region);
Counter numRemoteMessages = SpectatorUtils.newCounter(Constants.numRemoteMessagesCounterName, target, "region", region);
Counter numSseErrors = SpectatorUtils.newCounter(Constants.numSseErrorsCounterName, target, "region", region);
return response.getContent()
.doOnError(t -> log.warn(t.getMessage()))
.timeout(3 * Constants.TunnelPingIntervalSecs, TimeUnit.SECONDS)
.doOnError(t -> log.warn("Timeout getting data from remote {} connection for {}", region, uri))
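// Drop tunnel ping heartbeats; error-typed SSE events always pass through this filter.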
.filter(sse -> !(!sse.hasEventType() || !sse.getEventTypeAsString().startsWith("error:")) ||
!TunnelPingMessage.equals(sse.contentAsString()))
.map(t1 -> {
String data = "";
if (t1.hasEventType() && t1.getEventTypeAsString().startsWith("error:")) {
log.error("SSE has error, type=" + t1.getEventTypeAsString() + ", content=" + t1.contentAsString());
numSseErrors.increment();
throw new RuntimeException("Got error SSE event: " + t1.contentAsString());
}
try {
data = t1.contentAsString();
if (data != null) {
numRemoteBytes.increment(data.length());
numRemoteMessages.increment();
}
} catch (Exception e) {
log.error("Could not extract data from SSE " + e.getMessage(), e);
}
return data;
});
}
private Observable<String> getWorkerMetrics(PushConnectionDetails details) {
final String jobId = details.target;
SinkParameters metricNamesFilter = details.getSinkparameters();
final MetricsClient<MantisServerSentEvent> metricsClient = workerMetricsClient.getMetricsClientByJobId(jobId,
new SseWorkerConnectionFunction(true, new Action1<Throwable>() {
@Override
public void call(Throwable throwable) {
log.error("Metric connection error: " + throwable.getMessage());
try {
Thread.sleep(500);
} catch (InterruptedException ie) {
log.error("Interrupted waiting for retrying connection");
}
}
}, metricNamesFilter),
new Observer<WorkerConnectionsStatus>() {
@Override
public void onCompleted() {
log.info("got onCompleted in WorkerConnStatus obs");
}
@Override
public void onError(Throwable e) {
log.info("got onError in WorkerConnStatus obs");
}
@Override
public void onNext(WorkerConnectionsStatus workerConnectionsStatus) {
log.info("got WorkerConnStatus {}", workerConnectionsStatus);
}
});
return metricsClient
.getResults()
.flatMap(metrics -> metrics
.map(MantisServerSentEvent::getEventAsString));
}
}
| 191 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx/api | Create_ds/mantis-api/src/main/java/io/mantisrx/api/push/PushConnectionDetails.java | package io.mantisrx.api.push;
import io.mantisrx.runtime.parameter.SinkParameter;
import io.mantisrx.runtime.parameter.SinkParameters;
import io.netty.handler.codec.http.QueryStringDecoder;
import io.vavr.collection.List;
import io.vavr.control.Try;
import lombok.Value;
import java.util.stream.Collectors;
public @Value class PushConnectionDetails {
public enum TARGET_TYPE {
CONNECT_BY_NAME,
CONNECT_BY_ID,
JOB_STATUS,
JOB_SCHEDULING_INFO,
JOB_CLUSTER_DISCOVERY,
METRICS
}
private final String uri;
public final String target;
public final TARGET_TYPE type;
public final List<String> regions;
/**
* Determines the connection type for a given push connection.
*
* @param uri Request URI as returned by Netty's requestUri() methods. Expects leading slash.
 * @return The TARGET_TYPE requested by the URI.
*/
public static TARGET_TYPE determineTargetType(final String uri) {
if (uri.startsWith("/jobconnectbyname") || uri.startsWith("/api/v1/jobconnectbyname")) {
return TARGET_TYPE.CONNECT_BY_NAME;
} else if (uri.startsWith("/jobconnectbyid") || uri.startsWith("/api/v1/jobconnectbyid")) {
return TARGET_TYPE.CONNECT_BY_ID;
} else if (uri.startsWith("/jobstatus/") || uri.startsWith("/api/v1/jobstatus/")) {
return TARGET_TYPE.JOB_STATUS;
} else if (uri.startsWith("/api/v1/jobs/schedulingInfo/")) {
return TARGET_TYPE.JOB_SCHEDULING_INFO;
} else if (uri.startsWith("/jobClusters/discoveryInfoStream/")) {
return TARGET_TYPE.JOB_CLUSTER_DISCOVERY;
} else if (uri.startsWith("/api/v1/metrics/")) {
return TARGET_TYPE.METRICS;
} else {
throw new IllegalArgumentException("Unable to determine push connection type from URI: " + uri);
}
}
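// e.g. determineTargetType("/api/v1/jobconnectbyname/MyJobCluster?clientId=x") -> TARGET_TYPE.CONNECT_BY_NAME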
/**
* Determines the target for a push connection request. Typically a job name or id.
*
* @param uri Request URI as returned by Netty's requestUri() methods. Expects leading slash.
* @return The target requested by the URI.
*/
public static String determineTarget(final String uri) {
String sanitized = uri.replaceFirst("^/(api/v1/)?(jobconnectbyid|jobconnectbyname|jobstatus|jobs/schedulingInfo|jobClusters/discoveryInfoStream|metrics)/", "");
QueryStringDecoder queryStringDecoder = new QueryStringDecoder(sanitized);
return queryStringDecoder.path();
}
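// e.g. determineTarget("/api/v1/jobconnectbyid/myjob-42?sample=10") -> "myjob-42"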
//
// Computed Properties
//
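// Converts the request's query string into sink parameters; entries that fail to construct are silently dropped.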
public SinkParameters getSinkparameters() {
SinkParameters.Builder builder = new SinkParameters.Builder();
QueryStringDecoder queryStringDecoder = new QueryStringDecoder(uri);
builder.parameters(queryStringDecoder
.parameters()
.entrySet()
.stream()
.flatMap(entry -> entry.getValue()
.stream()
.map(val -> Try.of(() -> new SinkParameter(entry.getKey(), val)))
.filter(Try::isSuccess)
.map(Try::get))
.collect(Collectors.toList())
.toArray(new SinkParameter[]{}));
return builder.build();
}
//
// Static Factories
//
public static PushConnectionDetails from(String uri) {
return from(uri, List.empty());
}
public static PushConnectionDetails from(String uri, List<String> regions) {
return new PushConnectionDetails(uri, determineTarget(uri), determineTargetType(uri), regions);
}
public static PushConnectionDetails from(String uri, java.util.List<String> regions) {
return new PushConnectionDetails(uri, determineTarget(uri), determineTargetType(uri), List.ofAll(regions));
}
}
| 192 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx/api | Create_ds/mantis-api/src/main/java/io/mantisrx/api/push/MantisSSEHandler.java | package io.mantisrx.api.push;
import com.netflix.config.DynamicIntProperty;
import com.netflix.spectator.api.Counter;
import com.netflix.zuul.netty.SpectatorUtils;
import io.mantisrx.api.Constants;
import io.mantisrx.api.Util;
import io.mantisrx.server.core.master.MasterDescription;
import io.mantisrx.server.master.client.HighAvailabilityServices;
import io.mantisrx.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http.*;
import lombok.extern.slf4j.Slf4j;
import mantis.io.reactivex.netty.RxNetty;
import mantis.io.reactivex.netty.channel.StringTransformer;
import mantis.io.reactivex.netty.pipeline.PipelineConfigurator;
import mantis.io.reactivex.netty.pipeline.PipelineConfigurators;
import mantis.io.reactivex.netty.protocol.http.client.HttpClient;
import mantis.io.reactivex.netty.protocol.http.client.HttpClientRequest;
import mantis.io.reactivex.netty.protocol.http.client.HttpClientResponse;
import mantis.io.reactivex.netty.protocol.http.client.HttpResponseHeaders;
import rx.Observable;
import rx.Subscription;
import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
/**
 * HTTP handler for the WebSocket/SSE paths.
*/
@Slf4j
public class MantisSSEHandler extends SimpleChannelInboundHandler<FullHttpRequest> {
private final DynamicIntProperty queueCapacity = new DynamicIntProperty("io.mantisrx.api.push.queueCapacity", 1000);
private final DynamicIntProperty writeIntervalMillis = new DynamicIntProperty("io.mantisrx.api.push.writeIntervalMillis", 50);
private final ConnectionBroker connectionBroker;
private final HighAvailabilityServices highAvailabilityServices;
private final List<String> pushPrefixes;
private Subscription subscription;
private ScheduledExecutorService scheduledExecutorService = new ScheduledThreadPoolExecutor(1,
new ThreadFactoryBuilder().setNameFormat("sse-handler-drainer-%d").build());
private ScheduledFuture drainFuture;
private String uri;
public MantisSSEHandler(ConnectionBroker connectionBroker, HighAvailabilityServices highAvailabilityServices,
List<String> pushPrefixes) {
super(true);
this.connectionBroker = connectionBroker;
this.highAvailabilityServices = highAvailabilityServices;
this.pushPrefixes = pushPrefixes;
}
@Override
protected void channelRead0(ChannelHandlerContext ctx, FullHttpRequest request) throws Exception {
if (Util.startsWithAnyOf(request.uri(), pushPrefixes)
&& !isWebsocketUpgrade(request)) {
if (HttpUtil.is100ContinueExpected(request)) {
send100Continue(ctx);
}
HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1,
HttpResponseStatus.OK);
HttpHeaders headers = response.headers();
headers.add(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
headers.add(HttpHeaderNames.ACCESS_CONTROL_ALLOW_HEADERS, "Origin, X-Requested-With, Accept, Content-Type, Cache-Control");
headers.set(HttpHeaderNames.CONTENT_TYPE, "text/event-stream");
headers.set(HttpHeaderNames.CACHE_CONTROL, "no-cache, no-store, max-age=0, must-revalidate");
headers.set(HttpHeaderNames.PRAGMA, HttpHeaderValues.NO_CACHE);
headers.set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED);
response.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE);
ctx.writeAndFlush(response);
uri = request.uri();
final PushConnectionDetails pcd =
isSubmitAndConnect(request)
? new PushConnectionDetails(uri, jobSubmit(request), PushConnectionDetails.TARGET_TYPE.CONNECT_BY_ID, io.vavr.collection.List.empty())
: PushConnectionDetails.from(uri);
log.info("SSE Connecting for: {}", pcd);
boolean tunnelPingsEnabled = isTunnelPingsEnabled(uri);
final String[] tags = Util.getTaglist(uri, pcd.target);
Counter numDroppedBytesCounter = SpectatorUtils.newCounter(Constants.numDroppedBytesCounterName, pcd.target, tags);
Counter numDroppedMessagesCounter = SpectatorUtils.newCounter(Constants.numDroppedMessagesCounterName, pcd.target, tags);
Counter numMessagesCounter = SpectatorUtils.newCounter(Constants.numMessagesCounterName, pcd.target, tags);
Counter numBytesCounter = SpectatorUtils.newCounter(Constants.numBytesCounterName, pcd.target, tags);
Counter drainTriggeredCounter = SpectatorUtils.newCounter(Constants.drainTriggeredCounterName, pcd.target, tags);
Counter numIncomingMessagesCounter = SpectatorUtils.newCounter(Constants.numIncomingMessagesCounterName, pcd.target, tags);
BlockingQueue<String> queue = new LinkedBlockingQueue<>(queueCapacity.get());
drainFuture = scheduledExecutorService.scheduleAtFixedRate(() -> {
try {
if (queue.size() > 0 && ctx.channel().isWritable()) {
drainTriggeredCounter.increment();
final List<String> items = new ArrayList<>(queue.size());
synchronized (queue) {
queue.drainTo(items);
}
for (String data : items) {
ctx.write(Unpooled.copiedBuffer(data, StandardCharsets.UTF_8));
numMessagesCounter.increment();
numBytesCounter.increment(data.length());
}
ctx.flush();
}
} catch (Exception ex) {
log.error("Error writing to channel", ex);
}
}, writeIntervalMillis.get(), writeIntervalMillis.get(), TimeUnit.MILLISECONDS);
this.subscription = this.connectionBroker.connect(pcd)
.doOnNext(event -> numIncomingMessagesCounter.increment())
.mergeWith(tunnelPingsEnabled
? Observable.interval(Constants.TunnelPingIntervalSecs, Constants.TunnelPingIntervalSecs,
TimeUnit.SECONDS)
.map(l -> Constants.TunnelPingMessage)
: Observable.empty())
.doOnNext(event -> {
if (!Constants.DUMMY_TIMER_DATA.equals(event)) {
String data = Constants.SSE_DATA_PREFIX + event + Constants.SSE_DATA_SUFFIX;
boolean offer = false;
synchronized (queue) {
offer = queue.offer(data);
}
if (!offer) {
numDroppedBytesCounter.increment(data.length());
numDroppedMessagesCounter.increment();
}
}
})
.subscribe();
} else {
ctx.fireChannelRead(request.retain());
}
}
private static void send100Continue(ChannelHandlerContext ctx) {
FullHttpResponse response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1,
HttpResponseStatus.CONTINUE);
ctx.writeAndFlush(response);
}
private boolean isTunnelPingsEnabled(String uri) {
QueryStringDecoder queryStringDecoder = new QueryStringDecoder(uri);
return queryStringDecoder.parameters()
.getOrDefault(Constants.TunnelPingParamName, Arrays.asList("false"))
.get(0)
.equalsIgnoreCase("true");
}
private boolean isWebsocketUpgrade(HttpRequest request) {
HttpHeaders headers = request.headers();
// Header "Connection" contains "upgrade" (case insensitive) and
// Header "Upgrade" equals "websocket" (case insensitive)
String connection = headers.get(HttpHeaderNames.CONNECTION);
String upgrade = headers.get(HttpHeaderNames.UPGRADE);
return connection != null && connection.toLowerCase().contains("upgrade") &&
upgrade != null && upgrade.toLowerCase().equals("websocket");
}
private boolean isSubmitAndConnect(HttpRequest request) {
return request.method().equals(HttpMethod.POST) && request.uri().contains("jobsubmitandconnect");
}
@Override
public void channelUnregistered(ChannelHandlerContext ctx) throws Exception {
log.info("Channel {} is unregistered. URI: {}", ctx.channel(), uri);
unsubscribeIfSubscribed();
super.channelUnregistered(ctx);
}
@Override
public void channelInactive(ChannelHandlerContext ctx) throws Exception {
log.info("Channel {} is inactive. URI: {}", ctx.channel(), uri);
unsubscribeIfSubscribed();
super.channelInactive(ctx);
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
log.warn("Exception caught by channel {}. URI: {}", ctx.channel(), uri, cause);
unsubscribeIfSubscribed();
ctx.close();
}
/** Unsubscribes the broker subscription if active, cancels the drain task, and shuts down the drain executor. */
private void unsubscribeIfSubscribed() {
if (subscription != null && !subscription.isUnsubscribed()) {
log.info("SSE unsubscribing subscription with URI: {}", uri);
subscription.unsubscribe();
}
if (drainFuture != null) {
drainFuture.cancel(false);
}
if (scheduledExecutorService != null) {
scheduledExecutorService.shutdown();
}
}
public String jobSubmit(FullHttpRequest request) {
final String API_JOB_SUBMIT_PATH = "/api/submit";
String content = request.content().toString(StandardCharsets.UTF_8);
return callPostOnMaster(highAvailabilityServices.getMasterMonitor().getMasterObservable(), API_JOB_SUBMIT_PATH, content)
.retryWhen(Util.getRetryFunc(log, API_JOB_SUBMIT_PATH))
.flatMap(masterResponse -> masterResponse.getByteBuf()
.take(1)
.map(byteBuf -> {
final String s = byteBuf.toString(StandardCharsets.UTF_8);
log.info("response: " + s);
return s;
}))
.take(1)
.toBlocking()
.first();
}
public static class MasterResponse {
private final HttpResponseStatus status;
private final Observable<ByteBuf> byteBuf;
private final HttpResponseHeaders responseHeaders;
public MasterResponse(HttpResponseStatus status, Observable<ByteBuf> byteBuf, HttpResponseHeaders responseHeaders) {
this.status = status;
this.byteBuf = byteBuf;
this.responseHeaders = responseHeaders;
}
public HttpResponseStatus getStatus() {
return status;
}
public Observable<ByteBuf> getByteBuf() {
return byteBuf;
}
public HttpResponseHeaders getResponseHeaders() { return responseHeaders; }
}
public static Observable<MasterResponse> callPostOnMaster(Observable<MasterDescription> masterObservable, String uri, String content) {
PipelineConfigurator<HttpClientResponse<ByteBuf>, HttpClientRequest<String>> pipelineConfigurator
= PipelineConfigurators.httpClientConfigurator();
return masterObservable
.filter(Objects::nonNull)
.flatMap(masterDesc -> {
HttpClient<String, ByteBuf> client =
RxNetty.<String, ByteBuf>newHttpClientBuilder(masterDesc.getHostname(), masterDesc.getApiPort())
.pipelineConfigurator(pipelineConfigurator)
.build();
HttpClientRequest<String> request = HttpClientRequest.create(HttpMethod.POST, uri);
request = request.withHeader(HttpHeaderNames.CONTENT_TYPE.toString(), HttpHeaderValues.APPLICATION_JSON.toString());
request.withRawContent(content, StringTransformer.DEFAULT_INSTANCE);
return client.submit(request)
.map(response -> new MasterResponse(response.getStatus(), response.getContent(), response.getHeaders()));
})
.take(1);
}
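// A hedged usage sketch for callPostOnMaster, mirroring the jobSubmit flow above;
// the path and jobSpecJson payload are illustrative assumptions, not values taken
// from this codebase.
//
//   callPostOnMaster(masterMonitor.getMasterObservable(), "/api/submit", jobSpecJson)
//       .retryWhen(Util.getRetryFunc(log, "/api/submit"))
//       .flatMap(resp -> resp.getByteBuf()
//           .map(buf -> buf.toString(StandardCharsets.UTF_8)))
//       .toBlocking()
//       .first();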
}
| 193 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx/api | Create_ds/mantis-api/src/main/java/io/mantisrx/api/services/AppStreamStore.java | package io.mantisrx.api.services;
import com.google.common.collect.ImmutableList;
import io.mantisrx.discovery.proto.AppJobClustersMap;
import java.io.IOException;
import java.util.Collection;
/**
* Interface for looking up the streams associated with a given app or set of apps.
*/
public interface AppStreamStore {
default AppJobClustersMap getJobClusterMappings(String app) throws IOException {
return getJobClusterMappings(ImmutableList.of(app));
}
AppJobClustersMap getJobClusterMappings(Collection<String> apps) throws IOException;
}
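// A minimal sketch of an implementation, assuming a fixed mapping is acceptable
// (e.g. for tests). The class name is illustrative; it relies only on
// getFilteredAppJobClustersMap, which the production store also calls.
class StaticAppStreamStore implements AppStreamStore {
private final AppJobClustersMap mappings;
StaticAppStreamStore(AppJobClustersMap mappings) {
this.mappings = mappings;
}
@Override
public AppJobClustersMap getJobClusterMappings(Collection<String> apps) throws IOException {
// Narrow the full mapping down to the requested apps.
return mappings.getFilteredAppJobClustersMap(new java.util.ArrayList<>(apps));
}
}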
| 194 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx/api | Create_ds/mantis-api/src/main/java/io/mantisrx/api/services/AppStreamDiscoveryService.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.services;
import com.google.common.base.Preconditions;
import com.netflix.spectator.api.Counter;
import com.netflix.zuul.netty.SpectatorUtils;
import io.mantisrx.api.proto.AppDiscoveryMap;
import io.mantisrx.client.MantisClient;
import io.mantisrx.discovery.proto.AppJobClustersMap;
import io.mantisrx.server.core.JobSchedulingInfo;
import io.vavr.control.Either;
import io.vavr.control.Option;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.TimeUnit;
import lombok.extern.slf4j.Slf4j;
import rx.Observable;
import rx.Scheduler;
@Slf4j
public class AppStreamDiscoveryService {
private final MantisClient mantisClient;
private final Scheduler scheduler;
private final AppStreamStore appStreamStore;
public AppStreamDiscoveryService(
MantisClient mantisClient,
Scheduler scheduler,
AppStreamStore appStreamStore) {
Preconditions.checkArgument(mantisClient != null);
Preconditions.checkArgument(appStreamStore != null);
Preconditions.checkArgument(scheduler != null);
this.mantisClient = mantisClient;
this.scheduler = scheduler;
this.appStreamStore = appStreamStore;
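// Note: the counters below appear to exist only for the side effect of registering
// the metrics; the counters that are actually incremented per request live in
// ConfigurationBasedAppStreamStore.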
Counter appJobClusterMappingNullCount = SpectatorUtils.newCounter(
"appJobClusterMappingNull", "mantisapi");
Counter appJobClusterMappingRequestCount = SpectatorUtils.newCounter(
"appJobClusterMappingRequest", "mantisapi", "app", "unknown");
Counter appJobClusterMappingFailCount = SpectatorUtils.newCounter(
"appJobClusterMappingFail", "mantisapi");
}
public Either<String, AppDiscoveryMap> getAppDiscoveryMap(List<String> appNames) {
try {
AppJobClustersMap appJobClusters = getAppJobClustersMap(appNames);
//
// Lookup discovery info per stream and build mapping
//
AppDiscoveryMap adm = new AppDiscoveryMap(appJobClusters.getVersion(), appJobClusters.getTimestamp());
for (String app : appJobClusters.getMappings().keySet()) {
for (String stream : appJobClusters.getMappings().get(app).keySet()) {
String jobCluster = appJobClusters.getMappings().get(app).get(stream);
Option<JobSchedulingInfo> jobSchedulingInfo = getJobDiscoveryInfo(jobCluster);
jobSchedulingInfo.map(jsi -> {
adm.addMapping(app, stream, jsi);
return jsi;
});
}
}
return Either.right(adm);
} catch (Exception ex) {
log.error(ex.getMessage(), ex);
return Either.left(ex.getMessage());
}
}
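// A hedged usage sketch; the app names and handler methods are illustrative
// assumptions.
//
//   Either<String, AppDiscoveryMap> result =
//       service.getAppDiscoveryMap(java.util.Arrays.asList("api", "playback"));
//   result.fold(err -> handleError(err), adm -> render(adm));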
public AppJobClustersMap getAppJobClustersMap(List<String> appNames) throws IOException {
return appStreamStore.getJobClusterMappings(appNames);
}
private Option<JobSchedulingInfo> getJobDiscoveryInfo(String jobCluster) {
JobDiscoveryService jdim = JobDiscoveryService.getInstance(mantisClient, scheduler);
return jdim
.jobDiscoveryInfoStream(jdim.key(JobDiscoveryService.LookupType.JOB_CLUSTER, jobCluster))
.map(Option::of)
.take(1)
.timeout(2, TimeUnit.SECONDS, Observable.just(Option.none()))
.doOnError((t) -> {
log.warn("Timed out looking up job discovery info for cluster: " + jobCluster + ".");
})
.subscribeOn(scheduler)
.observeOn(scheduler)
.toSingle()
.toBlocking()
.value();
}
}
| 195 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx/api | Create_ds/mantis-api/src/main/java/io/mantisrx/api/services/JobDiscoveryService.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.services;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.impl.AtomicDouble;
import com.netflix.zuul.netty.SpectatorUtils;
import io.mantisrx.api.Util;
import io.mantisrx.client.MantisClient;
import io.mantisrx.server.core.JobSchedulingInfo;
import lombok.extern.slf4j.Slf4j;
import rx.Observable;
import rx.Scheduler;
import rx.Subscription;
import rx.functions.Action1;
import rx.subjects.BehaviorSubject;
import rx.subjects.Subject;
import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
@Slf4j
public class JobDiscoveryService {
public enum LookupType {
JOB_CLUSTER,
JOB_ID
}
public class JobDiscoveryLookupKey {
private final LookupType lookupType;
private final String id;
public JobDiscoveryLookupKey(final LookupType lookupType, final String id) {
this.lookupType = lookupType;
this.id = id;
}
public LookupType getLookupType() {
return lookupType;
}
public String getId() {
return id;
}
@Override
public boolean equals(final Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final JobDiscoveryLookupKey that = (JobDiscoveryLookupKey) o;
return lookupType == that.lookupType &&
Objects.equals(id, that.id);
}
@Override
public int hashCode() {
return Objects.hash(lookupType, id);
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("JobDiscoveryLookupKey{");
sb.append("lookupType=").append(lookupType);
sb.append(", id='").append(id).append('\'');
sb.append('}');
return sb.toString();
}
}
/**
* The purpose of this class is to dedup multiple schedulingChanges streams for the same JobId.
* The first subscriber causes a BehaviorSubject to be set up with data obtained from mantisClient.getSchedulingChanges.
* Future subscribers simply connect to the same Subject.
* When the number of subscribers falls to zero the Observable is unsubscribed and a cleanup callback is invoked.
*/
public class JobSchedulingInfoSubjectHolder implements AutoCloseable {
private Subscription subscription;
private final AtomicInteger subscriberCount = new AtomicInteger();
private final String jobId;
private final MantisClient mantisClient;
private AtomicBoolean inited = new AtomicBoolean(false);
private CountDownLatch initComplete = new CountDownLatch(1);
private final Action1 doOnZeroConnections;
private final Subject<JobSchedulingInfo, JobSchedulingInfo> schedulingInfoBehaviorSubjectingSubject = BehaviorSubject.create();
private final Registry registry;
private final Scheduler scheduler;
private final Counter cleanupCounter;
private final AtomicLong subscriberCountGauge;
public JobSchedulingInfoSubjectHolder(MantisClient mantisClient, String jobId, Action1 onZeroConnections, Registry registry, Scheduler scheduler) {
this(mantisClient, jobId, onZeroConnections, 5, registry, scheduler);
}
/**
* Ctor only; no subscriptions happen as part of the ctor.
*
* @param mantisClient - Used to get the schedulingInfo Observable
* @param jobId - JobId of job to get schedulingInfo
* @param onZeroConnections - Callback invoked when there are no more subscriptions for this observable
* @param retryCount - No. of retries in case of error connecting to schedulingInfo
*/
JobSchedulingInfoSubjectHolder(MantisClient mantisClient,
String jobId,
Action1 onZeroConnections,
int retryCount,
Registry registry,
Scheduler scheduler) {
Preconditions.checkNotNull(mantisClient, "Mantis Client cannot be null");
Preconditions.checkNotNull(jobId, "JobId cannot be null");
Preconditions.checkArgument(!jobId.isEmpty(), "JobId cannot be empty");
Preconditions.checkNotNull(onZeroConnections, "on Zero Connections callback cannot be null");
Preconditions.checkArgument(retryCount >= 0, "Retry count cannot be less than 0");
this.jobId = jobId;
this.mantisClient = mantisClient;
this.doOnZeroConnections = onZeroConnections;
this.registry = registry;
this.scheduler = scheduler;
cleanupCounter = SpectatorUtils.newCounter("mantisapi.schedulingChanges.cleanupCount", "", "jobId", jobId);
subscriberCountGauge = SpectatorUtils.newGauge("mantisapi.schedulingChanges.subscriberCount", "",
new AtomicLong(0l), "jobId", jobId);
}
/**
* If invoked the first time, subscribes to the schedulingInfo Observable via mantisClient and forwards
* the results to the schedulingInfo Subject.
* If 2 or more threads invoke this concurrently, only 1 performs the initialization while the others wait.
*/
private void init() {
if (!inited.getAndSet(true)) {
subscription = mantisClient.getSchedulingChanges(jobId)
.retryWhen(Util.getRetryFunc(log, "job scheduling information for " + jobId))
.doOnError((t) -> {
schedulingInfoBehaviorSubjectingSubject.toSerialized().onError(t);
doOnZeroConnections.call(jobId);
})
.doOnCompleted(() -> {
schedulingInfoBehaviorSubjectingSubject.toSerialized().onCompleted();
doOnZeroConnections.call(jobId);
})
.subscribeOn(scheduler)
.subscribe((schedInfo) -> schedulingInfoBehaviorSubjectingSubject.onNext(schedInfo));
initComplete.countDown();
} else {
try {
initComplete.await();
} catch (InterruptedException e) {
log.error(e.getMessage());
}
}
}
/**
* For testing
*
* @return current subscription count
*/
int getSubscriptionCount() {
return subscriberCount.get();
}
/**
* If a Subject holding schedulingInfo for the job exists, returns it as an Observable;
* if not, invokes mantisClient to get an Observable of scheduling changes, writes them to a Subject,
* and returns that as an Observable.
* Also tracks the subscription count; when it falls to 0, unsubscribes from the schedulingInfo Observable.
*
* @return Observable of scheduling changes
*/
public Observable<JobSchedulingInfo> getSchedulingChanges() {
init();
return schedulingInfoBehaviorSubjectingSubject
.doOnSubscribe(() -> {
if (log.isDebugEnabled()) { log.debug("Subscribed"); }
subscriberCount.incrementAndGet();
subscriberCountGauge.set(subscriberCount.get());
if (log.isDebugEnabled()) { log.debug("Subscriber count " + subscriberCount.get()); }
})
.doOnUnsubscribe(() -> {
if (log.isDebugEnabled()) {log.debug("UnSubscribed"); }
int subscriberCnt = subscriberCount.decrementAndGet();
subscriberCountGauge.set(subscriberCount.get());
if (log.isDebugEnabled()) { log.debug("Subscriber count " + subscriberCnt); }
if (0 == subscriberCount.get()) {
if (log.isDebugEnabled()) { log.debug("Shutting down"); }
close();
}
})
.doOnError((t) -> close())
;
}
/**
* Invoked if the schedulingInfo Observable completes or errors, or if the subscription count falls to 0.
* Unsubscribes from the schedulingInfo Observable and invokes the doOnZeroConnections callback.
*/
@Override
public void close() {
if (log.isDebugEnabled()) { log.debug("In Close Unsubscribing...." + subscription.isUnsubscribed()); }
if (inited.get() && subscription != null && !subscription.isUnsubscribed()) {
if (log.isDebugEnabled()) { log.debug("Unsubscribing...."); }
subscription.unsubscribe();
inited.set(false);
initComplete = new CountDownLatch(1);
}
cleanupCounter.increment();
this.doOnZeroConnections.call(this.jobId);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
JobSchedulingInfoSubjectHolder that = (JobSchedulingInfoSubjectHolder) o;
return Objects.equals(jobId, that.jobId);
}
@Override
public int hashCode() {
return Objects.hash(jobId);
}
}
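// A hedged usage sketch of the refcounting above; the job id, callback and handler
// are illustrative assumptions. Both subscriptions share one upstream connection,
// and the holder tears itself down when the last one unsubscribes.
//
//   JobSchedulingInfoSubjectHolder holder =
//       new JobSchedulingInfoSubjectHolder(mantisClient, "MyJob-1", onZero, registry, scheduler);
//   Subscription a = holder.getSchedulingChanges().subscribe(this::onInfo);
//   Subscription b = holder.getSchedulingChanges().subscribe(this::onInfo);
//   a.unsubscribe();
//   b.unsubscribe(); // subscriber count hits 0 -> close() -> onZero invoked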
/**
* The purpose of this class is to dedup multiple job discovery info streams for the same JobId.
* The first subscriber causes a BehaviorSubject to be set up with data obtained from mantisClient.jobDiscoveryInfoStream.
* Future subscribers connect to the same Subject.
* When the number of subscribers falls to zero the Observable is unsubscribed and a cleanup callback is invoked.
*/
public class JobDiscoveryInfoSubjectHolder implements AutoCloseable {
private Subscription subscription;
private final AtomicInteger subscriberCount = new AtomicInteger();
private final JobDiscoveryLookupKey lookupKey;
private final MantisClient mantisClient;
private AtomicBoolean inited = new AtomicBoolean(false);
private CountDownLatch initComplete = new CountDownLatch(1);
private final Action1 doOnZeroConnections;
private final Subject<JobSchedulingInfo, JobSchedulingInfo> discoveryInfoBehaviorSubject = BehaviorSubject.create();
private final Scheduler scheduler;
private final Counter cleanupCounter;
private final AtomicLong subscriberCountGauge;
public JobDiscoveryInfoSubjectHolder(MantisClient mantisClient, JobDiscoveryLookupKey lookupKey, Action1 onZeroConnections, Scheduler scheduler) {
this(mantisClient, lookupKey, onZeroConnections, 5, scheduler);
}
/**
* Ctor only; no subscriptions happen as part of the ctor.
*
* @param mantisClient - Used to get the schedulingInfo Observable
* @param lookupKey - JobId or JobCluster to get schedulingInfo
* @param onZeroConnections - Callback invoked when there are no more subscriptions for this observable
* @param retryCount - No. of retries in case of error connecting to schedulingInfo
*/
JobDiscoveryInfoSubjectHolder(MantisClient mantisClient,
JobDiscoveryLookupKey lookupKey,
Action1 onZeroConnections,
int retryCount,
Scheduler scheduler) {
Preconditions.checkNotNull(mantisClient, "Mantis Client cannot be null");
Preconditions.checkNotNull(lookupKey, "lookup key cannot be null");
Preconditions.checkArgument(lookupKey.getId() != null && !lookupKey.getId().isEmpty(), "lookup key cannot be empty or null");
Preconditions.checkNotNull(onZeroConnections, "on Zero Connections callback cannot be null");
Preconditions.checkArgument(retryCount >= 0, "Retry count cannot be less than 0");
this.lookupKey = lookupKey;
this.mantisClient = mantisClient;
this.doOnZeroConnections = onZeroConnections;
this.scheduler = scheduler;
cleanupCounter = SpectatorUtils.newCounter("mantisapi.discoveryinfo.cleanupCount", "", "lookupKey", lookupKey.getId());
subscriberCountGauge = SpectatorUtils.newGauge("mantisapi.discoveryinfo.subscriberCount", "",
new AtomicLong(0L),
"lookupKey", lookupKey.getId());
}
/**
* If invoked the first time, subscribes to the schedulingInfo Observable via mantisClient and forwards
* the results to the discoveryInfo Subject.
* If 2 or more threads invoke this concurrently, only 1 performs the initialization while the others wait.
*/
private void init() {
if (!inited.getAndSet(true)) {
Observable<JobSchedulingInfo> jobSchedulingInfoObs;
switch (lookupKey.getLookupType()) {
case JOB_ID:
jobSchedulingInfoObs = mantisClient.getSchedulingChanges(lookupKey.getId());
break;
case JOB_CLUSTER:
jobSchedulingInfoObs = mantisClient.jobClusterDiscoveryInfoStream(lookupKey.getId());
break;
default:
throw new IllegalArgumentException("lookup key type is not supported " + lookupKey.getLookupType());
}
subscription = jobSchedulingInfoObs
.retryWhen(Util.getRetryFunc(log, "job scheduling info for (" + lookupKey.getLookupType() + ") " + lookupKey.id))
.doOnError((t) -> {
log.info("cleanup jobDiscoveryInfo onError for {}", lookupKey);
discoveryInfoBehaviorSubject.toSerialized().onError(t);
doOnZeroConnections.call(lookupKey);
})
.doOnCompleted(() -> {
log.info("cleanup jobDiscoveryInfo onCompleted for {}", lookupKey);
discoveryInfoBehaviorSubject.toSerialized().onCompleted();
doOnZeroConnections.call(lookupKey);
})
.subscribeOn(scheduler)
.subscribe((schedInfo) -> discoveryInfoBehaviorSubject.onNext(schedInfo));
initComplete.countDown();
} else {
try {
initComplete.await();
} catch (InterruptedException e) {
log.error(e.getMessage());
}
}
}
/**
* For testing
*
* @return current subscription count
*/
int getSubscriptionCount() {
return subscriberCount.get();
}
/**
* If a Subject holding schedulingInfo for the job exists, returns it as an Observable;
* if not, invokes mantisClient to get an Observable of scheduling changes, writes them to a Subject,
* and returns that as an Observable.
* Also tracks the subscription count; when it falls to 0, unsubscribes from the schedulingInfo Observable.
*
* @return Observable of scheduling changes
*/
public Observable<JobSchedulingInfo> jobDiscoveryInfoStream() {
init();
return discoveryInfoBehaviorSubject
.doOnSubscribe(() -> {
if (log.isDebugEnabled()) { log.debug("Subscribed"); }
subscriberCount.incrementAndGet();
subscriberCountGauge.set(subscriberCount.get());
if (log.isDebugEnabled()) { log.debug("Subscriber count " + subscriberCount.get()); }
})
.doOnUnsubscribe(() -> {
if (log.isDebugEnabled()) {log.debug("UnSubscribed"); }
int subscriberCnt = subscriberCount.decrementAndGet();
subscriberCountGauge.set(subscriberCount.get());
if (log.isDebugEnabled()) { log.debug("Subscriber count " + subscriberCnt); }
if (0 == subscriberCount.get()) {
if (log.isDebugEnabled()) { log.debug("Shutting down"); }
close();
}
})
.doOnError((t) -> close())
;
}
/**
* Invoked if the schedulingInfo Observable completes or errors, or if the subscription count falls to 0.
* Unsubscribes from the schedulingInfo Observable and invokes the doOnZeroConnections callback.
*/
@Override
public void close() {
if (log.isDebugEnabled()) { log.debug("In Close un-subscribing...." + subscription.isUnsubscribed()); }
if (inited.get() && subscription != null && !subscription.isUnsubscribed()) {
if (log.isDebugEnabled()) { log.debug("Unsubscribing...."); }
subscription.unsubscribe();
inited.set(false);
initComplete = new CountDownLatch(1);
}
cleanupCounter.increment();
log.info("jobDiscoveryInfo close for {}", lookupKey);
this.doOnZeroConnections.call(this.lookupKey);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
JobDiscoveryInfoSubjectHolder that = (JobDiscoveryInfoSubjectHolder) o;
return Objects.equals(lookupKey, that.lookupKey);
}
@Override
public int hashCode() {
return Objects.hash(lookupKey);
}
}
private final MantisClient mantisClient;
private final Scheduler scheduler;
private final AtomicDouble subjectMapSizeGauge;
private int retryCount = 5;
private static JobDiscoveryService INSTANCE = null;
public static synchronized JobDiscoveryService getInstance(MantisClient mantisClient, Scheduler scheduler) {
if (INSTANCE == null) {
INSTANCE = new JobDiscoveryService(mantisClient, scheduler);
}
return INSTANCE;
}
private JobDiscoveryService(final MantisClient mClient, Scheduler scheduler) {
Preconditions.checkNotNull(mClient, "mantisClient cannot be null");
this.mantisClient = mClient;
this.subjectMapSizeGauge = SpectatorUtils.newGauge("mantisapi.discoveryInfo.subjectMapSize", "mantisapi.discoveryInfo.subjectMapSize", new AtomicDouble(0.0));
this.scheduler = scheduler;
}
/**
* For testing purposes
*
* @param cnt No of retries
*/
@VisibleForTesting
void setRetryCount(int cnt) {
this.retryCount = cnt;
}
private final ConcurrentMap<JobDiscoveryLookupKey, JobDiscoveryInfoSubjectHolder> subjectMap = new ConcurrentHashMap<>();
/**
* Invoked by the subjectHolders when the subscription count goes to 0 (or if there is an error)
*/
private final Action1<JobDiscoveryLookupKey> removeSubjectAction = key -> {
if (log.isDebugEnabled()) { log.debug("Removing subject for key {}", key); }
removeSchedulingInfoSubject(key);
};
/**
* Atomically inserts a JobDiscoveryInfoSubjectHolder if absent and returns an Observable of JobSchedulingInfo to the caller.
*
* @param lookupKey - Job cluster name or JobID
*
* @return Observable of JobSchedulingInfo for the given lookup key
*/
public Observable<JobSchedulingInfo> jobDiscoveryInfoStream(JobDiscoveryLookupKey lookupKey) {
Preconditions.checkNotNull(lookupKey, "lookup key cannot be null for fetching job discovery info");
Preconditions.checkArgument(lookupKey.getId() != null && !lookupKey.getId().isEmpty(), "Lookup ID cannot be null or empty: " + lookupKey);
subjectMapSizeGauge.set(subjectMap.size());
return subjectMap.computeIfAbsent(lookupKey, (jc) -> new JobDiscoveryInfoSubjectHolder(mantisClient, jc, removeSubjectAction, this.retryCount, scheduler)).jobDiscoveryInfoStream();
}
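// A hedged usage sketch; the cluster name is an illustrative assumption. Concurrent
// callers passing the same key share a single upstream discovery stream.
//
//   JobDiscoveryService jds = JobDiscoveryService.getInstance(mantisClient, scheduler);
//   Observable<JobSchedulingInfo> stream =
//       jds.jobDiscoveryInfoStream(jds.key(LookupType.JOB_CLUSTER, "MyJobCluster"));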
/**
* Intended to be called via a callback when subscriber count falls to 0
*
* @param lookupKey JobId whose entry needs to be removed
*/
private void removeSchedulingInfoSubject(JobDiscoveryLookupKey lookupKey) {
subjectMap.remove(lookupKey);
subjectMapSizeGauge.set(subjectMap.size());
}
/**
* For testing purposes
*
* @return No. of entries in the subject
*/
int getSubjectMapSize() {
return subjectMap.size();
}
/**
* For testing purposes
*/
void clearMap() {
subjectMap.clear();
}
public JobDiscoveryLookupKey key(LookupType lookupType, String jobCluster) {
return new JobDiscoveryLookupKey(lookupType, jobCluster);
}
public static final Cache<String, String> jobDiscoveryInfoCache = CacheBuilder.newBuilder()
.expireAfterWrite(250, TimeUnit.MILLISECONDS)
.maximumSize(500)
.build();
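// A hedged usage sketch of the short-TTL cache above: memoizing a serialized
// discovery payload for up to 250ms. The key format and loader are illustrative
// assumptions; Cache.get with a loader throws ExecutionException, elided here.
//
//   String payload = jobDiscoveryInfoCache.get("jobCluster=MyJobCluster",
//       () -> serializeDiscoveryInfo(latestInfo));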
}
| 196 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx/api | Create_ds/mantis-api/src/main/java/io/mantisrx/api/services/ConfigurationBasedAppStreamStore.java | package io.mantisrx.api.services;
import com.netflix.spectator.api.Counter;
import com.netflix.zuul.netty.SpectatorUtils;
import io.mantisrx.common.JsonSerializer;
import io.mantisrx.discovery.proto.AppJobClustersMap;
import io.mantisrx.shaded.org.apache.curator.framework.listen.Listenable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;
import javax.annotation.Nullable;
import lombok.extern.slf4j.Slf4j;
@SuppressWarnings("unused")
@Slf4j
public class ConfigurationBasedAppStreamStore implements AppStreamStore {
private final JsonSerializer jsonSerializer;
private final AtomicReference<AppJobClustersMap> appJobClusterMappings = new AtomicReference<>();
private final Counter appJobClusterMappingNullCount;
private final Counter appJobClusterMappingFailCount;
private final Counter appJobClusterMappingRequestCount;
public ConfigurationBasedAppStreamStore(ConfigSource configSource) {
this.jsonSerializer = new JsonSerializer();
// Counters are initialized before the listener is registered and the first parse
// runs, so the failure counter is usable if the initial payload is malformed.
this.appJobClusterMappingNullCount = SpectatorUtils.newCounter(
"appJobClusterMappingNull", "mantisapi");
this.appJobClusterMappingRequestCount = SpectatorUtils.newCounter(
"appJobClusterMappingRequest", "mantisapi", "app", "unknown");
this.appJobClusterMappingFailCount = SpectatorUtils.newCounter(
"appJobClusterMappingFail", "mantisapi");
configSource.getListenable()
.addListener((newConfig) -> updateAppJobClustersMapping(newConfig));
updateAppJobClustersMapping(configSource.get());
}
@Override
public AppJobClustersMap getJobClusterMappings(Collection<String> apps) throws IOException {
return getAppJobClustersMap(apps, this.appJobClusterMappings.get());
}
private AppJobClustersMap getAppJobClustersMap(Collection<String> appNames,
@Nullable AppJobClustersMap appJobClustersMap) throws IOException {
if (appJobClustersMap != null) {
final AppJobClustersMap appJobClusters;
if (appNames.size() > 0) {
appJobClusters = appJobClustersMap.getFilteredAppJobClustersMap(new ArrayList<>(appNames));
} else {
appJobClusterMappingRequestCount.increment();
appJobClusters = appJobClustersMap;
}
return appJobClusters;
} else {
appJobClusterMappingNullCount.increment();
throw new IOException("AppJobClustersMap is null");
}
}
private void updateAppJobClustersMapping(String appJobClusterStr) {
try {
AppJobClustersMap appJobClustersMap =
jsonSerializer.fromJSON(appJobClusterStr, AppJobClustersMap.class);
log.info("appJobClustersMap updated to {}", appJobClustersMap);
appJobClusterMappings.set(appJobClustersMap);
} catch (Exception ioe) {
log.error("failed to update appJobClustersMap on Property update {}", appJobClusterStr, ioe);
appJobClusterMappingFailCount.increment();
}
}
public interface ConfigSource extends Supplier<String> {
Listenable<ConfigurationChangeListener> getListenable();
}
public interface ConfigurationChangeListener {
void onConfigChange(String config);
}
}
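// A minimal sketch of a fixed-payload ConfigSource, e.g. for tests; it never fires
// change events, so its listenable is a no-op. The class name is illustrative, and
// the method set is assumed from Curator's Listenable interface.
class StaticConfigSource implements ConfigurationBasedAppStreamStore.ConfigSource {
private final String json;
StaticConfigSource(String json) {
this.json = json;
}
@Override
public String get() {
return json;
}
@Override
public Listenable<ConfigurationBasedAppStreamStore.ConfigurationChangeListener> getListenable() {
// No-op listenable: the payload never changes, so listeners are never invoked.
return new Listenable<ConfigurationBasedAppStreamStore.ConfigurationChangeListener>() {
@Override
public void addListener(ConfigurationBasedAppStreamStore.ConfigurationChangeListener listener) {
}
@Override
public void addListener(ConfigurationBasedAppStreamStore.ConfigurationChangeListener listener, java.util.concurrent.Executor executor) {
}
@Override
public void removeListener(ConfigurationBasedAppStreamStore.ConfigurationChangeListener listener) {
}
};
}
}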
| 197 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx/api/services | Create_ds/mantis-api/src/main/java/io/mantisrx/api/services/artifacts/InMemoryArtifactManager.java | package io.mantisrx.api.services.artifacts;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
import io.mantisrx.api.proto.Artifact;
public class InMemoryArtifactManager implements ArtifactManager {
private Map<String, Artifact> artifacts = new HashMap<>();
@Override
public List<String> getArtifacts() {
return artifacts
.values()
.stream()
.map(Artifact::getFileName)
.collect(Collectors.toList());
}
@Override
public Optional<Artifact> getArtifact(String name) {
return artifacts
.values()
.stream()
.filter(artifact -> artifact.getFileName().equals(name))
.findFirst();
}
@Override
public void deleteArtifact(String name) {
this.artifacts.remove(name);
}
@Override
public void putArtifact(Artifact artifact) {
this.artifacts.put(artifact.getFileName(), artifact);
}
}
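// A hedged usage sketch; the artifact name is illustrative and the Artifact
// constructor shape (file name, size, content) is an assumption, not confirmed here.
//
//   ArtifactManager manager = new InMemoryArtifactManager();
//   manager.putArtifact(new Artifact("job-artifact.zip", content.length, content));
//   Optional<Artifact> found = manager.getArtifact("job-artifact.zip");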
| 198 |
0 | Create_ds/mantis-api/src/main/java/io/mantisrx/api/services | Create_ds/mantis-api/src/main/java/io/mantisrx/api/services/artifacts/ArtifactManager.java | package io.mantisrx.api.services.artifacts;
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import io.mantisrx.api.proto.Artifact;
import java.util.List;
import java.util.Optional;
public interface ArtifactManager {
List<String> getArtifacts();
Optional<Artifact> getArtifact(String name);
void deleteArtifact(String name);
void putArtifact(Artifact artifact);
} | 199 |