package org.jetlinks.pro.cassandra.device;

import com.google.common.collect.Maps;
import lombok.Generated;
import lombok.Setter;
import org.apache.commons.codec.binary.Hex;
import org.apache.commons.collections4.CollectionUtils;
import org.hswebframework.ezorm.core.ValueCodec;
import org.hswebframework.ezorm.core.param.Term;
import org.hswebframework.ezorm.core.param.TermType;
import org.hswebframework.ezorm.rdb.executor.wrapper.ResultWrappers;
import org.hswebframework.ezorm.rdb.metadata.RDBDatabaseMetadata;
import org.hswebframework.ezorm.rdb.metadata.RDBIndexMetadata;
import org.hswebframework.ezorm.rdb.operator.DatabaseOperator;
import org.hswebframework.ezorm.rdb.operator.DefaultDatabaseOperator;
import org.hswebframework.ezorm.rdb.operator.ddl.TableBuilder;
import org.hswebframework.ezorm.rdb.operator.dml.query.Selects;
import org.hswebframework.ezorm.rdb.operator.dml.query.SortOrder;
import org.hswebframework.web.api.crud.entity.PagerResult;
import org.hswebframework.web.api.crud.entity.QueryParamEntity;
import org.hswebframework.web.exception.BusinessException;
import org.hswebframework.web.utils.DigestUtils;
import org.jetlinks.core.device.DeviceProductOperator;
import org.jetlinks.core.device.DeviceRegistry;
import org.jetlinks.core.message.DeviceMessage;
import org.jetlinks.core.message.event.EventMessage;
import org.jetlinks.core.metadata.*;
import org.jetlinks.core.metadata.types.*;
import org.jetlinks.pro.IntervalUnit;
import org.jetlinks.pro.ScrollPagerResult;
import org.jetlinks.pro.cassandra.metadata.*;
import org.jetlinks.pro.cassandra.metadata.types.*;
import org.jetlinks.pro.device.entity.DeviceEvent;
import org.jetlinks.pro.device.entity.DeviceOperationLogEntity;
import org.jetlinks.pro.device.entity.DeviceProperty;
import org.jetlinks.pro.device.service.data.AbstractDeviceDataStoragePolicy;
import org.jetlinks.pro.device.service.data.DeviceDataService;
import org.jetlinks.pro.device.service.data.DeviceDataStorageProperties;
import org.jetlinks.pro.timeseries.TimeSeriesData;
import org.jetlinks.pro.timeseries.query.AggregationData;
import org.jetlinks.pro.utils.ConverterUtils;
import org.jetlinks.pro.utils.ScrollPagingUtils;
import org.jetlinks.reactor.ql.utils.CastUtils;
import org.jetlinks.supports.official.JetLinksEventMetadata;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.springframework.boot.autoconfigure.cassandra.CassandraProperties;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.data.cassandra.ReactiveSessionFactory;
import org.springframework.data.cassandra.core.cql.ReactiveCqlTemplate;
import org.springframework.stereotype.Component;
import org.springframework.util.Assert;
import org.springframework.util.StringUtils;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;
import reactor.util.function.Tuple2;
import reactor.util.function.Tuples;

import javax.annotation.Nonnull;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;

/**
 * 使用cassandra来存储设备数据。仅支持按设备ID查询相关操作,不支持按产品ID查询.
 * <p>
 * 在发布产品时,会自动根据无模型创建设备属性表,设备日志表,设备事件表.
 * <p>
 * <p>
 * 数据会根据时间进行分区存储,可以通过{@code jetlinks.device.storage.cassandra.partition-interval}来指定分区周期.
 * <br>
 * 注意:在查询时,如果没有指定查询时间范围,默认只会查询2个分区间隔周期内的数据.
 * <p>
 * 属性表名规则为: properties_{productId},表结构:
 * <pre>{@code
 *     id text, //ID
 *     partition bigint, //分区列
 *     property text,   //属性ID
 *     value text, //属性值,cassandra不支持分组聚合,所以直接存储text,查询时再转换类型
 *     timestamp bigint, //属性时间戳
 *     createTime bigint, //创建时间
 *     deviceId text //设备ID
 * }</pre>
 * <p>
 * 主键:{@code (deviceId,property,partition),timestamp}.
 * 同一个设备的相同属性值会存储到同一个分区中.查询时只能根据设备id和属性id以及分区来进行查询.
 * <br>
 * 实现逻辑:{@link CassandraDeviceStoragePolicy#createPropertiesTable(String)}
 * <p>
 * 日志表名规则为: device_log_{productId},表结构:
 * <pre>{@code
 *     id text, //ID
 *     partition bigint, //分区列
 *     content text,   //日志内容
 *     messageId text, //消息ID
 *     type text, //日志类型
 *     timestamp bigint, //属性时间戳
 *     createTime bigint, //创建时间
 *     deviceId text //设备ID
 * }</pre>
 * <p>
 * 主键:{@code (deviceId,partition),timestamp}.
 * <br>
 * 实现逻辑:{@link CassandraDeviceStoragePolicy#createLogTable(String)}
 * <p>
 * 事件表表结构根据事件输出类型不同而不同.
 * <br>
 * 实现逻辑:{@link CassandraDeviceStoragePolicy#createEventTable(String, EventMetadata)}
 * <p>
 *
 */
@Component
@ConfigurationProperties(prefix = "jetlinks.device.storage.cassandra")
public class CassandraDeviceStoragePolicy extends AbstractDeviceDataStoragePolicy {

    //name of the partition column present in every table created by this policy
    protected static final String COLUMN_PARTITION = "partition";

    //entry point for all cassandra DDL/DML operations (configured in the constructor)
    private final DatabaseOperator databaseOperator;

    //time unit used to bucket rows into partitions; configurable via
    //jetlinks.device.storage.cassandra.partition-interval (defaults to monthly)
    @Setter
    private IntervalUnit partitionInterval = IntervalUnit.MONTHS;

    //mapping from device-model data type id to the cassandra column type
    private static final Map<String, CassandraDataType> typeMapping = new HashMap<>();

    static {
        //numeric and temporal types map to native cassandra column types
        typeMapping.put(IntType.ID, CassandraIntType.INSTANCE);
        typeMapping.put(org.jetlinks.core.metadata.types.LongType.ID, CassandraBigIntType.INSTANCE);
        typeMapping.put(FloatType.ID, CassandraFloatType.INSTANCE);
        typeMapping.put(DoubleType.ID, CassandraDoubleType.INSTANCE);
        typeMapping.put(BooleanType.ID, CassandraBooleanType.INSTANCE);
        typeMapping.put(DateTimeType.ID, CassandraTimestampType.INSTANCE);
        //every remaining supported type is persisted as plain text
        for (String textBacked : new String[]{
            StringType.ID, PasswordType.ID, EnumType.ID, FileType.ID, GeoType.ID}) {
            typeMapping.put(textBacked, CassandraTextType.INSTANCE);
        }
    }

    /**
     * Wire up the cassandra-backed database operator used by this policy.
     *
     * @param registry               device registry
     * @param properties             storage configuration
     * @param cassandraProperties    spring cassandra settings (supplies the keyspace name)
     * @param reactiveSessionFactory reactive CQL session factory
     */
    public CassandraDeviceStoragePolicy(DeviceRegistry registry,
                                        DeviceDataStorageProperties properties,
                                        CassandraProperties cassandraProperties,
                                        ReactiveSessionFactory reactiveSessionFactory) {
        super(registry, properties);

        RDBDatabaseMetadata metadata = new RDBDatabaseMetadata(new CassandraDialect());
        //CQL executor backed by the reactive session
        metadata.addFeature(new CassandraReactiveSqlExecutor(new ReactiveCqlTemplate(reactiveSessionFactory)));

        //register the configured keyspace and make it the current schema
        CassandraSchema keyspace = new CassandraSchema(cassandraProperties.getKeyspaceName());
        metadata.addSchema(keyspace);
        metadata.setCurrentSchema(keyspace);

        //operator used by every DDL/DML call in this policy
        this.databaseOperator = DefaultDatabaseOperator.of(metadata);
    }


    @Override
    @Generated
    public String getId() {
        //unique id of this storage policy
        return "cassandra-row";
    }

    @Override
    @Generated
    public String getName() {
        //display name ("Cassandra row mode"); user-facing string kept as-is
        return "Cassandra行式模式";
    }

    @Override
    @Generated
    public String getDescription() {
        //description ("stores device data in cassandra, one row per property"); user-facing string kept as-is
        return "使用Cassandra存储设备数据,每一个属性为一行数据.";
    }

    @Nonnull
    @Override
    @Generated
    public Mono<ConfigMetadata> getConfigMetadata() {
        //no extra configuration is exposed for this policy
        return Mono.empty();
    }

    /**
     * Insert a single time-series record into the given table.
     *
     * @param metric target table name
     * @param data   record to persist; its raw value map is written as-is
     * @return completion signal
     */
    @Override
    protected Mono<Void> doSaveData(String metric, TimeSeriesData data) {
        return databaseOperator.dml()
                               .insert(metric)
                               .value(data.values())
                               .execute()
                               .reactive()
                               .then();
    }

    /**
     * Insert a stream of time-series records into the given table.
     *
     * @param metric target table name
     * @param data   records to persist
     * @return completion signal
     */
    @Override
    protected Mono<Void> doSaveData(String metric, Flux<TimeSeriesData> data) {
        //delegate each element to the single-record variant
        return data.flatMap(record -> this.doSaveData(metric, record))
                   .then();
    }

    /**
     * Convert reported properties into (table, row) tuples using the row-based
     * storage layout: one row per property.
     *
     * @param productId           product id (determines the target table)
     * @param message             source device message
     * @param properties          reported property values
     * @param propertySourceTimes per-property source timestamps
     * @return tuples of table name and row data
     */
    @Override
    protected Flux<Tuple2<String, TimeSeriesData>> convertProperties(String productId,
                                                                     DeviceMessage message,
                                                                     Map<String, Object> properties,
                                                                     Map<String, Long> propertySourceTimes) {
        //delegate to the shared row-layout conversion from the parent policy
        return convertPropertiesForRowPolicy(productId, message, properties, propertySourceTimes);
    }

    /**
     * Execute a query against {@code metric} and map every row to {@code T}.
     * <p>
     * Fails with {@code error.cassandra_device_table_notfound} when the table
     * does not exist (e.g. the product has not been re-published since this
     * storage policy was enabled).
     *
     * @param metric      table name
     * @param paramEntity query conditions; a pageSize of 0 fetches no rows
     * @param mapper      converts each row (wrapped as TimeSeriesData) to the result type
     * @param <T>         result element type
     * @return mapped rows
     */
    @Override
    protected <T> Flux<T> doQuery(String metric,
                                  QueryParamEntity paramEntity,
                                  Function<TimeSeriesData, T> mapper) {
        //pageSize 0 means "no rows requested", e.g. only a count is needed
        if (paramEntity.getPageSize() == 0) {
            return Flux.empty();
        }
        return databaseOperator
            .getMetadata()
            .getTableReactive(metric)
            //table missing: likely first use of this policy before the product was re-published
            .switchIfEmpty(Mono.error(() -> new BusinessException("error.cassandra_device_table_notfound", 500, metric)))
            .flatMapMany(metadata -> databaseOperator
                .dml()
                .query(metadata)
                .setParam(paramEntity)
                //wrap each result row as a map
                .fetch(ResultWrappers.map())
                .reactive()
                .map(map -> {
                    //extract the report timestamp column for the TimeSeriesData wrapper
                    long ts = CastUtils.castNumber(map.get(COLUMN_TIMESTAMP)).longValue();
                    return mapper.apply(TimeSeriesData.of(ts, map));
                }));
    }

    /**
     * Build one table row for a single property value.
     *
     * @param id        source record id; digested with md5 to normalize arbitrary id formats
     * @param timestamp report timestamp (epoch millis); also drives the partition value
     * @param deviceId  device id
     * @param property  property metadata (only the id is stored)
     * @param value     property value, stored as text
     * @return column name to value map for the insert
     */
    @Override
    protected Map<String, Object> createRowPropertyData(String id,
                                                        long timestamp,
                                                        String deviceId,
                                                        PropertyMetadata property,
                                                        Object value) {
        Map<String, Object> row = Maps.newHashMapWithExpectedSize(24);
        //ids may follow arbitrary rules (e.g. timestamp based); md5 gives a uniform format
        row.put(COLUMN_ID, DigestUtils.md5Hex(id));
        row.put(COLUMN_CREATE_TIME, System.currentTimeMillis());
        row.put(COLUMN_DEVICE_ID, deviceId);
        row.put(COLUMN_PROPERTY_ID, property.getId());
        //partition value derived from the report timestamp
        row.put(COLUMN_PARTITION, computePartition(timestamp));
        row.put(COLUMN_TIMESTAMP, timestamp);
        //cassandra cannot aggregate across column types, so the value is stored as text
        row.put(COLUMN_PROPERTY_VALUE, String.valueOf(value));
        return row;
    }

    /**
     * Build a device log row: the generic log data plus the partition value,
     * minus the product id (it is already encoded in the table name).
     *
     * @param productId product id
     * @param message   source message
     * @return column name to value map for the insert
     */
    @Override
    protected Map<String, Object> createDeviceLogData(String productId,
                                                      DeviceMessage message) {
        //start from the generic log row built by the parent policy
        Map<String, Object> row = super.createDeviceLogData(productId, message);
        //the product id is part of the table name, no need to store it per row
        row.remove(COLUMN_PRODUCT_ID);
        //partition value derived from the message timestamp
        row.put(COLUMN_PARTITION, computePartition(message.getTimestamp()));
        return row;
    }

    /**
     * Build an event row: the generic event data plus the partition value,
     * minus the product id (it is already encoded in the table name).
     *
     * @param message  source event message
     * @param metadata device metadata used to decode the event
     * @return column name to value map for the insert
     */
    @Override
    protected Map<String, Object> createEventData(EventMessage message,
                                                  DeviceMetadata metadata) {
        //start from the generic event row built by the parent policy
        Map<String, Object> row = super.createEventData(message, metadata);
        //the product id is part of the table name, no need to store it per row
        row.remove(COLUMN_PRODUCT_ID);
        //partition value derived from the message timestamp
        row.put(COLUMN_PARTITION, computePartition(message.getTimestamp()));
        return row;
    }

    /**
     * Truncate a timestamp to its partition value (the start of the partition period).
     *
     * @param timestamp epoch millis
     * @return partition value (epoch millis of the period start)
     */
    protected long computePartition(long timestamp) {
        return partitionInterval.truncatedTo(timestamp);
    }

    /**
     * Expand a query into one query per covered partition, each clone
     * additionally constrained with {@code partition = value}.
     *
     * @param queryParam original query
     * @return one cloned query per partition value
     */
    protected Flux<QueryParamEntity> computePartitionQuery(QueryParamEntity queryParam) {
        return this
            .computePartition(queryParam)
            .map(partition -> queryParam.clone().and(COLUMN_PARTITION, TermType.eq, partition));
    }

    /**
     * Compute the partition values covered by the query conditions. When the
     * query carries {@code timestamp} terms, those values define the range;
     * any missing bound falls back to "now" (for the end) and one partition
     * period before now (for the start), which usually yields 2 partitions.
     *
     * @param queryParam query conditions
     * @return partition values (epoch millis of each period start)
     */
    protected Flux<Long> computePartition(QueryParamEntity queryParam) {
        ZonedDateTime baseTime = null;
        //range start, 0 = unspecified
        long from = 0;
        //range end, 0 = unspecified
        long to = 0;

        //scan top-level terms only (nested terms are not supported)
        for (Term term : queryParam.getTerms()) {
            //a timestamp term defines the partition range directly
            if (COLUMN_TIMESTAMP.equals(term.getColumn())) {
                String termType = term.getTermType().toLowerCase(Locale.ROOT);
                switch (termType) {
                    // > and >= define the start time
                    case TermType.gt:
                    case TermType.gte:
                        from = CastUtils.castDate(term.getValue()).getTime();
                        break;
                    // < and <= define the end time
                    case TermType.lt:
                    case TermType.lte:
                        to = CastUtils.castDate(term.getValue()).getTime();
                        break;
                    // between: [start, end]
                    case TermType.btw:
                        //convert the term value into a list of dates
                        List<Date> values = ConverterUtils.convertToList(term.getValue(), CastUtils::castDate);

                        from = values.get(0).getTime();
                        if (values.size() > 1) {
                            to = values.get(1).getTime();
                        }
                        break;
                }
            }
        }
        //either bound missing: use the current time as the base time
        if (from == 0 || to == 0) {
            baseTime = LocalDateTime.now().atZone(ZoneId.systemDefault());
        }
        //no start time: go back one partition period from the base time,
        //which in most cases produces 2 partition values overall
        if (from == 0) {
            from = baseTime
                .minus(partitionInterval.getDurationOfUnit(), partitionInterval.getUnit())
                .toInstant()
                .toEpochMilli();
        }
        //no end time: use the base time itself
        if (to == 0) {
            to = baseTime.toInstant()
                         .toEpochMilli();
        }

        return Flux.fromIterable(partitionInterval.iterate(from, to));
    }

    /**
     * Paged query across every partition covered by the query conditions.
     * <p>
     * Cassandra has no OFFSET, so paging is driven by paging-state "scroll" ids.
     * When several partitions are queried at once, each partition produces its
     * own scroll id and the ids are joined with {@code ,} into a single scroll
     * id returned to the client.
     *
     * @param metric      table name
     * @param paramEntity query conditions; may carry a scroll id and a known total
     * @param mapper      row converter
     * @param <T>         result element type
     * @return a {@link ScrollPagerResult} carrying rows, total and the next scroll id
     */
    @Override
    protected <T> Mono<PagerResult<T>> doQueryPager(String metric,
                                                    QueryParamEntity paramEntity,
                                                    Function<TimeSeriesData, T> mapper) {
        //scroll id carried by the incoming query (null on the first page)
        String scrollId = ScrollPagingUtils.getScrollId(paramEntity).orElse(null);

        return this
            .computePartitionQuery(paramEntity)
            .cache()
            .as(flux -> {
                Mono<Integer> total =
                    //when the client already knows the total, skip counting
                    //(recommended: count is expensive in cassandra)
                    paramEntity.getTotal() != null
                        ? Mono.just(paramEntity.getTotal())
                        : flux
                        //select count(1) total from {metric} where ...
                        .flatMap(param -> databaseOperator
                            .dml()
                            .query(metric)
                            .select(Selects.count1().as("total"))
                            .setParam(param.clone().noPaging().doNotSort())
                            .fetch(ResultWrappers.map())
                            .reactive()
                            .map(map -> CastUtils.castNumber(map.getOrDefault("total", 0)).intValue())
                            .singleOrEmpty()
                            .defaultIfEmpty(0))
                        //sum the per-partition counts
                        .reduce(Math::addExact);

                //with multiple partitions, the scroll id is a ','-joined list, one entry per partition
                String[] scrollIds = scrollId != null ? scrollId.split(",") : null;
                //per-partition paging contexts keyed by partition index
                //NOTE(review): mutated from inside flatMap; assumes the partition flux is
                //effectively sequential here — confirm before raising flatMap concurrency
                Map<Integer, CassandraPagingContext> contexts = new TreeMap<>();

                //run the query per partition
                Mono<List<T>> result = flux
                    .index()
                    .flatMap(tp2 -> {
                        int index = tp2.getT1().intValue();
                        QueryParamEntity param = tp2.getT2().clone();

                        //scroll id for this partition, when one was supplied
                        String _scrollId =
                            scrollIds != null && scrollIds.length > tp2.getT1()
                                ? scrollIds[index]
                                : null;
                        //propagate the scroll id into the query context
                        if (StringUtils.hasText(_scrollId)) {
                            param.context(ScrollPagingUtils.SCROLL_ID_KEY, _scrollId);
                        }
                        //paging context captures the paging state produced by this partition
                        CassandraPagingContext pageContext = CassandraPagingContext
                            .of(param.getPageIndex(), param.getPageSize(), _scrollId);
                        contexts.put(index, pageContext);

                        return this
                            .doQuery(metric, param, mapper)
                            //attach the paging context to the result flux
                            .as(CassandraPagingContext.setFluxPaging(pageContext));
                    })
                    //re-sort: rows from different partitions arrive interleaved
                    .as(resultFlux -> ConverterUtils.convertSortedStream(resultFlux, paramEntity.getSorts()))
                    //keep only one page worth of rows:
                    //several partitions may each return a full page
                    .take(paramEntity.getPageSize())
                    .collectList();

                return Mono
                    .zip(result, total, (_result, _total) -> {
                        //combine the next-page scroll ids of all partitions
                        String nextScrollId = contexts
                            .values()
                            .stream()
                            .map(CassandraPagingContext::nextPageStateBase64)
                            .filter(Objects::nonNull)
                            .collect(Collectors.joining(","));
                        //scroll-based pager result
                        return ScrollPagerResult.of(_total, _result, nextScrollId, paramEntity);
                    });
            });
    }

    /**
     * Create (or alter) the device log table.
     * <p>
     * Primary key: {@code (deviceId, partition), timestamp}, clustered by
     * {@code timestamp desc}; a secondary index is added on the log type column.
     *
     * @param tableName log table name, e.g. device_log_{productId}
     * @return completion signal
     */
    private Mono<Void> createLogTable(String tableName) {
        return databaseOperator
            .ddl()
            .createOrAlter(tableName)
            //id
            .addColumn(COLUMN_ID).type(CassandraTextType.INSTANCE).comment("ID").commit()
            //partition column
            .addColumn(COLUMN_PARTITION).type(CassandraBigIntType.INSTANCE).comment("数据分区").commit()
            //report timestamp
            .addColumn(COLUMN_TIMESTAMP).type(CassandraBigIntType.INSTANCE)
            /**/.custom(column -> column.setValueCodec(CassandraBigIntType.INSTANCE))
            /**/.comment("上报时间").commit()
            //create time
            .addColumn(COLUMN_CREATE_TIME).type(CassandraBigIntType.INSTANCE)
            /**/.custom(column -> column.setValueCodec(CassandraBigIntType.INSTANCE))
            /**/.comment("创建时间").commit()
            //log content
            .addColumn(COLUMN_DEVICE_LOG_CONTENT).type(CassandraTextType.INSTANCE).comment("日志内容").commit()
            //device id
            .addColumn(COLUMN_DEVICE_ID).type(CassandraTextType.INSTANCE).comment("设备ID").commit()
            //message id
            .addColumn(COLUMN_MESSAGE_ID).type(CassandraTextType.INSTANCE).comment("消息ID").commit()
            //log type
            .addColumn(COLUMN_DEVICE_LOG_TYPE).type(CassandraTextType.INSTANCE).comment("日志类型").commit()
            .custom(table -> {
                //primary key: (deviceId, partition), timestamp
                table.addFeature(FixedCassandraPrimaryKeyBuilder.of(
                    CassandraPrimaryKey.of(
                        CassandraPrimaryKey.of(
                            COLUMN_DEVICE_ID, COLUMN_PARTITION
                        ), COLUMN_TIMESTAMP)
                ));
                //clustering order: newest first
                table.addFeature(ClusteringOrderBy.of(SortOrder.desc(COLUMN_TIMESTAMP)));
                //secondary index on the log type column
                RDBIndexMetadata index = new RDBIndexMetadata();
                index.setName(tableName + "_log_type");
                index
                    .getColumns()
                    .add(RDBIndexMetadata.IndexColumn.of(COLUMN_DEVICE_LOG_TYPE, RDBIndexMetadata.IndexSort.asc));
                table.addIndex(index);
            })
            .commit()
            .reactive()
            .then();
    }

    /**
     * Create (or alter) the device properties table.
     * <p>
     * Primary key: {@code (deviceId, property, partition), timestamp},
     * clustered by {@code timestamp desc}. Values are stored as text because
     * cassandra cannot aggregate across column types; they are converted back
     * at query time.
     *
     * @param tableName properties table name, e.g. properties_{productId}
     * @return completion signal
     */
    private Mono<Void> createPropertiesTable(String tableName) {
        return databaseOperator
            .ddl()
            .createOrAlter(tableName)
            //id
            .addColumn(COLUMN_ID).type(CassandraTextType.INSTANCE).comment("ID").commit()
            //deviceId
            .addColumn(COLUMN_DEVICE_ID).type(CassandraTextType.INSTANCE).comment("设备ID").commit()
            //property id
            .addColumn(COLUMN_PROPERTY_ID).type(CassandraTextType.INSTANCE).comment("属性ID").commit()
            //partition column
            .addColumn(COLUMN_PARTITION).type(CassandraBigIntType.INSTANCE).comment("数据分区").commit()
            //report timestamp
            .addColumn(COLUMN_TIMESTAMP).type(CassandraBigIntType.INSTANCE)
            /**/.custom(column -> column.setValueCodec(CassandraBigIntType.INSTANCE))
            /**/.comment("上报时间").commit()
            //create time
            .addColumn(COLUMN_CREATE_TIME).type(CassandraBigIntType.INSTANCE)
            /**/.custom(column -> column.setValueCodec(CassandraBigIntType.INSTANCE))
            /**/.comment("创建时间").commit()
            //value stored as text: cassandra cannot aggregate, conversion happens at query time
            .addColumn(COLUMN_PROPERTY_VALUE).type(CassandraTextType.INSTANCE).comment("值").commit()
            .custom(table -> {
                //primary key: (deviceId, property, partition), timestamp
                table.addFeature(FixedCassandraPrimaryKeyBuilder
                                     .of(CassandraPrimaryKey
                                             .of(CassandraPrimaryKey
                                                     .of(COLUMN_DEVICE_ID,
                                                         COLUMN_PROPERTY_ID,
                                                         COLUMN_PARTITION),
                                                 COLUMN_TIMESTAMP)
                                     ));
                //clustering order: newest first
                table.addFeature(ClusteringOrderBy.of(SortOrder.desc(COLUMN_TIMESTAMP)));
            })
            .commit()
            .reactive()
            .then();
    }

    /**
     * Create (or alter) an event table.
     * <p>
     * The layout depends on the event output type: object types are flattened
     * into one column per property; any other type uses a single value column.
     * In "all events in one table" mode the value is stored as text alongside
     * the event id, and the value column joins the partition key.
     *
     * @param tableName event table name
     * @param event     event metadata describing the output type
     * @return completion signal
     */
    private Mono<Void> createEventTable(String tableName, EventMetadata event) {

        TableBuilder tableBuilder = databaseOperator
            .ddl()
            .createOrAlter(tableName)
            //id
            .addColumn(COLUMN_ID).type(CassandraTextType.INSTANCE).comment("ID").commit()
            //deviceId
            .addColumn(COLUMN_DEVICE_ID).type(CassandraTextType.INSTANCE).comment("设备ID").commit()
            //partition column
            .addColumn(COLUMN_PARTITION).type(CassandraBigIntType.INSTANCE).comment("数据分区").commit()
            //report timestamp
            .addColumn(COLUMN_TIMESTAMP).type(CassandraBigIntType.INSTANCE)
            /**/.custom(column -> column.setValueCodec(CassandraBigIntType.INSTANCE))
            /**/.comment("上报时间").commit()
            //create time
            .addColumn(COLUMN_CREATE_TIME).type(CassandraBigIntType.INSTANCE)
            /**/.custom(column -> column.setValueCodec(CassandraBigIntType.INSTANCE))
            /**/.comment("创建时间").commit()
            .custom(table -> {
                CassandraPrimaryKey primaryKey;

                if (eventSetting().eventIsAllInOne()) {
                    //NOTE(review): the event VALUE column is part of the partition key here;
                    //COLUMN_EVENT_ID would seem the more natural choice — confirm this is intended
                    primaryKey = CassandraPrimaryKey.of(
                        CassandraPrimaryKey.of(COLUMN_DEVICE_ID, COLUMN_EVENT_VALUE, COLUMN_PARTITION), COLUMN_TIMESTAMP);
                } else {
                    primaryKey = CassandraPrimaryKey
                        .of(CassandraPrimaryKey.of(COLUMN_DEVICE_ID, COLUMN_PARTITION), COLUMN_TIMESTAMP);
                }
                //primary key
                table.addFeature(FixedCassandraPrimaryKeyBuilder.of(primaryKey));
                //clustering order: newest first
                table.addFeature(ClusteringOrderBy.of(SortOrder.desc(COLUMN_TIMESTAMP)));
            });

        if (eventSetting().eventIsAllInOne()) {
            CassandraDataType dataType = CassandraTextType.INSTANCE;

            tableBuilder
                .addColumn(COLUMN_EVENT_VALUE)
                /**/.type(dataType)
                /**/.custom(column -> column.setValueCodec((ValueCodec<?, ?>) dataType))
                /**/.comment("事件值")
                /**/.commit()
                .addColumn(COLUMN_EVENT_ID)
                /**/.type(CassandraTextType.INSTANCE)
                /**/.custom(column -> column.setValueCodec(CassandraTextType.INSTANCE))
                /**/.comment("事件ID")
                /**/.commit();

        } else {
            DataType type = event.getType();
            if (type instanceof ObjectType) {
                //object output type: one column per object property
                for (PropertyMetadata property : ((ObjectType) type).getProperties()) {
                    CassandraDataType dataType = convertCassandraType(property.getValueType());

                    tableBuilder.addColumn(property.getId())
                                .type(dataType)
                                .custom(column -> column.setValueCodec((ValueCodec<?, ?>) dataType))
                                .comment(property.getName())
                                .commit();
                }
            } else {
                //scalar output type: a single value column of the mapped cassandra type
                CassandraDataType dataType = convertCassandraType(type);

                tableBuilder.addColumn(COLUMN_EVENT_VALUE)
                            .type(dataType)
                            .custom(column -> column.setValueCodec((ValueCodec<?, ?>) dataType))
                            .comment("事件值")
                            .commit();
            }
        }

        return tableBuilder
            .commit()
            .reactive()
            .then();
    }

    /**
     * Map a device-model data type to its cassandra column type.
     * Unmapped types fall back to {@code text}.
     *
     * @param dataType device-model data type
     * @return the cassandra column type to use
     */
    private CassandraDataType convertCassandraType(DataType dataType) {
        //getOrDefault replaces the manual null-check lookup
        return typeMapping.getOrDefault(dataType.getType(), CassandraTextType.INSTANCE);
    }

    /**
     * Query events of one device, expanding the query across every matching partition.
     *
     * @param deviceId device id
     * @param event    event id
     * @param query    query conditions
     * @param format   whether to format values against the device metadata
     * @return matched events (one flux element per partition hit, concatenated)
     */
    @Nonnull
    @Override
    public Flux<DeviceEvent> queryEvent(@Nonnull String deviceId,
                                        @Nonnull String event,
                                        @Nonnull QueryParamEntity query,
                                        boolean format) {
        return this
            .computePartitionQuery(query)
            .flatMap(param -> super.queryEvent(deviceId, event, param, format));
    }

    /**
     * Rewrite a table name into a form cassandra accepts: characters that are
     * not valid in cassandra identifiers ('-' and '.') become '_'.
     *
     * @param table original table name
     * @return sanitized table name (the same instance when nothing needed replacing)
     */
    private String refactorTable(String table) {
        //String.replace(char, char) returns this very instance when the char
        //is absent, so the no-op case allocates nothing
        return table.replace('-', '_').replace('.', '_');
    }

    @Override
    protected String getPropertyTimeSeriesMetric(String productId) {
        //sanitize the default property table name for cassandra
        return refactorTable(super.getPropertyTimeSeriesMetric(productId));
    }

    @Override
    protected String getDeviceLogMetric(String productId) {
        //sanitize the default log table name for cassandra
        return refactorTable(super.getDeviceLogMetric(productId));
    }

    @Override
    protected String getDeviceEventMetric(String productId, String eventId) {
        //sanitize the default event table name for cassandra
        return refactorTable(super.getDeviceEventMetric(productId, eventId));
    }

    /**
     * Create the tables backing a product: the properties table, the log table
     * and the event table(s), according to the current event storage setting.
     *
     * @param productId product id
     * @param metadata  device metadata (supplies the event definitions)
     * @return completion signal
     */
    @Nonnull
    @Override
    public Mono<Void> registerMetadata(@Nonnull String productId, @Nonnull DeviceMetadata metadata) {

        List<Mono<Void>> tasks = new ArrayList<>(8);
        //properties table
        tasks.add(createPropertiesTable(getPropertyTimeSeriesMetric(productId)));
        //log table
        tasks.add(createLogTable(getDeviceLogMetric(productId)));

        if (eventSetting().eventIsAllInOne()) {
            //one shared table for every event
            tasks.add(createEventTable(
                getDeviceEventMetric(productId, null),
                new JetLinksEventMetadata("all_in_one", "", StringType.GLOBAL)));
        } else {
            //one table per event
            for (EventMetadata event : metadata.getEvents()) {
                tasks.add(createEventTable(getDeviceEventMetric(productId, event.getId()), event));
            }
        }
        //run all DDL concurrently
        return Flux.merge(tasks).then();
    }

    /**
     * Refresh the cached table metadata of every table that backs a product.
     *
     * @param productId product id
     * @param metadata  device metadata (supplies the event definitions)
     * @return completion signal
     */
    @Nonnull
    @Override
    public Mono<Void> reloadMetadata(@Nonnull String productId, @Nonnull DeviceMetadata metadata) {

        List<String> tables = new ArrayList<>(8);
        //properties table
        tables.add(getPropertyTimeSeriesMetric(productId));
        //log table
        tables.add(getDeviceLogMetric(productId));

        if (eventSetting().eventIsAllInOne()) {
            //single shared event table
            tables.add(getDeviceEventMetric(productId, null));
        } else {
            //one table per event
            for (EventMetadata event : metadata.getEvents()) {
                tables.add(getDeviceEventMetric(productId, event.getId()));
            }
        }

        //touching getTableReactive reloads each table's metadata from the schema
        return Flux.fromIterable(tables)
                   .flatMap(table -> databaseOperator
                       .getMetadata()
                       .getCurrentSchema()
                       .getTableReactive(table))
                   .then();
    }

    /**
     * Query the latest value of each requested property (or of every metadata
     * property when none are named) for one device.
     * <p>
     * Iterates the device-model properties one by one, so query time grows
     * with the number of properties.
     *
     * @param deviceId   device id
     * @param query      query conditions
     * @param properties property ids to fetch; empty = all metadata properties
     * @return at most one latest value per property
     */
    @Nonnull
    @Override
    public Flux<DeviceProperty> queryEachOneProperties(@Nonnull String deviceId,
                                                       @Nonnull QueryParamEntity query,
                                                       @Nonnull String... properties) {
        return this
            .computePartitionQuery(query.clone()
                                        //fetch just 1 row per property
                                        .doPaging(0, 1)
                                        //restrict to this device
                                        .and(COLUMN_DEVICE_ID, TermType.eq, deviceId))
            .flatMap(param -> this
                .getProductAndMetadataByDevice(deviceId)
                .flatMapMany(tp2 -> {
                    DeviceMetadata metadata = tp2.getT2();
                    Set<String> props = new HashSet<>(Arrays.asList(properties));
                    //query each metadata property individually: more properties, slower query
                    return Flux
                        .fromIterable(metadata.getProperties())
                        .filter(prop -> props.isEmpty() || props.contains(prop.getId()))
                        .flatMap(prop -> this.queryProperty(tp2, param, prop.getId()));
                }))
            //re-sort: querying several partitions at once scrambles the order
            .as(flux -> ConverterUtils.convertSortedStream(flux, query.getSorts()))
            .distinct(DeviceProperty::getProperty);

    }

    /**
     * Query the values of each requested property (or of every metadata
     * property when none are named) for one device, batching the per-property
     * queries 20 at a time.
     *
     * @param deviceId device id
     * @param query    query conditions
     * @param property property ids to fetch; empty = all metadata properties
     * @return matched property values, re-sorted across partitions
     */
    @Nonnull
    @Override
    public Flux<DeviceProperty> queryEachProperties(@Nonnull String deviceId,
                                                    @Nonnull QueryParamEntity query,
                                                    @Nonnull String... property) {

        Set<String> properties = new HashSet<>(Arrays.asList(property));

        return this
            .computePartitionQuery(query.clone().and(COLUMN_DEVICE_ID, TermType.eq, deviceId))
            .flatMap(param -> this
                .getProductAndMetadataByDevice(deviceId)
                .flatMapMany(tp2 -> {
                    DeviceMetadata metadata = tp2.getT2();
                    return Flux
                        .fromIterable(metadata.getProperties())
                        .filter(prop -> properties.isEmpty() || properties.contains(prop.getId()))
                        .map(prop -> this.queryProperty(tp2, param, prop.getId()))
                        //run 20 property queries in parallel per batch
                        .buffer(20)
                        .concatMap(Flux::merge);
                }))
            //re-sort: querying several partitions at once scrambles the order
            .as(flux -> ConverterUtils.convertSortedStream(flux, query.getSorts()));
    }

    /**
     * Query property values of one device, expanding the query across every
     * matching partition and re-sorting the merged result.
     *
     * @param deviceId device id
     * @param query    query conditions
     * @param property property ids to fetch
     * @return matched property values
     */
    @Nonnull
    @Override
    public Flux<DeviceProperty> queryProperty(@Nonnull String deviceId,
                                              @Nonnull QueryParamEntity query,
                                              @Nonnull String... property) {
        return getProductAndMetadataByDevice(deviceId)
            .flatMapMany(productAndMetadata -> this
                .computePartitionQuery(query)
                .flatMap(param -> this.queryProperty(productAndMetadata,
                                                     //restrict to this device
                                                     param.and(COLUMN_DEVICE_ID, TermType.eq, deviceId), property)))
            //re-sort: querying several partitions at once scrambles the order
            .as(flux -> ConverterUtils.convertSortedStream(flux, query.getSorts()));
    }

    /**
     * Run each requested aggregation over the query result.
     * For every aggregation column, the matching property rows are filtered
     * out of the shared flux and fed to its aggregation function; results
     * are collected into a map keyed by the aggregation alias.
     *
     * @param propertyFlux shared query result (callers cache it when it is subscribed repeatedly)
     * @param aggregations the aggregation columns to compute
     * @return map of alias to aggregated value
     */
    private Mono<Map<String, Object>> aggregation(Flux<DeviceProperty> propertyFlux,
                                                  List<DeviceDataService.DevicePropertyAggregation> aggregations) {

        return Flux
            .fromIterable(aggregations)
            .flatMap(aggregation -> {
                //select only the rows that belong to this aggregation's property
                Flux<DeviceProperty> matched = propertyFlux
                    .filter(row -> Objects.equals(aggregation.getProperty(), row.getProperty()));
                return aggregation
                    .getAgg()
                    //missing numeric values count as 0
                    .compute(matched,
                             row -> row.getNumberValue() == null ? 0 : CastUtils.castNumber(row.getNumberValue()))
                    //pair each result with its alias so the collector can build the map
                    .map(result -> Tuples.of(aggregation.getAlias(), result));
            })
            .collectMap(Tuple2::getT1, Tuple2::getT2, () -> Maps.newHashMapWithExpectedSize(aggregations.size()));
    }


    /**
     * Aggregate properties grouped into time-interval buckets.
     * Pre-builds one result map per bucket (so empty buckets still appear in
     * the output), fills the buckets from the grouped query result, then emits
     * the buckets in descending-time order with missing aliases defaulted to 0.
     *
     * @param propertyFlux source property values
     * @param request      aggregation request; interval and time format must be set
     * @param aggregations the aggregation columns to compute per bucket
     * @return one map per bucket: a "time" entry plus one entry per aggregation alias
     */
    private Flux<Map<String, Object>> aggregationGroupByTime(Flux<DeviceProperty> propertyFlux,
                                                             DeviceDataService.AggregationRequest request,
                                                             List<DeviceDataService.DevicePropertyAggregation> aggregations) {
        //time format used to render each bucket's "time" value
        DateTimeFormatter formatter = DateTimeFormat.forPattern(request.getFormat());

        Assert.notNull(request.getInterval(), "interval can not be null");

        return Flux
            .fromIterable(
                request
                    .getInterval()
                    //compute every bucket timestamp between from and to
                    .iterate(request.getFrom().getTime(), request.getTo().getTime())
            )
            //newest bucket first
            .sort(Comparator.comparingLong(Long::longValue).reversed())
            //pre-build all buckets: key = bucket timestamp, value = result map (insertion order kept)
            .collectMap(Function.identity(),
                        (ts) -> {
                            Map<String, Object> map = Maps.newHashMapWithExpectedSize(aggregations.size() + 1);
                            map.put("time", new DateTime(ts).toString(formatter));
                            return map;
                        },
                        LinkedHashMap::new)
            .flatMapMany(times -> propertyFlux
                //group each row into its interval bucket
                .groupBy(prop -> request.getInterval().round(prop.getTimestamp()), Integer.MAX_VALUE)
                .flatMap(timeGroup -> {
                    Long time = timeGroup.key();
                    //row fell into a bucket outside the pre-built range? skip it
                    if (!times.containsKey(time)) {
                        return Mono.empty();
                    }
                    //aggregate the rows of this bucket
                    return this
                        //cache the group: with several aggregation columns it is subscribed more than once
                        .aggregation(timeGroup.cache(), aggregations)
                        //compute on the parallel scheduler
                        .subscribeOn(Schedulers.parallel())
                        .doOnNext(map -> {
                            //merge the aggregation results into the pre-built bucket map
                            times.get(time).putAll(map);
                        });
                })
                //emit the pre-built buckets (descending-time insertion order)
                .thenMany(Flux.fromIterable(times.values()))
                //buckets with no data get 0 for every aggregation alias
                .doOnNext(map -> {
                    for (DeviceDataService.DevicePropertyAggregation aggregation : aggregations) {
                        map.putIfAbsent(aggregation.getAlias(), 0);
                    }
                }));
    }

    /**
     * Aggregate the given properties of one device over a time window,
     * optionally grouped into time-interval buckets.
     *
     * @param deviceId   device id, used as the query's primary-key condition
     * @param request    time window, optional interval/format, and result limit
     * @param properties aggregation columns (property + aggregation function + alias)
     * @return one AggregationData per result row, capped at request.getLimit()
     */
    @Override
    public Flux<AggregationData> aggregationPropertiesByDevice(@Nonnull String deviceId,
                                                               @Nonnull DeviceDataService.AggregationRequest request,
                                                               @Nonnull DeviceDataService.DevicePropertyAggregation... properties) {
        //columns to aggregate
        List<DeviceDataService.DevicePropertyAggregation> aggregations = Arrays.asList(properties);

        //distinct property ids referenced by the aggregations
        String[] propertyArray = aggregations
            .stream()
            .map(DeviceDataService.DevicePropertyAggregation::getProperty)
            .distinct()
            .toArray(String[]::new);

        return request
            .getFilter()
            .clone()
            .toQuery()
            .where()
            //restrict to the requested time window
            .between(COLUMN_TIMESTAMP, request.getFrom(), request.getTo())
            .execute(param -> this
                .computePartitionQuery(param.noPaging())
                //NOTE(review): queryProperty(deviceId, ...) computes partitions again on the
                //already-partitioned param — confirm computePartitionQuery is idempotent here
                .flatMap(_param -> queryProperty(deviceId, _param, propertyArray)))
            //run the aggregation over the query result
            .as(propertyFlux -> {
                //grouped by time bucket
                if (request.getInterval() != null) {
                    return this.aggregationGroupByTime(propertyFlux, request, aggregations);
                } else {
                    return this
                        .aggregation(
                            //cache: with several aggregation columns the flux is subscribed more than once
                            propertyFlux.cache(),
                            aggregations)
                        .flux();
                }
            })
            .map(AggregationData::of)
            .take(request.getLimit());

    }

    /**
     * Query one page of values for a single property.
     * Thin adapter over the varargs overload.
     */
    @Nonnull
    @Override
    public Mono<PagerResult<DeviceProperty>> queryPropertyPage(@Nonnull String deviceId,
                                                               @Nonnull String property,
                                                               @Nonnull QueryParamEntity query) {
        return this.queryPropertyPage(deviceId, query, new String[]{property});
    }

    /**
     * Query one page of values for the given properties of a device.
     * Scopes the query to the device partition, resolves the product and
     * metadata, then delegates to the internal pager.
     */
    @Nonnull
    @Override
    public Mono<PagerResult<DeviceProperty>> queryPropertyPage(@Nonnull String deviceId,
                                                               @Nonnull QueryParamEntity query,
                                                               @Nonnull String... properties) {
        //deviceId is the partition key and must always be part of the condition
        QueryParamEntity scopedQuery = query.and(COLUMN_DEVICE_ID, TermType.eq, deviceId);
        return this
            .getProductAndMetadataByDevice(deviceId)
            .flatMap(productAndMetadata -> this.queryPropertyPage(productAndMetadata, scopedQuery, properties));
    }

    /**
     * Query property rows from the product's time-series table and map them
     * to DeviceProperty values.
     *
     * @param tp2        product operator and its device metadata
     * @param query      conditions; expected to already carry the deviceId term
     * @param properties property ids to restrict to; empty means all metadata properties
     * @return the mapped property rows
     */
    private Flux<DeviceProperty> queryProperty(Tuple2<DeviceProductOperator, DeviceMetadata> tp2,
                                               QueryParamEntity query,
                                               String... properties) {
        //table that stores this product's property time series
        String table = getPropertyTimeSeriesMetric(tp2.getT1().getId());

        DeviceMetadata metadata = tp2.getT2();
        return query
            .clone()
            .toQuery()
            //properties specified: scale page size by the property count so each
            //property can still contribute up to pageSize rows per page
            .when(properties.length > 0, q -> {
                if (query.isPaging()) {
                    q.doPaging(query.getPageIndex(), properties.length * query.getPageSize());
                }
                q.in(COLUMN_PROPERTY_ID, (Object[]) properties);
            })
            //no properties specified: scale by the total number of metadata properties
            .when(properties.length == 0, q -> q
                .doPaging(query.getPageIndex(), metadata.getProperties().size() * query.getPageSize()))
            //default ordering: newest first
            .when(CollectionUtils.isEmpty(query.getSorts()), q -> q.orderByDesc(COLUMN_TIMESTAMP))
            .execute(databaseOperator.dml().query(table)::setParam)
            .fetch(ResultWrappers.map())
            .reactive()
            .map(map -> {
                String property = String.valueOf(map.get(COLUMN_PROPERTY_ID));
                long timestamp = CastUtils.castNumber(map.get(COLUMN_TIMESTAMP)).longValue();

                return DeviceProperty
                    .of(TimeSeriesData.of(timestamp, map), metadata.getPropertyOrNull(property))
                    .generateId();
            });
    }

    /**
     * Query one page of property values from the product's time-series table.
     * <p>
     * Fix: the assembled parameter (carrying the property-in condition added via
     * {@code when(...)}) is now passed to {@code doQueryPager}; previously the
     * original {@code query} was passed, which only worked if {@code toQuery()}
     * mutated it in place. The query is also cloned first so the caller's
     * parameter is never modified, consistent with
     * {@code queryProperty(Tuple2, QueryParamEntity, String...)}.
     *
     * @param productAndMetadata product operator and its device metadata
     * @param query              conditions; expected to already carry the deviceId term
     * @param property           property ids to restrict to; empty means no restriction
     * @return the pager result with rows mapped to DeviceProperty
     */
    private Mono<PagerResult<DeviceProperty>> queryPropertyPage(@Nonnull Tuple2<DeviceProductOperator, DeviceMetadata> productAndMetadata,
                                                                @Nonnull QueryParamEntity query,
                                                                @Nonnull String... property) {

        //table that stores this product's property time series
        String table = getPropertyTimeSeriesMetric(productAndMetadata.getT1().getId());

        return query
            .clone()
            .toQuery()
            //restrict to the requested properties when any are given
            .when(property.length > 0, q -> q.in(COLUMN_PROPERTY_ID, Arrays.asList(property)))
            //pass the assembled param (not the original query) to the pager
            .execute(param -> this
                .doQueryPager(table,
                              param,
                              //map each row to DeviceProperty, resolving its metadata by id
                              data -> DeviceProperty
                                  .of(data, data
                                      .getString(COLUMN_PROPERTY_ID)
                                      .map(productAndMetadata.getT2()::getPropertyOrNull)
                                      .orElse(null))
                                  .generateId()
                ));
    }

    /**
     * Query the device message log.
     * Cassandra cannot run an IN query on the indexed log-type column, so a
     * condition that carries more than one log type is rejected with a friendly
     * error before delegating to the parent implementation.
     */
    @Override
    public Mono<PagerResult<DeviceOperationLogEntity>> queryDeviceMessageLog(@Nonnull String deviceId,
                                                                             @Nonnull QueryParamEntity entity) {

        for (Term term : entity.getTerms()) {
            //only the first log-type term is inspected
            if (!COLUMN_DEVICE_LOG_TYPE.equals(term.getColumn())) {
                continue;
            }
            if (ConverterUtils.convertToList(term.getValue()).size() > 1) {
                return Mono.error(new UnsupportedOperationException("error.cassandra_multi_type_unsupported"));
            }
            break;
        }

        return super.queryDeviceMessageLog(deviceId, entity);
    }


    /**
     * Not supported: Cassandra queries must include the partition key (deviceId),
     * so product-wide property queries cannot be served by this storage policy.
     */
    @Nonnull
    @Override
    public Flux<DeviceProperty> queryPropertyByProductId(@Nonnull String productId,
                                                         @Nonnull QueryParamEntity query,
                                                         @Nonnull String... property) {
        UnsupportedOperationException unsupported = new UnsupportedOperationException("error.unsupported");
        return Flux.error(unsupported);
    }

    /**
     * Not supported: Cassandra queries must include the partition key (deviceId),
     * so product-wide aggregation cannot be served by this storage policy.
     */
    @Override
    public Flux<AggregationData> aggregationPropertiesByProduct(@Nonnull String productId,
                                                                @Nonnull DeviceDataService.AggregationRequest request,
                                                                @Nonnull DeviceDataService.DevicePropertyAggregation... properties) {
        UnsupportedOperationException unsupported = new UnsupportedOperationException("error.unsupported");
        return Flux.error(unsupported);
    }

    /**
     * Not supported: Cassandra queries must include the partition key (deviceId),
     * so product-wide property paging cannot be served by this storage policy.
     */
    @Nonnull
    @Override
    public Mono<PagerResult<DeviceProperty>> queryPropertyPageByProductId(@Nonnull String productId,
                                                                          @Nonnull QueryParamEntity query,
                                                                          @Nonnull String... properties) {
        UnsupportedOperationException unsupported = new UnsupportedOperationException("error.unsupported");
        return Mono.error(unsupported);
    }

    /**
     * Not supported: Cassandra queries must include the partition key (deviceId),
     * so product-wide single-property paging cannot be served by this storage policy.
     */
    @Nonnull
    @Override
    public Mono<PagerResult<DeviceProperty>> queryPropertyPageByProductId(@Nonnull String productId,
                                                                          @Nonnull String property,
                                                                          @Nonnull QueryParamEntity query) {
        UnsupportedOperationException unsupported = new UnsupportedOperationException("error.unsupported");
        return Mono.error(unsupported);
    }

    /**
     * Not supported: Cassandra queries must include the partition key (deviceId),
     * so product-wide event paging cannot be served by this storage policy.
     */
    @Nonnull
    @Override
    public Mono<PagerResult<DeviceEvent>> queryEventPageByProductId(@Nonnull String productId,
                                                                    @Nonnull String event,
                                                                    @Nonnull QueryParamEntity query,
                                                                    boolean format) {
        UnsupportedOperationException unsupported = new UnsupportedOperationException("error.unsupported");
        return Mono.error(unsupported);
    }
}
