package com.gitee.c0dehvb.reactive.sample.r2dbc.metadata;

import com.google.common.collect.Lists;
import io.r2dbc.spi.ConnectionFactory;
import io.r2dbc.spi.Row;
import lombok.extern.slf4j.Slf4j;
import org.apache.shardingsphere.core.rule.ShardingRule;
import org.apache.shardingsphere.sql.parser.binder.metadata.column.ColumnMetaData;
import org.apache.shardingsphere.sql.parser.binder.metadata.index.IndexMetaData;
import org.apache.shardingsphere.sql.parser.binder.metadata.schema.SchemaMetaData;
import org.apache.shardingsphere.sql.parser.binder.metadata.table.TableMetaData;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;
import reactor.util.function.Tuple2;
import reactor.util.function.Tuples;

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;

/**
 * MySQL schema metadata loader backed by R2DBC.
 *
 * <p>Discovers table names via {@code show tables} and loads per-table column
 * metadata via {@code desc <table>}, fanning the work out over a bounded
 * parallel scheduler.
 *
 * @author LiYilin
 * @since 2022/6/2 10:37 AM
 **/
@Slf4j
public class MySQLR2dbcSchemaMetaDataLoader implements R2dbcSchemaMetaDataLoader {

    /**
     * Loads metadata for every table reachable through the given connection factory.
     *
     * @param shardingRule       sharding rule (currently unused by this loader)
     * @param maxConnectionCount upper bound on concurrently used connections / worker threads
     * @param connectionFactory  R2DBC connection factory
     * @return schema metadata containing one {@link TableMetaData} per discovered table
     */
    @Override
    public SchemaMetaData load(ShardingRule shardingRule, int maxConnectionCount, ConnectionFactory connectionFactory) {
        List<String> tableNames = loadAllTableName(connectionFactory);
        log.info("Loading {} tables' meta data.", tableNames.size());
        Map<String, TableMetaData> tableMetaDataMap = asyncLoad(connectionFactory, maxConnectionCount, tableNames);
        return new SchemaMetaData(tableMetaDataMap);
    }

    /**
     * Partitions the table list into at most {@code maxConnectionCount} groups, loads each
     * group on its own worker thread, and blocks until the whole pipeline terminates.
     *
     * <p>Implementation note: a previous version awaited a
     * {@code CountDownLatch(tableGroups.size())} counted down by {@code doOnComplete} on the
     * {@code ParallelFlux}. That signal fires once per <em>rail</em>, and {@code parallel()}
     * with no argument creates one rail per CPU core — not per table group — so the latch
     * could never reach zero (hang) whenever cores &lt; groups, and an upstream error (with
     * the no-arg {@code subscribe()}) would never count it down at all. Blocking on the
     * sequential completion signal is both simpler and correct.
     */
    private Map<String, TableMetaData> asyncLoad(ConnectionFactory connectionFactory, int maxConnectionCount,
                                                 List<String> tableNames) {
        List<List<String>> tableGroups = Lists.partition(tableNames, Math.max(tableNames.size() / maxConnectionCount,
                                                                              1));
        final Map<String, TableMetaData> tableMetaDataMap = new ConcurrentHashMap<>(tableNames.size(), 1);
        int parallelism = Math.min(tableGroups.size(), maxConnectionCount);
        Scheduler scheduler = Schedulers.newParallel("mysql-schema-loader", parallelism);
        try {
            Flux.fromIterable(tableGroups)
                    // Pin the rail count to the worker-thread count so every rail has a thread.
                    .parallel(parallelism)
                    .runOn(scheduler)
                    .flatMap(tableGroup -> Flux.fromIterable(tableGroup)
                            .flatMap(tableName -> loadTableMetaData(connectionFactory, tableName)))
                    .doOnNext(tuple -> tableMetaDataMap.put(tuple.getT1(), tuple.getT2()))
                    .sequential()
                    .then()
                    // Wait for completion; propagates any load failure instead of hanging.
                    .block();
        } finally {
            scheduler.dispose();
        }
        return tableMetaDataMap;
    }

    /**
     * Loads the column metadata of one table and pairs it with the table name.
     */
    private Mono<Tuple2<String, TableMetaData>> loadTableMetaData(ConnectionFactory connectionFactory,
                                                                  String tableName) {
        return loadColumnMetaData(connectionFactory, tableName)
                .collectList()
                .doOnNext(columnMetaData -> log.info("loaded: {}", tableName))
                // TODO(review): the index name "medium_sales_id" is hard-coded for EVERY table;
                // real index metadata should be loaded per table (e.g. "show index from <table>").
                .map(columnMetaDataList -> new TableMetaData(columnMetaDataList, Collections.singletonList(
                        new IndexMetaData("medium_sales_id"))))
                .map(e -> Tuples.of(tableName, e));
    }

    /**
     * Executes {@code desc <table>} and maps each row to a {@link ColumnMetaData}.
     *
     * <p>{@code Flux.usingWhen} guarantees the connection is closed once the rows have been
     * consumed (or on error/cancel); the previous version leaked one connection per table.
     * {@code tableName} originates from {@code show tables}, so the concatenation is safe.
     */
    private Flux<ColumnMetaData> loadColumnMetaData(ConnectionFactory connectionFactory, String tableName) {
        return Flux.usingWhen(connectionFactory.create(),
                              connection -> Flux.from(connection.createStatement("desc " + tableName).execute())
                                      .flatMap(result -> result.map((descRow, rowMetadata) -> new ColumnMetaData(
                                              getField(descRow),
                                              getDataType(descRow),
                                              getDataTypeName(descRow),
                                              isPrimary(descRow),
                                              isGenerated(descRow),
                                              true))),
                              connection -> connection.close());
    }

    /** Returns the column name from a {@code desc} result row. */
    static String getField(Row descRow) {
        return descRow.get("Field", String.class);
    }

    /**
     * Returns the bare type name from a {@code desc} result row,
     * stripping any length/precision suffix (e.g. {@code varchar(32)} -> {@code varchar}).
     */
    static String getDataTypeName(Row descRow) {
        String type = descRow.get("Type", String.class);
        if (type == null) {
            return null;
        }
        if (type.contains("(")) {
            type = type.substring(0, type.indexOf("("));
        }
        return type;
    }

    /** Always 0: the JDBC type code is not needed for the sharding rewrite flow. */
    static int getDataType(Row descRow) {
        // TODO does not affect the sharding rewrite flow
        return 0;
    }

    /** True when the {@code Key} column marks this column as (part of) the primary key. */
    static boolean isPrimary(Row descRow) {
        String key = descRow.get("Key", String.class);
        return key != null && key.contains("PRI");
    }

    /** True when the column value is database-generated ({@code auto_increment}). */
    static boolean isGenerated(Row descRow) {
        String extra = descRow.get("Extra", String.class);
        return extra != null && extra.contains("auto_increment");
    }

    /**
     * Executes {@code show tables} and blocks for the full table-name list.
     * The connection is released via {@code usingWhen} (previously leaked).
     */
    private List<String> loadAllTableName(ConnectionFactory connectionFactory) {
        return Flux.usingWhen(connectionFactory.create(),
                              connection -> Flux.from(connection.createStatement("show tables").execute())
                                      .flatMap(result -> result.map((row, rowMetadata) -> row.get(0, String.class))),
                              connection -> connection.close())
                .collectList()
                .block();
    }
}
