package io.gitee.hefren.hhsharding.engine.impl;

import static com.google.common.base.Preconditions.*;

import com.alibaba.druid.sql.SQLUtils;
import com.alibaba.druid.sql.ast.SQLExpr;
import com.alibaba.druid.sql.ast.SQLName;
import com.alibaba.druid.sql.ast.SQLStatement;
import com.alibaba.druid.sql.ast.expr.SQLIdentifierExpr;
import com.alibaba.druid.sql.ast.statement.SQLInsertStatement;
import com.alibaba.druid.sql.dialect.mysql.visitor.MySqlSchemaStatVisitor;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import io.gitee.hefren.hhsharding.config.HhShardingProperties;
import io.gitee.hefren.hhsharding.core.HhShardingResult;
import io.gitee.hefren.hhsharding.demo.model.User;
import io.gitee.hefren.hhsharding.engine.HhShardingEngine;
import io.gitee.hefren.hhsharding.strategy.HhShardingStrategy;
import io.gitee.hefren.hhsharding.strategy.impl.HhHashShardingStrategy;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.util.MultiValueMap;

import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

/**
 * Standard sharding engine: parses a SQL statement, resolves the configured
 * database and table sharding strategies for its logical table, and rewrites
 * the SQL against the target physical table.
 *
 * @author lifei
 * @since 2024/8/21
 */
public class HhStandardShardingEngine implements HhShardingEngine {
    private static final Logger log = LoggerFactory.getLogger(HhStandardShardingEngine.class);

    // 库名和表名的对应关系： 一个库名对应多个表名
    private static final MultiValueMap<String, String> actualDatabases = new LinkedMultiValueMap<>();
    // 表名和库名的对应关系：一个表名对应多个库名
    private static final MultiValueMap<String, String> actualTables = new LinkedMultiValueMap<>();

    // 存放逻辑表名对应分库的策略
    private static final Map<String, HhShardingStrategy> databaseShardingStrategies = Maps.newHashMap();
    // 存放逻辑表名对应分表的策略
    private static final Map<String, HhShardingStrategy> tableShardingStrategies = Maps.newHashMap();

    public HhStandardShardingEngine(HhShardingProperties hhShardingProperties) {
        // 提取所有表的分库分表策略
        for (Map.Entry<String, HhShardingProperties.HhTableProperties> tableEntry : hhShardingProperties.getTables().entrySet()) {
            // 提取实际的库名和表名
            for (String actualNodeName : tableEntry.getValue().getActualNodeNames()) {
                String[] sp = actualNodeName.split("\\.");
                String dbName = sp[0], tableName = sp[1];
                actualDatabases.add(dbName, tableName);
                actualTables.add(tableName, dbName);
            }
            // 提取逻辑表名，对应的分库策略
            Properties databaseStrategy = tableEntry.getValue().getDatabaseStrategy();
            String databaseShardingColumns = databaseStrategy.getProperty("shardingColumns");
            String databaseShardingStrategy = databaseStrategy.getProperty("shardingStrategy");
            databaseShardingStrategies.put(tableEntry.getKey(), new HhHashShardingStrategy(databaseShardingColumns, databaseShardingStrategy));
            // 提取逻辑表名，对应的分表策略
            Properties tableStrategy = tableEntry.getValue().getTableStrategy();
            String tableShardingColumns = tableStrategy.getProperty("shardingColumns");
            String tableShardingStrategy = tableStrategy.getProperty("shardingStrategy");
            tableShardingStrategies.put(tableEntry.getKey(), new HhHashShardingStrategy(tableShardingColumns, tableShardingStrategy));
        }
    }

    @Override
    public HhShardingResult sharding(String originalSQL, Object[] args) {
        // 1. 解析SQL，获取表名； 2. 根据获取策略； 3. 解析并执行分片策略，获取结果
        SQLStatement sqlStatement = SQLUtils.parseSingleMysqlStatement(originalSQL);
        String originalTableName;
        Map<String, Object> shardingParams;
        if (sqlStatement instanceof SQLInsertStatement sqlInsertStatement) {
            originalTableName = sqlInsertStatement.getTableName().getSimpleName();
            // 创建参数
            List<SQLExpr> columns = sqlInsertStatement.getColumns();
            List<String> columnNameList = Lists.newArrayList();
            for (SQLExpr sqlExpr : columns) {
                SQLIdentifierExpr sqlIdentifierExpr = (SQLIdentifierExpr) sqlExpr;
                String columnName = sqlIdentifierExpr.getName();
                columnNameList.add(columnName);
            }
            shardingParams = Maps.newHashMap();
            for (int i = 0; i < columnNameList.size(); i++) {
                shardingParams.put(columnNameList.get(i), args[i]);
            }
        } else {
            MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor();
            visitor.setParameters(List.of(args));
            sqlStatement.accept(visitor);
            List<String> tableNameList = visitor.getOriginalTables().stream().map(SQLName::getSimpleName).distinct().toList();
            checkState(tableNameList.size()==1, "目前只支持对单表的分片操作");
            originalTableName = tableNameList.get(0);
            shardingParams = visitor.getConditions().stream().collect(Collectors.toMap(c -> c.getColumn().getName(), c -> c.getValues().get(0)));
        }

        // 获取分库的策略
        HhShardingStrategy databaseShardingStrategy = databaseShardingStrategies.get(originalTableName);
        String targetDataSource = databaseShardingStrategy.doSharding(shardingParams);
        // 获取分表的策略
        HhShardingStrategy tableShardingStrategy = tableShardingStrategies.get(originalTableName);
        String targetTableName = tableShardingStrategy.doSharding(shardingParams);
        log.debug("===> 【sharding】targetDataSource: {}, targetTableName: {}", targetDataSource, targetTableName);

        return new HhShardingResult(targetDataSource, originalSQL.replace(originalTableName, targetTableName));
    }
}
