package com.chinasoft.reposearch.service.impl;

import com.chinasoft.reposearch.entity.po.Repo;
import com.chinasoft.reposearch.exception.hiveException.HiveException;
import com.chinasoft.reposearch.exception.hiveException.HiveFileException;
import com.chinasoft.reposearch.exception.hiveException.HiveSqlException;
import com.chinasoft.reposearch.service.HiveService;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;

import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.xcontent.XContentType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.core.io.ClassPathResource;
import org.springframework.dao.DataAccessException;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.stereotype.Service;
import org.springframework.util.StringUtils;

import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.sql.*;
import java.time.Instant;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.util.*;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;

@Service
public class HiveServiceImpl implements HiveService {

    private static final Logger logger = LoggerFactory.getLogger(HiveServiceImpl.class);

    /** Maximum number of SQL characters shown in log output; longer SQL is truncated. */
    private static final int SQL_LOG_LIMIT = 100;

    /** Pause between consecutive statements when executing a SQL script. */
    private static final long STATEMENT_DELAY_MS = 100L;

    /** Elasticsearch index that repo documents are bulk-written to. */
    private static final String ES_INDEX = "repo";

    /** Rows per ES bulk request; also used as the JDBC fetch size for streaming reads. */
    private static final int BULK_BATCH_SIZE = 10_000;

    /** Jackson mapper is thread-safe once configured; share one instance. */
    private static final ObjectMapper MAPPER = new ObjectMapper();

    /** Thread-safe formatter for the createdat/pushedat day strings. */
    private static final DateTimeFormatter DAY_FMT = DateTimeFormatter.ofPattern("yyyy-MM-dd");

    private final JdbcTemplate hiveJdbcTemplate;
    private final RestHighLevelClient client;

    @Autowired
    public HiveServiceImpl(JdbcTemplate hiveJdbcTemplate, RestHighLevelClient client) {
        this.hiveJdbcTemplate = hiveJdbcTemplate;
        this.client = client;
    }

    /**
     * Executes the SQL file at the given classpath location, statement by statement.
     *
     * @param resourcePath path relative to the resources directory, e.g. {@code "sql/init.sql"}
     * @throws HiveFileException if the file is missing or cannot be read
     * @throws HiveSqlException  if a statement fails with a SQL grammar error
     * @throws HiveException     for any other execution failure
     */
    @Override
    public void executeSqlFile(String resourcePath) {
        try {
            ClassPathResource resource = new ClassPathResource(resourcePath);
            if (!resource.exists()) {
                throw new IOException("SQL文件不存在: " + resourcePath);
            }

            // Read the whole file as UTF-8; try-with-resources closes the stream
            // (the previous version leaked it).
            String sqlContent;
            try (InputStream in = resource.getInputStream()) {
                sqlContent = new String(in.readAllBytes(), StandardCharsets.UTF_8);
            }

            List<String> statements = parseSqlStatements(sqlContent);
            executeStatements(statements);

            logger.info("成功执行SQL文件: {}, 包含 {} 条语句", resourcePath, statements.size());

        } catch (IOException e) {
            throw new HiveFileException("读取SQL文件失败:" + resourcePath, e);
        } catch (Exception e) {
            throw classify(e, "SQL语句有误:", "执行SQL文件失败" + resourcePath);
        }
    }

    /**
     * Splits raw SQL file content into individual executable statements.
     *
     * <p>Order matters: block comments are removed first, then {@code --} line comments are
     * stripped per statement, and only then are empty fragments filtered out. The previous
     * version filtered before stripping line comments, so a statement consisting solely of a
     * comment survived as a non-empty string and was sent to Hive.</p>
     *
     * @param sqlContent raw file content; may be null
     * @return trimmed, comment-free, non-empty statements in file order
     */
    private List<String> parseSqlStatements(String sqlContent) {
        if (sqlContent == null) {
            return new ArrayList<>();
        }

        // Remove /* ... */ block comments before splitting on ';'.
        String cleanedSql = sqlContent.replaceAll("/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/", "");

        return Arrays.stream(cleanedSql.split(";"))
                .map(statement -> statement.replaceAll("--.*", "")) // strip -- line comments first
                .map(String::trim)
                .filter(statement -> !statement.isEmpty())          // drop comment-only fragments
                .collect(Collectors.toList());
    }

    /**
     * Executes the statements sequentially, pausing {@value #STATEMENT_DELAY_MS} ms between
     * consecutive statements.
     *
     * <p>The previous version scheduled every statement up front on a single-thread scheduler:
     * statements after a failed one still ran, and {@code join()} wrapped failures in
     * {@code CompletionException}, obscuring the original exception. A plain loop keeps the
     * same 100 ms spacing, stops at the first failure, and propagates the raw exception.</p>
     *
     * @param statements SQL statements, executed in list order
     * @throws HiveException if the delay is interrupted (interrupt flag is restored)
     */
    private void executeStatements(List<String> statements) {
        for (int i = 0; i < statements.size(); i++) {
            if (i > 0) {
                try {
                    Thread.sleep(STATEMENT_DELAY_MS);
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt(); // restore interrupt status
                    throw new HiveException("SQL执行被中断", ie);
                }
            }
            String sql = statements.get(i);
            logger.debug("执行第 {} 条SQL: {}", i + 1, sqlShow(sql));
            hiveJdbcTemplate.execute(sql);
        }
    }

    /**
     * Shortens SQL for log output, appending "..." only when something was cut off.
     * Implemented with plain substring: the previous Spring {@code StringUtils.truncate}
     * call already appends its own truncation suffix, producing a doubled marker.
     */
    private String sqlShow(String sql) {
        return sql.length() > SQL_LOG_LIMIT ? sql.substring(0, SQL_LOG_LIMIT) + "..." : sql;
    }

    /**
     * Maps a low-level failure onto the project's Hive exception hierarchy. Spring reports
     * grammar problems with messages containing "bad SQL grammar"; everything else becomes a
     * generic {@link HiveException}. Null-safe: the previous inline checks called
     * {@code e.getMessage().contains(...)} and threw NPE when the message was null,
     * masking the real error.
     *
     * @param e        the caught exception (kept as the cause)
     * @param sqlMsg   message to use for grammar errors
     * @param otherMsg message to use for any other failure
     * @return the exception to throw (never null)
     */
    private static RuntimeException classify(Exception e, String sqlMsg, String otherMsg) {
        String msg = e.getMessage();
        if (msg != null && msg.contains("bad SQL grammar")) {
            return new HiveSqlException(sqlMsg, e);
        }
        return new HiveException(otherMsg, e);
    }

    /**
     * Executes a single SQL statement.
     *
     * @param sql the statement to execute
     * @throws HiveSqlException on SQL grammar errors
     * @throws HiveException    on any other failure
     */
    @Override
    public void executeSql(String sql) {
        try {
            hiveJdbcTemplate.execute(sql);
            logger.info("SQL执行成功: {}", sqlShow(sql));
        } catch (Exception e) {
            throw classify(e, "SQL语句有误:", "SQL执行失败" + sql);
        }
    }

    /**
     * Streams every row of {@code tableName} and bulk-indexes the rows into Elasticsearch in
     * batches of {@link #BULK_BATCH_SIZE}, keeping memory flat via a forward-only cursor.
     *
     * <p>NOTE(review): column/table names are concatenated into the SQL text — callers must
     * never pass untrusted input here.</p>
     *
     * @param columns   column names to select
     * @param tableName table to read from
     * @throws HiveSqlException on SQL errors
     * @throws HiveException    on ES or other failures
     */
    public void queryForAllList(String[] columns, String tableName) {
        AtomicInteger total = new AtomicInteger(0); // effectively-final counter for the lambda

        String sql = "SELECT " + String.join(",", columns) + " FROM " + tableName;

        hiveJdbcTemplate.execute((Connection conn) -> {
            try (PreparedStatement ps = conn.prepareStatement(sql,
                    ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY)) {
                // Fetch size lets the driver stream rows instead of buffering the whole table.
                ps.setFetchSize(BULK_BATCH_SIZE);

                try (ResultSet rs = ps.executeQuery()) {
                    ResultSetMetaData meta = rs.getMetaData();
                    int colCnt = meta.getColumnCount();

                    List<Map<String, Object>> batch = new ArrayList<>(BULK_BATCH_SIZE);

                    while (rs.next()) {
                        Map<String, Object> flat = new HashMap<>(colCnt);
                        for (int i = 1; i <= colCnt; i++) {
                            flat.put(meta.getColumnName(i), rs.getObject(i));
                        }
                        batch.add(flat);

                        if (batch.size() >= BULK_BATCH_SIZE) {
                            bulkToEs(batch);
                            logger.info("已 bulk {} 条", total.addAndGet(batch.size()));
                            batch.clear();
                        }
                    }

                    // Flush the final partial batch.
                    if (!batch.isEmpty()) {
                        bulkToEs(batch);
                        logger.info("最后 bulk {} 条", total.addAndGet(batch.size()));
                    }
                    logger.info("全部处理完成，总计 {} 条", total.get());
                }
            } catch (SQLException e) {
                throw new HiveSqlException("SQL 异常", e);
            } catch (Exception e) {
                throw new HiveException("查询失败", e);
            }
            return null;
        });
    }

    /**
     * Formats a SQL timestamp as {@code yyyy-MM-dd} in the system time zone.
     *
     * @param ts the timestamp, may be null
     * @return the day string, or null if {@code ts} is null
     */
    private static String toDay(Timestamp ts) {
        if (ts == null) {
            return null;
        }
        return Instant.ofEpochMilli(ts.getTime())
                .atZone(ZoneId.systemDefault())
                .format(DAY_FMT);
    }

    /**
     * Parses a JSON array string into a list of maps.
     *
     * @param json the JSON text, may be null
     * @return the parsed list, or null if {@code json} is null
     * @throws IOException if the text is not valid JSON
     */
    private static List<Map<String, Object>> parseJsonList(String json) throws IOException {
        if (json == null) {
            return null;
        }
        return MAPPER.readValue(json, new TypeReference<List<Map<String, Object>>>() {});
    }

    /**
     * Converts one batch of flat Hive rows (column names prefixed with "repo.") into ES
     * documents and sends them as a single bulk request.
     *
     * @param batch rows to index; an empty batch is a no-op
     * @throws HiveFileException if the languages/topics JSON columns cannot be parsed
     * @throws HiveException     on bulk I/O errors or per-item bulk failures
     */
    private void bulkToEs(List<Map<String, Object>> batch) {
        if (batch.isEmpty()) return;

        BulkRequest bulk = new BulkRequest();
        for (Map<String, Object> flat : batch) {
            // 1. Strip the "repo." table prefix from column names.
            Map<String, Object> doc = new HashMap<>();
            flat.forEach((k, v) -> doc.put(k.startsWith("repo.") ? k.substring(5) : k, v));

            // 2. Normalize timestamps to day strings (overwrites the raw Timestamp values).
            doc.put("createdat", toDay((Timestamp) flat.get("repo.createdat")));
            doc.put("pushedat", toDay((Timestamp) flat.get("repo.pushedat")));

            // 3. JSON string columns -> structured objects. Null-safe: the previous version
            //    passed null straight into Jackson and blew up on rows with missing columns.
            try {
                doc.put("languages", parseJsonList((String) flat.get("repo.languages")));
                doc.put("topics", parseJsonList((String) flat.get("repo.topics")));
            } catch (Exception e) {
                throw new HiveFileException("languages/topics 解析失败", e);
            }

            // 4. Deterministic document id (owner_name) so re-runs overwrite, not duplicate.
            //    NOTE(review): a missing owner/name yields ids like "null_x" — confirm those
            //    columns are always selected by callers.
            String id = doc.get("owner") + "_" + doc.get("name");

            bulk.add(new IndexRequest(ES_INDEX)
                    .id(id)
                    .source(doc, XContentType.JSON));
        }

        // 5. Flush to ES; a bulk response can succeed at HTTP level but carry item failures.
        try {
            BulkResponse resp = client.bulk(bulk, RequestOptions.DEFAULT);
            if (resp.hasFailures()) {
                logger.error("bulk 失败: {}", resp.buildFailureMessage());
                throw new HiveException("ES bulk 失败:" + resp.buildFailureMessage());
            }
        } catch (IOException e) {
            throw new HiveException("ES IO 异常", e);
        }
    }

    /**
     * Runs a simple SELECT over the given columns/table and returns all rows in memory.
     *
     * @param columns   column names to select
     * @param tableName table to query
     * @return one map per row, keyed by column name
     * @throws HiveSqlException on SQL grammar errors
     * @throws HiveException    on any other data-access failure
     */
    @Override
    public List<Map<String, Object>> queryForList(String[] columns, String tableName) {
        String sql = "select " + String.join(", ", columns) + " from " + tableName;
        try {
            List<Map<String, Object>> list = hiveJdbcTemplate.queryForList(sql);
            logger.info("多条查询SQL执行成功: {}", sqlShow(sql));
            return list;
        } catch (DataAccessException e) {
            throw classify(e, "查询复数数据失败:" + sql, "查询复数数据失败:" + sql);
        }
    }

}
