package com.wutoon.etl.handler;

import cn.hutool.core.collection.CollectionUtil;
import com.wutoon.etl.aop.PowerJobLogReport;
import com.wutoon.etl.constant.CommonConstant;
import com.wutoon.etl.mapper.BizOperateRecordManage;
import com.wutoon.etl.pojo.DataSourceDO;
import com.wutoon.etl.pojo.Http2DbTask;
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import org.springframework.web.client.RestTemplate;
import org.springframework.web.context.request.RequestContextHolder;
import com.wutoon.etl.util.DateUtil;

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicMarkableReference;
import java.util.stream.Collectors;

/**
 * @author 武兴云/72176468
 * @version 1.0
 * @date 2024/1/22 17:38
 * @description Handler that collects data over HTTP (paginated, per-dimension) and stores it
 * into a target database via JDBC, using a producer/consumer pipeline with a bounded queue.
 */
@Setter
@Getter
@Slf4j
public class HttpCollectHandler {
    // Task definition: collect-source config, target datasource/table/SQL, concurrency knobs.
    private Http2DbTask task;

    // Used by producer (collector) threads to call the remote HTTP endpoint.
    private RestTemplate restTemplate;

    // NOTE(review): presumably records operate/audit entries for each collection run — verify against HttpCollector usage.
    private BizOperateRecordManage bizOperateRecordManage;

    // Second-level worker pool sizing. Core size defaults to 0 because at construction time it is
    // not yet known whether the second-level pool will be used; worker threads are only created on
    // demand, and the core size can be raised later if needed.
    private int coreSize = 0;

    private int maxPoolSize = 10;

    // Idle worker keep-alive, in seconds (unit applied where the pool is constructed).
    private int keepAlive = 30;

    // Submitting a second-level task is near-instant; in the worst case every first-level thread
    // submits second-level work that backs up in this queue. The capacity is sized generously so
    // no task is lost: with at most 8 first-level workers, ~1,000,000 rows each at 500 rows/page
    // = 2000 pages -> 2000 * 8 / 2 = 8000 queued tasks.
    private int queueCapacity = 8000;

    /**
     * Bounded hand-off queue between HTTP collector producers and JDBC writer consumers.
     */
    private BlockingQueue<Object[]> queue = new LinkedBlockingQueue<>(CommonConstant.MAX_FETCH_SIZE);

    public HttpCollectHandler(Http2DbTask task, RestTemplate restTemplate, BizOperateRecordManage bizOperateRecordManage) {
        this.task = task;
        this.restTemplate = restTemplate;
        this.bizOperateRecordManage = bizOperateRecordManage;
    }

    // One JDBC connection per writer thread; opened in start() and closed in its finally block.
    private List<Connection> targetConnections = new ArrayList<>();

/**
 * 入参中的collectConcurrency，writeConcurrency，secondConcurrency，batchInsertSize，pageSize控制采集速度，需要评估设置
 * 测试1：十个代理，100万数据，
 * collectConcurrency=1，writeConcurrency=1，secondConcurrency=30，batchInsertSize=2000，pageSize=1000
 * 耗时：130S
 * 测试2：十个代理，100万数据，
 * collectConcurrency=1，writeConcurrency=2，secondConcurrency=30，batchInsertSize=2000，pageSize=1000
 * 耗时：60S
 * 测试3：十个代理，100万数据，
 * collectConcurrency=2，writeConcurrency=4，secondConcurrency=30，batchInsertSize=2000，pageSize=1000
 * 耗时：40S
 * 参数示例：
 * {
 *     "shardNum": 1,
 *     "customParam": {
 *         "collectSource": {
 *             "url": "https://vwork-vkal-uat.vivo.xyz/vkal/yz/abnormal",
 *             "method": "POST",
 *             "head": {
 *                 "Accept":"application/json"
 *             },
 *             "bodyTemplate":{
 *                 "firstAgentCode":"{firstAgentCode}",
 *                 "pageSize":"{pageSize}",
 *                 "pageNo":"{pageNo}"
 *             },
 *             "dimension": {
 *                 "key": "抓取维度字段firstAgentCode",
 *                 "value": "抓取维度值，多个逗号隔开，ref:开头时自定义方法获取M10A00"
 *             },
 *             "page": {
 *                 "key": "分页下标字段名接口不一致：pageNo",
 *                 "startIndex":"分页开始下标，0,1"
 *             },
 *             "pageSize": {
 *                 "key": "分页大小字段名接口不一致pageSize",
 *                 "size": 1000
 *             },
 *             "beginTime": {
 *                 "key": "开始时间字段，start",
 *                 "value": "支持变量占位符${-1d,timestamp} 2024-05-07 00:00:00"
 *             },
 *             "endTime": {
 *                 "key": "结束时间字段：end",
 *                 "value": "支持变量占位符${-1d,timestamp} 2024-05-07 00:00:00"
 *             },
 *             "pageTotalExpression": "aviator表达式，Integer.MAX_VALUE，当没有返回分页信息时固定为Integer.MAX_VALUE",
 *             // "pageTotalExpression": "aviator表达式，(data.total/1000)+((data.total%1000) >0?1:0)",
 *             "judgeSuccessExpression": "aviator表达式，code==200",
 *             "dataExpression": "aviator表达式，以ref开头时为自定义方法，ref:com.vivo.it.common.data.collection.service.handler.function.DealYzResponseFunction",
 *             "fieldList": "firstAgentCode,orderNo,customerCode,customerAccountCode,customerWhCode,skuCode,skuName,imei1,imei2,meid,createTime,updateTime,deleteFlag,biz_operate_record_id",
 *             "system":"YOUZAN",
 *             "secondConcurrency":30
 *         },
 *         "targetCode": "task_query_center",
 *         "increase": "false",
 *         "targetTable": "ods_yz_abnormal_uat",
 *         "targetSql": "insert into ods_yz_abnormal_uat(first_agent_code,order_no,customer_code,customer_account_code,customer_warehouse_code,sku_code,sku_name,imei1,imei2,meid,create_time,update_time,delete_flag,biz_operate_record_id) values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)",
 *         "batchInsertSize": 2000,
 *         "collectConcurrency": 2,
 *         "writeConcurrency": 4
 *     }
 * }
 *
 * @author 武兴云/72176468
 * @date 2024/2/29 15:43
 * @update_by 武兴云/72176468
 * @update_at 2024/2/29 15:43
 * @creed if you have doubt , please contact me !!!
 */
public void start() throws Exception {
    PowerJobLogReport.report("DataTransferHandler start");
    ExecutorService executor = null;
    ThreadPoolExecutor workerPool = null;
    try {
        // Clamp user-supplied parameters to safe limits (e.g. fetch size must not exceed 20k).
        limitParam(task);

        // 1. Whitelist check on the target SQL: only insert/upsert/replace-into may run.
        // Normalize once instead of lowercasing/trimming the same string three times.
        String normalizedSql = task.getTargetSql().toLowerCase(Locale.ROOT).trim();
        if (!normalizedSql.startsWith(CommonConstant.START_WITH_INSERT)
                && !normalizedSql.startsWith(CommonConstant.START_WITH_UPSERT)
                && !normalizedSql.startsWith(CommonConstant.START_WITH_REPLACE_INTO)) {
            throw new RuntimeException("invalid danger target sql:" + task.getTargetSql());
        }

        // 2. Open one JDBC connection per writer. Offline tasks run rarely, so connections are
        // created on demand instead of using a pool.
        for (int i = 0; i < task.getWriteConcurrency(); i++) {
            targetConnections.add(connect(task.getTargetSource()));
        }
        // Requires an account with rename/create privileges, otherwise this fails.
        executePreSQl(targetConnections, task);

        // Message channel between the main thread and workers: mark=true signals a worker error.
        AtomicMarkableReference<String> msg = new AtomicMarkableReference<>(null, false);

        // Collect dimensions, e.g. looped per first-level agent.
        List<String> dimensionSet = getDimensionSet(task.getCollectSource());
        if (dimensionSet.isEmpty()) {
            // Guard: an empty dimension set would otherwise cause a divide-by-zero below.
            throw new RuntimeException("empty collect dimension set, nothing to collect");
        }
        // At least one collector; also guards against a configured concurrency of 0.
        int collectConcurrency = Math.max(1, Math.min(dimensionSet.size(), task.getCollectConcurrency()));

        // 3. Producers: one collector per dimension slice.
        executor = Executors.newFixedThreadPool(task.getWriteConcurrency() + collectConcurrency);
        CountDownLatch produceEnd = new CountDownLatch(collectConcurrency);
        CountDownLatch consumerEnd = new CountDownLatch(task.getWriteConcurrency());
        List<List<String>> subDimensions = CollectionUtil.split(dimensionSet, (dimensionSet.size() / collectConcurrency) + 1);
        // A per-task second-level pool is used instead of a shared one because:
        // 1. If a child thread fails, shutting a shared pool down would affect other tasks.
        // 2. If it is not shut down, queued-but-pointless tasks keep consuming resources and
        //    keep pressuring downstream services.
        // ArrayBlockingQueue gives a bounded, blocking, thread-safe FIFO; combined with the
        // custom rejection policy, the submitter blocks when the queue is full. Downside: a
        // blocked submitter cannot observe child-thread failures promptly, hence the large queue.
        workerPool = new ThreadPoolExecutor(coreSize, maxPoolSize, keepAlive, TimeUnit.SECONDS,
                new ArrayBlockingQueue<>(queueCapacity), new CustomBlockingPolicy());
        for (List<String> subDim : subDimensions) {
            HttpCollector collector = new HttpCollector(workerPool, restTemplate, bizOperateRecordManage, queue, task, produceEnd, msg, subDim, RequestContextHolder.getRequestAttributes());
            executor.execute(collector);
        }

        // 4. Consumers: one JDBC writer per connection.
        for (int i = 0; i < task.getWriteConcurrency(); i++) {
            JdbcWriter writer = new JdbcWriter(queue, task, targetConnections.get(i), produceEnd, consumerEnd, msg, RequestContextHolder.getRequestAttributes());
            executor.execute(writer);
        }

        // 5. Poll for completion or worker failure so errors are noticed within one observation
        // interval instead of only at the overall timeout.
        long start = System.currentTimeMillis();
        boolean consumersDone = false;
        while ((System.currentTimeMillis() - start) / 1000 < CommonConstant.CONSUMER_TIME_OUT) {
            if (consumerEnd.await(CommonConstant.LOOP_OBSERVATION_TIME_OUT, TimeUnit.SECONDS)) {
                consumersDone = true;
                break;
            }
            if (msg.isMarked()) {
                break;
            }
        }
        if (msg.isMarked()) {
            log.warn("DataTransferHandler worker error, shutdown now");
            // Interrupt running workers; workers must check the interrupt flag themselves.
            executor.shutdownNow();
            throw new RuntimeException(msg.getReference());
        }
        if (!consumersDone) {
            // Previously the timeout path fell through to the success log; treat it as a failure.
            log.warn("DataTransferHandler not end in {} seconds, shutdown now", CommonConstant.CONSUMER_TIME_OUT);
            executor.shutdownNow();
            throw new RuntimeException("DataTransferHandler consumer timeout after " + CommonConstant.CONSUMER_TIME_OUT + " seconds");
        }
        log.info("DataTransferHandler end success");
        // Graceful shutdown: all latches reached zero, so no work remains.
        executor.shutdown();
    } catch (Exception e) {
        log.error("DataTransferHandler error", e);
        throw e;
    } finally {
        // Always release the per-task pools and JDBC connections, even on early failure
        // (previously workerPool was never shut down and executor leaked on exceptions).
        if (workerPool != null) {
            workerPool.shutdownNow();
        }
        if (executor != null && !executor.isShutdown()) {
            executor.shutdownNow();
        }
        closeConnections(targetConnections);
    }
}

    /**
     * Pre-execution SQL on the target: for full (non-incremental) loads, back up the current
     * target table by renaming it with a timestamp suffix, then recreate an empty table with the
     * same structure. (No actual TRUNCATE is issued, despite the original log wording.)
     *
     * @param targetConnections List<Connection> writer connections; the first is used for DDL
     * @param task Http2DbTask
     * @author 武兴云/72176468
     * @date 2024/2/27 15:24
     * @update_by 武兴云/72176468
     * @update_at 2024/2/27 15:24
     * @creed if you have doubt , please contact me !!!
     */
    private void executePreSQl(List<Connection> targetConnections, Http2DbTask task) {
        // Full loads need an empty target table; incremental loads skip this entirely.
        if (!task.getIncrease()) {
            log.info("DataTransferHandler execute pre rename/create sql");

            // Rename the current table to a timestamped backup name.
            String tableBackUp = task.getTargetTable() + DateUtil.generateCurrentDayId(DateUtil.MMDDHHMM);
            executeDdl(targetConnections.get(0), getRenameTableSql(task, tableBackUp));

            // Recreate an empty table modeled on the backup table's structure.
            executeDdl(targetConnections.get(0), getCreateNewTableSql(task, tableBackUp));
        }
    }

    /**
     * Executes a single DDL/update statement, wrapping failures with the offending SQL.
     *
     * @param connection Connection to execute against
     * @param sql String statement to run
     */
    private void executeDdl(Connection connection, String sql) {
        try (PreparedStatement stmt = connection.prepareStatement(sql)) {
            stmt.executeUpdate();
        } catch (Exception e) {
            log.error("execute pre sql error:{}", sql, e);
            // Preserve the cause instead of flattening it to getMessage().
            throw new RuntimeException("execute pre sql error:" + sql, e);
        }
    }

    /**
     * Builds the statement that renames the current target table to its backup name.
     * Only the MySQL dialect is implemented; any other driver falls back to it.
     *
     * @param task Http2DbTask
     * @param tableBackUp String backup table name
     * @return String rename-table statement
     * @author 武兴云/72176468
     * @date 2024/2/27 15:24
     * @update_by 武兴云/72176468
     * @update_at 2024/2/27 15:24
     * @creed if you have doubt , please contact me !!!
     */
    private static String getRenameTableSql(Http2DbTask task, String tableBackUp) {
        // Dialect dispatch: every known driver currently maps to the MySQL template.
        switch (task.getTargetSource().getDriver()) {
            case CommonConstant.MYSQL_DRIVER:
            default:
                return String.format(CommonConstant.RENAME_TABLE, task.getTargetTable(), tableBackUp);
        }
    }

    /**
     * Builds the statement that recreates an empty target table modeled on the backup table.
     * Only the MySQL dialect is implemented; any other driver falls back to it.
     *
     * @param task Http2DbTask
     * @param tableBackUp String backup table to copy the structure from
     * @return String create-table statement
     * @author 武兴云/72176468
     * @date 2024/2/27 15:23
     * @update_by 武兴云/72176468
     * @update_at 2024/2/27 15:23
     * @creed if you have doubt , please contact me !!!
     */
    private static String getCreateNewTableSql(Http2DbTask task, String tableBackUp) {
        // Dialect dispatch: every known driver currently maps to the MySQL template.
        switch (task.getTargetSource().getDriver()) {
            case CommonConstant.MYSQL_DRIVER:
            default:
                return String.format(CommonConstant.CREATE_TABLE_LIKE, task.getTargetTable(), tableBackUp);
        }
    }

    /**
     * Closes all target connections. Attempts every close even if one fails (the original threw
     * on the first failure, leaking the remaining connections); the first failure is rethrown
     * after all connections have been given a chance to close.
     *
     * @param targetConnections List<Connection>
     * @author 武兴云/72176468
     * @date 2024/2/27 15:23
     * @update_by 武兴云/72176468
     * @update_at 2024/2/27 15:23
     * @creed if you have doubt , please contact me !!!
     */
    private void closeConnections(List<Connection> targetConnections) {
        SQLException firstFailure = null;
        for (Connection connection : targetConnections) {
            try {
                connection.close();
            } catch (SQLException e) {
                log.error("close target connection error", e);
                if (firstFailure == null) {
                    firstFailure = e;
                }
            }
        }
        if (firstFailure != null) {
            // Surface the first failure only after every connection was attempted.
            throw new RuntimeException(firstFailure);
        }
    }

/**
 * Clamps user-supplied task parameters to safe limits before the pipeline starts.
 *
 * @param task Http2DbTask
 * @author 武兴云/72176468
 * @date 2024/2/27 15:23
 * @update_by 武兴云/72176468
 * @update_at 2024/2/27 15:23
 * @creed if you have doubt , please contact me !!!
 */
private void limitParam(Http2DbTask task) {
    // HTTP page size: out-of-range values fall back to the maximum allowed page size.
    Http2DbTask.HttpCollectSourceConfig collectSource = task.getCollectSource();
    Http2DbTask.HttpCollectParamConfig pageSize = collectSource.getPageSize();
    if (pageSize != null && pageSize.getSize() != null) {
        boolean pageSizeOutOfRange = pageSize.getSize() > CommonConstant.HTTP_MAX_PAGE_SIZE
                || pageSize.getSize() < CommonConstant.NUMBER_ZERO;
        if (pageSizeOutOfRange) {
            pageSize.setSize(CommonConstant.HTTP_MAX_PAGE_SIZE);
        }
    }
    // Collect concurrency: missing or out-of-range values fall back to the maximum concurrency.
    Integer collectConcurrency = task.getCollectConcurrency();
    boolean concurrencyInvalid = collectConcurrency == null
            || collectConcurrency > CommonConstant.MAX_CONCURRENCY_SIZE
            || collectConcurrency < CommonConstant.NUMBER_ZERO;
    if (concurrencyInvalid) {
        task.setCollectConcurrency(CommonConstant.MAX_CONCURRENCY_SIZE);
    }
    // Batch insert size: out-of-range values fall back to the maximum batch size.
    boolean batchSizeOutOfRange = task.getBatchInsertSize() > CommonConstant.MAX_BATCH_SIZE
            || task.getBatchInsertSize() < CommonConstant.NUMBER_ZERO;
    if (batchSizeOutOfRange) {
        task.setBatchInsertSize(CommonConstant.MAX_BATCH_SIZE);
    }
}

    /**
     * Opens a raw JDBC connection to the given datasource with a login timeout applied.
     *
     * @param dataSourceDO DataSourceDO carrying driver class, JDBC URL and credentials
     * @return Connection a freshly opened connection (caller is responsible for closing it)
     * @author 武兴云/72176468
     * @date 2024/2/27 15:23
     * @update_by 武兴云/72176468
     * @update_at 2024/2/27 15:23
     * @creed if you have doubt , please contact me !!!
     */
    private Connection connect(DataSourceDO dataSourceDO) {
        try {
            // Explicit driver registration; mostly redundant on JDBC 4+ drivers but harmless.
            Class.forName(dataSourceDO.getDriver());
            DriverManager.setLoginTimeout(CommonConstant.TIMEOUT_SECONDS);
            return DriverManager.getConnection(dataSourceDO.getJdbcUrl(), dataSourceDO.getUsername(), dataSourceDO.getPassword());
        } catch (Exception e) {
            // Keep the cause so connection failures are diagnosable (the original dropped it).
            throw new RuntimeException("initialize datasource connection error", e);
        }
    }


    /**
     * Resolves the list of collect dimensions. A value starting with the "ref:" prefix names a
     * public no-arg method on this handler to invoke reflectively; otherwise the value is a
     * comma-separated list that is split and trimmed.
     *
     * @param sourceConfig Http2DbTask.HttpCollectSourceConfig
     * @return List<String> dimension values to iterate over
     * @author 武兴云/72176468
     * @date 2024/2/27 11:53
     * @update_by 武兴云/72176468
     * @update_at 2024/2/27 11:53
     * @creed if you have doubt , please contact me !!!
     */
    @SuppressWarnings("unchecked") // reflective call is documented to return List<String>
    private List<String> getDimensionSet(Http2DbTask.HttpCollectSourceConfig sourceConfig) {
        String set = sourceConfig.getDimension().getValue();
        if (set.startsWith(CommonConstant.REF_METHOD)) {
            // "ref:" prefix: everything after the prefix is the method name to call on this class.
            Method method;
            try {
                method = this.getClass().getMethod(set.split(CommonConstant.REF_METHOD)[1].trim());
                return (List<String>) method.invoke(this);
            } catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException e) {
                // Log with the throwable so the reflection failure's stack trace is retained.
                log.error("Http collector getDimensionSet error", e);
                throw new RuntimeException(e);
            }
        }
        return Arrays.stream(set.split(",")).map(String::trim).collect(Collectors.toList());
    }

    /**
     * Test hook for the "ref:" dimension mechanism: returns a fixed single-element dimension list.
     *
     * @return List<String> one stub dimension value
     * @author 武兴云/72176468
     * @date 2024/2/27 11:52
     * @update_by 武兴云/72176468
     * @update_at 2024/2/27 11:52
     * @creed if you have doubt , please contact me !!!
     */
    public List<String> testGetDimensionSet() {
        String stubDimension = "3";
        return Collections.singletonList(stubDimension);
    }

    /**
     * Rejection policy that blocks the submitting thread until queue space frees up, instead of
     * aborting immediately. No task is ever dropped silently (the file's queue sizing explicitly
     * relies on tasks not being lost): if the pool is already shut down the AbortPolicy behavior
     * applies, and if the submitter is interrupted while waiting the rejection is surfaced.
     */
    static class CustomBlockingPolicy extends ThreadPoolExecutor.AbortPolicy {
        @Override
        public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
            if (executor.isShutdown()) {
                // Pool is gone; delegate to AbortPolicy so the loss is visible to the submitter
                // (the original silently discarded the task here).
                super.rejectedExecution(r, executor);
                return;
            }
            try {
                // Block until the queue has a free slot, then enqueue the task.
                executor.getQueue().put(r);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                // The task was never enqueued; signal that instead of dropping it silently.
                throw new RejectedExecutionException("interrupted while waiting to enqueue task", e);
            }
        }
    }
}
