package cn.ddiancan.dm.service;

import cn.ddiancan.dm.config.ConfigProperties;
import cn.ddiancan.dm.domain.DataMigrationDTO;
import cn.ddiancan.dm.domain.DataMigrationLogDTO;
import cn.ddiancan.dm.mulidatasource.MultiDBS;
import com.alibaba.fastjson.JSON;
import lombok.SneakyThrows;
import org.apache.commons.collections4.MapUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.BeanUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.ApplicationContextException;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
import org.springframework.stereotype.Service;
import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.TransactionDefinition;
import org.springframework.transaction.TransactionStatus;
import org.springframework.transaction.support.DefaultTransactionDefinition;
import org.springframework.util.CollectionUtils;

import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

@Service
public class DataMigrationExecuteService {
    private static final Logger logger = LoggerFactory.getLogger(DataMigrationExecuteService.class);

    /** Redis key prefix of the per-table distributed lock guarding a migration run. */
    private static final String SPECIAL_DATA_BACKUP = "special_data_backup";
    /** Lock lease duration in milliseconds; must stay in sync with the TTL used in {@link #tryLock}. */
    private static final long LOCK_LEASE_MILLIS = 10L * 60 * 1000;
    /** Thread-safe; build once instead of per call. */
    private static final DateTimeFormatter DATE_FORMAT = DateTimeFormatter.ofPattern("yyyy-MM-dd");

    @Autowired
    private StringRedisTemplate stringRedisTemplate;
    @Autowired
    private DataMigrationService dataMigrationService;
    @Autowired
    @Qualifier("ddcDataBackup")
    private ThreadPoolTaskExecutor threadPoolTaskExecutor;
    @Autowired
    private PlatformTransactionManager platformTransactionManager;
    // Self-injection: internal calls must go through the Spring proxy so the
    // @MultiDBS datasource-routing advice is actually applied (a plain
    // this.method() call would bypass the aspect).
    @Autowired
    private DataMigrationExecuteService dataMigrationExecuteService;
    @Autowired
    private ConfigProperties configProperties;

    /** Page size for batched migration queries (property ddc.data.backup.pagesize). */
    @Value("${ddc.data.backup.pagesize:1000}")
    private int pageSize;

    @Qualifier("redisTemplate")
    @Autowired
    private RedisTemplate redisTemplate;

    /**
     * Triggers an asynchronous, paged migration of historical rows for the table
     * named in {@code dataMigrationDTO}, guarded by a per-table distributed lock.
     *
     * @param dataMigrationDTO migration request; table name and date window are read,
     *                         condition/ref/sub-table metadata is filled in from config
     * @return "Success" when the job was scheduled, otherwise a validation error
     *         or a "still running, retry in N seconds" message
     */
    public String executeDataMigration(DataMigrationDTO dataMigrationDTO) {
        Optional<String> dateError = checkDate(dataMigrationDTO.getBeginDate(), dataMigrationDTO.getEndDate());
        if (dateError.isPresent()) {
            return dateError.get();
        }
        String beginTimeKey = dataMigrationDTO.getTableName() + "_beginTime";
        Long beginTime = (Long) redisTemplate.opsForValue().get(beginTimeKey);
        if (Objects.isNull(beginTime)) {
            beginTime = System.currentTimeMillis();
        }
        // NOTE(review): this key is written without a TTL and never deleted — confirm
        // whether it should expire together with the lock.
        redisTemplate.opsForValue().set(beginTimeKey, beginTime);
        dataMigrationDTO.setConditionDateColumnName(configProperties.getConditionColumn().get(dataMigrationDTO.getTableName()));
        dataMigrationDTO.setRefColumnName(configProperties.getRefColumn().get(dataMigrationDTO.getTableName()));
        dataMigrationDTO.setSubTableName(configProperties.getTableMap().get(dataMigrationDTO.getTableName()));
        // 进行分布式锁，因为暴露过api，禁止反复执行大数据操作
        // Distributed lock: the endpoint is public, so a second request for the
        // same table must not start another heavyweight migration.
        if (tryLock(dataMigrationDTO)) {
            redisTemplate.opsForValue().set(beginTimeKey, System.currentTimeMillis());
            try {
                dataMigrationDTO.setType("count");
                Map<String, String> sqlMap = new ConcurrentHashMap<>();
                Integer total = dataMigrationService.searchByConditionCount(dataMigrationDTO, sqlMap);
                // Single-page jobs are cheap, so the lock is freed up front.
                // FIX: was a hardcoded 1000 — kept consistent with the configured page size.
                // NOTE(review): releasing before the async workers finish means a second
                // request may overlap a small in-flight job — confirm this is intended.
                if (total <= pageSize) {
                    releaseLock(dataMigrationDTO);
                }
                int pages = (int) Math.ceil((double) total / pageSize);
                dataMigrationDTO.setType(null);
                // Submit pages last-first; each worker copies the DTO before mutating it.
                for (int i = pages - 1; i >= 0; i--) {
                    int startIndex = i * pageSize;
                    threadPoolTaskExecutor.execute(() -> migrateBatch(dataMigrationDTO, startIndex, sqlMap));
                }
            } catch (Exception e) {
                logger.error(e.getMessage(), e);
            }
            return "Success";
        }
        // Lock held by a running task: report how long until the 10-minute lease expires.
        // Re-read from Redis because the lock holder overwrote the key with its own start time.
        long lockedAt = Long.parseLong(String.valueOf(redisTemplate.opsForValue().get(beginTimeKey)));
        long waitSeconds = (LOCK_LEASE_MILLIS - (System.currentTimeMillis() - lockedAt)) / 1000;
        return String.format("任务执行中，请%s秒后重试。。。", waitSeconds);
    }

    /**
     * Validates the requested date window (format yyyy-MM-dd).
     *
     * @return an error message when invalid, otherwise {@code Optional.empty()}
     */
    private Optional<String> checkDate(String startDate, String endDate) {
        LocalDate start = LocalDate.parse(startDate, DATE_FORMAT);
        LocalDate end = LocalDate.parse(endDate, DATE_FORMAT);
        LocalDate threeMonthsAgo = LocalDate.now().minusMonths(3);
        // Rows newer than three months must stay in the live tables.
        if (end.isAfter(threeMonthsAgo)) {
            return Optional.of("三个月内数据不允许迁移");
        }
        if (end.isBefore(start)) {
            return Optional.of("起始日期不允许大于结束日期");
        }
        return Optional.empty();
    }

    /**
     * Acquires the per-table distributed lock with a 10-minute lease.
     *
     * @return true when this caller obtained the lock
     */
    private boolean tryLock(DataMigrationDTO dataMigrationDTO) {
        String key = SPECIAL_DATA_BACKUP + "_" + dataMigrationDTO.getTableName();
        return Boolean.TRUE.equals(stringRedisTemplate.opsForValue().setIfAbsent(key, key, LOCK_LEASE_MILLIS, TimeUnit.MILLISECONDS));
    }

    /** Releases the per-table distributed lock. */
    private void releaseLock(DataMigrationDTO dataMigrationDTO) {
        stringRedisTemplate.delete(SPECIAL_DATA_BACKUP + "_" + dataMigrationDTO.getTableName());
    }

    /**
     * Runs one page of the migration on a worker thread. Works on a defensive
     * copy of the request DTO so concurrent pages do not share mutable paging
     * state, and persists the final status of every involved log row.
     *
     * FIX: the former synchronized(executeDto) block locked a freshly created
     * local object — it provided no mutual exclusion and has been removed.
     */
    private void migrateBatch(DataMigrationDTO dataMigrationDTO, int startIndex, Map<String, String> sqlMap) {
        DataMigrationDTO executeDto = new DataMigrationDTO();
        BeanUtils.copyProperties(dataMigrationDTO, executeDto);
        executeDto.setStartIndex(startIndex);
        Map<String, DataMigrationLogDTO> logDTOMap = new ConcurrentHashMap<>();
        logDTOMap.put(executeDto.getTableName(), fetchLogVO(executeDto, null));
        try {
            migrateTables(executeDto, sqlMap, logDTOMap);
        } catch (Exception e) {
            // Mark every table of this batch as failed (status 5).
            logDTOMap.values().forEach(log -> {
                log.setStatus(5);
                log.setMessage(e.getMessage());
                log.setFinishTime(new Date());
            });
            logger.error("执行迁移出错：{}", e.getMessage(), e);
        } finally {
            dataMigrationService.updateDataMigrationLog(new ArrayList<>(logDTOMap.values()));
        }
    }

    /**
     * Migrates one page of the master table plus all configured sub tables:
     * builds the insert SQL, writes log rows, backs the data up to the slave
     * datasource and — only on a committed backup — deletes it from the master.
     *
     * FIX: the former synchronized(dataMigrationDTO) block locked the per-task
     * DTO copy (never shared between threads) — removed as a no-op. The no-op
     * self-assignment of the master batch number was removed as well.
     */
    private void migrateTables(DataMigrationDTO dataMigrationDTO, Map<String, String> sqlMap, Map<String, DataMigrationLogDTO> logDTOMap) {
        try {
            // SQL + rows to insert into the backup DB, keyed by table name.
            Map<String, Object[]> pendingExecuteInsertMap = new HashMap<>();
            // Rows to delete from the source DB after a successful backup.
            Map<String, List<Map<String, Object>>> pendingExecuteDeleteMap = new HashMap<>();
            DataMigrationLogDTO masterTableLogVo = logDTOMap.get(dataMigrationDTO.getTableName());
            dataMigrationService.addDataMigrationLog(masterTableLogVo);
            List<Map<String, Object>> singleBatchData = dataMigrationService.searchByCondition(dataMigrationDTO, sqlMap);
            if (CollectionUtils.isEmpty(singleBatchData)) {
                return;
            }
            String executeSql = dataMigrationService.fetchInsertExecuteSql(dataMigrationDTO.getTableName(), singleBatchData);
            masterTableLogVo.setExecuteSql(executeSql);
            masterTableLogVo.setStatus(2);
            dataMigrationService.updateDataMigrationLog(Collections.singletonList(masterTableLogVo));
            // 放入执行sql与迁移数据，方便后期统一事务
            pendingExecuteInsertMap.put(dataMigrationDTO.getTableName(), new Object[]{executeSql, singleBatchData});
            pendingExecuteDeleteMap.put(dataMigrationDTO.getTableName(), singleBatchData);
            // 处理子表执行sql及迁移数据 — select sub-table rows referencing this batch.
            if (!CollectionUtils.isEmpty(dataMigrationDTO.getSubTableName())) {
                // Loop-invariant: the master table's reference column name.
                String masterRefColumn = MapUtils.getString(dataMigrationDTO.getRefColumnName(), "master");
                for (String subTableName : dataMigrationDTO.getSubTableName()) {
                    DataMigrationDTO subDTO = new DataMigrationDTO();
                    subDTO.setTableName(subTableName);
                    subDTO.setRefColumnName(dataMigrationDTO.getRefColumnName());
                    subDTO.setSuperTableUniqueCodes(singleBatchData.stream()
                            .map(row -> MapUtils.getString(row, masterRefColumn))
                            .collect(Collectors.toList()));
                    DataMigrationLogDTO subLog = fetchLogVO(subDTO, null);
                    // Sub-table rows share the master batch number for traceability.
                    subLog.setBatchNo(masterTableLogVo.getBatchNo());
                    logDTOMap.put(subTableName, subLog);
                    dataMigrationService.addDataMigrationLog(subLog);
                    List<Map<String, Object>> subDatas = dataMigrationService.searchByCondition(subDTO, sqlMap);
                    if (CollectionUtils.isEmpty(subDatas)) {
                        continue;
                    }
                    String subExecuteSql = dataMigrationService.fetchInsertExecuteSql(subTableName, subDatas);
                    subLog.setExecuteSql(subExecuteSql);
                    subLog.setStatus(2);
                    dataMigrationService.updateDataMigrationLog(Collections.singletonList(subLog));
                    pendingExecuteInsertMap.put(subTableName, new Object[]{subExecuteSql, subDatas});
                    pendingExecuteDeleteMap.put(subTableName, subDatas);
                }
            }
            // 备份成功后，原库数据删除并提交整个事务
            // Calls go through the self-injected proxy so @MultiDBS routing applies.
            if (dataMigrationExecuteService.executeSlaveTransaction(pendingExecuteInsertMap, logDTOMap)) {
                dataMigrationExecuteService.executeMasterTransaction(pendingExecuteDeleteMap);
                logger.info("迁移已完成，迁移结果成功，当前批次为：{}", masterTableLogVo.getBatchNo());
            }
        } catch (Exception e) {
            logger.error("封装执行sql出错：{}", e.getMessage(), e);
        }
    }

    /**
     * Builds a fresh migration-log row for the given table.
     * Status codes as used in this class: 1 created, 2 SQL prepared,
     * 3 success, 4 count mismatch, 5 failed (inferred from usage — confirm).
     */
    private DataMigrationLogDTO fetchLogVO(DataMigrationDTO dataMigrationDTO, String executeSql) {
        DataMigrationLogDTO logDTO = new DataMigrationLogDTO();
        logDTO.setExecuteSql(executeSql);
        logDTO.setStatus(1);
        logDTO.setExecuteTime(new Date());
        logDTO.setBatchNo(logDTO.generateBatchNo());
        logDTO.setTableName(dataMigrationDTO.getTableName());
        logDTO.setParameters(JSON.toJSONString(dataMigrationDTO.getSuperTableUniqueCodes()));
        return logDTO;
    }

    /**
     * Inserts all pending batches into the backup (slave) datasource inside a
     * single REQUIRES_NEW transaction and updates the per-table log entries.
     *
     * @param pendingExecuteMap table name -> {insert SQL (String), rows (List of Map)}
     * @param logDTOMap         table name -> log row to update with the outcome
     * @return true when the transaction committed, false on empty input or failure
     */
    @MultiDBS("Slave")
    public boolean executeSlaveTransaction(Map<String, Object[]> pendingExecuteMap, Map<String, DataMigrationLogDTO> logDTOMap) {
        if (CollectionUtils.isEmpty(pendingExecuteMap)) {
            return false;
        }
        boolean committed = false;
        DefaultTransactionDefinition definition = new DefaultTransactionDefinition();
        definition.setPropagationBehavior(TransactionDefinition.PROPAGATION_REQUIRES_NEW);
        TransactionStatus transaction = platformTransactionManager.getTransaction(definition);
        try {
            for (Map.Entry<String, Object[]> entry : pendingExecuteMap.entrySet()) {
                Object[] value = entry.getValue();
                String executeSql = (String) value[0];
                @SuppressWarnings("unchecked")
                List<Map<String, Object>> records = (List<Map<String, Object>>) value[1];
                Integer insertCount = dataMigrationService.backHistoryDatasToSlave(records, executeSql);
                DataMigrationLogDTO dataMigrationLogDTO = logDTOMap.get(entry.getKey());
                // 数据插入于当前查询数据条数不一致，则有失败记录或插入不成功，回滚
                // NOTE(review): despite the comment, this branch only fires when
                // insertCount == 0 (not on every mismatch), it does not roll back,
                // and the commit below still allows executeMasterTransaction to
                // delete this table's rows — confirm the intended semantics.
                if (records.size() != insertCount && insertCount == 0) {
                    dataMigrationLogDTO.setStatus(4);
                    dataMigrationLogDTO.setParameters(JSON.toJSONString(records));
                    dataMigrationLogDTO.setMessage("已备份数据与实际查询数据不一致,jdbc插入时有已存在数据更新,迁移数据正常备份，但不会对此批次数据进行删除！");
                    dataMigrationLogDTO.setFinishTime(new Date());
                    logger.warn("迁移已完成，出现查询插入结果不一致，数据已备份，源数据保留不删除，批次号：{}，应迁移数据：{}，实际备份数据：{},执行sql为：{},入参为：{}", dataMigrationLogDTO.getBatchNo(), records.size(), insertCount, executeSql, JSON.toJSONString(records));
                } else {
                    dataMigrationLogDTO.setStatus(3);
                    dataMigrationLogDTO.setMessage("success");
                    dataMigrationLogDTO.setFinishTime(new Date());
                }
            }
            if (!transaction.isCompleted()) {
                platformTransactionManager.commit(transaction);
                committed = true;
            }
        } catch (Exception e) {
            logger.error("执行事务操作失败：{}", e.getMessage(), e);
            // Mark every table of this batch as failed (status 5) before rollback.
            logDTOMap.values().forEach(log -> {
                log.setStatus(5);
                log.setMessage(e.getMessage());
                log.setFinishTime(new Date());
            });
            platformTransactionManager.rollback(transaction);
        }
        return committed;
    }

    /**
     * Deletes the migrated rows from the source (master) datasource inside a
     * single REQUIRES_NEW transaction; called only after the slave backup committed.
     *
     * @param pendingExecuteMap table name -> rows to delete
     */
    @MultiDBS("Master")
    public void executeMasterTransaction(Map<String, List<Map<String, Object>>> pendingExecuteMap) {
        if (CollectionUtils.isEmpty(pendingExecuteMap)) {
            return;
        }
        DefaultTransactionDefinition definition = new DefaultTransactionDefinition();
        definition.setPropagationBehavior(TransactionDefinition.PROPAGATION_REQUIRES_NEW);
        TransactionStatus transaction = platformTransactionManager.getTransaction(definition);
        try {
            pendingExecuteMap.forEach((tableName, records) -> dataMigrationService.deleteHistoryDatas(tableName, records));
            if (!transaction.isCompleted()) {
                platformTransactionManager.commit(transaction);
            }
        } catch (Exception e) {
            // FIX: the failure was previously swallowed silently; log it before rolling back.
            logger.error("删除源库历史数据失败，事务回滚：{}", e.getMessage(), e);
            platformTransactionManager.rollback(transaction);
        }
    }
}
