package cn.erperp.cdc.maxwell.config;


import cn.erperp.cdc.maxwell.dao.DataSourceDao;
import cn.erperp.cdc.maxwell.entity.BusEventsApiEntity;
import cn.erperp.cdc.maxwell.entity.BusEventsApiWaitAckEntity;
import cn.erperp.cdc.maxwell.entity.DataSourceEntity;
import cn.erperp.cdc.maxwell.msg.MQConfig;
import cn.erperp.cdc.maxwell.msg.MessageSenderKafka;
import cn.erperp.cdc.maxwell.msg.MessageSenderMns;
import cn.erperp.cdc.maxwell.rowmapper.MessageRowMapper;
import cn.erperp.cdc.maxwell.rowmapper.MessageWaitAckRowMapper;
import com.alibaba.druid.pool.DruidDataSource;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.BeanUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Configuration;
import org.springframework.dao.DataAccessException;
import org.springframework.dao.DuplicateKeyException;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.util.StringUtils;

import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import java.io.IOException;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

/**
 * 需要连接的外部数据源
 */
@Configuration
@Slf4j
public class ExternalDataSourceConfig {

    @Autowired
    DataSourceDao dataSourceDao;
    @Autowired
    MessageSenderMns messageSenderMnsl;
    @Autowired
    MessageSenderKafka messageSenderKafka;

    /**
     * 全局维护的数据源,定时检测链接有效性
     */
    public Map<String, DruidDataSource> dataSourceMap = new ConcurrentHashMap();

    /**
     * 到各种数据源拉取待处理消息
     */
    ScheduledExecutorService pullingMessageExecutor = Executors.newScheduledThreadPool(5);
    /**
     * 重新发送没有确认发送到MQ的消息
     */
    ScheduledExecutorService waitMQExecutorService = Executors.newScheduledThreadPool(1);

    @PostConstruct
    public void constructDataSource(){
        Iterable<DataSourceEntity> dataSourceEntities = dataSourceDao.findAll();
        for (DataSourceEntity entity : dataSourceEntities) {
            DruidDataSource druidDataSource = new DruidDataSource();
            druidDataSource.setUrl(entity.getUrl());
            druidDataSource.setUsername(entity.getUser());
            druidDataSource.setPassword(entity.getPasswd());
            druidDataSource.setKeepAlive(true);
            druidDataSource.setInitialSize(1);

            //初始化数据源
            try {
                druidDataSource.init();
            } catch (SQLException e) {
                e.printStackTrace();
            }

            dataSourceMap.put(entity.getName(),druidDataSource);
        }


        pullingMessage();

        reSendMQTask();
    }

    private void reSendMQTask() {
        waitMQExecutorService.scheduleAtFixedRate(new Runnable() {
            @Override
            public void run() {
                ObjectMapper objectMapper = new ObjectMapper();
                Collection<DruidDataSource> values = dataSourceMap.values();
                for (DruidDataSource dataSource : values) {
                    log.info("url: {}",dataSource.getUrl());
                    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
                    log.info("获取没有发送MQ的消息start");
                    List<BusEventsApiWaitAckEntity> busEventsApiEntityList = new ArrayList<BusEventsApiWaitAckEntity>();
                    try {
                        busEventsApiEntityList = jdbcTemplate.query("select * from outbox_message_wait_ack where processing_state = 'waitMQ' order by ID asc limit 100", new MessageWaitAckRowMapper());
                    } catch (DataAccessException e) {
                        log.error(e.getMessage(),e);
                    }
                    log.info("获取没有发送MQ的消息end size: {}",busEventsApiEntityList.size());
                    for (BusEventsApiWaitAckEntity busEventsApiWaitAckEntity : busEventsApiEntityList) {
                        try {
                            String mqMessageId = null;
                            MQConfig mqConfig = objectMapper.readValue(busEventsApiWaitAckEntity.getMqJson(), MQConfig.class);
                            switch (busEventsApiWaitAckEntity.getMqType()){
                                case "mns":{
                                    mqMessageId = messageSenderMnsl.send(mqConfig.getQueueName(), busEventsApiWaitAckEntity.getEventJson(),mqConfig);
                                    break;
                                }
                                case "kafka":{
                                    mqMessageId = messageSenderKafka.send(mqConfig.getQueueName(), busEventsApiWaitAckEntity.getEventJson(),mqConfig);
                                    break;
                                }
                                default:{
                                    log.error("unknown mq type: {}",busEventsApiWaitAckEntity.getMqType());
                                }
                            }
                            if (!StringUtils.isEmpty(mqMessageId)){
                                busEventsApiWaitAckEntity.setProcessingState("waitAck");
                                //发送MQ成功
                                String updateSql = "update outbox_message_wait_ack set processing_state=? where id=?";
                                int waitAckCount = jdbcTemplate.update(updateSql, "waitAck", busEventsApiWaitAckEntity.getId());
                            }else{
                                log.error("重新发送mq失败 {}");
                            }
                        } catch (IOException e) {
                            log.error(e.getMessage(),e);
                        }catch (Exception e){
                            log.error(e.getMessage(),e);
                        }

                    }
                }
            }
        },1000*3,2000,TimeUnit.MILLISECONDS);
    }

    private void pullingMessage() {
        /**
         * 100毫秒-请求一次去拉去一次消息,投递到对于的MQ，
         * 1 如果投递成功，则把消息放入待确认
         * 2 如果投递失败，上报监控报警,失败投递次数
         */
        pullingMessageExecutor.scheduleAtFixedRate(new Runnable() {
            @Override
            public void run() {
                ObjectMapper objectMapper = new ObjectMapper();
                Collection<DruidDataSource> values = dataSourceMap.values();
                for (DruidDataSource dataSource : values) {
                    log.info("url: {}",dataSource.getUrl());
                    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
                    List<BusEventsApiEntity> busEventsApiEntityList = jdbcTemplate.query("select * from outbox_message order by ID asc limit 100", new MessageRowMapper());
                    if (busEventsApiEntityList.size() == 0){
                        continue;
                    }
                    for (BusEventsApiEntity busEventsApiEntity : busEventsApiEntityList) {
                        //TODO 投递到MQ中间件, 数据库和MQ中间件也有网络故障问题,用两阶段提交解决? ...
                        //保存

                        try {
                            /**
                             * 1 放入bus_events_api_wait_ack中，消息为 MQ发送状态=未确认，（有可能消息没有发送成功,或者成功返回路途失败），
                             * 需要重新投递到MQ，消息处理器那边，需要幂等处理，因为这里 有可能重试
                             *
                             * 2 调用MQ发送message,
                             *      2.1 收到反馈，则把消息状态=待确认
                             *      2.2 否则都是失败,需要重试，如果报错，立马报警
                             *
                             * 3 消息消费端ack,把消息放入历史库
                             */
                            BusEventsApiWaitAckEntity target = new BusEventsApiWaitAckEntity();
                            BeanUtils.copyProperties(busEventsApiEntity,target);
                            target.setProcessingState("waitMQ");
                            String sql = "insert into outbox_message_wait_ack(" +
                                    "id," +
                                    "mq_type," +
                                    "mq_json," +
                                    "class_name," +
                                    "event_json," +
                                    "user_token," +
                                    "created_date," +
                                    "creating_owner," +
                                    "processing_owner," +
                                    "processing_available_date," +
                                    "processing_state," +
                                    "error_count," +
                                    "search_key1," +
                                    "search_key2" +
                                    ") value (?,?,?,?,?,?,?,?,?,?,?,?,?,?)";
                            int step1_move_to_ack_table_count = jdbcTemplate.update(sql,
                                    target.getId(),
                                    target.getMqType(),
                                    target.getMqJson(),
                                    target.getClassName(),
                                    target.getEventJson(),
                                    target.getUserToken(),
                                    target.getCreatedDate(),
                                    target.getCreatingOwner(),
                                    target.getProcessingOwner(),
                                    target.getProcessingAvailableDate(),
                                    target.getProcessingState(),
                                    target.getErrorCount(),
                                    target.getSearchKey1(),
                                    target.getSearchKey2()
                            );
                            if (step1_move_to_ack_table_count > 0){
                                String messageId = null;
                                MQConfig mqConfig = objectMapper.readValue(busEventsApiEntity.getMqJson(), MQConfig.class);
                                switch (busEventsApiEntity.getMqType()){
                                    case "mns":{
                                        /**
                                         * 发送了消息，消费端马上，就收到了,还没有等 Duplicate entry异常出现, 重新保存，导致不会出现异常，重复发送了消息
                                         *
                                         * 1 可以在消费端，那边处理，成功ack后，只是标记 为业务处理成功，由管理平台这边，定时删除，这样
                                         * 备注： mq发送成功，马上删除，不要等下一个周期再删除，可以一定程度上避免，重复投递
                                         */
                                        messageId = messageSenderMnsl.send(mqConfig.getQueueName(), busEventsApiEntity.getEventJson(),mqConfig);
                                        break;
                                    }
                                    case "kafka":{
                                        messageId = messageSenderKafka.send(mqConfig.getQueueName(), busEventsApiEntity.getEventJson(),mqConfig);
                                        break;
                                    }
                                    default:{
                                        log.error("unknown mq type: {}",busEventsApiEntity.getMqType());
                                    }
                                }
                                if (!StringUtils.isEmpty(messageId)){
                                    target.setProcessingState("waitAck");
                                    //发送MQ成功
                                    String updateSql = "update outbox_message_wait_ack set processing_state=? where id=?";
                                    int waitAckCount = jdbcTemplate.update(updateSql, "waitAck", target.getId());
                                    //删除pulling库消息，避免重复发送消息
                                    String deletePollingMsgSql = "delete from outbox_message  where id=?";
                                    int deletePollingMsgCount = jdbcTemplate.update(deletePollingMsgSql, target.getId());
                                }else{
                                    log.error("发送mq失败 {}");
                                }
                            }


                        } catch (IOException e) {
                            log.error(e.getMessage(),e);
                        }catch (Exception e){
                            if (e instanceof DuplicateKeyException){
                                String message = e.getCause().getMessage();
                                /**
                                 * 重复的entry消息,就不要一直重复保存了，直接删除 待发送消息
                                 * Duplicate entry '1622796119640006657' for key 'PRIMARY'
                                 */
                                if (message.endsWith("for key 'PRIMARY'")){
                                    String deleteSql = "delete from outbox_message where id=?";
                                    int update = jdbcTemplate.update(deleteSql, busEventsApiEntity.getId());
                                    if (log.isDebugEnabled()){
                                        log.debug("删除待发送消息: 以及投递到wait_ack消息列表了 {}",update > 0 ? true:false);
                                    }
                                }
                            }
                            log.error(e.getMessage(),e);
                        }

                    }
                }
            }
        },1000*10,500, TimeUnit.MILLISECONDS);
    }
}
