package com.lagou.es.config;

import com.alibaba.otter.canal.client.CanalConnector;
import com.alibaba.otter.canal.client.CanalConnectors;
import com.alibaba.otter.canal.protocol.CanalEntry;
import com.alibaba.otter.canal.protocol.Message;
import com.lagou.es.service.impl.PositionServiceImpl;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.RestHighLevelClient;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.ApplicationListener;
import org.springframework.context.event.ContextRefreshedEvent;
import org.springframework.stereotype.Component;

import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

import static com.lagou.es.service.impl.PositionServiceImpl.POSITION_INDEX;

/**
 * @Author: Hugh
 * @Date: 2020/11/12
 */
@Component
public class CannalClient implements ApplicationListener<ContextRefreshedEvent> {
    private  static  final Logger logger = LogManager.getLogger(PositionServiceImpl.class);

    @Autowired
    private RestHighLevelClient client;

    @Autowired
    private BulkProcessor bulkProcessor;

    private HashMap<String,String>  map = new HashMap<>();
    ArrayList<HashMap<String,String>> dataList = new ArrayList<>();

    private final static int BATCH_SIZE = 1000;

    public void afterPropertiesSet() throws Exception {
        // 创建链接
        CanalConnector connector = CanalConnectors.newSingleConnector(new InetSocketAddress("192.168.1.7", 11111), "example", "", "");
        try {
            //打开连接
            connector.connect();
            //订阅数据库表,全部表
            connector.subscribe(".*\\..*");
            //回滚到未进行ack的地方，下次fetch的时候，可以从最后一个没有ack的地方开始拿
            connector.rollback();
            while (true) {
                // 获取指定数量的数据
                Message message = connector.getWithoutAck(BATCH_SIZE);
                //获取批量ID
                long batchId = message.getId();
                //获取批量的数量
                int size = message.getEntries().size();
                //如果没有数据
                if (batchId == -1 || size == 0) {
                    try {
                        //线程休眠2秒
                        Thread.sleep(2000);
                        //休眠后统一刷掉
//                        bulkProcessor.flush();
                    } catch (InterruptedException e) {
                        e.printStackTrace();
                    }
                } else {
                    //如果有数据,处理数据
                    handleEntry(message.getEntries());
                }
                //进行 batch id 的确认。确认之后，小于等于此 batchId 的 Message 都会被确认。
                connector.ack(batchId);
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            connector.disconnect();
        }
    }

    /**
     * 打印canal server解析binlog获得的实体类信息
     */
    private void handleEntry(List<CanalEntry.Entry> entrys) {
        for (CanalEntry.Entry entry : entrys) {
            if (entry.getEntryType() == CanalEntry.EntryType.TRANSACTIONBEGIN || entry.getEntryType() == CanalEntry.EntryType.TRANSACTIONEND) {
                //开启/关闭事务的实体类型，跳过
                continue;
            }
            //RowChange对象，包含了一行数据变化的所有特征
            //比如isDdl 是否是ddl变更操作 sql 具体的ddl sql beforeColumns afterColumns 变更前后的数据字段等等
            CanalEntry.RowChange rowChage;
            try {
                rowChage = CanalEntry.RowChange.parseFrom(entry.getStoreValue());
            } catch (Exception e) {
                throw new RuntimeException("ERROR ## parser of eromanga-event has an error , data:" + entry.toString(), e);
            }
            //获取操作类型：insert/update/delete类型
            CanalEntry.EventType eventType = rowChage.getEventType();
            //打印Header信息
            logger.info(String.format("================》; binlog[%s:%s] , name[%s,%s] , eventType : %s",
                    entry.getHeader().getLogfileName(), entry.getHeader().getLogfileOffset(),
                    entry.getHeader().getSchemaName(), entry.getHeader().getTableName(),
                    eventType));
            //判断是否是DDL语句
            if (rowChage.getIsDdl()) {
                logger.info("================》;isDdl: true,sql:" + rowChage.getSql());
            }
            //获取RowChange对象里的每一行数据，打印出来
            for (CanalEntry.RowData rowData : rowChage.getRowDatasList()) {
                //如果是删除语句
                if (eventType == CanalEntry.EventType.DELETE) {
                    printColumn(rowData.getBeforeColumnsList());
                    //如果是新增语句
                } else if (eventType == CanalEntry.EventType.INSERT) {
                    printColumn(rowData.getAfterColumnsList());
                    saveToEs(rowData.getAfterColumnsList());
                } else {
                    //如果是更新的语句
                    //变更前的数据
//                    System.out.println("------->; before");
//                    printColumn(rowData.getBeforeColumnsList());
                    //变更后的数据
                    logger.info("------->; after");
                    printColumn(rowData.getAfterColumnsList());
                    updateToEs(rowData.getAfterColumnsList());
                }
            }
        }
    }

    private void updateToEs(List<CanalEntry.Column> colData) {
        for (CanalEntry.Column column : colData){
            //这就搞出来了一条数据
            if(column.getUpdated()) {
                logger.info("updateToEs index:{}, key:{} val:{}", POSITION_INDEX, column.getName(), column.getValue());
            }
        }

    }

    private void saveToEs(List<CanalEntry.Column> colData) {
        int  count = 0;

        count ++;
        map = new HashMap<String,String>(128);
        for (CanalEntry.Column column : colData){
            //这就搞出来了一条数据
            map.put(column.getName(),column.getValue());
        }

        dataList.add(map);

        // 每1万条 写一次   不足的批次的数据 最后一次提交处理
//        if (count % 10000 == 0){
            logger.info("mysql handle data  number:"+count);
            // 将数据添加到 bulkProcessor
            for (HashMap<String,String> hashMap2 : dataList){
                bulkProcessor.add(new IndexRequest(POSITION_INDEX).source(hashMap2));
            }
        bulkProcessor.flush();
            // 每提交一次 清空 map 和  dataList
            map.clear();
            dataList.clear();
//        }

    }

    private void printColumn(List<CanalEntry.Column> columns) {
        String collect = columns.stream().filter(CanalEntry.Column::getUpdated).map(s -> s.getName() + "："+s.getValue() + " ").collect(Collectors.joining(","));
        logger.info("printColumn" + collect);
    }

    @Override
    public void onApplicationEvent(ContextRefreshedEvent contextRefreshedEvent) {
        ThreadPoolExecutor executor = new ThreadPoolExecutor(1, 2, 200, TimeUnit.MILLISECONDS,
                new ArrayBlockingQueue<>(500));
        executor.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    afterPropertiesSet();
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        });
    }
}
