package net.xdclass.shopproject.canal;

import com.alibaba.fastjson.JSONObject;
import com.alibaba.otter.canal.client.CanalConnector;
import com.alibaba.otter.canal.client.CanalConnectors;
import com.alibaba.otter.canal.protocol.CanalEntry;
import com.alibaba.otter.canal.protocol.Message;
import com.alibaba.otter.canal.protocol.exception.CanalClientException;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.ApplicationArguments;
import org.springframework.boot.ApplicationRunner;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.stereotype.Component;

import java.net.InetSocketAddress;
import java.util.List;

/**
 * Canal -> Redis cache synchronisation client.
 *
 * <p>Implements {@link ApplicationRunner} so the consume loop starts as soon as the
 * Spring Boot application context is ready. It connects to a Canal server, subscribes
 * to MySQL binlog events, and mirrors row changes into Redis: INSERT/UPDATE rows are
 * written as JSON values, DELETE rows remove the corresponding key.
 *
 * <p>Cache keys have the form {@code <table>:<firstColumnValue>}; the first column of
 * each row is assumed to be the primary key — TODO confirm against the table schemas.
 */
@Component
public class CanalRedisConsumer implements ApplicationRunner {

    // Raw type kept on purpose: Spring Boot's auto-configured "redisTemplate" bean is
    // RedisTemplate<Object, Object>, and narrowing the generics here could break
    // injection — NOTE(review): confirm the actual bean definition before typing this.
    @Autowired
    private RedisTemplate redisTemplate;

    /** Maximum number of binlog entries fetched from Canal per poll. */
    private static final int BATCH_SIZE = 100;

    /** Pause between empty polls / reconnect attempts, in milliseconds. */
    private static final long POLL_INTERVAL_MS = 1000L;

    /**
     * Entry point invoked once after startup: connects to Canal, subscribes to all
     * schemas/tables, and runs the poll-and-ack loop until the thread is interrupted.
     *
     * @param args application arguments (unused)
     * @throws Exception propagated from the Canal client; logged at this boundary
     */
    @Override
    public void run(ApplicationArguments args) throws Exception {
        System.out.println("ApplicationRunner 被调用了");

        CanalConnector canalConnector = CanalConnectors.newSingleConnector(
                new InetSocketAddress("120.24.7.58", 11111), "example", "", "");
        try {
            if (!connectWithRetry(canalConnector)) {
                return; // interrupted before a connection could be established
            }

            // Subscribe to every schema and table (the pattern ".*\\..*").
            canalConnector.subscribe(".*\\..*");
            // To watch a single table instead:
            // canalConnector.subscribe("shop.product");

            // Roll back to the last acknowledged batch so any entries fetched but not
            // acked by a previous run are redelivered.
            canalConnector.rollback();

            consumeLoop(canalConnector);
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            canalConnector.disconnect();
        }
    }

    /**
     * Connects to the Canal server, retrying every {@link #POLL_INTERVAL_MS} ms until
     * the connection succeeds or the thread is interrupted.
     *
     * @return {@code true} once connected, {@code false} if interrupted while waiting
     */
    private boolean connectWithRetry(CanalConnector canalConnector) {
        while (true) {
            try {
                canalConnector.connect();
                return true;
            } catch (CanalClientException e) {
                e.printStackTrace();
                try {
                    Thread.sleep(POLL_INTERVAL_MS);
                } catch (InterruptedException ie) {
                    // Preserve interrupt status so callers can observe the shutdown request.
                    Thread.currentThread().interrupt();
                    return false;
                }
            }
        }
    }

    /**
     * Poll-and-ack loop: fetches batches of binlog entries, applies each row change to
     * Redis, then acknowledges the batch. Returns when the thread is interrupted.
     */
    private void consumeLoop(CanalConnector canalConnector) {
        while (!Thread.currentThread().isInterrupted()) {
            // getWithoutAck: the batch stays pending server-side until ack(batchId)
            // confirms it (or rollback(batchId) requeues it after a failure).
            Message message = canalConnector.getWithoutAck(BATCH_SIZE);
            long batchId = message.getId();
            if (batchId == -1 || message.getEntries().isEmpty()) {
                // Nothing new — wait a beat before polling again.
                try {
                    System.out.println("No new data，waiting for push");
                    Thread.sleep(POLL_INTERVAL_MS);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt(); // stop the loop gracefully
                    return;
                }
                continue;
            }
            for (CanalEntry.Entry entry : message.getEntries()) {
                processEntry(entry);
            }
            // Confirm the whole batch was consumed successfully.
            canalConnector.ack(batchId);
        }
    }

    /**
     * Parses one binlog entry and applies its row changes to Redis.
     *
     * <p>Transaction begin/end markers carry no row data and are skipped. Entries whose
     * store value cannot be parsed are logged and skipped rather than aborting the loop.
     */
    private void processEntry(CanalEntry.Entry entry) {
        if (entry.getEntryType() == CanalEntry.EntryType.TRANSACTIONBEGIN
                || entry.getEntryType() == CanalEntry.EntryType.TRANSACTIONEND) {
            return;
        }
        CanalEntry.RowChange rowChange;
        try {
            rowChange = CanalEntry.RowChange.parseFrom(entry.getStoreValue());
        } catch (Exception e) {
            e.printStackTrace();
            return;
        }
        String schemaName = entry.getHeader().getSchemaName();
        String table = entry.getHeader().getTableName();
        CanalEntry.EventType eventType = rowChange.getEventType();
        System.out.println(String.format("Binlog[%s:%s] schema[%s] table[%s] eventType[%s]",
                entry.getHeader().getLogfileName(), entry.getHeader().getLogfileOffset(),
                schemaName, table, eventType));
        for (CanalEntry.RowData rowData : rowChange.getRowDatasList()) {
            if (eventType == CanalEntry.EventType.DELETE) {
                deleteData(schemaName, table, rowData);
            } else {
                // INSERT, UPDATE and any other event carrying "after" images are upserts.
                insertOrUpdateData(schemaName, table, rowData);
            }
        }
    }

    /**
     * Writes the row's after-image to Redis as a JSON object under
     * {@code <table>:<firstColumnValue>}.
     */
    private void insertOrUpdateData(String schemaName, String table, CanalEntry.RowData rowData) {
        List<CanalEntry.Column> columns = rowData.getAfterColumnsList();
        if (columns.isEmpty()) {
            return; // no data to cache and no column to derive a key from
        }
        JSONObject jsonObject = new JSONObject();
        for (CanalEntry.Column column : columns) {
            jsonObject.put(column.getName(), column.getValue());
        }
        System.out.println("新增或更新 redis数据：" + jsonObject.toJSONString());
        // Assumes the first column is the primary key — TODO confirm table schemas.
        String key = table + ":" + columns.get(0).getValue();
        System.out.println("insertOrUpdate key：" + key);
        redisTemplate.opsForValue().set(key, jsonObject);
    }

    /**
     * Removes the Redis key derived from the row's before-image:
     * {@code <table>:<firstColumnValue>}.
     */
    private void deleteData(String schemaName, String table, CanalEntry.RowData rowData) {
        List<CanalEntry.Column> columns = rowData.getBeforeColumnsList();
        if (columns.isEmpty()) {
            return; // no before-image, so no key can be derived
        }
        JSONObject jsonObject = new JSONObject();
        for (CanalEntry.Column column : columns) {
            jsonObject.put(column.getName(), column.getValue());
        }
        System.out.println("删除redis数据：" + jsonObject.toJSONString());
        // Assumes the first column is the primary key — TODO confirm table schemas.
        String key = table + ":" + columns.get(0).getValue();
        System.out.println("delete key：" + key);
        redisTemplate.delete(key);
    }

    /** Debug helper: prints each column's name, value, and updated flag. Currently unused. */
    private static void printColumn(List<CanalEntry.Column> columns) {
        for (CanalEntry.Column column : columns) {
            System.out.println(column.getName() + ":" + column.getValue() + "    update = " + column.getUpdated());
        }
    }
}
