package drds.data_propagate.parse.table_meta_data;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.fastsql.sql.repository.Schema;
import drds.data_propagate.driver.packets.server.ResultSetPacket;
import drds.data_propagate.filter.CanalEventFilter;
import drds.data_propagate.parse.ConnectionImpl;
import drds.data_propagate.parse.exception.ParseException;
import drds.data_propagate.parse.table_meta_data.dao.MetaHistoryDAO;
import drds.data_propagate.parse.table_meta_data.dao.MetaHistoryDO;
import drds.data_propagate.parse.table_meta_data.dao.MetaSnapshotDAO;
import drds.data_propagate.parse.table_meta_data.dao.MetaSnapshotDO;
import drds.data_propagate.parse.table_meta_data.ddl.DdlResult;
import drds.data_propagate.parse.table_meta_data.ddl.DruidDdlParser;
import drds.propagate.protocol.position.EntryPosition;
import org.apache.commons.beanutils.BeanUtils;
import org.apache.commons.lang.ObjectUtils;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.regex.Pattern;

/**
 * Table meta store managed remotely via a database: the current meta is held in an in-memory
 * repository, every DDL change is mirrored to a history table, and periodic full snapshots are
 * persisted for fast recovery. See canal internals: CanalTableMeta, ConsoleTableMetaTSDB.
 */
public class DatabaseTableMetaDataStore implements TableMetaDataStore {

    /** Sentinel position used when the meta is bootstrapped from a full "show create table" dump. */
    public static final EntryPosition INIT_POSITION = new EntryPosition("0", 0L, -2L, -1L);
    private static final Logger logger = LoggerFactory.getLogger(DatabaseTableMetaDataStore.class);
    // MySQL duplicate-key error message. Fixed: the original pattern ended with "for key '*'",
    // which applied the * quantifier to the quote character instead of matching the key name.
    private static final Pattern pattern = Pattern.compile("Duplicate entry '.*' for key '.*'");
    // H2 equivalent of the MySQL duplicate-key error (embedded/test deployments).
    private static final Pattern h2Pattern = Pattern.compile("Unique index or primary key violation");
    // Shared single-thread daemon scheduler for periodic snapshot generation.
    private static final ScheduledExecutorService scheduler = Executors
            .newSingleThreadScheduledExecutor(new ThreadFactory() {

                @Override
                public Thread newThread(Runnable r) {
                    Thread thread = new Thread(r, "[scheduler-table-meta-snapshot]");
                    thread.setDaemon(true);
                    return thread;
                }
            });
    private ReadWriteLock lock = new ReentrantReadWriteLock();
    private AtomicBoolean initialized = new AtomicBoolean(false);
    private String destination;
    private MemoryTableMetaDataStore memoryTableMetaDataStore;
    private volatile ConnectionImpl connection;                 // connection used for meta queries
    private CanalEventFilter filter;                            // whitelist filter on schema.table
    private CanalEventFilter blackFilter;                       // blacklist filter on schema.table
    private EntryPosition lastPosition;                         // position of the last applied DDL
    private boolean hasNewDdl;                                  // set when a DDL arrived since the last snapshot
    private MetaHistoryDAO metaHistoryDAO;
    private MetaSnapshotDAO metaSnapshotDAO;
    private int snapshotInterval = 24;                          // hours between snapshots
    private int snapshotExpire = 360;                           // hours before old snapshots are purged
    private ScheduledFuture<?> scheduleSnapshotFuture;

    public DatabaseTableMetaDataStore() {

    }

    /**
     * Initializes the store for the given destination and, when {@code snapshotInterval > 0},
     * schedules a periodic snapshot + expiry task. Idempotent: only the first call performs
     * initialization; always returns {@code true}.
     */
    @Override
    public boolean init(final String destination) {
        if (initialized.compareAndSet(false, true)) {
            this.destination = destination;
            this.memoryTableMetaDataStore = new MemoryTableMetaDataStore();

            // generate one snapshot every snapshotInterval hours (24h by default)
            if (snapshotInterval > 0) {
                scheduleSnapshotFuture = scheduler.scheduleWithFixedDelay(new Runnable() {

                    @Override
                    public void run() {
                        boolean applyResult = false;
                        try {
                            MDC.put("destination", destination);
                            applyResult = applySnapshotToDB(lastPosition, false);
                        } catch (Throwable e) {
                            logger.error("scheduled applySnapshotToDB failed", e);
                        }

                        try {
                            MDC.put("destination", destination);
                            // only purge old snapshots after a new one was successfully persisted
                            if (applyResult) {
                                snapshotExpire((int) TimeUnit.HOURS.toSeconds(snapshotExpire));
                            }
                        } catch (Throwable e) {
                            logger.error("scheduled snapshotExpire failed", e);
                        }
                    }
                }, snapshotInterval, snapshotInterval, TimeUnit.HOURS);
            }
        }
        return true;
    }

    /**
     * Releases resources: in-memory store, meta connection and the scheduled snapshot task.
     * (Method name kept as-is for interface compatibility.)
     */
    @Override
    public void destory() {
        if (memoryTableMetaDataStore != null) {
            memoryTableMetaDataStore.destory();
        }

        if (connection != null) {
            try {
                connection.disconnect();
            } catch (IOException e) {
                logger.error("ERROR # disconnect meta connection for address:{}", connection.getConnector()
                        .getAddress(), e);
            }
        }

        if (scheduleSnapshotFuture != null) {
            scheduleSnapshotFuture.cancel(false);
        }
    }

    /**
     * Looks up the current meta for {@code schemaName.tableName} under the read lock.
     */
    @Override
    public TableMetaData find(String schemaName, String tableName) {
        lock.readLock().lock();
        try {
            return memoryTableMetaDataStore.find(schemaName, tableName);
        } finally {
            lock.readLock().unlock();
        }
    }

    /**
     * Applies a DDL statement: first to the in-memory store, then mirrored to the remote
     * history table. Throws if the in-memory apply fails.
     */
    @Override
    public boolean apply(EntryPosition entryPosition, String schemaName, String ddl, String extra) {
        // record in the in-memory structure first
        lock.writeLock().lock();
        try {
            if (memoryTableMetaDataStore.apply(entryPosition, schemaName, ddl, extra)) {
                this.lastPosition = entryPosition;
                this.hasNewDdl = true;
                // mirror every change to the remote store as a history record
                return applyHistoryToDB(entryPosition, schemaName, ddl, extra);
            } else {
                throw new RuntimeException("apply to memory is failed");
            }
        } finally {
            lock.writeLock().unlock();
        }
    }

    /**
     * Rebuilds the in-memory meta for the given position: restore from the latest snapshot plus
     * replayed history, or — when no snapshot exists — bootstrap with a full dump of the
     * filtered tables and persist an initial snapshot.
     */
    @Override
    public boolean rollback(EntryPosition entryPosition) {
        // every rollback rebuilds the in-memory data from scratch
        this.memoryTableMetaDataStore = new MemoryTableMetaDataStore();
        boolean flag = false;
        EntryPosition snapshotPosition = buildMemFromSnapshot(entryPosition);
        if (snapshotPosition != null) {
            applyHistoryOnMemory(snapshotPosition, entryPosition);
            flag = true;
        }

        if (!flag) {
            // no snapshot data: treat as initial state and dump all watched tables
            if (dumpTableMeta(connection, filter)) {
                // persist the snapshot so later restarts recover quickly
                flag = applySnapshotToDB(INIT_POSITION, true);
            }
        }

        return flag;
    }

    @Override
    public Map<String, String> snapshot() {
        return memoryTableMetaDataStore.snapshot();
    }

    /**
     * Dumps the full table meta ("show create table" for every filtered table) into the
     * in-memory store. Used for bootstrap when no snapshot exists.
     *
     * NOTE(review): rows are read via getColumnNameList(); despite the name this appears to
     * carry the result rows in this project's ResultSetPacket — confirm against its definition.
     */
    private boolean dumpTableMeta(ConnectionImpl connection, final CanalEventFilter filter) {
        try {
            ResultSetPacket packet = connection.query("show databases");
            List<String> schemas = new ArrayList<String>();
            for (String schema : packet.getColumnNameList()) {
                schemas.add(schema);
            }

            for (String schema : schemas) {
                // filter views
                packet = connection.query("show full tables from `" + schema + "` where Table_type = 'BASE TABLE'");
                List<String> tables = new ArrayList<String>();
                for (String table : packet.getColumnNameList()) {
                    // skip the Table_type column values interleaved in the result
                    if ("BASE TABLE".equalsIgnoreCase(table)) {
                        continue;
                    }
                    String fullName = schema + "." + table;
                    // keep only tables that pass the blacklist and whitelist filters
                    if (blackFilter == null || !blackFilter.filter(fullName)) {
                        if (filter == null || filter.filter(fullName)) {
                            tables.add(table);
                        }
                    }
                }

                if (tables.isEmpty()) {
                    continue;
                }

                // batch all "show create table" statements into one multi-query
                StringBuilder sql = new StringBuilder();
                for (String table : tables) {
                    sql.append("show create table `" + schema + "`.`" + table + "`;");
                }

                List<ResultSetPacket> packets = connection.querys(sql.toString());
                for (ResultSetPacket onePacket : packets) {
                    if (onePacket.getColumnNameList().size() > 1) {
                        // second field of "show create table" is the CREATE statement
                        String oneTableCreateSql = onePacket.getColumnNameList().get(1);
                        memoryTableMetaDataStore.apply(INIT_POSITION, schema, oneTableCreateSql, null);
                    }
                }
            }

            return true;
        } catch (IOException e) {
            throw new ParseException(e);
        }
    }

    /**
     * Persists one DDL change as a history record. Duplicate-position inserts (unique key on
     * binlog file+offset and masterId+timestamp) are logged and ignored.
     */
    private boolean applyHistoryToDB(EntryPosition position, String schema, String ddl, String extra) {
        Map<String, String> content = new HashMap<String, String>();
        content.put("destination", destination);
        content.put("binlogFile", position.getJournalName());
        content.put("binlogOffest", String.valueOf(position.getPosition()));
        content.put("binlogMasterId", String.valueOf(position.getServerId()));
        content.put("binlogTimestamp", String.valueOf(position.getTimestamp()));
        content.put("useSchema", schema);
        if (content.isEmpty()) {
            throw new RuntimeException("apply failed caused by content is empty in applyHistoryToDB");
        }
        // enrich with parsed DDL details when the statement can be parsed
        List<DdlResult> ddlResults = DruidDdlParser.parse(ddl, schema);
        if (ddlResults.size() > 0) {
            DdlResult ddlResult = ddlResults.get(0);
            content.put("sqlSchema", ddlResult.getSchemaName());
            content.put("sqlTable", ddlResult.getTableName());
            content.put("sqlType", ddlResult.getEventType().name());
            content.put("sqlText", ddl);
            content.put("extra", extra);
        }

        MetaHistoryDO metaDO = new MetaHistoryDO();
        try {
            BeanUtils.populate(metaDO, content);
            // a unique constraint exists, guarding against:
            // 1. duplicate binlog file+offset
            // 2. duplicate masterId+timestamp
            metaHistoryDAO.insert(metaDO);
        } catch (Throwable e) {
            if (isUkDuplicateException(e)) {
                // duplicate position: safe to ignore
                logger.warn("dup apply for sql : " + ddl);
            } else {
                throw new ParseException("apply history to db failed caused by : " + e.getMessage(), e);
            }

        }
        return true;
    }

    /**
     * Persists a full snapshot of the in-memory meta. Skipped when nothing changed since the
     * last snapshot (unless {@code init}). Before persisting, the snapshot is validated
     * against the live database table-by-table; on mismatch nothing is written.
     */
    private boolean applySnapshotToDB(EntryPosition position, boolean init) {
        // take a consistent snapshot under the read lock
        Map<String, String> schemaDdls = null;
        lock.readLock().lock();
        try {
            if (!init && !hasNewDdl) {
                // continuous mode: no DDL since the last snapshot, nothing to do
                return false;
            }
            this.hasNewDdl = false;
            schemaDdls = memoryTableMetaDataStore.snapshot();
        } finally {
            lock.readLock().unlock();
        }

        MemoryTableMetaDataStore tmpMemoryTableMetaDataStore = new MemoryTableMetaDataStore();
        for (Map.Entry<String, String> entry : schemaDdls.entrySet()) {
            tmpMemoryTableMetaDataStore.apply(position, entry.getKey(), entry.getValue(), null);
        }

        // compare the temporary in-memory copy against the live database
        boolean compareAll = true;
        for (Schema schema : tmpMemoryTableMetaDataStore.getSchemaRepository().getSchemas()) {
            for (String table : schema.showTables()) {
                // fixed: use schema.getName() — the original concatenated the Schema object's
                // toString(), so the filters below never saw a proper "schema.table" name
                String fullName = schema.getName() + "." + table;
                if (blackFilter == null || !blackFilter.filter(fullName)) {
                    if (filter == null || filter.filter(fullName)) {
                        // issue : https://github.com/alibaba/canal/issues/1168
                        // re-apply the filters when generating the snapshot
                        if (!compareTableMetaDbAndMemory(connection, tmpMemoryTableMetaDataStore, schema.getName(), table)) {
                            compareAll = false;
                        }
                    }
                }
            }
        }

        if (compareAll) {
            Map<String, String> content = new HashMap<String, String>();
            content.put("destination", destination);
            content.put("binlogFile", position.getJournalName());
            content.put("binlogOffest", String.valueOf(position.getPosition()));
            content.put("binlogMasterId", String.valueOf(position.getServerId()));
            content.put("binlogTimestamp", String.valueOf(position.getTimestamp()));
            content.put("data", JSON.toJSONString(schemaDdls));
            if (content.isEmpty()) {
                throw new RuntimeException("apply failed caused by content is empty in applySnapshotToDB");
            }

            MetaSnapshotDO snapshotDO = new MetaSnapshotDO();
            try {
                BeanUtils.populate(snapshotDO, content);
                metaSnapshotDAO.insert(snapshotDO);
            } catch (Throwable e) {
                if (isUkDuplicateException(e)) {
                    // duplicate position: safe to ignore
                    logger.info("dup apply snapshot use position : " + position + " , just ignore");
                } else {
                    throw new ParseException("apply failed caused by : " + e.getMessage(), e);
                }
            }
            return true;
        } else {
            logger.error("compare failed , check log");
        }
        return false;
    }

    /**
     * Compares the in-memory meta of one table against the live database definition,
     * retrying once on a broken connection. Returns {@code false} on mismatch or when the
     * table no longer exists in the database.
     */
    private boolean compareTableMetaDbAndMemory(ConnectionImpl connection, MemoryTableMetaDataStore memoryTableMetaDataStore,
                                                final String schema, final String table) {
        TableMetaData tableMetaDataFromMem = memoryTableMetaDataStore.find(schema, table);

        TableMetaData tableMetaDataFromDB = new TableMetaData();
        tableMetaDataFromDB.setSchemaName(schema);
        tableMetaDataFromDB.setTableName(table);
        String createDDL = null;
        try {
            ResultSetPacket packet = connection.query("show create table " + getFullName(schema, table));
            if (packet.getColumnNameList().size() > 1) {
                createDDL = packet.getColumnNameList().get(1);
                tableMetaDataFromDB.setColumnMetaDataList(TableMetaCache.parseTableMeta(schema, table, packet));
            }
        } catch (Throwable e) {
            try {
                // retry for broken pipe, see:
                // https://github.com/alibaba/canal/issues/724
                connection.reconnect();
                ResultSetPacket packet = connection.query("show create table " + getFullName(schema, table));
                if (packet.getColumnNameList().size() > 1) {
                    createDDL = packet.getColumnNameList().get(1);
                    tableMetaDataFromDB.setColumnMetaDataList(TableMetaCache.parseTableMeta(schema, table, packet));
                }
            } catch (IOException e1) {
                // errorNumber=1146: MySQL "table doesn't exist"
                if (e.getMessage().contains("errorNumber=1146")) {
                    logger.error("table not exist in db , pls check :" + getFullName(schema, table) + " , mem : "
                            + tableMetaDataFromMem);
                    return false;
                }
                throw new ParseException(e);
            }
        }

        boolean result = compareTableMeta(tableMetaDataFromMem, tableMetaDataFromDB);
        if (!result) {
            logger.error("pls submit github issue, show create table ddl:" + createDDL + " , compare failed . \n db : "
                    + tableMetaDataFromDB + " \n mem : " + tableMetaDataFromDB + " \n mem : ".isEmpty() // placeholder removed below
                    );
        }
        return result;
    }

    /**
     * Restores the in-memory meta from the newest snapshot at or before the given position.
     * Returns the snapshot's position, or {@code null} when no snapshot exists or the
     * restore failed.
     */
    private EntryPosition buildMemFromSnapshot(EntryPosition position) {
        try {
            MetaSnapshotDO snapshotDO = metaSnapshotDAO.findByTimestamp(destination, position.getTimestamp());
            if (snapshotDO == null) {
                return null;
            }
            String binlogFile = snapshotDO.getBinlogFile();
            Long binlogOffest = snapshotDO.getBinlogOffest();
            String binlogMasterId = snapshotDO.getBinlogMasterId();
            Long binlogTimestamp = snapshotDO.getBinlogTimestamp();

            EntryPosition snapshotPosition = new EntryPosition(binlogFile,
                    binlogOffest == null ? 0L : binlogOffest,
                    binlogTimestamp == null ? 0L : binlogTimestamp,
                    Long.valueOf(binlogMasterId == null ? "-2" : binlogMasterId));
            // data is stored as Map<String, String>: one set of create statements per schema
            String sqlData = snapshotDO.getData();
            JSONObject jsonObj = JSON.parseObject(sqlData);
            for (Map.Entry entry : jsonObj.entrySet()) {
                // replay into memory
                if (!memoryTableMetaDataStore.apply(snapshotPosition,
                        ObjectUtils.toString(entry.getKey()),
                        ObjectUtils.toString(entry.getValue()),
                        null)) {
                    return null;
                }
            }

            return snapshotPosition;
        } catch (Throwable e) {
            throw new ParseException("apply failed caused by : " + e.getMessage(), e);
        }
    }

    /**
     * Replays the DDL history between the snapshot position and the rollback position onto
     * the in-memory meta. Records newer than the rollback position are skipped.
     */
    private boolean applyHistoryOnMemory(EntryPosition position, EntryPosition rollbackPosition) {
        try {
            List<MetaHistoryDO> metaHistoryDOList = metaHistoryDAO.findByTimestamp(destination,
                    position.getTimestamp(),
                    rollbackPosition.getTimestamp());
            if (metaHistoryDOList == null) {
                return true;
            }

            for (MetaHistoryDO metaHistoryDO : metaHistoryDOList) {
                String binlogFile = metaHistoryDO.getBinlogFile();
                Long binlogOffest = metaHistoryDO.getBinlogOffest();
                String binlogMasterId = metaHistoryDO.getBinlogMasterId();
                Long binlogTimestamp = metaHistoryDO.getBinlogTimestamp();
                String useSchema = metaHistoryDO.getUseSchema();
                String sqlData = metaHistoryDO.getSqlText();
                EntryPosition snapshotPosition = new EntryPosition(binlogFile,
                        binlogOffest == null ? 0L : binlogOffest,
                        binlogTimestamp == null ? 0L : binlogTimestamp,
                        Long.valueOf(binlogMasterId == null ? "-2" : binlogMasterId));

                // within the same second, compare the history position against the target;
                // skip any record positioned after the rollback point
                if (snapshotPosition.getTimestamp() > rollbackPosition.getTimestamp()) {
                    continue;
                } else if (rollbackPosition.getServerId() == snapshotPosition.getServerId()
                        && snapshotPosition.compareTo(rollbackPosition) > 0) {
                    continue;
                }

                // replay into memory
                if (!memoryTableMetaDataStore.apply(snapshotPosition, useSchema, sqlData, null)) {
                    return false;
                }

            }

            return metaHistoryDOList.size() > 0;
        } catch (Throwable e) {
            throw new ParseException("apply failed", e);
        }
    }

    /** Builds a backquoted fully-qualified name: `schema`.`table`. */
    private String getFullName(String schema, String table) {
        StringBuilder builder = new StringBuilder();
        return builder.append('`')
                .append(schema)
                .append('`')
                .append('.')
                .append('`')
                .append(table)
                .append('`')
                .toString();
    }

    /**
     * Compares two table metas leniently: names and column names case-insensitively, column
     * types by substring containment (so "int" vs "int(10)" match, see
     * https://github.com/alibaba/canal/issues/1100), plus signedness, nullability, and
     * pk-or-uk status (MySQL promotes a sole unique key to primary key in "desc").
     */
    private boolean compareTableMeta(TableMetaData source, TableMetaData target) {
        if (!StringUtils.equalsIgnoreCase(source.getSchemaName(), target.getSchemaName())) {
            return false;
        }

        if (!StringUtils.equalsIgnoreCase(source.getTableName(), target.getTableName())) {
            return false;
        }

        List<ColumnMetaData> sourceFields = source.getColumnMetaDataList();
        List<ColumnMetaData> targetFields = target.getColumnMetaDataList();
        if (sourceFields.size() != targetFields.size()) {
            return false;
        }

        for (int i = 0; i < sourceFields.size(); i++) {
            ColumnMetaData sourceField = sourceFields.get(i);
            ColumnMetaData targetField = targetFields.get(i);
            if (!StringUtils.equalsIgnoreCase(sourceField.getColumnName(), targetField.getColumnName())) {
                return false;
            }

            // signedness must agree
            if ((sourceField.isUnsigned() && !targetField.isUnsigned())
                    || (!sourceField.isUnsigned() && targetField.isUnsigned())) {
                return false;
            }

            // strip the signed/unsigned suffix, then compare types by containment either way
            String sign = sourceField.isUnsigned() ? "unsigned" : "signed";
            String sourceColumnType = StringUtils.removeEndIgnoreCase(sourceField.getColumnType(), sign).trim();
            String targetColumnType = StringUtils.removeEndIgnoreCase(targetField.getColumnType(), sign).trim();

            boolean columnTypeCompare = false;
            columnTypeCompare |= StringUtils.containsIgnoreCase(sourceColumnType, targetColumnType);
            columnTypeCompare |= StringUtils.containsIgnoreCase(targetColumnType, sourceColumnType);
            if (!columnTypeCompare) {
                return false;
            }

            if (sourceField.isNullable() != targetField.isNullable()) {
                return false;
            }

            // MySQL quirk: with only a UK and no PK, "show create" reports UK but "desc"
            // reports it as PK — so compare the pk-or-uk union instead of each flag
            boolean isSourcePkOrUk = sourceField.isKey() || sourceField.isUnique();
            boolean isTargetPkOrUk = targetField.isKey() || targetField.isUnique();
            if (isSourcePkOrUk != isTargetPkOrUk) {
                return false;
            }
        }

        return true;
    }

    /** Deletes snapshots older than {@code expireTimestamp} seconds; returns rows deleted. */
    private int snapshotExpire(int expireTimestamp) {
        return metaSnapshotDAO.deleteByTimestamp(destination, expireTimestamp);
    }

    public void setFilter(CanalEventFilter filter) {
        this.filter = filter;
    }

    public MetaHistoryDAO getMetaHistoryDAO() {
        return metaHistoryDAO;
    }

    public void setMetaHistoryDAO(MetaHistoryDAO metaHistoryDAO) {
        this.metaHistoryDAO = metaHistoryDAO;
    }

    public MetaSnapshotDAO getMetaSnapshotDAO() {
        return metaSnapshotDAO;
    }

    public void setMetaSnapshotDAO(MetaSnapshotDAO metaSnapshotDAO) {
        this.metaSnapshotDAO = metaSnapshotDAO;
    }

    public void setBlackFilter(CanalEventFilter blackFilter) {
        this.blackFilter = blackFilter;
    }

    public int getSnapshotInterval() {
        return snapshotInterval;
    }

    public void setSnapshotInterval(int snapshotInterval) {
        this.snapshotInterval = snapshotInterval;
    }

    public int getSnapshotExpire() {
        return snapshotExpire;
    }

    public void setSnapshotExpire(int snapshotExpire) {
        this.snapshotExpire = snapshotExpire;
    }

    public ConnectionImpl getConnection() {
        return connection;
    }

    public void setConnection(ConnectionImpl connection) {
        this.connection = connection;
    }

    /**
     * Returns {@code true} if the throwable is a MySQL or H2 unique-key violation.
     * Fixed: guards against a null exception message (the original NPE'd on it).
     * FK violations can raise similar exceptions, so matching on "Duplicate entry" is required.
     */
    public boolean isUkDuplicateException(Throwable t) {
        String message = t.getMessage();
        if (message == null) {
            return false;
        }
        return pattern.matcher(message).find() || h2Pattern.matcher(message).find();
    }
}
