package pers.cfeng.server.dataManage.recover;

import pers.cfeng.common.SubArray;
import pers.cfeng.server.dataManage.dataItem.DataItem;
import pers.cfeng.server.dataManage.logger.Logger;
import pers.cfeng.server.dataManage.logger.LoggerConstant;
import pers.cfeng.server.dataManage.page.Page;
import pers.cfeng.server.dataManage.page.PageCommon;
import pers.cfeng.server.dataManage.pageCache.PageCache;
import pers.cfeng.server.txManage.TransactionManager;
import pers.cfeng.server.utils.ByteBufferParser;
import pers.cfeng.server.utils.FaultHandler;

import java.util.*;

/**
 * 根据日志文件进行数据恢复
 * 按照数据库的理论，当break down时， 已经完成的事务应该进行Redo， 未完成的应该Undo
 * 当多线程并行操作的时候，如果事务可以进行未提交读，那么在恢复的时候就会出现问题
 * 因为按理一个应该撤销，一个应该Redo
 * 所以为了方便，这里就设置最低的隔离级别为读已提交
 *
 * 更新的日志： [LogType] [TID] [PageNo] [Offset] [oldRaw] [NewRaw]
 * 插入的日志: [LogType] [TID] [PageNo] [Offset] [raw]
 */

public class DataRecover {

    /** Parsed form of an insert log entry: [LogType] [TID] [PageNo] [Offset] [raw]. */
    static class InsertLogInfo {
        long tid;      // transaction id
        int pageNo;    // page number the data was inserted into
        short offset;  // offset within the page
        byte[] raw;    // the inserted data (a copy of the log payload)
    }

    /** Parsed form of an update log entry: [LogType] [TID] [PageNo] [Offset] [oldRaw] [newRaw]. */
    static class UpdateLogInfo {
        long tid;      // transaction id
        int pageNo;    // page number
        short offset;  // offset within the page
        byte[] oldRaw; // image before the update (applied on UNDO)
        byte[] newRew; // image after the update (applied on REDO); field name kept as-is for package compatibility
    }

    /**
     * Replays the log file against the page cache after a crash.
     * Transactions that had completed are redone; transactions still running
     * at crash time are undone (the lowest isolation level is read-committed,
     * so a log entry belongs to exactly one of the two sets).
     *
     * @param tm        used to check whether each logged transaction was still running
     * @param logger    the log file to replay
     * @param pageCache pages are fetched from and truncated through this cache
     */
    public static void recover(TransactionManager tm, Logger logger, PageCache pageCache) {
        System.out.println("cfengBase is recovering.....");

        // First pass: rewind the log and find the largest page number touched
        // by any entry, so pages past it can be discarded.
        logger.rewind();
        int maxPageNo = 0;
        byte[] log;
        while ((log = logger.next()) != null) {
            int pageNo = isInsertLog(log)
                    ? parseInsertLog(log).pageNo
                    : parseUpdateLog(log).pageNo;
            maxPageNo = Math.max(maxPageNo, pageNo);
        }
        // Page numbering starts at 1, so keep at least one page.
        if (maxPageNo == 0) {
            maxPageNo = 1;
        }
        // Drop every page after the last one referenced by the log.
        pageCache.truncateAfterPageNo(maxPageNo);
        System.out.println("Truncate to " + maxPageNo + " pages");

        redoTransactions(tm, logger, pageCache);
        System.out.println("Redo Transactions Over");

        undoTransactions(tm, logger, pageCache);
        System.out.println("Undo Transactions Over");
    }

    /** A log entry is an insert entry iff its first byte carries the insert type tag. */
    private static boolean isInsertLog(byte[] log) {
        return log[0] == LoggerConstant.LOG_TYPE_INSERT;
    }

    /**
     * Redo pass: scan the whole log in order and re-apply every entry whose
     * transaction is no longer running (i.e. had completed before the crash).
     */
    private static void redoTransactions(TransactionManager tm, Logger logger, PageCache pageCache) {
        logger.rewind();
        byte[] log;
        while ((log = logger.next()) != null) {
            if (isInsertLog(log)) {
                InsertLogInfo logInfo = parseInsertLog(log);
                // Only completed transactions are redone.
                if (!tm.isRunning(logInfo.tid)) {
                    doInsertLog(pageCache, logInfo, LoggerConstant.REDO);
                }
            } else {
                UpdateLogInfo logInfo = parseUpdateLog(log);
                if (!tm.isRunning(logInfo.tid)) {
                    doUpdateLog(pageCache, logInfo, LoggerConstant.REDO);
                }
            }
        }
    }

    /**
     * Undo pass: collect, per still-running transaction, every log entry in
     * file order; then replay each transaction's entries in reverse and mark
     * the transaction rolled back. Inserted data is not physically removed —
     * it is logically deleted for safety and simplicity.
     */
    private static void undoTransactions(TransactionManager tm, Logger logger, PageCache pageCache) {
        Map<Long, List<byte[]>> logCache = new HashMap<>();
        logger.rewind();
        byte[] log;
        while ((log = logger.next()) != null) {
            // Both entry kinds are cached the same way; only the tid extraction differs.
            long tid = isInsertLog(log) ? parseInsertLog(log).tid : parseUpdateLog(log).tid;
            if (tm.isRunning(tid)) {
                logCache.computeIfAbsent(tid, k -> new ArrayList<>()).add(log);
            }
        }
        // Undo each transaction's entries newest-first, then mark it rolled back.
        for (Map.Entry<Long, List<byte[]>> entry : logCache.entrySet()) {
            List<byte[]> logs = entry.getValue();
            for (int i = logs.size() - 1; i >= 0; i--) {
                byte[] undoLog = logs.get(i);
                if (isInsertLog(undoLog)) {
                    doInsertLog(pageCache, parseInsertLog(undoLog), LoggerConstant.UNDO);
                } else {
                    doUpdateLog(pageCache, parseUpdateLog(undoLog), LoggerConstant.UNDO);
                }
            }
            tm.rollback(entry.getKey());
        }
    }

    /** Decodes the byte[] of an insert log entry into an {@link InsertLogInfo}. */
    private static InsertLogInfo parseInsertLog(byte[] log) {
        InsertLogInfo logInfo = new InsertLogInfo();
        logInfo.tid = ByteBufferParser.parseLong(Arrays.copyOfRange(log, LoggerConstant.OFFSET_TID, LoggerConstant.OFFSET_PAGE_NO));
        logInfo.pageNo = ByteBufferParser.parseInt(Arrays.copyOfRange(log, LoggerConstant.OFFSET_PAGE_NO, LoggerConstant.OFF_INSERT_OFFSET));
        logInfo.offset = ByteBufferParser.parseShort(Arrays.copyOfRange(log, LoggerConstant.OFF_INSERT_OFFSET, LoggerConstant.OF_INSERT_RAW));
        logInfo.raw = Arrays.copyOfRange(log, LoggerConstant.OF_INSERT_RAW, log.length);
        return logInfo;
    }

    /**
     * Builds an insert log entry from the transaction id, target page and raw data.
     * The page's current free-space offset records where the data was inserted.
     */
    public static byte[] insertLog(long tid, Page page, byte[] raw) {
        byte[] logTypeRaw = {LoggerConstant.LOG_TYPE_INSERT};
        byte[] tidRaw = ByteBufferParser.long2Byte(tid);
        byte[] pageNoRaw = ByteBufferParser.int2Byte(page.getPageNumber());
        byte[] offsetRaw = ByteBufferParser.short2Byte(PageCommon.getFSO(page));
        return ByteBufferParser.mergeBytes(logTypeRaw, tidRaw, pageNoRaw, offsetRaw, raw);
    }

    /**
     * Applies an insert log entry to its page.
     * REDO re-inserts the raw data at the recorded offset; UNDO logically
     * deletes it by marking the raw invalid and writing that back.
     */
    private static void doInsertLog(PageCache pageCache, InsertLogInfo logInfo, int flag) {
        Page page = null;
        try {
            page = pageCache.getPage(logInfo.pageNo);
        } catch (Exception e) {
            FaultHandler.forcedStop(e);
        }
        try {
            if (flag == LoggerConstant.UNDO) {
                // Logical delete: flip the valid flag inside the raw copy first.
                DataItem.setDataItemRawInvalid(logInfo.raw);
            }
            // BUG FIX: recoverInsert must run for UNDO as well. logInfo.raw is a
            // copy taken from the log, so invalidating it without writing it back
            // would leave the page untouched and the undo would have no effect.
            PageCommon.recoverInsert(page, logInfo.raw, logInfo.offset);
        } finally {
            page.release();
        }
    }

    /**
     * Builds an update log entry; pageNo and offset are packed into the data
     * item's uid (pageNo in the high 32 bits, offset in the low 16 bits).
     */
    public static byte[] updateLog(long tid, DataItem di) {
        byte[] logType = {LoggerConstant.LOG_TYPE_UPDATE};
        byte[] tidRaw = ByteBufferParser.long2Byte(tid);
        byte[] uidRaw = ByteBufferParser.long2Byte(di.getUid());
        byte[] oldRaw = di.getOldRaw();
        SubArray raw = di.getRaw();
        byte[] newRaw = Arrays.copyOfRange(raw.raw, raw.start, raw.end);
        return ByteBufferParser.mergeBytes(logType, tidRaw, uidRaw, oldRaw, newRaw);
    }

    /** Decodes the byte[] of an update log entry into an {@link UpdateLogInfo}. */
    private static UpdateLogInfo parseUpdateLog(byte[] log) {
        UpdateLogInfo logInfo = new UpdateLogInfo();
        logInfo.tid = ByteBufferParser.parseLong(Arrays.copyOfRange(log, LoggerConstant.OFFSET_TID, LoggerConstant.OFFSET_PAGE_NO));
        long uid = ByteBufferParser.parseLong(Arrays.copyOfRange(log, LoggerConstant.OFF_UPDATE_UID, LoggerConstant.OFF_UPDATE_RAW));
        // The offset lives in the LOW 16 bits of the uid (the original comment
        // claiming "high bits" was wrong).
        logInfo.offset = (short) (uid & (1L << 16) - 1);
        uid >>>= 32;
        // After shifting, the low 32 bits are the page number.
        logInfo.pageNo = (int) (uid & (1L << 32) - 1);
        // The payload is oldRaw followed by newRaw of equal length, so split in half.
        int length = (log.length - LoggerConstant.OFF_UPDATE_RAW) / 2;
        logInfo.oldRaw = Arrays.copyOfRange(log, LoggerConstant.OFF_UPDATE_RAW, LoggerConstant.OFF_UPDATE_RAW + length);
        logInfo.newRew = Arrays.copyOfRange(log, LoggerConstant.OFF_UPDATE_RAW + length, LoggerConstant.OFF_UPDATE_RAW + 2 * length);
        return logInfo;
    }

    /**
     * Applies an update log entry to its page.
     * REDO writes the new image back at the recorded offset; UNDO restores the old image.
     */
    private static void doUpdateLog(PageCache pageCache, UpdateLogInfo logInfo, int flag) {
        int pageNo = logInfo.pageNo;
        short offset = logInfo.offset;
        // Pick which image to write: new for REDO, old for UNDO.
        byte[] raw = (flag == LoggerConstant.REDO) ? logInfo.newRew : logInfo.oldRaw;
        Page page = null;
        try {
            page = pageCache.getPage(pageNo);
        } catch (Exception e) {
            FaultHandler.forcedStop(e);
        }
        try {
            PageCommon.recoverUpdate(page, raw, offset);
        } finally {
            page.release();
        }
    }

}
