package com.fzk.ha;

import com.fzk.core.FileMeta;
import com.fzk.core.FileMetaStore;
import com.fzk.core.FileStoreService;
import com.fzk.env.conf.DataConf;
import com.fzk.env.conf.ServerConf;
import com.fzk.log.Logger;
import io.netty.buffer.ByteBuf;
import io.netty.channel.*;
import io.netty.handler.timeout.IdleState;
import io.netty.handler.timeout.IdleStateEvent;
import io.netty.util.ReferenceCountUtil;

import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.Set;
import java.util.concurrent.TimeUnit;

/**
 * Inbound handler for the HA (high-availability) master/replica file-sync channel.
 *
 * <p>Master side: answers heartbeat pings and serves {@code Sync_Req} messages by
 * streaming file chunks (zero-copy via {@link DefaultFileRegion}), long-polling when
 * the replica is already caught up. Replica side: consumes {@code Sync_Res} messages,
 * persists the received chunk through {@code fileService.doStore} and issues the next
 * sync request via {@link #ack}.
 *
 * <p>Thread-safety: driven by a single Netty event-loop thread (reactor model), so
 * {@link #currentAck} is only ever written from one thread; {@code volatile} keeps it
 * visible to outside readers.
 */
public class ServerHandler extends ChannelInboundHandlerAdapter {
    private final HAService haService;
    private final FileStoreService fileService;
    private final ServerConf serverConf;
    private final DataConf dataConf;

    // Progress (meta-store offset) of the replica's ongoing NormalSync request.
    // In the reactor model this is only accessed by the single event-loop thread.
    public volatile long currentAck;

    public ServerHandler(HAService haService, FileStoreService fileService, ServerConf serverConf, DataConf dataConf) {
        this.haService = haService;
        this.fileService = fileService;
        this.serverConf = serverConf;
        this.dataConf = dataConf;
    }

    /** Renders a throwable's full stack trace as a string so it can go through {@link Logger}. */
    private static String stackTraceOf(Throwable t) {
        StringWriter sw = new StringWriter();
        t.printStackTrace(new PrintWriter(sw));
        return sw.toString();
    }

    /**
     * Handles a heartbeat: replies to a ping with a pong; a received pong just
     * confirms the link is healthy.
     *
     * @throws RuntimeException if the message type is neither ping nor pong
     */
    private void heartbeat(ChannelHandlerContext ctx, HAMsg.HAHeartMsg heart) {
        Logger.debug(String.format("heartbeat: %s, ip: %s, msg: %s", heart.type == HAMsg.Ping ? "ping" : "pong", ctx.channel().remoteAddress(), heart));
        if (heart.type == HAMsg.Ping) {
            // A ping is answered with a pong.
            HAMsg.HAHeartMsg pong = new HAMsg.HAHeartMsg(HAMsg.Pong, "pong".getBytes(StandardCharsets.UTF_8));
            ByteBuf buf = pong.serialize();
            ctx.writeAndFlush(buf);
        } else if (heart.type == HAMsg.Pong) {
            // Receiving a pong means the connection is stable; nothing to do.
        } else {
            throw new RuntimeException("错误的心跳消息类型, msg: " + heart);
        }
    }

    /**
     * Master side: serves one replica sync request.
     *
     * <p>If the replica is already at {@code writeIndex}, long-polls (re-schedules
     * itself once per second, up to ~15s) before ending the round with an empty
     * response. Otherwise streams at most one chunk of the requested file.
     *
     * @param reqMsg   the replica's request (meta-store offset + file position)
     * @param reqCount how many times this request has been re-scheduled by long-polling
     */
    private void handleReq(ChannelHandlerContext ctx, HAMsg.HASyncRequestMsg reqMsg, int reqCount) {
        if (reqCount == 0) {
            Logger.debug(String.format("accept ha sync req：from: %s, offset: %d, position: %d",
                    ctx.channel().remoteAddress(), reqMsg.header.getOffset(), reqMsg.header.getPosition()));
        } else {
            Logger.debug(String.format("长轮询 ha sync req：from: %s, offset: %d, position: %d",
                    ctx.channel().remoteAddress(), reqMsg.header.getOffset(), reqMsg.header.getPosition()));
        }
        // 0. Long-polling: a single request waits at most ~15s for new data.
        long offset = reqMsg.header.getOffset();
        long position = reqMsg.header.getPosition();
        if (offset == fileService.metaStore.writeIndex) {
            if (reqCount < 15) {
                // Re-check in 1s; skip the retry if the replica disconnected meanwhile.
                ctx.executor().schedule(() -> {
                    if (ctx.channel().isActive()) handleReq(ctx, reqMsg, reqCount + 1);
                }, 1, TimeUnit.SECONDS);
            } else {
                // Send an empty response to terminate this long-poll round.
                ctx.writeAndFlush(HAMsg.HASyncResponseMsg.Res_Empty.serialize());
            }
            return;
        } else if (offset > fileService.metaStore.writeIndex) {
            Logger.error(String.format("HA请求的offset: %d 超过了writeIndex: %d, fromIp: %s", offset, fileService.metaStore.writeIndex, ctx.channel().remoteAddress()));
            throw new RuntimeException(String.format("HA请求的offset: %d 超过了writeIndex: %d, fromIp: %s", offset, fileService.metaStore.writeIndex, ctx.channel().remoteAddress()));
        }

        // 1. Locate the requested file via its meta record.
        FileMetaStore.FileMetaWrapper metaWrapper = fileService.metaStore.readMeta(offset);
        FileMeta meta = metaWrapper.meta;
        // Compute the on-disk path.
        Path path = Path.of(dataConf.getDataDir(), meta.getRelativePath());
        if (Files.notExists(path)) {
            throw new RuntimeException(String.format("ha sync 文件不存在, offset: %d, meta: %s", offset, meta));
        }

        FileChannel fileChannel;
        try {
            fileChannel = FileChannel.open(path, StandardOpenOption.READ);
        } catch (IOException e) {
            // Open can only fail here if the file vanished (deleted?); preserve the
            // cause so the original stack trace is not lost.
            throw new RuntimeException("文件可能被删了, 可以考虑让从节点请求下个文件", e);
        }
        try {
            DefaultFileRegion fileRegion;
            // 2. Build the response chunk.
            if (fileChannel.size() <= position) {
                // Likely a multipart upload still in progress on the master: the
                // replica asked for bytes the master doesn't hold yet.
                // FIX: the original leaked this FileChannel on the early return below.
                fileChannel.close();
                // Long-poll up to 5 times before giving up.
                if (reqCount < 5) {
                    ctx.executor().schedule(() -> {
                        if (ctx.channel().isActive()) handleReq(ctx, reqMsg, reqCount + 1);
                    }, 1, TimeUnit.SECONDS);
                } else {
                    // An empty response ends this long-poll round; it also lets the
                    // replica move on to newer files instead of being stuck on this
                    // still-uploading large file.
                    ctx.writeAndFlush(HAMsg.HASyncResponseMsg.Res_Empty.serialize());
                }
                return;
            }
            final boolean isLast;
            if (fileChannel.size() - position > serverConf.getHaChunkSize()) {// one chunk per transfer (e.g. 4MB)
                isLast = false;
                fileRegion = new DefaultFileRegion(fileChannel, position, serverConf.getHaChunkSize());
            } else {// ship the whole remaining tail of the file
                isLast = true;
                fileRegion = new DefaultFileRegion(fileChannel, position, fileChannel.size() - position);
            }
            // 3. Send header + file region to the replica (zero-copy transfer).
            HAMsg.HASyncResponseMsg msg = new HAMsg.HASyncResponseMsg(new HAMsg.HASyncResponseHeader(
                    reqMsg.header.getHaSyncType(), offset, position, metaWrapper.metaLen, metaWrapper.meta),
                    fileRegion);
            ByteBuf buf = msg.serialize();
            ctx.write(buf);
            ChannelFuture sendFileFuture = ctx.writeAndFlush(fileRegion, ctx.newProgressivePromise());// send the file content
            // 4. Close the file handle once the transfer completes.
            sendFileFuture.addListener(new ChannelProgressiveFutureListener() {
                @Override // progress callback for the ongoing transfer
                public void operationProgressed(ChannelProgressiveFuture future, long progress, long total) {
                    if (total < 0) { // total unknown
                        Logger.debug(future.channel() + "HA Transfer progress: " + progress);
                    } else {
                        Logger.debug(future.channel() + "HA Transfer progress: " + progress + " / " + total);
                    }
                }

                @Override // completion callback
                public void operationComplete(ChannelProgressiveFuture future) {
                    if (isLast) {
                        Logger.info("HA文件传输完成, offset: " + offset + ", path: " + path);
                    }
                    try {
                        // Messages successfully written via ctx.write() are released
                        // automatically by Netty (ReferenceCountUtil.removeCallback),
                        // so only the file handle needs closing here.
                        if (fileChannel.isOpen())
                            fileChannel.close();
                    } catch (IOException ignored) {
                        // best-effort close
                    }
                }
            });
        } catch (Throwable e) {
            try {
                if (fileChannel.isOpen())
                    fileChannel.close();// close the file
            } catch (IOException ignored) {
                // best-effort close; the original failure below is the one to report
            }
            // Log the full stack trace through the project logger instead of stderr.
            Logger.error("HA sync req failed: " + stackTraceOf(e));
        }
    }

    /**
     * Replica side: consumes one sync response from the master, persists the chunk
     * and decides what to request next.
     */
    private void handleRes(ChannelHandlerContext ctx, HAMsg.HASyncResponseMsg resMsg) {
        /* 0. An empty response means either:
           1. the master has no new data right now — the replica immediately re-requests, or
           2. the file being synced is a still-uploading multipart file that received no new
              content on the master for 5s — the replica must skip it and request the next
              file, i.e. the latest writeIndex. */
        if (resMsg == HAMsg.HASyncResponseMsg.Res_Empty || resMsg.headerLen == 0) {
            ctx.executor().execute(() -> ack(ctx, HAMsg.HASyncRequestHeader.NormalSync, fileService.metaStore.writeIndex, 0L));
            return;
        }
        // A response header is present.
        long offset = resMsg.header.getOffset();
        long position = resMsg.header.getPosition();
        if (position < 0) {
            // 0. SingleFileSync with a negative position is the master notifying the
            // replica to initiate a single-file sync.
            // TODO: give this notification its own message type to avoid confusion.
            if (resMsg.header.getHaSyncType() == HAMsg.HASyncRequestHeader.SingleFileSync) {
                // Decide whether an extra single-file sync request is actually needed by
                // comparing against the ongoing NormalSync progress (currentAck).
                if (offset == currentAck) {
                    Logger.info(String.format("从节点收到主节点发来的单个文件同步的通知，经过检查offset=currentAck，当前的正常持续同步请求正在同步此文件，不必额外发出单个文件同步请求，currentAck: %d", currentAck));
                } else if (offset < currentAck) {
                    // Look up the local meta record to compute the latest position.
                    FileMetaStore.FileMetaWrapper replicaMetaWrapper = fileService.metaStore.readMeta(offset);
                    if (replicaMetaWrapper == null) {
                        Logger.error(String.format("从节点收到主节点发来的单个文件同步的通知，即将发出单个文件同步请求，但是检查发现从节点不存在该文件元信息? 主节点发来的元信息为masterMeta: %s", resMsg.header.getMeta()));
                        return;
                    }
                    // Only files still marked as Uploading need an extra request.
                    FileMeta replicaMeta = replicaMetaWrapper.meta;
                    if (replicaMeta.getUploadStatus() != FileMeta.Uploading) {
                        Logger.info(String.format("从节点收到主节点发来的单个文件同步的通知，经过检查从节点的该文件上传状态为uploadStatus: %d, 无需发出额外的单个文件请求", replicaMeta.getUploadStatus()));
                        return;
                    }
                    Path path = Path.of(dataConf.getDataDir(), replicaMetaWrapper.meta.getRelativePath());
                    if (Files.notExists(path)) {
                        Logger.error(String.format("从节点收到主节点发来的单个文件同步的通知，即将发出单个文件同步请求，但是检查发现从节点不存在该文件: %s? masterMeta: %s", path, resMsg.header.getMeta()));
                        return;
                    }
                    try {
                        position = Files.size(path);
                    } catch (IOException e) {
                        throw new RuntimeException(e);
                    }
                    Logger.info(String.format("从节点收到主节点发来的单个文件同步的通知，检查正常持续同步请求的currentAck: %d，需要额外发出单个文件同步请求: offset: %d, position: %d", currentAck, offset, position));
                    // 0.2 Issue the extra single-file sync request.
                    ack(ctx, HAMsg.HASyncRequestHeader.SingleFileSync, offset, position);
                } else {
                    throw new RuntimeException(String.format("从节点收到主节点发来的单个文件同步的通知，单个文件请求的offset: %d怎么会大于currentAck: %d", offset, currentAck));
                }
            } else {
                throw new RuntimeException(String.format("position: %d为负数时，haSyncType必须是单个文件同步: %d", position, HAMsg.HASyncRequestHeader.SingleFileSync));
            }
            return;
        }

        Logger.debug(String.format("接受到文件主从同步响应：ip: %s, key: %s, offset: %d, position: %d",
                ctx.channel().remoteAddress(), resMsg.header.getMeta().getKey(), resMsg.header.getOffset(), resMsg.header.getPosition()));
        try {
            // 1. Compute the local storage path.
            Path path = Path.of(dataConf.getDataDir(), resMsg.header.getMeta().getRelativePath());
            // 2. Persist content, meta info and index. (An unused local FileMeta copy
            //    the original built here was dead code and has been removed.)
            fileService.doStore(resMsg.header.getMeta(), resMsg.header.getMeta().getBucket(), resMsg.header.getMeta().getKey(), path, position, resMsg.header.getMeta().getSize(), resMsg.content);
            // 3. Is this file now fully transferred?
            long nextPosition = position + resMsg.content.readableBytes();
            if (nextPosition < resMsg.header.getMeta().getSize()) {
                // 3.1 Not yet — request the next chunk of the same file.
                ack(ctx, resMsg.header.getHaSyncType(), offset, nextPosition);
            } else if (nextPosition == resMsg.header.getMeta().getSize()) {// file transfer complete
                // SingleFileSync does not continue the rolling ha sync req loop.
                if (resMsg.header.getHaSyncType() == HAMsg.HASyncRequestHeader.NormalSync) {
                    // 3.2 Issue the next HA sync request for the following file.
                    long nextReqOffset = fileService.metaStore.writeIndex;// ack for the next request
                    // Sanity check: master and replica meta-store offsets must agree.
                    if (nextReqOffset != (offset + 4 + resMsg.header.getMetaLen())) {
                        String str = String.format("HA sync res，响应的offset: %d，metaLen: %d, 写入元信息后writeIndex: %d，此时writeIndex!=(offset+metaLen) ???", offset, resMsg.header.getMetaLen(), nextReqOffset);
                        Logger.error(str);
                        throw new RuntimeException(str);
                    }
                    ack(ctx, resMsg.header.getHaSyncType(), nextReqOffset, 0L);
                } else {
                    Logger.debug(String.format("特殊同步请求：单个文件同步已经完成，resHeader: %s", resMsg.header));
                }
            } else {
                String str = String.format("HA sync res, 计算的nextPosition: %d 怎么会超过元信息保存的文件总大小: %d呢?", nextPosition, resMsg.header.getMeta().getSize());
                Logger.error(str);
                throw new RuntimeException(str);
            }
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Sends a ha sync request to the master and, for NormalSync, records the new
     * progress in {@link #currentAck}.
     *
     * @param haSyncType NormalSync or SingleFileSync
     * @param ack        meta-store offset to request
     * @param position   file position to resume from
     */
    private void ack(ChannelHandlerContext ctx, byte haSyncType, long ack, long position) {
        Logger.debug(String.format("send ha sync req, to: %s, offset: %d, position: %d", ctx.channel().remoteAddress(), ack, position));
        // Track the latest requested sync progress.
        if (haSyncType == HAMsg.HASyncRequestHeader.NormalSync) {
            currentAck = ack;
        }
        HAMsg.HASyncRequestMsg req = new HAMsg.HASyncRequestMsg(new HAMsg.HASyncRequestHeader(haSyncType, ack, position));
        ByteBuf buf = req.serialize();
        ctx.writeAndFlush(buf);
    }

    @Override
    public void channelActive(ChannelHandlerContext ctx) {
        // A replica connected to the master: register its channel.
        haService.channels.put(ctx.channel().id(), ctx.channel());
        ctx.fireChannelActive();// propagate downstream
    }

    @Override
    public void channelInactive(ChannelHandlerContext ctx) {
        // A replica disconnected from the master: unregister its channel.
        haService.channels.remove(ctx.channel().id());
        ctx.fireChannelInactive();// propagate downstream
    }

    @Override// safety-net flush in case a handler forgot to flush manually
    public void channelReadComplete(ChannelHandlerContext ctx) {
        ctx.flush();
        ctx.fireChannelReadComplete();// propagate downstream
    }

    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) {
        try {
            ByteBuf buf = (ByteBuf) msg;
            // 1. Dispatch by message type.
            byte type = HAMsg.getMsgType(buf);
            switch (type) {
                case HAMsg.Ping, HAMsg.Pong -> {
                    HAMsg.HAHeartMsg heartMsg = new HAMsg.HAHeartMsg(buf);
                    heartbeat(ctx, heartMsg);
                }
                case HAMsg.Sync_Req -> {
                    HAMsg.HASyncRequestMsg reqMsg = HAMsg.HASyncRequestMsg.deserialize(buf);
                    handleReq(ctx, reqMsg, 0);
                }
                case HAMsg.Sync_Res -> {
                    HAMsg.HASyncResponseMsg resMsg = HAMsg.HASyncResponseMsg.deserialize(buf);
                    handleRes(ctx, resMsg);
                }
                default -> throw new RuntimeException("不支持的消息类型: " + type);
            }
        } finally {
            ReferenceCountUtil.release(msg);
        }
    }

    @Override
    public void userEventTriggered(ChannelHandlerContext ctx, Object evt) {
        if (evt instanceof IdleStateEvent event) {
            // Idle-timeout handling: any idle state closes the link.
            if (event.state() == IdleState.ALL_IDLE) {
                Logger.info("链路空闲超时，即将关闭: " + ctx.channel().remoteAddress());
                ctx.close();// close the link
            } else if (event.state() == IdleState.READER_IDLE) {
                Logger.info("链路读空闲超时，即将关闭: " + ctx.channel().remoteAddress());
                ctx.close();// close the link
            } else if (event.state() == IdleState.WRITER_IDLE) {
                Logger.info("链路写空闲超时，即将关闭: " + ctx.channel().remoteAddress());
                ctx.close();// close the link
            } else {
                throw new RuntimeException("未知超时事件：" + event);
            }
        } else {
            Logger.warning(String.format("未知自定义事件触发? event: %s", evt.toString()));
        }
    }

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
        // Log the full stack trace through the project logger instead of stderr.
        Logger.error("HA service: " + stackTraceOf(cause));
        // NOTE(review): Netty convention is usually to ctx.close() on an unhandled
        // exception; kept open here to preserve existing behavior — confirm intended.
    }
}
