#include "Raft/raft.hpp"
#include "common.hpp"

using namespace std;


// Per-RPC context kept alive while a client request waits for the raft layer
// to apply it. Created on every start() call made by a leader and stored in
// requestMap keyed by the log index start() returned, which is unique.
struct OpContext{
    Operation op;       // the operation submitted to raft
    string value;       // result value for "get", filled in by applyLoop
    ERR err;            // outcome reported back to the RPC handler
    bool isIgnored;     // set by applyLoop when a duplicate request is skipped

    // BUG FIX: the struct both declared `OpContext(Operation op);` and defined
    // the same constructor inline — an illegal redeclaration that fails to
    // compile. Keep only the definition.
    OpContext(Operation op){
        this->op = op;
        isIgnored = false;
        err = OK;
        value = "";
    }
};

// Address book entry for one kv server: the raft-layer peer info plus the
// list of ports its kv-level RPC listeners bind to.
struct kvServerInfo{
    PeersInfo peersInfo;   // raft peer id and raft RPC port pair
    vector<int> kvPort;    // kv RPC listener ports (EVERY_SERVER_PORT of them)
};

// Key/value server replicated via an underlying Raft instance. Client RPCs
// (get / putAppend) are submitted to raft; a daemon thread (applyLoop) applies
// committed entries to the in-memory database and wakes the waiting RPC.
class KVServer{
public:
    KVServer() = default;
    // Thread entry: runs one RPC listener on the next port (see cur_port).
    static void* RPCserver(void* arg);
    // Daemon thread that continuously consumes msgs committed by the raft layer.
    static void* applyLoop(void* arg);
    // Initializes this server (index `id` in kvInfo) and spawns its threads.
    void StartKvServer(vector<kvServerInfo>& kvInfo, int id);
    // Extracts just the raft-layer PeersInfo from the combined kv config.
    vector<PeersInfo> getRaftPort(vector<kvServerInfo>& kvInfo);
    GetReply get(GetArgs args);
    PutAppendReply putAppend(PutAppendArgs args);

    string test(string key){ return database[key]; }  // inspect a non-leader server's state machine (testing only)
    bool getRaftState();  // query the raft layer's leadership state
    void killRaft();      // used when testing snapshot install: pauses raft so its log falls behind
    void activateRaft();  // reactivate raft after killRaft()
    ~KVServer() = default;

private:
    mutex kv_lock;                 // guards database / clientSeqMap / requestMap
    Raft *raft;                    // underlying consensus module
    int serverId;
    vector<int> port;              // kv RPC listener ports (one thread each)
    atomic_int cur_port;           // next index into `port` claimed by an RPCserver thread
    MessageQueue<ApplyMsg> applyChan;  // applyLoop -> RPC handler handoff
    bool dead; // set by kill();


    int lastAppliedIndex;	
    unordered_map<string, string> database;  // simulated key/value store: key -> value
    unordered_map<int, int> clientSeqMap;    // highest requestId seen per client, for ordering and duplicate detection: clientId -> requestId
    unordered_map<int, OpContext*> requestMap;  // context of each in-flight RPC: raft log index -> OpContext
};

// Initializes server `id`: wires up the raft layer, then spawns one RPC
// listener thread per configured port and the applyLoop daemon.
void KVServer::StartKvServer(vector<kvServerInfo>& kvInfo, int id){
    this->serverId = id;
    this->port = kvInfo[id].kvPort;
    this->cur_port = 0;              // BUG FIX: was never initialized before RPCserver threads fetch_add it
    vector<PeersInfo> peers = getRaftPort(kvInfo);
    this->applyChan.setcapacity(1);
    this->lastAppliedIndex = 0;
    this->raft = new Raft();         // BUG FIX: `raft` was dereferenced while still uninitialized
    raft->Make(peers, id);

    // One RPC listener per configured port; each thread claims its own index
    // into `port` via cur_port.fetch_add(1).
    for (size_t i = 0; i < port.size(); i++){
        thread listen_tid(RPCserver, this);
        listen_tid.detach();
    }

    // BUG FIX: an extra RPCserver thread was spawned here, which would
    // fetch_add cur_port past port.size() and index out of bounds. What is
    // actually missing is the applyLoop daemon — it is declared and relied on
    // by get()/putAppend() but was never started.
    thread apply_tid(applyLoop, this);
    apply_tid.detach();
}

// Thread entry point: binds this server's RPC handlers on one of its ports
// (each thread claims a distinct port index via the atomic counter) and
// serves requests forever.
void* KVServer::RPCserver(void* arg){
    KVServer* kv = (KVServer*)arg;
    buttonrpc server;
    int currentPort = kv->cur_port.fetch_add(1);

    server.as_server(kv->port[currentPort]);
    server.bind("get", &KVServer::get, kv);
    server.bind("putAppend", &KVServer::putAppend, kv);
    server.run();
    // BUG FIX: non-void function fell off the end without a return (undefined
    // behavior). run() normally never returns, but the path must be covered.
    return nullptr;
}

// Handles a client "get" RPC: submits the read through raft (so stale leaders
// can't answer), waits for applyLoop to apply it, and returns the value.
GetReply KVServer::get(GetArgs args){
    GetReply reply;
    reply.err = OK;
    reply.value = "";

    Operation operation;
    operation.op = "get";
    operation.key = args.key;
    operation.value = "random";   // placeholder; the value field is unused for reads
    operation.clientId = args.clientId;
    operation.requestId = args.requestId;

    // NOTE(review): only the op-name string is handed to start(), so key /
    // clientId / requestId never reach the raft log — presumably start()
    // should receive a serialized form of the whole Operation; confirm
    // against the Raft API.
    StartState state = raft->start(operation.op);
    operation.term = state.startTerm;
    operation.index = state.startIndex;

    if (!state.isLeader){
        cout << "client " << args.clientId << "'s get request is wrong leader " << serverId << endl;
        reply.err = ErrWrongLeader;
        return reply;
    }

    // Stash this RPC's context so applyLoop can fill in the result; the map
    // key is the unique raft log index start() returned.
    OpContext opctx(operation);
    unique_lock<mutex> lock(kv_lock);
    requestMap[state.startIndex] = &opctx;
    lock.unlock();

    // Block until applyLoop pushes the applied msg (or the pop times out).
    auto [replyOp, ok] = this->applyChan.TPop();
    if (ok) {
        if (opctx.err == ErrWrongLeader){
            reply.err = ErrWrongLeader;   // BUG FIX: was `==`, a no-op comparison
        }else if (opctx.err == ErrNoKey){
            reply.err = ErrNoKey;
        } else {
            reply.value = opctx.value;
        }
    } else {
        reply.err = ErrWrongLeader;       // BUG FIX: was `==`, a no-op comparison
        cout << "TimeOut------------------" << endl;
    }

    // Remove the context before the stack-allocated opctx goes out of scope.
    lock.lock();
    requestMap.erase(state.startIndex);
    lock.unlock();
    return reply;
}

// Handles a client "put"/"append" RPC: submits the write through raft, waits
// for applyLoop to apply it, and reports the outcome.
PutAppendReply KVServer::putAppend(PutAppendArgs args){
    PutAppendReply reply;
    reply.err = OK;
    Operation operation;
    operation.op = args.op;
    operation.key = args.key;
    operation.value = args.value;
    operation.clientId = args.clientId;
    operation.requestId = args.requestId;

    // NOTE(review): as in get(), only operation.op reaches start() — the key
    // and value never enter the raft log; confirm against the Raft API.
    StartState state = raft->start(operation.op);
    operation.term = state.startTerm;
    operation.index = state.startIndex;

    if (!state.isLeader){
        // BUG FIX: log message said "get request" in the putAppend handler.
        cout << "client " << args.clientId << "'s putAppend request is wrong leader " << serverId << endl;
        reply.err = ErrWrongLeader;
        return reply;
    }

    // Stash this RPC's context so applyLoop can report the outcome; the map
    // key is the unique raft log index start() returned.
    OpContext opctx(operation);
    unique_lock<mutex> lock(kv_lock);
    requestMap[state.startIndex] = &opctx;
    lock.unlock();

    // Block until applyLoop pushes the applied msg (or the pop times out).
    auto [replyOp, ok] = this->applyChan.TPop();
    if (ok) {
        if (opctx.err == ErrWrongLeader){
            reply.err = ErrWrongLeader;   // BUG FIX: was `==`, a no-op comparison
        }
    } else {
        reply.err = ErrWrongLeader;       // BUG FIX: was `==`, a no-op comparison
        cout << "TimeOut------------------" << endl;
    }

    // Remove the context before the stack-allocated opctx goes out of scope.
    lock.lock();
    requestMap.erase(state.startIndex);
    lock.unlock();
    return reply;
}

// Daemon thread: drains messages committed by the raft layer, applies them to
// the in-memory database (with per-client duplicate suppression), and wakes
// any RPC handler waiting on the corresponding log index via applyChan.
void* KVServer::applyLoop(void* arg){
    KVServer* kv = (KVServer*)arg;
    // Construct the lock handle once, released immediately; it is re-acquired
    // per message below so the blocking PopMQ() runs unlocked.
    unique_lock<mutex> lock(kv->kv_lock); 
    lock.unlock();

    while(1){

        // Blocks until the raft layer commits and hands over the next entry.
        ApplyMsg msg= kv->raft->PopMQ();

        if(!msg.commandValid){                         
            continue;
        }else{
            Operation operation = msg.getOperation();
            int index = msg.commandIndex;

            lock.lock(); 
            kv->lastAppliedIndex = index;           // every received msg advances lastAppliedIndex
            bool isOpExist = false, isSeqExist = false;
            int prevRequestIdx = INT_MAX;
            OpContext* opctx = NULL;
            // An entry in requestMap means an RPC on this server is waiting on
            // this log index (i.e. this server was leader when it was started).
            if(kv->requestMap.count(index)){
                isOpExist = true;
                opctx = kv->requestMap[index];
                // Term changed between start() and apply: leadership was lost,
                // so the waiting client must retry elsewhere.
                if(opctx->op.term != operation.term){
                    opctx->err = ErrWrongLeader;
                    printf("not euqal term -> wrongLeader : opctx %d, op : %d\n", opctx->op.term, operation.term);
                }
            }
            if(kv->clientSeqMap.count(operation.clientId)){
                isSeqExist = true;
                prevRequestIdx = kv->clientSeqMap[operation.clientId];
            }
            kv->clientSeqMap[operation.clientId] = operation.requestId;

            if(operation.op == "put" || operation.op == "append"){
                // Non-leader servers never have a pending op yet still apply to
                // their state machine; a leader's first command from a client
                // also has no prior seq — either way apply in order. A request
                // id not strictly greater than the last seen one is a duplicate.
                if(!isSeqExist || prevRequestIdx < operation.requestId){  
                    if(operation.op == "put"){
                        kv->database[operation.key] = operation.value;
                    }else if(operation.op == "append"){
                        if(kv->database.count(operation.key)){
                            kv->database[operation.key] += operation.value;
                        }else{
                            // Append to a missing key behaves like put.
                            kv->database[operation.key] = operation.value;
                        }
                    }
                }else if(isOpExist){
                    // Duplicate write with a waiting RPC: flag it as ignored.
                    opctx->isIgnored = true;
                }
            }else{
                // "get": only fill results if an RPC is actually waiting.
                if(isOpExist){
                    if(kv->database.count(operation.key)){
                        opctx->value = kv->database[operation.key];       // key present: return its value
                    }else{
                        opctx->err = ErrNoKey;
                        opctx->value = "";                                  // key absent: return ""
                    }
                }
            }

            lock.unlock(); 

            // Wake the RPC handler blocked on applyChan.TPop(), if any.
            if(isOpExist){  
                kv->applyChan.Push(msg);
            }    
        }
    }
}

// Projects the combined kv config down to the raft-layer peer list.
vector<PeersInfo> KVServer::getRaftPort(vector<kvServerInfo>& kvInfo){
    vector<PeersInfo> peers;
    peers.reserve(kvInfo.size());
    for(const auto& info : kvInfo){
        peers.push_back(info.peersInfo);
    }
    return peers;
}

// Builds the address book for `num` servers: each gets a raft peer id, a pair
// of raft ports, and EVERY_SERVER_PORT kv RPC listener ports, all derived
// from COMMOM_PORT so the ranges never collide.
vector<kvServerInfo> getKvServerPort(int num){
    vector<kvServerInfo> infos(num);
    for(int id = 0; id < num; id++){
        auto& entry = infos[id];
        entry.peersInfo.m_peerId = id;
        entry.peersInfo.m_port.first = COMMOM_PORT + id;
        entry.peersInfo.m_port.second = COMMOM_PORT + id + num;
        for(int k = 0; k < EVERY_SERVER_PORT; k++){
            entry.kvPort.push_back(COMMOM_PORT + id + (k + 2) * num);
        }
    }
    return infos;
}

// Reports whether the underlying raft instance currently believes it is
// leader; getState() yields a (term, isLeader) pair.
bool KVServer::getRaftState(){
    auto state = raft->getState();
    return state.second;
}

// Testing hook for snapshot install: stops the raft layer so it no longer
// accepts log entries and falls behind the leader.
void KVServer::killRaft(){
    raft->kill();
}

// Reactivates a previously killed raft instance; on catch-up the raft layer
// issues an install-snapshot RPC to repair the lagging state machine.
void KVServer::activateRaft(){
    raft->activate();
}

// Test driver: boots a 5-server cluster, prints each replica's view of key
// "abc", kills one non-leader raft so its log falls behind, then reactivates
// it to exercise snapshot installation.
int main(){
    const int serverCount = 5;   // BUG FIX: the literal 5 was repeated in four places
    vector<kvServerInfo> servers = getKvServerPort(serverCount);
    srand((unsigned)time(NULL));
    // Never freed: the servers run detached threads for the life of the
    // process, which only ends via SIGINT.
    KVServer* kv = new KVServer[servers.size()];
    for(int i = 0; i < serverCount; i++){
        kv[i].StartKvServer(servers, i);
    }

    //--------------------------------------test---------------------------------------------
    sleep(3);
    for(int i = 0; i < serverCount; i++){
        printf("server%d's key : abc -> value is %s\n", i, kv[i].test("abc").c_str());
    }
    sleep(5);
    int victim = 0;
    while(1){
        victim = rand() % serverCount;
        if(!kv[victim].getRaftState()){
            // Crash a non-leader raft: it stops taking the leader's
            // appendEntries RPCs so its log falls behind the leader's snapshot.
            kv[victim].killRaft();
            break;
        }
    }
    sleep(3);
    // Reactivate it: the raft layer issues an install RPC and the lagging
    // kvServer receives the snapshot obtained from the raft leader.
    kv[victim].activateRaft();
    //--------------------------------------test---------------------------------------------

    // BUG FIX: `while(1);` busy-spun a full core forever; sleep instead.
    while(1) sleep(10);
}
