
from collections import deque
import logging, pickle, socket, torch, gzip, struct, signal, time
from typing import Any, Optional, Union
from feasytools import ArgChecker,time2str

from utils.tools import Scaler
from rolling import _TPredFrontEnd, _TPredBackEnd
from models.wargs import WorkerArguments

class TPredInputPacket:
    """Wire format for a prediction request: a timestamp queue plus the
    matching queue of input tensors, serialized as gzip-compressed pickle.

    NOTE(review): `loads` runs `pickle.loads` on bytes received over the
    network — safe only with trusted clients; do not expose publicly.
    """

    @staticmethod
    def loads(buf: bytes) -> 'TPredInputPacket':
        """Deserialize a packet produced by `tobytes`."""
        return TPredInputPacket(*pickle.loads(gzip.decompress(buf)))

    def __init__(self, tl: 'deque[int]', dt: 'deque[torch.Tensor]'):
        self.tl = tl  # timestamps, one per tensor in dt
        self.dt = dt  # input tensors aligned with tl

    def totuple(self) -> 'tuple[deque[int], deque[torch.Tensor]]':
        """Return the payload as a (timestamps, tensors) tuple."""
        return self.tl, self.dt

    def tobytes(self, level: int = 5) -> bytes:
        """Serialize to gzip-compressed pickle.

        Args:
            level: gzip compression level 0-9 (default 5, the original
                hard-coded value, so existing callers are unchanged).
        """
        return gzip.compress(pickle.dumps((self.tl, self.dt)), level)


class TPredOutputPacket:
    """Wire format for a prediction response: a list of timestamps and the
    predicted tensor, serialized as gzip-compressed pickle."""

    def __init__(self, tl: 'list[int]', dt: torch.Tensor):
        self.tl = tl  # timestamps of the predicted steps
        self.dt = dt  # predicted tensor

    @staticmethod
    def loads(buf: bytes) -> 'TPredOutputPacket':
        """Inverse of `tobytes`: decompress, unpickle, rebuild the packet."""
        payload = pickle.loads(gzip.decompress(buf))
        return TPredOutputPacket(*payload)

    def totuple(self) -> 'tuple[list[int], torch.Tensor]':
        """Unpack the payload as a (timestamps, tensor) pair."""
        return self.tl, self.dt

    def tobytes(self) -> bytes:
        """Serialize the payload for transmission over the socket."""
        raw = pickle.dumps((self.tl, self.dt))
        return gzip.compress(raw, 5)


class TrafficPredictorClient(_TPredFrontEnd):
    """Front end that ships the rolling window to a remote
    `TrafficPredictorServer` over TCP and returns its prediction.

    Protocol (both directions): 4-byte little-endian length prefix,
    then a gzip-compressed pickle payload.
    """

    def __init__(self, model_pars: 'Union[WorkerArguments, dict[str, Any]]',
                 server_addr: 'tuple[str, int]'):
        super().__init__(model_pars)
        self.__server_addr = server_addr

    @staticmethod
    def __recvall(sock: socket.socket, length: int) -> bytes:
        """Read exactly `length` bytes from `sock`.

        TCP `recv` may return fewer bytes than requested, so loop until
        the full message arrives.

        Raises:
            ConnectionError: if the peer closes before `length` bytes arrive.
        """
        buf = bytearray()
        while len(buf) < length:
            chunk = sock.recv(length - len(buf))
            if not chunk:  # peer closed the connection mid-message
                raise ConnectionError("Server closed connection prematurely")
            buf.extend(chunk)
        return bytes(buf)

    def predict(self) -> 'tuple[list[int], torch.Tensor]':
        """Send the current window to the server and return its prediction.

        Retries the connection up to 10 times (1s apart) before giving up.

        Raises:
            ConnectionRefusedError: if the server never accepts.
            ConnectionError: if the connection drops mid-transfer.
        """
        dpack = TPredInputPacket(self._tl, self._dt).tobytes()
        self.__sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            # Retry loop: server may still be starting up.
            for _ in range(10):
                try:
                    self.__sock.connect(self.__server_addr)
                    break
                except ConnectionRefusedError:
                    time.sleep(1)
            else:
                raise ConnectionRefusedError("Cannot connect to server!")
            self.__sock.sendall(struct.pack("<I", len(dpack)))
            self.__sock.sendall(dpack)
            # Length-prefixed reply; read exactly, never assume one recv
            # returns the whole message.
            length = struct.unpack("<I", self.__recvall(self.__sock, 4))[0]
            ret = self.__recvall(self.__sock, length)
        finally:
            # Close on every path — the original leaked the socket on error.
            self.__sock.close()
        return TPredOutputPacket.loads(ret).totuple()


class TrafficPredictorServer:
    """TCP server wrapping a `_TPredBackEnd`: accepts length-prefixed,
    gzip-pickled request packets, runs the model, and replies with a
    length-prefixed `TPredOutputPacket`.

    A reply length of 0 signals a malformed/incomplete request to the client.
    NOTE(review): requests are deserialized with `pickle.loads` — run this
    only on a network segment where all clients are trusted.
    """

    def __init__(self, model_pars: 'Union[WorkerArguments, dict[str, Any]]',
                 scaler: 'Optional[Scaler]', server_addr: 'tuple[str, int]'):
        self.__pred = _TPredBackEnd(model_pars, scaler)
        self.__sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # 1s accept timeout lets serve_forever poll the stop flag.
        self.__sock.settimeout(1)
        self.__sock.bind(server_addr)
        self.__stop = False
        self.__ok_cnt = 0   # successfully answered requests
        self.__bad_cnt = 0  # malformed/incomplete requests
        self.__err_cnt = 0  # requests that raised

    def predict(self, tl: 'deque[int]', dt: 'deque[torch.Tensor]') -> 'tuple[list[int], torch.Tensor]':
        """Run the back-end model on one (timestamps, tensors) window."""
        return self.__pred.predict(tl, dt)

    def __recv(self, clnt: socket.socket, length: int) -> bytes:
        """Receive up to `length` bytes from `clnt`.

        Returns fewer bytes than requested if the peer closes early or the
        retry budget (length//4096 + 21 recv calls) is exhausted; the
        caller detects the short read and reports a bad request.
        """
        chunks = []
        received = 0
        max_attempts = length // 4096 + 21
        for _ in range(max_attempts):
            if received >= length:
                break
            chunk = clnt.recv(length - received)
            if not chunk:  # peer closed — retrying would spin forever
                break
            chunks.append(chunk)
            received += len(chunk)
        return b"".join(chunks)

    def serve_action(self):
        """Accept and fully handle one client connection (blocking, <=1s wait)."""
        try:
            clnt, addr = self.__sock.accept()
        except OSError:  # includes socket.timeout; keep polling the stop flag
            return
        st = time.time()
        try:
            # Read the 4-byte length prefix exactly; a single recv(4) may
            # legally return fewer bytes on TCP.
            hdr = self.__recv(clnt, 4)
            if len(hdr) < 4:
                self.__bad_cnt += 1
                logging.warning(f"CLNT: {addr}, incomplete length header")
                return
            length: int = struct.unpack("<I", hdr)[0]
            recv_data = self.__recv(clnt, length)
            if len(recv_data) < length:
                # Incomplete payload: reply with zero length as the error marker.
                clnt.sendall(struct.pack("<I", 0))
                rlen = 0
            else:
                data = TPredInputPacket.loads(recv_data)
                result = TPredOutputPacket(*self.predict(data.tl, data.dt)).tobytes()
                rlen = len(result)
                clnt.sendall(struct.pack("<I", len(result)))
                clnt.sendall(result)
        finally:
            # Close on every path — the original leaked the socket if
            # predict()/loads() raised.
            clnt.close()
        msg = f"CLNT: {addr}, I_LEN: {length}, T: {(time.time()-st)*1000:.0f}ms, O_LEN: {rlen}"
        if rlen == 0:
            self.__bad_cnt += 1
            logging.warning(msg)
        else:
            self.__ok_cnt += 1
            logging.info(msg)

    def serve_forever(self):
        """Serve until `shutdown` is called, printing a status line ~1/s."""
        self.__sock.listen(32)
        last_time = 0
        st_time = time.time()
        while not self.__stop:
            this_time = time.time()
            if this_time - last_time > 1:
                print(f"\rDuration: {time2str(this_time-st_time)}, OK: {self.__ok_cnt}, BAD: {self.__bad_cnt}, ERR: {self.__err_cnt}", end=" ")
                last_time = this_time
            try:
                self.serve_action()
            except Exception as e:
                # One bad request must not kill the server; count and log it.
                self.__err_cnt += 1
                logging.exception(e)

    def shutdown(self):
        """Request serve_forever to exit after its current iteration."""
        self.__stop = True

    def cleanup(self):
        """Release the listening socket. Call after serve_forever returns."""
        self.__sock.close()


if __name__ == "__main__":
    # Entry point: parse CLI args, load the fitted scaler, and run the
    # prediction server until Ctrl+C.
    pars = ArgChecker()
    addr = pars.pop_str("addr", "localhost:9999").split(":")
    # Explicit validation instead of `assert`, which vanishes under `python -O`.
    if len(addr) != 2:
        raise ValueError(f"addr must be 'host:port', got {':'.join(addr)!r}")
    server_addr = (addr[0], int(addr[1]))  # int() raises ValueError on a bad port
    logging.basicConfig(filename=pars.pop_str('logfile', 'server.log'),
        level=logging.INFO, format='%(asctime)s | %(levelname)s | %(message)s')
    wa = WorkerArguments(pars)
    print("Traffic AI Predictor Server - Press Ctrl+C to exit")
    logging.debug("Loading scaler...")
    scaler_path = "./ckpts/" + wa.data + "/scaler.pkl"
    with open(scaler_path, "rb") as f:
        scaler = pickle.load(f)
    logging.debug("Creating server...")
    server = TrafficPredictorServer(wa, scaler, server_addr)

    def sigint_handler(signum, frame):
        # Flip the stop flag; serve_forever exits after its current iteration.
        print("\nShutting down server...")
        server.shutdown()

    signal.signal(signal.SIGINT, sigint_handler)
    logging.info("Server start!")
    server.serve_forever()
    server.cleanup()
    logging.info("Server stop!")
