#!/usr/bin/env python
# -*- coding: utf-8 -*-
# time: 2023/11/3 14:18
# file: api.py (stream API for ChatGLM2)
# author: Yingxiao Zhang
from fastapi import FastAPI, Request
from sse_starlette.sse import ServerSentEvent, EventSourceResponse
from fastapi.responses import StreamingResponse
from fastapi.middleware.cors import CORSMiddleware
import uvicorn
import torch
from transformers import AutoTokenizer, AutoModel
import argparse
import logging
import os
import json
import sys
import datetime
import time


def getLogger(name, file_name, use_formatter=True):
    """Return an INFO-level logger that writes to stdout and optionally a file.

    Args:
        name: logger name passed to ``logging.getLogger``.
        file_name: path of the log file; falsy to skip file logging.
        use_formatter: attach a timestamped formatter to the file handler.

    Returns:
        The configured ``logging.Logger`` instance.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    # Bug fix: only attach handlers the first time this logger name is
    # requested. The original unconditionally added handlers on every call,
    # so repeated calls duplicated every log line.
    if not logger.handlers:
        console_handler = logging.StreamHandler(sys.stdout)
        formatter = logging.Formatter('%(asctime)s    %(message)s')
        console_handler.setFormatter(formatter)
        console_handler.setLevel(logging.INFO)
        logger.addHandler(console_handler)
        if file_name:
            handler = logging.FileHandler(file_name, encoding='utf8')
            handler.setLevel(logging.INFO)
            if use_formatter:
                formatter = logging.Formatter('%(asctime)s - %(name)s - %(message)s')
                handler.setFormatter(formatter)
            logger.addHandler(handler)
    return logger

# Per-day log file under /opt/log (e.g. streamApi_2023-11-03.log).
date = datetime.date.today().isoformat()  # 'YYYY-MM-DD', same as strftime('%Y-%m-%d')
logger = getLogger('streamApi', f'/opt/log/streamApi_{date}.log')

# Number of most recent (query, answer) pairs kept from the client history.
MAX_HISTORY = 5


class ChatGLM():
    """Thin wrapper around a local ChatGLM2-LoRA checkpoint.

    Loads the tokenizer and quantized model from /opt/chat-glm2-lora,
    converts it to a fastllm model, and exposes blocking (`answer`) and
    streaming (`stream`) chat entry points.
    """

    def __init__(self, quantize_level, gpu_id) -> None:
        """Load tokenizer + model and run one warm-up chat call.

        Args:
            quantize_level: quantization bits (16, 8 or 4) forwarded to `_model`.
            gpu_id: GPU id string (currently unused by `_model`; devices are
                selected via CUDA_VISIBLE_DEVICES before construction).
        """
        logger.info("Start initialize model...")
        self.tokenizer = AutoTokenizer.from_pretrained(
            "/opt/chat-glm2-lora", trust_remote_code=True)
        self.model = self._model(quantize_level, gpu_id)
        # Convert the huggingface model into a fastllm model.
        # from_hf currently only accepts the original model or ChatGLM
        # int4/int8 quantized models; other quantized models are not
        # convertible yet.
        from fastllm_pytools import llm
        self.model = llm.from_hf(self.model, self.tokenizer, dtype="int4")  # dtype supports "float16", "int8", "int4"
        # Warm-up call so the first real request does not pay lazy-init cost.
        _, _ = self.model.chat(self.tokenizer, "你好", history=[])
        logger.info("Model initialization finished.")

    def _model(self, quantize_level, gpu_id):
        """Load the base model quantized to ``quantize_level`` bits."""
        model_name = "/opt/chat-glm2-lora"
        # Bug fix: use the quantize_level argument. The original read the
        # module-global `args`, which only exists when run as a script and
        # raised NameError when this class was imported elsewhere.
        quantize = int(quantize_level)
        model = AutoModel.from_pretrained(model_name, trust_remote_code=True).quantize(quantize)
        return model

    def clear(self) -> None:
        """Release cached CUDA memory on every visible device (best-effort)."""
        if torch.cuda.is_available():
            # Bug fix: the original iterated `self.devices`, an attribute
            # that was never assigned, so this always raised AttributeError.
            for device_id in range(torch.cuda.device_count()):
                with torch.cuda.device(device_id):
                    torch.cuda.empty_cache()
                    torch.cuda.ipc_collect()

    def answer(self, query: str, history, max_length):
        """Run one blocking chat turn; returns (response, updated history)."""
        response, history = self.model.chat(self.tokenizer, query, history=history, max_length=max_length)
        # Normalize history tuples to lists so the result is JSON-serializable.
        history = [list(h) for h in history]
        return response, history

    def stream(self, query, history, max_length):
        """Yield incremental chat events.

        Each event dict carries the full `response` so far plus the `delta`
        since the previous event; the final event has ``finished: True``.
        """
        if query is None or history is None:
            yield {"query": "", "delta": "", "response": "", "history": [], "finished": True}
            # Bug fix: stop here. The original fell through and called
            # stream_chat with None arguments after yielding this event.
            return
        size = 0
        response = ""
        start = time.time()
        for response, history in self.model.stream_chat(self.tokenizer, query, history, max_length=max_length):
            this_response = response[size:]
            history = [list(h) for h in history]
            size = len(response)
            yield {"query": query, "delta": this_response, "response": response, "finished": False}
        logger.info("Query - {}".format(query))
        if len(history) > 0:
            logger.info("History - {}".format(history))
        tok_res = self.tokenizer.tokenize(response)
        logger.info("stream speed: {}token/s".format(len(tok_res) / (time.time() - start)))
        logger.info("Answer - {}".format(response))
        yield {"query": query, "delta": "[EOS]", "response": response, "history": history, "finished": True}

def start_server(quantize_level, http_address: str, port: int, gpu_id: str):
    """Build the FastAPI app around a ChatGLM bot and serve it with uvicorn.

    Args:
        quantize_level: quantization bits forwarded to ChatGLM (16, 8 or 4).
        http_address: interface to bind, e.g. '0.0.0.0'.
        port: TCP port to listen on.
        gpu_id: value for CUDA_VISIBLE_DEVICES ('-1' means CPU only).
    """
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id

    bot = ChatGLM(quantize_level, gpu_id)

    app = FastAPI()
    app.add_middleware(CORSMiddleware,
                       allow_origins=["*"],
                       allow_credentials=True,
                       allow_methods=["*"],
                       allow_headers=["*"]
                       )

    def _fit_input(text, history, max_length):
        # Shared truncation logic (previously duplicated in /chat and /stream):
        # keep query + history within max_length characters, dropping the most
        # recent history entries first, exactly as the original code did.
        if len(text) > max_length:
            return text[-max_length:], []
        his_len = sum([len(item[0]) + len(item[1]) for item in history]) + len(text) if history != [] else 0
        while his_len > max_length:
            history.pop()
            his_len = sum([len(item[0]) + len(item[1]) for item in history]) + len(text) if history != [] else 0
        return text, history

    @app.post("/")
    def index():
        # Liveness probe.
        return {'message': 'started', 'success': True}

    @app.post("/chat")
    def answer_question(arg_dict: dict):
        # Blocking chat endpoint: returns the full answer in a single response.
        # Expects arg_dict = {"query": str, "history": [[q, a], ...]}.
        result = {"query": "", "response": "", "history": [], "success": False}
        try:
            text = arg_dict["query"]
            ori_history = arg_dict["history"]
            history = ori_history[-MAX_HISTORY:]
            max_length = 2048
            text, history = _fit_input(text, history, max_length)
            history = [tuple(h) for h in history]
            start = time.time()
            response, history = bot.answer(text, history, max_length)
            tok_res = bot.tokenizer.tokenize(response)
            logger.info("chat speed: {}token/s".format(len(tok_res) / (time.time() - start)))
            logger.info("Query - {}".format(text))
            if len(ori_history) > 0:
                logger.info("History - {}".format(ori_history))
            logger.info("Answer - {}".format(response))
            ori_history.append((text, response))
            result = {"query": text, "response": response,
                      "history": ori_history, "success": True}
        except Exception as e:
            logger.error(f"error: {e}")
        return result

    @app.post("/stream")
    def answer_question_stream(arg_dict: dict):
        # SSE endpoint: emits incremental deltas as the model generates.
        def decorate(generator):
            for item in generator:
                yield ServerSentEvent(json.dumps(item, ensure_ascii=False), event='delta')

        max_length = 2048
        try:
            text = arg_dict["query"]
            ori_history = arg_dict["history"]
            history = ori_history[-MAX_HISTORY:]
            text, history = _fit_input(text, history, max_length)
            history = [tuple(h) for h in history]
            return EventSourceResponse(decorate(bot.stream(text, history, max_length)))
        except Exception as e:
            logger.error(f"error: {e}")
            # Fall back to a single "finished" event so the client terminates cleanly.
            return EventSourceResponse(decorate(bot.stream(None, None, max_length)))

    @app.get("/clear")
    def clear():
        # Free cached GPU memory; best-effort, never raises to the client.
        try:
            bot.clear()
            return {"success": True}
        except Exception as e:
            # Bug fix: the original swallowed the exception silently.
            logger.error(f"error: {e}")
            return {"success": False}

    logger.info("starting server...")
    uvicorn.run(app=app, host=http_address, port=port, workers=1)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='API Service')
    parser.add_argument('--device', '-d', help='device，-1 means cpu, other means gpu ids', default='0')
    # Bug fix: type=int so CLI-supplied values arrive as int like the defaults
    # (argparse delivers strings otherwise; --quantize previously leaked a str
    # into start_server and relied on a downstream cast).
    parser.add_argument('--quantize', '-q', type=int, help='level of quantize, option：16, 8 or 4', default=4)
    parser.add_argument('--host', '-H', help='host to listen', default='0.0.0.0')
    parser.add_argument('--port', '-P', type=int, help='port of this service', default=8800)
    args = parser.parse_args()
    start_server(args.quantize, args.host, int(args.port), args.device)
