# !/usr/bin/env python
# -*- coding:utf-8 -*-
# ==================================================================
# [CreatedDate]  : Tuesday, 2023-07-04 09:19:30
# [Author]       : shixiaofeng
# [Descriptions] :
# ==================================================================
# [ChangeLog]:
# [Date]    	[Author]	[Comments]
# ------------------------------------------------------------------

import json
import logging
import os
import re
import sys
import time
import traceback

import init_path
import numpy as np
import torch
import uvicorn
from asgiref.sync import sync_to_async
from cyg_conversation import covert_prompt_to_input_ids_with_history
from fastapi.responses import StreamingResponse
from flagai.auto_model.auto_loader import AutoLoader
# from flagai.data.tokenizer import Tokenizer
from flagai.model.predictor.aquila_server import (
    aquila_generate_by_ids, aquila_generate_by_ids_stream)
from flagai.model.predictor.predictor import Predictor
from transformers import CLIPTextModel
from transformers import AutoTokenizer, CLIPTokenizer

# from router import RouterTokenizer

# NOTE: fork from FlagAI/examples/Aquila/Aquila-server/aquila_server.py

# Module-level logger with a dedicated console handler so the server emits
# INFO-level logs even when the root logger is left unconfigured.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
_LogFormat = logging.Formatter(
    "%(asctime)2s -%(name)-12s: %(levelname)-s/Line[%(lineno)d]/Thread[%(thread)d]  - %(message)s")

# Console handler mirrors the logger level (INFO) and carries the shared format.
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(_LogFormat)
logger.addHandler(console)


# --- Server / model configuration -------------------------------------------

# When True, each request is classified by the router and served by a
# language-specific LoRA adapter; otherwise the plain base model is used.
USE_ROUTER = True
server_port = 9174

# Base Aquila checkpoint.
base_model_dir = "/data/shixiaofeng/checkpoints/multilang_lora/basemodel"
base_model_name = 'aquilachat-7b'

# Language-router classifier checkpoint (decides which LoRA adapter to use).
# FIX: the earlier assignment was dead code (immediately overwritten); kept
# here as a comment for reference.
# router_path = "/data/shixiaofeng/checkpoints/multilang_lora/rounter_v2/lora_classify_model_5.pkl"
router_path = "/data/shixiaofeng/lora_router_model_0.pkl"

# LoRA adapter checkpoints, one per language.
lora_adaptor_ch_path = "/data/shixiaofeng/checkpoints/multilang_lora/chi_lora_checkpoint"
lora_adaptor_en_path = "/data/shixiaofeng/checkpoints/multilang_lora/eng_lora_checkpoint"

# (router label, adapter path, device) — each adapter is pinned to its own GPU.
lora_collections = [["中文", lora_adaptor_ch_path, "cuda:1"],
                    ["英文", lora_adaptor_en_path, "cuda:2"]]

# Device hosting the router model (and the base model when USE_ROUTER is False).
device = "cuda:0"
# print(f"device is {device}")
# print(f"device is {device}")

# ================= LORA CLASSIFY ===============
class Lora_ClassifyModel(torch.nn.Module):
    """Binary text classifier used to route a prompt to a LoRA adapter.

    Embeds the input through a CLIP text encoder (via ``RouterTokenizer``),
    mean-pools over the sequence dimension, and projects to two logits.
    Attribute names (``tokenizer``, ``fc``) must stay stable: instances are
    restored from pickled checkpoints via ``torch.load``.
    """

    def __init__(self):
        super().__init__()
        self.tokenizer = RouterTokenizer()
        self.embed_dim = 768   # CLIP text hidden size
        self.num_class = 2     # two routing labels (中文 / 英文)
        self.fc = torch.nn.Linear(self.embed_dim, self.num_class)

    def forward(self, x):
        # (batch, seq, 768) token embeddings -> mean pool -> (batch, 2) logits
        token_states = self.tokenizer.emb(x)
        pooled = token_states.mean(dim=1)
        return self.fc(pooled)

class RouterTokenizer(torch.nn.Module):
    """CLIP-based text embedder used by the LoRA router classifier.

    NOTE(review): ``CLIPTextModel()`` / ``CLIPTokenizer()`` are called with
    no arguments, which normally requires a config or pretrained path; in
    practice instances are restored with ``torch.load`` so ``__init__`` never
    runs for deployed checkpoints — confirm before constructing directly.
    Attribute names must stay stable for unpickling.
    """

    def __init__(self):
        super().__init__()
        self.text_encoder = CLIPTextModel()
        self.tokenizer = CLIPTokenizer()

    def emb(self, prompt):
        """Return CLIP last-hidden-state embeddings for *prompt*."""
        # prompt example: 'a small cute dog.'
        encoded = self.tokenizer(
            prompt,
            truncation=True,
            max_length=77,  # CLIP's fixed text context length
            return_length=True,
            return_overflowing_tokens=False,
            padding="max_length",
            return_tensors="pt",
        ).to(device)
        encoder_out = self.text_encoder(input_ids=encoded['input_ids'])
        return encoder_out['last_hidden_state']

    def id_to_label(self, id):
        """Map a predicted class index (0 or 1) to its routing label."""
        return {"0": "英文", "1": "中文"}[str(id)]

class Router(object):
    """Predicts the LoRA language label for an incoming prompt.

    Thin wrapper around a pickled ``Lora_ClassifyModel`` checkpoint.
    """

    def __init__(self, path):
        self.setup_model(path)

    def setup_model(self, path="/data0/shixiaofeng/lora/lora_classify/lora_classify_model_5.pkl"):
        """Load the classifier checkpoint onto the router device, eval mode."""
        # NOTE(review): torch.load unpickles arbitrary objects — only ever
        # point this at trusted checkpoint files.
        self.router_model = torch.load(path)
        self.router_model.to(device=device)
        self.router_model.eval()

    def predict(self, inp_text):
        """Return the routing label ("中文"/"英文") for *inp_text*."""
        scores = self.router_model(inp_text)
        best_idx = torch.max(scores, dim=1)[1]
        return self.router_model.tokenizer.id_to_label(best_idx.tolist()[0])

    def select_loramodel(self, lora_type):
        """Return an adapter path for *lora_type*.

        NOTE(review): placeholder — both branches return the same dummy
        value; appears unused by the server.
        """
        if lora_type == "中文":
            lora_model_path = "xxx"
        else:
            lora_model_path = "xxx"
        return lora_model_path


def _truncate_at_marker(out, tokenizer, convert_tokens, probs, marker):
    """Cut *out* at the first occurrence of *marker* (if present) and trim
    the parallel convert_tokens / probs lists to the surviving token count."""
    if marker in out:
        out = out[: out.index(marker)]
        # Re-encode the truncated text to count surviving tokens; [1:-1]
        # drops the encoder's leading/trailing special tokens.
        token_length = len(tokenizer.encode_plus(out)["input_ids"][1:-1])
        convert_tokens = convert_tokens[:token_length]
        probs = probs[:token_length]
    return out, convert_tokens, probs


def predict(tokenizer, model, id2word, text,
            max_gen_len=200, top_p=0.95,
            prompts_tokens=None, seed=1234, topk=100,
            temperature=0.9, sft=True):
    """Run one non-streaming Aquila generation and post-process the output.

    Args:
        tokenizer, model: FlagAI tokenizer and model instances.
        id2word: vocab-id -> token-string map used for readable token output.
        text: raw user prompt.
        max_gen_len, top_p, topk, temperature, seed: sampling settings.
        prompts_tokens: pre-built input ids (used when sft=True); defaults to
            an empty list. (FIX: was a mutable default argument ``[]``.)
        sft: when False, input ids are re-derived from *text* instead.

    Returns:
        (out, convert_tokens, probs, model_in): generated text, readable
        tokens, per-token probabilities, and the decoded model input.
    """
    if prompts_tokens is None:
        prompts_tokens = []

    # Collapse runs of newlines. (FIX: raw string for the regex pattern.)
    prompt = re.sub(r'\n+', '\n', text)
    if not sft:
        # Raw-LM mode: encode the prompt, dropping the trailing special token.
        prompts_tokens = tokenizer.encode_plus(prompt)["input_ids"][:-1]

    model_in = tokenizer.decode(prompts_tokens)
    # NOTE(review): generation is pinned to the module-level `device`
    # ("cuda:0") even though LoRA models may live on cuda:1/2 — confirm the
    # `device` argument's semantics in aquila_generate_by_ids.
    with torch.cuda.device(0):
        with torch.no_grad():
            out, tokens, probs = aquila_generate_by_ids(model=model, tokenizer=tokenizer,
                                                        input_ids=prompts_tokens,
                                                        out_max_length=max_gen_len, top_p=top_p, top_k=topk,
                                                        seed=seed, temperature=temperature, device=device)

    # Map ids to readable tokens; id 100006 is rendered as [CLS].
    # NOTE(review): "[unkonwn_token]" typo kept byte-identical — downstream
    # consumers may match on it.
    convert_tokens = [
        "[CLS]" if t == 100006 else id2word.get(t, "[unkonwn_token]")
        for t in tokens
    ]

    # Stop the completion at any known terminator, in the original order.
    for marker in ("###", "[UNK]", "</s>"):
        out, convert_tokens, probs = _truncate_at_marker(
            out, tokenizer, convert_tokens, probs, marker)

    # Drop a single leading space (and its token/prob entry) if present.
    if len(out) > 0 and out[0] == " ":
        out = out[1:]

        convert_tokens = convert_tokens[1:]
        probs = probs[1:]

    return out, convert_tokens, probs, model_in


def init_flask():
    """Build the FastAPI app with the two generation endpoints.

    - POST /func: full (non-streaming) completion with tokens and logprobs.
    - POST /stream_func: plain-text token stream with a wall-clock budget.

    Relies on module globals: router and lora_map when USE_ROUTER is True,
    otherwise the base model / tokenizer / id2word, plus device and
    base_model_name.
    """
    from fastapi import FastAPI, Request

    app = FastAPI()

    @app.post("/func")
    async def get_generate_h(request: Request):
        # NOTE(review): request.json() already parses the body, so this
        # endpoint expects the client to POST a JSON-encoded *string* of the
        # real payload (double-encoded); kept as in the original fork.
        json_post_raw = await request.json()
        config = json.loads(json_post_raw)

        print("request come in")
        text = config["prompt"]
        topp = config.get("top_p", 0.95)
        max_length = config.get("max_new_tokens", 256)
        topk = config.get("top_k_per_token", 1000)
        temperature = config.get("temperature", 0.9)
        sft = config.get("sft", False)
        seed = config.get("seed", 1234)
        history = config.get("history", [])

        if USE_ROUTER:
            # Route the request to the language-specific LoRA model.
            lora_type = router.predict(text)
            lora_details = lora_map[lora_type]
            active_model = lora_details["model"]
            active_tokenizer = lora_details["tokenizer"]
            active_id2word = lora_details["id2word"]
            logger.info(f"USE_ROUTER: [{USE_ROUTER}], lora_type is [{lora_type}]")
        else:
            # FIX: model/tokenizer/id2word were previously assigned only in
            # the USE_ROUTER branch, which made them function-locals and
            # raised UnboundLocalError whenever USE_ROUTER was False. Fall
            # back to the module-level base model explicitly.
            active_model, active_tokenizer, active_id2word = model, tokenizer, id2word

        tokens = covert_prompt_to_input_ids_with_history(text, history, active_tokenizer, max_length)

        # Generation blocks on the GPU; run it off the event loop thread.
        out, tokens, probs, model_in = await sync_to_async(predict)(active_tokenizer, active_model, active_id2word, text,
                                                                    max_gen_len=max_length, top_p=topp,
                                                                    prompts_tokens=tokens, topk=topk,
                                                                    temperature=temperature, sft=sft, seed=seed)

        result = {
            "completions": [{
                "text": out,
                "tokens": tokens,
                "logprobs": probs,
                "top_logprobs_dicts": [{k: v} for k, v in zip(tokens, probs)],
                "model_in": model_in
            }],
            "input_length": len(config["prompt"]),
            "model_info": base_model_name}

        return result

    @app.post("/stream_func")
    async def get_generate_stream(request: Request):
        # Same double-encoded payload convention as /func.
        json_post_raw = await request.json()
        config = json.loads(json_post_raw)

        contexts = config["prompt"]
        topk = config.get("top_k_per_token", 20)
        topp = config.get("top_p", 0.9)
        t = config.get("temperature", 0.9)
        seed = config.get("seed", 1234)
        history = config.get("history", [])
        max_length = config.get("max_new_tokens", 256)
        gene_time = config.get("time", 15)
        # NOTE(review): hard-coded override of the client-supplied time
        # budget; the "time" field above is effectively ignored.
        gene_time = 40

        if USE_ROUTER:
            lora_type = router.predict(contexts)
            lora_details = lora_map[lora_type]
            active_model = lora_details["model"]
            active_tokenizer = lora_details["tokenizer"]
            logger.info(f"USE_ROUTER: [{USE_ROUTER}], lora_type is [{lora_type}]，input question is [{contexts}]")
        else:
            # FIX: same UnboundLocalError fallback as in /func.
            active_model, active_tokenizer = model, tokenizer

        tokens = covert_prompt_to_input_ids_with_history(contexts, history, active_tokenizer, max_length)

        # NOTE(review): the generator body executes lazily, largely outside
        # this no_grad scope; kept as in the original.
        with torch.no_grad():
            fun = aquila_generate_by_ids_stream(active_model, active_tokenizer, tokens,
                                                out_max_length=max_length+len(tokens),
                                                top_k=topk, top_p=topp,
                                                temperature=t,
                                                seed=seed, device=device)

        def trans():
            # Drain the token generator until exhaustion, error, or timeout.
            start_time = time.time()
            while True:
                try:
                    next_token = next(fun)
                    logger.info(f"chatmodel next token is: {next_token}")
                    yield next_token
                except StopIteration:
                    logger.info("get StopIteration")
                    break
                except Exception:
                    # FIX: traceback.print_exc() returns None (it prints to
                    # stderr); log the formatted traceback instead.
                    logger.error(traceback.format_exc())
                if time.time() - start_time > gene_time:
                    print("time up")
                    break

        return StreamingResponse(trans(), media_type="text/plain")

    return app



# def test():
#     model = torch.load('/data/shixiaofeng/checkpoints/multilang_lora/rounter_v2/lora_classify_model_5.pkl')
#     with torch.no_grad():
#         model.eval()
#         model.to("cuda:0")
#         inputs = ["可以翻译这段话为中文吗：'hello'"]
#         logits = model(inputs)
#         predicted = torch.max(logits, dim=1)[1]
#         print(predicted)

# test()


print("building model...")

# --- Load the language router ------------------------------------------------
ts = time.time()
router = Router(router_path)
te = time.time()

# FIX: "time cose" typo in the log message.
logger.info("Load router time cost is [{}]".format(te-ts))


start_time = time.time()
# NOTE: always pass the device here, otherwise the model is first loaded on
# CPU, which is very slow.

# Build the LoRA table: router label -> {model, tokenizer, vocab, id2word},
# each adapter on its own GPU; otherwise load a single base model.
if USE_ROUTER:
    lora_map = {}
    for lora_type, adaptor_path, _device in lora_collections:
        ts = time.time()
        if not os.path.exists(adaptor_path):
            # Missing adapter: fall back to the plain base model weights.
            logger.warning("Given adaptor path: [{}] not exists".format(adaptor_path))
            adaptor_path = None
        loader = AutoLoader("lm",
                            model_dir=base_model_dir,
                            model_name=base_model_name,
                            use_cache=True,
                            fp16=True,
                            device=_device,
                            adapter_dir=adaptor_path)

        model = loader.get_model()
        model.eval()
        model.to(_device)
        tokenizer = loader.get_tokenizer()
        vocab = tokenizer.get_vocab()
        id2word = {v: k for k, v in vocab.items()}

        lora_map[lora_type] = {
            "model": model,
            "tokenizer": tokenizer,
            "vocab": vocab,
            "id2word": id2word,
        }
        te = time.time()
        logger.info("Load [{}] lora model time cost is [{}]".format(lora_type, te-ts))
else:
    loader = AutoLoader("lm",
                        model_dir=base_model_dir,
                        model_name=base_model_name,
                        use_cache=True,
                        fp16=True,
                        device=device)

    model = loader.get_model()
    model.eval()
    model.to(device)
    tokenizer = loader.get_tokenizer()
    vocab = tokenizer.get_vocab()
    id2word = {v: k for k, v in vocab.items()}
    te = time.time()
    # FIX: previously logged te-ts, but ts still pointed at the router-load
    # start time; measure from start_time (base-model load start) instead.
    logger.info("Load Base model time cost is [{}]".format(te-start_time))


app = init_flask()
uvicorn.run(app, host='0.0.0.0', port=int(server_port), workers=1)