# Copyright (c) 2023, Tri Dao.

# To run the huggingface implementation, we first need to convert the weights:
# https://github.com/huggingface/transformers/pull/21955
# python -m transformers.models.llama.convert_llama_weights_to_hf --input_dir $CHECKPOINT_DIR/llama --model_size 7B --output_dir $CHECKPOINT_DIR/llama/7B-hf
# and repeat for 13B, 30B, 65B

import os
import time
from pathlib import Path
current_dir = Path(__file__).parent.absolute()
import random 
import numpy as np 
import torch
import types

from flash_attn.models.gpt import GPTLMHeadModel, combine_state_dicts_tp
from flash_attn.models.llama import remap_state_dict_meta_llama, llama_config_to_gpt2_config
from flash_attn.models.llama import config_from_checkpoint, state_dicts_from_checkpoint
from flash_attn.utils.pretrained import state_dict_from_pretrained
from flash_attn.utils.generation import update_graph_cache
from flagai.data.tokenizer import Tokenizer
import uvicorn, json, datetime
from asgiref.sync import sync_to_async
import requests
import re
from fastapi.responses import StreamingResponse
from conversation_convo_v2 import covert_prompt_to_input_ids, covert_prompt_to_input_ids_with_history
import argparse

# ip = "http://120.92.91.62" # 191 5060-5063  jiuding 7870-7872

# Command-line flags for this inference server.
# NOTE(review): prog/description/epilog are placeholder strings from the
# argparse docs example — they only affect --help output.
parser = argparse.ArgumentParser(
                    prog='ProgramName',
                    description='What the program does',
                    epilog='Text at the bottom of help')

parser.add_argument('--server_port', default=7860, type=int)           # port uvicorn listens on
parser.add_argument('--gpu_id', default=0, type=int)      # CUDA device index
parser.add_argument('--t', default=0.1, type=float)      # sampling temperature
parser.add_argument('--topp', default=0.6, type=float)      # nucleus (top-p) threshold

args = parser.parse_args()


# Tokenizer with the project's 100k vocabulary, cached locally.
tokenizer = Tokenizer.from_pretrained("llama-30b-en", 
                                      cache_dir="./gpt2_new_100k/")
vocab = tokenizer.get_vocab()

# Reverse map: token id -> surface string, used to render generated ids.
id2word = {v:k for k, v in vocab.items()}

model_name='7B'
t = args.t 
topp = args.topp
device = f"cuda:{args.gpu_id}"
server_port = args.server_port

# Identifier echoed back in every API response so clients can tell which
# checkpoint and sampling settings produced a completion.
model_info = f"Aquila-7b-V3-22n8g-67000-flashattn-t{t}-topp{topp}"

model_path = "./Aquila-7b-V3-22n8g-67000-flashattn/pytorch_model.bin"

# Convert a LLaMA-style checkpoint config into flash-attn's GPT2-style config,
# then override fields for this deployment (no dropout, no fused kernels).
checkpoint_path = './llama'
config = llama_config_to_gpt2_config(config_from_checkpoint(checkpoint_path, model_name))
config.vocab_size=100008
config.use_cache = True
config.attn_pdrop = 0.0
config.resid_pdrop = 0.0
config.fused_bias_fc = False
config.use_flash_attn = False
config.fused_mlp = False  # We don't have fused GatedMLP yet
config.fused_dropout_add_ln = False
config.residual_in_fp32 = False
config.layer_norm_epsilon = 1e-5
print(config)
dtype = torch.float16

torch.cuda.set_device(device)

model = GPTLMHeadModel(config, 
                       device=device, 
                       dtype=dtype)

# Plain state dict; the commented ['module'] suffix suggests earlier
# checkpoints were DeepSpeed-wrapped.
sd = torch.load(model_path, map_location="cpu")#['module']

print(f"正在加载参数")  # "loading parameters"
model.load_state_dict(sd, strict=True)
print(f"参数加载成功")  # "parameters loaded successfully"

def set_random_seed(seed):
    """Seed Python, NumPy and PyTorch RNGs for reproducible generation.

    Args:
        seed: integer seed; ``None`` or values <= 0 leave RNG state untouched.
    """
    if seed is not None and seed > 0:
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        # Seed the CUDA generators explicitly so GPU sampling is reproducible
        # regardless of PyTorch version; no-op on CPU-only hosts.
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(seed)

def make_batch(context, tokenizer):
    """Tokenize SFT-style prompts into a right-padded id tensor.

    Args:
        context: list of prompt strings.
        tokenizer: project tokenizer forwarded to ``covert_prompt_to_input_ids``.

    Returns:
        (ids, lengths): int32 tensor of shape (batch, max_len), zero-padded on
        the right and moved to the module-level ``device``, plus each row's
        unpadded token count.
    """
    input_id = []
    length = []
    for item in context:
        # Collapse runs of blank lines; raw string avoids the invalid-escape
        # pitfall of '\n+' written without r''.
        tmp = re.sub(r'\n+', '\n', item)
        tmp = covert_prompt_to_input_ids(tmp, tokenizer)
        length.append(len(tmp))
        input_id.append(tmp)

    # Compute the pad width once, after all rows are tokenized.
    max_length = max(length, default=0)
    res = torch.zeros(len(input_id), max_length).int()
    for idx, item in enumerate(input_id):
        res[idx, :len(item)] = torch.tensor(item, dtype=torch.int32)

    return res.to(device), length

def make_batch_lm(context, tokenizer):
    """Tokenize raw LM prompts into a right-padded id tensor.

    Unlike ``make_batch``, this encodes text directly via ``encode_plus`` and
    drops each encoding's final id (presumably a trailing special token —
    confirm against the tokenizer's output).

    Args:
        context: list of prompt strings.
        tokenizer: tokenizer providing ``encode_plus``.

    Returns:
        (ids, lengths): int32 tensor of shape (batch, max_len), zero-padded on
        the right and moved to the module-level ``device``, plus each row's
        unpadded token count.
    """
    input_id = []
    length = []
    for item in context:
        # Collapse runs of blank lines; raw string avoids the invalid-escape
        # pitfall of '\n+' written without r''.
        tmp = re.sub(r'\n+', '\n', item)
        tmp = tokenizer.encode_plus(tmp)["input_ids"][:-1]
        length.append(len(tmp))
        input_id.append(tmp)

    # Compute the pad width once, after all rows are tokenized.
    max_length = max(length, default=0)
    res = torch.zeros(len(input_id), max_length).int()
    for idx, item in enumerate(input_id):
        res[idx, :len(item)] = torch.tensor(item, dtype=torch.int32)

    return res.to(device), length

def init_flask():
    """Build the FastAPI app exposing POST /batch_func for batched generation.

    The endpoint accepts a JSON body with keys:
        prompt (str | list[str]), max_new_tokens (int, 0 => scoring mode),
        top_k_per_token (int, default 100), seed (int, default 1234),
        sft (bool, default False — selects the prompt-template tokenizer path).

    BUG FIX (review): the original handler declared ``global t`` / ``global
    topp`` and then reused ``t`` as a token-id loop variable, silently
    overwriting the server-wide sampling temperature after the first request.
    Loop variables are renamed and the module globals are now read-only here.
    """
    from fastapi import FastAPI, Request

    app = FastAPI()

    @app.post("/batch_func")
    async def get_generate_batch(request: Request):
        json_post_raw = await request.json()
        # NOTE(review): request.json() already parses the body, so json.loads
        # here implies clients POST a JSON-encoded *string*; confirm with callers.
        configs = json.loads(json_post_raw)
        print("request come in")
        print(configs)
        prompt = configs["prompt"]
        if isinstance(prompt, str):
            prompt = [prompt]
        topk = configs.get("top_k_per_token", 100)
        seed = configs.get("seed", 1234)
        sft = configs.get("sft", False)
        max_length = configs['max_new_tokens']
        set_random_seed(seed)
        print(f"开始运算")  # "starting computation"

        if sft:
            input_ids, length = make_batch(prompt, tokenizer)
        else:
            input_ids, length = make_batch_lm(prompt, tokenizer)
        input_length = input_ids.size(1)

        with torch.no_grad():
            completions = [{} for _ in range(len(input_ids))]
            for idx, item in enumerate(input_ids):
                # Echo the (decoded) prompt actually fed to the model.
                completions[idx]['model_in'] = tokenizer.decode(item[:length[idx]].cpu().numpy())

            if max_length == 0:
                # Scoring mode: return each given token's predicted probability
                # under the model; no new tokens are generated.
                probs = []
                logits = model(input_ids)[0]
                logits = logits.softmax(dim=-1)

                for idx in range(len(logits)):
                    # P(token_i | tokens_<i): logits at position i-1, indexed by
                    # the actual token at position i. First token gets prob 0.
                    probs_tmp = [
                        logits[idx, index - 1, input_ids[idx, index].item()].cpu().item()
                        for index in range(1, length[idx])
                    ]
                    completions[idx]['logprobs'] = [0] + probs_tmp
                    probs.append([0] + probs_tmp)

                convert_tokens = []
                for idx, item in enumerate(input_ids):
                    tokens_tmp = item[:length[idx]].cpu().numpy().tolist()
                    convert_tokens_tmp = []
                    for tok_id in tokens_tmp:
                        if tok_id == 100006:  # special id rendered as [CLS]
                            convert_tokens_tmp.append("[CLS]")
                        else:
                            convert_tokens_tmp.append(id2word.get(tok_id, "[unkonwn_token]"))
                    completions[idx]['tokens'] = convert_tokens_tmp
                    convert_tokens.append(convert_tokens_tmp)

                for idx, (toks, ps) in enumerate(zip(convert_tokens, probs)):
                    completions[idx]['top_logprobs_dicts'] = [{k: v} for k, v in zip(toks, ps)]
                    completions[idx]['text'] = ""
                    completions[idx]['raw_pred'] = ""

                return {
                    "completions": completions,
                    "input_length": input_length,
                    "model_info": model_info,
                }

            # Generation mode: sample up to max_length new tokens per prompt,
            # reading the module-level temperature/top-p settings.
            out = model.generate(
                input_ids=input_ids, max_length=input_length + max_length,
                top_k=topk, top_p=topp, temperature=t,
                vocab_size=config.vocab_size, fused_ft_kernel=True,
                return_dict_in_generate=True, output_scores=True, timing=True,
                eos_token_id=100007)

            print(out.keys())
            for idx, (item, p) in enumerate(zip(out.sequences, out.scores)):
                # Decoded continuation only (prompt tokens stripped).
                tmp = tokenizer.decode(item.cpu().numpy()[input_length:])
                completions[idx]['raw_pred'] = tmp

                ids = item.cpu().numpy()
                convert_tokens_tmp = [id2word.get(tok_id, "[unkonwn_token]") for tok_id in ids]
                convert_tokens_tmp = convert_tokens_tmp[input_length:]

                probs_tmp = p.cpu().numpy()[input_length:]

                print(convert_tokens_tmp)

                # Truncate at the first occurrence of each stop marker (checked
                # in this fixed order, matching the original behavior); the
                # token/prob arrays are re-cut to the surviving text length.
                for stop in ("###", "[UNK]", "</s>"):
                    if stop in tmp:
                        tmp = tmp[:tmp.index(stop)]
                        token_length = len(tokenizer.encode_plus(tmp)["input_ids"][1:-1])
                        convert_tokens_tmp = convert_tokens_tmp[:token_length]
                        probs_tmp = probs_tmp[:token_length]

                # Drop a single leading space (tokenizers commonly emit one)
                # along with its token and probability.
                if len(tmp) > 0 and tmp[0] == " ":
                    tmp = tmp[1:]
                    convert_tokens_tmp = convert_tokens_tmp[1:]
                    probs_tmp = probs_tmp[1:]

                completions[idx]['text'] = tmp
                completions[idx]['tokens'] = convert_tokens_tmp
                completions[idx]['logprobs'] = probs_tmp
                completions[idx]['top_logprobs_dicts'] = [
                    {k: v} for k, v in zip(convert_tokens_tmp, probs_tmp)
                ]

            return {
                "completions": completions,
                "input_length": input_length,
                "model_info": model_info,
            }

    return app

# Build the app and serve it synchronously; a single worker because the model
# weights live in this process's GPU memory and cannot be forked.
app = init_flask()

uvicorn.run(app, host='0.0.0.0', port=server_port, workers=1)


