
import torch
import os
import argparse
import random
import numpy as np
from pathlib import Path 
import time
import torch.distributed as dist
import json 
import uvicorn, json, datetime
from asgiref.sync import sync_to_async
import requests
import re
from fastapi.responses import StreamingResponse
from megatron.text_generation import generate_and_post_process
from fastapi import FastAPI, Request

# os.environ["CUDA_VISIBLE_DEVICES"] = "0"

def get_tokenizer():
    """Build and return the Aquila tokenizer from the bundled vocab/merges files."""
    from megatron.tokenizer.tokenizer import _AquilaTokenizer

    return _AquilaTokenizer(
        "examples/aquila/tokenizer/vocab.json",
        "examples/aquila/tokenizer/merges.txt",
    )

# Module-level tokenizer state shared by every request handler.
tokenizer = get_tokenizer()
vocab = tokenizer.vocab

# Reverse mapping: token id -> token string (last-seen key wins on duplicate ids).
id2word = {token_id: word for word, token_id in vocab.items()}

# Default conversation preamble (currently unused in this chunk — TODO confirm callers).
header = ("A chat between a curious user and an artificial intelligence assistant. "
          "The assistant gives helpful, detailed, and polite answers to the user's questions. ")


from server.conversation import get_prompt
def make_sft_prompts(prompts):
    """Wrap each raw prompt in the SFT conversation template via ``get_prompt``.

    Args:
        prompts: iterable of raw prompt strings.

    Returns:
        A new list with ``get_prompt`` applied to each element.
    """
    # Idiomatic comprehension instead of the manual append loop.
    return [get_prompt(p) for p in prompts]

def predict(model, prompts, seed, max_length, topk, topp, t, sft):
    """Run generation for one or more prompts and package HELM-style completions.

    Args:
        model: Megatron model handed to ``generate_and_post_process``.
        prompts: a single prompt string or a list of prompt strings.
        seed: random seed forwarded to the sampler.
        max_length: tokens to generate; 0 means "score the prompt only".
        topk: top-k sampling cutoff.
        topp: top-p (nucleus) sampling cutoff.
        t: sampling temperature.
        sft: when True, wrap every prompt in the SFT conversation template.

    Returns:
        ``(completions, input_length)`` — ``completions`` is a list of dicts
        with keys ``text``, ``tokens``, ``logprobs``, ``top_logprobs_dicts``;
        ``input_length`` is the longest tokenized prompt length.
    """
    if not isinstance(prompts, list):
        prompts = [prompts]

    completions = [{} for _ in range(len(prompts))]

    if sft:
        prompts = make_sft_prompts(prompts)

    input_length = max(len(tokenizer.tokenize(p)) for p in prompts)
    try:
        response, _, response_logprobs, _ = \
                        generate_and_post_process(
                        model,
                        prompts=prompts,
                        tokens_to_generate=max_length,
                        return_output_log_probs=True,
                        top_k_sampling=topk,
                        top_p_sampling=topp,
                        top_p_decay=0.0,
                        top_p_bound=0.0,
                        temperature=t,
                        add_BOS=False,
                        use_eod_token_for_early_termination=True,
                        stop_on_double_eol=False,
                        stop_on_eol=False,
                        prevent_newline_after_colon=False,
                        random_seed=seed)
        torch.cuda.empty_cache()

        for i, prompt in enumerate(prompts):
            response_i = response[i]
            response_logprobs_i = response_logprobs[i]

            if max_length != 0:
                # The generator echoes the prompt; strip it by character length.
                response_i = response_i[len(prompt):]

                if response_i.endswith("</s>"):
                    # Drop the EOS marker and its logprob.
                    response_logprobs_i = response_logprobs_i[:-1]
                    response_i = response_i.replace("</s>", "")
                ids = tokenizer.tokenize(response_i)
                # Keep only logprobs for the generated suffix. Guard the empty
                # case explicitly: [-0:] would keep the whole list.
                response_logprobs_i = response_logprobs_i[-len(ids):] if ids else []
            else:
                # Scoring mode: first token has no conditional logprob.
                ids = tokenizer.tokenize(response_i)
                response_logprobs_i = [0] + response_logprobs_i

            # BUG FIX: loop variable renamed from ``t``, which shadowed the
            # temperature parameter of this function.
            convert_tokens = [id2word.get(tok_id, "[unk]") for tok_id in ids]

            print(len(response_logprobs_i), len(convert_tokens))

            completions[i]['text'] = response_i
            completions[i]['tokens'] = convert_tokens
            completions[i]['logprobs'] = response_logprobs_i
            completions[i]['top_logprobs_dicts'] = [
                {tok: lp} for tok, lp in zip(convert_tokens, response_logprobs_i)
            ]
    except Exception as exc:
        # BUG FIX: was a bare ``except:`` that swallowed everything (including
        # KeyboardInterrupt) without reporting the cause. Fall back to echoing
        # the prompt with zero logprobs so the caller still gets a well-formed
        # response.
        print(f"occur a bug, please pay attention to it. return none result.")
        print(exc)
        for i, prompt in enumerate(prompts):
            ids = tokenizer.tokenize(prompt)
            convert_tokens = [id2word.get(tok_id, "[unk]") for tok_id in ids]
            zeros = [0.0] * len(ids)

            completions[i]['text'] = prompt
            completions[i]['tokens'] = convert_tokens
            completions[i]['logprobs'] = zeros
            completions[i]['top_logprobs_dicts'] = [
                {tok: lp} for tok, lp in zip(convert_tokens, zeros)
            ]
        torch.cuda.empty_cache()

    return completions, input_length

def killport(port):
    """Kill whatever process is listening on *port* (root authority is required).

    Best-effort: shells out to ``netstat`` + ``kill -9``; any failure is
    logged and ignored. Sleeps 2s afterwards to let the port be released.
    """
    try:
        # Validate before interpolating into a shell string (avoids injection
        # via a non-numeric port value).
        port_num = int(port)
        command = ("kill -9 $(netstat -nlp | grep :" + str(port_num)
                   + " | awk '{print $7}' | awk -F'/' '{{ print $1 }}')")
        os.system(command)
        print(f"killing {port} is succeed.")
    except Exception:
        # Was a bare except with a dead ``pass`` before the print.
        print(f"{port} no need to kill")
    time.sleep(2)

# Cross-request shutdown state.
previous_data = ""
flag_hm = False
flag_rc = False
# BUG FIX: counter must live at module level — it was an unassigned local in
# stop_signal, so ``unchanged_count += 1`` raised UnboundLocalError.
unchanged_count = 0


def stop_signal(model_info, request_model_name, engine, prompt, server_port):
    """Track end-of-evaluation markers and kill the server when input goes idle.

    After both the "Subjective_Inference_Ending" and "eval_end" markers have
    been seen for this model, 120 consecutive identical prompts are treated
    as the terminate signal and ``killport(server_port)`` is invoked.
    """
    global flag_rc, flag_hm, previous_data, unchanged_count
    if model_info == request_model_name and engine == "####Subjective_Inference_Ending####":
        flag_hm = True
    if model_info == request_model_name and engine == "####eval_end####":
        flag_rc = True

    if flag_hm and flag_rc:
        if prompt == previous_data:
            unchanged_count += 1
        else:
            unchanged_count = 0
        print(f"Input unchanged for {unchanged_count} consecutive times.")
        if unchanged_count >= 120:
            print("Ternimating Singal Confirmed.")
            print("Input unchanged for 120 consecutive times. Terminating the server.")
            killport(server_port)
    previous_data = prompt

class UvicornServer:
    """Serve ``predict`` over HTTP (FastAPI + uvicorn) at POST /batch_func."""

    def __init__(self, model, server_port, model_info="aquila-33b") -> None:
        self.model = model
        self.server_port = server_port
        self.model_info = model_info

    def init_flask(self):
        """Build and return the FastAPI app exposing the batch endpoint."""
        from fastapi import FastAPI, Request

        app = FastAPI()

        @app.post("/batch_func")
        async def get_generate_h(request: Request):
            raw_payload = await request.json()
            # NOTE(review): payload appears to be a double-encoded JSON string,
            # hence the extra decode — confirm against the client side.
            config = json.loads(raw_payload)

            print("request come in")
            contexts = config.get("prompt", "")
            prompts = config.get("prompts", [])

            # A non-empty "prompts" list takes precedence over "prompt".
            if len(prompts) != 0:
                contexts = prompts
            else:
                assert contexts != ""

            topk = config.get("top_k_per_token", 20)
            # topk = 0
            topp = config.get("top_p", 0.9)
            t = config.get("temperature", 0.9)
            seed = config.get("seed", 123)
            sft = config.get("sft", False)

            ## determine if we need to stop the server
            stop_signal(self.model_info,
                        request_model_name=config.get("model_name", ""),
                        engine=config.get("engine", ""),
                        prompt=contexts,
                        server_port=self.server_port)

            started = time.time()
            res, input_length = await sync_to_async(predict)(
                self.model, contexts, seed,
                max_length=config['max_new_tokens'],
                topk=topk, topp=topp, t=t, sft=sft)
            finished = time.time()
            print(f"computing time is {finished - started}")
            print(self.model_info)

            return {
                "completions": res,
                "input_length": input_length,
                "model_info": self.model_info,
            }

        return app

    def run(self):
        """Run a single-worker uvicorn server bound to all interfaces."""
        app = self.init_flask()
        uvicorn.run(app, host='0.0.0.0', port=self.server_port, workers=1)


