import time
from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED, FIRST_COMPLETED, FIRST_EXCEPTION, as_completed
import json
import requests
import argparse
import random
import numpy as np

from mindformers.tools.logger import logger
# from research.telechat2.telechat_tokenizer import TelechatTokenizer
from mindformers.models.llama.llama_tokenizer_fast import LlamaTokenizerFast


# Global counter of finished requests; incremented by the result loop in __main__.
CompletedProgress = 0

def init_tokenizer(model_path="./tokenizer.model"):
    """Build the fast LLaMA tokenizer from *model_path*.

    The same path is deliberately passed twice (vocab file and tokenizer
    file positional arguments), matching the existing call convention.
    """
    # tokenizer = TelechatTokenizer(model_path, fast_tokenizer=True, trust_remote_code=True)
    return LlamaTokenizerFast(
        model_path,
        model_path,
        unk_token='<unk>',
        bos_token='<｜begin▁of▁sentence｜>',
        eos_token='<｜end▁of▁sentence｜>',
        fast_tokenizer=True,
        trust_remote_code=True,
    )

def get_text_token_num(tokenizer, text):
    """Return how many tokens *tokenizer* produces for *text*."""
    return len(tokenizer.tokenize(text))

class LlmClient:
    """Minimal HTTP client for a MindIE text-generation endpoint.

    Sends single-shot (non-streaming) generation requests with randomized
    sampling parameters; used by the serving stress test in __main__.
    """

    def __init__(self, port):
        """Build the endpoint URLs and default headers for *port*.

        Args:
            port: TCP port of the serving endpoint.
        """
        # NOTE(review): the two hosts differ ("10.127.18.247" vs "127.18.247",
        # which is not a valid IPv4 address). url_mindie_chat is currently
        # unused, so the string is left as-is — confirm the intended host
        # before using the chat endpoint.
        self.url_mindie = f"http://10.127.18.247:{port}/generate"
        self.url_mindie_chat = f"http://127.18.247:{port}/v1/chat/completions"
        self.headers_mindie = {
            "Accept": "application/json",
            "Content-type": "application/json"
        }

    def send_mindie_request(self, prompt, output, timeout=None):
        """POST one generation request with randomized sampling parameters.

        Args:
            prompt: input text sent to the model.
            output: reference completion; its token count caps max_new_tokens.
            timeout: optional requests timeout in seconds. Defaults to None
                (block forever), preserving the original behavior.

        Returns:
            dict with the prompt, input/output token counts, generated text
            and wall-clock latency on success; None on a non-200 response.
        """
        do_sample = (random.random() < 0.5)
        repetition_penalty = random.uniform(1, 10)
        return_full_text = (random.random() < 0.5)
        temperature = random.uniform(0, 10)
        # int(): np.random.randint returns a numpy integer, which
        # json.dumps cannot serialize (TypeError).
        top_k = int(np.random.randint(0, 100))
        top_p = random.random()
        data = {
            "inputs": prompt,
            "parameters": {
                "do_sample": do_sample,
                # Cap generation at the reference answer's token length.
                "max_new_tokens": get_text_token_num(Tokenizer, output),
                "repetition_penalty": repetition_penalty,
                "return_full_text": return_full_text,
                "temperature": temperature,
                "top_k": top_k,
                "top_p": top_p
            },
            "stream": False
        }

        time_start = time.time()
        logger.info(f"input: {prompt}")
        res = requests.post(self.url_mindie, data=json.dumps(data),
                            headers=self.headers_mindie, timeout=timeout)
        res_time = round(time.time() - time_start, 2)

        # Check the status code BEFORE parsing the body: an error response
        # may carry a non-JSON body, and the original code parsed first and
        # could raise before ever reaching this branch.
        if res.status_code != 200:
            logger.info(f"============status code is {res.status_code} ================")
            logger.info(f"request: {data}")
            logger.info(f"res.text: {res.text}")
            return None

        generate_text = json.loads(res.text).get("generated_text")
        if generate_text == "":
            logger.info("++++++++++ output is null +++++++++++")
            logger.info(f"request: {data}")
            logger.info(f"res.text: {res.text}")
        else:
            logger.info(f"answer: {generate_text}")
        return {"prompt": prompt,
                "input_token_num": get_text_token_num(Tokenizer, prompt),
                "output": generate_text,
                "output_token_num": get_text_token_num(Tokenizer, generate_text),
                "res_time": res_time}


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='test serving performance')
    parser.add_argument("-C", "--concurrency", required=False, default=1, type=int)
    parser.add_argument("-P", "--port", required=False, default=1025, type=int)
    parser.add_argument("-S", "--seq_len", required=False, default=8192, type=int)
    parser.add_argument("-V", "--vocab_file", required=False, default="/deepseek3/DeepSeeK-R1-bf16/tokenizer.json", type=str)
    parser.add_argument("-D", "--data", required=False, default="/deepseek3/merge_data.json", type=str)
    args = parser.parse_args()

    Tokenizer = init_tokenizer(args.vocab_file)

    with open(args.data) as f:
        merge_data = json.loads(f.read())
    logger.info(len(merge_data))
    # logging uses %-style formatting, not print-style varargs; the original
    # logger.info("merge_data[0]:", merge_data[0]) dropped the second argument.
    logger.info(f"merge_data[0]: {merge_data[0]}")

    client = LlmClient(port=args.port)
    max_workers = args.concurrency
    all_task = []
    time_s = time.time()
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        logger.info("----test start----")
        # Submit each eligible sample exactly once. (The original wrapped
        # this in `while True`, resubmitting the whole dataset forever,
        # re-iterating already-completed futures every pass, and making the
        # final summary below unreachable.)
        for dataset in merge_data:
            prompt = str(dataset["input"])
            answer = str(dataset["output"])
            prompt_tokens = get_text_token_num(Tokenizer, prompt)
            answer_tokens = get_text_token_num(Tokenizer, answer)
            # Skip samples that would exceed the serving sequence budget.
            if (prompt_tokens + answer_tokens) <= args.seq_len and prompt_tokens <= 4096:
                all_task.append(pool.submit(client.send_mindie_request, prompt, answer))

        # Dedicated accumulators: the original reused the per-sample filter
        # variables, so the running totals started from the last sample's
        # counts instead of zero.
        input_tokens = 0
        output_tokens = 0
        for future in as_completed(all_task):
            task_result = future.result()
            if task_result:
                input_tokens += task_result["input_token_num"]
                output_tokens += task_result["output_token_num"]

            tmp_time = time.time() - time_s
            now = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
            logger.info(f"*************{task_result}***************")
            logger.info(f"{now} test Progress --> {CompletedProgress}/{len(all_task)}")
            logger.info(f"{now} run_time: {tmp_time}, input_tokens: {input_tokens}, output_tokens: {output_tokens}")
            if tmp_time > 0:  # guard against ZeroDivisionError on an instant first result
                logger.info(f"{now} TPS is {output_tokens / tmp_time} tokens/s")
            CompletedProgress += 1

    task_time = time.time() - time_s
    logger.info("-----complete-----")
    now = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    logger.info(f"{now} run_time: {task_time}, input_tokens: {input_tokens}, output_tokens: {output_tokens}")
    if task_time > 0:
        logger.info(f"{now} TPS is {output_tokens / task_time} tokens/s")