import time
import logging
import json
import requests
import uuid
import traceback
import sseclient
import os
import model_config

from transformers import AutoTokenizer

# Load the tokenizer from a local directory.
# NOTE(review): the original comment said "BERT", but the path points at a
# DeepSeek-R1 tokenizer -- the directory contents are what actually matters.
tokenizer = AutoTokenizer.from_pretrained('./deepseek-r1-tokenizer/')


def get_tokens(text: str):
    """Tokenize *text* and return its token ids (no special tokens added)."""
    return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))


def get_tokens_from_messages(messages):
    """Render *messages* through the model's chat template and return the token ids."""
    return tokenizer.apply_chat_template(messages)


def modify_list_with_uuid(original_list, num_prefixed=2):
    """Return a copy of *original_list* with a random UUID prepended to the
    'content' of the first *num_prefixed* messages.

    Used to defeat prefix-cache hits when benchmarking: a fresh UUID makes
    every prompt unique.  Neither the input list nor its dicts are mutated
    (each item is shallow-copied).

    Args:
        original_list: List of message dicts, each with a 'content' key.
        num_prefixed: How many leading messages to prefix (default 2,
            matching the original behavior of touching indexes 0 and 1).

    Returns:
        A new list of (shallow-copied) message dicts.
    """
    new_list = []
    for index, item in enumerate(original_list):
        new_item = item.copy()
        if index < num_prefixed:
            new_item["content"] = f"{uuid.uuid4()} {new_item['content']}"
        new_list.append(new_item)
    return new_list

class ChatService(object):
    """Thin client for a streaming (SSE) chat-completion endpoint."""

    def __init__(self,
                 url,
                 model,
                 api_key='<API_KEY>',
                 wsid="11294",
                 config=None) -> None:
        """
        Args:
            url: Chat-completion endpoint URL.
            model: Model name sent in the request payload.
            api_key: Bearer token placed in the Authorization header.
            wsid: Workspace-id header value; an empty string routes
                send_stream to the lkeap variant.
            config: Optional dict; recognized keys: 'use_uuid', 'max_tokens'.
        """
        self.url = url
        self.model = model
        self.headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {api_key}",
            "Wsid": wsid,
        }
        self.wsid = wsid
        self.config = config or {}

    def send_stream(self, messages, output_callback=None):
        # Only the lkeap path is implemented; any non-empty wsid falls
        # through and returns None.
        if self.wsid == '':
            return self.send_stream_lkeap(messages, output_callback)

    def send_stream_lkeap(self, messages, output_callback=None):
        """Stream *messages* to the endpoint and collect the full reply.

        Returns:
            Tuple of (content, first_token_ms, total_ms, prompt_tokens,
            completion_tokens, query_id, token_per_second).

        Raises:
            Exception: on a non-200 HTTP status, or when zero completion
                tokens were produced.
        """
        # Prefix the prompt with a UUID to avoid prefix-cache hits.
        if int(self.config.get('use_uuid', 0)) == 1:
            messages = modify_list_with_uuid(messages)

        # Streaming request.
        enable_stream = True
        start_perf_counter = time.perf_counter()
        query_id = "test_" + str(uuid.uuid4()).replace('-', '')
        json_data = {
            "model": self.model,
            "query_id": query_id,
            "messages": messages,
            "temperature": 0.5,
            "top_p": 0.9,
            "top_k": 40,
            "repetition_penalty": 1,
            "max_tokens": int(self.config.get('max_tokens', 4096)),
            "max_input_seq_len": 64000,
            "stream": enable_stream,
            "random_seed": 1234,
        }

        response = requests.post(self.url, headers=self.headers, json=json_data, stream=enable_stream)
        prompt_text = json.dumps(messages, ensure_ascii=False)
        logging.info("## request: url %s %s response.status_code %s", self.url, self.model, response.status_code)
        logging.info("## prompt_text length: %s, prompt_text[0:300] : %s", len(prompt_text), prompt_text[:300])
        if response.status_code != 200:
            raise Exception(f'status_code: {response.status_code}')
        content = ''
        index = 0
        token_info = {}
        # When hitting an sglang server directly no token counts come back,
        # so count prompt tokens locally with the tokenizer.
        token_info['prompt_tokens'] = len(get_tokens_from_messages(messages))
        first_time = 0
        status = 'init'
        client = sseclient.SSEClient(response)
        for event in client.events():
            if index == 0:
                # Time to first event, in milliseconds.
                first_time = (time.perf_counter() - start_perf_counter) * 1000
            index += 1
            if '[DONE]' in event.data:
                break
            chunk = json.loads(event.data)
            try:
                if chunk['object'] == 'stream_server.event':
                    # Server-side "thinking" state transitions are mapped
                    # onto <think>/</think> markers in the collected text.
                    chunk_event = chunk.get('event')
                    if chunk_event.get('name', '') == 'thinking' and chunk_event.get('state', -1) == 0:
                        delta = '<think>'
                        content += delta
                        if output_callback:
                            output_callback(delta)
                    elif chunk_event.get('name', '') == 'thinking' and chunk_event.get('state', -1) == 2:
                        delta = '</think>'
                        content += delta
                        if output_callback:
                            output_callback(delta)
                    continue
                elif chunk['object'] == 'chat.completion.chunk':
                    delta = chunk['choices'][0]['delta']
                    reasoning_content = delta.get('reasoning_content') or ''
                    output_content = delta.get('content') or ''
                    # Small state machine: init -> thinking -> output,
                    # wrapping reasoning output in <think> tags.
                    if reasoning_content != '' and status == 'init':
                        content += '<think>'
                        status = 'thinking'
                    if reasoning_content != '':
                        content += reasoning_content
                    if output_content != '' and status == 'thinking':
                        content += '</think>'
                        status = 'output'
                    if output_content != '':
                        content += output_content
                    # NOTE(review): this passes the full accumulated content,
                    # while the event branch above passes only the delta --
                    # confirm which one the callback actually expects.
                    if output_callback:
                        output_callback(content)
                    if chunk.get('usage'):
                        token_info['completion_tokens'] = chunk['usage']['completion_tokens']
            except Exception as e:
                print(f'error: {e}, {traceback.format_exc()}')
        # Recount completion tokens locally for servers that do not report
        # usage; this intentionally overrides any server-reported count.
        token_info['completion_tokens'] = len(get_tokens(content))
        all_time = (time.perf_counter() - start_perf_counter) * 1000
        token_time = all_time - first_time
        token_info['token_per_second'] = token_info['completion_tokens'] / token_time * 1000 if token_time > 0 else None
        # Bug fix: the original interpolated the *builtin* `id` function here
        # (the local `id_` was assigned but never populated); log query_id.
        logging.info(
            "id: %s, message: %s, content: %s, token_info:%s , first_time: %s, all_time: %s, token_per_second: %s",
            query_id, messages, content, token_info, first_time, all_time, token_info['token_per_second'])
        if token_info['completion_tokens'] == 0:
            raise Exception("completion_tokens is 0!")
        return content, first_time, all_time, token_info['prompt_tokens'], token_info['completion_tokens'], query_id, token_info['token_per_second']

    def send_oflline(self, messages):
        # Non-streaming call.
        # NOTE(review): `self.client` is never assigned in __init__, so this
        # raises AttributeError as written -- presumably an OpenAI-style
        # client was meant to be injected; confirm before relying on this.
        completion = self.client.chat.completions.create(
            model=self.model,
            messages=messages
        )

        print(completion)


if __name__ == "__main__":

    model_name = model_config.model_name
    url = model_config.url
    api_key = model_config.api_key
    # service = ChatService(url=url, model=model_name, api_key=api_key, wsid='11294')
    service = ChatService(url=url, model=model_name, api_key=api_key, wsid='')


    def output_callback(content):
        print(content)


    with open('yuanbao_eval-3k.ndjson', 'r', encoding='utf8') as f_mix:
        for line in f_mix.readlines():
            line_json = json.loads(line)
            messages = line_json['messages']
            rsp = service.send_stream(messages=messages, output_callback=output_callback)
            print('')
            line_json['content'] = rsp[0]
            line_json['first_token_ms'] = rsp[1]
            line_json['total_ms'] = rsp[2]
            line_json['prompt_tokens'] = rsp[3]
            line_json['completion_tokens'] = rsp[4]
            line_json['query_id'] = rsp[5]
            print(line_json)
