import json
import os

import requests
import torch
from transformers import AutoTokenizer
# from vllm import LLM, SamplingParams

from .llm_serve import LLMBase


class VllmModel(LLMBase):
    """Chat backend that answers prompts via the AIEarth OpenAI-compatible
    streaming (SSE) chat-completions HTTP API.

    NOTE(review): the original local vLLM inference path (tokenizer,
    SamplingParams, tensor-parallel LLM engine) and a ZhipuAI variant were
    commented out in favor of this remote API call; that dead code has been
    removed.  The constructor arguments are retained for interface
    compatibility but are currently unused.
    """

    def __init__(self, model_path='', history=False):
        # No local model is loaded any more; generation is delegated to the
        # remote endpoint in chat_with_aiearth().  Arguments are kept so
        # existing callers keep working.
        pass

    def chat_with_aiearth(self, content):
        """Send ``content`` to the AIEarth chat-completions endpoint and
        consume the streamed reply.

        Args:
            content: The user prompt string.

        Returns:
            The full assistant reply assembled from the streamed deltas,
            or '' if nothing was received.

        Raises:
            requests.HTTPError: if the endpoint returns an error status.
        """
        url = "https://api-s1.aiearth.dev/v1/chat/completions"
        # SECURITY(review): a hard-coded API key was committed to source
        # control — rotate it and move it entirely into configuration.  The
        # environment variable takes precedence; the literal remains only as
        # a backward-compatible fallback.
        api_key = os.environ.get(
            "AIEARTH_API_KEY",
            "sk-k1pYffA3tew730yv3ZIBTIm4wOzWYWytW8TYOgyMTWSwoe4Y",
        )

        headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
            "Accept": "text/event-stream",  # key: declare we accept an SSE event stream
            "Cache-Control": "no-cache",
        }

        payload = {
            "max_tokens": 1024,
            "model": "qwen-max-latest",
            "temperature": 0.5,
            "top_p": 1,
            "presence_penalty": 0,
            "frequency_penalty": 0,
            "messages": [
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": content},
            ],
            "stream": True,  # enable the streaming response
        }

        full_response = ""
        # stream=True keeps the connection open so SSE events can be read
        # incrementally; the `with` block guarantees the connection is
        # released even if parsing fails part-way through.
        with requests.post(url, headers=headers, json=payload, stream=True) as response:
            # Fail fast on HTTP errors instead of trying to parse an error
            # body as an event stream.
            response.raise_for_status()
            for raw_line in response.iter_lines():
                if not raw_line:
                    continue
                decoded_line = raw_line.decode('utf-8')
                if not decoded_line.startswith('data:'):
                    continue
                # Strip the SSE "data:" prefix whether or not a space
                # follows it.  The original `.replace('data: ', ...)` missed
                # the no-space variant, so those events failed JSON parsing
                # and were silently dropped.
                data_str = decoded_line[len('data:'):].strip()
                if data_str == '[DONE]':
                    print("\n[Stream completed]")
                    break
                try:
                    data = json.loads(data_str)
                except json.JSONDecodeError:
                    continue  # ignore malformed / keep-alive lines
                if 'choices' in data:
                    # Use a distinct name — the original shadowed the
                    # `content` parameter here.
                    delta_text = data['choices'][0]['delta'].get('content', '')
                    print(delta_text, end='', flush=True)  # echo tokens as they arrive
                    full_response += delta_text

        return full_response

    def chat(self, input, history=None):
        """Generate a reply for ``input`` via the remote API.

        Args:
            input: The prompt string.  (Name kept for interface
                compatibility even though it shadows the builtin.)
            history: Accepted for interface compatibility but ignored.
                The mutable default ``[]`` was replaced by ``None`` to avoid
                the shared-mutable-default pitfall; behavior is unchanged
                because the value is never read.

        Returns:
            The generated reply, or '' when the stream produced nothing.
        """
        print(f"Prompt: {input!r}")
        print(f"\nGenerated text: \n")
        outputs = self.chat_with_aiearth(input)
        return outputs if outputs else ''


if __name__ == '__main__':
    # NOTE(review): leftover debug stub — assigns a value but never
    # constructs VllmModel or runs a chat; presumably a remnant of the
    # multi-GPU vLLM setup that was commented out above.  Confirm whether
    # this entry point is still needed.
    tensor_parallel_size = 2