import requests
import json
from datetime import datetime
import time
import hashlib
import base64
from uuid import uuid4

# header 构造
def gen_proxy_header(
        appid,
        appKey,
        capabilityname,
    ):
    """Build the authentication headers required by the proxy gateway.

    Args:
        appid: application id issued by the gateway.
        appKey: shared secret used to sign the request.
        capabilityname: capability name; padded with '0' to 24 chars
            inside the csid (assumes len(capabilityname) <= 24 — TODO confirm).

    Returns:
        dict with Content-Type, X-Server-Param (urlsafe-base64 JSON of
        appid/csid), X-CurTime (unix seconds) and X-CheckSum
        (md5 of appKey + curtime + X-Server-Param).
    """
    # csid = appid + capability + zero padding to 24 + 32-hex request nonce
    nonce = uuid4().hex
    padding = "0" * (24 - len(capabilityname))
    csid = f"{appid}{capabilityname}{padding}{nonce}"
    server_param = base64.urlsafe_b64encode(
        json.dumps({"appid": appid, "csid": csid}).encode()
    ).decode()
    now = int(time.time())
    # Signature the gateway verifies: md5(appKey || curtime || server_param)
    digest = hashlib.md5(f"{appKey}{now}{server_param}".encode("utf-8")).hexdigest()
    return {
        "Content-Type": "application/json;charset=UTF-8",
        "X-Server-Param": server_param,
        "X-CurTime": str(now),
        "X-CheckSum": digest,
    }

def openai_api_streaming(headers, base_url, api_key, model, messages, stream=True, enable_thinking=True):
    """POST a chat-completion request to an OpenAI-compatible endpoint.

    Args:
        headers: base headers; mutated in place to add the Bearer token.
        base_url: full URL of the /v1/chat/completions endpoint.
        api_key: bearer token for the Authorization header.
        model: model name.
        messages: OpenAI-style message list.
        stream: if True, consume the SSE stream and print tokens as they
            arrive; otherwise parse a single JSON response.
        enable_thinking: soft switch for the model's reasoning ("thinking")
            mode, forwarded via chat_template_kwargs.

    Returns:
        The generated text (both streaming and non-streaming paths).

    Raises:
        ValueError: if the HTTP status is not 200.
    """
    temperature = 0.5
    max_tokens = 50
    headers["Authorization"] = "Bearer " + api_key
    data = {
        "model": model,
        "messages": messages,
        "max_tokens": max_tokens,
        "temperature": temperature,
        "stream": stream,
        # soft switch for the reasoning/"thinking" mode
        "chat_template_kwargs": {"enable_thinking": enable_thinking}
    }
    st_time = time.time()
    response = requests.post(base_url, headers=headers, json=data, stream=stream, timeout=100)
    if response.status_code != 200:
        raise ValueError(f"Failed to generate response: {response.text}")
    if stream:
        result = []
        print(datetime.now())
        first_token = True
        for chunk in response.iter_lines():
            # SSE lines look like b"data: {...}"; skip keep-alives and the
            # b"data: [DONE]" terminator (chunk[5:] drops the b"data:" prefix)
            if chunk and chunk[5:] != b" [DONE]":
                chunk = json.loads(chunk[5:])
                if "choices" in chunk and len(chunk["choices"]) > 0:
                    if first_token:
                        # time to first token
                        print(time.time() - st_time)
                        first_token = False
                    choice = chunk["choices"][0]
                    if choice["delta"]:
                        result.append(choice["delta"].get("content", ""))
                        print(result[-1], end="", flush=True)
                if "usage" in chunk:
                    print()
                    print(chunk["usage"])
                    print(time.time() - st_time)
        print()
        print(datetime.now())
        # BUG FIX: the original collected the streamed deltas but never
        # returned them (implicitly returned None). Join and return the text;
        # filter out None deltas (e.g. reasoning-only chunks carry content=None).
        return "".join(piece for piece in result if piece)
    # Non-streaming: single JSON payload with the full message.
    result = json.loads(response.content)['choices'][0]['message']['content']
    print(result)
    return result




# openai_api_streaming(
#     headers,
#     base_url=base_url,
#     api_key=api_key, 
#     model=model, 
#     messages=messages,
#     stream=False,
#     enable_thinking=False
#     )
class Qwen3Client:
    """Thin convenience wrapper around `openai_api_streaming`.

    Stores the connection parameters once so callers only need to pass
    the conversation messages to `chat`.
    """

    def __init__(self, headers, base_url, api_key, model, messages):
        # Connection/auth parameters reused on every call.
        self.headers = headers
        self.base_url = base_url
        self.api_key = api_key
        self.model = model
        # Kept for reference; `chat` uses the messages passed to it.
        self.messages = messages

    def chat(self, messages):
        """Send *messages* non-streaming with thinking disabled; return the text."""
        return openai_api_streaming(
            self.headers,
            base_url=self.base_url,
            api_key=self.api_key,
            model=self.model,
            messages=messages,
            stream=False,
            enable_thinking=False,
        )
if __name__ == "__main__":
    # Panzhi gateway auth parameters.
    # NOTE(review): credentials are hard-coded; consider reading them from
    # the environment instead of committing them to source.
    appid = "dsjyybai"
    appKey = "bb9c5100caf0f84e22d88451e94ce2bb"
    capabilityname = "llmm"
    headers = gen_proxy_header(appid, appKey, capabilityname)

    # Endpoint URL: 10.217.247.48 serves the office network / call pool;
    # other resource pools use their own IPs.
    base_url = "http://10.217.247.48:9050/llmm-prod/v1/chat/completions"
    api_key = "cs-baac3b82"  # "chatbi"
    model = "qwen3-32b-hc"

    messages = [{"role": "user","content": "hello, 介绍以下你自己"}]
    client = Qwen3Client(headers, base_url, api_key, model, messages)
    resp = client.chat(messages)