# -*- coding:utf-8 -*-
import requests
from pprint import pprint
from threading import Lock
import json
# Discover the id of the first served model at import time so requests below
# can reference it. NOTE(review): this performs a network call on import;
# on any failure model_name is left as None instead of crashing the import.
try:
    response = requests.get('http://10.31.3.154:1025/v1/models', timeout=10)
    response.raise_for_status()
    data = response.json()
    model_name = data['data'][0]['id'] if data['data'] else None
except requests.RequestException as exc:
    print(f"Warning: could not fetch model list: {exc}")
    model_name = None

class model_infer:
    """Streaming client for an OpenAI-compatible /v1/chat/completions endpoint.

    The dict ``self.state`` is shared with other threads through
    ``getState()``; every write to it is guarded by ``self.lock``.
    """

    def __init__(self):
        # code / isEnd / message are polled by callers via getState().
        self.state = {"code": 200, "isEnd": False, "message": ""}
        self.init_client()
        self.lock = Lock()
        # Conversation memory — presumably intended for multi-turn history;
        # predict_stream does not use it yet (TODO in original).
        self.memory_list = [{'role': 'system', 'content': 'In this audio, what kind of sound can you hear?'}]

    def init_client(self):
        """Configure the endpoint URL and the default request headers."""
        self.url = "http://10.31.3.154:1025/v1/chat/completions"
        self.headers = {"Content-Type": "application/json"}

    def getState(self):
        """Return a thread-safe snapshot copy of the shared state dict."""
        with self.lock:
            return self.state.copy()

    def predict_stream(self, text):
        """Stream one chat completion, accumulating the answer into
        ``state['message']`` and setting ``state['isEnd']`` when finished.

        ``text`` is currently only used as an empty-input guard; the actual
        prompt is hard-coded below (marked TODO in the original).
        """
        with self.lock:
            self.state['isEnd'], self.state['message'] = False, ""
        if text == "":
            # Fix: mark the (empty) turn finished so pollers of getState()
            # are not left waiting on isEnd forever.
            with self.lock:
                self.state['isEnd'] = True
            return

        # TODO: build the message from `text` / self.memory_list instead of
        # this hard-coded audio prompt.
        self.message = [{
            "role": "user",
            "content": [
                {"type": "text", "text": "In this audio, what kind of sound can you hear?"},
                {"type": "audio_url", "audio_url": "/home/orangepi/audio/sigh.wav"}
            ]
        }]

        data = {
            "model": model_name,
            "messages": self.message,
            "stream": True,
            "presence_penalty": 1.03,
            "frequency_penalty": 1.0,
            "repetition_penalty": 1.0,
            "temperature": 0.5,
            "top_p": 0.95,
            "top_k": -1,
            "seed": 1,
            "stop": ["stop1", "stop2"],
            "stop_token_ids": [2],
            "ignore_eos": False,
            "max_tokens": 1024,
            "tool_choice": "none",
        }
        print(data)
        # Reuse the base headers from init_client instead of rebuilding them.
        headers = dict(self.headers)
        headers.update({"Cache-Control": "no-cache", "Connection": "keep-alive"})
        # Fix: close the response when done — a streaming response holds the
        # socket open until explicitly closed.
        with requests.post(self.url, headers=headers,
                           data=json.dumps(data), stream=True) as response:
            full_s = ""
            print('Answer:', end="")
            for s in response.iter_lines():
                if s in (b'', b'data: [DONE]'):
                    continue
                payload = s.decode()
                # Fix: remove the SSE "data:" *prefix*. The original used
                # str.strip("data:"), which strips a character set from both
                # ends and only worked because JSON starts with '{'.
                if payload.startswith("data:"):
                    payload = payload[len("data:"):]
                token = json.loads(payload.strip())
                # Fix: some chunks (e.g. the initial role delta) carry no
                # 'content' key — the original raised KeyError on them.
                temp_s = token['choices'][0]['delta'].get('content', '')
                if not temp_s:
                    continue
                full_s += temp_s
                print(temp_s, end="")
                with self.lock:
                    self.state['message'] = full_s
        print("")
        # Fix: this write was unlocked in the original, unlike every other
        # state mutation in the class.
        with self.lock:
            self.state['isEnd'] = True


if __name__ == "__main__":
    infer_engine = model_infer()
    while True:
        line = input("Question:")
        infer_engine.predict_stream(line) #流式
        #print(f"Answer:{infer_engine.predict(line)}") #非流式
