import gradio as gr
import os
import time
import json
import requests
# Bypass any configured HTTP proxy for local addresses so Gradio and
# requests can reach services on this machine directly.
os.environ["no_proxy"] = "localhost,127.0.0.1"

from inferemote.atlas_remote import AtlasRemote

# System message prepended to every conversation sent to the model.
system_prompt = "You are a helpful and friendly chatbot"

class Sender(AtlasRemote):
    """Remote-inference client that ships chat messages to an Atlas endpoint
    as UTF-8 JSON and decodes the text reply."""

    def __init__(self, **kwargs):
        # The chat model service listens on a fixed port.
        super().__init__(port=4270, **kwargs)

    def pre_process(self, input):
        # Serialize the message list into a UTF-8 JSON payload for the wire.
        return json.dumps(input).encode('utf-8')

    def post_process(self, result):
        # The remote replies with a sequence; the first element holds the
        # payload, which we decode back into text.
        payload = result[0]
        return str(payload, 'utf-8')

def build_input_from_chat_history(chat_history, msg: str, system: str = None):
    """Build an OpenAI-style message list from the UI chat history.

    Args:
        chat_history: Iterable of (user_msg, ai_msg) pairs from earlier turns.
        msg: The latest user message, appended last.
        system: Optional system prompt; defaults to the module-level
            ``system_prompt`` constant (backward-compatible generalization).

    Returns:
        A list of ``{'role': ..., 'content': ...}`` dicts: one system entry,
        then alternating user/assistant entries, then the new user message.
    """
    if system is None:
        system = system_prompt
    messages = [{'role': 'system', 'content': system}]
    for user_msg, ai_msg in chat_history:
        messages.append({'role': 'user', 'content': user_msg})
        messages.append({'role': 'assistant', 'content': ai_msg})
    messages.append({'role': 'user', 'content': msg})
    return messages

def predict(message, history):
    """Stream a chat reply from the remote model for the Gradio UI.

    Args:
        message: The latest user utterance.
        history: List of (user, assistant) pairs from previous turns.

    Yields:
        The response payload (stringified JSON) each time the remote
        endpoint answers with HTTP 200; stops on any other status code.
    """
    # Fix: the original also built `history + [[message, ""]]` into an
    # unused local (dead code); removed.
    # Format the chat transcript for the model.
    messages = build_input_from_chat_history(history, message)
    sender = Sender()
    # Alternate device address kept for reference:
    # sender.use_remote('192.168.137.30')
    sender.use_remote('192.168.0.10')

    # The remote call returns a URL that we poll for generated text.
    url = sender.inference_remote(messages)
    print("DEBUG url", url)
    while True:
        response = requests.get(url, timeout=60)
        print("DEBUG: ", response.status_code)
        if response.status_code != 200:
            # Any non-200 status signals the stream has finished (or failed).
            break
        yield str(response.json())
        # time.sleep(1)  # throttle to one poll per second if needed
    

# Build the Gradio chat UI and serve it on the LAN address below.
gr.ChatInterface(
    fn=predict,  # streaming handler defined above
    title="Qwen1.5-0.5b-Chat",  # window title
    description="问几个问题",  # description shown above the chat box
    examples=['你是谁？', '介绍一下华为公司'], 
    concurrency_limit = 3  # max concurrent chat sessions
).launch(inbrowser = True, server_name = "192.168.0.10")  # open a browser tab on launch
