import os
import requests
import json
from dotenv import load_dotenv
import gradio as gr

# Disable Gradio's analytics requests
os.environ['GRADIO_ANALYTICS_ENABLED'] = 'False'

# Load environment variables from the .env file
load_dotenv()

# Read API keys and App IDs from the environment
API_KEY = os.getenv("API_KEY")
SECRET_KEY = os.getenv("SECRET_KEY")
XUNFEI_API_KEY = os.getenv("XUNFEI_API_KEY")
XUNFEI_SECRET_KEY = os.getenv("XUNFEI_SECRET_KEY")
QIANWEN_API_KEY = os.getenv("QIANWEN_API_KEY")
QIANWEN_APP_ID = os.getenv("QIANWEN_APP_ID")


def _mask_secret(value):
    """Return a redacted form of a credential for safe logging.

    Shows only the last 4 characters so an operator can tell which key
    is loaded without the full secret ending up in console scrollback
    or log files.
    """
    if not value:
        return None
    return "****" + value[-4:]


# SECURITY: never print raw credentials; log a masked form instead.
print(f"API_KEY: {_mask_secret(API_KEY)}")
print(f"SECRET_KEY: {_mask_secret(SECRET_KEY)}")
print(f"XUNFEI_API_KEY: {_mask_secret(XUNFEI_API_KEY)}")
print(f"XUNFEI_SECRET_KEY: {_mask_secret(XUNFEI_SECRET_KEY)}")
print(f"QIANWEN_API_KEY: {_mask_secret(QIANWEN_API_KEY)}")
print(f"QIANWEN_APP_ID: {QIANWEN_APP_ID}")

# Fail fast at startup if any required credential is missing.
if not all([API_KEY, SECRET_KEY, XUNFEI_API_KEY, XUNFEI_SECRET_KEY, QIANWEN_API_KEY, QIANWEN_APP_ID]):
    raise EnvironmentError("请在环境变量中设置所有必要的API keys和App IDs。")

# Shared conversation history (list of {"role", "content"} dicts),
# mutated by aggregate_models and reset by the UI's reset button.
message_history = []


# Fetch the OAuth Access Token once at startup
def get_access_token(api_key, secret_key, timeout=10):
    """Fetch a Baidu OAuth access token via the client-credentials grant.

    Args:
        api_key: Baidu API key (sent as ``client_id``).
        secret_key: Baidu secret key (sent as ``client_secret``).
        timeout: Request timeout in seconds. New optional parameter with a
            default, so existing callers are unaffected; without it a
            stalled connection would block forever.

    Returns:
        The access token string.

    Raises:
        Exception: If the token endpoint returns a non-200 status.
        requests.exceptions.RequestException: On network errors/timeouts.
    """
    url = "https://aip.baidubce.com/oauth/2.0/token"
    params = {
        'grant_type': 'client_credentials',
        'client_id': api_key,
        'client_secret': secret_key
    }
    response = requests.get(url, params=params, timeout=timeout)
    if response.status_code == 200:
        token_info = response.json()
        return token_info['access_token']
    else:
        raise Exception(f"Error getting access token: {response.text}")


# Fetch a single access token at import time; reused by call_wenxin_ai.
# NOTE(review): this is a network call as a module-level side effect, so
# importing this file requires connectivity — confirm that is intended.
# NOTE(review): the print below writes the live token to stdout; consider
# masking it if logs are shared.
access_token = get_access_token(API_KEY, SECRET_KEY)
print("Access Token:", access_token)


def call_wenxin_ai(messages, temperature, max_tokens, timeout=60):
    """Call Baidu's ERNIE (Wenxin Yiyan) chat-completion API, non-streaming.

    Args:
        messages: List of ``{"role", "content"}`` dicts — the full
            conversation so far.
        temperature: Sampling temperature, forwarded to the API.
        max_tokens: Upper bound on generated tokens, forwarded to the API.
        timeout: Request timeout in seconds. New optional parameter with a
            default, so existing callers are unaffected; without it a
            stalled request would hang the UI indefinitely.

    Returns:
        The model's reply text, or a human-readable error string on failure.
    """
    api_url = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions_pro"
    headers = {
        "Content-Type": "application/json"
    }
    # This endpoint authenticates via the module-level token fetched at startup.
    params = {
        'access_token': access_token
    }
    payload = {
        "messages": messages,
        "temperature": temperature,
        "max_tokens": max_tokens
    }

    try:
        response = requests.post(api_url, params=params, headers=headers,
                                 json=payload, timeout=timeout)
        response.raise_for_status()
        result = response.json()
        print("Wenxin AI response:", result)
        return result.get("result", "No result in response")
    except requests.exceptions.RequestException as e:
        # Surface the failure as text so the chat UI shows it instead of crashing.
        print(f"Wenxin AI API call error: {str(e)}")
        return f"API call failed with error: {str(e)}"

def call_spark_ai_stream(messages, temperature, max_tokens, timeout=60):
    """Call iFlytek's Spark chat-completion API.

    NOTE: despite the "stream" in the name (kept for caller compatibility),
    this performs one blocking request and returns the complete reply.

    Args:
        messages: List of ``{"role", "content"}`` dicts — the conversation.
        temperature: Sampling temperature, forwarded to the API.
        max_tokens: Upper bound on generated tokens, forwarded to the API.
        timeout: Request timeout in seconds. New optional parameter with a
            default, so existing callers are unaffected; without it a
            stalled request would hang the UI indefinitely.

    Returns:
        The model's reply text, or a human-readable error string on failure.
    """
    api_url = "https://spark-api-open.xf-yun.com/v1/chat/completions"
    headers = {
        "Content-Type": "application/json",
        # Spark's HTTP API authenticates with "key:secret" as a bearer token.
        "Authorization": f"Bearer {XUNFEI_API_KEY}:{XUNFEI_SECRET_KEY}"
    }
    payload = {
        "model": "generalv3.5",
        "messages": messages,
        "temperature": temperature,
        "max_tokens": max_tokens
    }

    try:
        response = requests.post(api_url, headers=headers, json=payload,
                                 timeout=timeout)
        response.raise_for_status()
        result = response.json()
        print("Spark AI response:", result)
        if result.get("choices"):
            return result["choices"][0]["message"]["content"]
        else:
            return "API返回的结果为空，可能是请求参数有误或者服务暂时不可用。"
    except requests.exceptions.RequestException as e:
        # Surface the failure as text so the chat UI shows it instead of crashing.
        print(f"Spark AI API call error: {str(e)}")
        return f"API call failed with error: {str(e)}"

def _parse_qwen_sse(raw_text):
    """Parse a raw DashScope SSE response body into a list of JSON payloads.

    Events are separated by blank lines; each event's ``data:`` line carries
    one JSON document. Malformed events are logged and skipped.
    """
    final_results = []
    for event in raw_text.strip().split("\n\n"):
        try:
            for line in event.split("\n"):
                if line.startswith("data:"):
                    json_data = line[len("data:"):]
                    final_results.append(json.loads(json_data))
        except json.JSONDecodeError as e:
            print(f"JSON Decode Error in event: {str(e)}")
            print("Event data:", event)
    return final_results


def call_qwen_max_stream(messages, temperature, max_tokens, timeout=60):
    """Call Alibaba's Qwen (DashScope) text-generation API.

    The server replies with Server-Sent Events, but the whole stream is read
    in one blocking request and reassembled into a single string before
    returning — there is no incremental delivery to the caller.

    Args:
        messages: List of ``{"role", "content"}`` dicts — the conversation.
        temperature: Sampling temperature, forwarded to the API.
        max_tokens: Upper bound on generated tokens, forwarded to the API.
        timeout: Request timeout in seconds. New optional parameter with a
            default, so existing callers are unaffected; without it a
            stalled request would hang the UI indefinitely.

    Returns:
        The model's reply text, or a human-readable error string on failure.
    """
    api_url = "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation"
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {QIANWEN_API_KEY}",
        # Ask DashScope to answer with SSE events.
        "X-DashScope-SSE": "enable"
    }
    payload = {
        "model": "qwen2-72b-instruct",
        "input": {
            "messages": messages
        },
        "parameters": {
            "temperature": temperature,
            "max_tokens": max_tokens
        }
    }

    try:
        # Debug output. SECURITY: redact the bearer token instead of
        # printing the real Authorization header.
        print("Sending request to Qwen AI")
        print("URL:", api_url)
        print("Headers:", {**headers, "Authorization": "Bearer ****"})
        print("Payload:", json.dumps(payload, indent=4))

        response = requests.post(api_url, json=payload, headers=headers,
                                 timeout=timeout)

        print("Response status code:", response.status_code)
        response.encoding = 'utf-8'

        if response.status_code == 200:
            # Parse every SSE event in the response body.
            final_results = _parse_qwen_sse(response.text)
            print("All parsed results:", json.dumps(final_results, indent=4))

            # An event whose "finish_reason" is "stop" carries the complete
            # final answer; otherwise fall back to concatenating the chunks.
            full_text = ""
            for result in final_results:
                if result.get("output", {}).get("finish_reason") == "stop":
                    full_text = result.get("output", {}).get("text", "")
                    break
                else:
                    full_text += result.get("output", {}).get("text", "")

            return full_text
        else:
            print("Non-OK HTTP status code received:", response.status_code, response.text)
            return "Failed with HTTP status: " + str(response.status_code)

    except requests.exceptions.RequestException as e:
        # Network/HTTP-level failures: surface as text for the chat UI.
        print(f"Qwen AI API call error: {str(e)}")
        return f"API call failed with error: {str(e)}"
    except Exception as e:
        # Last-resort guard so an unexpected parsing error never crashes the UI.
        print(f"Unexpected error: {str(e)}")
        return f"An unexpected error occurred: {str(e)}"

def aggregate_models(model, text, temperature, max_tokens):
    """Route the user's input to the selected model and return the chat log.

    Appends the user's message and the model's reply to the module-level
    ``message_history``, then renders the whole history as plain text for
    display in the UI.
    """
    global message_history  # conversation state shared across calls

    # Record the user's turn before dispatching.
    message_history.append({"role": "user", "content": text})

    # Table-driven dispatch: model name -> backend function.
    backends = {
        "wenxin": call_wenxin_ai,
        "spark": call_spark_ai_stream,
        "qianwen": call_qwen_max_stream,
    }
    handler = backends.get(model)
    if handler is None:
        response = "Unknown model"
    else:
        response = handler(message_history, temperature, max_tokens)

    # Record the assistant's turn (even the error/unknown text, matching
    # the chat-log semantics).
    message_history.append({"role": "assistant", "content": response})

    # Render every turn as "role: content", one per line.
    rendered = [f"{msg['role']}: {msg['content']}" for msg in message_history]
    return '\n'.join(rendered)


# Gradio UI: model picker, prompt box, generation controls, output pane.
with gr.Blocks() as demo:
    gr.Markdown("# 大模型聚合对话系统")

    # Backend selector and user input.
    model = gr.Dropdown(label="选择模型", choices=["wenxin", "spark", "qianwen"], value="wenxin")
    text = gr.Textbox(label="输入文本", placeholder="请输入你的对话文本")

    # Generation controls: temperature and max_tokens are forwarded
    # unchanged to the selected backend.
    temperature = gr.Slider(label="Temperature", minimum=0.0, maximum=1.0, value=0.7, step=0.01)
    max_tokens = gr.Slider(label="Max Tokens", minimum=1, maximum=2000, value=512, step=1)

    submit = gr.Button("提交")

    output = gr.Textbox(label="输出", placeholder="模型的回答将在这里显示")

    # Submit runs aggregate_models and displays the full rendered chat log.
    submit.click(aggregate_models, inputs=[model, text, temperature, max_tokens], outputs=output)


    # Button handler that clears the shared conversation state.
    def reset_conversation():
        """Clear the module-level conversation history; confirm to the user."""
        global message_history
        message_history = []
        return "对话已重置。"


    reset_button = gr.Button("重置对话")
    reset_button.click(reset_conversation, outputs=output)

# Launch the Gradio UI.
# NOTE(review): share=True exposes the app through a public Gradio tunnel —
# confirm that is intended before running with real API keys loaded.
demo.launch(share=True)
