import os
import re
import time
from functools import lru_cache

import gradio as gr
from openai import OpenAI

# NOTE: requires gradio version 3.41.0

#########################################
# Configuration
max_window_tokens = 11000  # total context-window token budget
history_rounds = 15        # max number of history turns carried in context
enable_cache = True        # enable LRU caching of token counts
enable_async = False       # enable async processing (currently unused)
#########################################

# Pre-compiled tokenizer regex.
# Fix: the previous pattern tried `\b\w+\b` FIRST, and `\w` matches CJK
# characters in Unicode mode, so a run like "你好世界" was swallowed as a
# single "word" token instead of four per-character tokens (the stated
# intent).  The CJK alternative now comes first and CJK is excluded from
# the word-run class, so each CJK ideograph counts as exactly one token.
TOKEN_PATTERN = re.compile(r"""
    [\u4e00-\u9fff]       |   # a single CJK ideograph = one token
    [^\W\u4e00-\u9fff]+   |   # a run of word chars (letters/digits/_) excluding CJK
    [^\w\s]                   # any other non-whitespace char (punctuation, symbols)
""", re.VERBOSE | re.UNICODE)

# OpenAI-compatible client for the local inference server.
# SECURITY: the API key was previously hard-coded in source.  Prefer the
# OPENAI_API_KEY environment variable; fall back to the old literal so
# existing deployments keep working.  If that key is real, rotate it —
# it has been committed to version control.
client = OpenAI(
    api_key=os.getenv("OPENAI_API_KEY", "sk-vPd3q8Tknq1vILpSFa82Af27FcEd4e198c49Ce7e35F20a3f"),
    base_url="http://localhost:30001/v1/"
)

@lru_cache(maxsize=1024)
def count_tokens_cached(content: str) -> int:
    """Approximate token count of *content*, memoized with a bounded LRU cache."""
    # finditer avoids materializing the match list; the count is identical
    # to len(TOKEN_PATTERN.findall(content)).
    return sum(1 for _ in TOKEN_PATTERN.finditer(content))

def smart_truncate(content: str, max_tokens: int) -> str:
    """Return *content* reduced to at most its last *max_tokens* tokens.

    Fix: the previous one-liner used ``tokens[-max_tokens:]`` unconditionally,
    and ``tokens[-0:]`` is the WHOLE list — so asking for 0 tokens returned
    the full content.  build_context_history can legitimately pass 0
    (``human_max = int(remaining * ratio)``), which leaked an untruncated
    message past the token budget.
    """
    if max_tokens <= 0:
        return ""
    tokens = TOKEN_PATTERN.findall(content)
    if len(tokens) <= max_tokens:
        return content
    # NOTE: joining without separators drops the original whitespace between
    # word tokens; acceptable for budget trimming, but lossy for readability.
    return ''.join(tokens[-max_tokens:])

def build_context_history(history: list, message: str) -> tuple:
    """Build the OpenAI-style message list within the token budget.

    Args:
        history: list of (user_text, assistant_text) pairs, oldest first.
        message: the current user message.

    Returns:
        (messages, stats) — ``messages`` is the system/user/assistant message
        list to send to the model; ``stats`` is a dict with token and length
        bookkeeping for logging.
    """
    system_msg = {"role": "system", "content": "你是个靠谱的 AI 助手，尽量详细的解答用户的提问。"}
    work_content = [system_msg]
    tokens_used = 0
    history_tokens = 0

    # Current-message statistics.
    current_msg_tokens = count_tokens_cached(message)
    current_msg_length = len(message)

    # Token budget left for history after the system prompt and the current
    # message.  Clamped at 0 so an oversized message cannot make it negative
    # (a negative budget would skip history via the overflow branch anyway,
    # but the arithmetic below assumes a non-negative budget).
    system_tokens = count_tokens_cached(system_msg["content"])
    available_tokens = max(0, max_window_tokens - current_msg_tokens - system_tokens)

    # Walk the most recent `history_rounds` turns in chronological order.
    for human, assistant in history[-history_rounds:]:
        human_tokens = count_tokens_cached(human)
        assistant_tokens = count_tokens_cached(assistant)

        if tokens_used + human_tokens + assistant_tokens > available_tokens:
            remaining = available_tokens - tokens_used
            if remaining > 0:
                # Split the leftover budget proportionally between the two
                # sides of the turn.  (remaining > 0 implies this turn is
                # non-empty, so the denominator cannot be zero.)
                human_ratio = human_tokens / (human_tokens + assistant_tokens)
                human_max = int(remaining * human_ratio)
                assistant_max = remaining - human_max

                truncated_human = smart_truncate(human, human_max)
                truncated_assistant = smart_truncate(assistant, assistant_max)
                work_content.extend([
                    {"role": "user", "content": truncated_human},
                    {"role": "assistant", "content": truncated_assistant}
                ])
                truncated_tokens = (count_tokens_cached(truncated_human)
                                    + count_tokens_cached(truncated_assistant))
                tokens_used += truncated_tokens
                # Fix: the truncated turn was previously added to tokens_used
                # but NOT to history_tokens, so stats["history_tokens"]
                # under-reported the history actually carried in context.
                history_tokens += truncated_tokens
            break

        work_content.extend([
            {"role": "user", "content": human},
            {"role": "assistant", "content": assistant}
        ])
        tokens_used += human_tokens + assistant_tokens
        history_tokens += human_tokens + assistant_tokens

    work_content.append({"role": "user", "content": message})
    total_tokens = tokens_used + current_msg_tokens + system_tokens

    return work_content, {
        "current_msg_length": current_msg_length,
        "current_msg_tokens": current_msg_tokens,
        "history_tokens": history_tokens,
        "total_tokens": total_tokens,
        "remaining_tokens": max_window_tokens - total_tokens
    }

def predict(message, history):
    """Gradio ChatInterface handler: stream the model's reply.

    Yields the accumulated partial answer after each chunk (Gradio streaming
    protocol), then logs the full answer and token/timing statistics.
    """
    start_time = time.perf_counter()

    # Build the context and collect token statistics.
    context_messages, stats = build_context_history(history, message)

    # Format the input context for logging.
    formatted_messages = "".join(
        f"[{msg['role'].upper()}] {msg['content']}\n\n" for msg in context_messages
    )

    # Log the input context.
    print("\n" + "=" * 40 + " 输入内容 " + "=" * 40)
    print(formatted_messages.rstrip())  # strip the trailing blank lines
    print("=" * 90)

    # Streaming request.
    stream = client.chat.completions.create(
        model='gpt-3.5-turbo',
        messages=context_messages,
        temperature=0.1,
        stream=True,
        extra_body={'repetition_penalty': 1, 'stop_token_ids': []}
    )

    # Accumulate the streamed response.
    partial_message = ""
    response_tokens = 0
    response_length = 0
    for chunk in stream:
        # Robustness: some OpenAI-compatible servers emit keep-alive/usage
        # chunks with an empty `choices` list; indexing [0] would raise.
        if not chunk.choices:
            continue
        if chunk_content := chunk.choices[0].delta.content:
            partial_message += chunk_content
            response_length += len(chunk_content)
            response_tokens += count_tokens_cached(chunk_content)
            yield partial_message

    # Log the model's answer.
    print("\n" + "=" * 40 + " 大模型回答 " + "=" * 40)
    print(partial_message)
    print("=" * 90 + "\n")

    # Log conversation statistics.
    print("\n" + "=" * 40 + " 对话统计 " + "=" * 40)
    print(f"当前提问字数: {stats['current_msg_length']} 字")
    print(f"当前提问token数（不含历史）: {stats['current_msg_tokens']}")
    print(f"上下文携带历史token数: {stats['history_tokens']}")
    print(f"本次实际消耗token数: {stats['total_tokens']}（提问）+ {response_tokens}（回答）= {stats['total_tokens'] + response_tokens}")
    print(f"剩余可用token数: {max(0, max_window_tokens - (stats['total_tokens'] + response_tokens))}")
    print("=" * 90 + "\n")

    # Timing.
    process_time = time.perf_counter() - start_time
    print(f"总处理时间: {process_time:.2f}s")

# Launch the Gradio chat UI (blocking call; streams via .queue()).
# NOTE(review): share=True also exposes the app through a public
# gradio.live tunnel, and server_name='0.0.0.0' binds all interfaces —
# confirm both are intended for this deployment.
gr.ChatInterface(predict).queue().launch(
    server_port=30008,
    share=True,
    server_name='0.0.0.0'
)