"""
AI客户端模块 - 处理ERNIE API的调用
"""

import streamlit as st
import os
import time
import base64
from openai import OpenAI

from modules.prompts import create_system_prompt, create_context_prompt

@st.cache_resource
def init_ernie_client(api_key=None):
    """Create and cache an OpenAI-compatible client for the ERNIE API.

    Falls back to the AI_STUDIO_API_KEY environment variable when no key
    is passed in. Returns None when no key is available or when client
    construction fails (an error banner is shown in the latter case).
    """
    key = api_key or os.environ.get("AI_STUDIO_API_KEY")
    if not key:
        return None

    try:
        return OpenAI(
            base_url="https://aistudio.baidu.com/llm/lmapi/v3",
            api_key=key,
        )
    except Exception as e:
        st.error(f"⚠️ 初始化ERNIE客户端失败: {str(e)}")
        return None

def _build_messages(query, context, image_path, artifact_name):
    """Assemble the chat message list: system prompt plus one user turn.

    The user turn is plain text built from the query/context; when
    image_path is given, the image is inlined as a base64 data URL
    alongside the text. If the image cannot be read, a warning is shown
    and the call degrades gracefully to text-only.
    """
    system_prompt = create_system_prompt(artifact_name)
    context_prompt = create_context_prompt(query, context)

    user_content = context_prompt
    if image_path:
        try:
            with open(image_path, "rb") as image_file:
                base64_image = base64.b64encode(image_file.read()).decode('utf-8')
            user_content = [
                {"type": "text", "text": context_prompt},
                # NOTE(review): the MIME type is hard-coded as jpeg regardless
                # of the actual file extension — confirm callers only pass JPEGs.
                {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}},
            ]
        except Exception as e:
            st.warning(f"图像处理出错，将仅使用文本查询: {str(e)}")

    return [
        {'role': 'system', 'content': system_prompt},
        {'role': 'user', 'content': user_content},
    ]

def generate_response(client, query, context, image_path=None, artifact_name=None, stream_output=True):
    """Generate an answer with the ERNIE model.

    Args:
        client: ERNIE API client (OpenAI-compatible). None short-circuits
            with a user-facing error string.
        query: the user's question.
        context: knowledge-base context text.
        image_path: optional path to an image to attach to the query.
        artifact_name: artifact name forwarded into the system prompt.
        stream_output: when True (default), render the answer incrementally
            in Streamlit widgets while streaming.

    Returns:
        The generated answer text, or a user-facing error message string
        if the client is missing or generation fails.
    """
    if client is None:
        return "ERNIE客户端未初始化，请在侧边栏中设置正确的API密钥"

    # Rough guess at total chunk count, used only to animate the progress bar.
    estimated_total_chunks = 50
    # Re-render the accumulated answer every N chunks to limit UI churn.
    flush_every = 3

    try:
        messages = _build_messages(query, context, image_path, artifact_name)

        if stream_output:
            progress_bar = st.progress(0)

            spinner_container = st.empty()
            spinner_container.markdown("""
            <div class="simple-spinner">
                <div class="spinner-circle"></div>
            </div>
            """, unsafe_allow_html=True)

            response_container = st.empty()

        completion = client.chat.completions.create(
            model="ernie-4.5-turbo-vl-32k",
            messages=messages,
            stream=True,
        )

        full_response = ""
        chunk_count = 0

        for chunk in completion:
            if len(chunk.choices) > 0 and chunk.choices[0].delta.content:
                full_response += chunk.choices[0].delta.content

                if stream_output:
                    chunk_count += 1
                    progress_bar.progress(min(chunk_count / estimated_total_chunks, 1.0))
                    if chunk_count % flush_every == 0:
                        response_container.markdown(f'<div class="response-container">{full_response}</div>', unsafe_allow_html=True)

        if stream_output:
            progress_bar.progress(1.0)
            spinner_container.empty()
            # Final render guarantees any chunks since the last flush are shown.
            response_container.markdown(f'<div class="response-container">{full_response}</div>', unsafe_allow_html=True)
            time.sleep(0.5)  # let the full bar be visible briefly before removal
            progress_bar.empty()

        return full_response
    except Exception as e:
        error_message = f"⚠️ 生成回答过程出错: {str(e)}"
        if stream_output:
            st.error(error_message)
        return f"抱歉，生成回答时出现了错误: {str(e)}。请稍后再试。"