from flask import Flask, Response, request
from flask_cors import CORS
from functools import wraps
import json
import time
import warnings

from werkzeug.wrappers import response

# Suppress DeprecationWarning messages, mainly to silence LangChainDeprecationWarning.
warnings.filterwarnings("ignore", category=DeprecationWarning)

from services.agent_service import create_tools, create_llm, create_agent
from langchain.schema import HumanMessage
from langchain_ollama import ChatOllama
from langchain.agents import initialize_agent, AgentType
from langchain.memory import ConversationBufferMemory
from config.settings import OLLAMA_BASE_URL, OLLAMA_MODEL, OLLAMA_TEMPERATURE

app = Flask(__name__)
CORS(app)

def stream_response(func):
    """Decorator turning a generator view into an NDJSON streaming response.

    The wrapped view receives the parsed JSON request body via the ``data``
    keyword argument and yields items; each item becomes one
    newline-delimited JSON line:

    - dicts are emitted as-is;
    - message-like objects (e.g. LangChain ``AIMessageChunk``) are mapped to
      ``{"response": ...}`` or ``{"tool_calls": [...]}``;
    - anything else is stringified into ``{"response": str(item)}``.

    Errors raised while *setting up* the response produce a 400 JSON error.
    Errors raised while the stream is being consumed happen after the 200
    headers were already sent, so they are reported in-band as an
    ``{"error": ...}`` NDJSON line instead of silently breaking the
    connection (previously the in-generator handler was commented out and
    such errors aborted the stream).
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            # Parse the JSON body while the request context is guaranteed
            # to be active; inside the generator it may no longer be.
            if request.is_json:
                kwargs['data'] = request.get_json()

            def generate():
                try:
                    for item in func(*args, **kwargs):
                        if isinstance(item, dict):
                            # Plain dicts pass through unchanged.
                            yield json.dumps(item, ensure_ascii=False) + '\n'
                            continue
                        # Message-like objects: prefer text content, then
                        # tool calls, finally fall back to str().
                        response_data = {}
                        if hasattr(item, 'content') and item.content:
                            response_data['response'] = item.content
                        elif hasattr(item, 'tool_calls') and item.tool_calls:
                            response_data['tool_calls'] = [
                                {'name': tc['name'], 'args': tc['args']}
                                for tc in item.tool_calls
                            ]
                        else:
                            response_data['response'] = str(item)
                        yield json.dumps(response_data, ensure_ascii=False) + '\n'
                except Exception as e:
                    # The HTTP status line is already on the wire once
                    # streaming starts; report failures in-band so the
                    # client actually sees them.
                    yield json.dumps({'error': str(e)}, ensure_ascii=False) + '\n'

            return Response(generate(), mimetype='application/x-ndjson')
        except Exception as e:
            return Response(json.dumps({'error': str(e)}),
                            mimetype='application/json', status=400)
    return wrapper

@app.route('/api/stream', methods=['POST'])
@stream_response
def stream_data(data=None):
    """Emit a fixed number of demo records, pausing between each.

    Expects a JSON body with optional ``count`` (number of records,
    default 5) and ``delay`` (seconds between records, default 1).
    Yields one dict per record for the decorator to serialize.
    """
    if not data:
        raise ValueError('No data provided')

    total = data.get('count', 5)
    pause = data.get('delay', 1)

    index = 0
    while index < total:
        index += 1
        time.sleep(pause)  # simulate per-record processing time
        yield {
            'id': index,
            'message': f'这是第{index}条流式数据',
            'timestamp': time.time(),
        }

def _extract_action_input(text):
    """Pull the value of ``"action_input"`` out of a raw agent message.

    Returns the extracted string, or ``None`` when the marker is absent or
    the closing quote cannot be found. The previous inline version added 16
    to the match index, but the marker is 17 characters long, so ``start``
    landed on the opening quote, ``find('"', start)`` returned ``start``
    itself, and the ``end > start`` guard always failed — nothing was ever
    extracted.
    """
    marker = '"action_input": "'
    idx = text.find(marker)
    if idx == -1:
        return None
    start = idx + len(marker)  # first character of the value
    end = text.find('"', start)
    if end == -1:
        return None
    return text[start:end]


@app.route('/api/chat', methods=['POST'])
@stream_response
def chat(data=None):
    """Run the LangChain agent on the user's message and stream its output.

    Expects a JSON body with ``content`` (required) and an optional
    ``session_id`` (currently unused). Yields ``{"response": ...}`` dicts
    for the decorator to serialize. The agent emits intermediate steps, so
    tool-call traces may appear in the stream.
    """
    if not data:
        raise ValueError('No data provided')

    content = data.get('content')
    session_id = data.get('session_id')  # NOTE(review): accepted but unused

    if not content:
        raise ValueError('Content is required')

    # Build the agent: tool list, LLM, then the agent wrapping both.
    # (The module-level warnings filter already silences deprecation
    # warnings; no need to re-apply it per request.)
    tools = create_tools()
    llm = create_llm()
    agent = create_agent(tools, llm)
    message = HumanMessage(content=content)

    for chunk in agent.stream({"input": message.content}):
        print("chunk", chunk)
        if isinstance(chunk, dict):
            if "output" in chunk:
                # Final answer from the agent.
                yield {"response": str(chunk["output"])}
            elif "messages" in chunk and chunk["messages"]:
                for msg in chunk["messages"]:
                    msg_text = msg.content
                    if "action_input" in msg_text:
                        value = _extract_action_input(msg_text)
                        # Fall back to the full message when parsing fails
                        # instead of silently dropping it.
                        yield {"response": value if value is not None else msg_text}
            else:
                # Best effort for other dict chunks: join the JSON-scalar
                # values so the payload stays serializable.
                parts = [str(v) for v in chunk.values()
                         if isinstance(v, (str, int, float, bool, type(None)))]
                if parts:
                    yield {"response": " ".join(parts).strip()}
        else:
            # Non-dict chunks are stringified as-is.
            if chunk:
                yield {"response": str(chunk)}

@app.route('/api/stream_chat', methods=['POST'])
@stream_response
def stream_chat(data=None):
    """Stream raw LLM output chunk-by-chunk (no agent, no tools).

    Expects a JSON body with ``content`` (required). Yields
    ``{"response": ...}`` dicts, one per streamed chunk.
    """
    if not data:
        raise ValueError('No data provided')

    content = data.get('content')
    # session_id = data.get('session_id')

    if not content:
        raise ValueError('Content is required')

    # Create the LLM instance (implementation lives in services.agent_service).
    llm = create_llm()

    # Stream directly from the model. Chat models yield AIMessageChunk
    # objects whose text lives in .content; plain LLMs yield strings.
    # Unwrap the former so the dict stays JSON-serializable for the
    # decorator (matches the handling in /api/chat_stream).
    for chunk in llm.stream(content):
        print("chunk", chunk)
        if chunk:
            if hasattr(chunk, 'content'):
                yield {"response": chunk.content}
            else:
                yield {"response": chunk}

@app.route('/api/chat_stream', methods=['POST'])
@stream_response
def chat_stream(data=None):
    """Stream a ChatOllama response for the posted message.

    Expects a JSON body with ``content`` (required). Yields
    ``{"response": ...}`` dicts, one per streamed chunk.

    NOTE(review): the previous revision (marked "test failed") also tried
    to stream an agent via ``async for`` over ``agent.arun(...)`` —
    ``arun`` returns a coroutine, not an async iterator, so that call
    raised TypeError inside ``asyncio.run`` and broke the endpoint before
    any output was produced. Tool-calling chat is served by /api/chat;
    this route streams the bare model.
    """
    if not data:
        raise ValueError('No data provided')

    content = data.get('content')

    if not content:
        raise ValueError('Content is required')

    # ChatOllama configured for incremental (streamed) generation.
    chat_model = ChatOllama(
        model=OLLAMA_MODEL,
        base_url=OLLAMA_BASE_URL,
        temperature=OLLAMA_TEMPERATURE,
        streaming=True
    )

    for chunk in chat_model.stream(content):
        print("chunk", chunk)
        if chunk:
            # AIMessageChunk carries its text in .content; unwrap it so
            # the payload is JSON-serializable.
            if hasattr(chunk, 'content'):
                yield {"response": chunk.content}
            else:
                yield {"response": str(chunk)}


@app.route('/api/test', methods=['POST'])
@stream_response
def test(data=None):
    """Experimental: stream from ChatOllama with tools bound directly.

    Expects a JSON body with ``content`` (required) and an optional
    ``session_id`` (currently unused). Yields raw message chunks and lets
    the stream_response decorator serialize them (text content or tool
    calls). The previous version wrapped each chunk in a dict, which made
    the decorator's ``json.dumps`` fail on non-serializable AIMessageChunk
    objects.
    """
    if not data:
        raise ValueError('No data provided')

    content = data.get('content')
    session_id = data.get('session_id')  # NOTE(review): accepted but unused

    if not content:
        raise ValueError('Content is required')

    # (The module-level warnings filter already silences deprecation
    # warnings; no need to re-apply it per request.)
    tools = create_tools()
    chat_model = ChatOllama(
        model=OLLAMA_MODEL,
        base_url=OLLAMA_BASE_URL,
        temperature=OLLAMA_TEMPERATURE
    )

    # Bind the tools so the model can emit tool calls, then stream.
    # Yield each chunk as-is: the decorator knows how to serialize
    # message chunks via .content / .tool_calls.
    for chunk in chat_model.bind_tools(tools).stream(content):
        print("chunk", chunk)
        if chunk:
            yield chunk

if __name__ == '__main__':
    # Development entry point: Flask's built-in server on port 5000.
    # debug=True enables the reloader/interactive debugger — dev only,
    # never run this way in production.
    app.run(debug=True, port=5000)