#!/usr/bin/env python
import sys
from inventory_management.crew import InventoryManagementCrew
import os
import uuid
import time
import json
import asyncio
from contextlib import asynccontextmanager
from fastapi import FastAPI, HTTPException
from fastapi.responses import JSONResponse, StreamingResponse
from pydantic import BaseModel, Field
from typing import List, Optional
from inventory_management.tools.database_tool import DatabaseTool
from inventory_management.tools.nlp_tool import NLPTool
from inventory_management.tools.inventory_tool import InventoryTool
from langchain_openai import ChatOpenAI
import uvicorn


# This main file is intended to let you run the crew; avoid adding
# essential business logic here. Replace the inputs below with whatever
# you want to test — task and agent information is interpolated
# automatically.
def run():
    """Kick off the crew once with a sample topic input."""
    kickoff_inputs = {'topic': 'AI LLMs'}
    InventoryManagementCrew().crew().kickoff(inputs=kickoff_inputs)


def train():
    """Train the crew for a given number of iterations.

    Command-line arguments:
        sys.argv[1]: number of training iterations (parsed as int).
        sys.argv[2]: filename to write the training results to.

    Raises:
        Exception: wraps any failure (bad/missing argv, training error),
            chaining the original exception as the cause.
    """
    inputs = {
        "topic": "AI LLMs"
    }
    try:
        n_iterations = int(sys.argv[1])
        filename = sys.argv[2]
        InventoryManagementCrew().crew().train(
            n_iterations=n_iterations, filename=filename, inputs=inputs
        )
    except Exception as e:
        # `from e` preserves the original traceback (the original re-wrap
        # discarded the chain, hiding where training actually failed).
        raise Exception(f"An error occurred while training the crew: {e}") from e


def replay():
    """Replay the crew execution from a specific task.

    Command-line arguments:
        sys.argv[1]: id of the task to replay from.

    Raises:
        Exception: wraps any failure (missing argv, replay error),
            chaining the original exception as the cause.
    """
    try:
        task_id = sys.argv[1]
        InventoryManagementCrew().crew().replay(task_id=task_id)
    except Exception as e:
        # `from e` preserves the original traceback for debugging.
        raise Exception(f"An error occurred while replaying the crew: {e}") from e


# Configuration for a non-GPT LLM served through a OneAPI gateway
# (example: Tongyi/ERNIE behind a local One-API instance).
# Each value can be overridden via an environment variable of the same
# name; the previous hard-coded literals remain as fallbacks so existing
# deployments keep working.
ONEAPI_API_BASE = os.getenv("ONEAPI_API_BASE", "http://localhost:3000/v1")
# SECURITY NOTE(review): this fallback key was committed to source control
# and should be rotated; prefer supplying ONEAPI_CHAT_API_KEY via the
# environment.
ONEAPI_CHAT_API_KEY = os.getenv(
    "ONEAPI_CHAT_API_KEY", "sk-M7C40emVuDhbUmwhD560F8A8F4D144Bd974aD5BcE2D24d9d"
)
ONEAPI_CHAT_MODEL = os.getenv("ONEAPI_CHAT_MODEL", "ERNIE-Speed-128K")

# Shared singletons, populated by the FastAPI lifespan handler on startup.
model = None
db_tool = None
nlp_tool = None
inventory_tool = None

# Port the uvicorn server listens on.
PORT = 8013


class Query(BaseModel):
    """Request body for the /process endpoint."""
    # Free-form natural-language query from the user.
    text: str


class Message(BaseModel):
    """A single chat message in OpenAI chat format."""
    # Speaker role, e.g. "user" / "assistant" / "system" (not validated here).
    role: str
    # The message text.
    content: str


class ChatCompletionRequest(BaseModel):
    """Request body for the OpenAI-compatible /v1/chat/completions endpoint."""
    # Conversation history; the handler uses only the last message as the query.
    messages: List[Message]
    # When True, the response is delivered as a Server-Sent-Events stream.
    stream: Optional[bool] = False


class ChatCompletionResponseChoice(BaseModel):
    """One completion choice, mirroring OpenAI's response schema."""
    # Position of this choice in the `choices` list (always 0 here).
    index: int
    # The assistant message produced for this choice.
    message: Message
    # e.g. "stop"; None while streaming is still in progress.
    finish_reason: Optional[str] = None


class ChatCompletionResponse(BaseModel):
    """Top-level non-streaming response, mirroring OpenAI's schema."""
    # Unique response id, generated per response ("chatcmpl-<hex>").
    id: str = Field(default_factory=lambda: f"chatcmpl-{uuid.uuid4().hex}")
    # Fixed object tag used by OpenAI-compatible clients.
    object: str = "chat.completion"
    # Unix timestamp of response creation.
    created: int = Field(default_factory=lambda: int(time.time()))
    # Completion choices (this service always returns exactly one).
    choices: List[ChatCompletionResponseChoice]
    # Not populated by this service.
    system_fingerprint: Optional[str] = None


@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan handler.

    On startup, builds the shared LLM client and the tool chain, storing
    them in the module-level globals `model`, `db_tool`, `nlp_tool` and
    `inventory_tool` that the request handlers read. On shutdown, logs a
    message.
    """
    global model, db_tool, nlp_tool, inventory_tool
    try:
        print("正在初始化服务...")
        # Chat model served through the OneAPI gateway (non-GPT backend).
        model = ChatOpenAI(
            base_url=ONEAPI_API_BASE,
            api_key=ONEAPI_CHAT_API_KEY,
            model=ONEAPI_CHAT_MODEL,
            temperature=0.7,
            request_timeout=60,
            max_retries=2,
        )
        # Tool chain: database access -> NLP parsing (needs the LLM) ->
        # inventory operations (needs both).
        db_tool = DatabaseTool()
        nlp_tool = NLPTool(model)
        inventory_tool = InventoryTool(db_tool, nlp_tool)
        print("服务初始化完成")
    except Exception as e:
        print(f"初始化过程中出错: {str(e)}")
        raise  # fail fast: the app cannot serve requests uninitialized
    yield  # application serves requests while suspended here
    print("正在关闭服务...")


# FastAPI application; `lifespan` initializes the shared LLM and tools.
app = FastAPI(title="库存管理系统API", lifespan=lifespan)


@app.post("/v1/chat/completions")
async def chat_completions(request: ChatCompletionRequest):
    """OpenAI-compatible chat completions endpoint.

    Runs the last user message through the inventory-management crew and
    returns either a single JSON completion or a Server-Sent-Events stream,
    depending on ``request.stream``.

    Raises:
        HTTPException: 500 if the service is uninitialized or the crew fails.
    """
    if not model:
        raise HTTPException(status_code=500, detail="服务未初始化")
    try:
        # The last message in the conversation is treated as the user's query.
        query_prompt = request.messages[-1].content
        print(f"用户问题是: {query_prompt}")

        # Build the crew with the shared LLM.
        crew = InventoryManagementCrew(model)
        inputs = {
            "query": query_prompt
        }

        # kickoff() is synchronous and long-running; run it in a worker
        # thread so it does not block the asyncio event loop (a direct call
        # would stall every other in-flight request while the crew runs).
        result = await asyncio.to_thread(lambda: crew.crew().kickoff(inputs=inputs))
        formatted_response = str(result)
        print(f"LLM最终回复结果: {formatted_response}")

        # Streaming branch: emit proper SSE frames.
        if request.stream:
            async def generate_stream():
                # SSE/OpenAI framing: each event must be "data: <json>\n\n"
                # and the stream must end with "data: [DONE]\n\n". The
                # previous bare-JSON lines were unparseable by OpenAI-style
                # SSE clients despite the text/event-stream media type.
                chunk_id = f"chatcmpl-{uuid.uuid4().hex}"
                for line in formatted_response.split('\n'):
                    chunk = {
                        "id": chunk_id,
                        "object": "chat.completion.chunk",
                        "created": int(time.time()),
                        "choices": [
                            {
                                "index": 0,
                                "delta": {"content": line + '\n'},
                                "finish_reason": None
                            }
                        ]
                    }
                    # ensure_ascii=False keeps Chinese text readable on the wire.
                    yield f"data: {json.dumps(chunk, ensure_ascii=False)}\n\n"
                    await asyncio.sleep(0.5)  # pacing between chunks

                final_chunk = {
                    "id": chunk_id,
                    "object": "chat.completion.chunk",
                    "created": int(time.time()),
                    "choices": [
                        {
                            "index": 0,
                            "delta": {},
                            "finish_reason": "stop"
                        }
                    ]
                }
                yield f"data: {json.dumps(final_chunk, ensure_ascii=False)}\n\n"
                yield "data: [DONE]\n\n"

            return StreamingResponse(generate_stream(), media_type="text/event-stream")
        else:
            # Non-streaming: one choice wrapping the full crew response.
            response = ChatCompletionResponse(
                choices=[
                    ChatCompletionResponseChoice(
                        index=0,
                        message=Message(role="assistant", content=formatted_response),
                        finish_reason="stop"
                    )
                ]
            )
            return JSONResponse(content=response.model_dump())

    except Exception as e:
        print(f"处理聊天完成时出错: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/process")
async def process_query(query: Query):
    """Route a natural-language query to the matching inventory handler.

    Dispatch order: purchase intent (via NLP), restock keywords, payment
    keywords, low-stock-alert keywords; otherwise a fallback message.

    Returns:
        The handler's result, or a dict with `success`/`message`/`data`.
    """
    if not model:
        raise HTTPException(status_code=500, detail="服务未初始化")
    try:
        # Reuse the singletons created by the lifespan handler. The original
        # re-instantiated DatabaseTool/NLPTool/InventoryTool on every request,
        # discarding the startup-initialized globals and paying the
        # construction cost per call.

        # Analyze purchase intent first.
        intent_result = nlp_tool.parse_purchase_intent(query.text)
        print(f"意图分析结果: {intent_result}")

        # Dispatch on the analyzed intent / keyword matches.
        if intent_result.get('success', False):
            # Purchase request.
            return inventory_tool.process_purchase(query.text)

        elif "补充" in query.text or "进货" in query.text or "补货" in query.text:
            # Restock request.
            return inventory_tool.process_restock(query.text)

        elif "支付" in query.text or "付款" in query.text or any(
                method in query.text.lower() for method in ["现金", "信用卡", "支付宝", "微信"]
        ):
            # Payment request.
            return inventory_tool.process_payment(query.text)

        elif "库存预警" in query.text or "库存不足" in query.text:
            # Low-stock alert query.
            return inventory_tool.get_low_stock_alerts()

        # Nothing matched: report that the request was not understood.
        return {
            "success": False,
            "message": "抱歉，我无法理解您的请求。请尝试重新描述您的需求。",
            "data": None
        }

    except Exception as e:
        # Best-effort endpoint: report the failure in the response body
        # rather than raising (matches the original error contract).
        print(f"处理查询时出错: {str(e)}")
        return {
            "success": False,
            "message": f"处理请求时出错: {str(e)}",
            "data": None
        }


# Script entry point: start the API server on all interfaces.
if __name__ == "__main__":
    print(f"在端口 {PORT} 上启动服务器")
    uvicorn.run(app, host="0.0.0.0", port=PORT)
