import os
import sys
import uuid
import time
import json
import asyncio
from contextlib import asynccontextmanager
from fastapi import FastAPI, HTTPException
from fastapi.responses import JSONResponse, StreamingResponse
from pydantic import BaseModel, Field
from typing import List, Optional
from product_recommendation.crew import ProductRecommendationCrew
from langchain_openai import ChatOpenAI
import uvicorn

# Non-GPT LLM configuration (oneapi gateway, Tongyi/Qwen-style backend as an
# example); adjust these values to match your own deployment.
ONEAPI_API_BASE = "http://localhost:3000/v1"
# WARNING(review): hard-coded API key committed to source — move it to an
# environment variable (e.g. os.environ) and rotate the exposed key.
ONEAPI_CHAT_API_KEY = "sk-M7C40emVuDhbUmwhD560F8A8F4D144Bd974aD5BcE2D24d9d"
ONEAPI_CHAT_MODEL = "ERNIE-Speed-128K"

# Shared LLM client; created once in the FastAPI lifespan hook below.
model = None
# Port the uvicorn server listens on.
PORT = 8012


class Query(BaseModel):
    """Request body for the /recommend endpoint: a free-text user query."""
    text: str  # the raw natural-language query to analyze


class Message(BaseModel):
    """A single chat message in the OpenAI chat format."""
    role: str  # message author role (this server emits "assistant" replies)
    content: str  # message text


class ChatCompletionRequest(BaseModel):
    """Request body for /v1/chat/completions (OpenAI-style subset)."""
    messages: List[Message]  # conversation history; only the last message is used as the query
    stream: Optional[bool] = False  # True -> server-sent-events style streaming reply


class ChatCompletionResponseChoice(BaseModel):
    """One choice entry in a non-streaming chat-completion response."""
    index: int  # position of this choice (always 0 here)
    message: Message  # the assistant's reply
    finish_reason: Optional[str] = None  # e.g. "stop" when generation completed


class ChatCompletionResponse(BaseModel):
    """Top-level non-streaming response mirroring the OpenAI schema."""
    id: str = Field(default_factory=lambda: f"chatcmpl-{uuid.uuid4().hex}")  # unique completion id
    object: str = "chat.completion"
    created: int = Field(default_factory=lambda: int(time.time()))  # unix timestamp
    choices: List[ChatCompletionResponseChoice]
    system_fingerprint: Optional[str] = None


@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan hook: build the shared LLM client on startup.

    Assigns the module-level ``model`` so request handlers can use it, and
    re-raises any initialization error so the server fails fast instead of
    serving requests with an unusable client.
    """
    global model
    try:
        print("正在初始化模型...")
        model = ChatOpenAI(
            base_url=ONEAPI_API_BASE,
            api_key=ONEAPI_CHAT_API_KEY,
            model=ONEAPI_CHAT_MODEL,
            temperature=0.7,
            request_timeout=60,  # 60-second per-request timeout
            max_retries=2,  # retry a failed call at most twice
        )
        print("LLM初始化完成")
    except Exception as e:
        print(f"初始化过程中出错: {str(e)}")
        raise
    yield
    # Code after the yield runs at application shutdown.
    print("正在关闭...")


# Application instance; the lifespan hook initializes the shared LLM before serving.
app = FastAPI(title="商品推荐助手API", lifespan=lifespan)


@app.post("/v1/chat/completions")
async def chat_completions(request: ChatCompletionRequest):
    """OpenAI-compatible chat-completions endpoint.

    Uses the last message in the request as the user query, runs the
    ProductRecommendationCrew pipeline on it, and returns the result either
    as one JSON body or as a simulated SSE stream (re-emitted line by line).

    Raises:
        HTTPException: 500 when the LLM is not initialized or the crew fails,
            400 when the message list is empty.
    """
    if not model:
        raise HTTPException(status_code=500, detail="服务未初始化")

    # Guard against an empty message list before indexing messages[-1].
    if not request.messages:
        raise HTTPException(status_code=400, detail="messages 不能为空")

    try:
        query_prompt = request.messages[-1].content
        print(f"用户问题是: {query_prompt}")

        # Build a recommendation crew bound to the shared LLM instance.
        crew = ProductRecommendationCrew(model)

        # Kick off the crew workflow with the raw query; intent resolution
        # happens inside the crew itself.
        inputs = {
            "query": query_prompt,
            "intent": None
        }
        result = crew.crew().kickoff(inputs=inputs)
        formatted_response = str(result)
        print(f"LLM最终回复结果: {formatted_response}")

        if request.stream:
            async def generate_stream():
                # Replay the finished answer one line at a time to simulate
                # token streaming for SSE clients.
                chunk_id = f"chatcmpl-{uuid.uuid4().hex}"
                for line in formatted_response.split('\n'):
                    chunk = {
                        "id": chunk_id,
                        "object": "chat.completion.chunk",
                        "created": int(time.time()),
                        "choices": [
                            {
                                "index": 0,
                                "delta": {"content": line + '\n'},
                                "finish_reason": None
                            }
                        ]
                    }
                    # SSE frames require a "data: " prefix and a blank-line
                    # terminator; ensure_ascii=False keeps CJK text readable
                    # instead of \uXXXX escapes.
                    yield f"data: {json.dumps(chunk, ensure_ascii=False)}\n\n"
                    await asyncio.sleep(0.5)

                final_chunk = {
                    "id": chunk_id,
                    "object": "chat.completion.chunk",
                    "created": int(time.time()),
                    "choices": [
                        {
                            "index": 0,
                            "delta": {},
                            "finish_reason": "stop"
                        }
                    ]
                }
                yield f"data: {json.dumps(final_chunk, ensure_ascii=False)}\n\n"
                # OpenAI-compatible streams terminate with a [DONE] sentinel.
                yield "data: [DONE]\n\n"

            return StreamingResponse(generate_stream(), media_type="text/event-stream")
        else:
            response = ChatCompletionResponse(
                choices=[
                    ChatCompletionResponseChoice(
                        index=0,
                        message=Message(role="assistant", content=formatted_response),
                        finish_reason="stop"
                    )
                ]
            )
            return JSONResponse(content=response.model_dump())

    except Exception as e:
        print(f"处理聊天完成时出错: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/recommend")
async def get_recommendation(query: Query):
    """Product recommendation endpoint.

    Analyzes the intent of the free-text query, fetches matching products
    through the crew's DB tool (widening the search when the first pass is
    empty), and returns at most five products plus generated recommendation
    copy. Errors are reported in-band as {"success": False, "error": ...}.
    """
    if not model:
        raise HTTPException(status_code=500, detail="服务未初始化")
    try:
        # Build a recommendation crew bound to the shared LLM instance.
        crew = ProductRecommendationCrew(model)

        # Classify the query (price / category / keyword search).
        intent_result = crew.nlp_tool.analyze_intent(query.text)
        print(f"意图分析结果: {intent_result}")

        products = []
        if intent_result["query_type"] == "price_search":
            params = intent_result["parameters"]
            products = crew.db_tool.get_products_by_price_range(
                params["min_price"],
                params["max_price"]
            )
            # Widen the price window by ±1000 when the exact range is empty.
            if not products:
                products = crew.db_tool.get_products_by_price_range(
                    max(0, params["min_price"] - 1000),
                    params["max_price"] + 1000
                )

        elif intent_result["query_type"] == "category_search":
            category = intent_result["parameters"]["category"]
            products = crew.db_tool.get_products_by_category(category)

            # Cooking-related queries also pull in kitchenware, ingredient
            # and seasoning categories so the answer covers the whole task.
            if any(word in query.text for word in ("做饭", "烹饪", "厨艺")):
                related = [
                    item
                    for cat in ("厨具", "食材", "调味料")
                    for item in crew.db_tool.get_products_by_category(cat)
                ]
                # Prefer the combined related set when it is non-empty.
                if related:
                    products = related

            # Fall back to keyword search when category search found nothing.
            if not products:
                products = crew.db_tool.search_products(category)

        else:  # keyword_search
            products = crew.db_tool.search_products(
                intent_result["parameters"]["keyword"]
            )
            # Last resort: surface popular products instead of an empty answer.
            if not products:
                products = crew.db_tool.get_popular_products(5)

        print(f"找到 {len(products)} 个商品")

        # Generate the natural-language recommendation text for the results.
        recommendation = crew.nlp_tool.generate_recommendation_text(products, query.text)

        return {
            "success": True,
            "data": {
                "intent": intent_result,
                "products": products[:5] if products else [],  # cap at 5 items
                "recommendation": recommendation,
                "total_found": len(products)
            }
        }
    except Exception as e:
        print(f"处理推荐请求时出错: {str(e)}")
        return {
            "success": False,
            "error": str(e)
        }


if __name__ == "__main__":
    # Run the API with uvicorn when executed as a script.
    # NOTE(review): train()/replay() below are defined after this guard and are
    # only reachable when the module is imported (e.g. by a CLI entry point).
    print(f"在端口 {PORT} 上启动服务器")
    uvicorn.run(app, host="0.0.0.0", port=PORT)


def train():
    """
    Train the crew for a given number of iterations.

    Expects CLI arguments: sys.argv[1] = number of iterations,
    sys.argv[2] = output filename.
    """
    inputs = {
        "topic": "AI LLMs"
    }
    try:
        ProductRecommendationCrew().crew().train(n_iterations=int(sys.argv[1]), filename=sys.argv[2], inputs=inputs)

    except Exception as e:
        # Chain the original exception so the root cause and its traceback
        # remain visible to the caller.
        raise Exception(f"An error occurred while training the crew: {e}") from e


def replay():
    """
    Replay the crew execution from a specific task.

    Expects CLI argument: sys.argv[1] = task id to replay from.
    """
    try:
        ProductRecommendationCrew().crew().replay(task_id=sys.argv[1])

    except Exception as e:
        # Chain the original exception so the root cause and its traceback
        # remain visible to the caller.
        raise Exception(f"An error occurred while replaying the crew: {e}") from e

# def test():
#     """
#     Test the crew execution and returns the results.
#     """
#     inputs = {
#         "topic": "AI LLMs"
#     }
#     try:
#         ProductRecommendationCrew().crew().test(n_iterations=int(sys.argv[1]), openai_model_name=sys.argv[2],
#                                                 inputs=inputs)
#
#     except Exception as e:
#         raise Exception(f"An error occurred while replaying the crew: {e}")
