import asyncio
import datetime
import os

import pytz
from dashscope import Generation
from fastapi import FastAPI, Request
from loguru import logger
from torch.cuda import temperature  # NOTE(review): unused — looks like an accidental IDE auto-import; candidate for removal

from configs.logging_config import configure_logging
from configs.prompt_config import PROMPT_TEMPLATES
from server.utils import BaseResponse, get_prompt_template

configure_logging()

# DashScope (Alibaba Cloud Bailian) configuration.
# SECURITY: an API key was hardcoded here in source control. Prefer supplying it
# via the DASHSCOPE_API_KEY environment variable; the literal below is kept only
# as a backward-compatible fallback and should be rotated and removed.
DASHSCOPE_API_KEY = os.getenv("DASHSCOPE_API_KEY", "sk-e68abfde4e7c455b902369dfad28c55f")
MODEL_NAME = "qwen-max"        # Bailian model name
LAWYER_TEMPERATURE = 0.6       # sampling temperature for the lawyer daily report
EMOTION_TEMPERATURE = 0.6      # sampling temperature for the emotion-expert daily report

app = FastAPI()

@app.get("/chat/generate_lawyer_report")
async def generate_lawyer_report(request: Request = None):
    """Generate the lawyer's daily news briefing via the DashScope model.

    Returns:
        BaseResponse: code 200 with the raw model response on success;
        code 500 (or the upstream status code) with an error message otherwise.
    """
    # Fetch the prompt template for the lawyer daily report.
    prompt = get_prompt_template("llm_chat", "lawyer_report")
    if prompt is None:
        logger.error("律师每日报告生成失败 | 获取模板失败")
        return BaseResponse(
            code=500,
            msg="律师每日报告生成失败 | 获取模板失败",
            data=None
        )
    start_time = datetime.datetime.now()
    try:
        # Generation.call is a blocking HTTP request; run it in a worker thread
        # so the event loop is not stalled for the whole generation.
        response = await asyncio.to_thread(
            Generation.call,
            model=MODEL_NAME,
            prompt=prompt,
            enable_search=True,
            stream=False,
            api_key=DASHSCOPE_API_KEY,
            temperature=LAWYER_TEMPERATURE,
        )
        elapsed = (datetime.datetime.now() - start_time).total_seconds()
        if response.status_code == 200:
            logger.success(f"律师报告生成成功 | 耗时: {elapsed:.2f}s")
            logger.info(response.output.text)
            return BaseResponse(
                code=response.status_code,
                msg="律师每日报告生成成功...",
                data=response
            )
        # Upstream returned a non-200 status; propagate its code and message.
        logger.error(f"律师报告生成失败 | 耗时: {elapsed:.2f}s | 错误: {response.message}")
        return BaseResponse(
            code=response.status_code,
            msg=f"律师每日报告生成失败：{response.message}",
            data=response
        )
    except Exception as e:
        # Network / SDK failures: report as a 500 rather than crashing the endpoint.
        logger.error(f"律师每日报告生成报错：{e}")
        return BaseResponse(
            code=500,
            msg=f"律师每日报告生成报错：{e}",
            data=None
        )

@app.get("/chat/generate_emotion_expert_report")
async def generate_emotion_expert_report(request: Request = None):
    """Generate the emotion expert's daily news briefing via the DashScope model.

    Returns:
        BaseResponse: code 200 with the raw model response on success;
        code 500 (or the upstream status code) with an error message otherwise.
    """
    # Fetch the prompt template for the emotion-expert daily report.
    prompt = get_prompt_template("llm_chat", "emotion_report")
    if prompt is None:
        logger.error("情感专家每日报告生成失败 | 获取模板失败")
        return BaseResponse(
            code=500,
            msg="情感专家每日报告生成失败 | 获取模板失败",
            data=None
        )
    start_time = datetime.datetime.now()
    try:
        # Generation.call is a blocking HTTP request; run it in a worker thread
        # so the event loop is not stalled for the whole generation.
        response = await asyncio.to_thread(
            Generation.call,
            model=MODEL_NAME,
            prompt=prompt,
            enable_search=True,
            stream=False,
            api_key=DASHSCOPE_API_KEY,
            temperature=EMOTION_TEMPERATURE,
        )
        elapsed = (datetime.datetime.now() - start_time).total_seconds()
        if response.status_code == 200:
            logger.success(f"情感专家报告生成成功 | 耗时: {elapsed:.2f}s")
            logger.info(response.output.text)
            return BaseResponse(
                code=response.status_code,
                msg="情感专家每日报告生成成功...",
                data=response
            )
        # Upstream returned a non-200 status; propagate its code and message.
        logger.error(f"情感专家每日报告生成失败 | 耗时: {elapsed:.2f}s | 错误: {response.message}")
        return BaseResponse(
            code=response.status_code,
            msg=f"情感专家每日报告生成失败：{response.message}",
            data=response
        )
    except Exception as e:
        # Network / SDK failures: report as a 500 rather than crashing the endpoint.
        logger.error(f"情感专家每日报告生成报错：{e}")
        return BaseResponse(
            code=500,
            msg=f"情感专家每日报告生成报错：{e}",
            data=None
        )

if __name__ == "__main__":
    # Run the API server; the report endpoints are exposed under /chat/*.
    # (Removed dead commented-out asyncio.run(...) calls — invoking the
    # endpoint coroutines directly would bypass FastAPI entirely.)
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)