import json
import os
from datetime import datetime
import random
from typing import Dict, Any, List, Tuple, Optional
from openai import AsyncOpenAI
from core.config import settings

class AIService:
    """Async wrapper around the Tongyi (DashScope) OpenAI-compatible chat API.

    Loads prompt templates from ``core/prompts.json`` at construction time and
    exposes high-level operations: content generation, continuation,
    adaptation, and assessment. All model calls are asynchronous and degrade
    gracefully (error text is returned instead of raising to callers).
    """

    def __init__(self) -> None:
        # Load prompt templates shipped next to this package (core/prompts.json).
        prompt_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "core", "prompts.json")
        with open(prompt_path, "r", encoding="utf-8") as f:
            self.prompts: Dict[str, Any] = json.load(f)

        # Async OpenAI-compatible client pointed at the DashScope endpoint.
        self.client = AsyncOpenAI(
            api_key=settings.TONGYI_API_KEY,
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        )
        self.model = "qwen-max-latest"
        # Vendor-specific request options forwarded verbatim on every call.
        self.extra_body: Dict[str, Any] = {"enable_search": False}

    def _format_datetime(self) -> str:
        """Return the current local time as an ISO-8601-like string (no timezone)."""
        return datetime.now().strftime("%Y-%m-%dT%H:%M:%S")

    def _prepare_reference_part(self, reference_title: Optional[str], reference_content: Optional[str]) -> str:
        """Build the style-reference prompt snippet.

        Returns a non-empty snippet only when BOTH title and content are
        provided (and truthy); otherwise an empty string.
        """
        if reference_title and reference_content:
            return f"参考风格标题：{reference_title}\n参考风格正文：{reference_content}"
        return ""

    def _prepare_type_platform(self, type_: Optional[str], platform: Optional[str]) -> Tuple[str, str]:
        """Format optional content type / platform into prompt fragments.

        Either fragment is an empty string when the corresponding value is
        absent, so the prompt template simply omits it.
        """
        type_str = f"的{type_}类型" if type_ else ""
        platform_str = f"{platform}平台" if platform else ""
        return type_str, platform_str

    async def _call_ai_model(self, system_prompt: str, user_prompt: str,
                             response_format: Optional[Dict[str, str]] = None) -> Tuple[str, int]:
        """Call the chat-completions endpoint and return (content, total_tokens).

        Args:
            system_prompt: System-role message text.
            user_prompt: User-role message text.
            response_format: Optional format spec (e.g. ``{"type": "json_object"}``).

        On any failure the error message itself is returned as the content with
        a token count of 0 — callers treat an unparseable payload as a degraded
        response rather than crashing.
        """
        try:
            params: Dict[str, Any] = {
                "model": self.model,
                "messages": [
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_prompt}
                ],
                "extra_body": self.extra_body
            }
            print(f"输入内容：{params}")
            if response_format:
                params["response_format"] = response_format

            completion = await self.client.chat.completions.create(**params)
            content = completion.choices[0].message.content
            total_tokens = completion.usage.total_tokens
            print(f"输出内容:{content}")
            return content, total_tokens
        except Exception as e:
            # Log and degrade: surface the error text as the "content".
            print(f"AI模型调用失败: {str(e)}")
            return f"AI模型调用失败: {str(e)}", 0

    async def generate_content(self, type_: str, platform: str, description: str,
                         reference_title: Optional[str] = None,
                         reference_content: Optional[str] = None) -> Tuple[str, str, List[str], int]:
        """Generate new content, returning (title, content, hashtags, total_tokens).

        Requests a JSON-object response from the model; if the payload is not
        valid JSON, falls back to treating the first line as the title and the
        remainder as the body, with an empty hashtag list.
        """
        system_prompt = self.prompts["content"]["generate"]["system"]
        reference_part = self._prepare_reference_part(reference_title, reference_content)
        user_prompt = self.prompts["content"]["generate"]["user_template"].format(
            platform=platform,
            type=type_,
            description=description,
            reference_part=reference_part
        )

        # Ask the model for a structured JSON object.
        result, total_tokens = await self._call_ai_model(system_prompt, user_prompt, {"type": "json_object"})

        try:
            response_data = json.loads(result)
            title = response_data.get("title", "")
            content = response_data.get("content", "")
            hashtag = response_data.get("hashtag", [])

            return title, content, hashtag, total_tokens
        except json.JSONDecodeError:
            # Best-effort fallback when the model ignored the JSON format.
            print("解析json失败")
            lines = result.split('\n')
            title = lines[0] if lines else ""
            content = '\n'.join(lines[1:]) if len(lines) > 1 else result
            hashtag = []

            return title, content, hashtag, total_tokens

    async def continuation_content(self, original_content: str, description: str,
                            type_: Optional[str] = None, platform: Optional[str] = None,
                            reference_title: Optional[str] = None,
                            reference_content: Optional[str] = None) -> Tuple[str, int]:
        """Continue existing content, returning (content, total_tokens).

        ``type_`` and ``platform`` are optional; when absent the corresponding
        prompt fragments are simply omitted.
        """
        system_prompt = self.prompts["content"]["continuation"]["system"]
        reference_part = self._prepare_reference_part(reference_title, reference_content)
        type_str, platform_str = self._prepare_type_platform(type_, platform)

        user_prompt = self.prompts["content"]["continuation"]["user_template"].format(
            original_content=original_content,
            platform=platform_str,
            type=type_str,
            description=description,
            reference_part=reference_part
        )

        # Plain-text response; no JSON format requested.
        return await self._call_ai_model(system_prompt, user_prompt, None)

    async def adaptation_content(self, original_content: str, content_to_be_adapted: str, description: str,
                          type_: Optional[str] = None, platform: Optional[str] = None,
                          reference_title: Optional[str] = None,
                          reference_content: Optional[str] = None) -> Tuple[str, int]:
        """Adapt a piece of content in the context of the original, returning (content, total_tokens).

        Mirrors ``continuation_content`` but also passes the text to adapt.
        """
        system_prompt = self.prompts["content"]["adaptation"]["system"]
        reference_part = self._prepare_reference_part(reference_title, reference_content)
        type_str, platform_str = self._prepare_type_platform(type_, platform)

        user_prompt = self.prompts["content"]["adaptation"]["user_template"].format(
            original_content=original_content,
            content_to_be_adapted=content_to_be_adapted,
            platform=platform_str,
            type=type_str,
            description=description,
            reference_part=reference_part
        )

        # Plain-text response; no JSON format requested.
        return await self._call_ai_model(system_prompt, user_prompt, None)

    async def assess_content(self, type_: str, platform: str, content: str) -> Tuple[bool, List[str], str]:
        """Assess content quality, returning (is_recommend, tags, recommend_reason).

        Any failure (API error or malformed JSON) yields the safe default
        ``(False, [], "评估失败: ...")`` instead of raising.
        """
        system_prompt = self.prompts["content"]["assessment"]["system"]
        user_prompt = self.prompts["content"]["assessment"]["user_template"].format(
            platform=platform,
            type=type_,
            content=content
        )

        try:
            # Request a structured JSON verdict from the model.
            result, _ = await self._call_ai_model(system_prompt, user_prompt, {"type": "json_object"})

            response_data = json.loads(result)

            is_recommend = response_data.get("is_recommend", False)
            tags = response_data.get("tags", [])
            recommend_reason = response_data.get("recommend_reason", "")

            return is_recommend, tags, recommend_reason

        except Exception as e:
            print(f"AI模型评估调用失败: {str(e)}")
            # Safe default: do not recommend, no tags, error text as reason.
            return False, [], f"评估失败: {str(e)}"