import requests
import json
import logging
from typing import Dict, List, Any, Optional
from config import Config

# Module-level logger named after this module's import path (standard logging pattern).
logger = logging.getLogger(__name__)

class AlibabaCloudLLM:
    """Client for the Alibaba Cloud Qwen ("qwen-max") LLM HTTP API.

    Provides sentiment analysis, topic extraction, trend analysis and
    summary generation for Chinese-language text about new-energy
    vehicles (NEV). All public methods degrade gracefully: on any API or
    parsing failure they log the error and return a neutral default
    instead of raising, so callers never need their own try/except.
    """

    def __init__(self, api_key: str):
        """Store credentials and prepare static request headers.

        Args:
            api_key: Bearer token for the Alibaba Cloud endpoint
                (endpoint URL itself comes from project ``Config``).
        """
        self.api_key = api_key
        self.endpoint = Config.ALIBABA_CLOUD_ENDPOINT
        self.headers = {
            'Authorization': f'Bearer {api_key}',
            'Content-Type': 'application/json'
        }

    def analyze_sentiment(self, text: str) -> Dict[str, Any]:
        """Run LLM sentiment analysis on a single piece of text.

        Args:
            text: Text (about new-energy vehicles) to analyze.

        Returns:
            Dict with keys ``sentiment_label``, ``sentiment_score``,
            ``main_points``, ``keywords``; a neutral default result on
            any failure.
        """
        # The prompt asks the model to answer in a fixed JSON shape that
        # _parse_sentiment_response() knows how to extract.
        prompt = f"""
请对以下关于新能源汽车的文本进行情感分析，返回JSON格式结果：
文本：{text}

请分析：
1. 情感倾向（正面/负面/中性）
2. 情感强度（0-1之间的数值）
3. 主要观点
4. 关键词提取

返回格式：
{{
    "sentiment_label": "正面/负面/中性",
    "sentiment_score": 0.8,
    "main_points": ["观点1", "观点2"],
    "keywords": ["关键词1", "关键词2"]
}}
"""

        try:
            response = self._call_llm(prompt)
            return self._parse_sentiment_response(response)
        except Exception as e:
            logger.error(f"情感分析失败: {e}")
            return self._default_sentiment_result()

    def extract_topics(self, articles: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Extract the main topics discussed across a set of articles.

        Args:
            articles: Article dicts; only the ``content`` field is used,
                and only the first 10 articles are considered.

        Returns:
            List of topic dicts (``topic_name``, ``topic_weight``,
            ``keywords``, ``summary``); empty list on failure.
        """
        # Concatenate content of at most the first 10 articles, then cap
        # the prompt payload at 2000 chars to stay within token limits.
        combined_text = " ".join([article.get('content', '') for article in articles[:10]])

        prompt = f"""
请分析以下关于新能源汽车的文本，提取主要主题和话题：

文本内容：{combined_text[:2000]}...

请返回JSON格式的主题分析结果：
{{
    "topics": [
        {{
            "topic_name": "主题名称",
            "topic_weight": 0.8,
            "keywords": ["关键词1", "关键词2"],
            "summary": "主题摘要"
        }}
    ]
}}
"""

        try:
            response = self._call_llm(prompt)
            return self._parse_topics_response(response)
        except Exception as e:
            logger.error(f"主题提取失败: {e}")
            return []

    def analyze_trends(self, articles: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Analyze opinion trends over the most recent articles.

        Args:
            articles: Article dicts in chronological order; only the
                last 20 entries' ``content`` fields are used.

        Returns:
            Trend dict (direction, hot topics, emerging themes,
            sentiment trend, key insights); a neutral default on failure.
        """
        # Use only the most recent 20 articles, capped at 2000 chars.
        recent_articles = articles[-20:]
        combined_text = " ".join([article.get('content', '') for article in recent_articles])

        prompt = f"""
请分析以下关于新能源汽车的文本，进行趋势分析：

文本内容：{combined_text[:2000]}...

请返回JSON格式的趋势分析结果：
{{
    "trend_direction": "上升/下降/稳定",
    "hot_topics": ["热门话题1", "热门话题2"],
    "emerging_themes": ["新兴主题1", "新兴主题2"],
    "sentiment_trend": "积极/消极/中性",
    "key_insights": ["洞察1", "洞察2"]
}}
"""

        try:
            response = self._call_llm(prompt)
            return self._parse_trends_response(response)
        except Exception as e:
            logger.error(f"趋势分析失败: {e}")
            return self._default_trends_result()

    def generate_summary(self, articles: List[Dict[str, Any]]) -> str:
        """Generate a short (<=300 chars requested) public-opinion summary.

        Args:
            articles: Article dicts; the first 5 are treated as the most
                important, using title plus the first 500 chars of content.

        Returns:
            Summary text, or the fixed failure message on error.
        """
        # Title + truncated content of the first 5 articles, capped at
        # 3000 chars for the prompt.
        important_articles = articles[:5]
        combined_text = " ".join([article.get('title', '') + ' ' + article.get('content', '')[:500]
                                for article in important_articles])

        prompt = f"""
请基于以下关于新能源汽车的新闻和讨论，生成一份舆情分析摘要：

内容：{combined_text[:3000]}...

请生成一份简洁的舆情摘要，包括：
1. 当前热点话题
2. 主要观点和态度
3. 发展趋势
4. 关键洞察

摘要长度控制在300字以内。
"""

        try:
            response = self._call_llm(prompt)
            return response.strip()
        except Exception as e:
            logger.error(f"生成摘要失败: {e}")
            return "摘要生成失败"

    def _call_llm(self, prompt: str) -> str:
        """POST a single-turn chat request to the Qwen API.

        Args:
            prompt: User-role message content.

        Returns:
            The model's text output, or ``""`` when the response body
            lacks the expected ``output.text`` shape.

        Raises:
            requests.exceptions.RequestException: On transport/HTTP errors.
            Exception: On any other failure while handling the response.
        """
        data = {
            "model": "qwen-max",
            "input": {
                "messages": [
                    {
                        "role": "user",
                        "content": prompt
                    }
                ]
            },
            "parameters": {
                "temperature": 0.7,
                "max_tokens": 2000
            }
        }

        try:
            response = requests.post(
                self.endpoint,
                headers=self.headers,
                json=data,
                timeout=30
            )
            response.raise_for_status()

            result = response.json()
            if 'output' in result and 'text' in result['output']:
                return result['output']['text']
            else:
                # Unexpected payload shape: log it and return an empty
                # string so downstream parsers fall back to defaults.
                logger.error(f"API响应格式错误: {result}")
                return ""

        except requests.exceptions.RequestException as e:
            logger.error(f"API调用失败: {e}")
            raise
        except Exception as e:
            logger.error(f"处理API响应失败: {e}")
            raise

    def _parse_sentiment_response(self, response: str) -> Dict[str, Any]:
        """Parse the sentiment-analysis LLM response.

        Tries to extract a JSON object from the response text; if that
        fails (or required fields are missing), falls back to keyword
        based text analysis, and finally to the neutral default.
        """
        try:
            # Slice from the first '{' to the last '}' to strip any
            # surrounding model chatter before JSON parsing.
            json_start = response.find('{')
            json_end = response.rfind('}') + 1

            if json_start != -1 and json_end > json_start:
                json_str = response[json_start:json_end]
                result = json.loads(json_str)

                # Accept only responses carrying the required fields.
                if all(key in result for key in ['sentiment_label', 'sentiment_score']):
                    return result

            # JSON extraction failed — fall back to plain-text heuristics.
            return self._parse_sentiment_text(response)

        except Exception as e:
            logger.error(f"解析情感分析响应失败: {e}")
            return self._default_sentiment_result()

    def _parse_sentiment_text(self, text: str) -> Dict[str, Any]:
        """Heuristic sentiment fallback: count positive/negative keywords.

        Returns a result dict in the same shape as the JSON path, with
        coarse scores (0.7 positive / 0.3 negative / 0.5 neutral).
        """
        result = {
            "sentiment_label": "中性",
            "sentiment_score": 0.5,
            "main_points": [],
            "keywords": []
        }

        # Simple lexicon-based polarity vote.
        positive_words = ['好', '优秀', '推荐', '满意', '喜欢', '支持', '赞']
        negative_words = ['差', '不好', '问题', '失望', '反对', '批评']

        text_lower = text.lower()
        positive_count = sum(1 for word in positive_words if word in text_lower)
        negative_count = sum(1 for word in negative_words if word in text_lower)

        if positive_count > negative_count:
            result["sentiment_label"] = "正面"
            result["sentiment_score"] = 0.7
        elif negative_count > positive_count:
            result["sentiment_label"] = "负面"
            result["sentiment_score"] = 0.3

        return result

    def _parse_topics_response(self, response: str) -> List[Dict[str, Any]]:
        """Parse the topic-extraction LLM response.

        The prompt instructs the model to answer with an object of the
        form ``{"topics": [...]}``, so that envelope is tried first; a
        bare JSON array is accepted as a fallback. Returns an empty list
        when nothing parseable is found.
        """
        # Preferred shape: the documented {"topics": [...]} envelope.
        obj_start = response.find('{')
        obj_end = response.rfind('}') + 1
        if obj_start != -1 and obj_end > obj_start:
            try:
                parsed = json.loads(response[obj_start:obj_end])
                if isinstance(parsed, dict) and isinstance(parsed.get('topics'), list):
                    return parsed['topics']
            except json.JSONDecodeError:
                pass  # fall through to the bare-array heuristic

        # Fallback: the model returned a bare JSON array of topics.
        arr_start = response.find('[')
        arr_end = response.rfind(']') + 1
        if arr_start != -1 and arr_end > arr_start:
            try:
                topics = json.loads(response[arr_start:arr_end])
                if isinstance(topics, list):
                    return topics
            except json.JSONDecodeError as e:
                logger.error(f"解析主题响应失败: {e}")

        return []

    def _parse_trends_response(self, response: str) -> Dict[str, Any]:
        """Parse the trend-analysis LLM response.

        Extracts the first-'{'-to-last-'}' slice as JSON; returns the
        neutral default result if extraction or parsing fails.
        """
        try:
            json_start = response.find('{')
            json_end = response.rfind('}') + 1

            if json_start != -1 and json_end > json_start:
                json_str = response[json_start:json_end]
                return json.loads(json_str)

            return self._default_trends_result()

        except Exception as e:
            logger.error(f"解析趋势响应失败: {e}")
            return self._default_trends_result()

    def _default_sentiment_result(self) -> Dict[str, Any]:
        """Neutral sentiment result used whenever analysis fails."""
        return {
            "sentiment_label": "中性",
            "sentiment_score": 0.5,
            "main_points": [],
            "keywords": []
        }

    def _default_trends_result(self) -> Dict[str, Any]:
        """Neutral trend result used whenever analysis fails."""
        return {
            "trend_direction": "稳定",
            "hot_topics": [],
            "emerging_themes": [],
            "sentiment_trend": "中性",
            "key_insights": []
        }
