import logging
from typing import List, Dict, Any, Optional
from datetime import datetime, timedelta
import asyncio
from database import DatabaseManager
from llm_analyzer import AlibabaCloudLLM
from config import Config

logger = logging.getLogger(__name__)

class SentimentAnalyzer:
    """Sentiment-analysis pipeline for news articles.

    Scores articles with an LLM client, persists the per-article results
    through a database manager, and aggregates statistics, trends, and a
    human-readable report from the stored data.
    """

    # Minimum delay between successive LLM API calls (rate limiting).
    _API_CALL_DELAY_SECONDS = 0.5

    def __init__(self, db_manager: "DatabaseManager", llm_analyzer: "AlibabaCloudLLM"):
        """Store the database and LLM collaborators used by all methods.

        Args:
            db_manager: Persistence layer (save/load sentiment records).
            llm_analyzer: LLM client used for sentiment and topic analysis.
        """
        self.db = db_manager
        self.llm = llm_analyzer

    async def analyze_articles(self, articles: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Run sentiment analysis on each article and persist the results.

        Args:
            articles: Article documents; each may carry '_id', 'title',
                and 'content' keys (missing keys default to empty strings).

        Returns:
            One summary dict per article that was analyzed AND saved
            (article_id, sentiment_id, sentiment_label, sentiment_score).
            Articles that raise during analysis are logged and skipped.
        """
        results = []

        for index, article in enumerate(articles):
            # Throttle between consecutive API calls. Sleeping at the top of
            # every iteration (except the first) guarantees the delay also
            # applies after a *failed* article — the original post-loop sleep
            # was skipped by `continue`, allowing unthrottled retries — and
            # avoids a pointless wait after the final article.
            if index:
                await asyncio.sleep(self._API_CALL_DELAY_SECONDS)

            article_id = article.get('_id')
            try:
                # NOTE(review): analyze_sentiment looks like a synchronous
                # call; it will block the event loop while it runs. Consider
                # asyncio.to_thread(...) if the LLM client is blocking.
                sentiment_result = self.llm.analyze_sentiment(
                    article.get('title', '') + ' ' + article.get('content', '')
                )

                # Record to persist; defaults mirror a neutral verdict.
                analysis_data = {
                    'article_id': article_id,
                    'sentiment_label': sentiment_result.get('sentiment_label', '中性'),
                    'sentiment_score': sentiment_result.get('sentiment_score', 0.5),
                    'main_points': sentiment_result.get('main_points', []),
                    'keywords': sentiment_result.get('keywords', []),
                    # Timezone-naive timestamp — assumed local time; TODO confirm.
                    'analysis_time': datetime.now(),
                }

                sentiment_id = self.db.save_sentiment_analysis(
                    str(article_id),
                    analysis_data,
                )

                # Only report articles the database actually accepted.
                if sentiment_id:
                    results.append({
                        'article_id': str(article_id),
                        'sentiment_id': sentiment_id,
                        'sentiment_label': analysis_data['sentiment_label'],
                        'sentiment_score': analysis_data['sentiment_score'],
                    })

            except Exception as e:
                logger.error(f"分析文章失败 {article_id}: {e}")

        return results

    def get_sentiment_statistics(self, days: int = 7) -> Dict[str, Any]:
        """Aggregate sentiment label counts and average scores over a window.

        Args:
            days: Look-back window, in days, passed to the DB aggregation.

        Returns:
            Dict with 'total_articles', per-label 'sentiment_distribution'
            (count + percentage), 'average_scores', and 'analysis_period'.
            Empty dict on failure.
        """
        try:
            stats = self.db.get_sentiment_stats(days)

            total_count = sum(stat['count'] for stat in stats)
            sentiment_distribution = {}
            avg_scores = {}

            for stat in stats:
                label = stat['_id']
                count = stat['count']
                sentiment_distribution[label] = {
                    'count': count,
                    # Guard against division by zero when no articles matched.
                    'percentage': (count / total_count * 100) if total_count > 0 else 0,
                }
                avg_scores[label] = stat.get('avg_score', 0.5)

            return {
                'total_articles': total_count,
                'sentiment_distribution': sentiment_distribution,
                'average_scores': avg_scores,
                'analysis_period': f"{days}天",
            }

        except Exception as e:
            logger.error(f"获取情感统计失败: {e}")
            return {}

    def get_sentiment_trends(self, days: int = 30) -> List[Dict[str, Any]]:
        """Return daily article counts and average sentiment for a window.

        Args:
            days: Look-back window, in days, passed to the DB query.

        Returns:
            List of dicts with 'date' (YYYY-MM-DD), 'article_count', and
            'avg_sentiment', in the order returned by the database.
            Empty list on failure.
        """
        try:
            trend_data = self.db.get_trend_data(days)

            # Each row's '_id' carries year/month/day components from the
            # DB's date grouping; render them as a zero-padded ISO date.
            return [
                {
                    'date': (
                        f"{row['_id']['year']}-"
                        f"{row['_id']['month']:02d}-"
                        f"{row['_id']['day']:02d}"
                    ),
                    'article_count': row['count'],
                    'avg_sentiment': row['avg_sentiment'],
                }
                for row in trend_data
            ]

        except Exception as e:
            logger.error(f"获取情感趋势失败: {e}")
            return []

    def analyze_topic_sentiment(self, articles: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Extract topics from the articles and score each topic's sentiment.

        Args:
            articles: Article documents to mine for topics.

        Returns:
            Dict with a 'topics' list (topic_name, topic_weight, sentiment,
            article_count) and an 'analysis_time' timestamp. Empty dict on
            failure. Topics with no matching articles are omitted.
        """
        try:
            topics = self.llm.extract_topics(articles)

            topic_sentiments = []
            for topic in topics:
                related_articles = self._find_related_articles(
                    articles, topic.get('keywords', [])
                )
                if not related_articles:
                    # No coverage for this topic — nothing to score.
                    continue

                topic_sentiments.append({
                    'topic_name': topic.get('topic_name', ''),
                    'topic_weight': topic.get('topic_weight', 0),
                    'sentiment': self._analyze_topic_sentiment(related_articles),
                    'article_count': len(related_articles),
                })

            return {
                'topics': topic_sentiments,
                # Timezone-naive timestamp — assumed local time; TODO confirm.
                'analysis_time': datetime.now(),
            }

        except Exception as e:
            logger.error(f"分析主题情感失败: {e}")
            return {}

    def _find_related_articles(self, articles: List[Dict[str, Any]], keywords: List[str]) -> List[Dict[str, Any]]:
        """Return articles whose title+content contain any keyword.

        Matching is case-insensitive substring containment; `any()` short-
        circuits on the first hit (the original counted all matches only to
        test the count against zero).
        """
        related = []

        for article in articles:
            content = (article.get('title', '') + ' ' + article.get('content', '')).lower()
            if any(keyword.lower() in content for keyword in keywords):
                related.append(article)

        return related

    def _analyze_topic_sentiment(self, articles: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Score the combined text of a topic's articles with the LLM.

        Args:
            articles: Related articles; empty input yields a neutral result
                without calling the LLM.

        Returns:
            Dict with 'sentiment_label' and 'sentiment_score' (defaults:
            neutral label, 0.5 score).
        """
        if not articles:
            return {'sentiment_label': '中性', 'sentiment_score': 0.5}

        # Concatenate all titles and bodies into one analysis payload.
        combined_text = ' '.join(
            article.get('title', '') + ' ' + article.get('content', '')
            for article in articles
        )

        sentiment_result = self.llm.analyze_sentiment(combined_text)

        return {
            'sentiment_label': sentiment_result.get('sentiment_label', '中性'),
            'sentiment_score': sentiment_result.get('sentiment_score', 0.5),
        }

    def generate_sentiment_report(self, days: int = 7) -> str:
        """Build a Markdown sentiment report for the given window.

        Combines aggregate statistics, the last seven trend rows, and an
        LLM-generated summary of recent articles.

        Args:
            days: Analysis window, in days.

        Returns:
            The report as a Markdown string, or a fixed failure message
            if any step raises.
        """
        try:
            stats = self.get_sentiment_statistics(days)
            trends = self.get_sentiment_trends(days)

            # Most recent articles feed the LLM-generated summary section.
            recent_articles = self.db.get_articles(limit=20)

            report = f"""
# 新能源汽车舆情情感分析报告

## 分析概览
- 分析周期: {days}天
- 总文章数: {stats.get('total_articles', 0)}
- 分析时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}

## 情感分布
"""

            # Per-label distribution rows.
            sentiment_dist = stats.get('sentiment_distribution', {})
            for label, data in sentiment_dist.items():
                report += f"- {label}: {data.get('count', 0)}篇 ({data.get('percentage', 0):.1f}%)\n"

            # Trend section: last 7 data points only.
            if trends:
                report += "\n## 趋势分析\n"
                for trend in trends[-7:]:
                    report += f"- {trend['date']}: {trend['article_count']}篇, 平均情感: {trend['avg_sentiment']:.2f}\n"

            # LLM summary of recent coverage, when any articles exist.
            if recent_articles:
                summary = self.llm.generate_summary(recent_articles)
                report += f"\n## 舆情摘要\n{summary}\n"

            return report

        except Exception as e:
            logger.error(f"生成情感报告失败: {e}")
            return "报告生成失败"
