import logging
from typing import List, Dict, Tuple
from datetime import datetime, timedelta
from collections import Counter, defaultdict
import matplotlib.pyplot as plt
import numpy as np
import io
import base64
import json

from app import db
from app.models import Article, SocialMediaPost

class LocationAnalyzer:
    """Analyzes geographic signals in collected articles and social posts.

    Extracts Chinese province/city mentions via naive substring matching
    and produces distribution charts, per-location sentiment breakdowns,
    and ECharts 'china' map configuration data.
    """

    # Sentiment labels produced by the pipeline; anything else is ignored.
    SENTIMENT_LABELS = ('正面', '负面', '中性')

    def __init__(self):
        self.logger = logging.getLogger(__name__)

        # Province-level regions (provinces, municipalities, autonomous
        # regions, and SARs).
        self.provinces = [
            '北京', '天津', '上海', '重庆', '河北', '山西', '辽宁', '吉林', '黑龙江',
            '江苏', '浙江', '安徽', '福建', '江西', '山东', '河南', '湖北', '湖南',
            '广东', '海南', '四川', '贵州', '云南', '陕西', '甘肃', '青海', '台湾',
            '内蒙古', '广西', '西藏', '宁夏', '新疆', '香港', '澳门'
        ]

        # Major Chinese cities.
        self.cities = [
            '北京', '上海', '广州', '深圳', '天津', '重庆', '成都', '杭州', '武汉',
            '西安', '南京', '郑州', '长沙', '沈阳', '青岛', '宁波', '东莞', '无锡',
            '苏州', '昆明', '大连', '福州', '厦门', '哈尔滨', '济南', '温州', '长春',
            '石家庄', '南宁', '合肥', '南昌', '贵阳', '太原', '乌鲁木齐', '昆山', '常州',
            '佛山', '南通', '兰州', '保定', '惠州', '中山', '泉州', '西宁', '银川',
            '呼和浩特', '拉萨', '海口', '汕头', '潍坊', '烟台', '临沂', '唐山', '镇江'
        ]

    def extract_locations(self, text: str) -> List[str]:
        """
        Extract location names mentioned in the given text.

        Matching is naive substring search, so e.g. '上海南站' also matches
        the province '海南'; acceptable for coarse statistics.

        Args:
            text: text content (None or empty yields an empty result)

        Returns:
            Matched locations, province matches first, without duplicates.
        """
        if not text:  # guard against None/empty title or content
            return []

        locations = []

        # Province-level matches first (the list itself has no duplicates).
        for province in self.provinces:
            if province in text:
                locations.append(province)

        # City matches; skip names already recorded as provinces
        # (北京/上海/天津/重庆 appear in both lists).
        for city in self.cities:
            if city in text and city not in locations:
                locations.append(city)

        return locations

    @staticmethod
    def _article_text(article) -> str:
        """Concatenate an article's title and content, tolerating NULL columns."""
        return f"{article.title or ''} {article.content or ''}"

    @staticmethod
    def _figure_to_base64() -> str:
        """Render the current matplotlib figure to a base64 PNG string.

        The figure is always closed, even if rendering fails, so repeated
        failures cannot leak figures.
        """
        buffer = io.BytesIO()
        try:
            # NOTE(review): default matplotlib fonts may not render CJK
            # labels -- confirm a Chinese font is configured globally.
            plt.savefig(buffer, format='png')
            buffer.seek(0)
            return base64.b64encode(buffer.getvalue()).decode('utf-8')
        finally:
            plt.close()

    def analyze_location_distribution(self, days: int = 30) -> Dict:
        """
        Analyze the geographic distribution of recent content.

        Args:
            days: look-back window in days

        Returns:
            Dict with 'locations', 'counts' (top 20 by frequency) and a
            base64-encoded PNG 'chart_image'; empty dict on failure.
        """
        try:
            # NOTE(review): compares collected_at against a naive local
            # datetime -- confirm the column is stored in local time.
            start_date = datetime.now() - timedelta(days=days)

            # Social posts carry an explicit location column.
            social_posts = SocialMediaPost.query.filter(
                SocialMediaPost.collected_at >= start_date,
                SocialMediaPost.location.isnot(None)
            ).all()

            location_counter = Counter()
            for post in social_posts:
                if post.location:
                    location_counter[post.location] += 1

            # Articles have no location column; mine their text instead.
            articles = Article.query.filter(
                Article.collected_at >= start_date
            ).all()
            for article in articles:
                location_counter.update(
                    self.extract_locations(self._article_text(article))
                )

            top_locations = location_counter.most_common(20)
            locations = [loc for loc, _ in top_locations]
            counts = [count for _, count in top_locations]

            # Bar chart of the top locations.
            plt.figure(figsize=(12, 8))
            plt.bar(locations, counts)
            plt.title('地域分布')
            plt.xlabel('地域')
            plt.ylabel('数量')
            plt.xticks(rotation=45)
            plt.tight_layout()
            image_base64 = self._figure_to_base64()

            return {
                'locations': locations,
                'counts': counts,
                'chart_image': image_base64
            }

        except Exception as e:
            self.logger.error(f"分析地域分布失败: {str(e)}")
            return {}

    def analyze_location_sentiment(self, days: int = 30) -> Dict:
        """
        Analyze per-location sentiment counts of recent content.

        Args:
            days: look-back window in days

        Returns:
            Dict with 'locations' (top 10 by volume), per-sentiment count
            lists, and a base64 PNG 'chart_image'; empty dict on failure.
        """
        try:
            start_date = datetime.now() - timedelta(days=days)

            social_posts = SocialMediaPost.query.filter(
                SocialMediaPost.collected_at >= start_date,
                SocialMediaPost.location.isnot(None),
                SocialMediaPost.sentiment.isnot(None)
            ).all()

            # location -> {'正面': n, '负面': n, '中性': n}
            location_sentiment = defaultdict(
                lambda: {label: 0 for label in self.SENTIMENT_LABELS}
            )
            for post in social_posts:
                # Skip unexpected sentiment labels instead of raising
                # (a KeyError here previously aborted the whole analysis).
                if post.location and post.sentiment in self.SENTIMENT_LABELS:
                    location_sentiment[post.location][post.sentiment] += 1

            articles = Article.query.filter(
                Article.collected_at >= start_date,
                Article.sentiment.isnot(None)
            ).all()
            for article in articles:
                if article.sentiment not in self.SENTIMENT_LABELS:
                    continue
                for location in self.extract_locations(self._article_text(article)):
                    location_sentiment[location][article.sentiment] += 1

            # Rank locations by total mention volume and keep the top 10.
            location_totals = {
                loc: sum(sentiments.values())
                for loc, sentiments in location_sentiment.items()
            }
            top_locations = sorted(
                location_totals.items(), key=lambda x: x[1], reverse=True
            )[:10]
            top_location_names = [loc for loc, _ in top_locations]

            positive_counts = [location_sentiment[loc]['正面'] for loc in top_location_names]
            negative_counts = [location_sentiment[loc]['负面'] for loc in top_location_names]
            neutral_counts = [location_sentiment[loc]['中性'] for loc in top_location_names]

            # Stacked bar chart: positive at the bottom, then neutral,
            # negative on top.
            fig, ax = plt.subplots(figsize=(12, 8))
            bar_width = 0.6
            x = np.arange(len(top_location_names))
            ax.bar(x, positive_counts, bar_width, label='正面', color='green')
            ax.bar(x, neutral_counts, bar_width, bottom=positive_counts,
                   label='中性', color='blue')
            ax.bar(x, negative_counts, bar_width,
                   bottom=[p + n for p, n in zip(positive_counts, neutral_counts)],
                   label='负面', color='red')

            ax.set_title('地域情感分布')
            ax.set_xlabel('地域')
            ax.set_ylabel('数量')
            ax.set_xticks(x)
            ax.set_xticklabels(top_location_names, rotation=45)
            ax.legend()
            plt.tight_layout()

            image_base64 = self._figure_to_base64()

            return {
                'locations': top_location_names,
                'positive_counts': positive_counts,
                'negative_counts': negative_counts,
                'neutral_counts': neutral_counts,
                'chart_image': image_base64
            }

        except Exception as e:
            self.logger.error(f"分析地域情感分布失败: {str(e)}")
            return {}

    def generate_map_data(self, days: int = 30) -> Dict:
        """
        Build ECharts 'china' map configuration from the distribution data.

        Args:
            days: look-back window in days

        Returns:
            Dict with 'map_config' (JSON string) and 'map_data' (list of
            {name, value} entries); empty dict on failure.
        """
        try:
            location_data = self.analyze_location_distribution(days)
            locations = location_data.get('locations', [])
            counts = location_data.get('counts', [])

            # Pair names with counts; fall back to 0 if counts is shorter.
            map_data = [
                {'name': name, 'value': counts[i] if i < len(counts) else 0}
                for i, name in enumerate(locations)
            ]

            map_config = {
                'title': {
                    'text': '舆情地域分布',
                    'subtext': f'最近{days}天数据',
                    'left': 'center'
                },
                'tooltip': {
                    'trigger': 'item'
                },
                'visualMap': {
                    # Scale the heat map to the observed maximum.
                    'min': 0,
                    'max': max(counts) if counts else 10,
                    'text': ['高', '低'],
                    'calculable': True
                },
                'series': [{
                    'name': '舆情数量',
                    'type': 'map',
                    'map': 'china',
                    'roam': True,
                    'label': {
                        'show': True
                    },
                    'data': map_data
                }]
            }

            return {
                'map_config': json.dumps(map_config),
                'map_data': map_data
            }

        except Exception as e:
            self.logger.error(f"生成地图数据失败: {str(e)}")
            return {}