#!/usr/bin/env python3
"""
大众点评爬虫 P4 版本 - 数据管理器
负责数据去重、存储和质量管理
"""

import csv
import logging
import pandas as pd
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Set, Optional, Any, Tuple
import hashlib
import json

logger = logging.getLogger(__name__)

class DataManager:
    """Data manager: deduplicates crawled shop records and persists them to CSV.

    A record's identity is an MD5 hash of (city, secondary_category,
    shop_name). Hashes of previously saved rows are preloaded on startup so
    duplicates are filtered across runs as well as within a single run.
    """

    def __init__(self, output_file: str = "dianping_data_p4.csv", data_dir: str = "data"):
        """Create output/backup directories and preload hashes of saved rows.

        Args:
            output_file: CSV file name, created inside ``data_dir``.
            data_dir: base directory for the output file and its backups.
        """
        self.data_dir = Path(data_dir)
        # parents=True so a nested data_dir (e.g. "out/run1") works too.
        self.data_dir.mkdir(parents=True, exist_ok=True)

        self.output_file = self.data_dir / output_file
        self.backup_dir = self.data_dir / "backups"
        self.backup_dir.mkdir(parents=True, exist_ok=True)

        # Canonical column set every saved record must contain.
        self.core_fields = [
            'city', 'primary_category', 'secondary_category',
            'shop_name', 'avg_price', 'crawl_time', 'data_hash'
        ]

        # Hashes of every shop seen so far (including prior runs).
        self.seen_shops: Set[str] = set()
        # Records accumulated since the last save_data() call.
        self.current_data: List[Dict[str, Any]] = []

        # Running counters; last_save_time stays None until the first save.
        self.stats: Dict[str, Any] = {
            'total_processed': 0,
            'duplicates_filtered': 0,
            'new_records': 0,
            'last_save_time': None
        }

        # Preload hashes from any existing output file.
        self._load_existing_data()

        logger.info(f"[DATA_MANAGER] 初始化完成，输出文件: {self.output_file}")
        logger.info(f"[DATA_MANAGER] 已加载 {len(self.seen_shops)} 条现有记录")

    def _load_existing_data(self):
        """Rebuild the dedup set from the existing output CSV, if present.

        Hashes are recomputed from the key fields (not read from the stored
        data_hash column), so the set stays consistent with the current
        hashing scheme.
        """
        if not self.output_file.exists():
            return
        try:
            df = pd.read_csv(self.output_file)
            for _, row in df.iterrows():
                # Empty CSV cells come back as float NaN; _normalize maps
                # them to '' so reloaded rows hash identically to fresh ones.
                shop_hash = self._generate_shop_hash(
                    row.get('city', ''),
                    row.get('secondary_category', ''),
                    row.get('shop_name', '')
                )
                self.seen_shops.add(shop_hash)

            logger.info(f"[DATA_MANAGER] 从现有文件加载了 {len(self.seen_shops)} 条记录")

        except Exception as e:
            logger.error(f"[DATA_MANAGER] 加载现有数据失败: {e}")
            self.seen_shops = set()

    @staticmethod
    def _normalize(value: Any) -> str:
        """Coerce one key field to a normalized string component.

        None and float NaN (what pandas yields for empty CSV cells) map to
        '', and each field is stripped/lowercased individually. Previously
        only the joined string was stripped, so inner-field whitespace or a
        NaN produced distinct hashes for logically identical shops.
        """
        if value is None:
            return ''
        if isinstance(value, float) and value != value:  # NaN != NaN
            return ''
        return str(value).strip().lower()

    def _generate_shop_hash(self, city: str, category: str, shop_name: str) -> str:
        """Return the MD5 dedup key for (city, category, shop_name)."""
        unique_key = "|".join(
            self._normalize(part) for part in (city, category, shop_name)
        )
        return hashlib.md5(unique_key.encode('utf-8')).hexdigest()

    def is_duplicate(self, shop_data: Dict[str, Any]) -> bool:
        """Return True if this shop was already seen (this run or a prior one)."""
        shop_hash = self._generate_shop_hash(
            shop_data.get('city', ''),
            shop_data.get('secondary_category', ''),
            shop_data.get('shop_name', '')
        )
        return shop_hash in self.seen_shops

    def add_shop_data(self, shop_data: Dict[str, Any]) -> bool:
        """Add one shop record, filtering duplicates.

        NOTE: mutates ``shop_data`` in place — adds crawl_time and data_hash
        and fills missing core fields (callers receive the enriched dict).

        Returns:
            True if the record was new and buffered, False if filtered out.
        """
        self.stats['total_processed'] += 1

        # Compute the dedup key once and reuse it (the original hashed the
        # same fields twice: once in is_duplicate, once again below).
        shop_hash = self._generate_shop_hash(
            shop_data.get('city', ''),
            shop_data.get('secondary_category', ''),
            shop_data.get('shop_name', '')
        )

        if shop_hash in self.seen_shops:
            self.stats['duplicates_filtered'] += 1
            logger.debug(f"[DATA_MANAGER] 过滤重复数据: {shop_data.get('shop_name', 'Unknown')}")
            return False

        # Enrich the record with timestamp and identity hash.
        shop_data['crawl_time'] = datetime.now().isoformat()
        shop_data['data_hash'] = shop_hash

        # Guarantee every core column exists so the CSV schema stays stable.
        for field in self.core_fields:
            shop_data.setdefault(field, '')

        self.current_data.append(shop_data)
        self.seen_shops.add(shop_hash)
        self.stats['new_records'] += 1

        logger.debug(f"[DATA_MANAGER] 添加新数据: {shop_data.get('shop_name', 'Unknown')}")
        return True

    def add_batch_data(self, shops_data: List[Dict[str, Any]], city_name: str = "", category_name: str = "") -> Tuple[int, int]:
        """Add a batch of records; returns (added_count, duplicate_count)."""
        if not shops_data:
            logger.warning(f"[DATA_MANAGER] 没有数据需要添加: {city_name}-{category_name}")
            return 0, 0

        logger.info(f"[DATA_MANAGER] 开始批量添加数据: {city_name}-{category_name} {len(shops_data)}条记录")

        added_count = 0
        duplicate_count = 0

        for shop_data in shops_data:
            if self.add_shop_data(shop_data):
                added_count += 1
            else:
                duplicate_count += 1

        logger.info(f"[DATA_MANAGER] 添加数据: {city_name}-{category_name} {added_count}条记录")
        logger.info(f"[DATA_MANAGER] 批量添加完成: 新增 {added_count}, 重复 {duplicate_count}")
        return added_count, duplicate_count

    def save_data(self, backup: bool = True) -> bool:
        """Append buffered records to the output CSV and clear the buffer.

        Existing rows are re-read and merged with the buffer before writing,
        so the file always contains the full history. A timestamped backup
        of the previous file is taken first (unless ``backup`` is False),
        which also guards against data loss if the re-read fails.

        Returns:
            True on success, False if there was nothing to save or the
            write failed.
        """
        try:
            if not self.current_data:
                logger.warning(f"[DATA_MANAGER] 没有数据需要保存，当前数据量: {len(self.current_data)}")
                logger.warning(f"[DATA_MANAGER] 统计信息: {self.stats}")
                return False

            logger.info(f"[DATA_MANAGER] 准备保存数据: 共{len(self.current_data)}条记录")

            if backup and self.output_file.exists():
                self._create_backup()

            # Merge: previously saved rows first, then the new buffer.
            all_data: List[Dict[str, Any]] = []

            if self.output_file.exists():
                try:
                    existing_df = pd.read_csv(self.output_file)
                    all_data.extend(existing_df.to_dict('records'))
                except Exception as e:
                    # Best effort: the backup above preserves the old file.
                    logger.warning(f"[DATA_MANAGER] 加载现有数据失败: {e}")

            all_data.extend(self.current_data)

            df = pd.DataFrame(all_data)
            df.to_csv(self.output_file, index=False, encoding='utf-8')

            self.stats['last_save_time'] = datetime.now().isoformat()

            logger.info(f"[DATA_MANAGER] 保存数据到: {self.output_file} 共{len(all_data)}条记录")
            logger.info(f"[DATA_MANAGER] 数据保存成功: 总记录数: {len(all_data)}, 新增: {len(self.current_data)}")

            # Buffer is now on disk; reset it.
            self.current_data = []

            return True

        except Exception as e:
            logger.error(f"[DATA_MANAGER] 数据保存失败: {e}")
            return False

    def _create_backup(self):
        """Copy the current output file into backups/ with a timestamped name."""
        try:
            timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
            backup_file = self.backup_dir / f"backup_{timestamp}.csv"

            import shutil
            shutil.copy2(self.output_file, backup_file)

            logger.info(f"[DATA_MANAGER] 创建备份: {backup_file}")

        except Exception as e:
            # Backup failure is logged but never blocks a save.
            logger.error(f"[DATA_MANAGER] 创建备份失败: {e}")

    def get_data_quality_report(self) -> Dict[str, Any]:
        """Build a quality report over the *buffered* (unsaved) records.

        Covers price completeness, city/category distributions and
        dedup-filter statistics.
        """
        if not self.current_data:
            return {'error': '没有数据可分析'}

        total_records = len(self.current_data)

        # Share of records that carry a non-empty avg_price.
        price_complete = sum(1 for shop in self.current_data if shop.get('avg_price'))
        price_rate = (price_complete / total_records) * 100 if total_records > 0 else 0

        city_distribution: Dict[str, int] = {}
        category_distribution: Dict[str, int] = {}

        for shop in self.current_data:
            city = shop.get('city', 'Unknown')
            category = shop.get('secondary_category', 'Unknown')

            city_distribution[city] = city_distribution.get(city, 0) + 1
            category_distribution[category] = category_distribution.get(category, 0) + 1

        return {
            'total_records': total_records,
            'price_completion_rate': round(price_rate, 2),
            'price_complete_count': price_complete,
            'city_distribution': city_distribution,
            'category_distribution': category_distribution,
            'duplicate_filter_stats': {
                'total_processed': self.stats['total_processed'],
                'duplicates_filtered': self.stats['duplicates_filtered'],
                'new_records': self.stats['new_records'],
                # max(..., 1) guards the zero-processed division.
                'duplicate_rate': round((self.stats['duplicates_filtered'] / max(self.stats['total_processed'], 1)) * 100, 2)
            }
        }

    def validate_data_quality(self, min_price_rate: float = 0.90) -> Tuple[bool, str]:
        """Check buffered data against a minimum price-completeness ratio.

        Args:
            min_price_rate: required fraction (0..1) of records with a price.

        Returns:
            (passed, human-readable message).
        """
        report = self.get_data_quality_report()

        if 'error' in report:
            return False, report['error']

        price_rate = report['price_completion_rate'] / 100

        if price_rate >= min_price_rate:
            return True, f"数据质量良好，价格完整率: {report['price_completion_rate']}%"
        else:
            return False, f"数据质量不达标，价格完整率: {report['price_completion_rate']}% < {min_price_rate*100}%"

    def export_summary_report(self) -> str:
        """Write the quality report to a timestamped JSON file.

        Returns:
            Path of the written report as a string, or '' on failure.
        """
        report = self.get_data_quality_report()
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        report_file = self.data_dir / f"data_quality_report_{timestamp}.json"

        try:
            with open(report_file, 'w', encoding='utf-8') as f:
                json.dump(report, f, ensure_ascii=False, indent=2)

            logger.info(f"[DATA_MANAGER] 数据质量报告已导出: {report_file}")
            return str(report_file)

        except Exception as e:
            logger.error(f"[DATA_MANAGER] 导出报告失败: {e}")
            return ""

    def get_current_stats(self) -> Dict[str, Any]:
        """Return a snapshot of buffer size, dedup-set size and counters."""
        return {
            'current_data_count': len(self.current_data),
            'seen_shops_count': len(self.seen_shops),
            'output_file': str(self.output_file),
            'stats': self.stats.copy()
        }

    def clear_current_data(self):
        """Drop buffered (unsaved) records without touching the dedup set."""
        self.current_data = []
        logger.info("[DATA_MANAGER] 当前数据缓存已清空")
