#!/usr/bin/env python3
"""
数据去重器 - 批量爬取专用
负责检查历史数据，避免重复爬取已有数据
"""

import logging
import json
import csv
import os
import hashlib
from typing import Dict, List, Set, Optional, Any
from pathlib import Path
import pandas as pd
from difflib import SequenceMatcher

logger = logging.getLogger(__name__)

class DataDeduplicator:
    """In-memory deduplicator for crawled shop records.

    Recursively loads previously crawled data (``*.json`` and ``*.csv``) from a
    history directory, then answers "have we seen this shop before?" using two
    checks: an exact MD5 hash over (name, address, phone), and a fuzzy
    name/address similarity match via ``difflib.SequenceMatcher``.
    """

    def __init__(self, history_data_dir: str):
        """Initialize the deduplicator and eagerly load historical data.

        Args:
            history_data_dir: Directory scanned recursively for JSON/CSV files
                containing previously crawled records.
        """
        self.history_data_dir = Path(history_data_dir)
        # city -> category -> list of stored record dicts
        self.existing_data: Dict[str, Dict[str, List[Dict[str, str]]]] = {}
        # MD5 hashes of (shop_name, address, phone) for O(1) exact matching
        self.data_hashes: Set[str] = set()
        # SequenceMatcher ratio at/above which two shop names count as equal
        self.similarity_threshold = 0.9

        logger.info(f"[DEDUPLICATOR] 初始化数据去重器，历史数据目录: {history_data_dir}")

        # Load history up-front so is_duplicate() is usable immediately.
        self.load_historical_data()

    def load_historical_data(self):
        """Scan the history directory and index every JSON and CSV record."""
        if not self.history_data_dir.exists():
            logger.warning(f"[DEDUPLICATOR] 历史数据目录不存在: {self.history_data_dir}")
            return

        logger.info("[DEDUPLICATOR] 开始加载历史数据...")

        total_loaded = 0

        # A single unreadable file must not abort the whole scan, so each
        # file is loaded best-effort with a warning on failure.
        for file_path in self.history_data_dir.rglob("*.json"):
            try:
                total_loaded += self._load_json_file(file_path)
            except Exception as e:
                logger.warning(f"[DEDUPLICATOR] 加载文件失败 {file_path}: {e}")

        for file_path in self.history_data_dir.rglob("*.csv"):
            try:
                total_loaded += self._load_csv_file(file_path)
            except Exception as e:
                logger.warning(f"[DEDUPLICATOR] 加载CSV文件失败 {file_path}: {e}")

        logger.info(f"[DEDUPLICATOR] ✅ 历史数据加载完成，共加载 {total_loaded} 条记录")
        logger.info(f"[DEDUPLICATOR] 数据分布: {self._get_data_distribution()}")

    def _load_json_file(self, file_path: Path) -> int:
        """Load one JSON file (a record dict or a list of record dicts).

        Returns:
            Number of records actually indexed.
        """
        with open(file_path, 'r', encoding='utf-8') as f:
            data = json.load(f)

        loaded_count = 0

        if isinstance(data, list):
            for item in data:
                if self._add_data_item(item):
                    loaded_count += 1
        elif isinstance(data, dict):
            if self._add_data_item(data):
                loaded_count += 1

        logger.debug(f"[DEDUPLICATOR] 从 {file_path.name} 加载 {loaded_count} 条记录")
        return loaded_count

    def _load_csv_file(self, file_path: Path) -> int:
        """Load one CSV file, trying UTF-8 first and falling back to GBK.

        Returns:
            Number of records actually indexed.
        """
        try:
            df = pd.read_csv(file_path, encoding='utf-8')
        except UnicodeDecodeError:
            # Chinese-origin exports are frequently GBK-encoded.
            df = pd.read_csv(file_path, encoding='gbk')

        loaded_count = 0

        # Note: missing cells arrive as float NaN here; _add_data_item
        # normalizes them via _clean_field.
        for _, row in df.iterrows():
            if self._add_data_item(row.to_dict()):
                loaded_count += 1

        logger.debug(f"[DEDUPLICATOR] 从 {file_path.name} 加载 {loaded_count} 条记录")
        return loaded_count

    @staticmethod
    def _clean_field(value: Any) -> str:
        """Coerce a raw field value to a plain string.

        pandas represents missing CSV cells as float NaN, which is *truthy*
        and breaks both hashing (f-string yields "nan") and the ``.lower()``
        calls in similarity matching. Map None/NaN to '' and stringify any
        other non-string value.
        """
        if value is None:
            return ''
        # NaN is the only float that is not equal to itself (avoids a math import).
        if isinstance(value, float) and value != value:
            return ''
        return value if isinstance(value, str) else str(value)

    def _add_data_item(self, item: Dict[str, Any]) -> bool:
        """Index a single record for future duplicate checks.

        Returns:
            True if the record was stored, False if it was skipped
            (no shop name) or malformed.
        """
        try:
            # 'name' is accepted as a legacy alias for 'shop_name'.
            shop_name = self._clean_field(item.get('shop_name', item.get('name', '')))
            city = self._clean_field(item.get('city', ''))
            category = self._clean_field(item.get('category', ''))
            address = self._clean_field(item.get('address', ''))
            phone = self._clean_field(item.get('phone', ''))

            # A record without a shop name cannot be deduplicated meaningfully.
            if not shop_name:
                return False

            data_hash = self._generate_hash(shop_name, address, phone)
            self.data_hashes.add(data_hash)

            # Organize records by city and category for scoped similarity scans.
            self.existing_data.setdefault(city, {}).setdefault(category, []).append({
                'shop_name': shop_name,
                'address': address,
                'phone': phone,
                'hash': data_hash,
            })

            return True

        except Exception as e:
            logger.debug(f"[DEDUPLICATOR] 添加数据项失败: {e}")
            return False

    def _generate_hash(self, shop_name: str, address: str, phone: str) -> str:
        """Return a case-insensitive MD5 fingerprint of the key fields."""
        content = f"{shop_name}|{address}|{phone}".lower().strip()
        return hashlib.md5(content.encode('utf-8')).hexdigest()

    def _get_data_distribution(self) -> Dict[str, Any]:
        """Summarize record counts per city and category.

        Returns:
            Mapping of city -> {'total': int, 'categories': {...}} plus a
            top-level 'total' key with the overall count. NOTE: a city
            literally named 'total' would collide with that summary key.
        """
        distribution = {}
        total_count = 0

        for city, categories in self.existing_data.items():
            city_categories = {category: len(items) for category, items in categories.items()}
            city_count = sum(city_categories.values())

            distribution[city] = {
                'total': city_count,
                'categories': city_categories
            }
            total_count += city_count

        distribution['total'] = total_count
        return distribution

    def is_duplicate(self, item: Dict[str, Any]) -> bool:
        """Check whether a record was already seen.

        Strategy: exact hash match first (cheap), then a fuzzy scan over
        records stored under the same city/category.

        Returns:
            True if a duplicate is found; False otherwise (including on
            malformed input, which is treated as "not a duplicate").
        """
        try:
            shop_name = self._clean_field(item.get('shop_name', item.get('name', '')))
            address = self._clean_field(item.get('address', ''))
            phone = self._clean_field(item.get('phone', ''))

            if not shop_name:
                return False

            # Exact hash match.
            data_hash = self._generate_hash(shop_name, address, phone)
            if data_hash in self.data_hashes:
                logger.debug(f"[DEDUPLICATOR] 发现重复数据（精确匹配）: {shop_name}")
                return True

            # Fuzzy match, scoped to the same city/category bucket.
            city = self._clean_field(item.get('city', ''))
            category = self._clean_field(item.get('category', ''))

            for existing_item in self.existing_data.get(city, {}).get(category, []):
                if self._is_similar(item, existing_item):
                    logger.debug(f"[DEDUPLICATOR] 发现重复数据（相似匹配）: {shop_name}")
                    return True

            return False

        except Exception as e:
            logger.warning(f"[DEDUPLICATOR] 重复检查失败: {e}")
            return False

    def _is_similar(self, item1: Dict[str, Any], item2: Dict[str, Any]) -> bool:
        """Decide whether two records describe the same shop.

        Two records are similar when their names reach the configured
        threshold, or when names are moderately similar (>= 0.7) and the
        addresses are highly similar (>= 0.8).
        """
        try:
            name1 = self._clean_field(item1.get('shop_name', item1.get('name', ''))).lower().strip()
            name2 = self._clean_field(item2.get('shop_name', '')).lower().strip()

            if not name1 or not name2:
                return False

            name_similarity = SequenceMatcher(None, name1, name2).ratio()

            if name_similarity >= self.similarity_threshold:
                return True

            # Borderline names: fall back to comparing addresses.
            if name_similarity >= 0.7:
                addr1 = self._clean_field(item1.get('address', '')).lower().strip()
                addr2 = self._clean_field(item2.get('address', '')).lower().strip()

                if addr1 and addr2:
                    if SequenceMatcher(None, addr1, addr2).ratio() >= 0.8:
                        return True

            return False

        except Exception as e:
            logger.debug(f"[DEDUPLICATOR] 相似度检查失败: {e}")
            return False

    def add_new_data(self, item: Dict[str, Any]):
        """Index a freshly crawled record so later items dedupe against it."""
        self._add_data_item(item)

    def get_existing_count(self, city: str, category: str) -> int:
        """Return the number of stored records for one city/category pair."""
        return len(self.existing_data.get(city, {}).get(category, []))

    def get_total_existing_count(self) -> int:
        """Return the total number of stored records across all cities."""
        return sum(
            len(items)
            for categories in self.existing_data.values()
            for items in categories.values()
        )

    def get_city_existing_count(self, city: str) -> int:
        """Return the number of stored records for one city."""
        return sum(len(items) for items in self.existing_data.get(city, {}).values())

    def get_deduplication_report(self) -> Dict[str, Any]:
        """Return a summary of the deduplicator's current state."""
        return {
            'total_existing_data': self.get_total_existing_count(),
            'cities_count': len(self.existing_data),
            'data_distribution': self._get_data_distribution(),
            'hash_count': len(self.data_hashes),
            'similarity_threshold': self.similarity_threshold
        }

    def clear_cache(self):
        """Drop all indexed records and hashes."""
        self.existing_data.clear()
        self.data_hashes.clear()
        logger.info("[DEDUPLICATOR] 缓存已清空")

    def save_deduplication_log(self, output_file: str):
        """Write the deduplication report to *output_file* as pretty JSON."""
        try:
            report = self.get_deduplication_report()

            with open(output_file, 'w', encoding='utf-8') as f:
                json.dump(report, f, ensure_ascii=False, indent=2)

            logger.info(f"[DEDUPLICATOR] 去重日志已保存: {output_file}")

        except Exception as e:
            logger.error(f"[DEDUPLICATOR] 保存去重日志失败: {e}")
