"""
数据存储模块
Data Storage Module
"""

import json
import csv
import os
import pickle
from typing import List, Dict, Any, Set, Optional
from datetime import datetime
from pathlib import Path


class DataStorage:
    """Persist scraped records as daily JSON or CSV files, with optional
    URL-based deduplication backed by an on-disk pickle cache."""

    def __init__(self, config_manager):
        """Initialize the storage backend from configuration.

        Args:
            config_manager: object exposing ``get(dotted_key, default)``
                for configuration lookups.
        """
        self.config = config_manager
        self.output_dir = self.config.get('storage.output_dir', './data')
        self.format = self.config.get('storage.format', 'json')
        self.enable_dedup = self.config.get('storage.enable_dedup', True)

        # Ensure the output directory exists before any file I/O.
        os.makedirs(self.output_dir, exist_ok=True)

        # URLs of records already saved, used for deduplication;
        # persisted to a hidden pickle file inside the output directory.
        self.existing_urls: Set[str] = set()
        self.dedup_file = os.path.join(self.output_dir, '.dedup_urls.pkl')

        if self.enable_dedup:
            self._load_existing_urls()

    def save_data(self, data: List[Dict[str, Any]], city: str, business_type: str) -> int:
        """Save a batch of records for one city/business-type combination.

        Args:
            data: list of record dicts, each ideally carrying 'detail_url'.
            city: city name, used in the output filename.
            business_type: business type, used in the output filename.

        Returns:
            Number of records actually written (after deduplication).

        Raises:
            ValueError: if the configured format is neither 'json' nor 'csv'.
        """
        if not data:
            return 0

        original_count = len(data)

        if self.enable_dedup:
            data = self._deduplicate_data(data)

        if not data:
            return 0

        # Remember which URLs this batch introduced so the in-memory
        # dedup set can be rolled back if the write fails; otherwise a
        # retry would silently drop these records as "already saved".
        new_urls = {item['detail_url'] for item in data if item.get('detail_url')}

        try:
            if self.format == 'json':
                self._save_json(data, city, business_type)
            elif self.format == 'csv':
                self._save_csv(data, city, business_type)
            else:
                raise ValueError(f"Unsupported format: {self.format}")
        except Exception:
            if self.enable_dedup:
                self.existing_urls.difference_update(new_urls)
            raise

        # Persist the dedup state only after a successful write, so the
        # cache never claims URLs that were not actually saved.
        if self.enable_dedup:
            self._save_dedup_state()

        saved_count = len(data)
        print(f"Processed {original_count} items, saved {saved_count} new items")
        return saved_count

    def _load_existing_urls(self):
        """Load the set of already-saved URLs for deduplication.

        Prefers the pickle cache; falls back to scanning every JSON/CSV
        data file in the output directory, then rebuilds the cache.
        Best-effort: failures only emit warnings.
        """
        if os.path.exists(self.dedup_file):
            try:
                # NOTE: unpickling is only acceptable because this file is
                # written exclusively by _save_dedup_state(); never point
                # dedup_file at untrusted data.
                with open(self.dedup_file, 'rb') as f:
                    cached = pickle.load(f)
                if isinstance(cached, set):
                    self.existing_urls = cached
                    print(f"Loaded {len(self.existing_urls)} existing URLs from cache")
                    return
                # Corrupted/unexpected cache content: fall through and rebuild.
                print("Warning: dedup cache has unexpected content, rebuilding")
            except Exception as e:
                print(f"Warning: Failed to load dedup cache: {e}")

        # No usable cache: rebuild the URL set from existing data files.
        try:
            for filename in os.listdir(self.output_dir):
                filepath = os.path.join(self.output_dir, filename)
                if filename.endswith('.json'):
                    with open(filepath, 'r', encoding='utf-8') as f:
                        existing_data = json.load(f)
                    if isinstance(existing_data, list):
                        for item in existing_data:
                            if isinstance(item, dict) and 'detail_url' in item:
                                self.existing_urls.add(item['detail_url'])
                elif filename.endswith('.csv'):
                    # newline='' is required for correct csv parsing.
                    with open(filepath, 'r', newline='', encoding='utf-8') as f:
                        for row in csv.DictReader(f):
                            if 'detail_url' in row:
                                self.existing_urls.add(row['detail_url'])

            print(f"Loaded {len(self.existing_urls)} existing URLs from data files")
            # Cache the rebuilt set so the scan is not repeated next run.
            self._save_dedup_state()
        except Exception as e:
            print(f"Warning: Failed to load existing URLs: {e}")

    def _save_dedup_state(self):
        """Persist the in-memory URL set to the pickle cache (best-effort)."""
        try:
            with open(self.dedup_file, 'wb') as f:
                pickle.dump(self.existing_urls, f)
        except Exception as e:
            print(f"Warning: Failed to save dedup state: {e}")

    def _deduplicate_data(self, data: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Drop records whose 'detail_url' is missing, empty, or already seen.

        Side effect: every kept URL is added to ``self.existing_urls``.
        """
        unique_data = []
        for item in data:
            detail_url = item.get('detail_url', '')
            if detail_url and detail_url not in self.existing_urls:
                unique_data.append(item)
                self.existing_urls.add(detail_url)
        return unique_data

    def _get_filename(self, city: str, business_type: str, extension: str) -> str:
        """Build the per-day output filename: ``<city>_<type>_<YYYYMMDD>.<ext>``."""
        date_str = datetime.now().strftime('%Y%m%d')
        return f"{city}_{business_type}_{date_str}.{extension}"

    def _save_json(self, data: List[Dict[str, Any]], city: str, business_type: str):
        """Append records to the day's JSON file via read-merge-rewrite."""
        filename = self._get_filename(city, business_type, 'json')
        filepath = os.path.join(self.output_dir, filename)

        # Merge with any records already written today.
        existing_data: List[Dict[str, Any]] = []
        if os.path.exists(filepath):
            try:
                with open(filepath, 'r', encoding='utf-8') as f:
                    loaded = json.load(f)
                # Guard against a corrupted file holding a non-list.
                if isinstance(loaded, list):
                    existing_data = loaded
            except Exception as e:
                print(f"Warning: Failed to load existing data from {filepath}: {e}")

        all_data = existing_data + data

        try:
            with open(filepath, 'w', encoding='utf-8') as f:
                json.dump(all_data, f, ensure_ascii=False, indent=2)
            print(f"Saved {len(data)} items to {filepath}")
        except Exception as e:
            print(f"Error: Failed to save JSON data to {filepath}: {e}")
            raise

    def _save_csv(self, data: List[Dict[str, Any]], city: str, business_type: str):
        """Append records to the day's CSV file.

        When the file already exists, the header on disk is reused so
        appended rows stay aligned with existing columns; batch fields
        missing from that header are dropped (extrasaction='ignore'),
        and fields absent from a row are written as '' (DictWriter's
        default restval).
        """
        if not data:
            return

        filename = self._get_filename(city, business_type, 'csv')
        filepath = os.path.join(self.output_dir, filename)

        # Preferred column order for well-known fields.
        standard_fields = [
            'id', 'title', 'city', 'district', 'rent_range',
            'area_range', 'industry', 'publish_date', 'detail_url',
            'business_type', 'transfer_fee', 'contact_info', 'created_at'
        ]

        # Collect every key present in this batch.
        all_fieldnames = set()
        for item in data:
            all_fieldnames.update(item.keys())

        # Standard fields first (in order), remaining fields alphabetically.
        fieldnames = [f for f in standard_fields if f in all_fieldnames]
        fieldnames.extend(sorted(all_fieldnames - set(standard_fields)))

        file_exists = os.path.exists(filepath)
        if file_exists:
            # Bug fix: reuse the on-disk header when appending, otherwise
            # a batch with different keys would misalign the columns.
            try:
                with open(filepath, 'r', newline='', encoding='utf-8') as f:
                    on_disk_header = next(csv.reader(f), None)
                if on_disk_header:
                    fieldnames = on_disk_header
            except Exception as e:
                print(f"Warning: Failed to read CSV header from {filepath}: {e}")

        try:
            with open(filepath, 'a', newline='', encoding='utf-8') as f:
                writer = csv.DictWriter(f, fieldnames=fieldnames, extrasaction='ignore')
                # Write the header only when creating the file.
                if not file_exists:
                    writer.writeheader()
                writer.writerows(data)
            print(f"Saved {len(data)} items to {filepath}")
        except Exception as e:
            print(f"Error: Failed to save CSV data to {filepath}: {e}")
            raise

    def get_stats(self) -> Dict[str, Any]:
        """Return storage statistics.

        Returns:
            Dict with total file/record counts, per-type and per-city
            file breakdowns, and the size of the dedup URL set.
        """
        stats: Dict[str, Any] = {
            'total_files': 0,
            'total_records': 0,
            'files_by_type': {},
            'files_by_city': {},
            'dedup_urls_count': len(self.existing_urls)
        }

        try:
            for filename in os.listdir(self.output_dir):
                # Skip non-data and hidden files (e.g. the dedup cache).
                if not filename.endswith(('.json', '.csv')) or filename.startswith('.'):
                    continue
                stats['total_files'] += 1

                filepath = os.path.join(self.output_dir, filename)
                record_count = 0

                if filename.endswith('.json'):
                    try:
                        with open(filepath, 'r', encoding='utf-8') as f:
                            record_count = len(json.load(f))
                    except Exception:
                        pass  # unreadable file still counts as a file
                else:  # .csv
                    try:
                        with open(filepath, 'r', newline='', encoding='utf-8') as f:
                            # Subtract 1 for the header row; clamp at 0
                            # for an empty file.
                            record_count = max(0, sum(1 for _ in csv.reader(f)) - 1)
                    except Exception:
                        pass

                stats['total_records'] += record_count

                # Filenames look like <city>_<type>_<date>.<ext>; this
                # parse assumes city/type contain no underscores.
                parts = filename.split('_')
                if len(parts) >= 2:
                    city, business_type = parts[0], parts[1]
                    stats['files_by_type'][business_type] = stats['files_by_type'].get(business_type, 0) + 1
                    stats['files_by_city'][city] = stats['files_by_city'].get(city, 0) + 1

        except Exception as e:
            print(f"Warning: Failed to get storage stats: {e}")

        return stats

    def clear_dedup_cache(self):
        """Forget all seen URLs and delete the on-disk cache file."""
        self.existing_urls.clear()
        if os.path.exists(self.dedup_file):
            try:
                os.remove(self.dedup_file)
                print("Dedup cache cleared")
            except Exception as e:
                print(f"Warning: Failed to remove dedup cache file: {e}")

    def get_existing_files(self, city: Optional[str] = None, business_type: Optional[str] = None) -> List[str]:
        """List data files in the output directory.

        Args:
            city: if given, keep only files for this city.
            business_type: if given, keep only files for this type.

        Returns:
            Sorted list of matching filenames (best-effort; empty on error).
        """
        files = []
        try:
            for filename in os.listdir(self.output_dir):
                if not filename.endswith(('.json', '.csv')) or filename.startswith('.'):
                    continue
                if city and not filename.startswith(f"{city}_"):
                    continue
                if business_type and f"_{business_type}_" not in filename:
                    continue
                files.append(filename)
        except Exception as e:
            print(f"Warning: Failed to list files: {e}")

        return sorted(files)

    def validate_data_format(self, data: List[Dict[str, Any]]) -> List[str]:
        """Check each record for required, non-empty fields.

        Returns:
            A list of human-readable error strings; empty when all
            records are valid.
        """
        errors = []
        required_fields = [
            'id', 'title', 'city', 'district', 'rent_range',
            'area_range', 'industry', 'publish_date', 'detail_url', 'business_type'
        ]

        for i, item in enumerate(data):
            for field in required_fields:
                # Explicit None check: str(None) is 'None', which would
                # otherwise slip past the emptiness test.
                if field not in item or item[field] is None or not str(item[field]).strip():
                    errors.append(f"Item {i}: Missing or empty required field '{field}'")

        return errors