#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
数据存储模块
负责保存和管理收集到的内容
"""

import os
import json
import csv
import logging
from datetime import datetime, timedelta
from pathlib import Path
from typing import List, Dict, Any, Optional

class StorageManager:
    """Storage manager for collected content.

    Persists content items under ``data_dir`` as JSON/CSV/HTML files and
    maintains an ``index.json`` recording the collection history per site.
    """

    def __init__(self, data_dir: str = "data"):
        self.data_dir = Path(data_dir)
        # parents=True so a nested path such as "out/data" also works.
        self.data_dir.mkdir(parents=True, exist_ok=True)
        self.logger = logging.getLogger(__name__)

        # One sub-directory per supported output format.
        for sub in ("json", "csv", "html"):
            (self.data_dir / sub).mkdir(exist_ok=True)

    def save_content(self, site_name: str, content_items: List[Dict[str, Any]],
                    formats: Optional[List[str]] = None) -> bool:
        """Save content items in the requested formats.

        Note: each dict in ``content_items`` is mutated in place to carry
        ``site_name`` and ``collected_at`` metadata so that the metadata
        appears in every output file.

        Args:
            site_name: Name of the site the items were collected from.
            content_items: List of content dicts.
            formats: Output formats, any of 'json'/'csv'/'html'.
                Defaults to ['json', 'csv'].

        Returns:
            bool: True if every requested format was written successfully.
        """
        if not content_items:
            self.logger.info(f"{site_name}: 没有内容需要保存")
            return True

        if formats is None:
            formats = ['json', 'csv']

        try:
            timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')

            # Attach collection metadata (deliberately mutates the caller's
            # items so the metadata ends up in the saved files).
            for item in content_items:
                item['site_name'] = site_name
                item['collected_at'] = timestamp

            success = True

            if 'json' in formats:
                success &= self._save_as_json(site_name, content_items, timestamp)

            if 'csv' in formats:
                success &= self._save_as_csv(site_name, content_items, timestamp)

            # HTML is the human-readable report format.
            if 'html' in formats:
                success &= self._save_as_html(site_name, content_items, timestamp)

            # Record this run in the index even if some formats failed.
            self._update_index(site_name, len(content_items), timestamp)

            if success:
                self.logger.info(f"{site_name}: 成功保存 {len(content_items)} 条内容")
            else:
                self.logger.error(f"{site_name}: 保存过程中出现错误")

            return success

        except Exception as e:
            self.logger.error(f"{site_name}: 保存失败: {e}")
            return False

    def _save_as_json(self, site_name: str, content_items: List[Dict[str, Any]], timestamp: str) -> bool:
        """Write the items to ``<data_dir>/json/<site>_<timestamp>.json``."""
        try:
            filename = f"{site_name}_{timestamp}.json"
            filepath = self.data_dir / "json" / filename

            with open(filepath, 'w', encoding='utf-8') as f:
                json.dump(content_items, f, ensure_ascii=False, indent=2)

            return True
        except Exception as e:
            self.logger.error(f"保存JSON失败: {e}")
            return False

    def _save_as_csv(self, site_name: str, content_items: List[Dict[str, Any]], timestamp: str) -> bool:
        """Write the items to ``<data_dir>/csv/<site>_<timestamp>.csv``.

        The column set is the union of all item keys, sorted, so rows with
        differing keys still fit one header; missing values stay empty.
        """
        try:
            filename = f"{site_name}_{timestamp}.csv"
            filepath = self.data_dir / "csv" / filename

            if not content_items:
                return True

            # Union of all keys across items, in a deterministic order.
            fieldnames = set()
            for item in content_items:
                fieldnames.update(item.keys())
            fieldnames = sorted(fieldnames)

            # utf-8-sig: BOM makes Excel detect the encoding correctly.
            with open(filepath, 'w', newline='', encoding='utf-8-sig') as f:
                writer = csv.DictWriter(f, fieldnames=fieldnames)
                writer.writeheader()
                writer.writerows(content_items)

            return True
        except Exception as e:
            self.logger.error(f"保存CSV失败: {e}")
            return False

    def _save_as_html(self, site_name: str, content_items: List[Dict[str, Any]], timestamp: str) -> bool:
        """Write a human-readable HTML report of the items."""
        try:
            filename = f"{site_name}_{timestamp}.html"
            filepath = self.data_dir / "html" / filename

            html_content = self._generate_html_report(site_name, content_items, timestamp)

            with open(filepath, 'w', encoding='utf-8') as f:
                f.write(html_content)

            return True
        except Exception as e:
            self.logger.error(f"保存HTML失败: {e}")
            return False

    def _generate_html_report(self, site_name: str, content_items: List[Dict[str, Any]], timestamp: str) -> str:
        """Build a self-contained HTML report for the collected items.

        All dynamic values are HTML-escaped so scraped content cannot break
        the markup or inject script into the report.
        """
        from html import escape  # local import: module-level deps unchanged

        safe_site = escape(str(site_name))
        # Collect fragments and join once at the end (avoids quadratic +=).
        parts = [f"""
<!DOCTYPE html>
<html lang="zh-CN">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>{safe_site} - 内容收集报告</title>
    <style>
        body {{
            font-family: 'Microsoft YaHei', Arial, sans-serif;
            line-height: 1.6;
            margin: 0;
            padding: 20px;
            background-color: #f5f5f5;
        }}
        .container {{
            max-width: 1200px;
            margin: 0 auto;
            background: white;
            padding: 20px;
            border-radius: 8px;
            box-shadow: 0 2px 10px rgba(0,0,0,0.1);
        }}
        .header {{
            border-bottom: 2px solid #007acc;
            padding-bottom: 20px;
            margin-bottom: 30px;
        }}
        .header h1 {{
            color: #007acc;
            margin: 0;
        }}
        .meta {{
            color: #666;
            font-size: 14px;
            margin-top: 10px;
        }}
        .item {{
            border: 1px solid #ddd;
            border-radius: 6px;
            margin-bottom: 20px;
            padding: 20px;
            background: #fafafa;
        }}
        .item-title {{
            font-size: 18px;
            font-weight: bold;
            color: #333;
            margin-bottom: 10px;
        }}
        .item-content {{
            color: #555;
            margin-bottom: 15px;
            line-height: 1.8;
        }}
        .item-meta {{
            font-size: 12px;
            color: #888;
            border-top: 1px solid #eee;
            padding-top: 10px;
        }}
        .item-link {{
            color: #007acc;
            text-decoration: none;
        }}
        .item-link:hover {{
            text-decoration: underline;
        }}
        .stats {{
            background: #e7f3ff;
            padding: 15px;
            border-radius: 6px;
            margin-bottom: 20px;
        }}
    </style>
</head>
<body>
    <div class="container">
        <div class="header">
            <h1>{safe_site} - 内容收集报告</h1>
            <div class="meta">
                收集时间: {timestamp}<br>
                内容数量: {len(content_items)} 条
            </div>
        </div>
        
        <div class="stats">
            <strong>统计信息:</strong><br>
            • 总计收集 {len(content_items)} 条内容<br>
            • 有链接的内容: {sum(1 for item in content_items if item.get('link'))} 条<br>
            • 有日期的内容: {sum(1 for item in content_items if item.get('date'))} 条
        </div>
"""]

        # One card per content item.
        for i, item in enumerate(content_items, 1):
            raw_content = str(item.get('content', '无内容'))
            link = str(item.get('link', ''))

            # Truncate long bodies first, then escape, so entities are
            # never cut in half.
            if len(raw_content) > 500:
                raw_content = raw_content[:500] + '...'

            title = escape(str(item.get('title', '无标题')))
            content = escape(raw_content)
            date = escape(str(item.get('date', '')))
            safe_link = escape(link)  # escape() also quotes " for href use
            extracted = escape(str(item.get('extracted_at', '')))

            parts.append(f"""
        <div class="item">
            <div class="item-title">{i}. {title}</div>
            <div class="item-content">{content}</div>
            <div class="item-meta">
                {f'发布时间: {date}<br>' if date else ''}
                {f'链接: <a href="{safe_link}" class="item-link" target="_blank">{safe_link}</a><br>' if link else ''}
                提取时间: {extracted}
            </div>
        </div>
""")

        parts.append("""
    </div>
</body>
</html>
""")

        return "".join(parts)

    def _update_index(self, site_name: str, item_count: int, timestamp: str) -> None:
        """Append this collection run to ``index.json`` (best-effort)."""
        try:
            index_file = self.data_dir / "index.json"

            # Load the existing index, if any.
            if index_file.exists():
                with open(index_file, 'r', encoding='utf-8') as f:
                    index = json.load(f)
            else:
                index = {}

            index.setdefault(site_name, []).append({
                'timestamp': timestamp,
                'item_count': item_count,
                'date': datetime.now().isoformat()
            })

            # Keep only the most recent 50 runs per site.
            index[site_name] = index[site_name][-50:]

            with open(index_file, 'w', encoding='utf-8') as f:
                json.dump(index, f, ensure_ascii=False, indent=2)

        except Exception as e:
            self.logger.error(f"更新索引失败: {e}")

    def get_last_update_time(self, site_name: str) -> Optional[str]:
        """Return the ISO date of the site's last collection run, or None."""
        try:
            index_file = self.data_dir / "index.json"
            if not index_file.exists():
                return None

            with open(index_file, 'r', encoding='utf-8') as f:
                index = json.load(f)

            site_records = index.get(site_name, [])
            if site_records:
                return site_records[-1].get('date')

            return None
        except Exception as e:
            self.logger.error(f"获取最后更新时间失败: {e}")
            return None

    def get_site_history(self, site_name: str) -> List[Dict[str, Any]]:
        """Return the recorded collection history for a site ([] if none)."""
        try:
            index_file = self.data_dir / "index.json"
            if not index_file.exists():
                return []

            with open(index_file, 'r', encoding='utf-8') as f:
                index = json.load(f)

            return index.get(site_name, [])
        except Exception as e:
            self.logger.error(f"获取网站历史失败: {e}")
            return []

    def cleanup_old_files(self, days: int = 30) -> None:
        """Delete saved files older than ``days`` days.

        The age comes from the ``_YYYYMMDD_HHMMSS`` suffix in the file name;
        files whose names do not parse are left untouched.
        """
        try:
            cutoff_date = datetime.now() - timedelta(days=days)

            for format_dir in ('json', 'csv', 'html'):
                dir_path = self.data_dir / format_dir
                if not dir_path.exists():
                    continue

                for file_path in dir_path.glob('*'):
                    if not file_path.is_file():
                        continue
                    # Parse the timestamp from the file name.
                    try:
                        parts = file_path.stem.split('_')
                        if len(parts) >= 2:
                            timestamp_str = '_'.join(parts[-2:])
                            file_date = datetime.strptime(timestamp_str, '%Y%m%d_%H%M%S')

                            if file_date < cutoff_date:
                                file_path.unlink()
                                self.logger.info(f"删除旧文件: {file_path}")
                    except (ValueError, IndexError):
                        # Unparseable timestamp: skip the file.
                        continue

            self.logger.info(f"清理完成，删除了 {days} 天前的文件")

        except Exception as e:
            self.logger.error(f"清理旧文件失败: {e}")

    def export_all_data(self, output_file: str, format: str = 'json') -> None:
        """Merge every saved JSON file and export to one file.

        Args:
            output_file: Destination path.
            format: 'json' (dict keyed by site) or 'csv' (one flat table).
                Name kept (despite shadowing the builtin) for API
                compatibility with keyword callers.
        """
        try:
            all_data = {}

            # Gather every per-run JSON file, grouped by site.
            json_dir = self.data_dir / "json"
            if json_dir.exists():
                for json_file in json_dir.glob('*.json'):
                    with open(json_file, 'r', encoding='utf-8') as f:
                        data = json.load(f)

                    # File names are "<site>_<YYYYMMDD>_<HHMMSS>.json"; strip
                    # only the two timestamp parts so site names containing
                    # '_' survive intact.
                    site_name = json_file.stem.rsplit('_', 2)[0]
                    all_data.setdefault(site_name, []).extend(data)

            output_path = Path(output_file)

            if format.lower() == 'json':
                with open(output_path, 'w', encoding='utf-8') as f:
                    json.dump(all_data, f, ensure_ascii=False, indent=2)
            elif format.lower() == 'csv':
                # Flatten every site's items into a single table.
                all_items = []
                for items in all_data.values():
                    all_items.extend(items)

                if all_items:
                    fieldnames = set()
                    for item in all_items:
                        fieldnames.update(item.keys())
                    fieldnames = sorted(fieldnames)

                    with open(output_path, 'w', newline='', encoding='utf-8-sig') as f:
                        writer = csv.DictWriter(f, fieldnames=fieldnames)
                        writer.writeheader()
                        writer.writerows(all_items)

            self.logger.info(f"数据导出完成: {output_path}")

        except Exception as e:
            self.logger.error(f"导出数据失败: {e}")