"""
数据存储管理器
用于管理原始HTML存储、增量爬取、断点续传等
"""
import hashlib
import json
import logging
from datetime import datetime, timedelta
from pathlib import Path
from typing import Optional, Dict, Any

from django.core.exceptions import ValidationError
from django.db import transaction
from django.utils import timezone

from spider.models import (
    CrawlTask,
    RawHTML,
    ListItem,
    DetailItem,
    SubDetailItem,
)
from spider.config.settings import (
    HTML_STORAGE_DIR,
    SAVE_HTML_TO_FILE,
    INCREMENTAL_CRAWL,
    CRAWL_SINCE_DAYS,
    CHECKPOINT_FILE,
    RESUME_FROM_CHECKPOINT,
)
from spider.data.validators import DataValidator

logger = logging.getLogger(__name__)


class StorageManager:
    """Data storage manager.

    Persists raw HTML, list/detail/sub-detail items and crawl-task state,
    implements the incremental-crawl check, and saves/loads checkpoints so
    interrupted crawls can resume.
    """

    def __init__(self):
        # Create the on-disk HTML directory eagerly so later file writes
        # cannot fail on a missing parent directory.
        self.html_storage_dir = Path(HTML_STORAGE_DIR)
        self.html_storage_dir.mkdir(parents=True, exist_ok=True)

    def get_or_create_task(
        self,
        url: str,
        task_type: str,
        status: str = 'pending',
        parent_task: Optional["CrawlTask"] = None,
    ) -> "CrawlTask":
        """Fetch the crawl task for ``url``, creating it if necessary.

        Args:
            url: Task URL (the unique lookup key).
            task_type: Task type (e.g. 'list', 'detail').
            status: Initial status, applied only when a new task is created.
            parent_task: Optional parent task to link.

        Returns:
            The existing or newly created CrawlTask.
        """
        defaults: Dict[str, Any] = {
            'task_type': task_type,
            'status': status,
        }
        if parent_task:
            defaults['parent_task'] = parent_task

        task, created = CrawlTask.objects.get_or_create(
            url=url,
            defaults=defaults,
        )

        # Backfill parent_task on a pre-existing task that has none yet;
        # get_or_create ignores `defaults` when the row already exists.
        if not created and parent_task and not task.parent_task:
            task.parent_task = parent_task
            task.save(update_fields=['parent_task'])
            logger.debug(f"Updated parent_task for existing task: {task_type} - {url}")

        if created:
            parent_info = f" (parent: {parent_task.task_type if parent_task else 'none'})"
            logger.info(f"Created new task: {task_type} - {url}{parent_info}")
        else:
            logger.debug(f"Task already exists: {task_type} - {url}")

        return task

    def save_raw_html(
        self,
        url: str,
        html_content: str,
        task: "CrawlTask",
    ) -> "RawHTML":
        """Store a page's raw HTML in the database (and optionally on disk).

        Args:
            url: Page URL (unique key for RawHTML).
            html_content: Raw HTML text; must pass DataValidator checks.
            task: Crawl task this page belongs to.

        Returns:
            The created or updated RawHTML row.

        Raises:
            ValueError: If the HTML content fails validation.
        """
        is_valid, error = DataValidator.validate_html_content(html_content)
        if not is_valid:
            raise ValueError(f"Invalid HTML content: {error}")

        # Optional on-disk copy, controlled by SAVE_HTML_TO_FILE.
        file_path = None
        if SAVE_HTML_TO_FILE:
            file_path = self._save_html_to_file(url, html_content)

        raw_html, created = RawHTML.objects.update_or_create(
            url=url,
            defaults={
                'html_content': html_content,
                'content_length': len(html_content),
                'task': task,
                'file_path': file_path or '',
            }
        )

        logger.info(f"Saved raw HTML: {url} ({len(html_content)} bytes)")
        return raw_html

    def save_list_item(
        self,
        data: Dict[str, Any],
        task: "CrawlTask",
    ) -> "ListItem":
        """Store a list item, keyed by its URL.

        Args:
            data: Parsed list-item fields; must pass DataValidator checks.
            task: Crawl task the item was found by.

        Returns:
            The created or updated ListItem row.

        Raises:
            ValueError: If the payload fails validation.
        """
        is_valid, error = DataValidator.validate_list_item(data)
        if not is_valid:
            raise ValueError(f"Invalid list item data: {error}")

        # Publish time is optional; unparseable values become None.
        publish_time = None
        if 'publish_time' in data and data['publish_time']:
            publish_time = self._parse_datetime(data['publish_time'])

        list_item, created = ListItem.objects.update_or_create(
            url=data['url'],
            defaults={
                'title': data.get('title', ''),
                'publish_time': publish_time,
                'province': data.get('province', ''),
                'source_platform': data.get('source_platform', ''),
                'business_type': data.get('business_type', ''),
                'info_type': data.get('info_type', ''),
                'industry': data.get('industry', ''),
                'task': task,
            }
        )

        logger.info(f"Saved list item: {list_item.title[:50]}")
        return list_item

    def save_detail_item(
        self,
        data: Dict[str, Any],
        list_item: "ListItem",
        task: "CrawlTask",
    ) -> "DetailItem":
        """Store a detail item, keyed by project_number.

        Verifies that ``list_item.url`` and ``task.url`` match after URL
        normalization, then upserts the DetailItem and runs model-level
        validation inside a transaction.

        Args:
            data: Parsed detail-item fields; must pass DataValidator checks.
            list_item: The list item this detail belongs to.
            task: The crawl task that produced this detail.

        Returns:
            The created or updated DetailItem row.

        Raises:
            ValueError: If the payload fails validation or the URLs differ.
            ValidationError: If model validation (full_clean) fails.
        """
        is_valid, error = DataValidator.validate_detail_item(data)
        if not is_valid:
            raise ValueError(f"Invalid detail item data: {error}")

        # URL consistency check (after normalization).
        normalized_list_url = DetailItem._normalize_url(list_item.url)
        normalized_task_url = DetailItem._normalize_url(task.url)

        if normalized_list_url != normalized_task_url:
            error_msg = (
                f"List item URL and task URL do not match (after normalization):\n"
                f"List item URL: {list_item.url}\n"
                f"Task URL: {task.url}\n"
                f"Normalized list URL: {normalized_list_url}\n"
                f"Normalized task URL: {normalized_task_url}"
            )
            logger.error(error_msg)
            raise ValueError(error_msg)

        try:
            # update_or_create does not run clean(), so validate explicitly.
            # atomic() ensures a validation failure rolls the upsert back
            # instead of leaving an invalid row committed (previously the
            # row stayed persisted when full_clean raised).  The redundant
            # post-validation save() was dropped: full_clean() only
            # validates, it does not modify fields.
            with transaction.atomic():
                detail_item, created = DetailItem.objects.update_or_create(
                    project_number=data['project_number'],
                    defaults={
                        'info_source': data.get('info_source', ''),
                        'list_item': list_item,
                        'task': task,
                        'sub_detail_urls': data.get('sub_detail_urls', []),
                        'url': normalized_list_url,  # Set the url field explicitly
                    }
                )
                detail_item.full_clean()
        except ValidationError as e:
            logger.error(f"Validation failed for detail item: {e}")
            raise
        except Exception as e:
            logger.error(f"Failed to save detail item: {e}")
            raise

        action = "Created" if created else "Updated"
        logger.info(
            f"{action} detail item: {detail_item.project_number} "
            f"(linked to list item ID: {list_item.id}, title: {list_item.title[:50]}, "
            f"URL: {detail_item.url})"
        )
        return detail_item

    def save_sub_detail_item(
        self,
        url: str,
        html_content: str,
        detail_item: Optional["DetailItem"],
        task: "CrawlTask",
    ) -> "SubDetailItem":
        """Store a sub-detail page, keyed by its URL.

        Token counters are reset to zero on every upsert.

        Args:
            url: Sub-detail page URL (unique key).
            html_content: Raw HTML; must pass DataValidator checks.
            detail_item: Parent detail item, if known.
            task: The crawl task that fetched this page.

        Returns:
            The created or updated SubDetailItem row.

        Raises:
            ValueError: If the HTML content fails validation.
        """
        is_valid, error = DataValidator.validate_html_content(html_content)
        if not is_valid:
            raise ValueError(f"Invalid HTML content: {error}")

        sub_detail_item, created = SubDetailItem.objects.update_or_create(
            url=url,
            defaults={
                'html_content': html_content,
                'detail_item': detail_item,
                'task': task,
                'classify_input_tokens': 0,
                'classify_output_tokens': 0,
                'process_input_tokens': 0,
                'process_output_tokens': 0,
                'ai_input_tokens': 0,
                'ai_output_tokens': 0,
            }
        )

        logger.info(f"Saved sub detail item: {url}")
        return sub_detail_item

    def update_task_status(
        self,
        task: "CrawlTask",
        status: str,
        error_message: Optional[str] = None,
    ):
        """Update a task's status; stamps crawled_at when status is 'crawled'."""
        task.status = status
        if error_message:
            task.error_message = error_message
        if status == 'crawled':
            task.crawled_at = timezone.now()
        task.save()
        logger.debug(f"Updated task status: {task.url} -> {status}")

    def should_crawl_url(self, url: str, task_type: str) -> bool:
        """Decide whether a URL should be crawled (incremental-crawl logic).

        Returns False only when incremental crawling is on and the URL has
        already been crawled successfully; otherwise True.
        """
        if not INCREMENTAL_CRAWL:
            return True

        # Skip URLs whose task already finished successfully.
        task = CrawlTask.objects.filter(url=url).first()
        if task and task.status == 'crawled':
            logger.debug(f"URL already crawled: {url}")
            return False

        # For list pages a time-window filter could be applied here
        # (CRAWL_SINCE_DAYS); not implemented yet.
        if task_type == 'list':
            pass

        return True

    def get_pending_tasks(
        self,
        task_type: Optional[str] = None,
        limit: Optional[int] = None,
    ):
        """Return tasks awaiting execution, oldest first.

        Includes both 'pending' and 'failed' tasks so failures are retried.

        Args:
            task_type: Optional filter on task type.
            limit: Optional cap on the number of tasks returned.

        Returns:
            A CrawlTask queryset.
        """
        queryset = CrawlTask.objects.filter(status__in=['pending', 'failed'])

        if task_type:
            queryset = queryset.filter(task_type=task_type)

        # Oldest tasks first.
        queryset = queryset.order_by('created_at')

        if limit:
            queryset = queryset[:limit]

        return queryset

    def _save_html_to_file(self, url: str, html_content: str) -> str:
        """Write HTML to a local file; returns the file path as a string.

        The file name combines a timestamp with the MD5 of the URL (used
        only as a filename hash, not for security).
        """
        url_hash = hashlib.md5(url.encode()).hexdigest()
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        filename = f"{timestamp}_{url_hash}.html"
        file_path = self.html_storage_dir / filename

        file_path.write_text(html_content, encoding='utf-8')
        logger.debug(f"Saved HTML to file: {file_path}")

        return str(file_path)

    def _parse_datetime(self, dt: Any) -> Optional[datetime]:
        """Parse a datetime from a string (datetime inputs pass through).

        Tries several common formats; surrounding whitespace is ignored.
        Returns None when ``dt`` is neither a datetime nor a parseable
        string.

        NOTE(review): the result is naive; if the project runs with
        USE_TZ=True, Django warns when this is stored — confirm whether
        values should be made timezone-aware here.
        """
        if isinstance(dt, datetime):
            return dt

        if isinstance(dt, str):
            candidate = dt.strip()
            formats = (
                '%Y-%m-%d %H:%M:%S',
                '%Y-%m-%d',
                '%Y/%m/%d %H:%M:%S',
                '%Y/%m/%d',
            )
            for fmt in formats:
                try:
                    return datetime.strptime(candidate, fmt)
                except ValueError:
                    continue

        return None

    def save_checkpoint(self, data: Dict[str, Any]):
        """Persist a checkpoint to CHECKPOINT_FILE (resume support).

        No-op unless RESUME_FROM_CHECKPOINT is enabled.
        """
        if not RESUME_FROM_CHECKPOINT:
            return

        checkpoint_file = Path(CHECKPOINT_FILE)
        checkpoint_file.parent.mkdir(parents=True, exist_ok=True)

        with open(checkpoint_file, 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=2)

        logger.info(f"Saved checkpoint to {checkpoint_file}")

    def load_checkpoint(self) -> Optional[Dict[str, Any]]:
        """Load the checkpoint, or None if disabled, missing, or unreadable.

        Load errors are deliberately swallowed (logged) so a corrupt
        checkpoint degrades to a fresh crawl instead of crashing.
        """
        if not RESUME_FROM_CHECKPOINT:
            return None

        checkpoint_file = Path(CHECKPOINT_FILE)
        if not checkpoint_file.exists():
            return None

        try:
            with open(checkpoint_file, 'r', encoding='utf-8') as f:
                data = json.load(f)
            logger.info(f"Loaded checkpoint from {checkpoint_file}")
            return data
        except Exception as e:
            logger.error(f"Failed to load checkpoint: {e}")
            return None

