from urllib.parse import urlparse, urlunparse

from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db import models
from django.utils import timezone


class CrawlTask(models.Model):
    """Crawl task model: one URL to fetch plus its scheduling state.

    Tasks form a hierarchy via ``parent_task`` (list page -> detail page ->
    sub-detail page) and move through the lifecycle states declared in
    ``STATUS_CHOICES``.
    """
    # Lifecycle states: pending -> crawling -> crawled | failed.
    # First element is stored in the DB; second is the display label.
    STATUS_CHOICES = [
        ('pending', '待爬取'),
        ('crawling', '爬取中'),
        ('crawled', '已爬取'),
        ('failed', '失败'),
    ]
    
    # Target URL; unique=True also deduplicates task creation at the DB level.
    url = models.URLField(max_length=255, unique=True, verbose_name='URL')
    # Which kind of page this task fetches: list, detail, or sub-detail page.
    task_type = models.CharField(
        max_length=20,
        choices=[
            ('list', '列表页'),
            ('detail', '详情页'),
            ('sub_detail', '子详情页'),
        ],
        verbose_name='任务类型'
    )
    status = models.CharField(
        max_length=20,
        choices=STATUS_CHOICES,
        default='pending',
        verbose_name='状态'
    )
    # Attempts made so far vs. the per-task retry budget.
    retry_count = models.IntegerField(default=0, verbose_name='重试次数')
    max_retries = models.IntegerField(default=3, verbose_name='最大重试次数')
    created_at = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
    updated_at = models.DateTimeField(auto_now=True, verbose_name='更新时间')
    # Timestamp of a completed fetch; stays NULL until the task is crawled.
    crawled_at = models.DateTimeField(null=True, blank=True, verbose_name='爬取时间')
    error_message = models.TextField(blank=True, verbose_name='错误信息')
    # Task that spawned this one; SET_NULL preserves children when the
    # parent task row is deleted.
    parent_task = models.ForeignKey(
        'self',
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        related_name='child_tasks',
        verbose_name='上级任务'
    )
    
    class Meta:
        db_table = 'spider_crawl_task'
        verbose_name = '爬取任务'
        verbose_name_plural = '爬取任务'
        # (status, task_type) presumably serves a scheduler query such as
        # "pending tasks of type X" — confirm against the consumer's filters.
        indexes = [
            models.Index(fields=['status', 'task_type']),
            models.Index(fields=['created_at']),
        ]
    
    def __str__(self):
        # Truncate long URLs so admin/list displays stay readable.
        return f"{self.task_type} - {self.url[:50]}"


class RawHTML(models.Model):
    """Raw HTML storage: the unparsed page body fetched for one CrawlTask."""
    url = models.URLField(max_length=255, unique=True, verbose_name='URL')
    html_content = models.TextField(verbose_name='HTML内容')
    content_length = models.IntegerField(verbose_name='内容长度')
    task = models.OneToOneField(
        CrawlTask,
        on_delete=models.CASCADE,
        related_name='raw_html',
        verbose_name='关联任务'
    )
    created_at = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
    file_path = models.CharField(
        max_length=500,
        blank=True,
        verbose_name='本地文件路径'
    )
    
    class Meta:
        db_table = 'spider_raw_html'
        verbose_name = '原始HTML'
        verbose_name_plural = '原始HTML'
        indexes = [
            models.Index(fields=['created_at']),
        ]
    
    def clean(self):
        """Reject URLs that exceed the 255-character column limit."""
        super().clean()
        url_length = len(self.url) if self.url else 0
        if url_length > 255:
            raise ValidationError({
                'url': f'URL长度不能超过255个字符，当前长度为{url_length}个字符'
            })
    
    def save(self, *args, **kwargs):
        """Validate via full_clean() (which runs clean()) before persisting."""
        self.full_clean()
        super().save(*args, **kwargs)
    
    def __str__(self):
        return "HTML - " + self.url[:50]


class ListItem(models.Model):
    """One row scraped from a list page; links to the detail-page crawl task."""
    title = models.CharField(max_length=500, verbose_name='项目标题')
    url = models.URLField(max_length=255, unique=True, verbose_name='详情页URL')
    publish_time = models.DateTimeField(null=True, blank=True, verbose_name='发布时间')
    province = models.CharField(max_length=50, blank=True, verbose_name='省份')
    source_platform = models.CharField(max_length=200, blank=True, verbose_name='来源平台')
    business_type = models.CharField(max_length=100, blank=True, verbose_name='业务类型')
    info_type = models.CharField(max_length=100, blank=True, verbose_name='信息类型')
    industry = models.CharField(max_length=100, blank=True, verbose_name='行业')
    task = models.ForeignKey(
        CrawlTask,
        on_delete=models.CASCADE,
        related_name='list_items',
        verbose_name='关联任务'
    )
    created_at = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
    updated_at = models.DateTimeField(auto_now=True, verbose_name='更新时间')
    
    class Meta:
        db_table = 'spider_list_item'
        verbose_name = '列表项'
        verbose_name_plural = '列表项'
        indexes = [
            models.Index(fields=['publish_time']),
            models.Index(fields=['province', 'business_type']),
        ]
    
    def clean(self):
        """Guard against URLs longer than the database column allows."""
        super().clean()
        if not self.url:
            return
        if len(self.url) > 255:
            raise ValidationError({
                'url': f'URL长度不能超过255个字符，当前长度为{len(self.url)}个字符'
            })
    
    def save(self, *args, **kwargs):
        """Run full model validation, then delegate to the default save."""
        self.full_clean()
        return super().save(*args, **kwargs)
    
    def __str__(self):
        return self.title


class DetailItem(models.Model):
    """
    Detail-page data model.

    One ListItem corresponds to exactly one DetailItem, and their URLs are
    expected to match after normalization: list_item.url == task.url.
    """
    project_number = models.CharField(
        max_length=200,
        verbose_name='招标项目编号'
    )
    info_source = models.CharField(max_length=200, blank=True, verbose_name='信息来源')
    list_item = models.OneToOneField(
        ListItem,
        on_delete=models.CASCADE,
        related_name='detail_item',
        verbose_name='关联列表项'
    )
    task = models.ForeignKey(
        CrawlTask,
        on_delete=models.CASCADE,
        related_name='detail_items',
        verbose_name='关联任务'
    )
    # JSON list of sub-detail page URLs discovered on this detail page.
    sub_detail_urls = models.JSONField(
        default=list,
        verbose_name='子详情页URL列表'
    )
    # NOTE(review): plain CharField while sibling models use URLField —
    # presumably intentional to skip URL-format validation; confirm before
    # unifying (changing it would require a migration).
    url = models.CharField(max_length=255, verbose_name='详情页URL')
    created_at = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
    updated_at = models.DateTimeField(auto_now=True, verbose_name='更新时间')

    class Meta:
        db_table = 'spider_detail_item'
        verbose_name = '详情项'
        verbose_name_plural = '详情项'
        indexes = [
            models.Index(fields=['project_number']),
        ]
    
    def __str__(self):
        return f"{self.project_number} - {self.list_item.title[:30]}"

    def _related_or_none(self, field_name):
        """
        Return the related object for *field_name*, or None when unavailable.

        Both ``task`` and ``list_item`` are non-nullable FKs, so accessing
        them while the id is unset (e.g. mid-creation) raises
        RelatedObjectDoesNotExist instead of returning a falsy value.
        This helper converts that into None so callers can skip gracefully.
        """
        try:
            return getattr(self, field_name)
        except ObjectDoesNotExist:
            return None

    def get_url(self) -> str:
        """
        Get the detail-page URL.

        Prefers task.url, falling back to list_item.url when the task is not
        available; in theory the two URLs are identical. Returns '' when
        neither relation is set.
        """
        # Bug fix: the previous `if self.task:` raised
        # RelatedObjectDoesNotExist for an unset non-nullable FK rather
        # than falling through.
        task = self._related_or_none('task')
        if task:
            return task.url
        list_item = self._related_or_none('list_item')
        if list_item:
            return list_item.url
        return ''
    
    @staticmethod
    def _normalize_url(url: str) -> str:
        """
        Normalize a URL for comparison (drop query string and fragment).
        
        Args:
            url: the original URL
        
        Returns:
            The normalized URL (scheme + netloc + path, no trailing slash).
        """
        try:
            parsed = urlparse(url)
            # Keep only scheme, netloc and path; discard params/query/fragment.
            normalized = urlunparse((
                parsed.scheme,
                parsed.netloc,
                parsed.path,
                '',  # params
                '',  # query
                ''   # fragment
            ))
            return normalized.rstrip('/')
        except Exception:
            # Best-effort: an unparsable URL is compared verbatim.
            return url
    
    def clean(self):
        """
        Validate data consistency.
        
        Ensures list_item.url and task.url agree (after normalization).

        Raises:
            ValidationError: if the URL exceeds 255 characters, or the
                list-item and task URLs disagree after normalization.
        """
        super().clean()
        
        # Enforce the 255-char column limit with a descriptive message.
        if self.url and len(self.url) > 255:
            raise ValidationError({
                'url': f'URL长度不能超过255个字符，当前长度为{len(self.url)}个字符'
            })
        
        # Skip the cross-check while either relation is still unset (may be
        # mid-creation). Bug fix: the old `if not self.list_item or not
        # self.task` raised RelatedObjectDoesNotExist instead of skipping,
        # because both FKs are non-nullable.
        list_item = self._related_or_none('list_item')
        task = self._related_or_none('task')
        if list_item is None or task is None:
            return
        
        list_item_url = list_item.url
        task_url = task.url
        
        # Compare the URLs after normalization.
        normalized_list_url = self._normalize_url(list_item_url)
        normalized_task_url = self._normalize_url(task_url)
        
        if normalized_list_url != normalized_task_url:
            raise ValidationError(
                f"列表项URL和任务URL不一致：\n"
                f"列表项URL: {list_item_url}\n"
                f"任务URL: {task_url}\n"
                f"规范化后列表项URL: {normalized_list_url}\n"
                f"规范化后任务URL: {normalized_task_url}"
            )
    
    def save(self, *args, **kwargs):
        """Validate consistency (full_clean -> clean) before saving."""
        self.full_clean()  # invokes clean()
        super().save(*args, **kwargs)


class SubDetailItem(models.Model):
    """Sub-detail page data: fetched HTML plus processing/AI bookkeeping."""
    url = models.URLField(max_length=255, unique=True, verbose_name='URL')
    html_content = models.TextField(verbose_name='HTML内容')
    detail_item = models.ForeignKey(
        DetailItem,
        on_delete=models.CASCADE,
        related_name='sub_detail_items',
        null=True,
        blank=True,
        verbose_name='关联详情项'
    )
    task = models.ForeignKey(
        CrawlTask,
        on_delete=models.CASCADE,
        related_name='sub_detail_items',
        verbose_name='关联任务'
    )
    created_at = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
    
    # Processing / classification bookkeeping fields.
    return_count = models.IntegerField(default=0, verbose_name='返回次数')
    is_processed = models.BooleanField(default=False, verbose_name='是否已处理')
    is_suspect = models.BooleanField(default=False, verbose_name='是否疑似无效')
    requires_large_context = models.BooleanField(default=False, verbose_name='是否需要大上下文模型')
    recognized_type = models.CharField(max_length=50, default='', blank=True, verbose_name='识别类型')
    recognized_confidence = models.FloatField(null=True, blank=True, verbose_name='识别置信度')
    recognized_reason = models.TextField(default='', blank=True, verbose_name='识别理由')
    classify_input_tokens = models.IntegerField(default=0, blank=True, null=True, verbose_name='分类输入Tokens')
    classify_output_tokens = models.IntegerField(default=0, blank=True, null=True, verbose_name='分类输出Tokens')
    process_input_tokens = models.IntegerField(default=0, blank=True, null=True, verbose_name='处理输入Tokens')
    process_output_tokens = models.IntegerField(default=0, blank=True, null=True, verbose_name='处理输出Tokens')
    ai_input_tokens = models.IntegerField(default=0, blank=True, null=True, verbose_name='AI输入Tokens')
    ai_output_tokens = models.IntegerField(default=0, blank=True, null=True, verbose_name='AI输出Tokens')
    
    class Meta:
        db_table = 'spider_sub_detail_item'
        verbose_name = '子详情项'
        verbose_name_plural = '子详情项'
    
    def clean(self):
        """Validate that the URL fits within the 255-character column."""
        super().clean()
        url = self.url
        if url and len(url) > 255:
            raise ValidationError({
                'url': f'URL长度不能超过255个字符，当前长度为{len(url)}个字符'
            })
    
    def save(self, *args, **kwargs):
        """Run full_clean() (which invokes clean()) before every save."""
        self.full_clean()
        super().save(*args, **kwargs)
    
    def __str__(self):
        return f"SubDetail - {self.url[:50]}"
