# -*- coding: utf-8 -*-
"""
数据清洗工具
"""

import html
import re
import unicodedata
from typing import List, Optional
from urllib.parse import urljoin

from bs4 import BeautifulSoup

from config.settings import CLEANER_CONFIG

class DataCleaner:
    """Utility class that cleans and normalizes scraped novel data.

    Covers plain-text cleanup, HTML extraction, metadata parsing
    (tags, word count, rating, status) and URL normalization.
    Behavior is driven by the ``CLEANER_CONFIG`` dict (feature switches
    and length limits).
    """

    # Matches HTML tags such as ``<div class="x">``; intentionally a
    # simple pattern, not a full HTML parser.
    _HTML_TAG_RE = re.compile(r'<[^>]+>')
    # Everything EXCEPT CJK ideographs, ASCII letters/digits, whitespace
    # and basic (ASCII + full-width) punctuation.
    _SPECIAL_CHARS_RE = re.compile(r'[^\u4e00-\u9fa5a-zA-Z0-9\s.,!?;:()（）]')
    # CSS class names that usually mark advertising containers.  Word
    # boundaries prevent false positives such as "header" matching "ad".
    _AD_CLASS_RE = re.compile(r'\b(?:ad|advertisement|banner)\b')
    # Promotional boilerplate commonly embedded in chapter text.  Longer
    # phrases come first so e.g. "求推荐" is removed as a whole instead of
    # leaving a dangling "求" behind after "推荐" is stripped.
    _AD_TEXT_RE = re.compile('|'.join([
        '求收藏', '求推荐', '求月票', '求打赏',
        '广告', '推广', '点击', '关注', '订阅', '收藏', '推荐', '求票',
    ]))
    # URL patterns carrying a novel id (first match wins).
    _NOVEL_ID_PATTERNS = [
        r'/book/(\d+)',
        r'/info/(\d+)',
        r'/novel/(\d+)',
        r'id=(\d+)',
        r'book_id=(\d+)',
    ]
    # URL patterns carrying a chapter id (first match wins).
    _CHAPTER_ID_PATTERNS = [
        r'/chapter/(\d+)',
        r'/read/(\d+)',
        r'chapter_id=(\d+)',
        r'cid=(\d+)',
    ]

    def __init__(self):
        # Cleaning thresholds and feature switches (see config.settings).
        self.config = CLEANER_CONFIG

    def clean_text(self, text: str) -> str:
        """Clean a raw text string.

        Decodes HTML entities, then applies the cleanup steps enabled in
        the configuration (tag removal, whitespace collapsing, special
        character removal).

        Args:
            text: Raw text; may be empty or None.

        Returns:
            The cleaned, stripped text ("" for empty input).
        """
        if not text:
            return ""

        # Decode entities first so entity-encoded tags are also caught by
        # the tag-removal step below.
        text = html.unescape(text)

        if self.config["remove_html_tags"]:
            text = self._remove_html_tags(text)
        if self.config["remove_extra_spaces"]:
            text = self._remove_extra_spaces(text)
        if self.config["remove_special_chars"]:
            text = self._remove_special_chars(text)

        return text.strip()

    def clean_html_content(self, element) -> str:
        """Extract and clean the text content of an HTML element.

        Args:
            element: A BeautifulSoup element (Tag), or None.

        Returns:
            The cleaned text content ("" if *element* is None).
        """
        # NOTE: ``not element`` is unreliable for bs4 tags (a childless
        # Tag can evaluate falsy), so compare against None explicitly.
        if element is None:
            return ""

        # Drop non-content markup entirely.
        for tag in element(["script", "style", "noscript"]):
            tag.decompose()

        # Drop elements whose CSS class marks them as advertising.
        for ad in element.find_all(class_=self._AD_CLASS_RE):
            ad.decompose()

        return self.clean_text(element.get_text())

    def clean_title(self, title: str) -> str:
        """Clean a title and cap it at ``max_title_length`` characters.

        Args:
            title: Raw title text.

        Returns:
            Cleaned title, truncated with a trailing "..." if over-long.
        """
        return self._truncate(self.clean_text(title),
                              self.config["max_title_length"])

    def clean_summary(self, summary: str) -> str:
        """Clean a summary and cap it at ``max_summary_length`` characters.

        Args:
            summary: Raw summary text.

        Returns:
            Cleaned summary, truncated with a trailing "..." if over-long.
        """
        return self._truncate(self.clean_text(summary),
                              self.config["max_summary_length"])

    def clean_chapter_content(self, content: str) -> str:
        """Clean chapter body text and strip promotional boilerplate.

        Args:
            content: Raw chapter text.

        Returns:
            Cleaned content, or "" when it is shorter than
            ``min_chapter_length`` (treated as an invalid/empty chapter).
        """
        content = self.clean_text(content)

        # Reject chapters that are too short to be real content.
        if len(content) < self.config["min_chapter_length"]:
            return ""

        return self._remove_ads_from_content(content)

    def extract_tags(self, text: str) -> List[str]:
        """Split a delimiter-separated tag string into a list of tags.

        Args:
            text: Text containing tags, e.g. "玄幻,修真".

        Returns:
            List of non-empty, stripped tags; [] for empty input.
        """
        if not text:
            return []

        # Try common delimiters in priority order; the first one present
        # in the text decides how it is split.
        for sep in (',', '，', ';', '；', '|', '、', ' '):
            if sep in text:
                return [tag.strip() for tag in text.split(sep) if tag.strip()]

        # No delimiter: the whole string is a single tag.
        stripped = text.strip()
        return [stripped] if stripped else []

    def extract_word_count(self, text: str) -> int:
        """Count characters, excluding whitespace and punctuation.

        The original implementation used the regex class ``\\p{P}``,
        which Python's ``re`` module does not support — it raised
        ``re.error`` at runtime.  Punctuation is now detected via
        Unicode categories (all "P*" classes).

        Args:
            text: Text to count.

        Returns:
            Number of non-space, non-punctuation characters.
        """
        if not text:
            return 0
        return sum(
            1 for ch in text
            if not ch.isspace() and not unicodedata.category(ch).startswith('P')
        )

    def extract_rating(self, text: str) -> float:
        """Extract a rating value from text and normalize it to 0–5.

        Args:
            text: Text containing a rating, e.g. "4.5/5", "8/10", "9分".

        Returns:
            Rating on a 5-point scale, clamped to [0.0, 5.0]; 0.0 when
            no rating is found.
        """
        if not text:
            return 0.0

        # (pattern, divisor) pairs; the divisor converts the raw value to
        # a 5-point scale.  Most specific patterns come first.
        patterns = [
            (r'(\d+(?:\.\d+)?)/5', 1.0),    # 5-point scale
            (r'(\d+(?:\.\d+)?)/10', 2.0),   # 10-point scale
            (r'(\d+(?:\.\d+)?)分', 1.0),    # Chinese "points"
            (r'(\d+(?:\.\d+)?)', 1.0),      # bare number fallback
        ]

        for pattern, divisor in patterns:
            match = re.search(pattern, text)
            if match:
                rating = float(match.group(1)) / divisor
                return min(5.0, max(0.0, rating))

        return 0.0

    def extract_status(self, text: str) -> str:
        """Map free-form status text to a canonical status label.

        Args:
            text: Raw status text.

        Returns:
            One of "连载中", "已完结", "已太监", "暂停更新" or "未知".
        """
        if not text:
            return "未知"

        text = text.lower()

        # Substring match, so the bare keywords also catch the full forms
        # ("连载" matches "连载中", etc.).
        status_mapping = {
            "连载": "连载中",
            "完结": "已完结",
            "太监": "已太监",
            "暂停": "暂停更新",
        }

        for key, value in status_mapping.items():
            if key in text:
                return value

        return "未知"

    def _truncate(self, text: str, max_length: int) -> str:
        """Truncate *text* to *max_length* chars, appending "..." if cut."""
        if len(text) > max_length:
            return text[:max_length] + "..."
        return text

    def _remove_html_tags(self, text: str) -> str:
        """Strip HTML tags from *text*."""
        return self._HTML_TAG_RE.sub('', text)

    def _remove_extra_spaces(self, text: str) -> str:
        """Collapse all runs of whitespace to single spaces and strip ends."""
        # str.split() with no argument splits on any whitespace run and
        # discards leading/trailing whitespace — equivalent to the old
        # two-pass regex approach, without recompiling patterns.
        return ' '.join(text.split())

    def _remove_special_chars(self, text: str) -> str:
        """Remove everything except CJK, ASCII alphanumerics, whitespace
        and basic punctuation."""
        return self._SPECIAL_CHARS_RE.sub('', text)

    def _remove_ads_from_content(self, content: str) -> str:
        """Strip common promotional phrases from chapter text.

        The old per-pattern loop used trailing ``.*?`` (which matches the
        empty string, i.e. was a no-op) and removed "推荐" before the
        longer "求推荐", leaving a dangling "求".  A single longest-first
        alternation avoids both problems.
        """
        return self._AD_TEXT_RE.sub('', content)

    def normalize_url(self, url: str, base_url: str = "") -> str:
        """Normalize a URL to an absolute form.

        Args:
            url: Raw URL, possibly root-relative or scheme-less.
            base_url: Base URL used to resolve root-relative URLs.

        Returns:
            Normalized absolute URL ("" for empty input).
        """
        if not url:
            return ""

        # Resolve root-relative URLs against the base.
        if url.startswith('/') and base_url:
            return urljoin(base_url, url)

        # Default to https for scheme-less URLs.
        if not url.startswith(('http://', 'https://')):
            url = 'https://' + url

        return url

    def extract_novel_id(self, url: str) -> Optional[str]:
        """Extract the novel id from a URL.

        Args:
            url: Novel page URL.

        Returns:
            The numeric id as a string, or None if not found.
        """
        if not url:
            return None
        return self._extract_id(url, self._NOVEL_ID_PATTERNS)

    def extract_chapter_id(self, url: str) -> Optional[str]:
        """Extract the chapter id from a URL.

        Args:
            url: Chapter page URL.

        Returns:
            The numeric id as a string, or None if not found.
        """
        if not url:
            return None
        return self._extract_id(url, self._CHAPTER_ID_PATTERNS)

    @staticmethod
    def _extract_id(url: str, patterns: List[str]) -> Optional[str]:
        """Return the first capture group matched in *url* by *patterns*."""
        for pattern in patterns:
            match = re.search(pattern, url)
            if match:
                return match.group(1)
        return None