"""数据清洗（去重、去噪、格式标准化）"""
import re
import jieba
import jieba.analyse
from datetime import datetime
from fin_senti_entity_platform.utils.config_loader import config_loader
from fin_senti_entity_platform.utils.logger import Logger
# Initialize the module-level logger
logger = Logger.get_logger('data_cleaner', 'data_processing.log')

class DataCleaner:
    """
    Cleans and pre-processes raw crawled data: strips markup and noise
    from text, normalizes formats, tokenizes with jieba, extracts
    keywords, and validates the resulting records.
    """

    # Translation table mapping full-width characters (U+FF01..U+FF5E plus
    # the ideographic space U+3000) to their half-width ASCII equivalents.
    # Built once at class level so _full_to_half is a single C-level pass
    # via str.translate instead of a per-character Python loop.
    _FULL_TO_HALF = {0x3000: 0x0020,
                     **{code: code - 0xFEE0 for code in range(0xFF01, 0xFF5F)}}

    def __init__(self):
        """
        Load the cleaning configuration, stopword list and custom jieba
        dictionary, and compile the regex patterns shared by the cleaners.
        """
        # Cleaning section of the data-collection config (empty dict when absent).
        self.clean_config = config_loader.get('data_collection', {}).get('data_processing', {}).get('cleaning', {})

        # Stopword set used by tokenize(); empty when the file is missing.
        self.stopwords = self._load_stopwords()

        # Force jieba to build its dictionary now instead of on first cut.
        jieba.initialize()

        # Best effort: a missing custom dictionary is logged, not fatal.
        custom_dict_path = self.clean_config.get('custom_dict_path', 'data/dict/custom_dict.txt')
        try:
            jieba.load_userdict(custom_dict_path)
            logger.info(f"已加载自定义词典: {custom_dict_path}")
        except Exception as e:
            logger.warning(f"加载自定义词典失败: {str(e)}")

        # Pre-compiled patterns shared by the cleaning helpers.
        self.patterns = {
            'html_tags': re.compile(r'<[^>]+>'),
            # Keep CJK ideographs, ASCII alphanumerics and basic punctuation.
            'special_chars': re.compile(r'[^一-龥a-zA-Z0-9.,?!，。？！:：;；\s]'),
            'extra_spaces': re.compile(r'\s+'),
            # FIX: the original class "[!*\\(\\),]" contained an escaped
            # backslash, so a literal '\' was accepted inside URLs; the class
            # now matches only the intended URL punctuation.
            'url_pattern': re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*(),]|(?:%[0-9a-fA-F]{2}))+'),
            'date_pattern': re.compile(r'\d{4}[-/]\d{1,2}[-/]\d{1,2}'),
            'time_pattern': re.compile(r'\d{1,2}:\d{1,2}(?::\d{1,2})?')
        }

    def _load_stopwords(self) -> set:
        """
        Load the stopword list from the configured path.

        :return: set of stopwords (empty when the file cannot be read —
                 the failure is logged, not raised)
        """
        stopwords = set()
        stopwords_path = self.clean_config.get('stopwords_path', 'data/dict/stopwords.txt')

        try:
            with open(stopwords_path, 'r', encoding='utf-8') as f:
                for line in f:
                    word = line.strip()
                    if word:
                        stopwords.add(word)
            logger.info(f"已加载停用词 {len(stopwords)} 个")
        except Exception as e:
            logger.warning(f"加载停用词表失败: {str(e)}")

        return stopwords

    def clean_text(self, text) -> str:
        """
        Clean raw text: strip HTML tags, URLs and special characters,
        then collapse whitespace.

        :param text: raw text (non-string or falsy input yields "")
        :return: cleaned text
        """
        if not text or not isinstance(text, str):
            return ""

        # Remove HTML tags first so tag contents survive as plain text.
        text = self.patterns['html_tags'].sub('', text)

        # Remove URLs.
        text = self.patterns['url_pattern'].sub('', text)

        # Drop everything except CJK, Latin letters, digits and basic punctuation.
        text = self.patterns['special_chars'].sub('', text)

        # Collapse runs of whitespace into a single space.
        text = self.patterns['extra_spaces'].sub(' ', text)

        return text.strip()

    def normalize_text(self, text: str) -> str:
        """
        Normalize cleaned text: lowercase, full-width -> half-width,
        then number and date normalization hooks.

        :param text: cleaned text
        :return: normalized text
        """
        if not text:
            return ""

        # Lowercase before width conversion (Python lowercases full-width
        # Latin letters too, so the order is safe).
        text = text.lower()

        # Convert full-width characters to half-width.
        text = self._full_to_half(text)

        # Placeholder normalization hooks (currently identity functions).
        text = self._normalize_numbers(text)
        text = self._normalize_dates(text)

        return text

    def _full_to_half(self, text: str) -> str:
        """
        Convert full-width characters to half-width.

        :param text: text possibly containing full-width characters
        :return: converted text
        """
        # Single C-level pass over the string using the class-level table.
        return text.translate(self._FULL_TO_HALF)

    def _normalize_numbers(self, text: str) -> str:
        """
        Normalize number formats (placeholder — currently identity).

        e.g. Chinese numerals -> Arabic digits, percentages -> decimals.
        """
        return text

    def _normalize_dates(self, text: str) -> str:
        """
        Normalize date formats (placeholder — currently identity).

        e.g. Chinese dates -> ISO format.
        """
        return text

    def tokenize(self, text: str) -> list:
        """
        Tokenize text with jieba and drop stopwords/whitespace tokens.

        :param text: pre-processed text
        :return: list of kept tokens
        """
        if not text:
            return []

        words = jieba.lcut(text)

        # Check the cheap strip() first, then stopword membership.
        return [word for word in words if word.strip() and word not in self.stopwords]

    def extract_keywords(self, text: str, top_k: int = 10) -> list:
        """
        Extract keywords via jieba's TF-IDF.

        :param text: pre-processed text
        :param top_k: number of keywords to return
        :return: keyword list (empty on failure — extraction is best-effort)
        """
        if not text:
            return []

        try:
            keywords = jieba.analyse.extract_tags(
                text,
                topK=top_k,
                withWeight=False,
                # Restrict to nouns, proper nouns and verbs.
                allowPOS=('n', 'nr', 'ns', 'nt', 'nz', 'v', 'vn')
            )
            return keywords
        except Exception as e:
            logger.error(f"关键词提取失败: {str(e)}")
            return []

    def clean_financial_news(self, news_item):
        """
        Clean one financial-news record into a normalized dict.

        :param news_item: raw news dict (expects keys like url/title/content)
        :return: cleaned dict, or None when input is falsy or cleaning fails
        """
        if not news_item:
            return None

        cleaned_item = {}

        try:
            # Pass-through identifiers.
            cleaned_item['url'] = news_item.get('url', '')
            cleaned_item['domain'] = news_item.get('domain', '')

            # Cleaned title and content.
            cleaned_item['title'] = self.clean_text(news_item.get('title', ''))
            cleaned_content = self.clean_text(news_item.get('content', ''))
            cleaned_item['content'] = cleaned_content

            # Normalized content drives tokenization and keyword extraction.
            normalized_content = self.normalize_text(cleaned_content)
            cleaned_item['normalized_content'] = normalized_content

            tokens = self.tokenize(normalized_content)
            cleaned_item['tokens'] = tokens
            cleaned_item['keywords'] = self.extract_keywords(normalized_content)

            # Publish time parsed to ISO where possible, else passed through.
            cleaned_item['publish_time'] = self._parse_publish_time(news_item.get('publish_time', ''))

            cleaned_item['source'] = self.clean_text(news_item.get('source', ''))

            # Keep the crawler-supplied keywords untouched for reference.
            cleaned_item['original_keywords'] = news_item.get('keywords', [])

            # Bookkeeping metadata.
            cleaned_item['cleaned_time'] = datetime.now().isoformat()
            cleaned_item['status'] = 'cleaned'
            cleaned_item['content_length'] = len(cleaned_content)
            cleaned_item['token_count'] = len(tokens)

            return cleaned_item

        except Exception as e:
            # Best-effort pipeline: log and drop the record rather than raise.
            logger.error(f"清洗新闻数据失败: {str(e)}")
            return None

    def _parse_publish_time(self, publish_time_str: str) -> str:
        """
        Parse a publish-time string into ISO format where possible.

        :param publish_time_str: raw publish-time string
        :return: ISO-formatted datetime string, or the original string when
                 no parsable date is found
        """
        if not publish_time_str:
            return ''

        try:
            date_match = self.patterns['date_pattern'].search(publish_time_str)
            time_match = self.patterns['time_pattern'].search(publish_time_str)

            if date_match:
                # Normalize separators ('/' -> '-'); replace is a no-op
                # when the date already uses dashes.
                date_str = date_match.group().replace('/', '-')
                time_str = time_match.group() if time_match else '00:00:00'
                datetime_str = f"{date_str} {time_str}"

                # FIX: dropped the redundant outer try/except around this
                # loop — the per-format ValueError handler already covers
                # every failure strptime can raise here.
                for fmt in ('%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M', '%Y-%m-%d'):
                    try:
                        return datetime.strptime(datetime_str, fmt).isoformat()
                    except ValueError:
                        continue

            # Unparsable: return the original string unchanged.
            return publish_time_str

        except Exception as e:
            logger.warning(f"解析发布时间失败: {str(e)}")
            return publish_time_str

    def validate_data(self, data_item):
        """
        Validate a cleaned data item.

        :param data_item: data dict to validate
        :return: (bool, str) — validity flag and a human-readable reason
        """
        if not data_item:
            return False, "数据项为空"

        # Required fields must exist and be truthy.
        required_fields = ['url', 'title', 'content']
        for field in required_fields:
            if field not in data_item or not data_item[field]:
                return False, f"缺少必要字段: {field}"

        # Content length bounds from config (defaults 50..10000).
        content_length = len(data_item.get('content', ''))
        min_length = self.clean_config.get('min_content_length', 50)
        max_length = self.clean_config.get('max_content_length', 10000)

        if content_length < min_length:
            return False, f"内容长度过短: {content_length} < {min_length}"

        if content_length > max_length:
            return False, f"内容长度过长: {content_length} > {max_length}"

        # Simple keyword match to confirm the item is finance-related.
        financial_keywords = self.clean_config.get('financial_keywords', [
            '股票', '基金', '债券', '金融', '银行', '保险', '投资', '理财',
            '市场', '经济', '政策', '利率', '汇率', '通胀', 'CPI', 'GDP',
            '央行', '证监会', '银保监会', '上市公司', '财报', '业绩', '分红'
        ])

        # Case-insensitive match against title and content.
        content_lower = data_item.get('content', '').lower()
        title_lower = data_item.get('title', '').lower()

        keyword_matched = any(
            kw.lower() in content_lower or kw.lower() in title_lower
            for kw in financial_keywords
        )

        if not keyword_matched:
            return False, "未匹配到金融相关关键词"

        return True, "数据验证通过"
