"""
文本清洗模块
清理商品名称中的无关信息
"""
import re
import logging
from typing import List, Dict, Set, Optional
import pandas as pd

# Try to import jieba (optional dependency used for Chinese word segmentation).
try:
    import jieba
    import jieba.analyse
    JIEBA_AVAILABLE = True
except ImportError:
    JIEBA_AVAILABLE = False
    logging.warning("未安装jieba库，中文分词功能将不可用。请通过pip install jieba安装")

# Module-level logger.
logger = logging.getLogger(__name__)

class TextCleaner:
    """Cleaner for product names: strips marketing fluff and shipping noise.

    The main entry point is :meth:`clean_text`, which runs a multi-stage
    pipeline (split-pair merging, unit protection, bracket handling,
    marker/phrase/marketing-word removal and an optional jieba-based
    refinement) and reports what was removed together with a confidence
    score.  :meth:`clean_batch` applies the same pipeline to a pandas
    DataFrame column.
    """

    def __init__(self,
                 marketing_words: Optional[List[str]] = None,
                 product_names: Optional[List[str]] = None,
                 bracket_patterns: Optional[List[str]] = None):
        """
        Initialize the text cleaner.

        Args:
            marketing_words: extra marketing words to remove from names.
            product_names: known product names (registered with jieba so they
                are not split apart during segmentation).
            bracket_patterns: regex patterns for bracketed segments; defaults
                to the common ASCII and full-width bracket styles.  Each
                pattern is expected to contain a ``(.*?)`` capture group for
                the bracket content.
        """
        # Normalize the optional list arguments.
        self.marketing_words = marketing_words or []
        self.product_names = product_names or []
        self.bracket_patterns = bracket_patterns or [
            r'\[(.*?)\]',      # [content]
            r'【(.*?)】',       # 【content】
            r'\((.*?)\)',      # (content)
            r'（(.*?)）'        # （content）
        ]

        # Exact markers of irrelevant information (shipping, packaging,
        # promotions, quality-claim boilerplate, ...).
        self.irrelevant_markers = [
            '顺丰', '包邮', '包到付', '包邮到家', '免费送货', '亏本冲量',
            '新老包装', '新老包装随机', '新老包装随机发', '新老包装随机发货', 
            '整箱', '整包', '整件装', '整箱装', '整箱发货',
            '不用蓝矾', '无蓝矾', '一手货源', '源头直发', '源头直采', '直发', '直采', 
            '多仓就近发货', '就近发货', '全国配送', '全国直发',
            '鲜活速发', '生鲜速递', '产地直发', '产地直供', '工厂直发', '特价', '优惠', '限时',
            '限时优惠', '限时特价', '抢购', '秒杀', '热卖', '爆款', '超值', '活动价', '促销',
            '福利', '礼品', '礼盒', '伴手礼', '送礼', '馈赠', '赠品', '有赠品', '送',
            '精选', '手工', '手工制作', '纯手工', '纯天然', '健康营养', '营养', '天然', '原生态',
            '生态', '有机', '纯正', '正宗', '传统工艺', '古法', '传统', '老字号', '非转基因',
            '无添加', '不含防腐剂', '零添加', '无污染', '绿色食品', '放心', '安心',
            '一级', '特级', '上等', '优质', '精品', 'A级', 'AAA级', '头等', '特选', '尊享',
            '进口', '原装进口', '原装', '进口原装', '正品', '官方正品', '官方授权', '授权',
            '标准', '国标', '达标', '合格', '品质保障', '保障', '认证', '官方认证',
            '会员专享', 'vip专享', '专享', '专供', '专卖', '独享', '独家',
            '破损包赔', '包退', '售后无忧', '假一赔十', '正品保障', '假一罚万', '包试用',
            '下单减', '满减', '买赠', '买一送一', '多买多送', '多买多减', '赠品','水果菜','本地菜'
        ]

        # Irrelevant phrases expressed as regex patterns (NOTE: ``\w`` also
        # matches CJK characters, so ``\w{0,5}`` eats a short Chinese tail).
        self.irrelevant_phrases = [
            r'包邮\w{0,5}',
            r'顺丰\w{0,5}',
            r'多仓就近\w{0,10}',
            r'新老包装\w{0,10}',
            r'整箱\w{0,10}',
            r'限时\w{0,5}',
            r'特价\w{0,5}',
            r'直发\w{0,5}',
            r'包\w{0,3}退',
            r'不用\w{0,5}',
            r'官方\w{0,5}'
        ]

        # Descriptive modifier tokens; lone occurrences are dropped, but a
        # modifier directly followed by a noun is merged with it.  (Despite
        # the name, one two-character entry, '鲜嫩', is included.)
        self.single_char_modifiers = [
            '新', '鲜', '鲜嫩', '大', '小', '中', '特', '生', '熟', '优', '精', '佳', '纯', '真',
            '全', '好', '高', '低', '薄', '厚', '轻', '重', '冷', '热', '暖', '凉', '湿', '干', '软', '硬',
            '甜', '咸', '酸', '辣', '苦', '香', '脆', '嫩', '老', '松', '粘', '粗', '细', '快', '慢'
        ]

        # Modifier+noun combinations that must stay together as one token.
        self.common_modifier_noun_pairs = [
            '新鲜蔬菜', '新鲜水果', '新鲜食材', '新鲜牛肉', '新鲜猪肉', '新鲜羊肉', '新鲜鸡肉',
            '新鲜海鲜', '新鲜鱼', '新鲜虾', '新鲜蟹', '新鲜贝', '新鲜菌菇', '新鲜豆制品',
            '鲜嫩蔬菜', '鲜嫩肉类', '鲜甜水果', '鲜美海鲜', '鲜活海鲜', '新鲜农产品',
            '优质蔬菜', '优质肉类', '精选水果', '精选肉类', '精选坚果', '精选海鲜',
            '有机蔬菜', '有机水果', '有机谷物', '有机米', '有机茶', '绿色蔬菜', '绿色食品',
            '本地蔬菜', '当季水果', '当季蔬菜', '高品质水果', '高品质蔬菜'
        ]

        # Register the vocabulary with jieba so segmentation keeps these
        # tokens intact (add_word plus suggest_freq to force frequencies).
        if JIEBA_AVAILABLE:
            for word in self.marketing_words:
                if word and len(word) >= 2:
                    jieba.add_word(word, freq=1000, tag='nz')

            for word in self.product_names:
                if word and len(word) >= 2:
                    jieba.add_word(word, freq=1000, tag='nz')

            for word in self.irrelevant_markers:
                if word and len(word) >= 2:
                    jieba.add_word(word, freq=1000, tag='nz')

            # Modifier+noun pairs get a high frequency so they win over
            # their split forms during segmentation.
            for word in self.common_modifier_noun_pairs:
                if word and len(word) >= 2:
                    jieba.add_word(word, freq=3000, tag='n')
                    jieba.suggest_freq(word, True)

            # "新鲜蔬菜" is the most common pair; pin it extra hard.
            jieba.add_word('新鲜蔬菜', freq=10000, tag='n')
            jieba.suggest_freq('新鲜蔬菜', True)

            # Debug output so a misconfigured dictionary is easy to spot.
            logger.info(f"分词测试 - 新鲜蔬菜: {list(jieba.cut('新鲜蔬菜'))}")
            logger.info(f"分词测试 - 新鲜 蔬菜: {list(jieba.cut('新鲜 蔬菜'))}")

        logger.info(f"文本清洗器初始化完成，加载营销词 {len(self.marketing_words)} 个")

    @staticmethod
    def _contains_cjk(token: str) -> bool:
        """Return True if *token* contains at least one CJK ideograph."""
        return any('\u4e00' <= ch <= '\u9fff' for ch in token)

    def _remove_exact(self, text: str, token: str):
        r"""Remove every occurrence of *token* from *text*.

        ``\b`` never matches between two CJK characters (both count as
        ``\w`` in Python's ``re``), so a boundary-guarded pattern such as
        ``\b促销\b`` can never fire inside continuous Chinese text.  For
        multi-character CJK tokens we therefore use plain substring removal;
        ASCII tokens and single CJK characters keep the word-boundary guard
        so that fragments of longer words are not clipped.

        Returns:
            ``(new_text, removed)`` where *removed* says whether anything
            was taken out.
        """
        if token not in text:
            return text, False
        if len(token) >= 2 and self._contains_cjk(token):
            return text.replace(token, ' '), True
        pattern = r'\b' + re.escape(token) + r'\b'
        if re.search(pattern, text):
            return re.sub(pattern, ' ', text), True
        return text, False

    def _merge_split_pairs(self, text: str) -> str:
        """Re-join known modifier+noun pairs (e.g. '新鲜' + '蔬菜') that
        appear in *text* separated by whitespace or punctuation, so later
        stages treat them as a single token."""
        # Special-case the most common pair first.
        if '新鲜' in text and '蔬菜' in text and '新鲜蔬菜' not in text:
            if '新鲜 蔬菜' in text:
                text = text.replace('新鲜 蔬菜', '新鲜蔬菜')
            else:
                # Modifier and noun separated by punctuation/whitespace.
                text = re.sub(r'新鲜[\s,，.。、]*蔬菜', '新鲜蔬菜', text)

        for pair in self.common_modifier_noun_pairs:
            if pair in text:
                continue
            # Split the pair into a known modifier prefix and a noun suffix
            # of at least two characters.
            parts = None
            for i in range(1, len(pair)):
                if pair[:i] in self.single_char_modifiers and len(pair) - i >= 2:
                    parts = (pair[:i], pair[i:])
                    break
            if parts and parts[0] in text and parts[1] in text:
                pattern = rf'{re.escape(parts[0])}[\s,，.。、]*{re.escape(parts[1])}'
                text = re.sub(pattern, pair, text)
        return text

    def _refine_with_jieba(self, cleaned_text: str, result: Dict) -> str:
        """Segment *cleaned_text* with jieba, merge lone modifier characters
        into the following token, and rebuild the text from the important
        tokens.  Updates ``result['confidence']`` in place and returns the
        (possibly unchanged) text."""
        restore_map: Dict[str, str] = {}
        text_for_jieba = cleaned_text

        # Replace remaining square-bracket segments with placeholders so the
        # segmenter does not cut through them.  Placeholders are pure
        # alphanumerics: jieba keeps an alphanumeric run as one token,
        # whereas a mixed CJK/underscore placeholder would be cut into
        # several pieces and could never be restored.
        bracket_re = re.compile(r'【([^】]+)】|\[([^\]]+)\]')
        for i, match in enumerate(bracket_re.finditer(cleaned_text)):
            content = match.group(1) or match.group(2)
            placeholder = f"BRACKETCONTENT{i}"
            restore_map[placeholder] = content
            text_for_jieba = text_for_jieba.replace(match.group(), placeholder)

        # Protect modifier+noun pairs from being split.
        for idx, pair in enumerate(self.common_modifier_noun_pairs):
            if pair in text_for_jieba:
                placeholder = f"NOUNPAIR{idx}"
                restore_map[placeholder] = pair
                text_for_jieba = text_for_jieba.replace(pair, placeholder)

        # Segment, then swap the placeholders back for their content.
        words = [restore_map.get(w, w) for w in jieba.cut(text_for_jieba)]

        # Join a lone modifier character with the following token so that
        # e.g. '鲜' + '牛肉' survives as '鲜牛肉'.
        combined: List[str] = []
        k = 0
        while k < len(words):
            if (k + 1 < len(words)
                    and words[k] in self.single_char_modifiers
                    and len(words[k + 1]) >= 2):
                combined.append(words[k] + words[k + 1])
                k += 2
            else:
                combined.append(words[k])
                k += 1

        # TF-IDF keywords of the current text.
        keywords = jieba.analyse.extract_tags(cleaned_text, topK=5)

        # Keep keywords and multi-character tokens; drop lone modifiers.
        important_words = [
            w for w in combined
            if not (len(w) == 1 and w in self.single_char_modifiers)
            and (w in keywords or len(w) > 1)
        ]

        if important_words:
            reconstructed = ' '.join(important_words)
            if len(reconstructed) < len(cleaned_text) * 0.7:
                # Too much was lost — keep the text but lower confidence.
                result['confidence'] = 0.7
            else:
                cleaned_text = reconstructed
                result['confidence'] = 0.9
        return cleaned_text

    def clean_text(self, text: str) -> Dict:
        """
        Clean irrelevant information out of a product name.

        Args:
            text: raw product-name text.

        Returns:
            dict with keys:
                'cleaned_text': the cleaned text,
                'removed_parts': list of removed fragments,
                'is_significantly_changed': True if the text shrank by >30%,
                'confidence': cleaning confidence in [0, 1].
        """
        result = {
            'cleaned_text': text,
            'removed_parts': [],
            'is_significantly_changed': False,
            'confidence': 1.0
        }

        # Nothing to do for empty or very short names.
        if not text or len(text) < 2:
            return result

        # Stage 0: re-join modifier+noun pairs that were split apart.
        text = self._merge_split_pairs(text)

        # Stage 1: protect unit strings such as "1.8L" with placeholders so
        # the removal stages cannot mangle them.
        protected_replacements: Dict[str, str] = {}

        def _protect_unit(match):
            # Alphanumeric-only placeholder survives jieba as one token.
            placeholder = f"PROTECTEDUNIT{len(protected_replacements)}X"
            protected_replacements[placeholder] = match.group()
            return placeholder

        cleaned_text = re.sub(r'(\d+(?:\.\d+)?)[Ll](?!\w)', _protect_unit, text)
        removed_parts: List[str] = []

        # Stage 2: bracketed segments — drop irrelevant ones together with
        # their brackets, unwrap (but keep) relevant ones.
        kept_brackets: Dict[str, str] = {}
        for i, pattern in enumerate(self.bracket_patterns):
            if '(.*?)' not in pattern:
                # A custom pattern without the expected capture group cannot
                # be rebuilt into an exact match; skip it defensively.
                continue
            for j, content in enumerate(re.findall(pattern, cleaned_text)):
                # Rebuild a pattern matching this exact bracketed segment.
                exact = pattern.replace('(.*?)', re.escape(content))
                if self._is_irrelevant_info(content):
                    cleaned_text = re.sub(exact, ' ', cleaned_text)
                    removed_parts.append(content)
                else:
                    placeholder = f"BRACKETKEEP{i}X{j}"
                    kept_brackets[placeholder] = content
                    cleaned_text = re.sub(exact, f" {placeholder} ", cleaned_text)

        # Stage 3: remove exact irrelevant markers (CJK-aware matching).
        for marker in self.irrelevant_markers:
            cleaned_text, removed = self._remove_exact(cleaned_text, marker)
            if removed:
                removed_parts.append(marker)

        # Stage 4: remove irrelevant phrases by pattern.  The sub callback
        # records each actual match exactly once and avoids mutating the
        # string while iterating over matches of its old value.
        def _blank_out(match):
            removed_parts.append(match.group(0))
            return ' '

        for phrase_pattern in self.irrelevant_phrases:
            cleaned_text = re.sub(phrase_pattern, _blank_out, cleaned_text)

        # Stage 5: remove the configured marketing words.
        for word in self.marketing_words:
            cleaned_text, removed = self._remove_exact(cleaned_text, word)
            if removed:
                removed_parts.append(word)

        # Stage 6: restore bracket content that was kept.
        for placeholder, content in kept_brackets.items():
            cleaned_text = cleaned_text.replace(placeholder, content)

        # Stage 7: refine with jieba segmentation + TF-IDF keywords.
        if JIEBA_AVAILABLE and len(cleaned_text) > 5:
            cleaned_text = self._refine_with_jieba(cleaned_text, result)

        # Stage 8: collapse whitespace.
        cleaned_text = re.sub(r'\s+', ' ', cleaned_text).strip()

        # Stage 9: never return an empty name — fall back to the input.
        if not cleaned_text:
            cleaned_text = text
            result['confidence'] = 0.5

        # Stage 10: restore the protected unit strings.
        for placeholder, original in protected_replacements.items():
            cleaned_text = cleaned_text.replace(placeholder, original)

        result['cleaned_text'] = cleaned_text
        result['removed_parts'] = removed_parts
        # Significant change = the name shrank by more than 30%.
        result['is_significantly_changed'] = (
            len(text) - len(cleaned_text) > len(text) * 0.3
        )
        return result

    def _is_irrelevant_info(self, text: str) -> bool:
        """Heuristically decide whether *text* is irrelevant boilerplate
        (shipping, packaging, promotion, ...) rather than product content."""
        if any(marker in text for marker in self.irrelevant_markers):
            return True
        if any(re.search(pattern, text) for pattern in self.irrelevant_phrases):
            return True

        # Count shipping/promo keywords: two of them — or one inside a
        # longer fragment — strongly suggests the whole span is irrelevant.
        keyword_count = sum(
            1 for word in ['发货', '包装', '包邮', '顺丰', '直发',
                           '包退', '包换', '优惠', '特价']
            if word in text
        )
        return keyword_count >= 2 or (len(text) > 5 and keyword_count >= 1)

    def clean_batch(self, df: pd.DataFrame, text_column: str = '商品名称',
                    output_column: str = 'cleaned_name') -> pd.DataFrame:
        """
        Clean every product name in a DataFrame column.

        Args:
            df: DataFrame containing product names.
            text_column: column holding the raw product names.
            output_column: column to write the cleaned names to.

        Returns:
            A copy of *df* with the cleaned names in *output_column*.
        """
        result_df = df.copy()

        if output_column not in result_df.columns:
            result_df[output_column] = ''

        for idx, row in df.iterrows():
            # NaN cells are skipped, leaving the default empty string.
            raw = '' if pd.isna(row[text_column]) else str(row[text_column])
            if not raw:
                continue
            result_df.at[idx, output_column] = self.clean_text(raw)['cleaned_text']

        return result_df

# Simple manual smoke test.
if __name__ == "__main__":
    # Sample marketing vocabulary.
    demo_marketing_words = [
        "促销", "特价", "优惠", "秒杀", "限时", "抢购", "爆款", "热卖", "超值", "折扣",
        "精选", "手工", "纯天然", "有机", "高品质", "特级", "一级", "正宗", "传统", "原产地"
    ]

    # Build a cleaner with the sample vocabulary.
    demo_cleaner = TextCleaner(demo_marketing_words)

    # Representative product names to exercise the cleaner.
    sample_names = [
        "福临门 花生油 1.8L 新老包装随机发货",
        "每日鲜泥胡萝卜 约600g【顺丰配送】",
        "高原菜西兰花 特价促销 约450g 新鲜蔬菜 营养健康",
        "金龙鱼 东北大米 5kg 源头直采直发 包邮到家",
        "洽洽 原味瓜子 (多仓就近发货)(整箱新老包装随机发)休闲零食 【破损包赔】",
        "鸡蛋礼盒福利健康营养礼品 【无抗黑猪】 包鲜不包活源头直发"
    ]

    # Show before/after plus what was removed and how confident we are.
    separator = "-" * 40
    for sample in sample_names:
        outcome = demo_cleaner.clean_text(sample)
        removed = ', '.join(outcome['removed_parts']) if outcome['removed_parts'] else '无'
        print(f"原文: {sample}")
        print(f"清洗后: {outcome['cleaned_text']}")
        print(f"移除部分: {removed}")
        print(f"置信度: {outcome['confidence']}")
        print(separator)