"""
品牌识别增强模块
使用NLP技术增强品牌识别能力
"""
import re
import logging
from typing import List, Dict, Tuple, Optional, Set, Union, Any
import pandas as pd
import time
from collections import Counter

# Optional dependency: fuzzywuzzy for fuzzy string matching.
try:
    from fuzzywuzzy import fuzz
    from fuzzywuzzy import process as fuzz_process
    FUZZY_AVAILABLE = True
except ImportError:
    FUZZY_AVAILABLE = False
    logging.warning("未安装fuzzywuzzy库，模糊匹配功能将不可用。请通过pip install fuzzywuzzy安装")

# Optional dependency: jieba for Chinese word segmentation.
try:
    import jieba
    import jieba.analyse
    JIEBA_AVAILABLE = True
except ImportError:
    JIEBA_AVAILABLE = False
    logging.warning("未安装jieba库，中文分词功能将不可用。请通过pip install jieba安装")

# Optional dependency: spaCy for NER (with a Python 3.12 compatibility check).
SPACY_AVAILABLE = False
nlp = None
try:
    import sys
    if sys.version_info >= (3, 12):
        # spaCy releases available at the time of writing do not support 3.12+.
        logging.warning("检测到Python 3.12+，当前不兼容spaCy库，命名实体识别功能将不可用")
    else:
        import spacy
        try:
            nlp = spacy.load("zh_core_web_sm")
            SPACY_AVAILABLE = True
        except Exception:
            # spacy.load raises OSError when the model is missing; the former
            # bare `except:` would also have swallowed KeyboardInterrupt and
            # SystemExit, so narrow it to Exception.
            SPACY_AVAILABLE = False
            logging.warning("未加载spaCy中文模型，命名实体识别功能将不可用。请通过python -m spacy download zh_core_web_sm安装")
except ImportError:
    SPACY_AVAILABLE = False
    logging.warning("未安装spaCy库，命名实体识别功能将不可用。请通过pip install spacy安装")

# Module-level logger.
logger = logging.getLogger(__name__)

class BrandRecognizer:
    """Brand recognizer.

    Detects brand names in free text by combining strict whole-word matching
    with optional fuzzy matching (fuzzywuzzy), Chinese word segmentation
    (jieba) and named entity recognition (spaCy). Per-text results are cached.
    """

    def __init__(
        self, 
        brands: List[str], 
        min_similarity: float = 80.0,
        use_fuzzy: bool = FUZZY_AVAILABLE,
        use_ner: bool = SPACY_AVAILABLE,
        use_jieba: bool = JIEBA_AVAILABLE,
        cache_size: int = 1000,
        multi_brand_detection: bool = True
    ):
        """
        Initialize the brand recognizer.

        Args:
            brands: List of known brand names.
            min_similarity: Minimum similarity threshold (0-100) for fuzzy matching.
            use_fuzzy: Enable fuzzy matching (requires fuzzywuzzy).
            use_ner: Enable named entity recognition (requires spaCy).
            use_jieba: Enable Chinese word segmentation (requires jieba).
            cache_size: Maximum number of texts kept in each match cache.
            multi_brand_detection: Enable detection of multiple brands per text.
        """
        # Normalize the brand list: strip whitespace, lowercase, and drop
        # empty or single-character entries (too noisy to match reliably).
        self.brands = [b.strip().lower() for b in brands if b and len(b.strip()) > 1]
        self.min_similarity = min_similarity
        # A feature is active only when requested AND its library imported.
        self.use_fuzzy = use_fuzzy and FUZZY_AVAILABLE
        self.use_ner = use_ner and SPACY_AVAILABLE
        self.use_jieba = use_jieba and JIEBA_AVAILABLE
        self.multi_brand_detection = multi_brand_detection

        # Build regex pattern / jieba user dictionary from the brand list.
        self._preprocess_brands()

        # Bounded caches mapping input text -> match results.
        self.exact_match_cache: Dict[str, Set[str]] = {}
        self.fuzzy_match_cache: Dict[str, List[Tuple[str, float]]] = {}
        self.cache_size = cache_size

        # Log the effective configuration.
        logger.info(f"初始化BrandRecognizer: {len(self.brands)}个品牌")
        logger.info(f"模糊匹配: {'启用' if self.use_fuzzy else '禁用'}")
        logger.info(f"NER功能: {'启用' if self.use_ner else '禁用'}")
        logger.info(f"中文分词: {'启用' if self.use_jieba else '禁用'}")
        logger.info(f"多品牌检测: {'启用' if self.multi_brand_detection else '禁用'}")

    def _preprocess_brands(self):
        """Preprocess and optimize the brand list."""
        # Sort by length descending so longer names win over shorter
        # prefixes in the regex alternation.
        self.brands.sort(key=len, reverse=True)

        # Build the exact-match pattern. Guard against an empty brand list:
        # '|'.join([]) would yield r'\b()\b', which matches the empty string
        # at every position.
        # NOTE(review): \b does not separate adjacent CJK characters, so
        # Chinese brands embedded in running Chinese text may be missed by
        # this pattern; token-based matching in extract_brands_from_text
        # covers that case — confirm this is the intended division of labor.
        if self.brands:
            brand_patterns = [re.escape(brand) for brand in self.brands]
            self.exact_pattern = re.compile(
                r'\b(' + '|'.join(brand_patterns) + r')\b', 
                re.IGNORECASE
            )
        else:
            self.exact_pattern = re.compile(r'(?!x)x')  # matches nothing

        # Register Chinese brands in jieba's user dictionary so the
        # segmenter keeps them as single tokens.
        if self.use_jieba:
            for brand in self.brands:
                if any('\u4e00' <= ch <= '\u9fff' for ch in brand):
                    jieba.add_word(brand, freq=100000)

    def extract_brands_from_text(self, text: str) -> List[Dict[str, Any]]:
        """
        Extract all brands from a text (multi-brand aware).

        Collaboration connectors ("×", standalone "x"/"X") are replaced with
        spaces, the text is tokenized, and each token is checked against the
        brand list with strict whole-word, case-insensitive matching.

        Args:
            text: Input text.

        Returns:
            List[Dict]: One dict per detected brand with keys
            "brand", "confidence" and "match_type".
        """
        if not text or not isinstance(text, str):
            return []
        text = text.strip()
        if not text:
            return []
        # Replace brand-collaboration connectors with spaces. Only the "×"
        # sign and x/X used as a separator (between spaces or between CJK
        # characters) are replaced; the old r'[×xX]' stripped every x/X and
        # destroyed brands such as "Xiaomi".
        norm_text = re.sub(
            r'×|(?<=\s)[xX](?=\s)|(?<=[\u4e00-\u9fff])[xX](?=[\u4e00-\u9fff])',
            ' ',
            text
        )
        results: List[Dict[str, Any]] = []
        used: Set[str] = set()
        # 1. Tokenize (jieba when available, whitespace split otherwise).
        words = list(jieba.cut(norm_text)) if self.use_jieba and JIEBA_AVAILABLE else norm_text.split()
        # 2. Check brands longest-first so longer names win.
        brands = sorted(self.brands, key=lambda x: -len(x))
        # 3. Strict whole-word matching. Brands are stored lowercased in
        #    __init__, so compare against the lowercased token (the old
        #    case-sensitive comparison could never match e.g. "Apple").
        for word in words:
            word_lower = word.lower()
            for brand in brands:
                if word_lower == brand and brand not in used:
                    results.append({"brand": brand, "confidence": 100.0, "match_type": "exact"})
                    used.add(brand)
                    break  # each token matches at most one brand
        # 4. Optional: NER / fuzzy matching could supplement the exact
        #    matches here, guarded against duplicates and false positives.
        logger.info(f"品牌识别结果>>>>>>>>>>>results: {results}")
        return results

    def _find_exact_matches(self, text: str) -> Set[str]:
        """Find brands in the text via the precompiled exact-match pattern."""
        # Serve from cache when possible.
        if text in self.exact_match_cache:
            return self.exact_match_cache[text]

        # The pattern is case-insensitive; lowering the text once keeps the
        # returned brand names normalized to lowercase.
        lowered = text.lower()
        matches = set(match.group() for match in self.exact_pattern.finditer(lowered))

        # Simple cache policy: clear everything when full.
        if len(self.exact_match_cache) >= self.cache_size:
            self.exact_match_cache.clear()
        self.exact_match_cache[text] = matches

        return matches

    def _find_fuzzy_matches(self, text: str) -> List[Tuple[str, float]]:
        """Find brands via fuzzy matching.

        Returns:
            List of (brand, score) pairs sorted by score descending; only
            scores >= self.min_similarity are kept.
        """
        if not self.use_fuzzy or not FUZZY_AVAILABLE:
            return []

        # Serve from cache when possible.
        if text in self.fuzzy_match_cache:
            return self.fuzzy_match_cache[text]

        matches: List[Tuple[str, float]] = []
        text_lower = text.lower()  # hoisted: was recomputed per brand

        for brand in self.brands:
            # Brands are already lowercased; compute three similarity views.
            ratio = fuzz.ratio(text_lower, brand)
            partial_ratio = fuzz.partial_ratio(text_lower, brand)
            token_sort_ratio = fuzz.token_sort_ratio(text_lower, brand)

            # Keep the best of the three scores.
            max_score = max(ratio, partial_ratio, token_sort_ratio)

            if max_score >= self.min_similarity:
                matches.append((brand, max_score))

        # Best matches first.
        matches.sort(key=lambda x: x[1], reverse=True)

        # Simple cache policy: clear everything when full.
        if len(self.fuzzy_match_cache) >= self.cache_size:
            self.fuzzy_match_cache.clear()
        self.fuzzy_match_cache[text] = matches

        return matches

    def _extract_brands_with_ner(self, text: str) -> List[Tuple[str, float]]:
        """Extract candidate brands using named entity recognition."""
        if not self.use_ner or not SPACY_AVAILABLE:
            return []

        try:
            doc = nlp(text)

            # Collect ORG and PRODUCT entities.
            entities = []
            for ent in doc.ents:
                if ent.label_ in ["ORG", "PRODUCT"]:
                    # Crude confidence heuristic: longer entities are more
                    # trustworthy, capped at 0.95.
                    confidence = min(0.8 + len(ent.text) / 20, 0.95)
                    entities.append((ent.text, confidence))

            return entities
        except Exception as e:
            logger.error(f"NER处理失败: {str(e)}")
            return []

    def extract_and_clean_brands(self, text: str) -> Dict[str, Any]:
        """
        Extract brands from a text and return the text with them removed.

        Returns:
            Dict with keys "brands", "brand_string", "cleaned_text",
            "original_text" and "process_time".
        """
        if not text or not isinstance(text, str):
            logger.info(f"[extract_and_clean_brands] 输入为空: {text}")
            return {"brands": [], "cleaned_text": "", "original_text": text}
        logger.info(f"[extract_and_clean_brands] 输入文本: {text}")
        # Extract brands.
        start_time = time.time()
        brands = self.extract_brands_from_text(text)
        logger.info(f"[extract_and_clean_brands] 识别brands: {brands}")
        # Clean the text: remove only exact or high-confidence matches.
        cleaned_text = text
        for brand_info in brands:
            if brand_info["match_type"] == "exact" or brand_info["confidence"] >= 90:
                brand = brand_info["brand"]
                # Brands containing special characters cannot rely on \b.
                if re.search(r'[^a-zA-Z0-9\u4e00-\u9fa5]', brand):
                    # Special characters present: replace as a whole.
                    pattern = re.compile(re.escape(brand), re.IGNORECASE)
                else:
                    # Plain brand: use word boundaries.
                    pattern = re.compile(r'\b' + re.escape(brand) + r'\b', re.IGNORECASE)
                cleaned_text = pattern.sub('', cleaned_text)
        # Strip all "number + %" fragments.
        cleaned_text = re.sub(r'\d+\s*%', '', cleaned_text)
        # Collapse redundant whitespace.
        cleaned_text = re.sub(r'\s+', ' ', cleaned_text).strip()
        process_time = time.time() - start_time
        # Build the combined brand string.
        brand_string = self.combine_brands_to_string(brands)
        logger.info(f"[extract_and_clean_brands] brand_string: {brand_string}, cleaned_text: {cleaned_text}")
        return {
            "brands": brands,
            "brand_string": brand_string,
            "cleaned_text": cleaned_text,
            "original_text": text,
            "process_time": process_time
        }

    def combine_brands_to_string(self, brands_list: List[Dict[str, Any]]) -> str:
        """
        Join a list of brand dicts into a comma-separated string.

        Args:
            brands_list: List of brand-info dicts.

        Returns:
            str: Chinese-comma-separated string of high-confidence brands.
        """
        if not brands_list:
            return ""
        # Keep only high-confidence brands (threshold = 85); .get avoids a
        # KeyError on dicts that lack a "confidence" field.
        high_confidence_brands = [b["brand"] for b in brands_list if b.get("confidence", 0) >= 85]
        # Join with the full-width Chinese comma.
        return "，".join(high_confidence_brands)

    def process_dataframe(
        self, 
        df: pd.DataFrame, 
        text_column: str, 
        brand_column: str = 'extracted_brands',
        brand_string_column: str = 'brand_string',
        cleaned_text_column: str = 'cleaned_text',
        inplace: bool = False
    ) -> pd.DataFrame:
        """
        Process a text column of a DataFrame, adding brand-result columns.

        Args:
            df: Input DataFrame.
            text_column: Name of the column holding the text to process.
            brand_column: Column name for the extracted brand dicts.
            brand_string_column: Column name for the comma-separated brand string.
            cleaned_text_column: Column name for the cleaned text.
            inplace: Modify df in place instead of working on a copy.

        Returns:
            DataFrame: The processed DataFrame.

        Raises:
            ValueError: If text_column is not present in df.
        """
        if not inplace:
            df = df.copy()

        if text_column not in df.columns:
            raise ValueError(f"列 '{text_column}' 不存在于DataFrame中")

        # Pre-create the result columns with object dtype so that list
        # values can be assigned via .at without pandas dtype errors.
        for col in (brand_column, brand_string_column, cleaned_text_column):
            df[col] = pd.Series([None] * len(df), index=df.index, dtype=object)

        # Progress-tracking state.
        total_rows = len(df)
        start_time = time.time()
        last_log_time = start_time

        # Process each row.
        for i, (idx, text) in enumerate(df[text_column].items()):
            # Run extraction + cleaning for this row's text.
            result = self.extract_and_clean_brands(text)

            # Write results back to the DataFrame.
            df.at[idx, brand_column] = result["brands"]
            df.at[idx, brand_string_column] = result["brand_string"]
            df.at[idx, cleaned_text_column] = result["cleaned_text"]

            # Log progress every 1000 rows or every 30 seconds.
            current_time = time.time()
            if (i+1) % 1000 == 0 or current_time - last_log_time >= 30:
                progress = (i+1) / total_rows * 100
                elapsed = current_time - start_time
                rate = (i+1) / elapsed if elapsed > 0 else 0
                eta = (total_rows - (i+1)) / rate if rate > 0 else 0

                logger.info(f"处理进度: {progress:.1f}% ({i+1}/{total_rows}), "
                           f"速率: {rate:.1f}行/秒, 预计剩余时间: {eta:.1f}秒")

                last_log_time = current_time

        total_time = time.time() - start_time
        logger.info(f"数据处理完成！共处理 {total_rows} 行，耗时 {total_time:.2f} 秒")

        return df

    def extract_potential_brands(self, product_names: List[str]) -> Dict[str, Any]:
        """
        Mine a list of product names for potential brand names.

        Args:
            product_names: List of product name strings.

        Returns:
            Dict with keys "known_brands", "unknown_brands" and
            "processing_time".
        """
        logger.info(f"开始从{len(product_names)}个商品名称中提取潜在品牌")
        start_time = time.time()

        if not product_names:
            return {
                "known_brands": [],
                "unknown_brands": [],
                "processing_time": 0
            }

        # 1. Preprocess names: strip obvious non-brand prefixes such as "预售-".
        preprocessed_names = []
        prefix_pattern = re.compile(r'^预售[\s\-]*')

        for name in product_names:
            if not isinstance(name, str) or not name.strip():
                continue

            processed_name = prefix_pattern.sub('', name.strip())
            preprocessed_names.append(processed_name)

        # 2. Extract candidate brand fragments from each name.
        brand_candidates = []

        # Pattern for a 2-6 character Chinese prefix followed by a break.
        cn_prefix_pattern = re.compile(r'^([\u4e00-\u9fa5]{2,6})\s*[^长\u4e00-\u9fa5]')
        # Hoisted out of the loop: was recompiled for every product name.
        bracket_pattern = re.compile(r'【([^】]+)】|\[([^\]]+)\]')

        for name in preprocessed_names:
            # Strategy 1: take a leading 2-6 character Chinese run as the brand.
            cn_match = cn_prefix_pattern.search(name)
            if cn_match:
                brand_candidates.append(cn_match.group(1))
                continue

            # Strategy 2: use jieba segmentation.
            if self.use_jieba and JIEBA_AVAILABLE:
                # Protect bracketed content from segmentation by swapping it
                # out for placeholders first.
                bracket_content = {}
                name_for_jieba = name
                for i, match in enumerate(bracket_pattern.finditer(name)):
                    content = match.group(1) or match.group(2)
                    placeholder = f"BRACKET_CONTENT_{i}"
                    bracket_content[placeholder] = content
                    name_for_jieba = name_for_jieba.replace(match.group(), placeholder)

                # Segment the name.
                words = list(jieba.cut(name_for_jieba))

                # Restore the bracketed content.
                for i, word in enumerate(words):
                    if word in bracket_content:
                        words[i] = bracket_content[word]

                # Take the first two multi-character Chinese tokens.
                word_candidates = [w for w in words[:2] if len(w) > 1 and any('\u4e00' <= ch <= '\u9fa5' for ch in w)]
                brand_candidates.extend(word_candidates)

        # 3. Analyze the candidate frequencies.
        if not brand_candidates:
            logger.warning("未能提取到任何潜在品牌")
            return {
                "known_brands": [],
                "unknown_brands": [],
                "processing_time": time.time() - start_time
            }

        brand_counter = Counter(brand_candidates)

        # Drop low-frequency candidates (fewer than 2 occurrences is noise).
        filtered_brands = {brand: count for brand, count in brand_counter.items() if count >= 2}

        if not filtered_brands:
            logger.warning("过滤后没有高频潜在品牌")
            return {
                "known_brands": [],
                "unknown_brands": [],
                "processing_time": time.time() - start_time
            }

        # 4. Compare against the known brand list. self.brands is already
        # lowercased in __init__; a set gives O(1) membership instead of
        # rebuilding a lowercase list for every candidate.
        known_lower = set(self.brands)
        known_brands = []
        unknown_brands = []

        for brand, count in filtered_brands.items():
            if brand.lower() in known_lower:
                known_brands.append({
                    "brand": brand,
                    "count": count,
                    "status": "known"
                })
            else:
                # Try fuzzy matching against the known brands.
                if self.use_fuzzy and FUZZY_AVAILABLE:
                    best_match = None
                    best_score = 0

                    for known_brand in self.brands:
                        score = fuzz.ratio(brand.lower(), known_brand)
                        if score > best_score and score >= 85:  # high bar for quality
                            best_score = score
                            best_match = known_brand

                    if best_match:
                        known_brands.append({
                            "brand": brand,
                            "count": count,
                            "status": "similar",
                            "similar_to": best_match,
                            "similarity": best_score
                        })
                        continue

                # Neither known nor similar: record as unknown.
                unknown_brands.append({
                    "brand": brand,
                    "count": count,
                    "status": "unknown"
                })

        # Sort by frequency, most common first.
        known_brands.sort(key=lambda x: x["count"], reverse=True)
        unknown_brands.sort(key=lambda x: x["count"], reverse=True)

        processing_time = time.time() - start_time
        logger.info(f"品牌提取完成，已知品牌: {len(known_brands)}个，未知品牌: {len(unknown_brands)}个，耗时: {processing_time:.2f}秒")

        return {
            "known_brands": known_brands,
            "unknown_brands": unknown_brands,
            "processing_time": processing_time
        }

# Quick manual smoke test for the recognizer.
if __name__ == "__main__":
    # Make the module's INFO logging visible on the console.
    logging.basicConfig(level=logging.INFO)

    # Sample brand vocabulary (English + Chinese).
    sample_brands = [
        "Apple", "Samsung", "Xiaomi", "Huawei", "Nike", "Adidas", 
        "阿里巴巴", "腾讯", "百度", "京东", "小米", "华为",
        "缤纷田园", "每日鲜", "多点工坊", "马大姐"  # Chinese brands used by the cases below
    ]

    # Build a recognizer with a relaxed fuzzy-match threshold.
    recognizer = BrandRecognizer(
        brands=sample_brands,
        min_similarity=75.0
    )

    # Input cases covering exact, fuzzy, multi-brand and no-brand texts.
    sample_texts = [
        "I love my Apple iPhone and my Samsung TV.",
        "Nike shoes are better than Adidas for running.",
        "我喜欢使用小米手机，但华为的相机更好。",
        "我在京东买了阿迪达斯(Adidas)的鞋子。",
        "这个产品不错，推荐购买。",  # no brand present
        "三星(Samsung)和苹果(Apple)哪个好？",
        "阿狸巴巴是购物网站。",  # fuzzy-match case
        "缤纷田园每日鲜小白菜 约300g",  # multi-brand case
        "多点工坊×马大姐内酯豆腐 350g"  # collaboration-connector case
    ]

    # Run each case and print the extraction results.
    for case_no, sample in enumerate(sample_texts, start=1):
        print(f"\n===== 测试 {case_no} =====")
        print(f"原文: {sample}")

        outcome = recognizer.extract_and_clean_brands(sample)

        print("识别的品牌:")
        for info in outcome["brands"]:
            print(f"  - {info['brand']} (置信度: {info['confidence']:.1f}%, 匹配类型: {info['match_type']})")

        print(f"品牌字符串: {outcome.get('brand_string', '')}")
        print(f"清理后文本: {outcome['cleaned_text']}")
        print(f"处理时间: {outcome['process_time']*1000:.2f}毫秒") 