"""
HanLP品牌识别增强模块
使用HanLP技术增强品牌识别能力
"""
import re
import logging
from typing import List, Dict, Set, Tuple, Optional, Union, Any
import pandas as pd
import time
from collections import Counter

# 尝试导入模糊匹配库
try:
    from fuzzywuzzy import fuzz
    from fuzzywuzzy import process as fuzz_process
    FUZZY_AVAILABLE = True
except ImportError:
    FUZZY_AVAILABLE = False
    logging.warning("未安装fuzzywuzzy库，模糊匹配功能将不可用。请通过pip install fuzzywuzzy安装")

# 尝试导入HanLP
try:
    import hanlp
    HANLP_AVAILABLE = True
except ImportError:
    HANLP_AVAILABLE = False
    logging.warning("未安装HanLP库，高级分词和NER功能将不可用。请通过pip install hanlp安装")

# 尝试导入jieba作为备选
try:
    import jieba
    import jieba.analyse
    JIEBA_AVAILABLE = True
except ImportError:
    JIEBA_AVAILABLE = False
    logging.warning("未安装jieba库，将使用基础分词功能。")

# 创建日志记录器
logger = logging.getLogger(__name__)

class HanLPBrandRecognizer:
    """HanLP-based brand recognizer.

    Extracts brand names from free-form product text by combining:
      1. exact matching against a known-brand list,
      2. HanLP tokenization / NER / POS filtering (jieba as a fallback),
      3. fuzzy matching of candidate tokens against the known brands.
    """

    # Frequent marketing / e-commerce filler words that are never brand names.
    # Hoisted to a class-level frozenset so it is built once, not per call.
    _COMMON_NON_BRANDS = frozenset({
        '全新', '正品', '新款', '热销', '促销', '特价', '限时', '包邮',
        '官方', '专柜', '专卖', '旗舰', '正版', '授权', '代理', '批发',
        '新品', '现货', '直发', '顺丰', '包装', '保质期', '到期', '保修',
        '假一赔', '售后', '联保', '质保', '三包', '急速', '闪送', '次日达',
        '超值', '划算', '性价比', '优惠', '打折', '折扣', '特惠', '满减',
        '满赠', '赠品', '增量', '多买', '套装', '礼盒', '礼品', '福袋',
        '限量', '限定', '珍藏', '典藏', '收藏', '特供', '专供', '订制',
        '最新', '最热', '人气', '明星', '网红', '达人', '推荐', '热卖',
        '免邮', '邮费', '运费', '商品', '产品', '货品', '物品'
    })

    # Common Chinese measure words and measurement units — never brands.
    _COMMON_UNITS = frozenset({
        '件', '个', '箱', '包', '袋', '盒', '瓶', '罐', '桶', '支',
        '片', '张', '块', '粒', '条', '根', '双', '对', '套', '组',
        '千克', '公斤', 'kg', 'g', '克', '斤', '两', '吨',
        '升', 'l', '毫升', 'ml', '厘米', '米', 'cm', 'm'
    })

    def __init__(
        self, 
        brands: List[str], 
        min_similarity: float = 80.0,
        use_fuzzy: bool = FUZZY_AVAILABLE,
        use_hanlp: bool = HANLP_AVAILABLE,
        use_jieba: bool = JIEBA_AVAILABLE,
        use_pos_filter: bool = True,
        cache_size: int = 1000
    ):
        """
        Initialize the brand recognizer.

        Args:
            brands: List of known brand names.
            min_similarity: Minimum similarity (0-100) for fuzzy matches.
            use_fuzzy: Enable fuzzy matching (requires fuzzywuzzy).
            use_hanlp: Enable HanLP tokenization / NER / POS.
            use_jieba: Enable jieba as a fallback tokenizer.
            use_pos_filter: Keep only noun-like tokens as brand candidates.
            cache_size: Maximum number of entries per internal cache.
        """
        # Normalize: strip, lowercase, drop empty / single-character entries.
        self.brands = [b.strip().lower() for b in brands if b and len(b.strip()) > 1]
        self.min_similarity = min_similarity
        # Requested features only stay enabled if the library is installed.
        self.use_fuzzy = use_fuzzy and FUZZY_AVAILABLE
        self.use_hanlp = use_hanlp and HANLP_AVAILABLE
        self.use_jieba = use_jieba and JIEBA_AVAILABLE
        self.use_pos_filter = use_pos_filter

        # Build the exact-match pattern and register brands with jieba.
        self._preprocess_brands()

        # HanLP pipelines; stay None when loading fails or HanLP is disabled.
        self.tokenizer = None
        self.ner = None
        self.pos = None

        if self.use_hanlp:
            try:
                # Fine-grained Chinese tokenizer (required for HanLP mode).
                self.tokenizer = hanlp.load(hanlp.pretrained.tok.FINE_ELECTRA_SMALL_ZH)

                # NER model is optional: failure only disables entity hints.
                try:
                    self.ner = hanlp.load(hanlp.pretrained.ner.MSRA_NER_ELECTRA_SMALL_ZH)
                except Exception as e:
                    logger.warning(f"HanLP NER模型加载失败: {str(e)}")

                # POS model is optional: failure disables POS filtering only.
                try:
                    self.pos = hanlp.load(hanlp.pretrained.pos.CTB9_POS_ELECTRA_SMALL)
                except Exception as e:
                    logger.warning(f"HanLP POS模型加载失败: {str(e)}")
                    self.use_pos_filter = False
            except Exception as e:
                logger.error(f"HanLP模型初始化失败: {str(e)}")
                self.use_hanlp = False

        # Bounded caches: insertion simply stops once cache_size is reached
        # (no eviction), so memory use is capped.
        self.exact_match_cache = {}
        self.fuzzy_match_cache = {}
        self.hanlp_cache = {}
        self.cache_size = cache_size

        # Log the effective configuration.
        logger.info(f"初始化HanLPBrandRecognizer: {len(self.brands)}个品牌")
        logger.info(f"HanLP功能: {'启用' if self.use_hanlp else '禁用'}")
        logger.info(f"模糊匹配: {'启用' if self.use_fuzzy else '禁用'}")
        logger.info(f"Jieba备选: {'启用' if self.use_jieba else '禁用'}")
        logger.info(f"词性过滤: {'启用' if self.use_pos_filter else '禁用'}")

    def _preprocess_brands(self):
        """Pre-process the brand list and build the matching structures."""
        # Longest first, so longer brand names win over shorter prefixes.
        self.brands.sort(key=len, reverse=True)

        if self.brands:
            # Word-boundary alternation over all escaped brand names.
            brand_patterns = [re.escape(brand) for brand in self.brands]
            self.exact_pattern = re.compile(
                r'\b(' + '|'.join(brand_patterns) + r')\b', 
                re.IGNORECASE
            )
        else:
            # FIX: with no brands the old code compiled r'\b()\b', which
            # matches the empty string at every word boundary and floods
            # findall() with '' results. Use a never-matching pattern.
            self.exact_pattern = re.compile(r'(?!)')

        # Register Chinese brands in jieba's user dictionary so they are
        # kept as single tokens by the fallback tokenizer.
        if self.use_jieba:
            for brand in self.brands:
                if any('\u4e00' <= ch <= '\u9fff' for ch in brand):
                    jieba.add_word(brand, freq=100000)

    def _find_exact_matches(self, text: str) -> List[str]:
        """
        Find known brands that appear verbatim in the text.

        Args:
            text: Input text.

        Returns:
            List[str]: Matched (lowercased) brand names.
        """
        if text in self.exact_match_cache:
            return self.exact_match_cache[text]

        matches = []
        # Hoisted: the old code recomputed text.lower() once per brand.
        lowered = text.lower()

        # Pass 1: word-boundary regex.
        found = self.exact_pattern.findall(lowered)
        if found:
            matches.extend(found)

        # Pass 2: plain substring scan. \b never fires between adjacent CJK
        # characters (both are \w), so Chinese brands embedded in Chinese
        # text are only caught here.
        for brand in self.brands:
            if brand in lowered and brand not in matches:
                matches.append(brand)

        # Cache the result (bounded: stop inserting once full).
        if len(self.exact_match_cache) < self.cache_size:
            self.exact_match_cache[text] = matches

        return matches

    def _get_hanlp_tokens(self, text: str) -> List[str]:
        """
        Tokenize text with HanLP.

        Args:
            text: Input text.

        Returns:
            List[str]: Tokens, or [] when HanLP is unavailable or fails.
        """
        if not self.use_hanlp or not self.tokenizer:
            return []

        if text in self.hanlp_cache:
            return self.hanlp_cache[text]

        try:
            tokens = self.tokenizer(text)

            # Cache the result (bounded: stop inserting once full).
            if len(self.hanlp_cache) < self.cache_size:
                self.hanlp_cache[text] = tokens

            return tokens
        except Exception as e:
            logger.error(f"HanLP分词错误: {str(e)}")
            return []

    def _get_hanlp_entities(self, text: str) -> List[Dict]:
        """
        Run HanLP named-entity recognition over the text.

        Args:
            text: Input text.

        Returns:
            List[Dict]: Raw NER output, or [] on any failure.
        """
        if not self.use_hanlp or not self.ner:
            logger.warning(">>>>>>>>>>>>>>>HanLP或NER模型未加载, 无法进行实体识别")
            return []

        try:
            # Spaces confuse the character-based model; drop them first.
            processed_text = text.replace(" ", "")
            if not processed_text or len(processed_text) < 2:
                return []

            try:
                return self.ner(processed_text)
            except IndexError as e:
                # Known failure mode: tensor-dimension/index problems inside HanLP.
                logger.error(f"《《《《《《《《《《《HanLP实体识别索引错误: {str(e)}")
                return []
            except RuntimeError as e:
                # e.g. "too many indices for tensor of dimension X".
                if "too many indices for tensor" in str(e):
                    logger.error(f"《《《《《《《《《《《HanLP实体识别张量维度错误: {str(e)}")
                    return []
                raise  # re-raise unrelated runtime errors
        except Exception as e:
            logger.error(f"《《《《《《《《《《《HanLP实体识别错误 text : {text}")
            logger.error(f"《《《《《《《《《《《HanLP实体识别错误: {str(e)}")
            return []

    def _get_token_pos(self, text: str) -> List[Tuple[str, str]]:
        """
        POS-tag the text with HanLP.

        Args:
            text: Input text.

        Returns:
            List[Tuple[str, str]]: (token, pos_tag) pairs, or [] on failure
            or when POS filtering is disabled.
        """
        if not self.use_hanlp or not self.pos or not self.use_pos_filter:
            return []

        try:
            tokens = self.tokenizer(text)
            pos_tags = self.pos(tokens)
            return list(zip(tokens, pos_tags))
        except Exception as e:
            logger.error(f"HanLP词性标注错误: {str(e)}")
            return []

    def _get_potential_brand_tokens(self, text: str) -> List[str]:
        """
        Extract tokens from the text that could plausibly be brand names.

        HanLP tokenization (with optional POS filtering) plus NER hints is
        tried first; jieba is used only when HanLP yields nothing.

        Args:
            text: Input text.

        Returns:
            List[str]: Candidate brand tokens, in discovery order.
        """
        potential_tokens = []

        if self.use_hanlp and self.tokenizer:
            self._collect_hanlp_candidates(text, potential_tokens)

        # Fall back to jieba when HanLP is unavailable or produced nothing.
        if (not potential_tokens) and self.use_jieba:
            self._collect_jieba_candidates(text, potential_tokens)

        return potential_tokens

    def _collect_hanlp_candidates(self, text: str, potential_tokens: List[str]) -> None:
        """Append HanLP-derived candidates (POS-filtered tokens + NER entities)."""
        tokens = self._get_hanlp_tokens(text)

        if self.use_pos_filter and self.pos:
            try:
                pos_tags = self.pos(tokens)
                # CTB9 tag set — keep noun-like tags that can carry brands:
                # NN common noun, NR proper noun, NT temporal noun, FW foreign.
                brand_pos_tags = {'NN', 'NR', 'NT', 'FW'}

                for token, pos in zip(tokens, pos_tags):
                    if (pos in brand_pos_tags and 
                        len(token) >= 2 and 
                        self._is_valid_brand_candidate(token)):
                        potential_tokens.append(token)
            except Exception as e:
                logger.error(f"词性过滤错误: {str(e)}")
                # POS failed: fall back to length/validity filtering only.
                for token in tokens:
                    if (len(token) >= 2 and 
                        self._is_valid_brand_candidate(token)):
                        potential_tokens.append(token)
        else:
            # POS filtering disabled: length/validity filtering only.
            for token in tokens:
                if (len(token) >= 2 and 
                    self._is_valid_brand_candidate(token)):
                    potential_tokens.append(token)

        if self.ner:
            try:
                entities = self._get_hanlp_entities(text)
                # Entity types that may correspond to brands:
                # nt/org = organization, ns = place name.
                brand_entity_types = {'nt', 'org', 'ns'}

                for entity in entities:
                    if len(entity) == 0:
                        continue

                    # NOTE(review): mirrors the original indexing — assumes
                    # each entity's first element is a (text, type, ...) tuple;
                    # confirm against the MSRA NER output format.
                    entity_text = entity[0][0]
                    entity_type = entity[0][1].lower()
                    if (entity_type in brand_entity_types and 
                        len(entity_text) >= 2 and
                        self._is_valid_brand_candidate(entity_text) and
                        entity_text not in potential_tokens):
                        potential_tokens.append(entity_text)
            except Exception as e:
                logger.error(f"《《《《《《《《《《《实体识别错误: {str(e)}")

    def _collect_jieba_candidates(self, text: str, potential_tokens: List[str]) -> None:
        """Append jieba-derived candidates, keeping bracketed phrases intact."""
        # Protect 【...】 / [...] content from being split by jieba:
        # swap it for placeholders, tokenize, then restore.
        bracket_content = {}
        bracket_pattern = re.compile(r'【([^】]+)】|\[([^\]]+)\]')

        text_for_jieba = text
        for i, match in enumerate(bracket_pattern.finditer(text)):
            content = match.group(1) or match.group(2)
            placeholder = f"BRACKET_CONTENT_{i}"
            bracket_content[placeholder] = content
            text_for_jieba = text_for_jieba.replace(match.group(), placeholder)

        words = list(jieba.cut(text_for_jieba))

        # Restore bracketed phrases as single tokens.
        for i, word in enumerate(words):
            if word in bracket_content:
                words[i] = bracket_content[word]

        for word in words:
            if (len(word) >= 2 and 
                self._is_valid_brand_candidate(word) and
                word not in potential_tokens):
                potential_tokens.append(word)

    def _is_valid_brand_candidate(self, token: str) -> bool:
        """
        Heuristically decide whether a token could be a brand name.

        Args:
            token: Token to check.

        Returns:
            bool: True if the token is a plausible brand candidate.
        """
        # FIX: guard empty input — the old code divided by len(token).
        if not token:
            return False

        # Pure digits are never brands.
        if token.isdigit():
            return False

        # Reject tokens with a high ratio of non-alphanumeric characters.
        special_chars = sum(1 for c in token if not c.isalnum())
        if special_chars / len(token) > 0.3:
            return False

        # Marketing filler words and measurement units are not brands.
        if token in self._COMMON_NON_BRANDS:
            return False

        if token in self._COMMON_UNITS:
            return False

        # A single CJK character is too ambiguous to be a brand.
        if len(token) == 1 and '\u4e00' <= token <= '\u9fff':
            return False

        return True

    def extract_brands_from_text(self, text: str) -> List[Dict[str, Any]]:
        """
        Extract brands from a single piece of text.

        Args:
            text: Input text.

        Returns:
            List[Dict]: One entry per distinct brand with keys "brand",
            "confidence" ("high"/"medium"/"low") and "match_type"
            ("exact"/"fuzzy"/"new"); fuzzy/new entries also carry
            "similarity" and (fuzzy only) "original".
        """
        if not text or not isinstance(text, str):
            return []

        text = text.strip()
        if not text:
            return []

        results = []

        # 1. Exact matches against known brands — always high confidence.
        exact_matches = self._find_exact_matches(text)
        results.extend([
            {"brand": brand, "confidence": "high", "match_type": "exact"} 
            for brand in exact_matches
        ])

        # Strip exact matches so the tokenizer does not re-find them.
        processed_text = text
        for brand in exact_matches:
            # Whole-word removal first (case-insensitive).
            pattern = re.compile(r'\b' + re.escape(brand) + r'\b', re.IGNORECASE)
            processed_text = pattern.sub('', processed_text)
            # FIX: the old residual check compared the lowercased brand
            # against the original-cased text, so mixed-case leftovers were
            # never stripped (and \b never fires between adjacent CJK chars).
            if brand in processed_text.lower():
                processed_text = re.sub(re.escape(brand), '', processed_text,
                                        flags=re.IGNORECASE)

        # 2. Tokenize the remainder and fuzzy-match candidate tokens.
        if processed_text:
            potential_tokens = self._get_potential_brand_tokens(processed_text)

            if self.use_fuzzy and potential_tokens:
                for token in potential_tokens:
                    # Too short to be meaningful.
                    if len(token) < 2:
                        continue

                    # Skip tokens already reported as a brand.
                    if any(r["brand"].lower() == token.lower() for r in results):
                        continue

                    # Best fuzzy match over all known brands.
                    best_match = None
                    best_score = 0

                    for brand in self.brands:
                        # Case-insensitive so English brands match any casing.
                        score = fuzz.ratio(token.lower(), brand.lower())
                        if score > best_score and score >= self.min_similarity:
                            best_score = score
                            best_match = brand

                    if best_match:
                        # Map similarity to a confidence bucket.
                        if best_score >= 90:
                            confidence = "high"
                        elif best_score < 85:
                            confidence = "low"
                        else:
                            confidence = "medium"

                        results.append({
                            "brand": best_match,
                            "confidence": confidence,
                            "match_type": "fuzzy",
                            "similarity": best_score,
                            "original": token
                        })
                    elif self._is_valid_brand_candidate(token):
                        # No known brand matched — report as a possible new brand.
                        results.append({
                            "brand": token,
                            "confidence": "low",
                            "match_type": "new",
                            "similarity": 0
                        })

        # Deduplicate, keeping the highest-confidence entry per brand.
        unique_results = {}
        for r in results:
            key = r["brand"].lower()
            if (key not in unique_results or
                    self._confidence_score(r["confidence"]) >
                    self._confidence_score(unique_results[key]["confidence"])):
                unique_results[key] = r

        return list(unique_results.values())

    def _confidence_score(self, confidence: str) -> int:
        """
        Convert a confidence label to a numeric score.

        Args:
            confidence: Confidence label ("high"/"medium"/"low").

        Returns:
            int: 3/2/1 respectively; 0 for unknown labels.
        """
        scores = {"high": 3, "medium": 2, "low": 1}
        return scores.get(confidence, 0)

    def extract_potential_brands(self, product_names: List[str]) -> Dict[str, Any]:
        """
        Extract brands from a list of product names and aggregate statistics.

        Args:
            product_names: Product name list.

        Returns:
            Dict with keys:
                "known_brands": brand stats for brands in the known list,
                "unknown_brands": brand stats for brands not in the list,
                "processing_time": elapsed seconds,
                "brand_product_mapping": brand -> list of product names.
        """
        if not product_names:
            return {
                "known_brands": [],
                "unknown_brands": [],
                "processing_time": 0,
                "brand_product_mapping": {}
            }

        start_time = time.time()

        # One (brand, confidence, match_type) tuple per occurrence.
        brand_candidates = []

        # Brand -> products, keyed by the brand text as extracted (returned
        # to the caller unchanged).
        brand_product_map = {}

        # FIX: auxiliary map keyed by LOWERCASED brand. The old code counted
        # products via brand_product_map.get(lowercased_brand), which returned
        # 0 for any mixed-case brand.
        products_by_lower = {}

        for name in product_names:
            if not name or not isinstance(name, str) or len(name.strip()) < 2:
                continue

            extracted_brands = self.extract_brands_from_text(name)

            if not extracted_brands:
                continue

            for brand_info in extracted_brands:
                brand = brand_info["brand"]
                brand_product_map.setdefault(brand, []).append(name)
                products_by_lower.setdefault(brand.lower(), []).append(name)
                brand_candidates.append((brand, brand_info["confidence"], brand_info.get("match_type", "unknown")))

        if not brand_candidates:
            return {
                "known_brands": [],
                "unknown_brands": [],
                "processing_time": time.time() - start_time,
                "brand_product_mapping": {}
            }

        known_brands = []
        unknown_brands = []

        # Occurrence counts per lowercased brand.
        brand_counter = Counter(b[0].lower() for b in brand_candidates)

        # Pre-group the confidence / match-type votes per lowercased brand.
        # (The old code re-scanned all candidates for every distinct brand —
        # O(n^2); one grouping pass preserves the same most_common ordering.)
        confidence_votes = {}
        match_type_votes = {}
        for cand_brand, confidence, match_type in brand_candidates:
            key = cand_brand.lower()
            confidence_votes.setdefault(key, Counter())[confidence] += 1
            match_type_votes.setdefault(key, Counter())[match_type] += 1

        known_brand_set = set(b.lower() for b in self.brands)

        for brand, count in brand_counter.items():
            brand_stats = {
                "brand": brand,
                "count": count,
                "confidence": confidence_votes[brand].most_common(1)[0][0],
                "match_type": match_type_votes[brand].most_common(1)[0][0],
                "productCount": len(products_by_lower.get(brand, []))
            }

            # Classify as known (in the configured brand list) or unknown.
            if brand in known_brand_set:
                known_brands.append(brand_stats)
            else:
                unknown_brands.append(brand_stats)

        # Most frequent first.
        known_brands.sort(key=lambda x: x["count"], reverse=True)
        unknown_brands.sort(key=lambda x: x["count"], reverse=True)

        return {
            "known_brands": known_brands,
            "unknown_brands": unknown_brands,
            "processing_time": time.time() - start_time,
            "brand_product_mapping": brand_product_map
        } 