"""
增强版数据清洗处理器模块
整合NLP技术，提升数据清洗效果
"""
import pandas as pd
import logging
import time
import os
import re
from typing import List, Dict, Optional, Tuple, Any
import numpy as np
import json
import string
import unicodedata
from collections import Counter
try:
    import jieba
    import jieba.analyse
    JIEBA_AVAILABLE = True
except ImportError:
    JIEBA_AVAILABLE = False

try:
    from transformers import pipeline
    TRANSFORMERS_AVAILABLE = True
except ImportError:
    TRANSFORMERS_AVAILABLE = False

# 导入自定义增强模块
from backend.data_clean.enhanced_brand_recognition import BrandRecognizer
from backend.data_clean.enhanced_specification_extractor import SpecificationExtractor
from backend.data_clean.text_cleaner import TextCleaner
from backend.data_clean.config_manager import ConfigManager, config_manager
from backend.data_clean.clean_product_data import generate_deepseek_prompt, prepare_product_data
from backend.data_clean.api_service import call_deepseek_api
from backend.data_clean.processor import Processor

# 创建日志记录器
logger = logging.getLogger(__name__)

class EnhancedProcessor(Processor):
    """增强版数据处理器，提供高级文本清洗和分析功能"""
    
    def __init__(self, config_manager: ConfigManager = None):
        """
        Initialize the enhanced processor.

        Args:
            config_manager: ConfigManager instance; when omitted, falls back
                to the shared module-level singleton.

        Raises:
            Exception: Re-raised if the config manager fails to initialize.
        """
        super().__init__()
        # BUG FIX: the parameter shadows the module-level `config_manager`
        # singleton, so the old `config_manager or config_manager` could
        # never fall back to it — it evaluated to None when no argument was
        # given. Re-import the singleton locally under a different name.
        if config_manager is None:
            from backend.data_clean.config_manager import config_manager as _default_config_manager
            config_manager = _default_config_manager
        self.config_manager = config_manager

        # Make sure configuration data is loaded before components read it.
        if not hasattr(self.config_manager, '_is_initialized') or not self.config_manager._is_initialized:
            try:
                self.config_manager.initialize()
                logger.info("配置管理器初始化完成")
            except Exception as e:
                logger.error(f"配置管理器初始化失败: {str(e)}")
                raise

        # Build the brand / specification / text-cleaning components.
        self._init_components()

        # Warm up jieba and expose its keyword extractor when available.
        if JIEBA_AVAILABLE:
            jieba.initialize()
            self.extract_keywords = jieba.analyse.extract_tags

        # Optional transformers pipelines; stay None when unavailable.
        self.ner_pipeline = None
        self.sentiment_pipeline = None
        if TRANSFORMERS_AVAILABLE:
            try:
                self.ner_pipeline = pipeline("ner", model="dbmdz/bert-large-cased-finetuned-conll03-english")
                self.sentiment_pipeline = pipeline("sentiment-analysis")
            except Exception as e:
                # Switched from print() to the module logger for consistency.
                logger.warning(f"无法加载transformers模型: {e}")

        logger.info("增强版数据处理器初始化完成")
    
    def _init_components(self):
        """Build the brand, specification and text-cleaning helper components
        from the current configuration."""
        cfg = self.config_manager

        # Brand recognizer, configured with the known brand list and an
        # 85% similarity threshold.
        self.brand_recognizer = BrandRecognizer(
            brands=cfg.brands,
            min_similarity=85
        )

        # Specification extractor, configured with units and conversion maps.
        self.spec_extractor = SpecificationExtractor(
            units_list=cfg.units,
            unit_conversion=cfg.unit_conversion,
            standard_unit_map=cfg.standard_unit_map
        )

        # Text cleaner, configured with marketing words and common product names.
        self.text_cleaner = TextCleaner(
            marketing_words=cfg.marketing_words,
            product_names=cfg.common_products
        )

        logger.info("处理组件初始化完成")
    
    def _preprocess_text(self, text: str) -> str:
        """Normalise whitespace and strip bracketed content and symbol noise.

        Removes anything wrapped in (), {}, [] or 【】, replaces a fixed set
        of punctuation/symbol characters with spaces, and collapses runs of
        whitespace to single spaces.
        """
        if not text:
            return ""

        # Collapse any whitespace runs first.
        result = re.sub(r'\s+', ' ', text)

        # Drop content wrapped in ASCII or full-width brackets.
        for bracket_pattern in (r'\(.*?\)', r'\{.*?\}', r'\[.*?\]', r'【.*?】'):
            result = re.sub(bracket_pattern, '', result)

        # Replace leftover punctuation/symbol noise with spaces.
        result = re.sub(r'[*+~!@#$^&()_={}[\]|\\:;<>?]', ' ', result)

        # Collapse again and trim.
        return re.sub(r'\s+', ' ', result).strip()
    
    def extract_product_info(self, product_name: str, price: float = 0.0) -> Dict:
        """
        Extract structured information from a product name.

        The text is stripped in a fixed chain — preprocess, marketing words,
        specification, brand, marketing words again, final cleanup — and
        every step is recorded in ``processing_steps``.

        Args:
            product_name: Raw product name.
            price: Unit price; together with a positive standard value it
                yields ``standard_price`` (price per standard unit).

        Returns:
            Dict with original/cleaned names, brand info, specification
            fields, standardised values, a 0-1 confidence score and the
            step-by-step processing trace.
        """
        result = {
            'original_name': product_name,
            'original_price': price,
            'cleaned_name': '',
            'brand': '',
            'brand_in_database': False,
            'brand_confidence': 0.0,
            'specification': '',
            'spec_value': 0.0,
            'spec_unit': '',
            'standard_value': 0.0,
            'standard_unit': '',
            'standard_price': 0.0,
            'confidence_score': 0.0,
            'processing_steps': []
        }
        # Reject empty or single-character names outright.
        if not product_name or len(product_name.strip()) < 2:
            return result

        logger.info(f"[extract_product_info] 原始商品名: {product_name}")
        steps = []

        # 1. Normalise whitespace, strip brackets and symbol noise.
        processed_text = self._preprocess_text(product_name)
        steps.append({"step": "预处理", "input": product_name, "output": processed_text})

        # 2. Remove marketing words.
        cleaned_info_1 = self.text_cleaner.clean_text(processed_text)
        processed_text = cleaned_info_1['cleaned_text']
        steps.append({"step": "去营销词", "output": processed_text})

        # 3. Extract and remove the specification.
        spec_info = self.spec_extractor.extract_specifications(processed_text)
        if spec_info['spec_text']:
            result['specification'] = spec_info['spec_text']
            result['spec_value'] = spec_info['spec_value']
            result['spec_unit'] = spec_info['spec_unit']
            result['standard_value'] = spec_info['standard_value']
            result['standard_unit'] = spec_info['standard_unit']
            processed_text = spec_info['cleaned_text']
            # BUG FIX: these patterns used '|' inside character classes,
            # where '|' is a literal pipe rather than an alternation
            # separator. Rewritten as a real alternation (longest units
            # first) and a pipe-free character class.
            processed_text = re.sub(r'\d+\s*(?:千克|公斤|毫升|kg|KG|ml|ML|克|斤|升|g|G|L)', '', processed_text)
            processed_text = re.sub(r'[0-9一二三四五六七八九十百千万亿]+\s*[件包袋盒箱瓶罐个对片]+装?', '', processed_text)
            processed_text = re.sub(r'\s+', ' ', processed_text).strip()
            steps.append({"step": "去规格", "found": spec_info['spec_text'], "output": processed_text})

        # 4. Extract and remove the brand.
        brand_result = self.brand_recognizer.extract_and_clean_brands(processed_text)
        logger.info(f"[extract_product_info] 品牌识别结果: brand_string={brand_result.get('brand_string','')}, brand_list={ [b['brand'] for b in brand_result.get('brands', []) ] }")
        result['brand'] = brand_result.get('brand_string', '')
        result['brand_list'] = [b['brand'] for b in brand_result.get('brands', [])]
        result['brand_extract_method'] = 'BrandRecognizer'
        # NOTE(review): 'brand_in_database' and 'brand_confidence' are never
        # updated in this method, so the brand branch of the confidence
        # formula below always contributes 0 — confirm whether intended.

        processed_text = brand_result.get('cleaned_text', processed_text)
        if result['brand']:
            # Remove any residual brand mentions the recognizer left behind.
            for brand in result['brand_list']:
                if re.search(r'[^a-zA-Z0-9\u4e00-\u9fa5]', brand):
                    # Brand contains special characters: match it verbatim.
                    pattern = re.compile(re.escape(brand), re.IGNORECASE)
                else:
                    # Plain brand: require word boundaries.
                    pattern = re.compile(r'\b' + re.escape(brand) + r'\b', re.IGNORECASE)
                processed_text = pattern.sub('', processed_text)
            processed_text = re.sub(r'\s+', ' ', processed_text).strip()
        steps.append({"step": "去品牌", "found": result['brand'], "output": processed_text})

        # 5. Second marketing-word pass over the stripped text.
        cleaned_info_2 = self.text_cleaner.clean_text(processed_text)
        processed_text = cleaned_info_2['cleaned_text']
        steps.append({"step": "二次去营销词", "output": processed_text})

        # 6. Final cleanup: residual brackets, then ALL whitespace (not just
        # extras), then residual punctuation.
        processed_text = re.sub(r'[\(（].*?[\)）]', '', processed_text)
        processed_text = re.sub(r'\s', '', processed_text).strip()
        processed_text = re.sub(r'[,，。.、/\\"\']+', '', processed_text).strip()

        # 7. Pick the longest run of 2+ Chinese characters as the name; fall
        # back to whatever text remains.
        matches = re.findall(r'[\u4e00-\u9fa5]{2,}', processed_text)
        if matches:
            result['cleaned_name'] = max(matches, key=len)
        else:
            result['cleaned_name'] = processed_text.strip()

        # 8. Quality check: fall back to the raw name when cleaning left
        # almost nothing. (BUG FIX: this check used to run twice, appending
        # a duplicate step entry; the -30 confidence penalty is now applied
        # once via the flag in step 10.)
        name_too_short = len(processed_text) < 2
        if name_too_short:
            logger.warning(f"[extract_product_info] 清洗后文本{processed_text}过短，回退到原始名称: {product_name}")
            result['cleaned_name'] = product_name
            steps.append({"step": "名称质量检查失败", "reason": "清洗后名称过短", "action": "回退到原始名称"})

        # 9. Price per standard unit, when both inputs are positive.
        if price > 0 and result['standard_value'] > 0:
            result['standard_price'] = round(price / result['standard_value'], 2)
            steps.append({"step": "计算标准价格", "price": price, "standard_value": result['standard_value'], "standard_price": result['standard_price']})

        # 10. Heuristic confidence: base 50, bonuses for brand/spec, penalties
        # for heavy cleaning or an over-short name; clamped then scaled to 0-1.
        confidence = 50.0
        if result['brand']:
            if result['brand_in_database']:
                confidence += 20.0 * result['brand_confidence']
            else:
                confidence += 10.0 * result['brand_confidence']
        if result['specification']:
            confidence += 20.0
            if result['standard_unit']:
                confidence += 10.0
        if cleaned_info_1.get('is_significantly_changed') or cleaned_info_2.get('is_significantly_changed'):
            confidence -= 5.0
        if name_too_short:
            confidence -= 30.0
        confidence = max(0.0, min(100.0, confidence))
        result['confidence_score'] = confidence / 100.0
        result['processing_steps'] = steps
        logger.info(f"[extract_product_info] 最终写入结果: brand={result['brand']}, brand_list={result['brand_list']}，cleaned_name={result['cleaned_name']}，steps={steps}")
        return result
    
    def process_dataframe(self, df: pd.DataFrame, name_column: str = '商品名称', price_column: str = '商品价格') -> pd.DataFrame:
        """
        Process a whole DataFrame row by row via extract_product_info().

        Args:
            df: Input DataFrame.
            name_column: Column holding the product name.
            price_column: Column holding the product price.

        Returns:
            A copy of the input with brand/spec/standardised-price columns
            filled in; rows that fail keep the original name and score 0.

        Raises:
            ValueError: If ``name_column`` is missing from the DataFrame.
        """
        start_time = time.time()
        logger.info(f"开始处理DataFrame，共{len(df)}条数据")
        
        # Work on a copy so the caller's DataFrame is untouched.
        result_df = df.copy()
        
        # The name column is mandatory.
        if name_column not in result_df.columns:
            raise ValueError(f"输入DataFrame中缺少'{name_column}'列")
        
        # The price column is optional; coerce to numeric (invalid -> NaN).
        price_col = None
        if price_column in result_df.columns:
            price_col = price_column
            result_df[price_col] = pd.to_numeric(result_df[price_col], errors='coerce')
        else:
            logger.warning(f"未找到价格列'{price_column}'，将使用默认值0.0")
        
        # Output columns produced by extract_product_info().
        new_columns = [
            'brand', 'brand_in_database', 'brand_confidence',
            'cleaned_name', 'specification', 'spec_value', 'spec_unit',
            'standard_value', 'standard_unit', 'standard_price', 'confidence_score'
        ]
        
        # Pre-create missing output columns; numeric-looking names default
        # to 0.0, everything else to the empty string.
        for col in new_columns:
            if col not in result_df.columns:
                if 'value' in col or 'price' in col or 'confidence' in col:
                    result_df[col] = 0.0
                else:
                    result_df[col] = ''
        
        # Per-row processing trace used for the summary statistics below.
        processing_details = []
        
        # Process each row individually.
        for idx, row in result_df.iterrows():
            # Treat NaN names as empty and skip them.
            product_name = str(row[name_column]) if not pd.isna(row[name_column]) else ''
            if not product_name:
                continue
            
            # NaN prices fall back to 0.0.
            price = 0.0
            if price_col:
                price = float(row[price_col]) if not pd.isna(row[price_col]) else 0.0
            
            try:
                product_info = self.extract_product_info(product_name, price)
                
                # Copy the extracted fields back into the row.
                for col in new_columns:
                    if col in product_info:
                        result_df.at[idx, col] = product_info[col]
                
                processing_details.append({
                    'index': idx,
                    'original_name': product_name,
                    'processed_info': product_info
                })
                
                # Log only a sample of rows to keep the log small.
                # NOTE(review): the arithmetic on `idx` assumes an integer
                # (RangeIndex-style) index — confirm for non-default indexes.
                if idx < 5 or idx % 100 == 0 or idx == len(result_df) - 1:
                    logger.info(f"处理第{idx+1}/{len(result_df)}条: {product_name} -> 品牌:{product_info['brand']},品名:{product_info['cleaned_name']}, 规格:{product_info['specification']}, 置信度:{product_info['confidence_score']:.2f}")
            
            except Exception as e:
                logger.error(f"处理第{idx+1}条数据时出错: {str(e)}")
                # On failure keep the original name with zero confidence.
                result_df.at[idx, 'cleaned_name'] = product_name
                result_df.at[idx, 'confidence_score'] = 0.0
        
        # Summary statistics over the successfully processed rows.
        processed_count = len([d for d in processing_details if d['processed_info']['confidence_score'] > 0])
        high_confidence_count = len([d for d in processing_details if d['processed_info']['confidence_score'] >= 0.7])
        
        logger.info(f"DataFrame处理完成，耗时{time.time() - start_time:.2f}秒")
        logger.info(f"处理统计: 总数据={len(df)}, 成功处理={processed_count}, 高置信度({'>='} 0.7)={high_confidence_count}")
        
        return result_df
    
    def hybrid_process(self, df: pd.DataFrame, use_llm: bool = True, llm_threshold: float = 0.7, force_llm: bool = False) -> pd.DataFrame:
        """
        Hybrid strategy: rule-based processing first, then an LLM pass over
        low-confidence rows.

        Args:
            df: Input DataFrame.
            use_llm: Whether to send low-confidence rows to the LLM.
            llm_threshold: Rows with confidence below this value go to the LLM.
            force_llm: If True, send every row to the LLM regardless of score.

        Returns:
            DataFrame with an added 'process_method' column ('rule'/'model').
            Any LLM failure falls back to the pure rule results.
        """
        start_time = time.time()
        logger.info(f"开始混合处理，数据量: {len(df)}, use_llm={use_llm}, llm_threshold={llm_threshold}, force_llm={force_llm}")
        
        # Rebuild components so the latest configuration (e.g. a freshly
        # updated brand list) is picked up.
        logger.info("重新初始化处理组件以使用最新配置")
        self._init_components()
        logger.info(f"处理组件已重新初始化，当前品牌数量: {len(self.config_manager.brands)}")
        
        # Step 1: rule-based processing over the full dataset.
        logger.info("第一步: 使用增强规则处理所有数据")
        rule_results_df = self.process_dataframe(df)
        
        # Without the LLM the rule results are final.
        if not use_llm:
            logger.info("不使用大模型，直接返回规则处理结果")
            rule_results_df['process_method'] = 'rule'
            return rule_results_df
        
        # Select the rows the LLM should re-process.
        if force_llm:
            logger.info("强制使用大模型处理所有数据")
            low_confidence_df = df.copy()
            low_confidence_indices = df.index.tolist()
        else:
            logger.info(f"筛选置信度低于{llm_threshold}的数据用大模型处理")
            low_confidence_mask = rule_results_df['confidence_score'] < llm_threshold
            low_confidence_indices = rule_results_df.index[low_confidence_mask].tolist()
            low_confidence_df = df.loc[low_confidence_indices].copy()
            
            logger.info(f"共找到{len(low_confidence_df)}条低置信度数据需要大模型处理")
        
        # Nothing left for the LLM: the rule results are final.
        if len(low_confidence_df) == 0:
            logger.info("没有需要大模型处理的数据，直接返回规则处理结果")
            rule_results_df['process_method'] = 'rule'
            return rule_results_df
        
        # Step 2: LLM pass over the low-confidence subset.
        logger.info(f"第二步: 使用大模型处理{len(low_confidence_df)}条低置信度数据")
        try:
            # Convert rows into the payload format the LLM API expects.
            product_data = prepare_product_data(low_confidence_df)
            
            # Imported here rather than at module top — presumably to avoid
            # a circular import with backend.data_clean.processor; confirm.
            from backend.data_clean.processor import deepseek_data_clean, process_deepseek_results
            model_results = deepseek_data_clean(product_data, db=None, batch_size=50)
            
            if model_results:
                model_df = process_deepseek_results(model_results, db=None)
                logger.info(f"大模型处理完成，获得{len(model_df)}条结果")
                
                # Merge: start from the rule results and overwrite LLM rows.
                final_df = rule_results_df.copy()
                final_df['process_method'] = 'rule'
                
                # Fast path: model returned exactly one row per input row.
                if len(model_df) == len(low_confidence_indices):
                    # Align model rows positionally with the selected indices.
                    model_df.index = low_confidence_indices
                    
                    final_df.loc[low_confidence_indices, 'process_method'] = 'model'
                    
                    # Overwrite only columns that exist in both frames.
                    for col in model_df.columns:
                        if col in final_df.columns:
                            final_df.loc[low_confidence_indices, col] = model_df[col].values
                else:
                    # Counts differ: update row by row for as many results
                    # as are available.
                    logger.warning(f"模型结果数量({len(model_df)})与低置信度数据数量({len(low_confidence_indices)})不匹配，使用安全更新")
                    available_count = min(len(model_df), len(low_confidence_indices))
                    
                    for i in range(available_count):
                        idx = low_confidence_indices[i]
                        final_df.at[idx, 'process_method'] = 'model'
                        
                        for col in model_df.columns:
                            if col in final_df.columns:
                                final_df.at[idx, col] = model_df.iloc[i][col]
                
                # Summary counts for logging.
                rule_count = sum(final_df['process_method'] == 'rule')
                model_count = sum(final_df['process_method'] == 'model')
                
                logger.info(f"混合处理完成，耗时: {time.time() - start_time:.2f}秒")
                logger.info(f"处理统计: 总数据={len(final_df)}, 规则处理={rule_count}条, 模型处理={model_count}条")
                
                return final_df
            else:
                logger.warning("大模型未返回有效结果，使用规则处理结果")
                rule_results_df['process_method'] = 'rule'
                return rule_results_df
        
        except Exception as e:
            # Best-effort: never let an LLM failure lose the rule results.
            logger.error(f"大模型处理出错: {str(e)}")
            rule_results_df['process_method'] = 'rule'
            return rule_results_df
    
    def process(self, text):
        """
        Run the base cleaning plus the advanced cleanup passes.

        Args:
            text (str): Text to process.

        Returns:
            str: Cleaned text, or "" for empty/non-string input.
        """
        if not isinstance(text, str) or not text:
            return ""

        # Base cleaning first, then each advanced pass in order:
        # duplicate removal, noise removal, alphanumeric normalisation.
        cleaned = super().process(text)
        for transform in (
            self._remove_duplicates,
            self._remove_noise,
            self._normalize_alphanumeric,
        ):
            cleaned = transform(cleaned)

        return cleaned
    
    def _remove_duplicates(self, text):
        """
        Drop repeated occurrences of high-frequency tokens.

        Tokens occurring at most 3 times are kept verbatim; tokens occurring
        more often are kept only on their first occurrence.
        """
        tokens = self.tokenize(text)

        # Frequency of every token.
        token_counts = Counter(tokens)

        filtered_tokens = []
        # PERF FIX: membership used to be checked against the result list
        # (O(n) per token, O(n^2) overall); a set gives O(1) lookups with
        # identical results.
        seen = set()
        for token in tokens:
            if token_counts[token] <= 3 or token not in seen:
                filtered_tokens.append(token)
                seen.add(token)

        # Re-join with single spaces.
        return ' '.join(filtered_tokens)
    
    def _remove_noise(self, text):
        """Strip control characters, HTML-like tags, URLs and e-mail
        addresses from the text."""
        noise_patterns = (
            r'[\x00-\x1F\x7F-\x9F]',    # control characters
            r'<[^>]*>',                 # HTML-like tags
            r'https?://\S+|www\.\S+',   # URLs
            r'\S+@\S+',                 # e-mail addresses
        )
        for pattern in noise_patterns:
            text = re.sub(pattern, '', text)
        return text
    
    def _normalize_alphanumeric(self, text):
        """Insert spaces at letter/digit boundaries (e.g. "iphone13" ->
        "iphone 13") and before common unit suffixes."""
        # Split letter->digit and digit->letter transitions.
        for boundary in (r'([a-zA-Z])(\d)', r'(\d)([a-zA-Z])'):
            text = re.sub(boundary, r'\1 \2', text)

        # Separate a digit glued to a known unit (e.g. "5kg" -> "5 kg").
        text = re.sub(r'(\d)(kg|g|ml|l|mm|cm|m|GB|MB|TB)', r'\1 \2', text, flags=re.IGNORECASE)

        return text
    
    def tokenize(self, text):
        """
        Tokenize text with jieba when available, else the base tokenizer.

        Content inside 【】 or [] brackets is shielded behind placeholder
        tokens so jieba does not split it, then restored after segmentation.

        Args:
            text (str): Text to tokenize.

        Returns:
            list: List of non-empty tokens.
        """
        # NOTE(review): super().process(text) is called here purely to decide
        # whether to run self.process(text), so the base cleaning runs at
        # least once and sometimes twice — confirm the intent before
        # simplifying.
        processed_text = self.process(text) if text != super().process(text) else text
        
        # Use jieba when the import succeeded at module load time.
        if JIEBA_AVAILABLE:
            # Map each placeholder back to its original bracketed content.
            bracket_content = {}
            bracket_pattern = re.compile(r'【([^】]+)】|\[([^\]]+)\]')
            
            # Swap each bracketed span for a unique placeholder token.
            text_for_jieba = processed_text
            for i, match in enumerate(bracket_pattern.finditer(processed_text)):
                content = match.group(1) or match.group(2)
                placeholder = f"BRACKET_CONTENT_{i}"
                bracket_content[placeholder] = content
                text_for_jieba = text_for_jieba.replace(match.group(), placeholder)
            
            # Segment with jieba.
            words = list(jieba.cut(text_for_jieba))
            
            # Restore bracketed content in place of the placeholders.
            # NOTE(review): restoration only works if jieba emits the
            # placeholder as a single token; if "BRACKET_CONTENT_0" gets
            # split into several tokens the content is lost — verify with
            # real inputs.
            for i, word in enumerate(words):
                if word in bracket_content:
                    words[i] = bracket_content[word]
                    
            return [token for token in words if token.strip()]
        else:
            # No jieba: fall back to the base class tokenizer.
            return super().tokenize(processed_text)
    
    def extract_entities(self, text):
        """
        Extract named entities via the NER pipeline.

        Args:
            text (str): Text to analyze.

        Returns:
            list: One dict per entity with keys 'entity', 'word' and
            'score'; empty when the pipeline is unavailable or fails.
        """
        if not self.ner_pipeline:
            return []

        try:
            # Run the pipeline and reshape each hit into a plain dict,
            # coercing the score to a native float.
            return [
                {
                    'entity': item['entity'],
                    'word': item['word'],
                    'score': float(item['score']),
                }
                for item in self.ner_pipeline(text)
            ]
        except Exception as e:
            print(f"实体识别错误: {e}")
            return []
    
    def analyze_sentiment(self, text):
        """
        Analyze the sentiment of a text.

        Args:
            text (str): Text to analyze.

        Returns:
            dict: {'label': ..., 'score': ...}; empty dict when the
            pipeline is unavailable or fails.
        """
        if not self.sentiment_pipeline:
            return {}

        try:
            # The pipeline returns a one-element list; use the first entry.
            top_result = self.sentiment_pipeline(text)[0]
            return {
                'label': top_result['label'],
                'score': float(top_result['score']),
            }
        except Exception as e:
            print(f"情感分析错误: {e}")
            return {}
    
    def _extract_brands(self, text: str) -> Dict[str, Any]:
        """
        Extract the best brand match from text and strip it from the text.

        Args:
            text: Input text.

        Returns:
            Dict with keys 'matched_brand', 'in_database' (whether the brand
            exists in the configured brand list), 'confidence' (0.0-1.0) and
            'cleaned_text' (text with the brand removed).
        """
        result = {
            'matched_brand': '',
            'in_database': False,
            'confidence': 0.0,
            'cleaned_text': text
        }

        if not text or not isinstance(text, str):
            return result

        # Delegate detection to the brand recognizer.
        brand_matches = self.brand_recognizer.extract_brands_from_text(text)

        if not brand_matches:
            return result

        # Take the first (best) match; scale percentage confidence to 0-1,
        # defaulting to 0.8 for non-numeric confidence values.
        top_match = brand_matches[0]
        brand = top_match["brand"]
        confidence = top_match["confidence"] / 100.0 if isinstance(top_match["confidence"], (int, float)) else 0.8

        # Case-insensitive lookup against the configured brand list.
        in_database = brand.lower() in [b.lower() for b in self.config_manager.brands]

        if in_database:
            logger.debug(f"找到品牌匹配: '{brand}' 在数据库中存在，置信度:{confidence:.2f}")
        else:
            logger.debug(f"找到品牌匹配: '{brand}' 不在数据库中，置信度:{confidence:.2f}")

        # BUG FIX: `\b` never matches between CJK characters (they count as
        # word characters), so Chinese brand names were never removed from
        # the text. Match verbatim when the brand contains anything beyond
        # ASCII alphanumerics, mirroring extract_product_info().
        if re.search(r'[^a-zA-Z0-9]', brand):
            pattern = re.compile(re.escape(brand), re.IGNORECASE)
        else:
            pattern = re.compile(r'\b' + re.escape(brand) + r'\b', re.IGNORECASE)
        cleaned_text = pattern.sub('', text)
        cleaned_text = re.sub(r'\s+', ' ', cleaned_text).strip()

        return {
            'matched_brand': brand,
            'in_database': in_database,
            'confidence': confidence,
            'cleaned_text': cleaned_text
        }

# 简单测试代码
if __name__ == "__main__":
    # 设置日志
    logging.basicConfig(level=logging.INFO)
    
    # 实例化增强处理器
    processor = EnhancedProcessor()
    
    # 测试数据
    test_data = [
        {"商品名称": "每日鲜本地油菜 约350g", "商品价格": 2.98},
        {"商品名称": "金龙鱼 大米 5kg装 新老包装随机发", "商品价格": 59.9},
        {"商品名称": "鲁花 压榨一级花生油 1.8L", "商品价格": 69.9},
        {"商品名称": "泸州老窖泸州传奇白酒礼盒 52度浓香型白酒 500ml*2瓶 仁缘聚福礼盒装", "商品价格": 399.3},
        {"商品名称": "多点工坊×马大姐内酯豆腐 350g", "商品价格": 1.68},
        {"商品名称": "黄天鹅溏心蛋盐味12枚即食早餐七分熟高蛋白营养加餐轻食健康食品 盐味12枚", "商品价格": 70.0}
    ]
    
    # 创建测试DataFrame
    test_df = pd.DataFrame(test_data)
    
    # 处理测试数据
    result_df = processor.process_dataframe(test_df)
    
    # 显示结果
    print("\n处理结果:\n")
    for _, row in result_df.iterrows():
        print(f"原名称: {row['商品名称']}")
        print(f"清洗后: {row['cleaned_name']}")
        print(f"品牌: {row['brand']}, 规格: {row['specification']}")
        if row['standard_unit']:
            print(f"标准化: {row['standard_value']} {row['standard_unit']}, 标准价格: {row['standard_price']}")
        print(f"置信度: {row['confidence_score']:.2f}")
        print("-" * 60) 