#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File: attribute_matching.py
# Time: 2023/8/19
# File-Desp: System for matching product attributes between different sources

import re
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from fuzzywuzzy import fuzz
import json
from sentence_transformers import SentenceTransformer
import jieba
import difflib
from typing import Dict, List, Tuple, Any, Union

import warnings
warnings.filterwarnings('ignore')

# Load data from presto
from data_presto import ahs_property, zz_property

# Optimized singleton implementation to guarantee thread safety
import threading

# Improved singleton holder for the shared embedding model
class EmbeddingModelSingleton:
    """Thread-safe, lazily-initialized holder for one shared SentenceTransformer.

    The model is expensive to load, so it is created at most once per process
    and shared by every ``AttributeMatcher``.  If loading fails (e.g. the
    library or model files are unavailable), ``embedding_model`` stays ``None``
    and callers are expected to fall back to TF-IDF similarity.
    """

    _instance = None                # the singleton instance, published last
    _lock = threading.Lock()        # guards first-time construction
    embedding_model = None          # shared SentenceTransformer, or None on failure
    is_loaded = False               # True only after a successful load + warm-up

    @classmethod
    def get_instance(cls, model_name='paraphrase-multilingual-MiniLM-L12-v2', use_gpu=False):
        """Return the process-wide singleton, loading the model on first call.

        Args:
            model_name: sentence-transformers model to load.
            use_gpu: move the model to CUDA when True, CPU otherwise.

        NOTE: ``model_name`` and ``use_gpu`` are honored only on the very
        first call; later calls return the already-built singleton unchanged.

        Returns:
            The singleton instance; inspect ``embedding_model`` / ``is_loaded``
            to see whether the model is actually available.
        """
        # Double-checked locking: the unsynchronized fast path avoids taking
        # the lock once the singleton exists; the check is repeated under the
        # lock to serialize first-time construction.
        if cls._instance is None:
            with cls._lock:
                if cls._instance is None:
                    instance = cls()
                    if not cls.is_loaded:
                        try:
                            print("Loading embedding model (this will only happen once)...")
                            model = SentenceTransformer(model_name)
                            device = 'cuda' if use_gpu else 'cpu'
                            model.to(device)
                            print(f"Loaded embedding model on {device}")

                            # Warm up so the first real encode() call is fast.
                            print("Warming up the model...")
                            _ = model.encode(["测试模型预热", "确保模型完全加载"])
                            print("Model warmed up and ready!")
                            cls.embedding_model = model
                            cls.is_loaded = True
                        except Exception as e:
                            print(f"Failed to load embedding model: {e}")
                            print("Falling back to TF-IDF vectorization")
                            cls.embedding_model = None
                    # BUGFIX: publish the instance only after loading finished,
                    # so a racing reader of the fast path can never observe a
                    # singleton whose model is still being initialized.
                    cls._instance = instance
        return cls._instance

# Initialize the singleton at module level so the expensive model load
# happens exactly once per process, at import time.
# NOTE(review): this is a deliberate module-level side effect — merely
# importing this module triggers model loading and printing; confirm that
# every importer wants that.
print("Initializing model singleton...")
model_singleton = EmbeddingModelSingleton.get_instance()
print(f"Model singleton initialization complete. Model loaded: {model_singleton.is_loaded}")

class AttributeMatcher:
    """Match product attributes between two data sources.

    Combines three similarity strategies: sentence embeddings (when the
    shared singleton model loaded successfully), character n-gram TF-IDF,
    and fuzzy string matching with extra weight on extracted numbers
    (memory sizes, capacities, etc.).
    """

    def __init__(self,
                 embedding_model_name: str = 'paraphrase-multilingual-MiniLM-L12-v2',
                 use_gpu: bool = False):
        """
        Initialize the attribute matcher with required models and settings

        Args:
            embedding_model_name: Name of the sentence-transformers model to use
            use_gpu: Whether to use GPU for embeddings
        """
        # Reuse the process-wide singleton; this is None when the model
        # failed to load, in which case the TF-IDF paths below are used.
        self.embedding_model = EmbeddingModelSingleton.get_instance(
            model_name=embedding_model_name,
            use_gpu=use_gpu
        ).embedding_model

        # TF-IDF vectorizer for simpler character-level text comparison.
        self.tfidf_vectorizer = TfidfVectorizer(analyzer='char_wb',
                                               ngram_range=(2, 3))

    def preprocess_text(self, text: str) -> str:
        """
        Preprocess text for better matching.

        Args:
            text: Input text string

        Returns:
            Lowercased, whitespace-normalized (and, for Chinese text,
            jieba-segmented) string; "" for empty/non-string input.
        """
        if not text or not isinstance(text, str):
            return ""

        text = text.lower()

        # Collapse runs of whitespace to a single space.
        text = re.sub(r'\s+', ' ', text)

        # Segment Chinese text with jieba so token-based comparisons work.
        if any('\u4e00' <= char <= '\u9fff' for char in text):
            text = ' '.join(jieba.cut(text))

        return text.strip()

    def extract_numbers(self, text: str) -> List[str]:
        """
        Extract numbers and units from text.

        Args:
            text: Input text string

        Returns:
            List of extracted number+unit tokens (may contain duplicates;
            callers compare them as sets). "8G+128G" yields both capacities.
        """
        if not text or not isinstance(text, str):
            return []

        # Memory capacities such as "8G+128G", "16GB", "1TB".
        # BUGFIX: the old class [G|T|M] also matched a literal '|', and the
        # captured numbers were always re-labelled "G" (so "1TB" came out
        # as "1G"). Capture the unit and preserve it instead.
        memory_pattern = r'(\d+)\s*([GTM])B?(?:\s*\+\s*(\d+)\s*([GTM])B?)?'
        memory_matches = re.findall(memory_pattern, text, re.IGNORECASE)

        # Any other numeric value with an optional alphabetic/% unit.
        number_pattern = r'(\d+(?:\.\d+)?)\s*([a-zA-Z%]*)'
        number_matches = re.findall(number_pattern, text)

        results = []
        # Each memory match carries up to two number/unit pairs ("8G+128G").
        for num1, unit1, num2, unit2 in memory_matches:
            results.append(f"{num1}{unit1.upper()}")
            if num2:
                results.append(f"{num2}{unit2.upper()}")

        for num, unit in number_matches:
            results.append(f"{num}{unit}")

        return results

    def get_text_embedding(self, text: str) -> np.ndarray:
        """
        Get an embedding vector for text.

        Args:
            text: Input text to embed

        Returns:
            Embedding vector (zeros for empty input).
        """
        preprocessed = self.preprocess_text(text)

        if not preprocessed:
            return np.zeros(384)  # dimension of the MiniLM default model

        if self.embedding_model:
            return self.embedding_model.encode([preprocessed])[0]

        # NOTE(review): fitting TF-IDF on a single document yields vectors
        # whose dimensions are NOT comparable across calls; prefer the
        # 'tfidf' branch of calculate_similarity, which fits both texts
        # together. Kept for backward compatibility.
        tfidf_matrix = self.tfidf_vectorizer.fit_transform([preprocessed])
        return tfidf_matrix.toarray()[0]

    def calculate_similarity(self,
                           text1: str,
                           text2: str,
                           method: str = 'auto') -> float:
        """
        Calculate similarity between two text strings.

        Args:
            text1: First text string
            text2: Second text string
            method: Similarity method ('auto', 'embedding', 'edit_distance', 'tfidf')

        Returns:
            Similarity score clamped to [0, 1]

        Raises:
            ValueError: for an unknown ``method``.
        """
        if not text1 or not text2:
            return 0.0

        if method == 'auto':
            nums1 = self.extract_numbers(text1)
            nums2 = self.extract_numbers(text2)

            # Numeric content (capacities, sizes) is best compared by
            # fuzzy matching plus number overlap, not semantics.
            if nums1 and nums2:
                method = 'edit_distance'
            else:
                method = 'embedding' if self.embedding_model else 'tfidf'

        # BUGFIX: an explicit 'embedding' request without a loaded model
        # previously compared independently-fitted TF-IDF vectors of
        # different dimensions; route it to the paired TF-IDF path instead.
        if method == 'embedding' and not self.embedding_model:
            method = 'tfidf'

        if method == 'embedding':
            emb1 = self.get_text_embedding(text1)
            emb2 = self.get_text_embedding(text2)
            similarity = cosine_similarity([emb1], [emb2])[0][0]

        elif method == 'tfidf':
            text1_prep = self.preprocess_text(text1)
            text2_prep = self.preprocess_text(text2)

            if not text1_prep or not text2_prep:
                return 0.0

            # Fit on both texts together so the vectors share one vocabulary.
            tfidf = TfidfVectorizer(analyzer='char_wb', ngram_range=(2, 3))
            tfidf_matrix = tfidf.fit_transform([text1_prep, text2_prep])
            similarity = cosine_similarity(tfidf_matrix[0:1], tfidf_matrix[1:2])[0][0]

        elif method == 'edit_distance':
            similarity = fuzz.token_sort_ratio(text1, text2) / 100.0

            # Blend in Jaccard overlap of the extracted numbers, weighted
            # heavily: matching capacities matter more than matching words.
            nums1 = self.extract_numbers(text1)
            nums2 = self.extract_numbers(text2)

            if nums1 and nums2:
                intersection = len(set(nums1).intersection(set(nums2)))
                union = len(set(nums1).union(set(nums2)))
                if union > 0:
                    number_sim = intersection / union
                    similarity = 0.3 * similarity + 0.7 * number_sim
        else:
            raise ValueError(f"Unknown similarity method: {method}")

        return max(0.0, min(1.0, similarity))  # clamp to [0, 1]

    @staticmethod
    def _normalize_attrs(attrs: Dict[str, Union[str, List[str]]]) -> Dict[str, str]:
        """Flatten list-valued attributes to their first element; drop empty lists."""
        normalized = {}
        for key, value in attrs.items():
            if isinstance(value, list):
                if value:
                    normalized[key] = value[0]
            else:
                normalized[key] = value
        return normalized

    def find_best_attribute_matches(self,
                                  source_attrs: Dict[str, Union[str, List[str]]],
                                  target_attrs: Dict[str, Union[str, List[str]]],
                                  threshold: float = 0.6) -> List[Tuple[str, str, float]]:
        """
        Find the best matches between source and target attributes.

        Args:
            source_attrs: Dictionary of source attributes {key: value}
            target_attrs: Dictionary of target attributes {key: value}
            threshold: Minimum combined similarity to report a match

        Returns:
            List of (source_key, target_key, similarity_score) tuples,
            sorted by score descending.
        """
        matches = []

        normalized_source = self._normalize_attrs(source_attrs)
        # Generalization: target values may also arrive as lists (the old
        # code only normalized the source side).
        normalized_target = self._normalize_attrs(target_attrs)

        for src_key, src_value in normalized_source.items():
            best_score = 0
            best_target_key = None

            for tgt_key, tgt_value in normalized_target.items():
                key_sim = self.calculate_similarity(src_key, tgt_key)
                value_sim = self.calculate_similarity(src_value, tgt_value)

                # Values carry more signal than attribute names.
                combined_sim = 0.4 * key_sim + 0.6 * value_sim

                if combined_sim > best_score:
                    best_score = combined_sim
                    best_target_key = tgt_key

            if best_score >= threshold and best_target_key is not None:
                matches.append((src_key, best_target_key, best_score))

        matches.sort(key=lambda x: x[2], reverse=True)

        return matches

    def process_products_from_dataframes(self,
                                      df_source: pd.DataFrame,
                                      df_target: pd.DataFrame,
                                      source_prod_col: str = 'product_name',
                                      target_prod_col: str = 'product_name',
                                      source_attr_col: str = 'property_name',
                                      target_attr_col: str = 'property_items_name',
                                      source_val_col: str = 'property_value_name',
                                      target_val_col: str = 'property_value_name',
                                      max_products: int = 5) -> Dict:
        """
        Process and match attributes for products from two dataframes.

        Args:
            df_source: Source dataframe
            df_target: Target dataframe
            *_col: Column name mappings
            max_products: Number of source products to process (was a
                hard-coded limit of 5; default preserves old behavior)

        Returns:
            Dictionary keyed by source product name with matching results.
        """
        results = {}

        source_products = df_source[source_prod_col].unique()
        target_products = df_target[target_prod_col].unique()

        for src_product in source_products[:max_products]:
            print(f"Processing source product: {src_product}")

            # Collect this source product's attributes as {name: value}.
            src_attrs = df_source[df_source[source_prod_col] == src_product]
            src_attr_dict = {}
            for _, row in src_attrs.iterrows():
                src_attr_dict[row[source_attr_col]] = row[source_val_col]

            product_results = {}

            # Pick the target product whose name is most similar.
            best_product_match = None
            best_product_score = 0
            for tgt_product in target_products:
                prod_sim = self.calculate_similarity(src_product, tgt_product)
                if prod_sim > best_product_score:
                    best_product_score = prod_sim
                    best_product_match = tgt_product

            # Only match attributes when the product names agree reasonably.
            if best_product_match and best_product_score > 0.5:
                product_results["matched_product"] = {
                    "name": best_product_match,
                    "similarity": best_product_score
                }

                tgt_attrs = df_target[df_target[target_prod_col] == best_product_match]
                tgt_attr_dict = {}
                for _, row in tgt_attrs.iterrows():
                    tgt_attr_dict[row[target_attr_col]] = row[target_val_col]

                attribute_matches = self.find_best_attribute_matches(src_attr_dict, tgt_attr_dict)
                product_results["attribute_matches"] = attribute_matches

            results[src_product] = product_results

        return results

def example_usage():
    """Demonstrate attribute matching on two hand-crafted product records.

    ``product_a`` uses list-valued attributes, ``product_b`` plain strings;
    the top matches between them are printed to stdout.
    """
    # Sample records taken from the original problem statement.
    product_a = {
        "Root情况": ["未破解Root"],
        "侧键": ["侧键正常"],
        "充电功能": ["充电/快充/无线充电正常"],
        "内存": ["8G+128G"],
        "声音功能": ["声音功能正常"],
        "官方维修情况": ["无官方维修记录"],
        "屏幕传感器功能": ["光线、距离感应正常"],
        "屏幕外观": ["屏幕完美无划痕"],
        "屏幕显示": ["显示完美，无瑕疵"],
        "振动功能": ["振动功能正常"],
        "摄像头功能及维修情况": ["相机画面正常，无维修或更换"],
        "整机维修及受潮情况": ["无维修无受潮"],
        "无线功能": ["wifi/蓝牙/nfc/指南针功能正常"],
        "机器重启情况": ["无重启或卡死情况"],
        "机身弯曲情况": ["机身无弯曲"],
        "机身颜色": ["亮黑色"],
        "生物识别功能": ["指纹/面容/虹膜功能正常"],
        "网络制式": ["全网通"],
        "触摸功能": ["触摸功能正常"],
        "账号": ["账号可退出（请先退出再回收）"],
        "购买渠道": ["大陆国行"],
        "边框背板": ["外壳有划痕/侧键异色"],
        "运行情况": ["正常开机进入桌面"],
        "还原激活功能": ["还原激活正常"],
        "通话功能": ["通话正常"]
    }

    product_b = {
        "个人账号情况": "华为账号可退出（请先退出再回收）",
        "使用情况": "正常开机进入桌面",
        "容量": "8G+128G",
        "屏幕、电池维修": "屏幕/电池无维修更换【原装机】",
        "屏幕外观": "屏幕完美，无划痕【几乎未使用】",
        "屏幕显示": "显示/显色无任何异常",
        "机身外壳、边框": "外壳有细微使用痕迹【较少使用】"
    }

    # Run the matcher over the two records.
    matcher = AttributeMatcher()
    matches = matcher.find_best_attribute_matches(product_a, product_b)

    # Report only the strongest pairings.
    print("\nTop attribute matches between products:")
    top_matches = matches[:10]
    for match in top_matches:
        src_key, tgt_key, score = match
        print(f"Source: '{src_key}' -> Target: '{tgt_key}' (similarity: {score:.2f})")
        print(f"  Source value: '{product_a[src_key][0]}'")
        print(f"  Target value: '{product_b[tgt_key]}'")
        print()

if __name__ == "__main__":
    # Demo run on the hand-crafted example products.
    print("Running attribute matching example...")
    example_usage()

    # Matcher instance for the Presto-backed data below.
    matcher = AttributeMatcher()

    try:
        print("\nAnalyzing data from Presto...")
        # Guard against the Presto dataframes not having been imported.
        have_presto_data = 'ahs_property' in globals() and 'zz_property' in globals()
        if have_presto_data:
            print(f"AHS properties: {len(ahs_property)} rows")
            print(f"ZZ properties: {len(zz_property)} rows")

            # Match a small sample of products between the two sources.
            results = matcher.process_products_from_dataframes(
                ahs_property,
                zz_property,
                source_prod_col='product_name',
                target_prod_col='product_name',
                source_attr_col='property_name',
                target_attr_col='property_items_name',
                source_val_col='property_value_name',
                target_val_col='property_value_name'
            )

            print(f"\nProcessed {len(results)} products")
    except NameError:
        print("Data from Presto not available")