import pandas as pd
import numpy as np
from gensim.models import Word2Vec
import jieba
from rapidfuzz import fuzz
from prettytable import PrettyTable
import itertools
import os
from datetime import datetime
from tqdm import tqdm
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import TfidfVectorizer
from transformers import AutoTokenizer, AutoModel
import torch

class AddressSimilarityComparison:
    """Pairwise address similarity using four metrics.

    Metrics: Word2Vec cosine similarity, BGE (bge-base-zh-v1.5) embedding
    cosine similarity, TF-IDF cosine similarity, and a normalized edit
    distance. Pairs scoring above a threshold on any metric are printed as
    a table and written to a timestamped CSV file.
    """

    # Word2Vec embedding dimensionality; shared by model training and the
    # zero-vector fallback so the two values cannot drift apart.
    WORD2VEC_DIM = 100

    def __init__(self, csv_path, output_dir):
        """Initialize the comparator.

        Args:
            csv_path: Path to a CSV file containing an 'Address' column.
            output_dir: Directory where result CSV files will be written.
        """
        self.csv_path = csv_path
        self.output_dir = output_dir
        self.addresses = self._load_addresses()
        # Models and per-address vector caches are built lazily on first use.
        self.word2vec_model = None
        self.address_vectors = None
        self.bge_model = None
        self.bge_tokenizer = None
        self.bge_vectors = None
        self.tfidf_vectorizer = None
        self.tfidf_vectors = None

    def _load_addresses(self):
        """Load the non-null 'Address' column; return an empty list on failure."""
        try:
            df = pd.read_csv(self.csv_path)
            # Drop missing values and keep the remaining addresses as a list.
            addresses = df['Address'].dropna().tolist()
            print(f"成功加载 {len(addresses)} 条有效地址")
            return addresses
        except Exception as e:
            print(f"Error loading CSV file: {str(e)}")
            return []

    def _preprocess_address(self, address):
        """Tokenize an address with jieba; returns a list of tokens."""
        return list(jieba.cut(address))

    def _preprocess_for_tfidf(self, address):
        """Tokenize an address and space-join tokens for TfidfVectorizer."""
        return ' '.join(jieba.cut(address))

    def _train_word2vec(self, addresses):
        """Train the Word2Vec model and cache a vector for every address."""
        print("开始训练Word2Vec模型...")

        # Step 1: segment every address into tokens.
        print("Step 1/3: 地址分词")
        segmented_addresses = [
            self._preprocess_address(addr)
            for addr in tqdm(addresses, desc="分词处理")
        ]

        # Step 2: train the model. min_count=1 keeps every token in the
        # vocabulary, so _get_address_vector rarely hits an OOV word.
        print("\nStep 2/3: 训练Word2Vec模型")
        self.word2vec_model = Word2Vec(sentences=segmented_addresses,
                                       vector_size=self.WORD2VEC_DIM,
                                       window=5,
                                       min_count=1,
                                       workers=4)

        # Step 3: precompute one vector per address.
        print("\nStep 3/3: 生成地址向量")
        self.address_vectors = {
            addr: self._get_address_vector(addr)
            for addr in tqdm(addresses, desc="向量化地址")
        }

        print("Word2Vec模型训练和向量化完成")

    def _get_address_vector(self, address):
        """Mean word vector of an address (zeros if no token is in vocabulary)."""
        words = self._preprocess_address(address)
        wv = self.word2vec_model.wv
        # Membership test instead of try/except: same result, clearer intent.
        vectors = [wv[word] for word in words if word in wv]
        return np.mean(vectors, axis=0) if vectors else np.zeros(self.WORD2VEC_DIM)

    def calculate_word2vec_similarity(self, addr1, addr2):
        """Cosine similarity of the two addresses' Word2Vec vectors.

        Trains the model on first call. Returns 0.0 when either address has
        an all-zero vector (cosine similarity would be undefined).
        """
        if self.word2vec_model is None:
            self._train_word2vec(self.addresses)

        vec1 = self.address_vectors[addr1].reshape(1, -1)
        vec2 = self.address_vectors[addr2].reshape(1, -1)

        if not np.any(vec1) or not np.any(vec2):
            return 0.0

        return float(cosine_similarity(vec1, vec2)[0][0])

    def calculate_edit_distance_similarity(self, addr1, addr2):
        """Normalized edit-distance similarity in [0, 1] via rapidfuzz."""
        return fuzz.ratio(addr1, addr2) / 100.0

    def _load_bge_model(self):
        """Load the BAAI/bge-base-zh-v1.5 tokenizer and model (GPU if available)."""
        print("\n加载BGE模型...")
        self.bge_tokenizer = AutoTokenizer.from_pretrained("BAAI/bge-base-zh-v1.5")
        self.bge_model = AutoModel.from_pretrained("BAAI/bge-base-zh-v1.5")
        if torch.cuda.is_available():
            self.bge_model = self.bge_model.cuda()
        self.bge_model.eval()

    def _get_bge_embeddings(self, addresses):
        """Embed all addresses with BGE in batches and cache the vectors."""
        print("计算BGE向量...")
        self.bge_vectors = {}

        batch_size = 32  # trade-off between throughput and peak memory

        for i in tqdm(range(0, len(addresses), batch_size), desc="BGE向量化"):
            batch_addresses = addresses[i:i + batch_size]
            inputs = self.bge_tokenizer(
                batch_addresses,
                padding=True,
                truncation=True,
                max_length=512,
                return_tensors="pt"
            )

            if torch.cuda.is_available():
                inputs = {k: v.cuda() for k, v in inputs.items()}

            with torch.no_grad():
                outputs = self.bge_model(**inputs)
                # Use the [CLS] token embedding as the sentence representation.
                embeddings = outputs.last_hidden_state[:, 0].cpu().numpy()

            for addr, emb in zip(batch_addresses, embeddings):
                self.bge_vectors[addr] = emb

        print("BGE向量计算完成")

    def calculate_bge_similarity(self, addr1, addr2):
        """Cosine similarity of the two addresses' BGE embeddings.

        Loads the model and embeds all addresses on first call.
        """
        if self.bge_model is None:
            self._load_bge_model()
            self._get_bge_embeddings(self.addresses)

        vec1 = self.bge_vectors[addr1].reshape(1, -1)
        vec2 = self.bge_vectors[addr2].reshape(1, -1)

        return float(cosine_similarity(vec1, vec2)[0][0])

    def _train_tfidf(self, addresses):
        """Fit a TF-IDF vectorizer on all addresses and cache the matrix."""
        print("\n开始训练TF-IDF模型...")

        # Step 1: segment each address into a space-joined token string.
        print("Step 1/2: 地址分词")
        preprocessed_addresses = [
            self._preprocess_for_tfidf(addr)
            for addr in tqdm(addresses, desc="分词处理")
        ]

        # Step 2: fit and transform in one pass.
        print("\nStep 2/2: 计算TF-IDF向量")
        self.tfidf_vectorizer = TfidfVectorizer(
            analyzer='word',
            token_pattern=r'(?u)\b\w+\b'  # also accept single-character tokens
        )
        self.tfidf_vectors = self.tfidf_vectorizer.fit_transform(preprocessed_addresses)

        # Map each address to its row index in the TF-IDF matrix.
        # NOTE(review): duplicate addresses keep only the last row index; this
        # is harmless because duplicate rows produce identical vectors.
        self.tfidf_address_to_index = {addr: idx for idx, addr in enumerate(addresses)}

        print("TF-IDF模型训练完成")

    def calculate_tfidf_similarity(self, addr1, addr2):
        """Cosine similarity of the two addresses' TF-IDF vectors."""
        if self.tfidf_vectorizer is None:
            self._train_tfidf(self.addresses)

        idx1 = self.tfidf_address_to_index[addr1]
        idx2 = self.tfidf_address_to_index[addr2]

        return float(cosine_similarity(self.tfidf_vectors[idx1],
                                       self.tfidf_vectors[idx2])[0][0])

    def compare_addresses(self, similarity_threshold=0.65):
        """Compare every address pair, print matches, and save them to CSV.

        A pair is reported when ANY of the four similarity scores reaches
        ``similarity_threshold``.

        Args:
            similarity_threshold: Minimum score (0-1) for a pair to be kept.
        """
        if not self.addresses:
            print("No addresses found in the CSV file.")
            return

        # Build all models up front so the expensive one-off training is not
        # hidden inside the first iteration of the pairwise progress bar.
        if self.word2vec_model is None:
            self._train_word2vec(self.addresses)
        if self.bge_model is None:
            self._load_bge_model()
            self._get_bge_embeddings(self.addresses)
        if self.tfidf_vectorizer is None:
            self._train_tfidf(self.addresses)

        # Table for on-screen display of matching pairs.
        table = PrettyTable()
        table.field_names = ["地址1", "地址2", "Word2Vec相似度", "BGE相似度", "TF-IDF相似度", "编辑距离相似度"]

        # Rows to be written to the result CSV.
        results_data = []

        # All unordered pairs: O(n^2) comparisons.
        address_pairs = list(itertools.combinations(self.addresses, 2))
        total_pairs = len(address_pairs)

        print(f"\n开始比较 {total_pairs} 对地址...")

        for addr1, addr2 in tqdm(address_pairs, desc="计算相似度", unit="对"):
            word2vec_sim = self.calculate_word2vec_similarity(addr1, addr2)
            bge_sim = self.calculate_bge_similarity(addr1, addr2)
            tfidf_sim = self.calculate_tfidf_similarity(addr1, addr2)
            edit_dist_sim = self.calculate_edit_distance_similarity(addr1, addr2)

            # Keep the pair when any metric clears the threshold.
            if (word2vec_sim >= similarity_threshold or
                bge_sim >= similarity_threshold or
                tfidf_sim >= similarity_threshold or
                edit_dist_sim >= similarity_threshold):

                # Truncate long addresses for display purposes only.
                table.add_row([
                    addr1[:50] + "..." if len(addr1) > 50 else addr1,
                    addr2[:50] + "..." if len(addr2) > 50 else addr2,
                    f"{word2vec_sim:.4f}",
                    f"{bge_sim:.4f}",
                    f"{tfidf_sim:.4f}",
                    f"{edit_dist_sim:.4f}"
                ])

                # The CSV keeps the full, untruncated addresses.
                results_data.append({
                    "地址1": addr1,
                    "地址2": addr2,
                    "Word2Vec相似度": word2vec_sim,
                    "BGE相似度": bge_sim,
                    "TF-IDF相似度": tfidf_sim,
                    "编辑距离相似度": edit_dist_sim
                })

        # Table styling: left-aligned, capped column width.
        table.align = "l"
        table.max_width = 50

        print(f"\n=== 地址相似度比较结果 (相似度 >= {similarity_threshold:.2%}) ===")
        print(f"找到 {len(results_data)} 对相似地址")
        print(table)

        self._save_results(results_data)

    def _save_results(self, results_data):
        """Write the result rows to a timestamped CSV file in output_dir."""
        os.makedirs(self.output_dir, exist_ok=True)

        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        output_file = os.path.join(self.output_dir, f"address_similarity_results_{timestamp}.csv")

        print("\n保存结果到CSV文件...")
        # Explicit columns keep the CSV header present even when no pair
        # cleared the threshold (an empty DataFrame would otherwise have
        # no columns at all).
        df = pd.DataFrame(results_data,
                          columns=["地址1", "地址2", "Word2Vec相似度", "BGE相似度",
                                   "TF-IDF相似度", "编辑距离相似度"])
        df.to_csv(output_file, index=False, encoding='utf-8')
        print(f"结果已保存到: {output_file}")

def main():
    """Script entry point: run the full address similarity comparison."""
    # Fixed input/output locations for this one-off analysis run.
    input_csv = "/Users/marion/Documents/我的资料/db/order/order_address.csv"
    results_dir = "/Users/marion/Documents/我的资料/db/order/results"

    # Build the comparator and run the comparison with the default threshold.
    comparator = AddressSimilarityComparison(input_csv, results_dir)
    comparator.compare_addresses()

# Run the comparison only when executed as a script, not on import.
if __name__ == "__main__":
    main() 