# -*- coding: utf-8 -*-
"""
门店相似度智能匹配系统（严格路径版）
严格遵循用户指定的文件路径和数据库路径
"""
import difflib
import logging
import os
import re
import sqlite3
import sys
from collections import Counter
from datetime import datetime

import jieba
import numpy as np
import pandas as pd
from tqdm import tqdm

# 导入配置文件
sys.path.append(os.path.join(os.path.dirname(__file__), 'config'))
import config

# 在程序启动时预加载jieba词典，避免在进度条显示过程中打断
if os.path.exists(config.JIEBA_DICT_PATH):
    jieba.load_userdict(config.JIEBA_DICT_PATH)
    # 预先执行一次分词以完成jieba的初始化
    jieba.lcut("初始化")

# ===== 1. Logging =====
class TqdmLoggingHandler(logging.Handler):
    def emit(self, record):
        try:
            msg = self.format(record)
            tqdm.write(msg, end='\n')
        except Exception:
            self.handleError(record)

def setup_logger():
    logger = logging.getLogger("门店匹配系统")
    logger.setLevel(logging.DEBUG)
    
    # 文件日志 (记录所有DEBUG级别及以上的详细信息)
    file_handler = logging.FileHandler("matching.log", mode='w', encoding='utf-8')
    file_handler.setLevel(logging.DEBUG)
    file_formatter = logging.Formatter(
        "%(asctime)s [%(levelname)s] - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S"
    )
    file_handler.setFormatter(file_formatter)
    
    logger.addHandler(file_handler)
    return logger

# ===== 2. Core functions =====
def clean_text(text):
    """清洗文本"""
    if pd.isna(text): return ""
    text = str(text).strip()
    
    # 同义词替换
    for original, replacement in config.SYNONYM_DICT.items():
        text = text.replace(original, replacement)
    
    return re.sub(r'(店|分店|加盟店|直营店|专卖店|\s|\(|\)|（|）)', '', text)

def get_core_keywords(name):
    """分词"""
    if not name: return []
    
    words = jieba.lcut(name)
    # 记录分词结果到日志
    logger = logging.getLogger("门店匹配系统")
    logger.debug(f"分词结果: {name} -> {words}")
    return [word for word in words if len(word) > 1]

def generate_patterns(keywords):
    """四级匹配模式"""
    patterns = []
    if not keywords: return patterns
    
    # L1: 完整词序组合 (修改为使用.*连接词)
    patterns.append('.*' + '.*'.join(keywords) + '.*')  
    
    # L2: 去掉第一个词后的组合
    if len(keywords) >= 2:
        patterns.append('.*' + '.*'.join(keywords[1:]) + '.*')  
    
    # L3: 去掉前两个词后的组合
    if len(keywords) >= 3:
        patterns.append('.*' + '.*'.join(keywords[2:]) + '.*')  
    
    # L4: 最后一个关键词
    patterns.append('.*' + keywords[-1] + '.*')  
    
    return patterns

def calculate_similarity(str1, str2):
    """字符级相似度"""
    return difflib.SequenceMatcher(None, str1, str2).ratio()

def calculate_cosine_similarity(str1, str2):
    """余弦相似度"""
    # 将字符串转换为字符级别的向量
    chars1 = list(str1)
    chars2 = list(str2)
    
    # 获取所有唯一字符
    all_chars = list(set(chars1 + chars2))
    
    # 创建向量
    vec1 = [chars1.count(char) for char in all_chars]
    vec2 = [chars2.count(char) for char in all_chars]
    
    # 计算点积
    dot_product = sum(a * b for a, b in zip(vec1, vec2))
    
    # 计算向量的模
    magnitude1 = sum(a * a for a in vec1) ** 0.5
    magnitude2 = sum(b * b for b in vec2) ** 0.5
    
    # 避免除零错误
    if magnitude1 == 0 or magnitude2 == 0:
        return 0.0
    
    # 计算余弦相似度
    return dot_product / (magnitude1 * magnitude2)


# ===== 3. Data loading (uses the user-specified paths verbatim) =====
def load_data():
    logger = logging.getLogger("门店匹配系统")
    try:
        # 读取Excel数据（用户指定路径）
        excel_path = config.EXCEL_FILE_PATH
        excel_df = pd.read_excel(excel_path, sheet_name="督察部24-7")
        excel_df['清洗名称'] = excel_df['店铺名称'].apply(clean_text)
        
        # 读取SQLite数据（用户指定路径）
        db_path = config.DB_FILE_PATH
        conn = sqlite3.connect(db_path)
        
        # 获取所有记录，使用ROW_NUMBER()窗口函数进行去重
        # 按大区经理和门店名称分组，优先选择大区经理非空的记录，如果都为空则选择任意一条
        query = '''
        SELECT * FROM (
            SELECT *, 
                   ROW_NUMBER() OVER (
                       PARTITION BY 大区经理, 门店名称 
                       ORDER BY CASE WHEN 大区经理 IS NOT NULL AND 大区经理 != '' THEN 0 ELSE 1 END, 
                                    更新日期 DESC
                   ) as rn
            FROM info
        ) WHERE rn = 1
        '''
        sqlite_df = pd.read_sql(query, conn)
        conn.close()
        
        # 在匹配时再进行去重，而不是在数据加载时
        # 这样可以确保在匹配特定经理时能看到所有相关记录
        
        # 不再需要在pandas中进行额外的去重操作
        
        sqlite_df['清洗名称'] = sqlite_df['门店名称'].apply(clean_text)
        sqlite_df['清洗地址'] = sqlite_df['门店详细地址'].apply(clean_text)
        
        logger.info(f"数据加载完成: Excel={len(excel_df)}条, SQLite={len(sqlite_df)}条")
        return excel_df, sqlite_df
    except Exception as e:
        logger.critical(f"数据加载失败: {str(e)}", exc_info=True)
        raise

# ===== 4. Matching main logic =====
def progressive_matcher(excel_row, sqlite_df):
    logger = logging.getLogger("门店匹配系统")
    manager = excel_row["大区经理"]
    target_name = excel_row["清洗名称"]
    original_name = excel_row["店铺名称"]
    
    # 获取最低相似度阈值
    MIN_SIMILARITY_THRESHOLD = config.MIN_SIMILARITY_THRESHOLD

    
    try:
        # 按大区经理筛选
        manager_data = sqlite_df[sqlite_df["大区经理"] == manager]
        if manager_data.empty:
            # 大区为空时，使用整体数据
            manager_data = sqlite_df
            if manager_data.empty:
                logger.warning(f"无数据: {original_name}")
                return None, "无数据", 0.0, None
            else:
                logger.warning(f"大区无数据，使用整体数据: 经理={manager}")
        else:
            # 数据已在加载时去重，此处无需再次去重
            manager_data = manager_data
        
        # 生成四级匹配模式
        keywords = get_core_keywords(target_name)
        patterns = generate_patterns(keywords)
        
        # 渐进匹配（L1→L2→L3→L4）
        for level, pattern in enumerate(patterns, 1):
            name_match = manager_data["清洗名称"].str.contains(pattern, regex=True, na=False)
            addr_match = manager_data["清洗地址"].str.contains(pattern, regex=True, na=False)
            matches = manager_data[name_match | addr_match]
            
            logger.debug(f"L{level}匹配: {original_name} 使用模式'{pattern}'找到{len(matches)}个匹配项")
            
            if not matches.empty:
                # 在匹配项中选择相似度最高的
                best_match_idx = 0
                best_score = 0.0
                scores = [calculate_similarity(target_name, matches.iloc[i]["清洗名称"]) for i in range(len(matches))]
                best_match_idx = np.argmax(scores)
                best_score = scores[best_match_idx]
                match_rec = matches.iloc[best_match_idx]
                
                logger.debug(f"L{level}匹配: {original_name} 最佳匹配为'{match_rec['门店名称']}' (清洗名称: {match_rec['清洗名称']}) 相似度{best_score:.4f}")
            
                # 检查相似度是否达到阈值
                if best_score >= MIN_SIMILARITY_THRESHOLD:
                    logger.info(f"L{level}匹配: {original_name} → {match_rec['门店名称']} (相似度{best_score:.4f})")
                    return match_rec["门店编号"], f"L{level}", best_score, match_rec["门店名称"]
                else:
                    # 如果相似度不满足阈值，则继续尝试下一级匹配
                    logger.info(f"L{level}匹配: {original_name} 相似度低于阈值 {MIN_SIMILARITY_THRESHOLD}，继续尝试下一级匹配")
                    continue
        
        # 大区内正弦匹配
        candidate_names = manager_data["清洗名称"].tolist()
        best_idx, score = 0, 0.0
        if candidate_names:
            scores = [calculate_similarity(target_name, c) for c in candidate_names]
            best_idx = np.argmax(scores)
            score = scores[best_idx]
            best_match = manager_data.iloc[best_idx]
            
            # 添加最低相似度阈值检查
            MIN_SIMILARITY_THRESHOLD = config.MIN_SIMILARITY_THRESHOLD  # 最低相似度阈值
            

            
            # 检查相似度是否达到阈值
            if score >= MIN_SIMILARITY_THRESHOLD:
                logger.info(f"大区正弦匹配: {original_name} → {best_match['门店名称']} (相似度{score:.4f})")
                return best_match["门店编号"], "大区正弦", score, best_match["门店名称"]
            else:
                # 当正弦相似度低于阈值时，使用余弦相似度重新匹配
                logger.info(f"大区正弦匹配: {original_name} 相似度低于阈值 {MIN_SIMILARITY_THRESHOLD}，尝试使用余弦相似度匹配")
                cosine_scores = [calculate_cosine_similarity(target_name, c) for c in candidate_names]
                best_cosine_idx = np.argmax(cosine_scores)
                best_cosine_score = cosine_scores[best_cosine_idx]
                best_cosine_match = manager_data.iloc[best_cosine_idx]
                
                # 检查余弦相似度是否达到阈值
                if best_cosine_score >= MIN_SIMILARITY_THRESHOLD:
                    logger.info(f"余弦相似度匹配: {original_name} → {best_cosine_match['门店名称']} (相似度{best_cosine_score:.4f})")
                    return best_cosine_match["门店编号"], "余弦相似度", best_cosine_score, best_cosine_match["门店名称"]
                else:
                    logger.info(f"大区正弦匹配: {original_name} 相似度低于阈值 {MIN_SIMILARITY_THRESHOLD}，未找到合适匹配 (最佳匹配: {best_match['门店名称']} 相似度{score:.4f})")
                    # 记录所有候选匹配项的相似度信息（用于调试）
                    logger.debug(f"所有候选匹配项信息（正弦相似度）: {list(zip(candidate_names, scores))}")
                    logger.debug(f"所有候选匹配项信息（余弦相似度）: {list(zip(candidate_names, cosine_scores))}")
                    return None, "无匹配", 0.0, None
        else:
            logger.warning(f"无候选数据进行正弦匹配: {original_name}")
            return None, "无匹配", 0.0, None
    
    except Exception as e:
        logger.error(f"匹配异常: {str(e)}", exc_info=True)
        return None, "系统错误", 0.0, None

# ===== 5. Main execution flow =====
if __name__ == "__main__":
    logger = setup_logger()
    logger.info("===== 门店匹配任务启动 =====")
    start_time = datetime.now()
    
    try:
        excel_df, sqlite_df = load_data()
        results = []
        progress_bar = tqdm(
            excel_df.iterrows(), 
            total=len(excel_df),
            desc="智能匹配",
            bar_format="{l_bar}{bar:50}{r_bar}",
            colour="GREEN"
        )
        
        for idx, row in progress_bar:
            shop_id, match_type, score, match_name = progressive_matcher(row, sqlite_df)
            score_str = f"{score:.4f}" if isinstance(score, float) else ""
            progress_bar.set_postfix_str(f"{match_type} {score_str}")
            results.append({
                "原始店铺": row["店铺名称"],
                "匹配门店编号": shop_id,
                "匹配门店名称": match_name,
                "匹配方式": match_type,
                "相似度": score_str
            })
        
        # 保存结果（用户指定桌面路径）
        output_path = config.OUTPUT_FILE_PATH
        result_df = pd.DataFrame(results)
        result_df.to_excel(output_path, index=False)
        logger.info(f"结果已保存至: {output_path}")
        
    except Exception as e:
        logger.critical(f"系统致命错误: {str(e)}", exc_info=True)
    finally:
        time_cost = (datetime.now()-start_time).total_seconds()
        logger.info(f"总耗时: {time_cost:.2f}秒")
        logger.info("===== 任务结束 =====")