import sqlite3
import logging
import jieba
import jieba.analyse
from difflib import SequenceMatcher
import re

# Logging setup
logging.basicConfig(level=logging.INFO, format='[material_manager] %(asctime)s - %(levelname)s - %(message)s')

# Database path
DB_PATH = "material_db/material_db.sqlite"

def load_material_tags():
    """
    Load all tags and their associated material paths from the database.

    :return: dict mapping each tag to a list of material file paths
    :raises sqlite3.Error: if the database cannot be opened or queried
    """
    logging.info("开始加载标签与素材映射")
    conn = sqlite3.connect(DB_PATH)
    try:
        cursor = conn.cursor()
        # DISTINCT avoids duplicate (tag, path) pairs inflating the lists.
        cursor.execute("SELECT DISTINCT tag, path FROM material_tags")
        tag_to_materials = {}
        for tag, path in cursor.fetchall():
            # setdefault keeps first-seen insertion order for each tag.
            tag_to_materials.setdefault(tag, []).append(path)
    finally:
        # Close even when the query raises (the original leaked the
        # connection on any exception between connect() and close()).
        conn.close()
    logging.debug("共加载 %d 个标签", len(tag_to_materials))
    return tag_to_materials

def preprocess_text(text):
    """
    Normalize text for similarity comparison.

    Lowercases the input, replaces every character that is not CJK,
    Latin or a digit with a space, then collapses whitespace runs.

    :param text: input text (may be None or empty)
    :return: normalized text; "" for falsy input
    """
    if not text:
        return ""
    lowered = text.lower()
    # Keep only CJK ideographs, ASCII letters and digits.
    cleaned = re.sub(r'[^\u4e00-\u9fa5a-zA-Z0-9]', ' ', lowered)
    # Collapse runs of whitespace and trim the ends.
    return re.sub(r'\s+', ' ', cleaned).strip()

def extract_keywords(text, topK=20):
    """
    Extract keywords from the given text.

    Uses jieba's TF-IDF extractor first; falls back to plain
    segmentation when that yields nothing or raises.

    :param text: input text
    :param topK: maximum number of keywords to extract
    :return: list of keyword strings
    """
    if not text:
        return []

    def _keep(token):
        # Drop blanks and single characters, except single CJK characters.
        if not token.strip():
            return False
        return len(token) > 1 or '\u4e00' <= token <= '\u9fff'

    try:
        candidates = jieba.analyse.extract_tags(text, topK=topK)
        if not candidates:
            # TF-IDF produced nothing — fall back to basic segmentation.
            candidates = list(jieba.cut(text))
        return [token for token in candidates if _keep(token)]
    except Exception as e:
        logging.warning("关键词提取失败: %s，使用基础分词", e)
        return [w for w in jieba.cut(text) if w.strip()]

def calculate_similarity(text1, text2):
    """
    Compute a combined similarity score between two texts.

    Blends several signals: raw character ratio, keyword overlap,
    containment relations, word-token overlap and length similarity.

    :param text1: first text
    :param text2: second text
    :return: similarity score in [0, 1]
    """
    clean1 = preprocess_text(text1)
    clean2 = preprocess_text(text2)
    if not clean1 or not clean2:
        return 0.0

    # 1. Raw character-level similarity.
    char_ratio = SequenceMatcher(None, clean1, clean2).ratio()

    # 2. Keyword similarity, using up to 20 keywords per text.
    kw1 = {k for k in extract_keywords(clean1, topK=20) if k.strip()}
    kw2 = {k for k in extract_keywords(clean2, topK=20) if k.strip()}
    common = kw1 & kw2

    if kw1 and kw2:
        union = kw1 | kw2
        # Jaccard index of the two keyword sets.
        jaccard = len(common) / len(union) if union else 0.0
        # Share of the larger keyword set that is covered by the overlap.
        larger = max(len(kw1), len(kw2))
        coverage = len(common) / larger if larger > 0 else 0.0
        kw_score = (jaccard + coverage) / 2
    elif not kw1 and not kw2:
        # Neither text produced keywords: fall back to the char ratio.
        kw_score = char_ratio * 0.5
    else:
        # Exactly one side has keywords — weak evidence of similarity.
        kw_score = 0.1

    # 3. Containment: full-substring or keyword-subset relations.
    contain_score = 0.0
    if clean1 in clean2 or clean2 in clean1:
        contain_score = 0.8
    if kw1 and kw2:
        if kw1 <= kw2 or kw2 <= kw1:
            contain_score = max(contain_score, 0.9)
        elif common:
            partial = len(common) / min(len(kw1), len(kw2)) * 0.7
            contain_score = max(contain_score, partial)

    # 4. Whitespace-token overlap.
    tokens1 = set(clean1.split())
    tokens2 = set(clean2.split())
    token_score = 0.0
    if tokens1 and tokens2:
        token_score = len(tokens1 & tokens2) / max(len(tokens1), len(tokens2))

    # 5. Length similarity (smaller length gap scores higher).
    length_score = 1.0 - abs(len(clean1) - len(clean2)) / max(len(clean1), len(clean2), 1)

    # 6. Weighted blend: 15% chars + 25% keywords + 30% containment
    #    + 20% token overlap + 10% length similarity.
    return (0.15 * char_ratio + 0.25 * kw_score + 0.3 * contain_score
            + 0.2 * token_score + 0.1 * length_score)

def match_materials(segments, similarity_threshold=0.15, video_duration=None):
    """
    Match stored materials against each transcribed speech segment by
    semantic similarity between the segment text and the material tags.

    :param segments: Whisper output segments (dicts with 'text' and 'start')
    :param similarity_threshold: minimum similarity for a match
                                 (default 0.15, kept low for higher recall)
    :param video_duration: total video duration in seconds; when given, the
                           last 5 seconds are excluded from matching
    :return: list of dicts with material path, insert time, similarity and tag
    """
    logging.info("开始语义匹配素材，基于语音片段内容")
    matched = []
    seen_materials = set()  # each material path is inserted at most once

    try:
        # Load the tag -> material paths mapping.
        tag_to_materials = load_material_tags()
        if not tag_to_materials:
            logging.warning("未找到任何标签与素材的映射")
            return []

        all_tags = list(tag_to_materials.keys())
        # Lazy %-style args (not an f-string) so formatting is skipped
        # when the record is filtered out by the log level.
        logging.info("加载了 %d 个标签用于匹配", len(all_tags))

        for seg in segments:
            text = seg.get('text', '').strip()
            start = seg.get('start', 0.0)

            # Skip the first 5 seconds of the video ...
            if start < 5.0:
                logging.debug("跳过前5秒内容: 时间 %.2f 秒，文本: '%s'", start, text)
                continue

            # ... and the last 5 seconds, when the duration is known.
            if video_duration and start > (video_duration - 5.0):
                logging.debug("跳过末尾5秒内容: 时间 %.2f 秒，文本: '%s'", start, text)
                continue

            if not text:
                continue

            # Score this segment against every tag; track the best match.
            best_tag = None
            max_similarity = 0.0
            similarity_scores = []
            for tag in all_tags:
                similarity = calculate_similarity(text, tag)
                similarity_scores.append((tag, similarity))
                if similarity > max_similarity:
                    max_similarity = similarity
                    best_tag = tag

            # Log the top-3 candidate tags for debugging.
            similarity_scores.sort(key=lambda x: x[1], reverse=True)
            top_matches = similarity_scores[:3]
            logging.debug("文本 '%s' 的最佳匹配: %s", text,
                          [(tag, f"{score:.2f}") for tag, score in top_matches])

            # Threshold is deliberately low to favour recall over precision.
            if best_tag and max_similarity >= similarity_threshold:
                for path in tag_to_materials.get(best_tag, []):
                    if path not in seen_materials:
                        matched.append({
                            "material_path": path,
                            "time": start,
                            "similarity": max_similarity,
                            "matched_tag": best_tag
                        })
                        seen_materials.add(path)
                        logging.info("语义匹配到素材: %s，时间点: %.2f 秒，相似度: %.2f，匹配标签: %s，原始文本: '%s'",
                                   path, start, max_similarity, best_tag, text)
            else:
                logging.debug("未匹配到足够相似的素材，最佳匹配: %s，相似度: %.2f，原始文本: '%s'",
                            best_tag or "无", max_similarity, text)

        logging.info("共语义匹配到 %d 个素材", len(matched))
        return matched

    except sqlite3.Error as e:
        # logging.exception records the traceback, which logging.error dropped.
        logging.exception("数据库操作失败: %s", e)
        return []
    except Exception as e:
        logging.exception("语义匹配过程中出现未预期的错误: %s", e)
        return []
