# keyword_extractor.py
import logging
import os

import jieba
import jieba.analyse
import jieba.posseg
# Configure module-wide logging with a tagged format.
logging.basicConfig(level=logging.INFO, format='[keyword_extractor] %(asctime)s - %(levelname)s - %(message)s')

# Optional stop-word list, one word per line.
STOPWORDS_PATH = "stopwords.txt"

# Load stop words if the file is present; a missing file just leaves the set empty.
stopwords = set()
try:
    with open(STOPWORDS_PATH, 'r', encoding='utf-8') as fh:
        stopwords = {line.strip() for line in fh}
    logging.info(f"已加载停用词文件: {STOPWORDS_PATH}")
except FileNotFoundError:
    pass

def extract_keywords_with_time(segments, top_k=10, min_weight=0.2):
    """Extract ranked keywords from ASR segments and the time each first appears.

    Combines jieba TF-IDF and TextRank scores (keeping the max weight per
    word), filters by part of speech and by the module-level stop-word set,
    then scans the segments for the first occurrence of every surviving
    keyword.

    Args:
        segments: list of dicts, each expected to carry 'text' (str) and
            'start' (numeric, seconds). Malformed entries are skipped.
        top_k: maximum number of keywords to keep (default 10, matching the
            previously hard-coded value).
        min_weight: minimum combined weight for a keyword to survive
            (default 0.2, matching the previously hard-coded threshold).
            NOTE: TF-IDF and TextRank weights are on different scales; the
            threshold applies to the per-keyword maximum of the two.

    Returns:
        list of {'keyword': str, 'time': <start>} dicts, one per keyword,
        ordered by first appearance in the segments; [] on invalid input.
    """
    logging.info("开始提取关键词...")

    # Input validation: anything but a list is rejected up front.
    if not isinstance(segments, list):
        logging.error("输入 segments 必须是列表类型，当前类型: %s", type(segments))
        return []

    # Concatenate all segment texts. Skip non-dict entries here so a single
    # malformed segment cannot crash the extraction (the timestamp loop
    # below already tolerates them; the old code crashed on seg.get).
    full_text = ''.join(seg.get('text', '') for seg in segments if isinstance(seg, dict))
    logging.debug("拼接后的完整文本: %s", full_text)

    # TF-IDF keyword extraction.
    tfidf_keywords = jieba.analyse.extract_tags(full_text, topK=top_k, withWeight=True)
    logging.debug("TF-IDF 提取的关键词: %s", [kw[0] for kw in tfidf_keywords])

    # TextRank keyword extraction.
    textrank_keywords = jieba.analyse.textrank(full_text, topK=top_k, withWeight=True)
    logging.debug("TextRank 提取的关键词: %s", [kw[0] for kw in textrank_keywords])

    # Merge the two result sets, keeping the highest weight seen per keyword.
    combined_weights = {}
    for keyword, weight in tfidf_keywords + textrank_keywords:
        combined_weights[keyword] = max(combined_weights.get(keyword, 0), weight)

    # Part-of-speech filter: keep words where any token's POS tag starts with
    # 'n' (nouns, incl. nr/ns/nt), 'v' (verbs, incl. vn) or 'j' (abbreviations).
    # startswith replaces the old substring test ('n' in flag), which wrongly
    # matched unrelated tags such as 'eng'; iterating all tokens replaces the
    # old __next__() call, which checked only the first token and could raise
    # StopIteration. Stop words are dropped in the same pass.
    pos_prefixes = ('n', 'v', 'j')
    pos_filtered = [
        kw for kw in combined_weights
        if kw not in stopwords
        and any(pair.flag.startswith(pos_prefixes) for pair in jieba.posseg.cut(kw))
    ]

    # Drop low-weight words, then rank by combined weight, keeping the best.
    keyword_list = sorted(
        (kw for kw in pos_filtered if combined_weights[kw] > min_weight),
        key=lambda kw: combined_weights[kw],
        reverse=True,
    )[:top_k]

    logging.info("提取到的关键词列表: %s", keyword_list)

    keyword_times = []
    seen = set()

    # Walk the segments in order and record the first time each keyword appears.
    for i, seg in enumerate(segments):
        logging.debug("处理第 %d 个 segment，内容: %s", i, seg)

        if not isinstance(seg, dict):
            logging.warning("segment %d 不是字典类型，跳过", i)
            continue

        if 'text' not in seg or 'start' not in seg:
            logging.warning("segment %d 缺少必要字段，跳过", i)
            continue

        text = seg['text']
        start = seg['start']

        for keyword in keyword_list:
            if keyword in text and keyword not in seen:
                keyword_times.append({'keyword': keyword, 'time': start})
                seen.add(keyword)
                logging.info("匹配到关键词: %s，时间点: %.2f 秒", keyword, start)

    logging.info("共提取到 %d 个关键词时间点", len(keyword_times))
    return keyword_times