import json
import os
import random
import re

import torch
import jieba  # 用于中文分词
import numpy as np
from transformers import BertTokenizer, BertModel
from sklearn.metrics.pairwise import cosine_similarity
from multiprocessing import Pool, cpu_count  # 导入多进程相关模块
import tqdm  # 确保tqdm库已安装
import logging  # 直接使用logging模块，而不是通过setup_logger
ENGLISH_ONLY_PATTERN = re.compile(r"^[a-zA-Z]+$")
# 导入项目配置和工具函数
from config import (
    BERT_MODEL_DIR, INITIAL_CLIMATE_WORDS_FILE, INITIAL_RISK_WORDS_FILE,
    DATA_DIR,
    OUTPUTS_DIR,
    # SENTENCE_JSON_DIR, # 将作为参数传入或从config直接使用
    STOPWORDS_DIR
)
from src.utils import setup_logger, load_text_file, save_text_list, get_progress_bar
# Set up a dedicated logger for this module
logger = setup_logger(__name__, "step1_keyword_expansion.log")

# Output directory for the per-seed-word expansion results
SEED_LIB_OUTPUT_DIR = os.path.join(DATA_DIR, "dictionaries", "seed_specific_libraries")


def load_stopwords_from_folder(folder_path):
    """
    Load stopwords from every ``.txt`` file inside the given folder.

    Each ``.txt`` file is expected to contain one stopword per line.

    Args:
        folder_path (str): Path of the folder holding stopword files.
            May be falsy, in which case nothing is loaded.

    Returns:
        set: Union of all unique stopwords found across the files
        (empty when the folder is missing or not provided).
    """
    stop_words = set()
    if not folder_path:
        logger.info("未提供停用词文件夹路径，不加载停用词。")
        return stop_words
    # os.path.isdir already implies existence, so a single check suffices.
    if not os.path.isdir(folder_path):
        logger.warning(f"停用词文件夹 '{folder_path}' 未找到或不是一个目录。将不加载停用词。")
        return stop_words

    logger.info(f"正在从文件夹加载停用词: {folder_path}")
    loaded_count = 0
    for filename in os.listdir(folder_path):
        if not filename.endswith(".txt"):
            continue
        filepath = os.path.join(folder_path, filename)
        try:
            with open(filepath, 'r', encoding='utf-8') as f:
                file_stopwords_count = 0
                for line in f:
                    word = line.strip()
                    # Only count words not already contributed by an earlier file.
                    if word and word not in stop_words:
                        stop_words.add(word)
                        file_stopwords_count += 1
            # BUGFIX: the original log message hard-coded the literal
            # '(unknown)' instead of interpolating the actual file name.
            logger.debug(f"从文件 '{filename}' 加载了 {file_stopwords_count} 个新的唯一停用词。")
            loaded_count += file_stopwords_count
        except Exception as e:
            logger.error(f"读取停用词文件 {filepath} 时出错: {e}")
    logger.info(f"总共加载了 {len(stop_words)} 个唯一的停用词 (从所有文件中新增 {loaded_count} 个)。")
    return stop_words


def get_bert_embedding(text_list, model, tokenizer, device, batch_size=32, max_length=64):
    """
    Compute BERT embeddings for a list of texts by mean-pooling each text's
    token hidden states, excluding the [CLS] and [SEP] special tokens.
    Optimized for short texts such as single words or phrases.

    Args:
        text_list (list[str]): Texts to embed; may be empty.
        model: Loaded ``BertModel`` already moved to ``device``.
        tokenizer: The matching ``BertTokenizer``.
        device: Torch device on which forward passes run.
        batch_size (int): Number of texts encoded per forward pass.
        max_length (int): Tokenizer truncation length (includes special tokens).

    Returns:
        np.ndarray: Array of shape (len(text_list), hidden_size); an empty
        array when ``text_list`` is empty.
    """
    if not text_list:
        return np.array([])

    model.eval()  # disable dropout etc. for deterministic inference
    all_mean_embeddings_np = []
    pbar = tqdm.tqdm(range(0, len(text_list), batch_size), desc="Generating word embeddings")

    for i in pbar:
        batch_texts = text_list[i:i + batch_size]
        pbar.set_postfix_str(f"Batch {i // batch_size + 1}/{(len(text_list) + batch_size - 1) // batch_size}",
                             refresh=True)

        inputs = tokenizer(
            batch_texts,
            return_tensors="pt",
            padding=True,
            truncation=True,
            max_length=max_length,
            add_special_tokens=True
        ).to(device)

        with torch.no_grad():
            outputs = model(**inputs)
            last_hidden_states = outputs.last_hidden_state
            attention_mask = inputs['attention_mask']

            current_batch_mean_embeddings_list = []
            for j in range(last_hidden_states.shape[0]):
                # Number of real (unpadded) tokens, including [CLS] and [SEP].
                num_actual_tokens_inc_special = int(torch.sum(attention_mask[j]).item())
                if num_actual_tokens_inc_special > 2:  # >2 means at least [CLS], one token, [SEP]
                    # Slice out only the content tokens between [CLS] and [SEP].
                    word_token_embeddings = last_hidden_states[j, 1:num_actual_tokens_inc_special - 1, :]
                    if word_token_embeddings.shape[0] > 0:
                        mean_embedding = torch.mean(word_token_embeddings, dim=0)
                    else:  # Should not happen if num_actual_tokens_inc_special > 2 and truncation works
                        logger.warning(f"Text '{batch_texts[j]}' (num_tokens={num_actual_tokens_inc_special}) "
                                       f"yielded no tokens for averaging. Using zero vector.")
                        mean_embedding = torch.zeros(model.config.hidden_size, device=device,
                                                     dtype=last_hidden_states.dtype)
                # Fallback for very short texts that might only have [CLS] and [SEP] after tokenization,
                # or if something unexpected happens.
                else:
                    original_text = batch_texts[j]
                    # If the original text was not empty but tokenization results in <=2 tokens (e.g. only CLS, SEP)
                    if len(original_text) > 0:
                        logger.warning(f"Non-empty text '{original_text}' resulted in problematic tokenization "
                                       f"(num_tokens={num_actual_tokens_inc_special}). Using [CLS] token embedding as fallback.")
                        mean_embedding = last_hidden_states[j, 0, :]  # Fallback to [CLS] token embedding
                    else:  # Original text was empty
                        logger.debug(
                            f"Empty text resulted in num_tokens={num_actual_tokens_inc_special}. Using zero vector.")
                        mean_embedding = torch.zeros(model.config.hidden_size, device=device,
                                                     dtype=last_hidden_states.dtype)
                # Move each vector to CPU immediately to free GPU memory.
                current_batch_mean_embeddings_list.append(mean_embedding.detach().cpu().numpy())
            all_mean_embeddings_np.extend(current_batch_mean_embeddings_list)

    if not all_mean_embeddings_np:  # Should not happen if text_list was not empty
        return np.array([])
    return np.vstack(all_mean_embeddings_np)


def segment_sentence(sentence):
    """
    Segment one sentence and return its candidate tokens: non-empty,
    longer than one character, not numeric, and not purely English letters.
    """
    def _is_numeric(token):
        # Anything float() accepts (e.g. "3.5", "1e3") counts as a number.
        try:
            float(token)
        except ValueError:
            return False
        return True

    cleaned = sentence.replace(" ", "").replace('%', '')
    kept = set()
    for token in jieba.lcut(cleaned):
        token = token.strip()
        if (len(token) > 1
                and not _is_numeric(token)
                and not ENGLISH_ONLY_PATTERN.match(token)):
            kept.add(token)
    return kept


def get_all_candidate_words_and_embeddings(all_sentences, model, tokenizer, device,
                                           batch_size=64, stopwords_dir=None,
                                           num_processes=None):
    """
    Extract the unique candidate words from all sentences (after stopword
    filtering) and generate their BERT embeddings. Sentence segmentation via
    ``jieba.lcut`` is parallelized across worker processes.

    Args:
        all_sentences (list[str]): Sentences to mine for candidate words.
        model: Loaded BertModel forwarded to ``get_bert_embedding``.
        tokenizer: Matching BertTokenizer.
        device: Torch device for embedding generation.
        batch_size (int): Embedding batch size.
        stopwords_dir (str | None): Folder of stopword .txt files; filtering
            is skipped when falsy.
        num_processes (int | None): Worker count for segmentation; defaults
            to ``min(cpu_count(), 64)``.

    Returns:
        tuple[list[str], np.ndarray]: Sorted unique candidate words and their
        embedding matrix; ``([], np.array([]))`` when nothing usable remains.
    """
    logger.info(f"从 {len(all_sentences)} 条句子中提取所有唯一的候选词 (并行化 jieba.lcut)...")
    if not all_sentences:
        logger.warning("输入的句子列表为空，无法提取候选词。")
        return [], np.array([])

    if num_processes is None:
        num_processes = min(cpu_count(), 64)

    logger.info(f"将使用 {num_processes} 个进程进行分词。")
    candidate_words_set = set()

    # jieba.initialize() # ensure jieba is initialized in the main process; workers usually inherit it automatically

    # Use imap so progress integrates with tqdm and evaluation stays lazy.
    with Pool(processes=num_processes) as pool:
        results_iter = pool.imap(segment_sentence, all_sentences, chunksize=100)  # chunksize is tunable
        # Wrap the iterator with tqdm for a progress bar.
        for sentence_words in tqdm.tqdm(results_iter, total=len(all_sentences),
                                        desc="Segmenting all sentences (Parallel)"):
            if sentence_words:  # only merge non-empty result sets
                candidate_words_set.update(sentence_words)

    logger.info(f"初步提取到 {len(candidate_words_set)} 个唯一的候选词。")

    if stopwords_dir:
        stop_words_to_exclude = load_stopwords_from_folder(stopwords_dir)
        if stop_words_to_exclude:
            original_count = len(candidate_words_set)
            candidate_words_set = candidate_words_set - stop_words_to_exclude
            removed_count = original_count - len(candidate_words_set)
            logger.info(f"根据停用词表移除了 {removed_count} 个候选词。剩余候选词: {len(candidate_words_set)}")
    else:
        logger.info("未配置停用词目录，不进行停用词过滤。")

    # Sort for a deterministic ordering of words and embedding rows.
    candidate_words_list = sorted(list(candidate_words_set))

    if not candidate_words_list:
        logger.warning("过滤停用词后，未能从句子中提取到任何候选词。")
        return [], np.array([])

    logger.info(f"过滤后剩余 {len(candidate_words_list)} 个唯一的候选词用于生成嵌入。")
    # Persist the candidate word list (optional, but useful for debugging/analysis).
    save_text_list(candidate_words_list, os.path.join(OUTPUTS_DIR, 'candidate_words.txt'))

    logger.info("正在为所有候选词生成BERT嵌入 (这可能需要一些时间)...")
    candidate_embeddings_matrix = get_bert_embedding(
        candidate_words_list, model, tokenizer, device, batch_size=batch_size
    )

    return candidate_words_list, candidate_embeddings_matrix


# expand_and_save_single_seed_similarities: per-seed similarity computation and
# persistence of the top-k most similar candidate words.
def expand_and_save_single_seed_similarities(
        seed_word,
        candidate_words_list,
        all_candidate_embeddings,
        model,
        tokenizer,
        device,
        top_k_to_save,
        output_dir
):
    """
    Compute cosine similarity between one seed word and every candidate word,
    then write the top-k (word, score) pairs to a file named after the seed.
    """
    logger.info(f"开始为种子词 '{seed_word}' 计算相似度并扩展...")

    if not candidate_words_list or all_candidate_embeddings.shape[0] == 0:
        logger.warning(f"候选词列表为空或无效，无法为种子词 '{seed_word}' 进行扩展。")
        return

    logger.debug(f"正在为种子词 '{seed_word}' 生成BERT嵌入...")
    seed_vectors = get_bert_embedding([seed_word], model, tokenizer, device)
    if seed_vectors.shape[0] == 0:
        logger.warning(f"无法为种子词 '{seed_word}' 生成嵌入，跳过。")
        return

    # One row vector for the seed vs. the full candidate matrix.
    scores = cosine_similarity(seed_vectors[0].reshape(1, -1), all_candidate_embeddings)[0]

    # Pair each candidate with its score and keep the k highest.
    ranked = sorted(zip(candidate_words_list, scores),
                    key=lambda pair: pair[1], reverse=True)
    top_results = ranked[:top_k_to_save]

    if not top_results:
        logger.info(f"种子词 '{seed_word}' 未找到任何相似词。")
        return

    os.makedirs(output_dir, exist_ok=True)
    # Sanitize the seed word into a filesystem-safe filename stem.
    safe_seed_word_filename = "".join(c if c.isalnum() else "_" for c in seed_word)
    if not safe_seed_word_filename:
        safe_seed_word_filename = "empty_or_special_seed"  # seed was empty or all special characters
    output_filepath = os.path.join(output_dir, f"{safe_seed_word_filename}_library.txt")

    try:
        with open(output_filepath, 'w', encoding='utf-8') as f:
            f.writelines(f"{word}\t{score:.6f}\n" for word, score in top_results)
        logger.info(f"为种子词 '{seed_word}' 保存了 Top {len(top_results)} 个相似词到: {output_filepath}")
    except IOError as e:
        logger.error(f"无法写入文件 {output_filepath}: {e}")


def _load_sentences_from_json_dir(sentence_json_dir):
    """Load and concatenate all JSON chunk files (each holding a list of
    sentences) from ``sentence_json_dir``; returns None when the directory
    is missing or contains no .json files."""
    logger.info(f"从目录加载分块句子JSON文件: {sentence_json_dir}")
    if not os.path.isdir(sentence_json_dir):
        logger.error(f"句子JSON目录 '{sentence_json_dir}' 未找到。关键词扩展中止。")
        return None

    json_files = [f for f in os.listdir(sentence_json_dir) if f.endswith('.json')]
    if not json_files:
        logger.error(f"在目录 '{sentence_json_dir}' 中未找到JSON文件。关键词扩展中止。")
        return None

    all_sentences = []
    for json_file in tqdm.tqdm(sorted(json_files), desc="加载句子JSON块"):
        filepath = os.path.join(sentence_json_dir, json_file)
        try:
            with open(filepath, 'r', encoding='utf-8') as f:
                sentences_in_chunk = json.load(f)
            if isinstance(sentences_in_chunk, list):
                all_sentences.extend(sentences_in_chunk)
            else:
                logger.warning(f"文件 {json_file} 内容格式不正确 (不是列表)，已跳过。")
        except Exception as e:
            logger.error(f"读取或解析JSON文件 {filepath} 时出错: {e}")

    logger.info(f"从所有JSON块总共加载了 {len(all_sentences)} 条句子。")
    return all_sentences


def _expand_seed_words(seed_words, desc, candidate_words, candidate_embeddings,
                       model, tokenizer, device, top_k):
    """Expand one seed-word list via expand_and_save_single_seed_similarities,
    skipping blank seeds; results go to SEED_LIB_OUTPUT_DIR."""
    for seed_word in tqdm.tqdm(seed_words, desc=desc):
        if not seed_word.strip():
            logger.warning("发现空的种子词，跳过。")
            continue
        expand_and_save_single_seed_similarities(
            seed_word, candidate_words, candidate_embeddings,
            model, tokenizer, device,
            top_k, SEED_LIB_OUTPUT_DIR
        )


def run_keyword_expansion_step(sentences_source):
    """
    Main entry point for step 1: keyword expansion.

    Args:
        sentences_source (str or list):
            - str: path of a directory containing chunked sentence JSON files.
            - list: a list of sentences (mainly used for testing).
    """
    logger.info("开始执行步骤1：关键词扩展...")

    # --- Load sentences (directory of JSON chunks, or an in-memory list) ---
    if isinstance(sentences_source, str):
        all_sentences = _load_sentences_from_json_dir(sentences_source)
        if all_sentences is None:
            return
    elif isinstance(sentences_source, list):
        all_sentences = sentences_source
        logger.info(f"直接使用传入的 {len(all_sentences)} 条句子进行处理 (可能为测试模式)。")
    else:
        logger.error("无效的 'sentences_source' 参数类型。必须是目录路径或句子列表。")
        return

    if not all_sentences:
        logger.error("未能加载或提供任何句子用于关键词扩展。中止。")
        return

    # --- Select device and load the BERT model/tokenizer ---
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info(f"将使用设备: {device}")
    if device.type == 'cuda':
        try:
            logger.info(f"CUDA设备名称: {torch.cuda.get_device_name(0)}")
        except Exception as e:
            logger.warning(f"无法获取CUDA设备名称: {e}")

    logger.info(f"从本地路径加载BERT模型和分词器: {BERT_MODEL_DIR}")
    try:
        tokenizer = BertTokenizer.from_pretrained(BERT_MODEL_DIR)
        model = BertModel.from_pretrained(BERT_MODEL_DIR)
        model.to(device)
    except Exception as e:
        logger.error(f"加载BERT模型或分词器失败: {e}")
        logger.error(f"请确保已将bert-base-chinese模型文件 (config.json, pytorch_model.bin, vocab.txt) "
                     f"正确放置在目录: {BERT_MODEL_DIR}")
        return

    # --- Candidate extraction + embedding ---
    candidate_words, candidate_embeddings = get_all_candidate_words_and_embeddings(
        all_sentences, model, tokenizer, device,
        batch_size=128,  # tune to available GPU memory
        stopwords_dir=STOPWORDS_DIR,
        num_processes=24  # explicit value; pass None to let the callee pick min(cpu_count(), 64)
    )

    if not candidate_words or candidate_embeddings.shape[0] == 0:
        logger.error("未能从句子中提取候选词或其嵌入，关键词扩展中止。")
        return

    # --- Expand both seed dictionaries ---
    TOP_K_TO_SAVE_PER_SEED = 100  # configurable
    initial_climate_seeds = load_text_file(INITIAL_CLIMATE_WORDS_FILE)
    initial_risk_seeds = load_text_file(INITIAL_RISK_WORDS_FILE)

    if not initial_climate_seeds and not initial_risk_seeds:
        logger.warning("气候和风险的初始种子词典均为空，无法进行扩展。")
        return

    os.makedirs(SEED_LIB_OUTPUT_DIR, exist_ok=True)

    if initial_climate_seeds:
        logger.info(f"--- 开始处理气候种子词 ({len(initial_climate_seeds)}个) ---")
        _expand_seed_words(initial_climate_seeds, "Expanding Climate Seeds",
                           candidate_words, candidate_embeddings,
                           model, tokenizer, device, TOP_K_TO_SAVE_PER_SEED)
    else:
        logger.warning(f"初始气候词典 {INITIAL_CLIMATE_WORDS_FILE} 为空或加载失败。")

    if initial_risk_seeds:
        logger.info(f"--- 开始处理风险种子词 ({len(initial_risk_seeds)}个) ---")
        _expand_seed_words(initial_risk_seeds, "Expanding Risk Seeds",
                           candidate_words, candidate_embeddings,
                           model, tokenizer, device, TOP_K_TO_SAVE_PER_SEED)
    else:
        logger.warning(f"初始风险词典 {INITIAL_RISK_WORDS_FILE} 为空或加载失败。")

    logger.info(f"步骤1：关键词扩展（相似度计算）全部完成。结果保存在: {SEED_LIB_OUTPUT_DIR}")

def load_sentences(sentences_source):
    """
    Load the sentences used for keyword expansion.

    Args:
        sentences_source (str or list):
            - str: path of a directory containing chunked sentence JSON
              files (each file holds a JSON list of sentences).
            - list: an already-loaded sentence list (mainly for testing).

    Returns:
        list or None: All loaded sentences, or None when the source is
        invalid (missing directory, no JSON files, or unsupported type).
    """
    # BUGFIX: the original returned None for list input even though the
    # docstring promised list support, and its docstring/log message were
    # copy-pasted from run_keyword_expansion_step.
    if isinstance(sentences_source, list):
        logger.info(f"直接使用传入的 {len(sentences_source)} 条句子 (可能为测试模式)。")
        return list(sentences_source)

    if not isinstance(sentences_source, str):
        logger.error("无效的 'sentences_source' 参数类型。必须是目录路径或句子列表。")
        return None

    sentence_json_dir = sentences_source
    logger.info(f"从目录加载分块句子JSON文件: {sentence_json_dir}")
    if not os.path.isdir(sentence_json_dir):
        logger.error(f"句子JSON目录 '{sentence_json_dir}' 未找到。关键词扩展中止。")
        return None

    json_files = [f for f in os.listdir(sentence_json_dir) if f.endswith('.json')]
    if not json_files:
        logger.error(f"在目录 '{sentence_json_dir}' 中未找到JSON文件。关键词扩展中止。")
        return None

    all_sentences = []
    for json_file in tqdm.tqdm(sorted(json_files), desc="加载句子JSON块"):
        filepath = os.path.join(sentence_json_dir, json_file)
        try:
            with open(filepath, 'r', encoding='utf-8') as f:
                sentences_in_chunk = json.load(f)
            if isinstance(sentences_in_chunk, list):
                all_sentences.extend(sentences_in_chunk)
            else:
                logger.warning(f"文件 {json_file} 内容格式不正确 (不是列表)，已跳过。")
        except Exception as e:
            logger.error(f"读取或解析JSON文件 {filepath} 时出错: {e}")

    logger.info(f"从所有JSON块总共加载了 {len(all_sentences)} 条句子。")
    return all_sentences

if __name__ == '__main__':
    # Standalone smoke test for the keyword-expansion module.
    logger.info("独立运行关键词扩展模块测试...")

    # --- Test setup ---
    # Create sample seed dictionaries and a stopword file if missing, so the
    # module can be exercised without the full pipeline's data.
    dictionaries_base_dir = os.path.dirname(INITIAL_CLIMATE_WORDS_FILE)
    if dictionaries_base_dir:  # Ensure base directory for seed files exists
        os.makedirs(dictionaries_base_dir, exist_ok=True)

    if not os.path.exists(INITIAL_CLIMATE_WORDS_FILE):
        logger.info(f"创建示例气候词典: {INITIAL_CLIMATE_WORDS_FILE}")
        save_text_list(["气候变化", "极端天气", "碳排放"], INITIAL_CLIMATE_WORDS_FILE)

    if not os.path.exists(INITIAL_RISK_WORDS_FILE):
        logger.info(f"创建示例风险词典: {INITIAL_RISK_WORDS_FILE}")
        save_text_list(["市场风险", "操作风险", "信用风险"], INITIAL_RISK_WORDS_FILE)

    if STOPWORDS_DIR:
        os.makedirs(STOPWORDS_DIR, exist_ok=True)
        sample_stopword_file = os.path.join(STOPWORDS_DIR, "test_sample_stopwords.txt")
        if not os.path.exists(sample_stopword_file):
            logger.info(f"创建示例停用词文件: {sample_stopword_file} (用于测试)")
            save_text_list(["的", "是", "一个", "不容小觑", "测试句子"], sample_stopword_file)
        logger.info(f"测试时将尝试从 '{STOPWORDS_DIR}' 加载停用词。")
    else:
        logger.info("config.STOPWORDS_DIR 未设置，停用词过滤将被跳过。")

    logger.warning("为了加速独立测试，将使用预置的少量示例文本进行关键词扩展测试。")
    logger.warning(f"确保 {BERT_MODEL_DIR} 包含正确的BERT模型文件。")

    # For testing, a small in-memory sentence list is passed directly.
    all_sentences_for_testing = [
        "这是一个关于气候变化的测试句子，其中提到了全球变暖和碳排放，以及极端天气事件。",
        "企业面临的市场风险不容小觑，同时操作风险和信用风险也需要得到妥善管理。",
        # (original test sentence list preserved)
        "可持续发展要求我们关注环境风险，并采取措施应对气候变化带来的挑战。",
        "金融市场的波动性带来了投资风险，投资者应谨慎评估。",
        "低碳经济是未来的发展方向，旨在减少温室气体排放。",
        "供应链中断可能引发严重的经营风险，地缘政治风险加剧。",
        "碳排放是一个重要指标。",
        "风险评估需要关注。",
        "人工智能正在改变世界。",
        "大数据分析是当今热门领域。",
        "量子计算仍处于早期阶段。",
        "区块链技术具有分布式特性。",
        "云计算提供了灵活的计算资源。",
        "机器学习是人工智能的核心。",
        "物联网连接万物。",
        "边缘计算减少延迟。",
        "虚拟现实带来沉浸体验。",
        "增强现实融合虚拟和现实。",
        "生物技术创新不断。",
        "基因编辑技术潜力巨大。",
        "新能源汽车是未来趋势。",
        "智能家居提升生活品质。",
        "自动驾驶汽车将改变交通。",
        "智慧城市构建高效社区。",
        "机器人技术发展迅速。",
        "无人机应用于多领域。",
        "5G技术开启新时代。",
        "卫星互联网覆盖全球。",
        "可穿戴设备监测健康。",
        "数字货币前景广阔。",
        "网络安全至关重要。",
        "数据隐私是重要议题。",
        "在线教育普及。",
        "远程工作成为常态。",
        "线上购物便捷。",
        "社交媒体改变生活。"
    ]

    if not all_sentences_for_testing:
        logger.error("测试句子列表为空，无法进行关键词扩展测试。")
    else:
        logger.info(f"使用 {len(all_sentences_for_testing)} 个示例文本句子进行测试。")
        # run_keyword_expansion_step accepts either a sentence list or a directory path
        run_keyword_expansion_step(all_sentences_for_testing)

    logger.info("关键词扩展模块独立测试运行结束。")
    logger.info(f"检查输出目录 '{SEED_LIB_OUTPUT_DIR}' 以获取每个种子词的相似度列表。")
    if STOPWORDS_DIR:
        logger.info(f"检查日志确认是否从 '{STOPWORDS_DIR}' 加载并应用了停用词。")