import jieba
from collections import Counter
import os
from tqdm import tqdm

# ============== Hard-coded configuration ==============
train_path = "./weibo_tsv/train.tsv"  # training set path
dev_path = "./weibo_tsv/dev.tsv"  # dev set path
test_path = "./weibo_tsv/test.tsv"  # test set path
# Raw string: the original non-raw literal depended on "\C", "\K", "\m" etc.
# being *invalid* escapes that CPython keeps literally — a SyntaxWarning on
# Python 3.12+ and fragile (a segment starting with "t" or "n" would be
# silently mangled into a tab/newline).
vocab_path = r"E:\Code\Python\word2vec\K-BERT\models\google_vocab_ext.txt"  # existing vocab file path
stopwords_path = "./stopwords.txt"  # stopword list path
min_count = 5  # minimum occurrences for a word to be kept


# =======================================

def load_stopwords():
    """Return the stopword set read from ``stopwords_path``.

    Each line is stripped of surrounding whitespace; a missing file
    yields an empty set.
    """
    if not os.path.exists(stopwords_path):
        return set()
    with open(stopwords_path, 'r', encoding='utf-8') as fh:
        stripped = (raw.strip() for raw in fh)
        return set(stripped)


def extract_words():
    """Segment every dataset file and return a Counter of word frequencies.

    For each of the train/dev/test files, the first line (header) is
    skipped and only lines containing a tab are processed; the second
    tab-separated column is treated as the text. Single-character tokens
    and stopwords are discarded before counting.
    """
    counts = Counter()
    stopwords = load_stopwords()

    for path in (train_path, dev_path, test_path):
        # Pre-count the lines so tqdm can render a real progress total.
        with open(path, 'r', encoding='utf-8') as fh:
            n_lines = sum(1 for _ in fh)

        with open(path, 'r', encoding='utf-8') as fh:
            progress = tqdm(fh, total=n_lines,
                            desc=f"Processing {os.path.basename(path)}")
            for row_idx, line in enumerate(progress):
                if row_idx == 0:
                    continue  # header row
                if '\t' not in line:
                    continue
                text = line.split('\t')[1]  # second column assumed to be the text
                counts.update(
                    token for token in jieba.lcut(text)
                    if len(token) > 1 and token not in stopwords
                )
    return counts



def load_existing_vocab():
    """Return the set of words already present in ``vocab_path``.

    The first whitespace-separated token of each non-blank line is taken
    as the word. A missing vocab file yields an empty set.
    """
    words = set()
    if not os.path.exists(vocab_path):
        return words
    with open(vocab_path, 'r', encoding='utf-8') as fh:
        for raw in fh:
            fields = raw.strip().split()
            if fields:
                words.add(fields[0])
    return words


def generate_vocab():
    """Extend the vocabulary file with frequent new words from the datasets.

    Words are extracted from the train/dev/test files, filtered by
    ``min_count``, deduplicated against the existing vocabulary, and
    appended one-per-line to ``vocab_path``.
    """
    # 1. Extract words and their frequencies from all dataset files.
    print("开始提取词语并统计词频...")
    word_counts = extract_words()
    print(f"共提取到{len(word_counts)}个唯一词语")

    # 2. Drop low-frequency words.
    top_words = [word for word, count in word_counts.items()
                 if count >= min_count]

    # 3. Keep only words the existing vocabulary does not already contain.
    existing_words = load_existing_vocab()
    new_words = [w for w in top_words if w not in existing_words]

    # 4. Append new words to the vocab file, one per line (matching the
    # existing format). BERT-style vocabs derive token IDs from line
    # position, so appending implicitly assigns the next free IDs —
    # no explicit ID bookkeeping is needed.
    with open(vocab_path, 'a', encoding='utf-8') as f:
        for word in new_words:
            f.write(f"{word}\n")

    print(f"词表扩展完成，新增{len(new_words)}个词语。示例：{new_words[:20]}")


if __name__ == "__main__":
    # Seed jieba with the current vocabulary (when present) so that known
    # multi-character words are kept intact during segmentation.
    if os.path.exists(vocab_path):
        jieba.load_userdict(vocab_path)
    generate_vocab()