import random
from pathlib import Path

# import jieba
import pandas as pd

# # 加载停用词表
# stopwords = set()
# with open('TikTok/stopwords.txt', 'r', encoding='utf-8') as f:
#     for line in f:
#         stopwords.add(line.strip())

# Load the raw video dataset (expects columns '分类' and '视频标题').
data = pd.read_csv(filepath_or_buffer='TikTok/data.csv')

# Ordered category names; position in the tuple is the numeric class id
# used by the downstream classifier.
_CATEGORIES = (
    '赌博诈骗',
    '未成年不良信息',
    '色情低俗',
    '设计侵犯版权、名誉、肖像、隐私、商标权等',
    '不实信息',
    '血腥暴力',
    '封建迷信',
)

# Category name -> integer label (0..6).
label_mapping = {name: idx for idx, name in enumerate(_CATEGORIES)}

# Write one "<title>\t<label>" line per video. Categories not present in
# label_mapping are assigned the sentinel label -1.
with open('TikTok/data_tokenized_no_stopwords.txt', 'w', encoding='utf-8') as out:
    for _, row in data.iterrows():
        # Map the category name to its numeric id, falling back to -1.
        numeric_label = label_mapping.get(row['分类'], -1)
        out.write(f"{row['视频标题']}\t{numeric_label}\n")


# Reload the title/label lines written above.
with open('TikTok/data_tokenized_no_stopwords.txt', 'r', encoding='utf-8') as f:
    data_lines = f.readlines()

# Shuffle with a fixed seed so the train/dev/test split is reproducible
# across runs — otherwise every re-run reassigns rows between splits and
# experiment results are not comparable.
random.seed(42)
random.shuffle(data_lines)

# 80% train / 10% dev / 10% test split boundaries.
train_index = int(0.8 * len(data_lines))
dev_index = int(0.9 * len(data_lines))

train_data = data_lines[:train_index]
dev_data = data_lines[train_index:dev_index]
test_data = data_lines[dev_index:]

# Make sure the output directory exists; open(..., 'w') does not create
# parent directories and would raise FileNotFoundError on a fresh checkout.
_out_dir = Path('TikTok/data/no_stopwords')
_out_dir.mkdir(parents=True, exist_ok=True)

# Save the training split.
with open(_out_dir / 'train.txt', 'w', encoding='utf-8') as f:
    f.writelines(train_data)

# Save the development split.
with open(_out_dir / 'dev.txt', 'w', encoding='utf-8') as f:
    f.writelines(dev_data)

# Save the test split.
with open(_out_dir / 'test.txt', 'w', encoding='utf-8') as f:
    f.writelines(test_data)
