# -*- coding: utf-8 -*-
import jieba

from settings.path import *
import os

# Ensure the model output directory exists. makedirs(exist_ok=True) also
# creates missing parent directories and avoids the TOCTOU race of the
# check-then-mkdir pattern.
os.makedirs(path_model, exist_ok=True)

# True -> character-level tokenization; False -> jieba word segmentation.
not_jieba = False

# data_raw = path_train_txt
# data_processed = path_data_train_fasttext_processed_txt

data_raw = path_test_txt  # process the test set
data_processed = path_data_test_fasttext_processed_txt


def data_festtest(data_raw=data_raw, data_processed=data_processed):
    """Convert raw TSV data ("text<TAB>label-id") into fastText format.

    Each output line is ``__label__<class-name> <tokenized text>``.
    Tokenization is character-level when the module-level ``not_jieba``
    flag is True, otherwise jieba word segmentation is used.

    Args:
        data_raw: path of the tab-separated input file (text, label id).
        data_processed: path where fastText-formatted lines are written.
    """
    # Map class index -> class name; one name per line in path_class_txt,
    # line number (0-based) being the class id.
    id2name = {}
    with open(path_class_txt, 'r', encoding='utf-8') as file:
        lines = file.readlines()

    for idx, line in enumerate(lines):
        # readlines() keeps the trailing '\n', so strip BEFORE the blank
        # check; otherwise blank lines would map an index to ''.
        name = line.strip()
        if not name:
            continue
        id2name[idx] = name

    # Build the fastText-formatted lines.
    datas = []
    with open(data_raw, 'r', encoding='utf-8') as f:
        lines = f.readlines()

    for line in lines:
        line = line.strip()
        if not line:
            continue
        # The label id is always the LAST tab-separated field; rsplit
        # tolerates tabs inside the text itself (plain split would raise).
        text, label = line.rsplit('\t', 1)
        label_name = f'__label__{id2name[int(label)]}'
        text = text.replace('：', '')  # some texts contain a full-width colon; drop it
        # not_jieba True -> character-level tokens; False -> jieba word segmentation
        words = list(text) if not_jieba else jieba.cut(text)
        text_processed = ' '.join(word for word in words if word.strip())
        datas.append(f'{label_name} {text_processed}')

    # Persist the preprocessed data, one sample per line.
    with open(data_processed, mode='w', encoding='utf-8') as f:
        for data in datas:
            f.write(data + '\n')

# data_festtest(data_raw,data_processed)
