# -*- coding: utf-8 -*-
import os
import random
import model.config as conf
import jieba


def read_document(file_path):
    """Read a text file, trying GB18030 first and falling back to UTF-8.

    Returns the full file contents as a string, or '' when the file
    cannot be opened or decoded with either encoding.
    """
    for encoding in ('gb18030', 'utf-8'):
        try:
            with open(file_path, encoding=encoding) as f:
                return f.read()
        except (OSError, UnicodeError):
            # Wrong encoding or unreadable file: try the next encoding,
            # or give up and return ''.
            continue
    return ''


if __name__ == '__main__':
    # Corpus root: one sub-directory per category, one document per file.
    path = r'E:\czy\文本分类公开数据集\文本分类公开数据集'
    train_list = []
    valid_list = []
    for category in os.listdir(path):
        # Map the directory name to its numeric class label.
        label = conf.cls[category]
        category_path = os.path.join(path, category)
        file_names = os.listdir(category_path)
        # Last index (over successfully read documents) that still goes to
        # the training split — an ~80/20 train/valid ratio per category.
        train_cutoff = int(len(file_names) * 0.8)
        count = 0
        for file_name in file_names:
            text = read_document(os.path.join(category_path, file_name))
            if not text:
                continue  # unreadable or empty document: skip, don't count
            # Flatten to one line: newlines become '。', drop half-width
            # and full-width spaces, then append the label after a space.
            text = text.replace('\n', '。').replace(' ', '').replace('　', '')
            sample = text + ' ' + str(label)
            if count <= train_cutoff:
                train_list.append(sample)
            else:
                valid_list.append(sample)
            count += 1
    random.shuffle(train_list)
    random.shuffle(valid_list)
    # Ensure the output directory exists before writing the splits.
    os.makedirs('public_dataset', exist_ok=True)
    with open('public_dataset/train_dataset.txt', 'w', encoding='utf-8') as f:
        f.writelines(sample + '\n' for sample in train_list)
    with open('public_dataset/valid_dataset.txt', 'w', encoding='utf-8') as f:
        f.writelines(sample + '\n' for sample in valid_list)



