import os
import numpy as np
import random
import math
 
## This script preprocesses collected EEG-artifact data: it randomly shuffles the samples and splits them into training, validation, and test sets. ##
## Parameters were adjusted on top of an open-source program; it references the article "Deep learning models: building ResNet/DenseNet in PyTorch for 1-D data classification". ##
 
 
def normalization(data):
    """Standardize *data* to zero mean and unit variance (z-score)."""
    centered = data - data.mean()
    return centered / data.std()
 
 
def random_rowl(data):
    """Return *data* with its rows rearranged in a uniformly random order."""
    order = np.random.permutation(data.shape[0])
    return data[order]
 
 
def getfilelist(path):
    """Return the names of all '.txt' files in *path*, in random order."""
    txt_files = [name for name in os.listdir(path)
                 if os.path.splitext(name)[1] == '.txt']
    random.shuffle(txt_files)  # randomize which file comes first
    return txt_files
 
 
# Per-class source folders: the jaw-clench, eyebrow-raise, and random-artifact
# data files must be placed into these three folders ahead of time.
path = './random_data/','./eyebrow_data/','./teeth_data/'   # tuple of class folders

sort_num = 3  # number of data classes: jaw-clench, eyebrow-raise, random

for i in range(sort_num):  # one iteration per class folder (count must match path)
    # NOTE(review): path[i] already ends in '/', so this produces a doubled
    # slash like './random_data//' — harmless for the OS, but redundant.
    fileName = "{}/".format(path[i])
    files = getfilelist(fileName)
    # files=getfilelist(path[i]+'/')
    a = len(files)  # number of .txt files found in this class folder
    print(path[i], a)
    #data = np.loadtxt(fileName + files[0])[:, 1]
    # NOTE(review): only the FIRST (randomly chosen) .txt file is loaded;
    # confirm each folder is meant to contain a single data file.
    data = np.loadtxt(fileName + files[0])
    data = normalization(data)

    # Build one-hot labels: column i is set to 1 for every row of class i,
    # so the label width equals the number of classes.
    label = np.zeros((len(data), sort_num))

    for m in range(len(label)):
        label[m, i] = 1
    data = np.c_[data, label]  # append the one-hot label columns to the features
    data = random_rowl(data)   # shuffle rows before splitting

    t = math.floor(len(data) * 0.7)  # 70% of rows go to training

    v = math.floor(len(data) * 0.2)  # 20% to validation; remainder to test
    train = data[:t, :]
    val = data[t:(t + v), :]
    test = data[(t + v):, :]

    # NOTE(review): because path[i] ends in '/', these land INSIDE the class
    # folder as '_train.txt' etc.; being .txt files, a re-run's getfilelist()
    # would pick them up as input — confirm this is intended.
    np.savetxt(path[i] + '_train.txt', train, fmt='%.6f')
    np.savetxt(path[i] + '_val.txt', val, fmt='%.6f')
    np.savetxt(path[i] + '_test.txt', test, fmt='%.6f')
 
# Merge the per-class splits into single shuffled train/val/test files.
# Start from class 0's splits, then stack each remaining class on top.
train = np.loadtxt(path[0] + '_train.txt')
val = np.loadtxt(path[0] + '_val.txt')
test = np.loadtxt(path[0] + '_test.txt')

for i in range(1, sort_num):  # iterate over the remaining class folders
    train1 = np.loadtxt(path[i] + '_train.txt')
    val1 = np.loadtxt(path[i] + '_val.txt')
    test1 = np.loadtxt(path[i] + '_test.txt')

    # Concatenate rows and reshuffle so the classes are interleaved.
    # (Removed leftover debug prints that dumped whole arrays to stdout.)
    train = random_rowl(np.r_[train, train1])
    val = random_rowl(np.r_[val, val1])
    test = random_rowl(np.r_[test, test1])

np.savetxt('train.txt', train, fmt='%.6f')  # combined training set
np.savetxt('val.txt', val, fmt='%.6f')  # combined validation set
np.savetxt('test.txt', test, fmt='%.6f')  # combined test set
 
 
# 从train.txt、val.txt、test.txt中分别获取数据和标签
 
class_num = sort_num  # width of the one-hot label block at the end of each row

# Split each combined file into feature columns and the trailing one-hot
# label columns. Each file is loaded from disk ONCE and sliced twice
# (the original re-loaded every file for the second slice).
_train_all = np.loadtxt('./train.txt')
_val_all = np.loadtxt('./val.txt')
_test_all = np.loadtxt('./test.txt')

train_labels = _train_all[:, -class_num:]
test_labels = _test_all[:, -class_num:]
val_labels = _val_all[:, -class_num:]

train_data = _train_all[:, :-class_num]
test_data = _test_all[:, :-class_num]
val_data = _val_all[:, :-class_num]

np.savetxt('train_data.txt', train_data, fmt='%.6f')
np.savetxt('val_data.txt', val_data, fmt='%.6f')
np.savetxt('test_data.txt', test_data, fmt='%.6f')

np.savetxt('train_labels.txt', train_labels, fmt='%d')
np.savetxt('val_labels.txt', val_labels, fmt='%d')
np.savetxt('test_labels.txt', test_labels, fmt='%d')
