import pickle
import pywt
import torch
from scipy.spatial.distance import pdist, cosine
from tools1 import *
import numpy as np
import conv_classifier
import utils
#%%
# Load the pre-packaged vibration dataset.
# NOTE(review): pickle.load executes arbitrary code during deserialization —
# only ever load this file from a trusted source.
# Fix: use a context manager so the file handle is always closed (the
# original opened the file and never closed it).
with open('./alldata.pkl', 'rb') as file:
    data = pickle.load(file)
#%%
# Healthy-condition samples; presumably one signal segment per row — TODO confirm.
normal_data  = data["normal"]
# Inner-race fault class is deliberately excluded from this experiment run.
# fault_inner  = data["fault_inner"]
fault_out    = data["fault_out"]
fault_roller = data["fault_roller"]
fault_com    = data["fault_compound"]
#%%
# Optional zero-mean preprocessing (currently disabled):
# normal_data = utils.zero_average(normal_data)
# (and likewise for each fault class)
#%%
# Fault classes used downstream, in fixed label order: outer race, roller, compound.
fault_data = [fault_out, fault_roller, fault_com]
#%%
# Repeated-trial experiment: 30 independent random train/test splits.
# total      -- samples per class; train/test counts derive from it below.
# test_num   -- number of test samples passed to the split helper.
# k_short    -- few-shot count of fault samples kept for training.
finall_result = []
for trial in range(30):
    total = 400
    test_num = 395
    k_short = 5

    # Randomly permute the healthy samples before splitting.
    order = list(range(len(normal_data)))
    np.random.shuffle(order)
    normal_train = normal_data[order][0:total - 200, :]
    normal_test = normal_data[order][total - 200:total, :]
    label_train = np.zeros(total - 200)          # healthy class -> label 0
    label_test = np.zeros(len(normal_test))

    #%% Fault samples: build mixed healthy/faulty train and test sets (project helper).
    train_data, test_data, train_label, train_label1, test_label = getrawtrain_test(
        fault_data, k_short, test_num, total,
        normal_train, normal_test, label_train, label_test)

    # Augment the few-shot fault training samples (project helper).
    train_data = dataaugmentation(total, train_data, 3, 5, normal_train, k_short, 5, 0.99, 5)

    #%% Assemble flat arrays for training/testing.
    x_train = np.concatenate(train_data)
    y_train = np.concatenate(train_label1)
    x_test = np.concatenate(test_data)
    y_test = np.concatenate(test_label)

    #%% Reshape to (N, channels=1, length=1024) for the 1-D conv classifier.
    X_train = x_train.reshape(-1, 1, 1024)
    X_test = x_test.reshape(-1, 1, 1024)

    args = {
        'learning_rate': 0.01,
        'number_epoch': 200,
        'batch_size': 16,
        'weight_decay': 0.0001,
        'lr_decay': 0.96,
        'outputsize': 5,
    }
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = conv_classifier.trainModel(args, X_train, y_train, X_test, y_test)
    result = model.train(device)

    # Keep the final-epoch values of the four reported metrics as one row.
    row = np.vstack((result[3][-1], result[4][-1], result[5][-1], result[6][-1])).reshape(-1, 4)
    finall_result.append(row)
    
#%%
# Flatten the per-trial metric rows into one (N, 4) table and export as CSV.
f_result = np.concatenate(finall_result, axis=0).reshape(-1, 4)
np.savetxt("bear_LSWF2_final_5.csv", f_result, delimiter=",")







