import pandas as pd
import torch
import torch.nn as nn
from transformer import TransformerEncoderLayer, TransformerEncoder
import numpy as np
import time
import math
from matplotlib import pyplot
import os, sys
from sklearn.model_selection import train_test_split
from dec_pytorch_master.lib.stackedDAE import StackedDAE
import pickle
import matplotlib as mpl
# Use a CJK font so the Chinese plot labels below render correctly.
mpl.rcParams['font.family'] = 'SimSun'
pyplot.rcParams['axes.unicode_minus'] = False   # render minus signs correctly on axes when a CJK font is active

class Logger(object):
    """Tee-style stream wrapper that duplicates writes to a terminal stream
    and a log file.

    Assigning an instance to ``sys.stdout`` mirrors all console output into
    ``filename`` while still printing to the original stream.
    """

    def __init__(self, filename='default.log', stream=sys.stdout) -> None:
        self.terminal = stream
        self.log = open(filename, 'w+')

    def write(self, message):
        # Duplicate every message to both the original stream and the log file.
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        # Bug fix: the original no-op flush meant buffered output could be
        # lost (e.g. on interpreter exit or ``print(..., flush=True)``).
        self.terminal.flush()
        self.log.flush()



# Allow duplicate OpenMP runtimes (a common MKL/PyTorch clash, especially on
# Windows); without this the process can abort at startup.
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
# torch.manual_seed(0)
# np.random.seed(0)


# Flag defined here but not read anywhere in this view.
calculate_loss_over_all_values = False

len_series = 40  # number of time steps per sample sequence

batch_size =4 # batch size
vail_index = 3  # bearing held out for validation (Bearing1_3); the rest train

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Mirror all stdout into Logger/<vail_index>_<batch_size>.log.
sys.stdout = Logger('Logger/{}_{}.log'.format(vail_index,batch_size),sys.stdout)

# Stacked denoising autoencoder: 2560 raw features -> 512-d code
# (presumably pre-trained by the DEC pipeline — weights loaded below).
sdae = StackedDAE(input_dim=2560, z_dim=512, binary=False,
        encodeLayer=[1024], decodeLayer=[1024], activation="sigmoid", 
        dropout=0)


sdae.load_state_dict(torch.load("dec_pytorch_master/model/sdae.pt"))  



# for batch_idx, (inputs, _) in enumerate(train_loader):
#         inputs = inputs.view(inputs.size(0), -1).float()
#         if torch.cuda.is_available():
#             inputs = inputs.cuda()
#         h, _ = sdae(inputs)

def get_data():
    """Load pickled bearing sequences and build the train/validation tensors.

    Bearings 1..7 except ``vail_index`` form the training set; the held-out
    bearing forms the validation set.  Each sample is (len_series, 2561):
    2560 feature columns followed by one label column.  Training samples are
    shuffled and both float32 tensors are moved to ``device``.
    """
    train_ids = [1, 2, 3, 4, 5, 6, 7]
    train_ids.remove(vail_index)

    def _stack(bearing_ids):
        # Concatenate the pickled (n, len_series, 2561) arrays for the given bearings.
        stacked = np.empty([0, len_series, 2561])
        for b in bearing_ids:
            with open('Bearing_data_40/Bearing1_{}.plk'.format(b), 'rb') as tf:
                stacked = np.concatenate((stacked, np.array(pickle.load(tf))), axis=0)
        return stacked

    train_arr = _stack(train_ids)
    test_arr = _stack([vail_index])

    # Split off the last channel as the label, then re-join so the layout
    # stays (features..., label) as a float32 tensor.
    train_sequence = torch.cat(
        (torch.from_numpy(train_arr[:, :, 0:-1]),
         torch.from_numpy(train_arr[:, :, [-1]])), dim=2).type(torch.FloatTensor)
    test_sequence = torch.cat(
        (torch.from_numpy(test_arr[:, :, 0:-1]),
         torch.from_numpy(test_arr[:, :, [-1]])), dim=2).type(torch.FloatTensor)

    # Shuffle the training samples.
    order = np.random.permutation(len(train_sequence))
    train_sequence = train_sequence[order]

    return train_sequence.to(device), test_sequence.to(device)



def get_batch(source, i, batch_size):
    """Slice a mini-batch out of ``source`` starting at row ``i``.

    ``source`` is a tensor of shape (num_samples, seq_len, num_features + 1)
    whose last channel holds the target.  Returns ``(inputs, target)`` where
    ``inputs`` is all but the last channel and ``target`` is the last channel.

    Note: the batch is clipped at ``len(source) - 1``, so the final sample is
    never used — this mirrors the upstream PyTorch transformer tutorial.
    """
    seq_len = min(batch_size, len(source) - 1 - i)
    data = source[i:i + seq_len]
    # Renamed from ``input`` to avoid shadowing the builtin.
    inputs = data[:, :, :-1]   # all feature channels
    target = data[:, :, -1]    # last channel is the target
    return inputs, target


#### positional encoding ####
class PositionalEncoding(nn.Module):
    """Fixed sine/cosine positional encoding (Vaswani et al., 2017) for
    batch-first inputs of shape (batch, seq, d_model)."""

    def __init__(self, d_model, max_len=len_series):   # max_len = sequence length
        super(PositionalEncoding, self).__init__()
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # Geometric frequency progression: 10000^(-2k/d_model).
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)   # (1, max_len, d_model) for broadcasting over the batch
        # A buffer moves with .to(device) but is not a trainable parameter.
        self.register_buffer('pe', pe)

    def forward(self, x):
        # Bug fix: the original sliced the batch dimension
        # (``self.pe[:x.size(0), :]``), which only worked by accident because
        # pe's first dim is 1 and the sequence length equals max_len.  Slicing
        # by the sequence dimension gives the same output here and is also
        # correct for inputs shorter than max_len.
        return x + self.pe[:, :x.size(1), :]


#### model structure ####
class TransAm(nn.Module):
    """SDAE feature extractor followed by a Transformer encoder and a linear
    head that regresses one value per time step."""

    def __init__(self, feature_size=512, num_layers=4, dropout=0):
        super(TransAm, self).__init__()
        self.model_type = 'Transformer'
        self.src_mask = None                 # no causal/attention mask is applied
        self.src_key_padding_mask = None
        self.sdae = sdae                     # module-level stacked DAE (weights loaded above)
        self.pos_encoder = PositionalEncoding(feature_size)
        self.encoder_layer = TransformerEncoderLayer(d_model=feature_size, nhead=8, dropout=dropout)
        self.transformer_encoder = TransformerEncoder(self.encoder_layer, num_layers=num_layers)
        self.decoder = nn.Linear(feature_size, 1)
        self.init_weights()

    def init_weights(self):
        # Small uniform init for the regression head; bias starts at zero.
        bound = 0.1
        self.decoder.bias.data.zero_()
        self.decoder.weight.data.uniform_(-bound, bound)

    def forward(self, src, src_padding=None):
        self.src_key_padding_mask = src_padding

        # Flatten (batch, seq, raw_dim) -> (batch*seq, raw_dim) so the SDAE
        # encodes every time step independently, then restore the sequence.
        flat = src.view(-1, src.size(-1)).float()
        encoded, _ = self.sdae(flat)
        features = encoded.view(src.size(0), src.size(1), -1)

        features = self.pos_encoder(features)
        hidden = self.transformer_encoder(features, self.src_mask, self.src_key_padding_mask)
        return self.decoder(hidden)


def train(train_data):
    """Run one training epoch over ``train_data``.

    Uses the module-level ``model``, ``optimizer``, ``criterion`` (MSE),
    ``criterion_MAE``, ``scheduler`` and ``epoch``; logs running RMSE/MAE
    roughly five times per epoch and checkpoints the full model to
    ``transformer.pth``.
    """
    model.train()  # Turn on the train mode
    total_loss = 0.
    total_loss_MAE = 0.
    start_time = time.time()
    # Hoisted out of the loop; clamped to >= 1 so a small dataset cannot
    # trigger a ZeroDivisionError in the ``batch % log_interval`` check.
    log_interval = max(1, int(len(train_data) / batch_size / 5))

    for batch, i in enumerate(range(0, len(train_data) - 1, batch_size)):
        data, targets = get_batch(train_data, i, batch_size)
        optimizer.zero_grad()
        output = model(data)
        output = output.reshape(-1, len_series)

        loss = criterion(output, targets).sqrt()   # sqrt(MSE) = RMSE
        loss_MAE = criterion_MAE(output, targets)
        loss.backward()

        torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)   # gradient clipping
        optimizer.step()

        total_loss += loss.item()
        total_loss_MAE += loss_MAE.item()
        if batch % log_interval == 0 and batch > 0:
            cur_loss = total_loss / log_interval
            cur_MAE = total_loss_MAE / log_interval
            elapsed = time.time() - start_time
            print('| train epoch {:3d} | {:5d}/{:5d} batches | lr {:02.6f} | {:5.2f} ms | RMSE_loss {:5.5f} | MAE_loss {:5.5f}'.format(
                                        epoch, batch, len(train_data) // batch_size, scheduler.get_last_lr()[0],
                                        elapsed * 1000 / log_interval, cur_loss, cur_MAE
                                        )
                )
            total_loss = 0
            total_loss_MAE = 0
            start_time = time.time()
    # Checkpoint the whole model every epoch.  The original immediately
    # re-loaded the file into an unused local — dead code, removed.
    torch.save(model, 'transformer.pth')

def plot_and_loss(eval_model, data_source, epoch):
    """Run validation, plot predictions vs. ground truth, and save artefacts.

    Writes truth/prediction/difference text files plus PNG and SVG plots into
    the ``{vail_index}_{batch_size}/`` directory (assumed to already exist —
    TODO confirm it is created elsewhere).

    Returns a 6-tuple: (avg RMSE, flattened predictions, flattened labels,
    per-sequence predictions, per-sequence labels, avg MAE).

    NOTE(review): both averages divide by ``i`` — the start index of the last
    mini-batch — not by the number of batches, so they are only proportional
    to the true mean; the training loop recomputes val_loss via ``evaluate``.
    """
    eval_model.eval()
    total_loss = 0.
    total_loss_MAE = 0.
    eval_batch_size = batch_size
    test_result = torch.Tensor(0)   # flattened predictions accumulated over batches
    truth = torch.Tensor(0)         # flattened ground-truth labels
    test_result1 = torch.Tensor(0)  # predictions kept in (sample, seq) shape
    truth1 = torch.Tensor(0)        # labels kept in (sample, seq) shape
    with torch.no_grad():
        for i in range(0, len(data_source) - 1, eval_batch_size):
            data, target = get_batch(data_source, i, eval_batch_size)
            # look like the model returns static values for the output window
            output = eval_model(data)
            output = output.reshape(-1,len_series)
            total_loss += criterion(output, target).sqrt().item()
            total_loss_MAE += criterion_MAE(output, target).item()

            test_result = torch.cat((test_result, output.squeeze(1).view(-1).cpu()),
                                    0)  # todo: check this. -> looks good to me
            truth = torch.cat((truth, target.squeeze(1).view(-1).cpu()), 0)
            test_result1 = torch.cat((test_result1, output.squeeze(1).cpu()),
                                     0)  # todo: check this. -> looks good to me
            
            truth1 = torch.cat((truth1, target.squeeze(1).cpu()), 0)
    
    # Plot label strings are runtime data and intentionally left unchanged.
    pyplot.plot(truth, color="blue", alpha=0.5,label='标签')
    pyplot.plot(test_result, color="red", alpha=0.5,label='实验结果')
    pyplot.plot(test_result - truth, color="green", alpha=0.8,label='实验结果与标签差值')
    np.savetxt('{}_{}/truth_epo{}.txt'.format(vail_index,batch_size,epoch), truth,fmt='%.5f')
    np.savetxt('{}_{}/test_result_epo{}.txt'.format(vail_index,batch_size,epoch), test_result,fmt='%.5f')
    np.savetxt('{}_{}/test_result_truth_epo{}.txt'.format(vail_index,batch_size,epoch), test_result - truth,fmt='%.5f')
    pyplot.grid(True, which='both')
    pyplot.axhline(y=0, color='k')
    # pyplot.ylim((-2, 2))
    pyplot.legend(fontsize=13)
    pyplot.xlabel('样本索引',fontsize=13)
    pyplot.ylabel('剩余寿命占比',fontsize=13)
    pyplot.tight_layout()
    pyplot.savefig('{}_{}/epo{}.png'.format(vail_index,batch_size,epoch))
    pyplot.savefig('{}_{}/epo{}.svg'.format(vail_index,batch_size,epoch))
    pyplot.close()


    return total_loss / i, test_result, truth, test_result1, truth1 , total_loss_MAE/i 



def evaluate(eval_model, data_source):
    """Compute sample-weighted RMSE and MAE of ``eval_model`` over
    ``data_source`` (the last sample is skipped, matching ``get_batch``)."""
    eval_model.eval()  # disable training-only behaviour (e.g. dropout)
    sum_sq = 0.
    sum_abs = 0.
    with torch.no_grad():
        for start in range(0, len(data_source) - 1, batch_size):
            data, targets = get_batch(data_source, start, batch_size)
            preds = eval_model(data).reshape(-1, len_series)
            n_samples = preds.shape[0]
            # Weight each batch by its sample count so uneven final batches
            # do not skew the average.
            sum_sq += n_samples * criterion(preds, targets).cpu().item()
            sum_abs += n_samples * criterion_MAE(preds, targets).cpu().item()
    return (sum_sq / len(data_source)) ** 0.5, sum_abs / len(data_source)

# ---- model / optimizer setup and the main training loop ----
train_data, val_data = get_data()
model = TransAm().to(device)

criterion = nn.MSELoss()
criterion_MAE = nn.L1Loss()

lr = 0.00001
# optimizer = torch.optim.SGD(model.parameters(), lr=lr)
optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
# Decay the learning rate by a factor of 0.96 every 3 epochs.
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 3, gamma=0.96)
from torch.optim.lr_scheduler import StepLR
best_val_loss = float("inf")
epochs = 300  # The number of epochs
best_model = None

for epoch in range(1, epochs + 1):
    epoch_start_time = time.time()
    train(train_data)                                        # one training epoch
    train_loss, train_loss_MAE = evaluate(model, train_data)

    # Bug fix: ``epoch % 1 is 0`` compared ints by identity — undefined for
    # arbitrary ints and a SyntaxWarning on modern CPython; ``==`` is correct
    # and identical here.  ``epoch % 1`` is always 0, so this runs every epoch.
    if epoch % 1 == 0:
        val_loss, tran_output, tran_true, tran_output5, tran_true5, _ = plot_and_loss(model, val_data, epoch)
        # NOTE(review): val_loss from plot_and_loss is immediately overwritten
        # below (kept as-is to preserve the original control flow).
        val_loss, val_loss_MAE = evaluate(model, val_data)

    print('-' * 89)
    print('| end of epoch {:3d} | time: {:5.2f}s | valid loss RMSE {:5.5f} | valid loss MAE {:5.5f}| train loss {:5.5f} '.format(
        epoch, (time.time() - epoch_start_time),
        val_loss, val_loss_MAE, train_loss)) 
    print('-' * 89)
    scheduler.step()




