# -*- coding: utf-8 -*-
"""
Created on Mon Apr 25 16:56:34 2022

@author: Fusong
"""
import torch
import torch.nn as nn
from torch.utils.data import DataLoader,TensorDataset
import torch.optim as optim
import numpy as np
from sklearn.metrics import accuracy_score,recall_score, f1_score,precision_score
import warnings
warnings.filterwarnings("ignore")
from tools import ForeverDataIterator

class ConvNet(nn.Module):
    """A small 1-D CNN: two conv blocks, global average pooling, one FC layer.

    Each conv block is Conv1d -> ReLU -> BatchNorm1d with kernel 3, stride 1,
    padding 1, so the temporal length is preserved until the pooling stage.
    ``forward`` returns both the class logits and the 16-dim pooled feature
    vector (useful for downstream feature analysis).
    """

    def __init__(self, outputsize):
        super(ConvNet, self).__init__()
        # Block 1: 1 -> 8 channels (length preserved).
        self.conv1 = nn.Sequential(
            nn.Conv1d(in_channels=1, out_channels=8, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.BatchNorm1d(8),
        )
        # Block 2: 8 -> 16 channels (length preserved).
        self.conv2 = nn.Sequential(
            nn.Conv1d(in_channels=8, out_channels=16, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.BatchNorm1d(16),
        )
        # Collapse the temporal axis to a single value per channel.
        self.pool = nn.AdaptiveAvgPool1d(1)
        self.classifier = nn.Sequential(nn.Linear(in_features=16, out_features=outputsize))

    def forward(self, x):
        """Map (N, 1, L) input to (logits of shape (N, outputsize), features of shape (N, 16))."""
        features = self.conv2(self.conv1(x))
        pooled = self.pool(features)
        flat = pooled.view(-1, pooled.shape[1])  # (N, 16)
        return self.classifier(flat), flat
#%%
class trainModel(object):
    """Train a ConvNet classifier on numpy arrays and evaluate every epoch.

    Hyper-parameters come from the ``args`` dict: ``learning_rate``,
    ``number_epoch``, ``batch_size``, ``weight_decay``, ``lr_decay``
    (per-epoch multiplicative LR decay), and ``outputsize`` (class count).
    """

    def __init__(self, args, train_samples, labels_train, test_samples, labels_test):
        # Raw numpy arrays; converted to tensors inside train().
        self.train_samples = train_samples
        self.test_samples = test_samples
        self.labels_train = labels_train
        self.labels_test = labels_test
        self.args = args
        self.learning_rate = args['learning_rate']
        self.number_epoch = args['number_epoch']
        self.batch_size = args['batch_size']
        self.weight_decay = args['weight_decay']
        self.lr_decay = args['lr_decay']
        self.outputsize = args['outputsize']

    def train(self, device):
        """Run the full train/eval loop on ``device``.

        Returns per-epoch lists: train_loss, valid_loss, train_acc,
        valid_acc, precision, recall, f1 (macro-averaged), result_pre
        (predicted labels per epoch), step_run (always 0 — never updated
        in this implementation), and feature_pre_fianl (pooled features
        per epoch).

        Side effects: saves the whole model to ``./mynetds.pth`` whenever
        training accuracy reaches 1.0, and prints a progress line per epoch.
        """
        x_train = torch.from_numpy(self.train_samples)
        y_train = torch.from_numpy(self.labels_train)
        # print(x_train.shape[0])
        # print(y_train.shape[0])
        train_source_dataset = TensorDataset(x_train,y_train)
        train_source_loader = DataLoader(train_source_dataset, batch_size = self.batch_size,
                                     shuffle=True,  drop_last=True)
        # ForeverDataIterator cycles the loader endlessly so the inner
        # while-loop below can draw a fixed number of batches per epoch.
        train_source_iter = ForeverDataIterator(train_source_loader)

        x_test = torch.from_numpy(self.test_samples)  # test-set loader
        y_test = torch.from_numpy(self.labels_test)
        target_dataset_test = TensorDataset(x_test,y_test)
        # shuffle=False so concatenated predictions line up with labels_test.
        test_target_loader = DataLoader(target_dataset_test,batch_size = self.batch_size,
                                         shuffle=False)
        mynet = ConvNet(self.outputsize).to(device)

        # Per-epoch history lists returned to the caller.
        train_loss = []
        train_acc = []
        valid_loss = []
        valid_acc = []
        recall = []
        precision = []
        f1 = []
        result_pre = []   # predicted test labels, one array per epoch

        feature_pre_fianl = []   # pooled 16-dim test features, one array per epoch

        # NOTE(review): despite the name, this variable is later assigned the
        # training accuracy (1.0) in the model-save branch, not a loss.
        min_valid_loss = -1
        step_run = 0   # returned as-is; never incremented anywhere below
        count = 0      # consecutive epochs with perfect training accuracy

        for p in mynet.parameters():
            p.requires_grad = True
        class_loss = nn.CrossEntropyLoss()
        for epoch in range(self.number_epoch):
            mynet.train()
            # Manual exponential LR decay: a fresh Adam optimizer is built
            # each epoch, which also discards Adam's moment estimates.
            lr = self.learning_rate*(self.lr_decay**epoch)
            optimizer = optim.Adam(mynet.parameters(), lr=lr, weight_decay = self.weight_decay)
            len_dataloader = len(train_source_loader)   # computed but unused below

            # train_source_iter = iter(train_source_loader)
            i = 0
            total_train_loss = []
            total_train_acc = []
            # Fixed 50 mini-batches per epoch regardless of dataset size.
            while i < 50:
                # train_source = train_source_iter.next()
                train_source = next(train_source_iter)
                x_s, labels_s = train_source
                x_s = x_s.float().to(device)
                labels_s = labels_s.float().to(device)

                y_pre,_ = mynet(x_s)
                loss = class_loss(y_pre, labels_s.long())
                total_train_loss.append(loss.item())

                # print(y_pre.shape)
                # print(labels_s.shape)
                label_pre = np.argmax(y_pre.detach().cpu().numpy(), axis=1)
                # print(label_pre)
                # NOTE(review): arguments are (y_pred, y_true); harmless here
                # since accuracy_score is symmetric in its two label arrays.
                acc = accuracy_score(label_pre, labels_s.detach().cpu().numpy())
                total_train_acc.append(acc)

                # compute gradient and do SGD step
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                i = i + 1
            train_loss.append(np.mean(total_train_loss))  # store epoch-mean training loss
            train_acc.append(np.mean(total_train_acc))


            # ---- evaluation on the test set ----
            total_valid_loss = []
            total_valid_acc = []   # accumulates predicted labels (not accuracies)
            feature_pre = []
            mynet.eval()
            for step, (b_x, b_y) in enumerate(test_target_loader):
                b_x = b_x.float().to(device)
                b_y = b_y.float().to(device)

                with torch.no_grad():
                    prediction,feature = mynet(b_x) #
                    # print(prediction.shape)
                    # print(b_y.shape)


                    loss_pre = class_loss(prediction, b_y.long())         # calculate loss
                    total_valid_loss.append(loss_pre.item())
                    label_pre = np.argmax(prediction.detach().cpu().numpy(), axis=1)
                    feature_p = feature.detach().cpu().numpy()
                    # print(label_pre)
                    feature_pre.append(feature_p)
                    total_valid_acc.append(label_pre)
                    # acc = accuracy_score(np.argmax(prediction.detach().cpu().numpy(), axis=1), b_y.detach().cpu().numpy())


                # total_valid_acc.append(acc)
            # Flatten per-batch arrays; order matches labels_test (shuffle=False).
            total_valid_acc = np.concatenate(total_valid_acc)
            feature_pre = np.concatenate(feature_pre)


            result_pre.append(total_valid_acc)
            # print(total_valid_acc.shape)
            valid_loss.append(np.mean(total_valid_loss))
            feature_pre_fianl.append(feature_pre)

            valid_acc.append(accuracy_score(self.labels_test, total_valid_acc ))

            precision.append(precision_score(self.labels_test, total_valid_acc,average='macro'))
            recall.append(recall_score(self.labels_test, total_valid_acc,average='macro'))
            f1.append(f1_score(self.labels_test, total_valid_acc,average='macro'))


            # Save the whole model each epoch training accuracy is perfect;
            # count consecutive perfect epochs for the early-exit below.
            if (train_acc[-1] == 1):
                  count += 1
                  torch.save(mynet,'./mynetds.pth')
                  min_valid_loss = train_acc[-1]
            else:
                count =0

            # step_run.append(j)
            print( 'iter: [{:d}/{:d}], train_loss: {:0.6f}, train_Acc: {:0.6f}, valid_loss: {:0.6f}, valid_Acc: {:0.6f},'
                       'best_valid_loss: {:0.6f}'.format(              epoch, self.number_epoch,
                                                                      train_loss[-1],
                                                                      train_acc[-1],
                                                                      valid_loss[-1],
                                                                      valid_acc[-1],
                                                                      min_valid_loss))
            # print(count)
            # if train_acc[-1] == 1:
            #     break
            # Stop after 10 consecutive epochs of perfect training accuracy.
            if count == 10:
                break


            # if len(valid_acc)>10:

            #        # if for many consecutive epochs
            #     if len(valid_acc)>100 or (valid_acc[-1] - valid_acc[-10] <= 0.01):
            #          print("Early stopping")
            #       # stop model training
                     # break
        return  train_loss, valid_loss, train_acc, valid_acc, precision, recall, f1, result_pre, step_run,feature_pre_fianl
#%%
# mynet = ConvNet()
# train = torch.randn(100, 1, 1024).detach().cpu().numpy()
# label = np.zeros((100))
# label[50:] = 1

# # out = mynet(input)
# # #%%
# # input1 = torch.randn(32, 1024, 1)
# # input3 = input1.detach().cpu().numpy()
# # input2 = input1.view(-1, 1024)
# # input4 = input2.detach().cpu().numpy()
# args = {'learning_rate': 0.1,
#         'number_epoch': 100,
#         'batch_size': 6,
#         'weight_decay':  0.0001,
#         'lr_decay': 0.96
#        }
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# model = trainModel(args, train, label, train, label)
# model.train(device)
#%%
# from sklearn.metrics import accuracy_score, recall_score, f1_score,precision_score
# from sklearn.metrics import classification_report
# y_true =  [0,0,0,1,1,1,1,2,2,2]
# y_pred =  [0,1,0,1,0,1,1,1,2,1]
# acc = accuracy_score(y_true, y_pred)
# pre = precision_score(y_true, y_pred,average='macro')
# recall = recall_score(y_true, y_pred,average='macro')
# f1_score = f1_score(y_true, y_pred, average='macro')

# print(classification_report(y_true, y_pred))