import sys
import random
import os
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.nn as nn
import torch.utils.data
from torch.autograd import Variable
from data_loader import SigLoader, get_data
from torchvision import datasets
from torchvision import transforms
import numpy as np
import augmentations
import time

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

class_num = 28  # number of target classes
data_root = "data//"  # dataset root directory (note the double slash — harmless but likely a typo)
data_name = 'SingleDay'

save_path = 'results'  # per-repeat sub-directory for metric logs
model_root = 'weights'  # per-repeat sub-directory for model checkpoints
cudnn.benchmark = True  # let cuDNN auto-tune kernels (fine for fixed input sizes)
lr = 1e-4  # Adam learning rate
batch_size = 100
test_batch_size = 250
n_epoch = 50  # number of training epochs
save_epoch = n_epoch  # checkpoint every `save_epoch` epochs (here: once, after the last epoch)
log_step = 50  # print training loss every `log_step` batches
repeatn = 1  # number of independent training repeats (fresh seed/model each)
drop_out = 0.0  # NOTE(review): unused in this file — confirm before removing
train_frac = 0.1  # passed to get_data; presumably the training split fraction — TODO confirm
test_frac = 0.5  # passed to get_data; presumably the test split fraction — TODO confirm


# sp_len = 8192
# sample_len = 8192

class FCN(nn.Module):
    """Fully-connected classifier over a two-channel signal.

    The forward pass concatenates the two channels of a
    (batch, 2, in_dim // 2) input into a single (batch, in_dim) vector,
    embeds it with a 3-layer MLP, and returns log-probabilities over
    ``num_classes`` classes together with the 512-d embedding.
    """

    def __init__(self, in_dim=512, hidden_dim=1024, num_classes=28):
        """
        Args:
            in_dim: length of the concatenated two-channel input
                (each channel contributes ``in_dim // 2`` features).
                Default 512 matches the original hard-coded network.
            hidden_dim: width of the two hidden layers.
            num_classes: number of output classes.
        """
        super(FCN, self).__init__()
        self.net = nn.Sequential(
            nn.Linear(in_dim, hidden_dim),
            nn.ReLU(),
            nn.BatchNorm1d(hidden_dim),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.BatchNorm1d(hidden_dim),
            nn.Linear(hidden_dim, in_dim)
        )
        self.fc = nn.Linear(in_dim, num_classes)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, x):
        """Return ``(log_probs, features)`` for a (batch, 2, in_dim // 2) input."""
        # Flatten the two channels into one feature vector of length in_dim.
        x = torch.cat([x[:, 0, :], x[:, 1, :]], dim=-1)
        feat = self.net(x)
        logits = self.fc(feat)
        return self.softmax(logits), feat


def test(dataloader):
    """Evaluate the module-level ``my_net`` on ``dataloader``.

    Relies on the module globals ``my_net``, ``loss_class``, ``class_num``
    and ``device``.

    Returns:
        (accuracy, mean_batch_loss, confusion) where ``confusion[p, t]``
        counts predictions ``p`` against true labels ``t``.
    """
    my_net.eval()  # once is enough; nothing switches the net back inside the loop
    n_test_correct = 0
    loss_test_sum = 0.
    conf = np.zeros([class_num, class_num])

    with torch.no_grad():
        for img, label in dataloader:
            img, label = img.float().to(device), label.to(device)

            class_output, _ = my_net(img)
            err_label = loss_class(class_output, label)

            pred = class_output.max(1, keepdim=True)[1]
            n_test_correct += int(pred.eq(label.view_as(pred)).cpu().sum())
            loss_test_sum += float(err_label)
            # BUGFIX: plain fancy-index `conf[p, t] += 1` counts duplicate
            # (pred, label) pairs within a batch only once; np.add.at
            # accumulates every occurrence. view(-1) also avoids the
            # np.squeeze 0-d collapse when the last batch has one sample.
            np.add.at(conf,
                      (pred.view(-1).cpu().numpy(), label.view(-1).cpu().numpy()),
                      1.0)

    return (float(n_test_correct) / float(len(dataloader.dataset)),
            loss_test_sum / float(len(dataloader)),
            conf)


if __name__ == '__main__':

    for rpi in range(repeatn):

        # Fresh seed per repeat so each run is an independent trial.
        manual_seed = random.randint(1, 10000)
        random.seed(manual_seed)
        torch.manual_seed(manual_seed)

        # NOTE(review): these transforms are built but never handed to
        # SigLoader below, so normalization is currently NOT applied —
        # confirm against SigLoader's signature.
        train_transform = transforms.Compose([
            augmentations.Normalize(),
        ])
        test_transform = transforms.Compose([
            augmentations.Normalize(),
        ])

        sig_train, id_train, sig_test, id_test = get_data(data_root, data_name, train_frac, test_frac)

        dataset_train = SigLoader(sig_train, id_train)
        dataset_test = SigLoader(sig_test, id_test)

        dataloader_train = torch.utils.data.DataLoader(
            dataset=dataset_train,
            batch_size=batch_size,
            shuffle=True,
            drop_last=True,
            num_workers=0,
        )

        dataloader_test = torch.utils.data.DataLoader(
            dataset=dataset_test,
            batch_size=test_batch_size,
            shuffle=True,
            drop_last=False,
            num_workers=0,
        )

        my_net = FCN().to(device)
        print(my_net)
        trainable_num = sum(p.numel() for p in my_net.parameters() if p.requires_grad)
        print('number of trainable parameters:', trainable_num)

        optimizer = optim.Adam(my_net.parameters(), lr=lr)

        # NLLLoss pairs with the LogSoftmax output of FCN.
        loss_class = torch.nn.NLLLoss().to(device)

        acc_train = np.zeros([n_epoch, ])
        loss_train = np.zeros([n_epoch, ])
        acc_test = np.zeros([n_epoch, ])
        loss_test = np.zeros([n_epoch, ])

        # Per-repeat output directories: <rpi>/results and <rpi>/weights.
        # makedirs(exist_ok=True) replaces the racy exists()+mkdir() pattern.
        result_dir = os.path.join(str(rpi), save_path)
        weight_dir = os.path.join(str(rpi), model_root)
        os.makedirs(result_dir, exist_ok=True)
        os.makedirs(weight_dir, exist_ok=True)

        for epoch in range(n_epoch):

            # train() once per epoch; test() below flips to eval() afterwards.
            my_net.train()
            n_train_correct = 0
            loss_train_sum = 0.

            # BUGFIX: the old loop assigned `batch_size = len(label)`, a dead
            # store that shadowed the module-level batch_size used to build
            # the dataloaders on subsequent repeats. It is removed here.
            for i, (img, label) in enumerate(dataloader_train, start=1):
                img, label = img.float().to(device), label.to(device)

                class_output, _ = my_net(img)
                err_label = loss_class(class_output, label)

                optimizer.zero_grad()
                err_label.backward()
                optimizer.step()

                pred = class_output.data.max(1, keepdim=True)[1]
                n_train_correct += int(pred.eq(label.data.view_as(pred)).cpu().sum())
                loss_train_sum += float(err_label)

                if i % log_step == 0:
                    # float(err_label) so a scalar, not a tensor repr, is logged.
                    print('Epoch [{}/{}] Step [{}/{}]: loss class={:.5f}'.format(
                        epoch, n_epoch, i, len(dataloader_train), float(err_label)))

            acc_train[epoch] = float(n_train_correct) / float(len(dataset_train))
            loss_train[epoch] = loss_train_sum / float(len(dataloader_train))

            # Time the full test-set evaluation.
            start_time = time.time()
            acc_test[epoch], loss_test[epoch], conf_test = test(dataloader_test)
            print(time.time() - start_time)

            # Re-save the full metric arrays each epoch (cheap, crash-safe).
            np.savetxt(os.path.join(result_dir, 'acc_train.txt'), acc_train)
            np.savetxt(os.path.join(result_dir, 'loss_train.txt'), loss_train)
            np.savetxt(os.path.join(result_dir, 'acc_test.txt'), acc_test)
            np.savetxt(os.path.join(result_dir, 'loss_test.txt'), loss_test)

            if (epoch + 1) % save_epoch == 0:
                # Saves the whole module object (original behavior), not just
                # the state_dict, so existing loading code keeps working.
                torch.save(my_net, os.path.join(weight_dir, 'model_epoch_{}.pth'.format(epoch)))

            print('epoch: {} train acc: {:.4f} train loss: {:.4f} test acc: {:.4f} test loss: {:.4f}'.format(
                epoch, acc_train[epoch], loss_train[epoch], acc_test[epoch], loss_test[epoch]))

        print('done')
