import os

import matplotlib.pyplot as plt
import torch
from torch import nn
from tqdm import tqdm

from examples.dataset import *
from spdnet.spd import SPDTransform, SPDTangentSpace, SPDRectified
from spdnet.optimizer import StiefelMetaOptimizer
from spdnet.spd_res2net import SPDRes2Block

class Net(nn.Module):
    """SPDNet classifier: three BiMap (SPDTransform) + ReEig (SPDRectified)
    stages, an SPD Res2Net block, a tangent-space projection, and a final
    linear layer producing 7 class logits.

    Input is expected to be a batch of 400x400 SPD matrices; the tangent
    space of a 50x50 SPD matrix has 50*51/2 = 1275 dimensions, which is
    the linear layer's input size.
    """

    def __init__(self):
        super(Net, self).__init__()
        # NOTE: attribute names are load-bearing — saved checkpoints and
        # named_parameters() depend on them, so they must not be renamed.
        self.trans1 = SPDTransform(400, 200)
        self.trans2 = SPDTransform(200, 100)
        self.trans3 = SPDTransform(100, 50)
        self.rect1 = SPDRectified()
        self.rect2 = SPDRectified()
        self.rect3 = SPDRectified()
        self.res2block = SPDRes2Block(50, 50, 4)
        self.tangent = SPDTangentSpace(50)
        self.linear = nn.Linear(1275, 7, bias=True)

    def forward(self, x):
        """Run the SPD pipeline and return raw class logits."""
        pipeline = (
            self.trans1, self.rect1,
            self.trans2, self.rect2,
            self.trans3, self.rect3,
            self.res2block,
            self.tangent,
            self.linear,
        )
        for stage in pipeline:
            x = stage(x)
        return x

# NOTE(review): dead commented-out setup below — it duplicates the live
# version inside the __main__ guard at the bottom of this file. Safe to
# delete once confirmed unused.
# transformed_dataset = AfewDataset(train=True)
# dataloader = DataLoader(transformed_dataset, batch_size=30,
#                     shuffle=True, num_workers=4)
#
# transformed_dataset_val = AfewDataset(train=False)
# dataloader_val = DataLoader(transformed_dataset_val, batch_size=30,
#                     shuffle=False, num_workers=4)
#
# use_cuda = True
# model = Net()
# if use_cuda:
#     model = model.cuda()
# criterion = nn.CrossEntropyLoss()
# optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
# # optimizer = torch.optim.Adadelta(model.parameters())
# # optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
# optimizer = StiefelMetaOptimizer(optimizer)

# Training
def train(epoch):
    """Run one training epoch over the global `dataloader`.

    Relies on module-level globals set up in the __main__ block:
    `model`, `dataloader`, `criterion`, `optimizer`, `use_cuda`,
    `epoch_train_loss`, `epoch_train_acc`.

    Appends the epoch's mean loss and accuracy to the tracking lists,
    saves an updated loss/accuracy plot under ./loss/, and returns
    (mean_loss, accuracy_percent).
    """
    print('\nEpoch: %d' % epoch)
    model.train()
    train_loss = 0.0
    correct = 0.0
    total = 0.0
    num_batches = 0  # guards against an empty dataloader (original raised NameError)
    bar = tqdm(enumerate(dataloader))
    for batch_idx, sample_batched in bar:
        num_batches = batch_idx + 1
        inputs = sample_batched['data']
        targets = sample_batched['label'].squeeze()

        if use_cuda:
            inputs = inputs.cuda()
            targets = targets.cuda()

        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()

        # .item() replaces the deprecated .data.item() access pattern.
        train_loss += loss.item()
        _, predicted = torch.max(outputs, 1)
        total += targets.size(0)
        correct += predicted.eq(targets).cpu().sum().item()

        bar.set_description('Loss: %.3f | Acc: %.3f%% (%d/%d)'
            % (train_loss / num_batches, 100. * correct / total, correct, total))

    # Avoid ZeroDivisionError when the loader yields no batches.
    num_batches = max(num_batches, 1)
    total = max(total, 1.0)

    epoch_train_loss.append(train_loss / num_batches)
    epoch_train_acc.append(correct / total)

    if not os.path.isdir('loss'):
        os.mkdir('loss')
    plt.plot(list(range(len(epoch_train_loss))), epoch_train_loss, label='train_loss', color='blue')
    plt.plot(list(range(len(epoch_train_acc))), epoch_train_acc, label='train_acc', color='red')
    plt.title("res2net_v2_lr0.1_train")
    plt.legend()
    plt.savefig('./loss/res2net_v2_lr0.1_train_loss_acc.jpg')
    plt.close()

    return (train_loss / num_batches, 100. * correct / total)

# best_acc = 0
def test(epoch):
    """Evaluate the global `model` on `dataloader_val` for one epoch.

    Relies on module-level globals set up in the __main__ block:
    `model`, `dataloader_val`, `criterion`, `use_cuda`, `best_acc`,
    `epoch_val_loss`, `epoch_val_acc`.

    Appends the epoch's mean loss and accuracy to the tracking lists,
    saves an updated plot under ./loss/, checkpoints the model to
    ./checkpoint/ whenever accuracy improves on `best_acc`, and returns
    (mean_loss, accuracy_percent).
    """
    global best_acc
    model.eval()
    test_loss = 0.0
    correct = 0.0
    total = 0.0
    num_batches = 0  # guards against an empty dataloader (original raised NameError)
    bar = tqdm(enumerate(dataloader_val))
    # no_grad: inference only — identical outputs, no autograd graph kept.
    with torch.no_grad():
        for batch_idx, sample_batched in bar:
            num_batches = batch_idx + 1
            inputs = sample_batched['data']
            targets = sample_batched['label'].squeeze()

            if use_cuda:
                inputs = inputs.cuda()
                targets = targets.cuda()

            outputs = model(inputs)
            loss = criterion(outputs, targets)

            # .item() replaces the deprecated .data.item() access pattern.
            test_loss += loss.item()
            _, predicted = torch.max(outputs, 1)
            total += targets.size(0)
            correct += predicted.eq(targets).cpu().sum().item()

            bar.set_description('Loss: %.3f | Acc: %.3f%% (%d/%d)'
                % (test_loss / num_batches, 100. * correct / total, correct, total))

    # Avoid ZeroDivisionError when the loader yields no batches.
    num_batches = max(num_batches, 1)
    total = max(total, 1.0)

    epoch_val_loss.append(test_loss / num_batches)
    epoch_val_acc.append(correct / total)

    if not os.path.isdir('loss'):
        os.mkdir('loss')
    plt.plot(list(range(len(epoch_val_loss))), epoch_val_loss, label='val_loss', color='blue')
    plt.plot(list(range(len(epoch_val_acc))), epoch_val_acc, label='val_acc', color='red')
    plt.title("res2net_v2_lr0.1_val")
    plt.legend()
    plt.savefig('./loss/res2net_v2_lr0.1_val_loss_acc.jpg')
    plt.close()

    # Save checkpoint whenever validation accuracy improves.
    acc = 100. * correct / total
    if acc > best_acc:
        print('Saving..')
        # NOTE(review): this pickles the whole model object, not just the
        # state_dict — loading ties the checkpoint to this exact class
        # definition; consider state_dict() instead.
        state = {
            'net': model,
            'acc': acc,
            'epoch': epoch,
        }
        if not os.path.isdir('checkpoint'):
            os.mkdir('checkpoint')
        torch.save(state, './checkpoint/res2net_v2_lr0.1_ckpt.t7')
        best_acc = acc

    return (test_loss / num_batches, 100. * correct / total)

# log_file = open('log.txt', 'a')


if __name__ == '__main__':
    transformed_dataset = AfewDataset(train=True)
    dataloader = DataLoader(transformed_dataset, batch_size=30,
                            shuffle=True, num_workers=4)

    transformed_dataset_val = AfewDataset(train=False)
    dataloader_val = DataLoader(transformed_dataset_val, batch_size=30,
                                shuffle=False, num_workers=4)

    start_epoch = 1
    best_acc = 0
    epoch_train_loss = []
    epoch_train_acc = []
    epoch_val_loss = []
    epoch_val_acc = []

    use_cuda = True
    resume = False  # set True to resume training from the last checkpoint
    model = Net()
    if resume:
        print('===> Try resume from checkpoint')
        if os.path.isdir('./checkpoint'):
            try:
                # Bug fix: load the same file that test() saves
                # ('res2net_v2_lr0.1_ckpt.t7'); the original loaded
                # 'res2net_v2_ckpt.t7' and could never find its own checkpoint.
                checkpoint = torch.load('./checkpoint/res2net_v2_lr0.1_ckpt.t7')
                model = checkpoint['net']  # read fields back from the checkpoint dict
                start_epoch = checkpoint['epoch'] + 1
                best_acc = checkpoint['acc']
                print('===> Load last checkpoint data')
                print('last epoch:{}'.format(checkpoint['epoch']))
            except FileNotFoundError:
                print("Can't find checkpoint res2net_v2_lr0.1_ckpt.t7")
        else:
            # No checkpoint directory: keep start_epoch = 1 (the original
            # inconsistently reset it to 0 here, duplicating epoch numbering).
            print('===> Start from scratch')

    if use_cuda:
        model = model.cuda()
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    # optimizer = torch.optim.Adadelta(model.parameters())
    # optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
    # Wrap the base optimizer so Stiefel-manifold parameters get
    # Riemannian updates.
    optimizer = StiefelMetaOptimizer(optimizer)

    # Context manager guarantees the log file is closed even if an
    # epoch raises (the original leaked the handle on exceptions).
    with open('res2net_v2_lr0.1_log.txt', 'a') as log_file:
        for epoch in range(start_epoch, 200 + 1):
            train_loss, train_acc = train(epoch)
            test_loss, test_acc = test(epoch)

            log_file.write('%d,%f,%f,%f,%f\n'
                           % (epoch, train_loss, train_acc, test_loss, test_acc))
            log_file.flush()