import os
import torch
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F
from utils import precision, recall, fmeasure, sigmoid, accuracy
from data_load import data_generator
# from tcn import TCN
from origin_tcn import TCN as O_TCN
# from resnet import resnet18, resnet34
# from focal_loss import FocalLoss
import numpy as np
import argparse
import time as t

# Command-line configuration.
# NOTE(review): two flags have surprising semantics, kept for backward
# compatibility with existing invocations:
#   * --cuda uses action='store_false' with default=True, so PASSING --cuda
#     actually DISABLES CUDA; omit the flag to run on the GPU.
#   * -resume takes a value (e.g. `-resume 1`); any non-empty string is truthy.
parser = argparse.ArgumentParser(description='Sequence Modeling - (Permuted) Sequential MNIST')
parser.add_argument('--batch_size', type=int, default=8, metavar='N',
                    help='batch size (default: 8)')
parser.add_argument('--cuda', action='store_false', default=True,
                    help='disable CUDA (CUDA is enabled by default; passing this flag turns it off)')
parser.add_argument('--dropout', type=float, default=0.2,
                    help='dropout applied to layers (default: 0.2)')
parser.add_argument('--clip', type=float, default=-1,
                    help='gradient clip, -1 means no clip (default: -1)')
parser.add_argument('--epochs', type=int, default=1000,
                    help='upper epoch limit (default: 1000)')
parser.add_argument('--ksize', type=int, default=7,
                    help='kernel size (default: 7)')
parser.add_argument('--levels', type=int, default=4,
                    help='# of levels (default: 4)')
parser.add_argument('--log-interval', type=int, default=1000, metavar='N',
                    help='report interval (default: 1000)')
parser.add_argument('--lr', type=float, default=1e-3,
                    help='initial learning rate (default: 1e-3)')
parser.add_argument('--optim', type=str, default='SGD',
                    help='optimizer to use (default: SGD)')
parser.add_argument('--nhid', type=int, default=25,
                    help='number of hidden units per layer (default: 25)')
parser.add_argument('--seed', type=int, default=1111,
                    help='random seed (default: 1111)')
parser.add_argument('--permute', action='store_true',
                    help='use permuted MNIST (default: false)')
parser.add_argument('-resume', default=False, help='resume training flag')
args = parser.parse_args()

# Reproducibility: seed the CPU RNG (note: the CUDA RNG is not seeded here).
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    if not args.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")

batch_size = args.batch_size
n_classes = 1        # single binary output per sequence (BCELoss is used downstream)
input_channels = 13  # number of input feature channels per timestep
seq_length = 600     # fixed sequence length the model is built for
epochs = args.epochs
steps = 0

print(args)
# 80/20 train/test split; data_generator is project-local (data_load.py).
train_loader, test_loader = data_generator(split=0.8, bs=batch_size, seq_length=seq_length)
# weights = torch.tensor(weights.values, dtype=torch.float32)

# permute = torch.Tensor(np.random.permutation(784).astype(np.float64)).long()
channel_sizes = [args.nhid] * args.levels  # one hidden width per TCN level
kernel_size = args.ksize
epoch_start = 0  # overwritten below when resuming from a checkpoint
# model = TCN(input_channels, n_classes, channel_sizes, kernel_size=kernel_size, dropout=args.dropout)
model = O_TCN(input_channels, n_classes, channel_sizes, kernel_size=kernel_size, dropout=args.dropout, length=seq_length)
# model = resnet34()

lr = args.lr
# optimizer = getattr(optim, args.optim)(model.parameters(), lr=lr)
# optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=lr, weight_decay=5e-4)
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=lr, momentum=.9, weight_decay=5e-4)

if args.resume:
    # NOTE(review): resume path is hard-coded to epoch 100, and the optimizer
    # state is intentionally not restored, so SGD momentum restarts cold.
    checkpoint = torch.load('weights/checkpoint_epoch100.pt')
    model.load_state_dict(checkpoint['model'])
    epoch_start = checkpoint['epoch']
    # optimizer.load_state_dict(checkpoint['optimizer'])
    print('Checkpoint loaded')

if args.cuda:
    model.cuda()
    # permute = permute.cuda()


def train(ep):
    """Run one training epoch and append a summary row to train.csv.

    Relies on module-level globals: model, optimizer, train_loader, args,
    input_channels, seq_length, batch_size.

    Args:
        ep: 1-based epoch index, used only for logging.
    """
    train_loss = 0.0
    correct = 0
    model.train()
    # Build the criterion once instead of re-instantiating it every batch.
    # NOTE(review): BCELoss requires outputs already in (0, 1) — assumes the
    # model ends in a sigmoid; confirm against origin_tcn.py.
    criterion_bce = torch.nn.BCELoss()
    for batch_idx, (data, target) in enumerate(train_loader):
        if args.cuda:
            data, target = data.cuda(), target.cuda()

        data = data.view(-1, input_channels, seq_length)
        target = target.unsqueeze(1)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion_bce(output, target)
        loss.backward()
        optimizer.step()
        # Accumulate a plain float: summing the loss *tensor* would keep every
        # batch's autograd graph alive for the whole epoch (memory leak).
        train_loss += loss.item()
        y_true = target.cpu().numpy()
        y_pred = output.cpu().detach().numpy()
        acc, cor = accuracy(y_true, y_pred)
        correct += cor
        if batch_idx > 0 and batch_idx % args.log_interval == 0:
            # Precision/recall/F1 are only shown when logging, so compute them
            # here rather than on every batch.
            predict = precision(y_true, y_pred)
            rc = recall(y_true, y_pred)
            f1 = fmeasure(predict, rc)
            # Report the running mean over all batches seen so far; dividing
            # the cumulative loss by log_interval was only correct for the
            # first report of the epoch.
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.4f}\taccuracy:{:.2f}\tF1:{:.2f}'.format(
                ep, batch_idx, len(train_loader),
                100. * batch_idx / len(train_loader), train_loss / (batch_idx + 1), acc, f1))

    train_loss /= len(train_loader)
    total = len(train_loader) * batch_size  # renamed from `all` (shadowed builtin)
    print('Train Epoch: {}: Average loss: {:.4f}, Accuracy: {}\t{}\t({:.2f}%)'.format(
        ep, train_loss, correct, total, 100. * correct / total))
    with open('train.csv', 'a') as loss_file:
        loss_file.write('%d,%.4f,%.4f\n' % (ep, train_loss, correct / total))


def test():
    """Evaluate the model on the test set and append a row to test.csv.

    Relies on module-level globals: model, test_loader, args, input_channels,
    seq_length, batch_size, and `epoch` (bound by the __main__ loop), which is
    used only in log messages.

    Returns:
        (accuracy, average_loss) over the whole test set.
    """
    model.eval()
    test_loss = 0
    correct = 0
    criterion_bce = torch.nn.BCELoss()  # build once, not per batch
    show_interval = 1000  # batches between progress prints
    with torch.no_grad():
        for batch_index, (data, target) in enumerate(test_loader):
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            data = data.view(-1, input_channels, seq_length)
            target = target.unsqueeze(1)
            output = model(data)
            loss = criterion_bce(output, target)
            test_loss += loss.item()

            y_true = target.cpu().numpy()
            y_pred = output.cpu().detach().numpy()
            acc, cor = accuracy(y_true, y_pred)
            correct += cor
            if batch_index > 0 and batch_index % show_interval == 0:
                # Metrics below are only needed for this printout.
                predict = precision(y_true, y_pred)
                rc = recall(y_true, y_pred)
                f1 = fmeasure(predict, rc)
                # Running mean over batches seen so far; the old code divided
                # the cumulative loss by show_interval, which drifted wrong
                # after the first report.
                print('batch_index: [%d/%d]' % (batch_index, len(test_loader)),
                      'Test epoch: [%d]' % epoch,
                      'Loss: %.6f' % (test_loss / (batch_index + 1)),
                      'Accuracy:%.6f' % acc,
                      'Predict:%.6f' % predict,
                      'Recall:%.6f' % rc,
                      'F-measure:%.6f' % f1)

        test_loss /= len(test_loader)
        total = len(test_loader) * batch_size  # renamed from `all` (shadowed builtin)
        print('Test  Epoch: {}: Average loss: {:.4f}, Accuracy: {}\t{:4d}\t({:.2f}%)'.format(
            epoch, test_loss, correct, total, 100. * correct / total))
        with open('test.csv', 'a') as loss_file:
            loss_file.write('{},{:.4f},{:.4f}\n'.format(epoch, test_loss, correct / total))

        return correct / total, test_loss


if __name__ == "__main__":
    best_acc = 0
    # Start each run with fresh metric logs.
    if os.path.exists('train.csv'):
        os.remove('train.csv')
    if os.path.exists('test.csv'):
        os.remove('test.csv')
    # Checkpoints are written under weights/; create it up front so the first
    # torch.save does not fail with FileNotFoundError on a clean checkout.
    os.makedirs('weights', exist_ok=True)
    for epoch in range(epoch_start + 1, epochs + 1):
        train(epoch)
        acc, loss = test()
        # Snapshot every new best model (filename encodes the accuracy).
        if acc > best_acc:
            best_acc = acc
            checkpoint = {'epoch': epoch,
                          'model': model.state_dict(),
                          'optimizer': optimizer.state_dict()}
            torch.save(checkpoint, 'weights/best%.4f.pt' % acc)
            print('saved best model, loss is %.4f, accuracy is %.4f' % (loss, acc))

        # Periodic checkpoint usable by the -resume path.
        if epoch % 100 == 0:
            checkpoint = {'epoch': epoch,
                          'model': model.state_dict(),
                          'optimizer': optimizer.state_dict()}
            torch.save(checkpoint, 'weights/checkpoint_epoch%d.pt' % epoch)
