import os
import torch
import time
import pandas as pd
from torch import nn
from config import DefaultConfig
from models.model import scatterNet, para_init
from dataprocess.dataProcess import ScatterData
from torch.optim import lr_scheduler, Adam
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter


def _average_loss(model, dataloader, loss_fn, device):
    """Return the average per-batch loss of `model` over `dataloader`.

    Runs without gradient tracking and in eval mode; the model's previous
    training/eval mode is restored before returning so the caller's state
    is unchanged.
    """
    was_training = model.training
    model.eval()
    total_loss = 0.0
    num_batches = 0
    with torch.no_grad():
        for one_batch in dataloader:
            # assumes each batch is a dict with 'x' (inputs) and 'y' (targets),
            # as produced by ScatterData -- TODO confirm against dataProcess
            data = torch.as_tensor(one_batch['x'], dtype=torch.float32).to(device)
            label = torch.as_tensor(one_batch['y'], dtype=torch.float32).to(device)
            total_loss += loss_fn(model(data), label).item()
            num_batches += 1
    if was_training:
        model.train()
    # Guard against an empty dataloader rather than dividing by zero.
    return total_loss / max(num_batches, 1)


def forward_design(opt):
    """Train, validate and test the forward scattering network.

    Loads (or initializes) `scatterNet`, trains it on the training set with
    Adam + StepLR, logs losses to TensorBoard, checkpoints to
    `opt.model_path` whenever the epoch loss does not increase, then reports
    validation loss and (after reloading the best checkpoint) test loss.

    Args:
        opt: configuration object (see config.DefaultConfig) providing model
            sizes, data/label paths, and training hyper-parameters.
    """
    # Device configuration
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # step1: model -- resume from checkpoint if one exists, else random init
    print("\t* Building model...")
    model = scatterNet(opt.input_size, opt.hidden_size1, opt.output_size)
    if os.path.exists(opt.model_path):
        model.load_state_dict(torch.load(opt.model_path)['model'])
    else:
        model.apply(para_init)
    model.to(device)

    # step2: load data
    print("\t* Loading training data...")
    trainDataSet = ScatterData(opt.traindata_path, opt.trainlabel_path)
    train_dataloader = DataLoader(trainDataSet, batch_size=opt.batch_size,
                                  shuffle=True, num_workers=opt.num_workers)
    print("\t* Loading validation data...")
    evalDataSet = ScatterData(opt.evaldata_path, opt.evallabel_path)
    eval_dataloader = DataLoader(evalDataSet, batch_size=opt.batch_size,
                                 shuffle=True, num_workers=opt.num_workers)

    # step3: optimizer
    # reduction='mean' replaces the deprecated reduce=/size_average= flags.
    loss_fn = torch.nn.MSELoss(reduction='mean')
    optimizer = Adam(model.parameters(), lr=opt.lr)
    scheduler = lr_scheduler.StepLR(optimizer, gamma=opt.lr_decay, step_size=opt.decay_step, last_epoch=-1)

    # step4: training
    print("\t* Training...")
    previous_loss = float('inf')
    start = time.time()

    writer = SummaryWriter()

    for i in range(opt.epoch):
        train_loss = 0.0
        num_train = 0
        for one_batch in train_dataloader:
            # Put the training set data and labels on the GPU
            data = torch.as_tensor(one_batch['x'], dtype=torch.float32).to(device)
            label = torch.as_tensor(one_batch['y'], dtype=torch.float32).to(device)
            # Forward pass and MSE loss against the measured spectrum
            output = model(data)
            loss = loss_fn(output, label)
            # Clear the past gradient, back-propagate, update parameters
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # NOTE(review): the scheduler steps per batch, so opt.decay_step
            # is counted in iterations, not epochs -- kept as in the original.
            scheduler.step()
            # .item() detaches the scalar; accumulating the raw tensor would
            # keep every batch's autograd graph alive for the whole epoch.
            train_loss += loss.item()
            num_train += 1
        epoch_loss = train_loss / max(num_train, 1)
        writer.add_scalar('test/train_loss', epoch_loss, i)
        print('Epoch:{}, train_loss:{}'.format(i, epoch_loss))

        # Checkpoint only when the loss did not get worse this epoch.
        if epoch_loss <= previous_loss:
            torch.save({'model': model.state_dict()}, opt.model_path)
        # get_last_lr() replaces the deprecated get_lr() accessor.
        writer.add_scalar('test/learning rate', scheduler.get_last_lr()[0], i)
        previous_loss = epoch_loss

    writer.close()
    end = time.time()
    print('Time_usage: {:.5f} s'.format(end-start))

    # evaluating
    print("\t* Validating...")
    print('Validation loss: {}'.format(
        _average_loss(model, eval_dataloader, loss_fn, device)))

    # testing -- reload the best checkpoint saved during training
    print("\t* Testing...")
    model = scatterNet(opt.input_size, opt.hidden_size1, opt.output_size)
    if os.path.exists(opt.model_path):
        model.load_state_dict(torch.load(opt.model_path)['model'])
    model.to(device)
    print("\t* Loading testing data...")
    testDataSet = ScatterData(opt.testdata_path, opt.testlabel_path)
    test_dataloader = DataLoader(testDataSet, batch_size=opt.batch_size,
                                 shuffle=True, num_workers=opt.num_workers)
    print('Test_loss: {}'.format(
        _average_loss(model, test_dataloader, loss_fn, device)))


def inverse_design(opt):
    """Optimize an input structure so the trained network reproduces a target spectrum.

    Loads the trained forward model (required: aborts if no checkpoint
    exists), freezes its weights, and runs gradient descent on a randomly
    initialized input tensor until the MSE between the network's prediction
    and the expected spectrum drops below `opt.loss_expect`.

    Args:
        opt: configuration object providing model sizes, checkpoint path,
            expected-spectrum CSV path, learning rate and loss threshold.

    Raises:
        SystemExit: if no trained checkpoint is found at `opt.model_path`.
    """
    # Device configuration
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # step1: model -- a trained checkpoint is mandatory for inverse design
    print("\t* Building model...")
    model = scatterNet(opt.input_size, opt.hidden_size2, opt.output_size)
    if os.path.exists(opt.model_path):
        model.load_state_dict(torch.load(opt.model_path)['model'])
        model.to(device)
    else:
        print('Error: 未找到训练好的模型权值')
        # Exit with a non-zero status so callers/scripts can detect the failure
        # (the original exit() reported success).
        raise SystemExit(1)

    # Freeze the network: only the input is optimized, not the weights.
    for p in model.parameters():
        p.requires_grad = False
    # Inference-mode behavior (dropout/batchnorm, if any) during design.
    model.eval()

    # step2: load data
    print("\t* Loading excepted spectrum...")
    designSpectrum = pd.read_csv(opt.expected_spectrum_path, header=None).values
    # Put the designed spectrum data and labels on the GPU
    # assumes the CSV holds a single target spectrum shaped to match the
    # network output -- TODO confirm against the data files
    designSpectrum = torch.as_tensor(designSpectrum, dtype=torch.float32).to(device)
    # Random starting structure, uniform in [30, 70) -- the physical
    # parameter range presumably expected by the model; verify against data.
    designData = (torch.rand(size=(1, opt.input_size))*40.0+30.0).to(device)
    print('designData', designData)
    # Only the input tensor receives gradients
    designData.requires_grad = True

    # step3: optimizer
    # reduction='mean' replaces the deprecated reduce=/size_average= flags.
    loss_fn = torch.nn.MSELoss(reduction='mean')
    optimizer = Adam([designData], lr=opt.lr)

    # step4: designing
    print("\t* Designing...")
    loss = float('inf')
    start = time.time()
    num_design = 0
    writer = SummaryWriter()
    while loss > opt.loss_expect:
        # Clear the gradient
        optimizer.zero_grad()
        # Predicted spectrum for the current candidate structure
        output = model(designData)
        # MSE between prediction and the target spectrum
        loss_tensor = loss_fn(output, designSpectrum)
        # Back-propagate into designData and take an optimization step
        loss_tensor.backward()
        optimizer.step()
        # .item() detaches the scalar so the loop condition compares floats
        # and no autograd graph is retained between iterations.
        loss = loss_tensor.item()
        # Record the number of design iterations
        num_design += 1
        writer.add_scalar('design loss', loss, num_design)
        if num_design % 200 == 0:
            print('Num_design:{}, loss:{}'.format(num_design, loss))
    writer.close()
    end = time.time()
    print('Time_usage: {:.5f} s'.format(end - start))
    print('Designed structure is {}'.format(designData.detach().cpu().numpy()))


if __name__ == '__main__':
    # Load hyper-parameters and data/model paths from the project config.
    args = DefaultConfig()
    # Workflow toggle: uncomment to (re)train the forward network before
    # running the inverse design, which requires a trained checkpoint.
    # forward_design(args)
    inverse_design(args)
