from operator import ne
from numpy.lib.shape_base import dstack
import torch
from torch.utils.data import DataLoader
from datasets.normalDist import NormalDistDataSet
from models.MLP import MLP
import torch.nn as nn
import torch.optim as optim
import time
import numpy as np
import os

def train_singleEpoch(net, train_iter, loss_function, optimizer, epoch):
    """Run one training epoch over `train_iter` and return summary stats.

    Args:
        net: model already on the GPU (batches are moved via `.cuda()`).
        train_iter: DataLoader yielding `(inputs, labels)` batches.
        loss_function: criterion returning a per-batch mean loss (e.g. MSELoss).
        optimizer: optimizer constructed over `net.parameters()`.
        epoch: current epoch number (unused; kept for interface parity).

    Returns:
        dict with keys 'Train loss', 'lr', 'time consumed'.
    """
    start_time = time.time()

    net.train()  # enable dropout / batch-norm training behavior

    train_loss = 0.0

    for batch_idx, (inputs, labels) in enumerate(train_iter):
        inputs = inputs.cuda()
        labels = labels.cuda()

        optimizer.zero_grad()
        outputs = net(inputs)
        loss = loss_function(outputs, labels)
        # BUG FIX: accumulate the Python float, not the tensor. Summing the
        # tensor retained every batch's autograd graph for the whole epoch
        # (unbounded memory growth) and forced a `.item()` on the return value.
        train_loss += loss.item()

        loss.backward()
        optimizer.step()

    finish_time = time.time()
    # NOTE(review): loss_function yields a per-batch *mean*, so dividing the
    # sum of batch means by len(dataset) is not a true per-sample average;
    # kept as-is so the number stays comparable with eval_validation.
    print('train loss:{:.4f}, lr:{:.4f}, time consumed:{:.2f}'.format(
        train_loss / len(train_iter.dataset),
        optimizer.param_groups[0]['lr'],
        finish_time - start_time
    ))

    return {'Train loss': train_loss / len(train_iter.dataset),
            'lr': optimizer.param_groups[0]['lr'],
            'time consumed': finish_time - start_time
            }

@torch.no_grad()
def eval_validation(net, val_iter, loss_function):
    """Evaluate `net` on the validation loader.

    Returns a dict with 'avg_loss' (summed batch losses divided by the
    dataset size) and 'time consumed' in seconds. Leaves the model back
    in training mode for the caller.
    """
    t0 = time.time()
    net.eval()  # disable dropout / use running batch-norm statistics

    total = 0.0
    for inputs, labels in val_iter:
        batch_loss = loss_function(net(inputs.cuda()), labels.cuda())
        total += batch_loss.item()

    net.train()  # restore training mode before returning to the training loop
    elapsed = time.time() - t0

    # len(val_iter) would count batches; len(val_iter.dataset) counts samples.
    avg = total / len(val_iter.dataset)
    print('eval on validation: avg_loss:{:.4f}, time consumed:{:.2f}'.format(
        avg,
        elapsed
    ))

    return {'avg_loss': avg,
            'time consumed': elapsed
            }

def train(epoch_num=50, lr=0.005, batchSize=32):
    """Train an MLP on the normal-distribution dataset and checkpoint it.

    Args:
        epoch_num: number of training epochs.
        lr: SGD learning rate.
        batchSize: mini-batch size for both loaders.

    Returns:
        The trained network (still on the GPU).
    """
    net = MLP()
    net.double()  # dataset arrays are float64, so keep parameters in double
    net.cuda()

    # BUG FIX: removed the hard-coded `lr = 0.01` that silently overrode the
    # `lr` parameter and made the argument useless.

    # loss_func = nn.L1Loss()
    # loss_func = nn.SmoothL1Loss()
    loss_func = nn.MSELoss()
    train_data = NormalDistDataSet(set='training', dataset_path='./Data')
    val_data = NormalDistDataSet(set='validation', dataset_path='./Data')
    train_dataloader = DataLoader(dataset=train_data, batch_size=batchSize, shuffle=True)
    val_dataloader = DataLoader(dataset=val_data, batch_size=batchSize, shuffle=True)

    optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)

    for i in range(1, epoch_num + 1):
        print('\n-----------------------epoch:{}-----------------------'.format(i))
        train_return = train_singleEpoch(net=net, train_iter=train_dataloader, loss_function=loss_func, optimizer=optimizer, epoch=i)
        val_return = eval_validation(net=net, val_iter=val_dataloader, loss_function=loss_func)

    # Save a checkpoint.
    print('*********** model.state_dict *************')
    # BUG FIX: `state_dict` is a method and must be *called*. Previously the
    # bound-method object itself was printed and saved, so the checkpoint
    # contained no weights and no optimizer state.
    print(net.state_dict())
    save_model_path = r'./checkpoint'
    if not os.path.exists(save_model_path):
        os.makedirs(save_model_path)
    save_model_path = os.path.join(save_model_path, 'model_' + time.strftime("%Y-%m-%d", time.localtime()) + '.pth')
    torch.save({'epoch': epoch_num,
                'state_dict': net.state_dict(),
                'optimizer': optimizer.state_dict()},
               save_model_path)
    """
    In PyTorch, state_dict is a plain Python dict mapping each layer to its
    parameters (e.g. every layer's weights and biases).
    (Note: only layers with trainable parameters, such as conv and linear
    layers, appear in the model's state_dict.)
    The Optimizer also has a state_dict, containing the optimizer's state and
    the hyperparameters in use (lr, momentum, weight_decay, etc.).
    """

    return net

@torch.no_grad()
def inference(net, n=1000):
    """Run the trained net on random points and save (x, y, z) triples.

    The original training data lay in x ∈ [-2, 4], y ∈ [-1, 5]; here the
    range is widened (x ∈ [-4, 6], y ∈ [-3, 7]) so the model also sees
    unseen inputs.

    Args:
        net: trained model on the GPU, expecting double-precision inputs.
        n: number of random sample points (generalized from the previous
           hard-coded 1e3; default preserves the old behavior).

    Side effects:
        Writes an (n, 3) array to './Data/Result_MSELoss.npy'.
    """
    # BUG FIX: run in eval mode under no_grad — previously the net stayed in
    # training mode with gradient tracking during inference.
    net.eval()

    x = -4. + 10. * np.random.rand(int(n))
    y = -3. + 10. * np.random.rand(int(n))

    xy = np.squeeze(np.dstack((x, y)))  # shape (n, 2), float64

    # IMPROVED: one batched forward pass instead of a Python loop with a
    # separate GPU round-trip per sample.
    inputs = torch.from_numpy(xy).cuda()  # float64 matches net.double()
    result = net(inputs).cpu().numpy()

    result = np.squeeze(result)
    xyz = np.squeeze(np.dstack((x, y, result)))
    np.save('./Data/Result_MSELoss', xyz)


if __name__ == '__main__':
    # Train the model end-to-end, then run inference over a widened
    # input range and dump the predictions to disk.
    net = train()
    inference(net)



"""
Loss的数值和本身的计算方式也关系密切
有时候换一个Loss，看起来是Loss下降了，但其实是Loss的计算方式变了导致的下降
比如L2Loss计算平方会比L1Loss小，如果换了Loss，那就不能看Loss来比较网络训练好坏了，还是得看最后的效果（客观的评价标准）
"""