import torch
import torchvision.transforms as transforms
import torch.nn as nn
# from torch.autograd import Variable
import torch.optim as optim 
import numpy as np 

from net_structure import netStructure
from dataloader_ import GraspDataset
from normal_resize import GeneralizedRCNNTransform



# mytransform = transforms.Compose([transforms.Resize((227, 227)), transforms.CenterCrop(227), transforms.ToTensor()])

# Image transform: to-tensor + ImageNet mean/std normalization.
mytransform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])

# Mask transform: to-tensor only (masks must not be normalized).
mytransform1 = transforms.Compose([transforms.ToTensor()])
# def loadtestdata():
#     testset = GraspDataset('path to test dataset (folder containing Images and Masks)', transform = mytransform)

#     testloader = torch.utils.data.DataLoader(testset, batch_size=1,     
#                                              shuffle=True, num_workers=0)   # batch_size = number of images predicted at once
#     return testloader

# Adjust according to the task; NOTE(review): unclear where this is used — loss computation or classifier head size?
num_class = 2    # background and grasp


# -----------training-------------- #
def trainandsave():
    """Train the grasp-segmentation network and save it to disk.

    Loads ``GraspDataset`` from a hard-coded path, trains ``netStructure``
    with SGD + BCELoss (sigmoid applied to the raw network output) for
    1000 epochs, then saves both the whole model (``net.pth``) and its
    parameters only (``net_params.pth``).

    No parameters; no return value.  Side effects: prints running loss
    every 13 iterations and writes the two checkpoint files.
    """
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

    # Training data: images get normalization, masks only ToTensor.
    train_set = GraspDataset('/home/qq/graspnet/dataset', mytransform, mytransform1)
    trainloader = torch.utils.data.DataLoader(train_set, batch_size=2,
                                              shuffle=True, num_workers=0)  # num_workers=0: load in the main process

    # Build the network and move it to the chosen device.
    net = netStructure()
    net.to(device)
    net.train()  # training mode

    # SGD optimizer; the learning rate is step-decayed at epochs 500/750 below.
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

    # BCELoss expects probabilities, so the raw output is passed through a
    # sigmoid first (BCELoss is the binary cross-entropy for 2-class tasks).
    sigmoid = nn.Sigmoid()
    criterion = nn.BCELoss()

    num_epochs = 1000
    for epoch in range(num_epochs):
        running_loss = 0.0  # accumulator for the periodic loss printout
        if epoch in (500, 750):
            # Step-decay: lower the learning rate and add weight decay.
            optimizer = optim.SGD(net.parameters(), lr=0.0001,
                                  momentum=0.9, weight_decay=0.0001)

        for i, data in enumerate(trainloader):
            # targets (a list of dicts) is moved to the device but is not
            # used by the loss below — the loss is computed against `mask`.
            inputs, targets, mask = data

            # BUG FIX: the original called .cuda() unconditionally, which
            # crashed on CPU-only machines even though `device` was selected
            # above.  torch.stack also replaces the wasteful
            # tensor -> numpy -> tensor round trip.
            inputs = torch.stack([img.to(device) for img in inputs])
            targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
            mask = torch.stack([m.to(device) for m in mask])

            # forward + backward + optimize
            outputs = net(inputs)
            outputs = sigmoid(outputs)  # map logits to probabilities for BCELoss
            loss = criterion(outputs, mask)

            optimizer.zero_grad()  # clear gradients accumulated from the previous step
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            if i % 13 == 12:  # print the average loss every 13 iterations
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, i + 1, running_loss / 13))
                running_loss = 0.0

    print('Finished Training')
    # Save the whole network (structure + parameters) ...
    torch.save(net, 'net.pth')
    # ... and the parameters alone (preferred for reloading).
    torch.save(net.state_dict(), 'net_params.pth')


# Guard the entry point so importing this module does not immediately
# launch a 1000-epoch training run; `python thisfile.py` still trains.
if __name__ == '__main__':
    trainandsave()