import torch 
import numpy as np 
import torch.nn as nn 
from torch.utils.data import DataLoader 
from MyDataloader.myConfig import Config
from MyDataloader.dataloader import MyDataset
from nets.getModel import get_model
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.autograd import Variable
from nets.model_training import MultiBoxLoss

# Training configuration: toggle GPU usage and point at the annotation file.
Cuda = True
txt_name = "train.txt"

# Read the annotation lines (one training sample description per line).
with open(txt_name) as f:
    lines = list(f)
train_data = MyDataset(lines, (300, 300))

# Batch the dataset; incomplete final batches are dropped.
a = DataLoader(train_data, batch_size=Config['Batch_size'], num_workers=0, drop_last=True)

print("Get model...............")
model = get_model("train", Config['num_classes'])
print(model)

# Switch to training mode; replicate across GPUs when CUDA is enabled.
net = model.train()
if Cuda:
    net = torch.nn.DataParallel(model)
    cudnn.benchmark = True
    net = net.cuda()

# SSD MultiBox loss combining localisation and confidence terms.
criterion = MultiBoxLoss(Config['num_classes'], 0.5, True, 0, True, 3, 0.5, False, Cuda)

# ---------------------------------------------------------------------------
# Training loop: optimise the network with Adam for a fixed number of epochs,
# tracking running averages of the localisation and confidence losses.
# (The original wrapped this in a dead `if True:` block, removed here.)
# ---------------------------------------------------------------------------
optimizer = optim.Adam(net.parameters(), lr=0.0001)
for epoch in range(10):
    loc_loss = 0
    conf_loss = 0
    for iteration, batch in enumerate(a):
        images, targets = batch[0], batch[1]
        # Convert inputs to float tensors regardless of device.
        # Bug fix: the original only converted inside `if Cuda:`, so a CPU
        # run fed the network whatever dtype the loader produced. Only the
        # device transfer is now gated on the CUDA flag.
        with torch.no_grad():
            images = images.type(torch.FloatTensor)
            targets = targets.type(torch.FloatTensor)
            if Cuda:
                images = images.cuda()
                targets = targets.cuda()

        # Forward pass (the model returns three outputs; presumably
        # locations, confidences and priors — TODO confirm in getModel).
        out = net(images)
        print(out[0].size())
        print(out[1].size())
        print(out[2].size())

        # Zero accumulated gradients before computing this step's loss.
        optimizer.zero_grad()

        # MultiBox loss = localisation term + confidence term.
        loss_l, loss_c = criterion(out, targets)
        loss = loss_l + loss_c

        # Backward pass and parameter update.
        loss.backward()
        optimizer.step()

        # Accumulate epoch totals and report running averages so far.
        loc_loss += loss_l.item()
        conf_loss += loss_c.item()
        print(loss)
        print(loc_loss / (iteration + 1))
        print(conf_loss / (iteration + 1))