from config import *
from data import load_data
from sampler import TripletSampler
import torch
import numpy as np
import torch.nn as nn
import torch.autograd as autograd
import torch.optim as optim
from model import LBNet
from datetime import datetime
import sys
from utils import collate_fn
import torch.nn.functional as F
import torch.nn.init as init


def train():
    """Train LBNet on triplet-sampled gait sequence pairs with BCE loss.

    Relies on globals injected by ``from config import *``:
    ``batch_size``, ``lr``, ``train_start_iteration``, ``checkpoint_dir``
    and ``total_iter``.

    Side effects:
        - writes per-100-iteration loss lines to ``./log.txt``
        - touches ``./best_record.txt``
        - saves a full checkpoint (model + optimizer + iteration) to
          ``./checkpoint/Latest_checkpoint_<iter>.pth.tar`` every 50k iters
    """
    train_source = load_data(flag='train')
    triplet_sampler = TripletSampler(train_source, batch_size)
    train_loader = torch.utils.data.DataLoader(
        dataset=train_source,
        batch_sampler=triplet_sampler,
        collate_fn=collate_fn,
        num_workers=8,
    )
    model = LBNet().float()

    # Re-initialize the Conv2d layers of the model's first child module.
    # NOTE(review): a constant bias of 0.9 is unusual (0.0 is conventional)
    # — confirm this is intentional.
    for mod in list(model.children())[0].children():
        if isinstance(mod, nn.Conv2d):
            init.normal_(mod.weight, 0.0, 0.01)
            init.constant_(mod.bias, 0.9)

    # Single parameter group; the [{'params': ...}] wrapper was redundant.
    optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=0.0005)

    iteration = train_start_iteration
    log_path = './log.txt'
    best_record = './best_record.txt'

    if train_start_iteration == 0:
        # Fresh run: truncate both log files.
        # FIX: previously this truncation ran unconditionally, wiping the
        # existing logs even when resuming from a checkpoint.
        with open(log_path, 'w') as f:
            f.write('\n')
        with open(best_record, 'w') as f:
            f.write('\n')
    else:
        # Resume: the checkpoint stores whole model/optimizer objects, so
        # the freshly built ones above are simply replaced.
        checkpoint = torch.load(checkpoint_dir)
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']
        iteration = checkpoint['iteration']
        with open(log_path, 'a') as f:
            f.write('\n')
        with open(best_record, 'a') as f:
            f.write('\n')

    model.cuda()
    model.train()

    flag = 'train'

    _time1 = datetime.now()
    for seq, identity, condition, angle, bat_frame in train_loader:
        iteration += 1
        optimizer.zero_grad()

        # autograd.Variable is a deprecated no-op since torch 0.4;
        # use the tensors directly.
        for i in range(len(seq)):
            seq[i] = torch.from_numpy(seq[i]).cuda().float()

        # Hard-coded pair labels: first 10 positive, last 10 negative.
        # Assumes the sampler always yields this fixed batch layout —
        # TODO confirm against TripletSampler.
        label_for_two = torch.from_numpy(
            np.array([1] * 10 + [0] * 10)).cuda().float()

        feature = model(*seq, flag)
        loss = F.binary_cross_entropy(feature.squeeze(1), label_for_two)
        loss_val = loss.item()

        # Skip the update when the loss is effectively zero.
        if loss_val > 1e-9:
            loss.backward()
            optimizer.step()

        if iteration % 600000 == 0:
            # NOTE(review): this sets lr to lr/10 (not a cumulative decay),
            # so a second trigger at 1.2M iterations has no further effect
            # — confirm this is the intended schedule.
            for param in optimizer.param_groups:
                param['lr'] = lr / 10

        if iteration % 1000 == 0:
            # Wall-clock time for the last 1000 iterations.
            print(datetime.now() - _time1)
            _time1 = datetime.now()

        if iteration % 100 == 0:
            print('iter {}:'.format(iteration), end='')
            print(', ce_loss{0:.8f}'.format(loss_val), end='')
            with open(log_path, 'a') as f:
                f.write('iter {}:'.format(iteration) + ', loss={0:.8f}'.format(loss_val) + '\n')
            print(', lr=%f' % optimizer.param_groups[0]['lr'], end='\n')
            sys.stdout.flush()

        if iteration % 50000 == 0:
            # Full-object checkpoint (model, optimizer, counter).
            state2 = {'iteration': iteration,
                      'model': model,
                      'optimizer': optimizer}
            torch.save(state2, './checkpoint/Latest_checkpoint_{}.pth.tar'.format(iteration))
            print('Save checkpoint!')
        if iteration == total_iter:
            break

# Script entry point: run a full training session when executed directly.
if __name__ == '__main__':
    train()