from src.dataset import DeepFashionCAPDataset
from src.const import base_path
import matplotlib.pyplot as plt
from src.networks import VGG16BaselineNet
import pandas as pd
import torch
import torch.utils.data
from torch import nn
import numpy as np
from torch.nn import functional as F
from src.const import base_path
from src import const
from src.utils import parse_args_and_merge_const
from tensorboardX import SummaryWriter
import os



if __name__ == '__main__':
    # Train the VGG16 baseline on the DeepFashion category/attribute/landmark
    # multi-task objective, logging to TensorBoard and checkpointing to models/.
    parse_args_and_merge_const()
    # exist_ok avoids the check-then-create race of the old exists() test.
    os.makedirs('models', exist_ok=True)
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    # Build the training split and its loader from the dataset index file.
    df = pd.read_csv(base_path + 'info.csv')
    train_df = df[df['evaluation_status'] == 'train']
    train_dataset = DeepFashionCAPDataset(train_df, mode='RANDOM')
    train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=const.BATCH_SIZE, shuffle=True, num_workers=4)

    net = VGG16BaselineNet().to(device)

    optimizer = torch.optim.Adam(net.parameters(), lr=const.LEARNING_RATE)

    # Per-task losses; the explicit class weights counter the skewed
    # negative/positive label distribution of attributes and landmark
    # visibility.
    category_loss_func = torch.nn.CrossEntropyLoss()
    attr_loss_func = torch.nn.CrossEntropyLoss(weight=torch.tensor([const.WEIGHT_ATTR_NEG, const.WEIGHT_ATTR_POS]).to(device))
    lm_vis_loss_func = torch.nn.CrossEntropyLoss(weight=torch.tensor([const.WEIGHT_LANDMARK_VIS_NEG, const.WEIGHT_LANDMARK_VIS_POS]).to(device))
    lm_pos_loss_func = torch.nn.MSELoss()

    writer = SummaryWriter()

    total_step = len(train_dataloader)
    step = 0  # global step across all epochs; used as the TensorBoard x-axis
    for epoch in range(const.NUM_EPOCH):
        net.train()
        for i, sample in enumerate(train_dataloader):
            step += 1
            for key in sample:
                sample[key] = sample[key].to(device)
            output = net(sample)

            category_loss = category_loss_func(output['category_output'], sample['category_label'])
            # Each attribute is a binary (2-class) decision; cross entropy is
            # computed per attribute with the class weights above and then
            # averaged over all attributes, which matches the intended
            # behaviour.
            attr_loss = attr_loss_func(output['attr_output'], sample['attr'])
            lm_vis_loss = lm_vis_loss_func(output['lm_vis_output'], sample['landmark_vis'])
            # Use the ground-truth visibility as a mask so the position loss
            # only counts landmarks that are actually visible (vis == 1).
            # The mask is duplicated along dim 2 to cover both the x and y
            # coordinates of each landmark.
            landmark_vis_float = torch.unsqueeze(sample['landmark_vis'].float(), dim=2)
            landmark_vis_float = torch.cat([landmark_vis_float, landmark_vis_float], dim=2)
            lm_pos_loss = lm_pos_loss_func(
                landmark_vis_float * output['lm_pos_output'],
                landmark_vis_float * sample['landmark_pos_normalized']
            )
            # Weighted sum of the four task losses.
            loss = const.WEIGHT_LOSS_CATEGORY * category_loss + \
                const.WEIGHT_LOSS_ATTR * attr_loss + \
                const.WEIGHT_LOSS_LM_VIS * lm_vis_loss + \
                const.WEIGHT_LOSS_LM_POS * lm_pos_loss

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if (i + 1) % 10 == 0:
                # Log both the raw and the weighted per-task losses so their
                # relative contributions are visible in TensorBoard.
                writer.add_scalar('loss/category_loss', category_loss.item(), step)
                writer.add_scalar('loss_weighted/category_loss', const.WEIGHT_LOSS_CATEGORY * category_loss.item(), step)
                writer.add_scalar('loss/attr_loss', attr_loss.item(), step)
                writer.add_scalar('loss_weighted/attr_loss', const.WEIGHT_LOSS_ATTR * attr_loss.item(), step)
                writer.add_scalar('loss/lm_vis_loss', lm_vis_loss.item(), step)
                writer.add_scalar('loss_weighted/lm_vis_loss', const.WEIGHT_LOSS_LM_VIS * lm_vis_loss.item(), step)
                writer.add_scalar('loss/lm_pos_loss', lm_pos_loss.item(), step)
                writer.add_scalar('loss_weighted/lm_pos_loss', const.WEIGHT_LOSS_LM_POS * lm_pos_loss.item(), step)
                writer.add_scalar('loss_weighted/all', loss.item(), step)
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                      .format(epoch + 1, const.NUM_EPOCH, i + 1, total_step, loss.item()))
            if (i + 1) % 1000 == 0:
                print('Saving Model....')
                # NOTE(review): records the within-epoch step only, and a plain
                # attribute is not part of state_dict, so it is not persisted —
                # confirm whether this is still needed.
                net.step = i + 1
                torch.save(net.state_dict(), 'models/vgg16.pkl')
                print('OK.')

    # The in-loop save only fires every 1000 steps, so a short epoch would
    # otherwise never be checkpointed; always persist the final weights and
    # flush any pending TensorBoard events.
    torch.save(net.state_dict(), 'models/vgg16.pkl')
    writer.close()
