from __future__ import division

import logging
import os
import pickle

import torch
import numpy as np
import pandas as pd
import torch.optim as optim
from mxnet import metric as mx_metric
from torch.autograd import Variable
from torch.nn import CrossEntropyLoss
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
import torch.nn.functional as F

from data.classification.img_aug import *
from utils.common import log_init
from data.classification.retail import RetailClassficationDataset
from utils.torch_wrapper import TorchWrapper
import torchvision
import tqdm
class Resnet50(torch.nn.Module):
    """ResNet-50 backbone with a 7-output head and sigmoid activations.

    Wraps torchvision's resnet50 and replaces its 1000-class ImageNet
    head with a 7-unit linear layer.  Each output is an independent
    probability (multi-label attributes), hence sigmoid, not softmax.
    """

    def __init__(self, *args, **kwargs):
        super(Resnet50, self).__init__()
        # *args/**kwargs (e.g. pretrained=...) are forwarded to torchvision.
        self.res = torchvision.models.resnet50(*args, **kwargs)
        self.res.fc = torch.nn.Linear(self.res.fc.in_features, 7)

    def forward(self, x):
        """Return per-attribute probabilities of shape (N, 7), values in (0, 1)."""
        feat = self.res
        x = feat.conv1(x)
        x = feat.bn1(x)
        x = feat.relu(x)
        x = feat.maxpool(x)

        x = feat.layer1(x)
        x = feat.layer2(x)
        x = feat.layer3(x)
        x = feat.layer4(x)
        # Global average pooling over the spatial positions; unlike the
        # backbone's fixed-size avgpool this works for any input resolution.
        # After mean(dim=2) the tensor is already (N, C), so the original
        # no-op view(x.size(0), -1) was dropped.
        x = x.reshape(shape=(x.shape[0], x.shape[1], -1))
        x = x.mean(dim=2)
        x = feat.fc(x)
        # torch.sigmoid: F.sigmoid is deprecated and removed in recent PyTorch.
        return torch.sigmoid(x)
def train(model,
          epoch_num,
          start_epoch,
          optimizer,
          criterion,
          exp_lr_scheduler,
          data_set,
          data_loader,
          save_dir,
          print_inter=200,
          val_inter=3500,
          metrics=None
          ):
    """Train ``model`` with a masked multi-label binary cross-entropy loss.

    Each batch is ``(inputs, labels, masks)``: label column 0 is always
    supervised (loss1), columns 1.. are supervised only where ``masks``
    is set (loss2).  Checkpoints are saved every ``val_inter`` batches,
    losses are logged every ``print_inter`` batches.

    Args:
        model: network producing per-attribute probabilities in (0, 1).
        epoch_num: exclusive upper bound on the epoch index.
        start_epoch: epoch index to resume from.
        optimizer: torch optimizer over ``model``'s parameters.
        criterion: unused; kept for interface compatibility.
        exp_lr_scheduler: LR scheduler stepped once per epoch.
        data_set: unused; kept for interface compatibility.
        data_loader: dict holding a ``'train'`` DataLoader.
        save_dir: directory that receives periodic ``.pth`` checkpoints.
        print_inter: batches between loss log lines.
        val_inter: batches between checkpoint saves.
        metrics: dict of wrappers exposing ``reset``/``update``/``get``;
            must contain ``"batch_loss1"`` and ``"batch_loss2"``.
    """
    # Fix: avoid a shared mutable default argument ({} was the default).
    metrics = {} if metrics is None else metrics
    for epoch in range(start_epoch, epoch_num):

        # train phase
        # NOTE(review): step(epoch) is deprecated in newer torch; kept as-is.
        exp_lr_scheduler.step(epoch)
        model.train(True)  # Set model to training mode
        for key in metrics:
            metrics[key].reset()
        for batch_cnt, data in enumerate(data_loader['train']):
            if batch_cnt % val_inter == 0:
                logging.info('current lr:%s' % exp_lr_scheduler.get_lr())
                # No validation is wired up; placeholder metric name/value
                # keep the checkpoint filename format stable.
                name1, accu1 = ["nan", 0.0]

                save_path = os.path.join(save_dir,
                        'weights-%d-%d-[%s-%.4f].pth'%(epoch,batch_cnt,name1, accu1))
                torch.save(model.state_dict(), save_path)
                logging.info('saved model to %s' % (save_path))
                logging.info('--' * 30)
            model.train(True)
            inputs, labels, masks = [Variable(x.float().cuda()) for x in data]
            optimizer.zero_grad()
            y = model(inputs)

            # Binary cross-entropy on column 0 (always supervised);
            # the 1e-7 epsilon guards against log(0).
            loss1 = labels[:,0] *  torch.log(y[:,0] + 1e-7) + (1 - labels[:,0]) * torch.log(1- y[:,0] + 1e-7)
            loss1 = (loss1 / y.shape[0] * -1).sum()
            # Masked binary cross-entropy on the remaining columns.
            loss2 = labels[:,1:] *  torch.log(y[:,1:] + 1e-7) + (1 - labels[:,1:]) * torch.log(1- y[:,1:] + 1e-7)
            loss2 = (loss2 / y.shape[0] * masks * -1).sum()

            loss = loss1 + loss2
            # Fix: backprop through the already-computed total instead of
            # rebuilding the same expression as (loss2 + loss1).
            loss.backward()
            optimizer.step()

            metrics["batch_loss1"].update(None, loss1)
            metrics["batch_loss2"].update(None, loss2)
            if batch_cnt % print_inter == 0:
                # noinspection PyStringFormat
                n1, v1 = metrics["batch_loss1"].get()
                n2, v2 = metrics["batch_loss2"].get()
                logging.info('%s [%d-%d] | %s: %.3f | %s: %.3f '
                             % (0, epoch, batch_cnt,
                                n1, v1,
                                n2, v2
                                ))
                # Fix: reset BOTH running losses; the original reset only
                # batch_loss1, so batch_loss2 kept averaging over the whole
                # epoch while batch_loss1 averaged per print window.
                metrics["batch_loss1"].reset()
                metrics["batch_loss2"].reset()
def inference(net, data_transforms):
    """Classify detection crops and write a grouped submission JSON.

    Reads detection boxes from a CSV, crops each box out of its test
    image, runs ``net`` on the crop, and groups the per-box attribute
    scores by image into output/submit.json (plus a jq-formatted copy).

    Args:
        net: model returning 7 attribute probabilities per crop
            (person, customer, sit, gender, stand, play_with_phone, staff).
        data_transforms: dict whose 'val' entry maps a BGR image to a CHW
            float array ready for the network.
    """
    import cv2
    resume = "output/classification/output/classification/weights-16-0-[nan-0.0000].pth"
    logging.info('resuming finetune from %s' % resume)
    net.load_state_dict(torch.load(resume), strict=False)
    # Fix: evaluate with eval-mode batchnorm statistics; the original ran
    # the net in training mode on single-image batches.
    net.eval()
    with open("output/classification/res_combine5_3_7.csv", "rt") as f:
        lines = f.readlines()[1:]  # skip CSV header
    all_objs = {}
    for n, l in tqdm.tqdm(enumerate(lines), total=len(lines)):
        l = l.strip().split(',')
        name, x0, y0, x1, y1, score, image_id = l[:7]
        # Box coordinates arrive as strings like "12.3"; float-then-int cast.
        x0_i, y0_i, x1_i, y1_i = np.array([x0, y0, x1, y1]).astype('f').astype('i')
        image = cv2.imread(os.path.join("/data1/zyx/yks/dataset/retail/test_images/test", os.path.basename(name)))
        # NOTE(review): assumes the image exists and the box lies inside it.
        image = image[y0_i:y1_i, x0_i:x1_i]
        tensor = data_transforms["val"](image)[np.newaxis]
        tensor = torch.from_numpy(tensor).float().cuda()
        # Fix: no gradients needed at inference; also drops deprecated .data.
        with torch.no_grad():
            r = net(tensor).cpu().numpy().tolist()[0]
        person, customer, sit, gender, stand, play_with_phone, staff = r
        one_obj = {"customer": customer,
                   "minx": float(x0),
                   "miny": float(y0),
                   "maxx": float(x1),
                   "maxy": float(y1),
                   "sit": sit,
                   "male": 1 - gender,
                   "stand": stand,
                   "female": gender,
                   "play_with_phone": play_with_phone,
                   "staff": staff,
                   "confidence": float(score)
                   }
        # Group boxes by image (setdefault replaces try/except KeyError).
        all_objs.setdefault(image_id, []).append(one_obj)
    # Renamed from `re` to avoid shadowing the stdlib module name.
    submission = {"results": [{"image_id": key, "object": objs}
                              for key, objs in all_objs.items()]}
    import json
    # Fix: close the output file (json.dump(re, open(...)) leaked the handle).
    with open("output/submit.json", "wt") as f:
        json.dump(submission, f)
    os.system("cat output/submit.json | jq . > output/submit_format.json")
if __name__ == '__main__':
    # Pin the job to a single GPU.
    os.environ["CUDA_VISIBLE_DEVICES"] = "6"

    # Output directory for checkpoints/results, plus the run log.
    save_dir = 'output/classification/'
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    logfile = '%s/trainlog.log' % save_dir
    log_init(logfile)

    # Stochastic augmentation for training; deterministic resize +
    # normalization for evaluation.
    data_transforms = {
        'train': Compose([
            RandomHflip(),
            RandomRotate(angles=(-15, 15)),
            ExpandBorder(size=(368, 368), resize=True),
            RandomResizedCrop(size=(336, 336)),
            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]),
        'val': Compose([
            ExpandBorder(size=(336, 336), resize=True),
            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
    }

    # Running-loss trackers (mxnet metrics behind the torch adapter).
    train_metrics = {
        name: TorchWrapper(mx_metric.Loss(name=name))
        for name in ("batch_loss1", "batch_loss2")
    }

    base_lr = 1e-3
    resume = None
    model = Resnet50(pretrained=False).cuda()
    if resume:
        logging.info('resuming finetune from %s' % resume)
        model.load_state_dict(torch.load(resume), strict=False)

    optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=1e-5)
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=12, gamma=0.1)

    # Only inference is active here.  To train instead, build the dataset /
    # loader dicts and call train(...):
    #   data_set = {s: RetailClassficationDataset(transforms=data_transforms[s])
    #               for s in ('train', 'val')}
    #   dataloader = {'train': DataLoader(data_set['train'], batch_size=8,
    #                                     shuffle=True, num_workers=8,
    #                                     pin_memory=True),
    #                 'val': DataLoader(data_set['val'], batch_size=16,
    #                                   shuffle=True, num_workers=8,
    #                                   pin_memory=True)}
    #   train(model, epoch_num=50, start_epoch=0, optimizer=optimizer,
    #         criterion=None, exp_lr_scheduler=exp_lr_scheduler,
    #         data_set=data_set, data_loader=dataloader, save_dir=save_dir,
    #         print_inter=50, val_inter=2000, metrics=train_metrics)
    inference(model, data_transforms)