import os
import sys
import time
import logging
import argparse
import numpy as np
import itertools

import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss, DataParallel
from torch.optim import Adam
from torch.autograd import Variable
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter

import settings
from dataset import Fashiondataset
from fashionnet import Fashionnet

# Module-level setup: shared logger, fixed RNG seeds for reproducibility,
# and GPU device selection from the project settings.
logger = settings.logger
torch.manual_seed(66)
# Guard the CUDA calls so importing this module no longer crashes on a
# CPU-only machine (the original called torch.cuda.* unconditionally).
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(66)
    torch.cuda.set_device(settings.device_id)


def ensure_dir(dir_path):
    """Create `dir_path` (including parents) if it does not already exist.

    Uses ``exist_ok=True`` instead of the original isdir-then-makedirs
    sequence, which had a check-then-create race: another process creating
    the directory between the two calls made ``makedirs`` raise.
    Still raises ``FileExistsError`` if the path exists as a regular file,
    matching the original behavior.
    """
    os.makedirs(dir_path, exist_ok=True)
        

class Session:
    """Owns the network, checkpointing, tensorboard writers and dataloaders
    for one training/evaluation run.

    All paths and hyper-parameters come from the project-level ``settings``
    module; directories are created on construction.
    """

    def __init__(self):
        self.log_dir = settings.log_dir
        self.model_dir = settings.model_dir
        ensure_dir(settings.log_dir)
        ensure_dir(settings.model_dir)
        logger.info('set log dir as %s' % settings.log_dir)
        logger.info('set model dir as %s' % settings.model_dir)

        self.step = 0  # global step counter; persisted in checkpoints as 'clock'
        self.net = Fashionnet()
        if settings.num_GPU > 1:
            # NOTE(review): in this branch the master copy is wrapped in
            # DataParallel but never moved with .cuda(); confirm the model
            # is placed on the GPUs as intended before multi-GPU runs.
            self.net = DataParallel(self.net,
                    device_ids=list(range(settings.num_GPU)))
        else:
            self.net = self.net.cuda()
        self.save_steps = settings.save_steps

        self.num_workers = settings.num_workers
        self.batch_size = settings.batch_size
        self.writers = {}      # writer name -> SummaryWriter
        self.dataloaders = {}  # (dataset_name, train_or_val) -> DataLoader

    def tensorboard(self, name):
        """Create, remember and return a SummaryWriter called `name`."""
        self.writers[name] = SummaryWriter(os.path.join(self.log_dir, name + '.events'))
        return self.writers[name]

    def start(self):
        """Write an initial 'latest' checkpoint at step 0."""
        self.save_checkpoints('latest')

    def write(self, name, out):
        """Log the scalar dict `out` to the writer `name` and to the logger.

        Also saves 'latest' and a 'step_N' checkpoint every
        ``save_steps`` steps. Mutates `out` by adding a 'step' entry.
        """
        for k, v in out.items():
            self.writers[name].add_scalar(k, v, self.step)

        out['step'] = self.step
        outputs = [
            "{}:{:.4g}".format(k, v)
            for k, v in out.items()
        ]
        logger.info(' '.join(outputs))

        if self.step % self.save_steps == self.save_steps - 1:
            self.save_checkpoints('latest')
            self.save_checkpoints('step_%d' % self.step)

    def get_dataloader(self, dataset_name, train_or_val):
        """Build a shuffling DataLoader for the given dataset/split and
        return an iterator over it.

        Bug fix: the loader is now cached under its (dataset, split) key.
        Previously the dict created in ``__init__`` was overwritten by a
        single DataLoader object, so only the most recently requested
        loader survived (the commented-out code showed the keyed intent).
        """
        dataset = Fashiondataset(dataset_name, train_or_val)
        key = (dataset_name, train_or_val)
        self.dataloaders[key] = DataLoader(dataset, batch_size=self.batch_size,
                shuffle=True, num_workers=self.num_workers)
        return iter(self.dataloaders[key])

    def save_checkpoints(self, name):
        """Save network weights and the step counter under model_dir/name.

        NOTE(review): when wrapped in DataParallel the state_dict keys carry
        a 'module.' prefix — such a checkpoint will not load into a bare
        (single-GPU) net without stripping the prefix; confirm if runs mix
        GPU counts.
        """
        ckp_path = os.path.join(self.model_dir, name)
        obj = {
            'network': self.net.state_dict(),
            'clock': self.step
        }
        torch.save(obj, ckp_path)

    def load_checkpoints(self, name):
        """Restore weights and step counter from model_dir/name.

        Silently starts from scratch (best-effort resume) when the
        checkpoint file does not exist.
        """
        ckp_path = os.path.join(self.model_dir, name)
        try:
            obj = torch.load(ckp_path)
        except FileNotFoundError:
            return
        self.net.load_state_dict(obj['network'])
        self.step = obj['clock']


def error_rate(pred, label, vis, num_keypoints=23, norm_pair=(5, 6)):
    """Mean normalized keypoint distance over all visible keypoints.

    For each sample, the Euclidean error of every visible keypoint is
    divided by that sample's normalization length (the distance between the
    two ground-truth keypoints in `norm_pair`), and the sum is averaged
    over the total number of visible keypoints in the batch.

    pred:  tensor of shape (batch, num_keypoints, 2), predicted coordinates.
    label: tensor of the same shape, ground-truth coordinates.
    vis:   tensor of shape (batch, num_keypoints); entries equal to 1 mark
           keypoints that enter the metric.
    num_keypoints: number of keypoint columns to score (default 23, the
           count previously hard-coded in the loop).
    norm_pair: indices of the two keypoints defining the normalization
           length (default (5, 6), previously hard-coded).

    Returns 0.0 when no keypoint is visible — the original divided by zero
    (``err / vis_sum`` with ``vis_sum == 0``) in that case.
    """
    a, b = norm_pair
    vis = vis.cpu().data.numpy()
    vis_sum = np.sum(vis == 1)
    if vis_sum == 0:
        return 0.0

    err = 0.0
    for k in range(label.size()[0]):
        predi = pred[k].cpu().data.numpy()
        labeli = label[k].cpu().data.numpy()
        visi = vis[k]

        # Per-sample normalization length. NOTE(review): if the two
        # reference keypoints coincide this is 0 and the division below
        # yields inf/nan, exactly as in the original implementation.
        sk_norm = np.sqrt(np.sum(np.square(labeli[a] - labeli[b])))

        for i in range(num_keypoints):
            if int(visi[i]) == 1:
                dk = np.sqrt(np.sum(np.square(predi[i] - labeli[i])))
                err += dk / sk_norm

    return err / vis_sum


def run_train_val(ckp_name='latest'):
    """Train on the 'blouse' subset with interleaved validation passes.

    Runs an endless loop: one optimizer step on a train batch, then one
    forward pass on a val batch; loss/error scalars go to tensorboard and
    the logger via Session.write, which also checkpoints every
    ``settings.save_steps`` steps.

    ckp_name: checkpoint name under settings.model_dir to resume from
        (silently starts fresh if it does not exist).

    Note: the loop never terminates on its own — stop with Ctrl-C and
    resume from the 'latest' checkpoint.
    """
    sess = Session()
    # Writers are registered on the session; the returned handles are unused.
    train_tb = sess.tensorboard('train')
    val_tb = sess.tensorboard('val')

#    crit_cross = CrossEntropyLoss().cuda()
    # crit_L1 is currently unused — it belongs to the commented-out
    # multi-task loss variants below.
    crit_L1 = torch.nn.L1Loss().cuda()
    crit_mse = MSELoss().cuda()

    opt = Adam(sess.net.parameters(), lr=settings.lr)

    # One independent batch iterator per split; re-created on exhaustion.
    dts = {
        'train': sess.get_dataloader('blouse', 'train'),
        'val': sess.get_dataloader('blouse', 'val'),
    }

    sess.load_checkpoints(ckp_name)

    def inf_batch(name):
        """Fetch one batch from split `name`, forward it, log loss/error,
        and return the loss (caller decides whether to backprop)."""
        #batch = next(dts[name]) 
        try:
            batch = next(dts[name])
        except StopIteration:
            # Epoch boundary: rebuild the loader and keep going.
            dts[name] = sess.get_dataloader('blouse' ,name)
            batch = next(dts[name])

        data, label = batch['data'], batch['label']
        label_bf = label
        data, label = data.cuda(), label.cuda()
        # Legacy (pre-0.4) PyTorch: tensors must be wrapped in Variable.
        data, label = Variable(data), Variable(label)
       
        # 'visable' [sic] is the dataset's key for keypoint visibility flags.
        vis, category = batch['visable'], batch['category']
        #vis_bf = vis
        vis, category = vis.cuda(), category.cuda()
        vis, category = Variable(vis), Variable(category)
        pred = sess.net(data)
       
       # print('vis size',pred[1].size(),'real vis size',label.size())
       # loss = 0.1*crit_mse(pred[0], category)  + 0.5*crit_mse(pred[1], label) + 0.4*crit_L1(pred[2], vis)
       # loss = 0.5*crit_mse(pred[1], label) + 0.5*crit_L1(pred[2], vis)

        # Only the keypoint-regression head (pred[1]) is trained; the
        # category/visibility heads appear only in the commented variants.
        loss = crit_mse(pred[1], label)
        err = error_rate(pred[1], label, vis)
        # loss.data[0]: legacy scalar access (replaced by .item() in >=0.4).
        sess.write(name, {'loss': loss.data[0], 'err': err})
       # sess.write(name, {'loss': loss.data[0]})
        return loss

    while True:
        sess.net.zero_grad()
        loss = inf_batch('train')
        loss.backward()
        opt.step()

        # NOTE(review): the val pass builds a full autograd graph (no
        # volatile/no_grad) and the net stays in train mode — confirm
        # whether eval-mode validation was intended.
        loss = inf_batch('val')

        sess.step += 1

def run_test(ckp_name):
    """Evaluate checkpoint `ckp_name` on the 'blouse' validation split.

    Logs the normalized keypoint error per batch and the mean over batches.

    NOTE(review): the net is never switched to .eval() mode, and the total
    is a plain mean of per-batch error rates, so a smaller final batch is
    weighted the same as full ones — confirm both are intended.
    """
    sess = Session()
    sess.load_checkpoints(ckp_name)

    dt = Fashiondataset('blouse', 'val')
    dt = DataLoader(dt, batch_size=settings.batch_size, 
            shuffle=False, num_workers=settings.num_workers)

    all_num = 0   # number of batches seen
    err_num = 0   # accumulated per-batch error rates
    for i, batch in enumerate(dt):
        data, label = batch['data'], batch['label']
        data, label = data.cuda(), label.cuda()
        # Legacy (pre-0.4) PyTorch Variable wrapping; no volatile flag, so
        # autograd state is still built during evaluation.
        data, label = Variable(data), Variable(label)
       
        # 'visable' [sic] is the dataset's key for keypoint visibility flags.
        vis, category = batch['visable'], batch['category']
        vis, category = vis.cuda(), category.cuda()
        vis, category = Variable(vis), Variable(category)

        pred = sess.net(data)
       # pred = pred.max(dim=1)[1]
       # batch_err_num = (pred.data != label.data).sum()
       # batch_ins_num = label.size()[0]
       # pred = pred[1]  
        # pred[1] is the keypoint-regression head (see run_train_val).
        batch_err = error_rate(pred[1], label, vis)
        logger.info('batch %d error rate: %f' % (i, batch_err))
       # logger.info('batch %d error rate: %f' % (i, batch_err_num / batch_ins_num))
       # err_num += batch_err_num
       # all_num += batch_ins_num
        err_num += batch_err
        all_num += 1

    logger.info('total error rate: %f' % (err_num / all_num))



if __name__ == '__main__':
    # CLI entry point: `-a train|test` selects the mode, `-m NAME` names the
    # checkpoint to resume from / evaluate (default 'latest').
    parser = argparse.ArgumentParser()
    parser.add_argument('-a', '--action', default='train')
    parser.add_argument('-m', '--model', default='latest')

    args = parser.parse_args(sys.argv[1:])

    if args.action == 'train':
        run_train_val(args.model)
    elif args.action == 'test':
        run_test(args.model)
    else:
        # Previously an unknown action fell through and the script exited
        # silently; fail loudly with usage instead.
        parser.error('unknown action: %r (expected "train" or "test")' % args.action)

    # NOTE(review): unused in this file — presumably the full list of
    # dataset categories ('blouse' is hard-coded above); kept for reference.
    class_name = ['blouse', 'dress', 'outwear', 'skirt', 'trousers']






