import os
import time
import pandas as pd
from sklearn.model_selection import train_test_split
from LMdata.LMdataset import LMdata, collate_fn
import torch
import torch.utils.data as torchdata
import torch.optim as optim
from torch.optim import lr_scheduler
from models.losses import *
from models.DeepLabV2 import Res_Deeplab
from utils.train_MltLdr import train, trainlog
from LMdata.LMaug import *
from models.DRN import *
from models.GCN import GCN
import logging
from utils.preprocessing import *
from LMdata.LMdataset_mscl import multiDataLoader

def main(root_path,save_dir,model, train_sizes,val_size,resize_rate, optimizer,start_epoch,bs=8):
    """Run multi-scale training of `model` on the landmark dataset.

    Args:
        root_path: dataset root directory handed to get_train_val().
        save_dir: directory where train() writes checkpoints/state.
        model: network to optimize (expected already DataParallel-wrapped and on GPU).
        train_sizes: list of (h, w) sizes; one training dataset/augmenter is built per size.
        val_size: (h, w) resize applied to validation images.
        resize_rate: radius factor forwarded to GenGauMask.
        optimizer: torch optimizer whose learning rate is driven by a LambdaLR schedule.
        start_epoch: epoch index training resumes counting from.
        bs: batch size for the multi-scale train loader; also scales the
            lr-schedule step counts (the `/ 8.` constants suggest they were
            tuned for bs=8).
    """

    class trainAug(object):
        """Training-time augmentation: rotate, resize, flip, gaussian mask, normalize."""

        def __init__(self, size):
            self.augment = Compose([
                RandomRotate(angles=[-20., 20.], bound=False),
                ResizeImg(size=size),
                RandomHflip(),
                GenGauMask(r=resize_rate),
                # ImageNet channel statistics.
                Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            ])

        def __call__(self, *args):
            return self.augment(*args)

    class valAug(object):
        """Validation-time preprocessing: deterministic resize + normalize only."""

        def __init__(self, size):
            self.augment = Compose([
                ResizeImg(size=size),
                Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            ])

        def __call__(self, *args):
            return self.augment(*args)

    # Prepare train / val splits; drop known-bad validation samples.
    train_pd, val_pd = get_train_val(root_path)
    val_pd = drop_shitdata(val_pd.copy())

    # Re-index so row positions are contiguous after splitting/filtering.
    train_pd.index = range(train_pd.shape[0])
    val_pd.index = range(val_pd.shape[0])

    # One training dataset per input size; multiDataLoader interleaves them.
    data_set = {}
    data_set['train'] = [LMdata(train_pd, trainAug(size=size)) for size in train_sizes]

    multi_loader = multiDataLoader(*data_set['train'], bs=bs, num_workers=3)
    data_set['val'] = LMdata(val_pd, valAug(size=val_size))

    data_loader = {}
    data_loader['val'] = torchdata.DataLoader(data_set['val'], 2, num_workers=2,
                                              shuffle=False, pin_memory=True, collate_fn=collate_fn)

    # Log dataset shapes and the exact augmentation pipelines in use.
    logging.info(train_pd.shape)
    logging.info(val_pd.shape)
    logging.info('train augment:')
    # BUGFIX: original used Python-2-only xrange(); iterate the datasets directly.
    for train_set in data_set['train']:
        for item in train_set.transforms.augment.transforms:
            logging.info('  %s %s' % (item.__class__.__name__, item.__dict__))

    logging.info('val augment:')
    for item in data_set['val'].transforms.augment.transforms:
        logging.info('  %s %s' % (item.__class__.__name__, item.__dict__))

    logging.info(model)

    criterion = BCELogitsLossWithMask(size_average=True)

    # Learning-rate schedule: step boundaries scale linearly with batch size
    # (the `/ 8.` baselines indicate they were tuned for bs=8).
    step1_bs_rate = 8. / 8.
    step2_bs_rate = 13. / 8.
    steps_bs_rate = 16. / 8.
    step1 = int(bs * step1_bs_rate)
    step2 = int(bs * step2_bs_rate)
    steps = int(bs * steps_bs_rate)
    logging.info('lr steps1: %d' % step1)
    logging.info('lr steps2: %d' % step2)
    logging.info('total steps: %d' % steps)

    # Piecewise-constant lr multiplier: 1.0 -> 0.1 -> 0.05.
    lr_lambda = lambda x: 1 if x < step1 else (0.1 if x < step2 else 0.05)
    exp_lr_scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)

    # Training loop (checkpointing / periodic validation handled inside train()).
    best_acc, best_model_wts = train(model,
                                     epoch_num=steps,
                                     start_epoch=start_epoch,
                                     optimizer=optimizer,
                                     criterion=criterion,
                                     exp_lr_scheduler=exp_lr_scheduler,
                                     multi_loader=multi_loader,
                                     val_data_set=data_set['val'],
                                     val_data_loader=data_loader['val'],
                                     save_dir=save_dir,
                                     print_inter=150,
                                     val_inter=2000)


if __name__=='__main__':
    os.environ["CUDA_VISIBLE_DEVICES"] = '1,2'
    root_path = "/media/gserver/data/landmark"
    save_dir = '/media/gserver/models/LandMarks/round2/GCN(368-512-768)_bs8_rotate_warm'

    # Create the checkpoint/log directory if it does not exist yet.
    # (Removed the redundant `save_dir = save_dir` self-assignment.)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    logfile = '%s/trainlog.log' % save_dir
    trainlog(logfile)

    # Model / optimizer setup. Set `resume` to a checkpoint path to continue
    # a previous run.
    resume = None
    model = GCN(num_classes=24, layers=50)
    optimizer = optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-5)
    model = torch.nn.DataParallel(model)
    if resume:
        model.eval()
        logging.info('resuming finetune from %s' % resume)
        try:
            model.load_state_dict(torch.load(resume))
        except KeyError:
            # NOTE(review): the model is already DataParallel-wrapped above, so
            # wrapping again here double-prefixes parameter names
            # ('module.module.'); verify this fallback matches how the
            # checkpoints being resumed were actually saved.
            model = torch.nn.DataParallel(model)
            model.load_state_dict(torch.load(resume))
        optimizer.load_state_dict(torch.load(os.path.join(save_dir, 'optimizer-state.pth')))
    model.cuda()

    main(root_path,
         save_dir,
         model,
         train_sizes=[(368, 368), (512, 512), (768, 768)],
         val_size=(512, 512),
         resize_rate=1,
         optimizer=optimizer,
         start_epoch=0,
         bs=8)