import os
import time
import pandas as pd
from sklearn.model_selection import train_test_split
from LMdata.LMdataset import LMdata, collate_fn
import torch
import torch.utils.data as torchdata
import torch.optim as optim
from torch.optim import lr_scheduler
from models.losses import *
from models.DeepLabV2 import Res_Deeplab
from utils.train_CSAIL import train, trainlog
from LMdata.LMaug import *
from models.DRN import *
from models.GCN import GCN
import logging
from utils.preprocessing import *
from models.model_CSAIL import CSAIL1

def main(img_root, save_dir, model, input_size, resize_rate, optimizer, start_epoch):
    """Train `model` on the landmark dataset rooted at `img_root`.

    Args:
        img_root: directory containing the images and Annotations/train.csv.
        save_dir: directory where `train` writes checkpoints/logs.
        model: network to optimize (expected to already be on the device).
        input_size: (H, W) images are resized to before augmentation.
        resize_rate: down-sampling factor passed to GenGauMask.
        optimizer: optimizer whose LR is scheduled by the LambdaLR below.
        start_epoch: epoch index to resume counting from.
    """

    class trainAug(object):
        """Training-time augmentation: resize, random horizontal flip,
        Gaussian mask generation, then ImageNet-style normalization."""

        def __init__(self):
            self.augment = Compose([
                ResizeImg(size=input_size),
                RandomHflip(),
                GenGauMask(r=resize_rate),
                Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            ])

        def __call__(self, *args):
            return self.augment(*args)

    class valAug(object):
        """Validation-time augmentation: resize + normalization only,
        so evaluation is deterministic (no random flip)."""

        def __init__(self):
            self.augment = Compose([
                ResizeImg(size=input_size),
                Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            ])

        def __call__(self, *args):
            return self.augment(*args)

    # Load the annotation CSV and make image paths absolute under img_root.
    annotation = pd.read_csv(os.path.join(img_root, "Annotations/train.csv"))
    annotation['image_id'] = annotation['image_id'].apply(lambda x: os.path.join(img_root, x))

    # Stratify on category so the 10% validation split keeps class balance.
    train_pd, val_pd = train_test_split(annotation, test_size=0.1, random_state=42,
                                        stratify=annotation['image_category'])

    # Filter known-bad samples from the validation set only.
    val_pd = drop_shitdata(val_pd.copy())

    data_set = {}
    data_set['train'] = LMdata(train_pd, trainAug())
    data_set['val'] = LMdata(val_pd, valAug())

    data_loader = {}
    # Batch size 4 for training (drop_last keeps batches uniform), 2 for validation.
    data_loader['train'] = torchdata.DataLoader(data_set['train'], 4, num_workers=4,
                                                shuffle=True, pin_memory=True, collate_fn=collate_fn,
                                                drop_last=True)
    data_loader['val'] = torchdata.DataLoader(data_set['val'], 2, num_workers=4,
                                              shuffle=False, pin_memory=True, collate_fn=collate_fn)

    # Log dataset sizes and the exact augmentation pipelines for reproducibility.
    logging.info(train_pd.shape)
    logging.info(val_pd.shape)
    logging.info('train augment:')
    for item in data_set['train'].transforms.augment.transforms:
        logging.info('  %s %s' % (item.__class__.__name__, item.__dict__))

    logging.info('val augment:')
    for item in data_set['val'].transforms.augment.transforms:
        logging.info('  %s %s' % (item.__class__.__name__, item.__dict__))

    logging.info(model)

    # Masked BCE-with-logits loss, averaged over valid positions.
    criterion = BCELogitsLossWithMask(size_average=True)

    # Step LR schedule: x1 for epochs 0-5, x0.1 for epochs 6-10, x0.05 after.
    lr_lambda = lambda x: 1 if x < 6 else (0.1 if x < 11 else 0.05)
    exp_lr_scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)

    # Run the training loop (checkpointing/validation handled inside `train`).
    best_acc, best_model_wts = train(model,
                                     epoch_num=13,
                                     start_epoch=start_epoch,
                                     optimizer=optimizer,
                                     criterion=criterion,
                                     exp_lr_scheduler=exp_lr_scheduler,
                                     data_set=data_set,
                                     data_loader=data_loader,
                                     save_dir=save_dir,
                                     print_inter=50,
                                     val_inter=1000,
                                     )


if __name__ == '__main__':
    # Pin training to a single GPU.
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    img_root = "/media/gserver/data/landmark/rawdata/train"
    save_dir = '/media/gserver/models/LandMarks/CSAIL1_bs4'

    # Ensure the checkpoint/log directory exists before logging starts.
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    logfile = '%s/trainlog.log' % save_dir
    trainlog(logfile)

    # Model/optimizer setup; set `resume` to a checkpoint path to finetune.
    resume = None
    model = CSAIL1(num_classes=24)
    optimizer = optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-5)
    # model = torch.nn.DataParallel(model)
    if resume:
        # NOTE(review): eval() here only toggles the train/eval mode flag
        # before weights are loaded; `train` presumably switches the mode
        # back for training -- confirm.
        model.eval()
        logging.info('resuming finetune from %s' % resume)
        model.load_state_dict(torch.load(resume))
        optimizer.load_state_dict(torch.load(os.path.join(save_dir, 'optimizer-state.pth')))
    model.cuda()

    main(img_root,
         save_dir,
         model,
         input_size=(512, 512),
         resize_rate=8,
         optimizer=optimizer,
         start_epoch=0)