# coding=utf-8
import os
import pandas as pd
from sklearn.model_selection import train_test_split
from dataset.dataset import collate_fn, dataset_train
import torch
import torch.utils.data as torchdata
from torchvision import datasets, models, transforms
import shutil
from models.MobileNet import MobileNet
from models.resnet import resnet50
import torch.optim as optim
from torch.optim import lr_scheduler
from utils.train_util import train, trainlog
from torch.nn import CrossEntropyLoss
import logging
import glob
from dataset.data_aug import *
from utils.generateXMl import read_json,update_json
import time
import random
import numpy as np
# os.environ['CUDA_VISIBLE_DEVICES'] = '3'    # todo


def setup_seed(seed):
    """Seed every random-number generator in use (torch CPU, torch CUDA,
    ``random``, numpy) and force cuDNN into deterministic mode so that
    repeated runs are reproducible."""
    for seed_fn in (torch.manual_seed,
                    torch.cuda.manual_seed,
                    torch.cuda.manual_seed_all,
                    random.seed,
                    np.random.seed):
        seed_fn(seed)
    torch.backends.cudnn.deterministic = True
    # benchmark mode autotunes kernels and can pick nondeterministic ones
    torch.backends.cudnn.benchmark = False


def retrain(dirName, iters='1,1,1'):
    """Fine-tune the classifier on the images under ``dirName`` and return
    the path of the final checkpoint produced by ``train``.

    Args:
        dirName: directory holding one sub-directory per integer class
            label, each containing ``*.JPEG`` images.
        iters: comma-separated per-class oversampling counts. Kept for
            backward compatibility; the class-balancing code that consumed
            it is currently disabled.

    Returns:
        Whatever ``train`` returns — the path of the saved model weights.
    """
    iters = iters.split(',')  # retained only for interface compatibility

    save_dir = '{}_output'.format(dirName)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    trainlog('%s/trainlog.log' % save_dir)

    # Collect all labelled images. A single shuffle is sufficient: calling
    # random.shuffle repeatedly does not make the order "more random".
    rawdata_root = dirName
    img_list = glob.glob(rawdata_root + "/*/*.JPEG")
    random.shuffle(img_list)
    print(len(img_list))
    logging.info("len(img_list):%d" % len(img_list))

    all_pd = pd.DataFrame(img_list, columns=["image_path"])
    # The parent directory name is the integer class label.
    # NOTE(review): assumes '/'-separated paths (Linux) — confirm if this
    # ever runs on Windows.
    all_pd["label"] = all_pd["image_path"].apply(lambda x: int(x.split("/")[-2]))
    # Stratified 90/10 split keeps class proportions in the validation set.
    train_pd, val_pd = train_test_split(all_pd, test_size=0.1,
                                        random_state=1, stratify=all_pd['label'])

    print(train_pd.shape)
    print(val_pd.shape)
    train_size = train_pd.shape[0]
    batch_size = 512
    # Print/evaluate roughly once per epoch.
    print_eval_step = (train_size // batch_size) + 1
    print("print_eval_step: ", print_eval_step)
    logging.info("print_eval_step:%d " % (print_eval_step))

    # Data augmentation: rotation + border expansion + brightness jitter for
    # training; only border expansion for validation. Normalization uses the
    # standard ImageNet mean/std.
    data_transforms = {
        'train': Compose([
            RandomRotate(angles=(-7, 7)),
            ExpandBorder(size=(272, 272), resize=True),
            Lighter(alpha=(9, 11), beta=(-6, 6)),
            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]),
        'val': Compose([
            ExpandBorder(size=(272, 272), resize=True),
            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
    }

    data_set = {
        'train': dataset_train(anno_pd=train_pd, transforms=data_transforms["train"]),
        'val': dataset_train(anno_pd=val_pd, transforms=data_transforms["val"]),
    }
    dataloader = {
        phase: torch.utils.data.DataLoader(data_set[phase], batch_size=batch_size,
                                           shuffle=True, num_workers=16,
                                           collate_fn=collate_fn)
        for phase in ('train', 'val')
    }

    # Model: MobileNet with width multiplier 0.5, 120 output classes; the
    # final fully-connected layer is rebuilt to match the class count.
    model = MobileNet(120, alpha=0.5)
    model.fc = torch.nn.Linear(model.fc.in_features, 120)

    resume = None  # set to a checkpoint path to fine-tune from it
    if resume:
        logging.info('resuming finetune from %s' % resume)
        model.load_state_dict(torch.load(resume))

    # NOTE(review): hard-coded 4-GPU DataParallel — confirm the deployment
    # host actually exposes devices 0-3.
    model = model.cuda()
    model = torch.nn.DataParallel(model, device_ids=[0, 1, 2, 3])
    print("device: ", model.device_ids)

    base_lr = 0.4
    optimizer = optim.SGD(model.parameters(), lr=base_lr,
                          momentum=0.9, weight_decay=1e-5)
    criterion = CrossEntropyLoss().cuda()
    # Decay LR by 15% every 5 epochs.
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.85)

    final_path = train(model,
                       epoch_num=500,
                       start_epoch=0,
                       optimizer=optimizer,
                       criterion=criterion,
                       exp_lr_scheduler=exp_lr_scheduler,
                       data_set=data_set,
                       data_loader=dataloader,
                       save_dir=save_dir,
                       print_inter=print_eval_step,
                       val_inter=print_eval_step)

    return final_path


def weights_init(model):
    """Initialise convolution-layer weights from N(0, 0.2) in place.

    Layers whose class name does not contain 'Conv' are left untouched.
    Intended to be used via ``model.apply(weights_init)``.
    """
    layer_type = type(model).__name__
    print("classname:", layer_type)
    if 'Conv' in layer_type:
        model.weight.data.normal_(0.0, 0.2)


def web_retrain(tag):
    """Retrain the model for ``tag``, deploy the newest checkpoint and
    update the model-management JSON.

    On success the status is set to '2'; on any failure during training or
    deployment it is set to '3'. In both cases ``isTrain`` is set to '1' and
    the model version is bumped.

    Args:
        tag: model tag; also the name of the image directory passed to
            ``retrain``.
    """
    dirName = tag
    model_file = 'static/modelManagement.json'
    model_json = read_json(model_file)
    iters = '30,30,30'
    try:
        retrain(dirName, iters)
        # BUGFIX: retrain() saves checkpoints under '<tag>_output', not
        # 'pic/<tag>_output' — the old glob never matched, so [-1] raised
        # IndexError and every run was silently marked as failed.
        model_name = sorted(glob.glob('{}_output/wei*'.format(tag)),
                            key=os.path.getmtime)[-1]
        print('modelname', model_name)
        shutil.copy(model_name, 'models/{}.pth'.format(tag))
        # Restart the recognition service so it picks up the new weights.
        os.system('screen -X -S appRecog quit')
        os.system("screen -dmS appRecog bash -c 'MXNET_LIBRARY_PATH=/usr/local/nvidia/lib/libmxnet.so /home/app/app_meter_combine623'")
        model_json[tag]['modelStatus'] = '2'
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # still propagate; log the traceback instead of swallowing it.
        logging.exception('retrain for tag %s failed', tag)
        model_json[tag]['modelStatus'] = '3'
    # Common bookkeeping for both outcomes (was duplicated in each branch).
    model_json[tag]['isTrain'] = '1'
    model_json[tag]['modelVersion'] = str(float(model_json[tag]['modelVersion']) + 1)
    update_json(model_json, model_file)


if __name__ == '__main__':
    # optparse has been deprecated since Python 3.2; argparse provides the
    # same CLI (-t/--tag, default '1') with identical observable behavior.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-t', '--tag',
        help="option a tag to run",
        type=str, default='1')

    opts = parser.parse_args()

    tag = opts.tag
    print(tag, '*' * 100)
    web_retrain(tag)