# coding=utf-8
import os,shutil,glob,logging,torch,random,numpy as np,time,math
import pandas as pd
from sklearn.model_selection import train_test_split
from dataset.dataset import collate_fn, dataset_train
import torch.utils.data as torchdata
from torchvision import datasets, models, transforms
from models.MobileNet import MobileNet,mobilenet_v2,weigth_init
import tkinter.filedialog as filedialog
from models.resnet import resnet50
import torch.optim as optim
from torch.optim import lr_scheduler
from utils.train_util import train, trainlog
from torch.nn import CrossEntropyLoss
from dataset.data_aug import *
from dataset.data_aug import RandomBrightness
from utils.generateXMl import read_json,update_json
from dataset.data_aug import pepper_salt_noise, perspective_transformation, dog_way
from CAWB_main.cawb import CosineAnnealingWarmbootingLR
from models.MobileviT import mobilevit_s,mobilevit_xs,mobilevit_xxs


# os.environ['CUDA_VISIBLE_DEVICES'] = '3'    # todo

def setup_seed(seed):
    """Seed every RNG used in training (Python, NumPy, PyTorch) for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Force deterministic cuDNN kernels and disable autotuning so repeated runs
    # with the same seed produce identical results (at some speed cost).
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False


def retrain(dirName, save_path, FT_params_path, opt):
    """Fine-tune / retrain the image classifier on the images under ``dirName``.

    Parameters
    ----------
    dirName : str
        Root directory; images live in ``<dirName>/<label>_*/img.{jpg,jpeg,png}``
        where each sub-directory name starts with the integer class label.
    save_path : str
        Directory under which a dated ``<YYYYMMDD>_output`` run dir is created.
    FT_params_path : str
        Path to a checkpoint used to warm-start the model (skipped if missing).
    opt :
        Options namespace; must provide ``opt.endepoch`` (training epochs).

    Returns
    -------
    The final checkpoint path produced by ``utils.train_util.train``.
    """
    setup_seed(1024)
    save_dir = '{}/{}_output'.format(save_path, time.strftime("%Y%m%d", time.localtime()))
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    logfile = '%s/trainlog.log' % save_dir
    trainlog(logfile)

    # ---- collect images; the class label is the leading integer of the parent dir name ----
    fileExtensions = ["jpg", "jpeg", "png"]
    img_list = []
    for extension in fileExtensions:
        img_list.extend(glob.glob(dirName + "/*/*." + extension))
    all_pd = pd.DataFrame(img_list, columns=["image_path"])
    # Parse the label via os.path instead of splitting on "/" so this also
    # works with the back-slash paths glob returns on Windows.
    all_pd["label"] = all_pd["image_path"].apply(
        lambda x: int(os.path.basename(os.path.dirname(x)).split("_")[0]))

    # Stratified 90/10 train/val split, fixed seed for reproducibility.
    train_pd, val_pd = train_test_split(all_pd, test_size=0.1, random_state=1,
                                        stratify=all_pd['label'])

    # ---- naive oversampling so all 4 classes roughly match the largest one ----
    class_frames = [train_pd[train_pd["label"] == c] for c in range(4)]
    counts = [len(f) for f in class_frames]
    pd_max_num = max(counts)
    # How many extra copies of each class to append (0 for the majority class).
    iters = [int(pd_max_num / (n + 1) - 1) for n in counts]
    print("before train balance: ", counts[0], counts[1], counts[2], counts[3], " iters: ", iters)
    logging.info("before train balance:%d, %d,%d, %d" % tuple(counts))

    train_new_pd = train_pd.copy()
    for frame, reps in zip(class_frames, iters):
        for _ in range(int(reps)):
            # DataFrame.append was removed in pandas 2.0; pd.concat is the
            # supported equivalent.  sample(frac=1) reshuffles after each copy.
            train_new_pd = pd.concat([train_new_pd, frame]).sample(frac=1)

    balanced = [train_new_pd[train_new_pd["label"] == c] for c in range(4)]
    print("after train balance: ", len(balanced[0]), len(balanced[1]), len(balanced[2]), len(balanced[3]))
    logging.info("after train balance:%d, %d,%d, %d" % tuple(len(f) for f in balanced))

    print(train_pd.shape)
    print(val_pd.shape)
    train_size = train_pd.shape[0]

    # ---- data augmentation (train only; val is just normalized) ----
    data_transforms = {
        'train': Compose([
            RandomBrightness(),
            Random_Drawline(),
            ShuffleRgb(),
            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]),
        'val': Compose([
            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
    }
    # NOTE(review): the oversampled ``train_new_pd`` computed above is never
    # used — the dataset is built from the original ``train_pd``.  Confirm
    # whether class balancing is meant to be active here.
    data_set = {}
    data_set['train'] = dataset_train(anno_pd=train_pd, transforms=data_transforms["train"], resize=224)
    data_set['val'] = dataset_train(anno_pd=val_pd, transforms=data_transforms["val"], resize=224)
    batch_size = 16
    dataloader = {}
    dataloader['train'] = torch.utils.data.DataLoader(data_set['train'], batch_size=batch_size,
                                                      shuffle=True, num_workers=8, collate_fn=collate_fn)
    dataloader['val'] = torch.utils.data.DataLoader(data_set['val'], batch_size=batch_size,
                                                    shuffle=True, num_workers=8, collate_fn=collate_fn)
    # Print/evaluate roughly once per epoch.
    print_eval_step = (train_size // batch_size) + 1
    print("print_eval_step: ", print_eval_step)
    logging.info("print_eval_step:%d " % (print_eval_step))

    # ---- model ----
    model = mobilenet_v2(class_number=4)

    finetune = True
    if os.path.exists(FT_params_path):
        if finetune:
            # Warm-start from the checkpoint but drop the classifier head, so a
            # checkpoint trained with a different class count can still be used.
            pre_weights = torch.load(FT_params_path)
            pre_dict = {k: v for k, v in pre_weights.items() if "classifier" not in k}
            model.load_state_dict(pre_dict, strict=False)
            # NOTE(review): this attaches a 2-way head although the model was
            # built with class_number=4 — confirm the intended class count, and
            # that ``fc`` (rather than ``classifier``) is actually the head
            # attribute of this mobilenet_v2 implementation.
            model.fc = torch.nn.Linear(1280, 2)
        else:
            model.load_state_dict(torch.load(FT_params_path))
        print('Parameters loaded succussful!')
    else:
        print("No Parameters!")

    model = model.cuda()

    # ---- optimizer / loss / LR schedule ----
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    criterion = CrossEntropyLoss()
    # Decay the learning rate by 30% every 5 epochs.
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.70)

    final_path = train(model,
                       start_epoch=0,
                       epoch_num=opt.endepoch,
                       optimizer=optimizer,
                       criterion=criterion,
                       exp_lr_scheduler=exp_lr_scheduler,
                       data_set=data_set,
                       data_loader=dataloader,
                       save_dir=save_dir,
                       print_inter=print_eval_step,
                       val_inter=print_eval_step)

    return final_path



if __name__ == '__main__':
    # optparse has been deprecated since Python 3.2; argparse is its
    # stdlib replacement and keeps the same CLI (-t VALUE / --tag VALUE).
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-t', '--tag',
        help="option a tag to run",
        type=str, default='1')

    opts = parser.parse_args()

    tag = opts.tag
    print(tag, '*' * 100)