#coding=utf-8
import os,glob
import pandas as pd
from sklearn.model_selection import train_test_split
from dataset.dataset import collate_fn, dataset,dataset_train
import torch
import torch.utils.data as torchdata
from torchvision import datasets, models, transforms
from torchvision.models import resnet50,resnet101
import torch.optim as optim
from torch.optim import lr_scheduler
from utils.train_util import train, trainlog
from torch.nn import CrossEntropyLoss
import logging
# NOTE(review): these project-local imports deliberately shadow torchvision's
# resnet50 imported above — the custom models.resnet variant is the one used below.
from models.resnet import resnet50
from models.resnet import resnet18
from models.resnet import resnet34
from models.MobileNet import MobileNet
from dataset.data_aug import *
# Restrict this process to physical GPU #2. Must be set before any CUDA call
# initializes the driver, which is why it sits with the imports.
os.environ["CUDA_VISIBLE_DEVICES"] = "2"

# Directory where checkpoints and the training log are written.
save_dir = './save_models'
# exist_ok=True replaces the check-then-create pair, avoiding the TOCTOU race
# (another process could create the directory between exists() and makedirs()).
os.makedirs(save_dir, exist_ok=True)
logfile = '%s/trainlog.log'%save_dir
# Route logging output to both console and the log file (project helper).
trainlog(logfile)


# Dataset layout: <rawdata_root>/<class_id>/<image>.jpg — the parent folder
# name is the integer class label.
rawdata_root = '/data1/sheng/1116/04_geli_switch/4'
img_list = glob.glob(os.path.join(rawdata_root, '*', '*.jpg'))
print(len(img_list))

all_pd = pd.DataFrame(img_list, columns=["image_path"])
# Label = name of the image's parent directory. os.path is used instead of
# splitting on '/' so the parsing is separator-portable and robust to
# trailing-slash differences.
all_pd["label"] = all_pd["image_path"].apply(
    lambda x: int(os.path.basename(os.path.dirname(x))))

# Stratified 90/10 split: the class distribution of `label` is preserved in
# both train and validation sets; random_state pins the split for reruns.
train_pd, val_pd = train_test_split(all_pd, test_size=0.1, random_state=43,
                                    stratify=all_pd['label'])

print(train_pd.shape)
print(val_pd.shape)
train_size = train_pd.shape[0]
# Print/validate roughly once per epoch worth of batches (batch size 32 below).
print_eval_step = (train_size // 32) + 1

# Data augmentation / preprocessing pipelines.
# Both splits pad-and-resize to 272x272 and normalize with ImageNet statistics;
# only the training split applies a small random rotation.
data_transforms = {}
data_transforms['train'] = Compose([
    RandomRotate(angles=(-5, 5)),
    ExpandBorder(size=(272, 272), resize=True),
    Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
data_transforms['val'] = Compose([
    ExpandBorder(size=(272, 272), resize=True),
    Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])

# Wrap each split's DataFrame in the project dataset and build its DataLoader.
# NOTE(review): shuffle=True on the validation loader reproduces the original
# behavior (harmless for accuracy, just non-deterministic batch order).
data_set = {
    'train': dataset_train(anno_pd=train_pd, transforms=data_transforms['train']),
    'val': dataset_train(anno_pd=val_pd, transforms=data_transforms['val']),
}
dataloader = {
    split: torch.utils.data.DataLoader(ds, batch_size=32, shuffle=True,
                                       num_workers=4, collate_fn=collate_fn)
    for split, ds in data_set.items()
}
'''Model'''
# Project-local ResNet-50 (models.resnet), initialized from pretrained weights.
model = resnet50(pretrained=True)
# Adaptive pooling makes the backbone accept inputs other than 224x224
# (the pipeline above produces 272x272 images).
model.avgpool = torch.nn.AdaptiveAvgPool2d(output_size=1)
# Replace the classification head for this task's 3 classes.
model.fc = torch.nn.Linear(model.fc.in_features, 3)

base_lr = 0.01
# Set to a checkpoint path (e.g. './save_models/first.pth') to resume finetuning.
resume = None
if resume:
    logging.info('resuming finetune from %s'% resume)
    # map_location='cpu' lets checkpoints saved on any device load here even
    # when that device is not visible; the model is moved to GPU right after.
    model.load_state_dict(torch.load(resume, map_location='cpu'))
model = model.cuda()

optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=1e-5)
criterion = CrossEntropyLoss()
# Decay the learning rate by 5% after every epoch.
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.95)

# Run the training loop (utils.train_util.train); printing/validation happen
# roughly once per epoch via print_eval_step computed above.
result = train(
    model,
    epoch_num=30,
    start_epoch=0,
    optimizer=optimizer,
    criterion=criterion,
    exp_lr_scheduler=exp_lr_scheduler,
    data_set=data_set,
    data_loader=dataloader,
    save_dir=save_dir,
    print_inter=print_eval_step,
    val_inter=print_eval_step,
)
print(result, ' over ~_~ ')
