from torch.optim.lr_scheduler import StepLR,CosineAnnealingLR
from warmup_scheduler import GradualWarmupScheduler
from sklearn.metrics import f1_score
from torch.utils.data import DataLoader,Dataset
import albumentations as albu
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import time
import random
import cv2
import torchvision
import numpy as np
import pandas as pd
from scipy import ndimage

loss_fun = nn.CrossEntropyLoss()


# --- training configuration ---
Model_Save_Patch = '../model_save/jizhu_category/res18_baseline'  # checkpoint directory
Record_File_Dir = './record/jizhu_category/res18_baseline.txt'    # training-log text file
Image_Size = (1024, 512)   # (width, height) as passed to cv2.resize
Batch_Size = 8
lr = 1e-4
num_epoch = 50
accumlation_step = 4       # gradient-accumulation interval (original spelling kept for callers)
Num_Class = 2              # per get_data_list: 0 = zhengwei (frontal), 1 = cewei (lateral)
os.makedirs(Model_Save_Patch, exist_ok=True)
# os.path.dirname is the robust way to obtain the log directory; the original
# str.replace(basename, '') would mis-fire if the file name also occurred
# earlier in the path.
os.makedirs(os.path.dirname(Record_File_Dir), exist_ok=True)


def random_rotate_and_crop(image, heatmap=None, crop_limit=20, p=1., angle_limit=15):
    """Randomly rotate, crop, and rescale an image (and optional heatmap).

    With probability ``p``: rotate by a random angle in
    ``[-angle_limit, angle_limit)`` (rotation enlarges the canvas so no pixels
    are lost), cut an independent random margin of ``1..crop_limit-1`` pixels
    from each side, then zoom back to the original spatial size so downstream
    prediction crops keep the training resolution.  ``heatmap`` (channel-first,
    first axis untouched) receives the same rotation/crop/zoom.

    Returns ``image`` alone, or ``(image, heatmap)`` when a heatmap was given.
    """
    if np.random.uniform() < p:
        h0, w0 = image.shape[0], image.shape[1]
        angle = np.random.randint(-angle_limit, angle_limit)
        image = ndimage.rotate(image, angle=angle, axes=(1, 0))

        # four independent margins: top / bottom / left / right
        top, bottom, left, right = (np.random.randint(1, crop_limit) for _ in range(4))

        image = image[top:-bottom, left:-right, :]
        h1, w1 = image.shape[0], image.shape[1]
        # zoom factors restore the pre-rotation size exactly
        image = ndimage.zoom(image, (h0 / h1, w0 / w1, 1), order=2)
        if heatmap is not None:
            heatmap = ndimage.rotate(heatmap, angle=angle, axes=(2, 1))
            heatmap = heatmap[:, top:-bottom, left:-right]
            heatmap = ndimage.zoom(heatmap, (1, h0 / h1, w0 / w1), order=1)
    return (image, heatmap) if heatmap is not None else image




def aug_data(image):
    """Apply the photometric/geometric training augmentations to ``image``.

    The albumentations ``Compose`` pipeline is built once on first call and
    cached on the function object, instead of being re-created for every
    sample as the original did (the transform list is invariant).
    """
    if not hasattr(aug_data, '_pipeline'):
        aug_data._pipeline = albu.Compose([
            albu.CoarseDropout(max_holes=10, max_width=10, max_height=10),
            albu.HorizontalFlip(),
            albu.VerticalFlip(),
            albu.Downscale(scale_max=0.9, scale_min=0.5),
            albu.GaussNoise(),
            albu.GaussianBlur(),
            albu.MedianBlur(),
            albu.RandomBrightnessContrast(p=0.9),
            albu.ElasticTransform(p=0.5),
        ])
    return aug_data._pipeline(image=image)['image']


class Mydataset(Dataset):
    """Dataset over in-memory ``[image, label]`` pairs with on-the-fly augmentation."""

    def __init__(self, cut_image_list):
        # each element: [HWC image array, int label]
        self.cut_image_list = cut_image_list

    def __getitem__(self, index):
        item = self.cut_image_list[index]
        sample, target = item[0], item[1]
        sample = aug_data(sample)
        sample = random_rotate_and_crop(sample, crop_limit=5, angle_limit=30, p=1.)
        # HWC -> CHW float tensor for the network
        tensor = torch.from_numpy(np.transpose(sample, (2, 0, 1))).float()
        return tensor, torch.tensor(target)

    def __len__(self):
        return len(self.cut_image_list)

def get_model():
    """Build a randomly-initialised ResNet-18 with ``Num_Class`` outputs, moved to GPU."""
    net = torchvision.models.resnet18(pretrained=False, num_classes=Num_Class)
    return net.cuda()

def pred_fun(model, image, size=None):
    """Run a single-image forward pass and return the predicted class index (int).

    ``image`` is an HWC array; when ``size`` is given it is cv2-resized first.
    The model is switched to eval mode and no gradients are tracked.
    """
    with torch.no_grad():
        model.eval()
        if size is not None:
            image = cv2.resize(image, size)
        batch = torch.from_numpy(np.transpose(image, (2, 0, 1))).float()[np.newaxis, ...].cuda()
        logits = model(batch)
        # argmax over the single sample's log-probabilities
        cls = torch.argmax(F.log_softmax(logits[0], dim=0), dim=0)
        return cls.item()


def val_fun(model, cut_image_list):
    """Evaluate ``model`` over a list of ``[image, label]`` pairs.

    Returns ``(accuracy, f1)`` where ``f1`` is the per-class F1 array
    (``average=None``).
    """
    hits = []
    preds = []
    labels = []
    with torch.no_grad():
        model.eval()
        for item in cut_image_list:
            img, lbl = item[0], item[1]
            p = pred_fun(model, img)
            hits.append(p == lbl)
            preds.append(p)
            labels.append(lbl)
    f1 = f1_score(labels, preds, average=None)
    return np.mean(hits), f1

def get_data_list(csv_file_1 = '../data/csv_label/0522/zhengwei/zhengwei_transform_train.csv',
                  csv_file_2 = '../data/csv_label/0522/cewei/cewei_transform_train.csv',
                  image_path = '../data/data/jpg_file_0522',size = Image_Size):
    """Load frontal (label 0) and lateral (label 1) images listed in two CSVs.

    The CSVs store ``.npz`` paths from another pipeline stage; only the base
    name is kept and mapped to a ``.png`` under ``image_path``.  Every image is
    resized to ``size`` and the shuffled list of ``[image, label]`` pairs is
    returned.

    Raises:
        FileNotFoundError: if an image file is missing or unreadable
            (``cv2.imread`` returns None) — the original code crashed later
            with a cryptic cv2.resize error instead.
    """
    def _image_paths(csv_file):
        # map recorded .npz paths to local .png files by base name
        names = pd.read_csv(csv_file)['image_path'].values.tolist()
        return [image_path + '/' + p.split('/')[-1].replace('npz', 'png') for p in names]

    def _load(paths, label):
        # read + resize each image and tag it with the class label
        samples = []
        for path in paths:
            img = cv2.imread(path)
            if img is None:
                raise FileNotFoundError('cannot read image: ' + path)
            samples.append([cv2.resize(img, size), label])
        return samples

    zheng_list = _image_paths(csv_file_1)
    cewei_list = _image_paths(csv_file_2)
    print('num of zhengwei is ',len(zheng_list),'--num of cewei is ',len(cewei_list))
    data_list = _load(zheng_list, 0) + _load(cewei_list, 1)
    random.shuffle(data_list)
    return data_list
    
    


    
    

def train(model):
    """Train ``model`` on the frontal/lateral view classification task.

    Loads the train/val image lists, then runs ``num_epoch`` epochs of Adam
    with cosine annealing plus a 3-epoch warmup and gradient accumulation.
    Metrics are appended to ``Record_File_Dir``; checkpoints are written under
    ``Model_Save_Patch``: ``new_.pth`` every epoch, ``best_.pth`` on a new best
    validation accuracy, and a per-epoch ``<epoch>.pth``.
    """
    cut_image_list_train = get_data_list(size=Image_Size)
    cut_image_list_val = get_data_list(csv_file_1='../data/csv_label/0522/zhengwei/zhengwei_transform_val.csv',
                                       csv_file_2='../data/csv_label/0522/cewei/cewei_transform_val.csv',
                                       size=Image_Size)
    best_result = 0
    os.makedirs(Model_Save_Patch, exist_ok=True)
    train_data_loader = DataLoader(dataset=Mydataset(cut_image_list_train), batch_size=Batch_Size,
                                   shuffle=True, num_workers=0, pin_memory=True)

    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=2e-5)
    scheduler = CosineAnnealingLR(optimizer, T_max=num_epoch)
    scheduler = GradualWarmupScheduler(optimizer, multiplier=3, total_epoch=3, after_scheduler=scheduler)
    total_step = len(train_data_loader)

    for epoch in range(num_epoch):
        # validation sets model.eval(); re-enable train mode every epoch
        model.train()
        start_time = time.time()
        print('epoch{}'.format(epoch + 1))
        scheduler.step()  # warmup scheduler is stepped once per epoch
        train_loss = 0
        for step, (batch_image, batch_label) in enumerate(train_data_loader):
            if torch.cuda.is_available():
                batch_image = batch_image.float().cuda()
                batch_label = batch_label.long().cuda()
            pred = model(batch_image)
            loss = loss_fun(pred, batch_label)
            loss.backward()
            # gradient accumulation: update once every `accumlation_step`
            # batches.  (step + 1) fixes the original off-by-one that stepped
            # right after the first batch and left stale gradients at epoch end.
            if (step + 1) % accumlation_step == 0:
                optimizer.step()
                optimizer.zero_grad()
            if step % 20 == 0 and epoch < 3:
                print('Step [{}/{}],Loss:{:.7f}'.format(step + 1, total_step, loss.item()))
            train_loss += loss.item()
        # flush any remaining accumulated gradients from a trailing partial group
        if total_step % accumlation_step != 0:
            optimizer.step()
            optimizer.zero_grad()

        train_loss = train_loss / total_step
        print('Epoch [{}/{}],Loss:{:.7f}'.format(epoch + 1, num_epoch, train_loss))

        torch.cuda.empty_cache()
        val_Acc, val_f1 = val_fun(model, cut_image_list_val)
        train_Acc, train_f1 = val_fun(model, cut_image_list_train)
        print('--train_loss--', train_loss, '--val_Acc--', val_Acc, '--train_Acc--', train_Acc,
              '--val_f1--', val_f1, '--train_f1--', train_f1)
        with open(Record_File_Dir, 'a') as record_file:
            record_file.write(
                'epoch--' + str(epoch + 1) + '--train_loss--' + str(train_loss)
                + '--val_ACC--' + str(val_Acc) + '--val_f1--' + str(val_f1))
            record_file.write('\n')
            record_file.write(
                'epoch--' + str(epoch + 1) + '--train_ACC--' + str(train_Acc) + '--train_f1--' + str(train_f1))
            record_file.write('\n')

        torch.cuda.empty_cache()

        torch.save(model.state_dict(), (Model_Save_Patch + '/' + 'new_' + '.pth'))
        if val_Acc > best_result:
            print('get better model with acc of ', val_Acc)
            torch.save(model.state_dict(), (Model_Save_Patch + '/' + 'best_' + '.pth'))
            best_result = val_Acc
        print('---------------------------------------------')

        if epoch < 3:
            print('use time -------', time.time() - start_time)
        # per-epoch checkpoint: the original wrote str(num_epoch + 1) and thus
        # overwrote the same '51.pth' every epoch, while predict() loads '18.pth'
        torch.save(model.state_dict(), Model_Save_Patch + '/' + str(epoch + 1) + '.pth')
        torch.cuda.empty_cache()


def predict(image, model_path='../model_save/jizhu_category/res18_baseline/18.pth'):
    """Classify the view of a spine image: returns 0 for zhengwei (frontal), 1 for cewei (lateral).

    ``model_path`` defaults to the checkpoint the original hard-coded; pass a
    different path to evaluate another snapshot (backward compatible).
    """
    model = get_model()
    model.load_state_dict(torch.load(model_path))
    return pred_fun(model, image, Image_Size)
    

if __name__ == '__main__':
    # Train the classifier from scratch.  To evaluate an existing checkpoint
    # instead: build the val list with get_data_list(...), load
    # Model_Save_Patch + '/best_.pth' into get_model(), and call val_fun.
    model = get_model()
    train(model)