from process_data import get_cut_data
from torch.optim.lr_scheduler import StepLR,CosineAnnealingLR
from warmup_scheduler import GradualWarmupScheduler
from torch.utils.data import DataLoader,Dataset
import albumentations as albu
from data_aug import random_rotate_and_crop
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import time
import cv2
import torchvision
import numpy as np
from process_data import dicom2array
# Training hyper-parameters and output locations.
loss_fun = nn.CrossEntropyLoss()


Model_Save_Patch = '../model_save/zhuiti/res34_0719'   # checkpoint directory
Record_File_Dir = './record/zhuiti/res34_0719.txt'     # per-epoch metrics log
Image_Size = (128, 128)
Batch_Size = 16
lr = 1e-4
num_epoch = 300
accumlation_step = 8   # gradient-accumulation window (effective batch = 16 * 8)
Num_Class = 2
train_csv_dir = '../data/csv_label/train150_anno.csv'
val_csv_dir = '../data/csv_label/train51_anno.csv'
os.makedirs(Model_Save_Patch, exist_ok=True)
# os.path.dirname is the robust way to get the log directory; the original
# str.replace(...) trick breaks if the file name substring also occurs
# earlier in the path.
os.makedirs(os.path.dirname(Record_File_Dir), exist_ok=True)


# Cache for the augmentation pipeline: albu.Compose construction is not
# free, and the original rebuilt it on every single training sample.
# Built lazily (not at import time) so importing this module stays cheap
# and safe.
_aug_pipeline = None


def aug_data(image):
    """Apply the random training augmentations to an HxWxC image array.

    Returns the augmented image under albumentations' usual contract
    (same layout as the input).
    """
    global _aug_pipeline
    if _aug_pipeline is None:
        _aug_pipeline = albu.Compose([
            albu.CoarseDropout(max_holes=4, max_width=4, max_height=4),
            albu.HorizontalFlip(),
            albu.VerticalFlip(),
            albu.Downscale(scale_max=0.9, scale_min=0.75),
            albu.GaussNoise(),
            albu.GaussianBlur(),
            albu.MedianBlur(),
            albu.RandomBrightnessContrast(p=0.8),
            albu.ElasticTransform(p=0.3),
        ])
    return _aug_pipeline(image=image)['image']


class Mydataset(Dataset):
    """Dataset over pre-cut (image, label) pairs with random augmentation."""

    def __init__(self, cut_image_list):
        # Each entry is (image, label): an HxWxC numpy image plus its class.
        self.cut_image_list = cut_image_list

    def __getitem__(self, index):
        entry = self.cut_image_list[index]
        img, lbl = entry[0], entry[1]
        lbl = torch.tensor(lbl)
        # Photometric/dropout augmentation, then a small random
        # rotation + crop.
        img = aug_data(img)
        img = random_rotate_and_crop(img, crop_limit=5, angle_limit=30, p=0.8)
        # HWC -> CHW float tensor for the network.
        chw = np.transpose(img, (2, 0, 1))
        return torch.from_numpy(chw).float(), lbl

    def __len__(self):
        return len(self.cut_image_list)

def get_model():
    """Build a ResNet-34 classifier (random init, Num_Class outputs) on the GPU."""
    net = torchvision.models.resnet34(pretrained=False, num_classes=Num_Class)
    net = net.cuda()
    return net

def pred_fun(model, image, size=None):
    """Forward a single HxWxC image through `model`; return the class index.

    size: optional (w, h); when given, the image is resized first.
    """
    with torch.no_grad():
        model.eval()
        if size is not None:
            image = cv2.resize(image, size)
        # HWC -> 1xCxHxW float tensor on the GPU.
        batch = torch.from_numpy(np.transpose(image, (2, 0, 1))).float()
        batch = batch[np.newaxis, ...].cuda()
        logits = model(batch)
        # log_softmax is monotonic, so this is the argmax over class scores
        # of the single sample.
        cls = torch.argmax(F.log_softmax(logits[0], dim=0), dim=0)
        return cls.item()


def val_fun(model, cut_image_list):
    """Accuracy of `model` over samples whose label is not 1.

    Label-1 samples are deliberately skipped, matching the original
    evaluation protocol. Returns 0.0 when no sample is evaluated: the
    original returned np.mean([]) == NaN (plus a RuntimeWarning), which
    silently poisons the caller's `val_Acc > best_result` comparison.
    """
    result = []
    with torch.no_grad():
        model.eval()
        for item in cut_image_list:
            image, label = item[0], item[1]
            if label == 1:
                continue
            result.append(pred_fun(model, image) == label)
    # Guard the empty case explicitly (np.mean([]) -> NaN).
    return float(np.mean(result)) if result else 0.0

def get_zhuiti_category(image_dir, point, spacing, x_range=33, y_range=26):
    """Classify the vertebra at keypoint `point` (x, y) in a T2 sagittal image.

    Takes the image used for keypoint prediction plus the keypoint info and
    directly returns the category of the corresponding vertebra.

    image_dir: path to a .dcm file or an ordinary image file.
    spacing:   pixel spacing used to convert the x_range/y_range half-extents
               into pixel offsets.
    Returns 'v2' when the model predicts class 1, otherwise 'v1'.
    """
    image_dir = image_dir.replace('\\', '/')
    if 'dcm' in image_dir:
        image = dicom2array(image_dir)
        # Replicate the single DICOM channel to 3 channels for the CNN.
        image = np.stack([image, image, image], -1)
    else:
        image = cv2.imread(image_dir)
    model = get_model()
    model.load_state_dict(torch.load(Model_Save_Patch + '/new_.pth'))
    x_half = x_range // spacing
    y_half = y_range // spacing
    # Clamp the crop origin to 0: a keypoint near the border would otherwise
    # produce a negative index, and Python's negative slicing silently wraps
    # around and crops the wrong region instead of failing.
    y0 = max(0, int(point[1] - y_half))
    x0 = max(0, int(point[0] - x_half))
    cut_image = image[y0:int(point[1] + y_half), x0:int(point[0] + x_half)]
    print(cut_image.shape, point)
    try:
        pred = pred_fun(model, cut_image, Image_Size)
    except Exception:
        # Best-effort fallback: an empty/invalid crop defaults to class 1.
        pred = 1
    return 'v2' if pred == 1 else 'v1'


def train(model):
    """Train `model` on the cut-vertebra data, logging per-epoch metrics.

    Checkpoints written under Model_Save_Patch:
    - new_.pth    : state after the most recent epoch
    - best_.pth   : best validation accuracy so far
    - <epoch>.pth : per-epoch snapshot
    """
    cut_image_list_train = get_cut_data(csv_file=train_csv_dir, size=Image_Size)
    cut_image_list_val = get_cut_data(csv_file=val_csv_dir, size=Image_Size)
    best_result = 0
    os.makedirs(Model_Save_Patch, exist_ok=True)
    train_data_loader = DataLoader(dataset=Mydataset(cut_image_list_train),
                                   batch_size=Batch_Size, shuffle=True,
                                   num_workers=0, pin_memory=True)

    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=2e-5)
    # Cosine annealing after a 20-epoch linear warm-up (peak lr = 10 * lr).
    scheduler = CosineAnnealingLR(optimizer, T_max=num_epoch)
    scheduler = GradualWarmupScheduler(optimizer, multiplier=10,
                                       total_epoch=20, after_scheduler=scheduler)
    total_step = len(train_data_loader)

    for epoch in range(num_epoch):
        # model.train() must be re-set every epoch because val_fun()
        # switches the model to eval() mode.
        model.train()
        start_time = time.time()
        print('epoch{}'.format(epoch + 1))
        scheduler.step()
        train_loss = 0
        optimizer.zero_grad()
        for step, (batch_image, batch_label) in enumerate(train_data_loader):
            if torch.cuda.is_available():
                batch_image = batch_image.float().cuda()
                batch_label = batch_label.long().cuda()
            pred = model(batch_image)
            loss = loss_fun(pred, batch_label)
            loss.backward()
            # Gradient accumulation. (step + 1) fixes the original
            # off-by-one: `step % accumlation_step == 0` fired at step 0,
            # updating after a single batch instead of a full window.
            if (step + 1) % accumlation_step == 0:
                optimizer.step()
                optimizer.zero_grad()
            if step % 20 == 0 and epoch < 3:
                print('Step [{}/{}],Loss:{:.7f}'.format(step + 1, total_step,
                                                        loss.item()))
            train_loss += loss.item()
        # Flush gradients left over when total_step is not a multiple of
        # accumlation_step; the original silently carried them into the
        # next epoch.
        optimizer.step()
        optimizer.zero_grad()

        train_loss = train_loss / total_step
        print('Epoch [{}/{}],Loss:{:.7f}'.format(epoch + 1, num_epoch, train_loss))

        torch.cuda.empty_cache()
        val_Acc = val_fun(model, cut_image_list_val)
        train_Acc = val_fun(model, cut_image_list_train)
        print('--train_loss--', train_loss, '--val_Acc--', val_Acc, '--train_Acc--', train_Acc)
        # Context manager guarantees the log file is closed even if a
        # write raises (the original open/close pair could leak).
        with open(Record_File_Dir, 'a') as record_file:
            record_file.write(
                'epoch--' + str(epoch + 1) + '--train_loss--' + str(train_loss) + '--val_ACC--' + str(val_Acc))
            record_file.write('\n')
            record_file.write(
                'epoch--' + str(epoch + 1) + '--train_ACC--' + str(train_Acc))
            record_file.write('\n')

        torch.cuda.empty_cache()

        torch.save(model.state_dict(), (Model_Save_Patch + '/' + 'new_' + '.pth'))
        if val_Acc > best_result:
            print('get better model with acc of ', val_Acc)
            torch.save(model.state_dict(), (Model_Save_Patch + '/' + 'best_' + '.pth'))
            best_result = val_Acc
        print('---------------------------------------------')

        if epoch < 3:
            print('use time -------', time.time() - start_time)
        # BUG FIX: the original saved str(num_epoch + 1) + '.pth', i.e. the
        # SAME file ('301.pth') every epoch; a per-epoch snapshot was
        # clearly intended.
        torch.save(model.state_dict(), Model_Save_Patch + '/' + str(epoch + 1) + '.pth')
        torch.cuda.empty_cache()


if __name__ == '__main__':
    # --- train the model ---
    # model = get_model()
    # train(model)

    # --- evaluate the latest checkpoint on the validation split ---
    # The original built a model before this point and immediately threw it
    # away, and also loaded the training split from disk without using it;
    # both wasted work with no observable effect.
    cut_image_list_val = get_cut_data(csv_file=val_csv_dir, size=Image_Size)
    model = get_model()
    model.load_state_dict(torch.load(Model_Save_Patch + '/new_.pth'))
    acc = val_fun(model, cut_image_list_val)
    print(acc)