# -*- coding: utf-8 -*-

import torch
import torch.nn as nn

import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.tensorboard import SummaryWriter
import torchvision.transforms as transforms
from utils import EarlyStopping
from bone_data.DataLoad import BoneDataSet
from model.BoneageModel import BoneAgeNet
from torch.utils.data import Dataset, DataLoader
import os
import datetime
import matplotlib.pyplot as plt
import math
from tqdm import tqdm
import torch
import pandas as pd
import numpy as np
import cv2
from PIL import Image

from multiprocessing.spawn import freeze_support
import datetime
import numpy as np
import ttach as tta

# For reproducibility use the seeds below
torch.manual_seed(1498920)
torch.cuda.manual_seed(1498920)
torch.backends.cudnn.deterministic=True

# Hyperparameter settings
save_path = './checkpoint/'  # directory containing trained model checkpoints
data_root = '/mnt/datawow/lyl/images/bone/RSNA-BoneAge'  # RSNA Bone Age dataset root
img_size = 500   # images are resized to img_size x img_size before the model
batch_size = 4   # test batch size (progress prints assume this value)
test_img_path = os.path.join(data_root, 'boneage-test-dataset/boneage-test-dataset/')
#test_csv_path = os.path.join(data_root, 'boneage-test-dataset.csv')
test_csv_path = os.path.join(data_root, 'boneage-test-GT.csv')  # CSV with ground-truth bone ages

# Augmentation list (train-time only; kept here for reference)
# transforms.RandomAffine(0, translate=(0.1, 0.1)), # translation <= 10%
aug_list=[transforms.RandomRotation(20)] # rotate <= 20 degrees
aug_list2 = [transforms.Compose([transforms.Pad(50), transforms.Resize((img_size, img_size))])]

# Transform settings: train uses random augmentation, validation/test only resizes
train_composed = transforms.Compose([transforms.RandomApply(aug_list),transforms.Resize((img_size,img_size)),transforms.RandomApply(aug_list2),transforms.ToTensor()])
validation_composed = transforms.Compose([transforms.Resize((img_size,img_size)),transforms.ToTensor()])

# Test data: read ground-truth CSV and build the DataLoader (no shuffling so
# predictions line up row-for-row with df_test for the final CSV export).
df_test = pd.read_csv(test_csv_path)
df_test['male'] = df_test['male'].astype(np.int32)  # gender column as 0/1 int
testset = BoneDataSet(test_img_path, df_test, validation_composed)
test_data_loader = DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=4)


# Model: weights are restored from a checkpoint in __main__ before use
model = BoneAgeNet()



# Loss: L1 (mean absolute error), matching the bone-age MAE metric.
# No optimizer/scheduler needed — this script only evaluates.
criterion = nn.L1Loss()

#%%


def eval(model, test_loader, device=None):
    """Evaluate the model on the test set with five-crop test-time augmentation.

    NOTE: the name shadows the builtin ``eval``; kept for backward
    compatibility with existing callers.

    Args:
        model: network taking ``(image, gender)`` and returning a bone-age
            prediction per sample (e.g. BoneAgeNet).
        test_loader: DataLoader yielding dicts with ``'image'``, ``'gender'``
            and ``'bone_age'`` tensors.
        device: device to run inference on. Defaults to the device of the
            model's parameters (previously this was read from a module-level
            ``device`` global set in ``__main__``).

    Returns:
        tuple: ``(result_array, mean_loss)`` — per-sample predictions averaged
        over the five crops (1-D numpy array, same order as the loader), and
        the L1 loss averaged over all batches.
    """
    model.eval()
    if device is None:
        # Backward-compatible default: run wherever the model already lives.
        device = next(model.parameters()).device

    result_array = np.array([])
    test_loss = 0.0
    num_batches = 0

    # Hoisted out of the batch loop — the TTA pipeline is loop-invariant.
    # (Renamed from `transforms`, which shadowed the torchvision module.)
    tta_transforms = tta.Compose([tta.FiveCrops(crop_height=470, crop_width=470)])

    with torch.no_grad():
        for batch_no, batch in enumerate(test_loader):
            # Load batch
            img = batch['image'].to(device)
            gender = batch['gender'].to(device)
            age = batch['bone_age'].to(device)

            crop_preds = []
            batch_loss = 0.0

            for transformer in tta_transforms:
                # Forward propagation on each of the five crops
                augmented_image = transformer.augment_image(img)
                output = model(augmented_image, gender)
                loss = criterion(output, age)
                batch_loss += loss.item()

                preds = output.cpu().numpy().reshape(-1)
                crop_preds.append(preds)

            # Average prediction over the five crops.
            preds = sum(crop_preds, 0.0) / len(crop_preds)
            result_array = np.concatenate((result_array, preds))

            # Average the loss over crops too (the original accumulated only
            # the LAST crop's loss for each batch).
            test_loss += batch_loss / len(crop_preds)
            num_batches += 1

            if (batch_no + 1) % 25 == 0:
                # Progress print every 25 batches (100 images at batch_size=4);
                # uses batch_size / dataset size instead of hard-coded 4 and 200.
                print('{}/{}, batch loss: {}'.format(
                    batch_size * (batch_no + 1), len(test_loader.dataset), loss.item()))

    # Divide by the actual number of batches instead of the hard-coded 50.
    return result_array, test_loss / max(num_batches, 1)

#%%
if __name__ == '__main__':
    freeze_support()
    # Inference is pinned to CPU; uncomment below to prefer GPU when available.
    # device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    device = torch.device('cpu')
    ckp_name = 'epoch-320-loss-3.2523-val_loss-16.2333.pt'
    #ckp_name = 'epoch-155-loss-3.5556-val_loss-12.3412.pt'

    # os.path.join handles the separator instead of raw string concatenation;
    # map_location keeps CUDA-saved checkpoints loadable on CPU.
    checkpoint = torch.load(os.path.join(save_path, ckp_name), map_location=device)
    model.load_state_dict(checkpoint['model_state_dict'])
    model.to(device)

    print('{}\n==================={}===========test start==============================\n'.format(datetime.datetime.now(),ckp_name))

    # Run TTA evaluation, then align predictions row-for-row with the
    # ground-truth frame (loader is unshuffled, so order matches df_test).
    result_array, test_loss = eval(model, test_data_loader)
    predict_df = df_test.copy()
    predict_df['output'] = result_array
    predict_df['output'] = np.round(predict_df['output'], decimals=2)
    predict_df['MAE'] = np.abs(predict_df['output']-predict_df['boneage'])

    print('MAE={}'.format(predict_df['MAE'].mean()))

    predict_df.to_csv('predict.csv', sep=',', na_rep='NaN')
