import os
import torch
import json
from torch import nn
import numpy as np
import pandas as pd
from torcheval.metrics.functional import binary_auroc, multiclass_auroc
from sklearn.model_selection import StratifiedGroupKFold
from sklearn.model_selection import train_test_split
import torch.optim as optim

from utils import set_seed, print_trainable_parameters
from models import setup_model
from training import fetch_scheduler, valid_one_epoch, run_training
from models import ISICModel, ISICModelEdgnet, setup_model
from datasets import prepare_loaders, select_data
from augmentations import get_augmentations

# Instantiate the loss module once at import time instead of on every call.
# NOTE(review): BCELoss expects `outputs` to already be probabilities in
# [0, 1] (i.e. the model head ends in a sigmoid) — confirm against the model.
_bce_loss = nn.BCELoss()

def criterion(outputs, targets):
    """Mean binary cross-entropy between predicted probabilities and targets.

    Args:
        outputs: predicted probabilities, same shape as ``targets``.
        targets: float ground-truth labels in [0, 1].

    Returns:
        Scalar loss tensor (mean reduction).
    """
    return _bce_loss(outputs, targets)

# Fall back to CPU when CUDA is unavailable so the script can still be
# imported and debugged on a CPU-only machine (the original hard-coded
# cuda:7 and crashed at the first GPU operation without CUDA).
device = torch.device("cuda:7" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
if torch.cuda.is_available():
    # Report the GPU actually selected (index 7), not device 0.
    print(f"GPU: {torch.cuda.get_device_name(device.index)}")
    print(f"Number of GPUs: {torch.cuda.device_count()}")

# Backbone family selector; EVA uses a larger input resolution than EdgeNeXt.
# MODEL_NAME = "EVA"
MODEL_NAME = "EDGENEXT"

# EVA02 was fine-tuned on 336x336 crops, EdgeNeXt on 256x256.
_img_size = 336 if MODEL_NAME == 'EVA' else 256

# All run hyper-parameters in one place so they can be logged/serialized.
CONFIG = {
    "seed": 42,
    "epochs": 50,
    "img_size": _img_size,
    "train_batch_size": 32,
    "valid_batch_size": 64,
    "num_workers": 16,
    "learning_rate": 1e-4,
    "scheduler": 'CosineAnnealingLR',
    "min_lr": 1e-6,
    "T_max": 2000,
    "weight_decay": 1e-6,
    "device": device,
    "save_period": 5,
    # Which ISIC challenge years to train on.
    "tasks": [2020],  # [2018, 2019, 2020, 2024],
    # Path to a checkpoint to resume from, or None for a fresh run.
    "resume": None,
    # 'balance_type': False,
    # "data_per_task": 20000,
}

# Output directory for checkpoints and the input CSV splits.
save_path = "../models/CNN_2020_norepeat"
train_data_path = '../data/data_train.csv'
# train_data_path = '../data/data_train_repeated.csv'
test_data_path = '../data/data_test.csv'

# Regularization knobs passed to the backbone (disabled for this run).
drop_path_rate = 0
drop_rate = 0

# Seed python/numpy/torch RNGs for reproducibility.
set_seed(CONFIG['seed'])

# Pick the wrapper class, timm model id, and local checkpoint path for the
# selected backbone family.
if MODEL_NAME == 'EVA':
    ISICModelPrep = ISICModel
    model_name = "eva02_small_patch14_336.mim_in22k_ft_in1k"
    model_path = '/home/lihao/jlk/emoji/isic-2024/models/eva02_small_patch14_336/pytorch_model.bin'
else:
    ISICModelPrep = ISICModelEdgnet
    model_name = "edgenext_base.in21k_ft_in1k"
    model_path = '/home/lihao/jlk/emoji/isic-2024/models/edgenext_base/pytorch_model.bin'

if not os.path.exists(model_path):
    # Download the missing weights from ModelScope.
    # BUG FIX: the original always downloaded the EVA checkpoint even when
    # the missing file belonged to EdgeNeXt; download the selected model.
    from modelscope import snapshot_download
    model_dir = snapshot_download(f'timm/{model_name}', cache_dir='.cache/modelscope/hub')

# Read both splits, then restrict each to the tasks selected in CONFIG.
df_train = pd.read_csv(train_data_path)
df_test = pd.read_csv(test_data_path)
wanted_tasks = CONFIG['tasks']
df_train = df_train[df_train['task'].isin(wanted_tasks)]
df_test = df_test[df_test['task'].isin(wanted_tasks)]

# Train/valid augmentation pipelines for the configured image size.
data_transforms = get_augmentations(CONFIG)

# Make sure the checkpoint directory exists before training starts.
models_folder = save_path
os.makedirs(models_folder, exist_ok=True)

# Build the backbone with the chosen wrapper class and move it to `device`.
model = setup_model(
    model_name,
    model_path,
    drop_path_rate=drop_path_rate,
    drop_rate=drop_rate,
    model_maker=ISICModelPrep,
    device=device,
)
print_trainable_parameters(model)

# Data loaders over the filtered train/test frames.
train_loader, valid_loader = prepare_loaders(df_train, df_test, CONFIG, data_transforms)

# Adam optimizer plus the LR schedule named in CONFIG.
optimizer = optim.Adam(
    model.parameters(),
    lr=CONFIG['learning_rate'],
    weight_decay=CONFIG['weight_decay'],
)
scheduler = fetch_scheduler(optimizer, CONFIG)

if CONFIG['resume'] is not None:
    # Resume model/optimizer/scheduler state from a saved checkpoint.
    # map_location ensures tensors saved on another GPU (e.g. cuda:7) are
    # restored onto the device this run actually uses, instead of failing
    # or silently landing on the original GPU index.
    checkpoint = torch.load(CONFIG['resume'], map_location=device)
    model.load_state_dict(checkpoint['model'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    scheduler.load_state_dict(checkpoint['scheduler'])
    # Continue counting epochs from where the checkpoint left off.
    start_epoch = checkpoint['epoch'] + 1
else:
    start_epoch = 1

# Run the full training loop; returns the trained model and per-epoch history.
model, history = run_training(
    train_loader,
    valid_loader,
    model,
    optimizer,
    scheduler,
    CONFIG=CONFIG,
    model_folder=models_folder,
    tolerance_max=20,
    start_epoch=start_epoch,
)

# Persist the final weights and the training history alongside them.
torch.save(model.state_dict(), os.path.join(models_folder, "model"))
history_path = os.path.join(models_folder, "result.json")
with open(history_path, 'w', encoding='utf-8') as file:
    json.dump(history, file, ensure_ascii=False, indent=4)

# One last validation pass to report the final AUROC.
val_epoch_loss, val_epoch_auroc = valid_one_epoch(model, valid_loader, device, 1, optimizer, CONFIG)
print('Final Result:', val_epoch_auroc)
