from datasets import CustomDataset, TestDataset
# from tlhengine.utils import random_split
import lightning as L
from engine import Model
from torch.utils.data.dataloader import DataLoader
from lightning.pytorch.loggers import TensorBoardLogger
from lightning.pytorch.callbacks import EarlyStopping, ModelCheckpoint
from utils import get_hparms, EMA
from torchvision import transforms
from torch.utils.data import ConcatDataset
import torch
import tlhengine.utils as tutils

# A dict subclass whose values can be read and written with dot notation,
# so that editors such as VS Code can offer attribute completion.
class Hparams(dict):
    """A dict whose keys are also accessible as attributes (``h.key``).

    NOTE(review): this local definition is immediately shadowed by the
    ``from tlhengine.utils import Hparams`` import just below — confirm
    which implementation is actually intended to be used.
    """

    def __getattr__(self, attr):
        # Translate KeyError -> AttributeError so that hasattr(), copy,
        # pickle and other attribute-protocol users behave correctly for
        # missing keys (they only catch AttributeError).
        try:
            return self[attr]
        except KeyError:
            raise AttributeError(attr) from None

    def __setattr__(self, attr, value):
        # Attribute assignment writes straight into the dict storage.
        self[attr] = value
from tlhengine.utils import Hparams


# Hyper-parameters for this run.
# NOTE(review): 'ramdom_erase_rate' is a typo for 'random_erase_rate', but the
# key is read under this exact spelling later in the file, so it is preserved.
hparams = Hparams({
    'arch': 'dino_small_patch16',   # backbone architecture name
    'no_pretrain': True,            # skip loading pretrained weights
    'weight_path': '/root/code/tianchi/weights/best.pth',
    'deploy': 'vanilla',
    'batch_size': 16,
    'epochs': 300,
    'ta': True,                     # enable TrivialAugmentWide
    'frozen': True,
    'ramdom_erase_rate': 0.5,       # [sic] probability for RandomErasing
    'lr': 1e-3,
    'weight_decay': 1e-2,
    'optimizer': 'AdamW',
})

# ----- Logging, callbacks, trainer -----
tb_logger = TensorBoardLogger(save_dir='log', name='noname', )
loggers = [tb_logger]

# Metric tracked by both early stopping and checkpointing (higher is better).
monitor = 'val_acc/all'
early_stop = EarlyStopping(monitor=monitor, mode='max', verbose=True,
                           patience=30, min_delta=1e-3, strict=True)

# Keep the single best checkpoint by the monitored metric, plus the last epoch.
ckpt_callback = ModelCheckpoint(
    save_last=True, save_top_k=1, filename='best_{val_acc/all}',
    auto_insert_metric_name=False,  monitor=monitor, mode='max',
)

# Exponential moving average of the model weights (decay 0.99).
ema_callback = EMA(0.99, )
# FIX: ema_callback was constructed but never registered with the Trainer,
# so EMA had no effect on training; it is now included in the callbacks list.
trainer = L.Trainer(devices=1, max_epochs=hparams.epochs, precision='16-mixed',
                    logger=loggers,
                    callbacks=[early_stop, ckpt_callback, ema_callback])

####### DATA #########
# Training-time pipeline: augmentations on the PIL image, then conversion to a
# normalized float tensor, then (optionally) tensor-level random erasing.
_train_tfms = []
if hparams.ta:
    # TrivialAugmentWide must run first, while the input is still a PIL image.
    _train_tfms.append(transforms.TrivialAugmentWide())
_train_tfms += [
    transforms.RandomResizedCrop((176, 176)),
    transforms.RandomHorizontalFlip(),
    transforms.RandomVerticalFlip(),
    transforms.PILToTensor(),
    transforms.ConvertImageDtype(torch.float32),
    # ImageNet channel statistics.
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
if hparams.ramdom_erase_rate > 0:
    # [sic] key spelling; RandomErasing operates on the normalized tensor.
    _train_tfms.append(transforms.RandomErasing(p=hparams.ramdom_erase_rate))
train_preprocessing = transforms.Compose(_train_tfms)


# Evaluation pipeline: deterministic resize (no augmentation) followed by the
# same tensor conversion and normalization as training. Train crops at 176x176
# while eval resizes to 224x224 — presumably the train-small / test-large
# recipe; confirm this asymmetry is intentional.
val_preprocessing = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.PILToTensor(),
    transforms.ConvertImageDtype(torch.float32),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

# Default regime: train on the original 'data/train' split only ("few data").
train_set = CustomDataset('data/train', train_preprocessing)
# train_set, val_set = random_split(train_set, [0.8, 0.2])
# NOTE(review): validation images come from 'data/crawl' while training uses
# 'data/train' — presumably an intentional harder/real-world val set; confirm.
val_set = CustomDataset('data/crawl', val_preprocessing, 'data/crawl/val.txt')

# Alternative regime ("more data"): concatenate the original and crawled sets.
# crawl_train_set = CustomDataset('data/', train_preprocessing, 'data/crawl/train.txt')
# org_train_set = CustomDataset('data/train', train_preprocessing, )
# train_set = ConcatDataset([ org_train_set, crawl_train_set])
# print(f"concated trainset length: {len(train_set)}")
# val_set = CustomDataset('data/crawl', val_preprocessing, 'data/crawl/val.txt')



# Mini-batch loaders; only the training stream is shuffled.
train_loader = DataLoader(
    train_set,
    batch_size=hparams.batch_size,
    shuffle=True,
    drop_last=False,
    num_workers=4,
)
val_loader = DataLoader(val_set, batch_size=hparams.batch_size, num_workers=4)

# Record dataset sizes so they are stored alongside the other hyper-parameters.
hparams.train_size = len(train_set)
hparams.val_size = len(val_set)

# Hparams is a dict, so ** expands every entry into Model keyword arguments.
model = Model(**hparams)
trainer.fit(model, train_dataloaders=train_loader, val_dataloaders=val_loader)
                                      