from datasets import CustomDataset, TestDataset
# from tlhengine.utils import random_split
import lightning as L
from engine import Model
from torch.utils.data.dataloader import DataLoader
from lightning.pytorch.loggers import TensorBoardLogger
from lightning.pytorch.callbacks import EarlyStopping, ModelCheckpoint
from utils import get_hparms
from torchvision import transforms
from torch.utils.data import ConcatDataset
import torch

import argparse

# Command-line interface: this validation script only needs the checkpoint path.
parser = argparse.ArgumentParser()

# Path to the Lightning checkpoint to restore; consumed below by
# Model.load_from_checkpoint(args.weights).
parser.add_argument('--weights', type=str, 
                    default='/root/code/tianchi/log/noname/version_0/checkpoints/last.ckpt')
# NOTE: parsed at module import time, so this file is a script, not an importable module.
args = parser.parse_args()

# A dict subclass whose values can be accessed with dot notation
# and whose keys can be autocompleted by IDEs such as VS Code.
class Hparams(dict):
    """A dict whose keys are also readable/writable as attributes.

    Keeps full dict behavior (iteration, ``**`` unpacking, ``len``, ...)
    while allowing ``hparams.lr``-style access that IDEs can autocomplete.
    """

    def __getattr__(self, attr):
        # Translate missing keys into AttributeError (not KeyError): the
        # attribute protocol requires it, otherwise hasattr(), copy.copy()
        # and pickle misbehave on instances of this class.
        try:
            return self[attr]
        except KeyError:
            raise AttributeError(attr) from None

    def __setattr__(self, attr, value):
        # Store attribute assignments as dict items so both access styles
        # always observe the same data.
        self[attr] = value

    def __delattr__(self, attr):
        # Keep deletion symmetric with get/set (``del h.x`` removes the key).
        try:
            del self[attr]
        except KeyError:
            raise AttributeError(attr) from None
    
# hparams = Hparams(
#     epochs = 100,
#     lr = 1e-3,
#     batch_size = 32,
# )

# Experiment hyper-parameters shared by the data loaders and the trainer.
hparams = Hparams()
hparams.batch_size = 32
hparams.epochs = 100

# TensorBoard logger for this run; writes under log/noname/version_*/.
tb_logger = TensorBoardLogger(save_dir='log', name='noname', )

# NOTE(review): `loggers`, `early_stop` and `ckpt_callback` are constructed
# but never handed to the Trainer below (it is built with `logger=False` and
# no `callbacks=`), so they have no effect in this validation-only script.
# They look like leftovers from the training configuration — confirm before
# removing or wiring them in.
loggers = [tb_logger]
early_stop = EarlyStopping(monitor='val_acc/all', mode='max', 
                           patience=30, min_delta=1e-3, strict=True)

ckpt_callback = ModelCheckpoint(
    save_last=True, save_top_k=1, filename='best',monitor='val_acc/all', mode='max',
)


# Validation-only trainer: logging disabled; `max_epochs` is irrelevant to
# `trainer.validate()` but kept from the training setup.
trainer = L.Trainer( max_epochs=hparams.epochs, logger=False)

####### DATA #########
# Training-time preprocessing: aggressive augmentation (TrivialAugment,
# random crop/flips, random erasing) on 176x176 crops, normalized with
# ImageNet mean/std statistics.
train_preprocessing = transforms.Compose([
            transforms.TrivialAugmentWide(),
            transforms.RandomResizedCrop((176, 176)),
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            # PILToTensor + ConvertImageDtype: convert losslessly, then scale
            # to float32 in [0, 1] before normalization.
            transforms.PILToTensor(),
            transforms.ConvertImageDtype(torch.float32),
            transforms.Normalize( mean = [0.485, 0.456, 0.406] , std = [0.229, 0.224, 0.225]),
            transforms.RandomErasing(p=0.1),
        ])

# Validation preprocessing: deterministic resize, same normalization stats.
# NOTE(review): training crops at 176x176 while validation resizes to
# 224x224 — a train/test resolution discrepancy (possibly deliberate, in the
# style of FixRes). Confirm the asymmetry is intended.
val_preprocessing = transforms.Compose([
            transforms.Resize((224, 224)),
            # transforms.RandomHorizontalFlip(),
            # transforms.RandomVerticalFlip(),
            transforms.ToTensor(),
            transforms.Normalize( mean = [0.485, 0.456, 0.406] , std = [0.229, 0.224, 0.225])
        ])
# Datasets: augmented training split and the held-out crawl validation split.
train_set = CustomDataset('data/train', train_preprocessing)
val_set = CustomDataset('data/crawl', val_preprocessing, )
# Fixed: the previous message claimed a "concated trainset length", but the
# ConcatDataset path was commented out — report plain split sizes instead.
print(f"train set length: {len(train_set)}, val set length: {len(val_set)}")


# Shuffle only the training data; validation keeps dataset order so metrics
# are reproducible across runs.
train_loader = DataLoader(train_set, batch_size=hparams.batch_size, drop_last=False, shuffle=True, num_workers=4)
val_loader = DataLoader(val_set, batch_size=hparams.batch_size, num_workers=4)

# Record split sizes alongside the other hyper-parameters.
hparams.train_size = len(train_set)
hparams.val_size = len(val_set)

# Restore the trained weights and run validation only (training is disabled;
# re-enable the fit call below to resume training from the checkpoint).
model = Model.load_from_checkpoint(args.weights)
# trainer.fit(model, train_dataloaders=train_loader, val_dataloaders=val_loader)
trainer.validate(model, dataloaders=val_loader)