import os

from torch.utils.data import DataLoader 
from classifier import Classifier
from classifierv10 import Classifierv10
from models.ddmimv5 import DDMIMV5
from models.ddmim import DDMIM
from models.ddmimv2 import DDMIMV2
from models.ddmimv4 import DDMIMV4
from models.ddmimv6 import DDMIMV6
from models.ddmimv9 import DDMIMV9
import pytorch_lightning as pl 
import torch
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from pytorch_lightning.callbacks import ModelCheckpoint
from pl_bolts.datamodules import ImagenetDataModule
from config.option_10_percent_data_pretrain import parse_args

from models.simmimforddmimv4 import SimMimForDDMIMV4
from pl_bolts.datasets import UnlabeledImagenet
class TenPercentImagenetDatamodule(ImagenetDataModule):
    """ImagenetDataModule variant whose train split is capped per class.

    With the default cap of 150 images per class this yields roughly 10% of
    ImageNet-1k's ~1300 train images per class (hence the class name).
    Validation/test loaders are inherited unchanged from the parent.
    """

    # Generalized from the previously hard-coded 150: instances or
    # subclasses may override this to change the per-class train cap.
    train_num_imgs_per_class: int = 150

    def train_dataloader(self) -> DataLoader:
        """Build the capped-size training DataLoader.

        Returns a DataLoader over an UnlabeledImagenet train split limited
        to ``train_num_imgs_per_class`` images per class, using the
        module's configured transforms (or the default train transform
        when none were set).
        """
        # Renamed from `transforms`, which shadowed the torchvision
        # `transforms` module imported at file level.
        train_tfms = (
            self.train_transform()
            if self.train_transforms is None
            else self.train_transforms
        )

        dataset = UnlabeledImagenet(
            self.data_dir,
            num_imgs_per_class=self.train_num_imgs_per_class,
            num_imgs_per_class_val_split=self.num_imgs_per_val_class,
            meta_dir=self.meta_dir,
            split="train",
            transform=train_tfms,
        )
        return DataLoader(
            dataset,
            batch_size=self.batch_size,
            shuffle=self.shuffle,
            num_workers=self.num_workers,
            drop_last=self.drop_last,
            pin_memory=self.pin_memory,
        )




def main(args):
    """Train a Classifierv10 head on top of a pretrained DDMIMV9 backbone
    using a 10%-per-class ImageNet subset.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed CLI options from ``config.option_10_percent_data_pretrain``
        (uses ``seed``, ``data``, ``batch_size``, ``log_dir``, ``gpu``,
        ``epochs``).
    """
    pl.seed_everything(args.seed)

    # Backbone weights are restored from the DDMIMV9 pretraining
    # checkpoint; lr here is for the classifier fine-tuning stage.
    model = Classifierv10(
        model=DDMIMV9,
        resume="ddmim/log/seed3407/version_31/checkpoints/epoch=48_val_loss=0.04_DDMIMV9.ckpt",
        lr=1e-3,
    )

    datamodule = TenPercentImagenetDatamodule(
        args.data, num_workers=8, batch_size=args.batch_size
    )

    os.makedirs(args.log_dir, exist_ok=True)

    checkpoint_callback = ModelCheckpoint(
        verbose=True,
        monitor="val_loss",
        mode="min",
        save_last=True,
        # Filename template kept byte-identical (including the trailing
        # space before the model name) so existing runs/scripts that look
        # for these checkpoint names keep working.
        filename='{epoch}_{step}_{val_loss:.2f}_{val_acc1:.2f} ' + model._get_name() + ' 10%data'
    )
    lr_callback = pl.callbacks.LearningRateMonitor(logging_interval="step")
    callbacks = [checkpoint_callback, lr_callback]

    logger = pl.loggers.TensorBoardLogger(
        args.log_dir,
        name=f'seed{args.seed}',
    )

    trainer = pl.Trainer(
        gpus=args.gpu,
        # Fall back to a large cap when --epochs is unset/zero.
        max_epochs=args.epochs if args.epochs else 1000,
        callbacks=callbacks,
        logger=logger,
    )

    trainer.fit(model=model, datamodule=datamodule)


if __name__ == '__main__':
    # Parse CLI options, pin training to GPU 0, and launch.
    args = parse_args()

    # NOTE(review): torch is imported before this line; this only restricts
    # visible devices because CUDA initializes lazily — confirm if the
    # import order ever changes.
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    main(args)