import os 
from classifier import Classifier
from classifierv7 import Classifierv7
from classifierv8 import Classifierv8
from classifierv9 import Classifierv9,LinearClassifierv9
from classifierv10 import Classifierv10
from classifierv_swint import swint,classifier_swint
from models.ddmimv8 import DDMIMV8
from models.ddmimv5 import DDMIMV5
from models.ddmim import DDMIM
from models.ddmimv2 import DDMIMV2
from models.ddmimv4 import DDMIMV4
from models.ddmimv4_alter import DDMIMV4_alter
from models.ddmimv7_alter import DDMIMV7_alter
from models.ddmimv6 import DDMIMV6
from models.ddmimv8 import DDMIMV8
from models.lightning_mae import Lightning_mae
from models.ddmimv9 import DDMIMV9
from models.swinTransformerV2 import SwinTransformerV2
from models.classifier_mae import Classifier_mae
import pytorch_lightning as pl 
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from pl_bolts.transforms.dataset_normalizations import cifar10_normalization
import torchvision.datasets as datasets
from pytorch_lightning.callbacks import ModelCheckpoint
from pl_bolts.datamodules import ImagenetDataModule,CIFAR10DataModule
from config.option_finetune import parse_args

from models.simmimforddmimv4 import SimMimForDDMIMV4






def main(args):
    """Fine-tune a pretrained DDMIM backbone as a CIFAR-10 classifier.

    Wraps a pretrained ``DDMIMV7_alter`` checkpoint in a ``Classifierv10``
    head, prepares the CIFAR-10 datamodule with 224x224 transforms, and
    runs training with checkpointing and learning-rate monitoring.

    Args:
        args: parsed CLI namespace from ``config.option_finetune.parse_args``;
            fields read here: seed, batch_size, log_dir, gpu, epochs.
    """
    pl.seed_everything(args.seed)

    # Active configuration: Classifierv10 head on a pretrained DDMIMV7_alter
    # backbone. Earlier experiments swapped in other backbones/checkpoints
    # (DDMIMV4/V5/V6/V8/V9, swint, MAE); consult version-control history
    # for those configurations.
    model = Classifierv10(
        model=DDMIMV7_alter,
        resume="ddmim/log/seed3407/version_4/checkpoints/DDMIMV7_epoch=51_val_loss=2.06.ckpt",
        lr=5e-3,
        num_classes=10,
    )

    datamodule = CIFAR10DataModule(
        "./cifar10/",
        num_workers=32,
        batch_size=args.batch_size,
    )
    # CIFAR-10 images are 32x32; upscale to the 224x224 input size the
    # ViT/Swin-style backbone was pretrained with.
    datamodule.train_transforms = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        cifar10_normalization(),
    ])
    # Val and test share the same deterministic preprocessing.
    eval_transform = transforms.Compose([
        transforms.Resize(224),
        transforms.ToTensor(),
        cifar10_normalization(),
    ])
    datamodule.val_transforms = eval_transform
    datamodule.test_transforms = eval_transform

    os.makedirs(args.log_dir, exist_ok=True)

    # Track the best val_loss checkpoint and always keep the latest one
    # (save_last=True) so interrupted runs can resume.
    checkpoint_callback = ModelCheckpoint(
        verbose=True,
        monitor="val_loss",
        mode="min",
        save_last=True,
        filename='{epoch}_{step}_{val_loss:.2f}_{val_acc1:.2f}',
    )
    lr_callback = pl.callbacks.LearningRateMonitor(logging_interval="step")
    callbacks = [checkpoint_callback, lr_callback]

    logger = pl.loggers.TensorBoardLogger(
        args.log_dir,
        name=f'seed{args.seed}',
    )

    trainer = pl.Trainer(
        gpus=args.gpu,
        # NOTE(review): any falsy args.epochs (None OR 0) falls back to 1000
        # epochs — confirm 0 is never a meaningful value from the CLI.
        max_epochs=args.epochs if args.epochs else 1000,
        callbacks=callbacks,
        logger=logger,
    )

    trainer.fit(model=model, datamodule=datamodule)



if __name__ == '__main__':
    cli_args = parse_args()
    # Pin training to GPU 0.
    # NOTE(review): this is set after `import torch` at module level; it only
    # takes effect if CUDA has not been initialized yet — confirm.
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    main(cli_args)