import os

import lightning as L
from lightning.pytorch.callbacks import EarlyStopping, ModelCheckpoint, TQDMProgressBar
from lightning.pytorch.loggers import CSVLogger, TensorBoardLogger
from lightning.pytorch.tuner import Tuner
from lightning.pytorch.callbacks.progress.tqdm_progress import Tqdm
from train_utils import *
import shutil
from split import split_data
import sys
import random
class MyTqdm(TQDMProgressBar):
    """Progress bar whose validation tqdm is left on screen after each epoch."""

    def init_validation_tqdm(self) -> Tqdm:
        """Build the validation tqdm bar (kept visible via ``leave=True``)."""
        # `trainer.validate()` runs with no training bar, so the validation
        # bar takes the main position in that case; otherwise it sits below it.
        main_bar_offset = 1 if self.trainer.state.fn != "validate" else 0
        bar = Tqdm(
            desc=self.validation_description,
            position=2 * self.process_position + main_bar_offset,
            disable=self.is_disabled,
            leave=True,
            dynamic_ncols=True,
            file=sys.stdout,
            bar_format=self.BAR_FORMAT,
        )
        return bar


if __name__ == "__main__":
    args = get_args()
    random.seed(42)  # deterministic train/val split across runs

    # Resolve dataset root once: local path in dev mode, cluster path otherwise.
    # (Previously os.getenv('face_dev') was queried twice; the second result
    # was bound to an unused `dev_mode` local.)
    dev_mode = os.getenv('face_dev')
    data_dir = "../data/2792" if dev_mode else '/home/data/2792'

    train_list = os.path.join(data_dir, "train.txt")
    val_list = os.path.join(data_dir, "val.txt")
    if os.path.exists(train_list) and os.path.exists(val_list):
        print("exist train.txt, will overwrite")
    else:
        print("train.txt and val.txt will be created")

    val_size = args.val_size
    split_data(data_dir, val_size)

    # Synchronous CUDA kernel launches make stack traces point at the
    # actual failing op when debugging GPU errors.
    os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
    tb_logger = TensorBoardLogger(save_dir="/project/train/tensorboard/cls")
    csv_logger = CSVLogger(save_dir="/project/train/models/cls/")
    early_stop = EarlyStopping(
        monitor="val_loss", mode="min", patience=30, verbose=True
    )
    # Keep the 5 best checkpoints by validation loss, plus the last epoch,
    # inside the CSV logger's run directory.
    ckpt_callback = ModelCheckpoint(
        csv_logger.log_dir, filename="best_e{epoch}", save_last=True, save_top_k=5, mode='min', monitor='val_loss',
    )

    tqdm_callback = MyTqdm()
    tqdm_callback.disable()
    print("total epochs: ", args.epochs)
    print("args:\n", args)

    if args.pretrained_ckpt:
        # Warm-start from checkpoint weights, but let the current CLI args
        # override the hyperparameters stored in the checkpoint.
        print(f'loading from {args.pretrained_ckpt}')
        model = CVModule.load_from_checkpoint(args.pretrained_ckpt)
        model.hparams.update(vars(args))
    else:
        model = CVModule(**vars(args))

    # Datasets are returned alongside the loaders but only the loaders
    # are used here.
    _train_set, _val_set, train_loader, val_loader = get_data(args.batch_size)
    trainer = L.Trainer(
        max_epochs=args.epochs,
        devices=1,
        accelerator="gpu",
        precision="16-mixed",
        log_every_n_steps=20,
        logger=[csv_logger, tb_logger],
        enable_checkpointing=True,
        callbacks=[ckpt_callback, tqdm_callback, early_stop],
        num_sanity_val_steps=0
    )

    # Auto-scale batch size (power-of-two search) before training when requested.
    if args.tuner:
        tuner = Tuner(trainer)
        tuner.scale_batch_size(model, mode="power")
    print("log_dir is", trainer.log_dir)
    print("tb logger at", tb_logger.log_dir)
    # args.ckpt_path (if set) resumes full trainer state, unlike
    # pretrained_ckpt above which only loads weights.
    trainer.fit(model=model, ckpt_path=args.ckpt_path, train_dataloaders=train_loader, val_dataloaders=val_loader)


