import json
import argparse
import torch
import logging
import numpy as np
from utils.toolkit import NamespaceDict
from learners.components.bp.learner_factory import LearnerFactory
from models.single_net import BaseSingle


def create_data_manager(args):
    """Build the data manager that drives the incremental task splits.

    Args:
        args: merged configuration namespace (dataset / task settings).

    Raises:
        NotImplementedError: always, until a real implementation is supplied.
            Failing fast here beats returning None, which would only surface
            later as an AttributeError on ``data_manager.nb_tasks`` in main().
    """
    raise NotImplementedError(
        "create_data_manager is a placeholder; provide a DataManager "
        "implementation exposing `nb_tasks` before running training."
    )


def _load_json(path):
    """Read the JSON settings file at *path* and return it as a dict."""
    with open(path, "r", encoding="utf-8") as f:
        return json.load(f)


def _merge_config(cli_args, param):
    """Merge parsed CLI args with the JSON config into a single dict.

    Explicit CLI values take precedence; JSON values fill in any option the
    user left at its ``None`` default.

    NOTE(review): store_true flags (e.g. --verbose) default to False, not
    None, so a JSON ``true`` cannot re-enable them — confirm this matches the
    intended precedence.
    """
    merged = vars(cli_args).copy()
    for key, value in param.items():
        if merged.get(key) is None:
            merged[key] = value
    return merged


def main():
    """Entry point: parse config, seed RNGs, build the learner, and run
    incremental training over all tasks."""
    args = setup_parser().parse_args()
    # Fix: `load_json` / `merge_config` were referenced but never defined or
    # imported (NameError at runtime); implemented as private helpers above.
    param = _load_json(args.config)
    args = _merge_config(args, param)
    # TODO new arg type
    args = NamespaceDict(**args)

    # Setup logging (DEBUG when --verbose, INFO otherwise)
    logging.basicConfig(
        level=logging.INFO if not args.verbose else logging.DEBUG,
        format='%(asctime)s - %(levelname)s - %(message)s'
    )

    # Set random seeds for reproducibility; cudnn.deterministic trades speed
    # for repeatable convolution results.
    if args.train_seed is not None:
        torch.manual_seed(args.train_seed)
        np.random.seed(args.train_seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        logging.info(f"Random seed set to {args.train_seed}")

    # Create data manager (drives the per-task class splits)
    data_manager = create_data_manager(args)

    # Create learner using factory
    learner = LearnerFactory.create_learner(
        learner_type=args.learner_type,
        args=args,
        data_manager=data_manager,
        model_func=BaseSingle
    )

    # Display model architecture if verbose
    if args.verbose:
        logging.info(f"Model architecture: {learner._network}")

    # Train incrementally: one train/eval round per task
    logging.info("Starting incremental training")
    all_results = []
    for task_idx in range(data_manager.nb_tasks):
        logging.info(f"Training task {task_idx}")
        learner.incremental_train()
        # eval_task returns (cnn_accuracy, nme_accuracy); NME may be falsy
        # when the learner has no exemplar-mean classifier.
        cnn_accy, nme_accy = learner.eval_task()

        # Log results
        logging.info(f"Task {task_idx} CNN accuracy: {cnn_accy}")
        if nme_accy:
            logging.info(f"Task {task_idx} NME accuracy: {nme_accy}")

        all_results.append({
            'task': task_idx,
            'cnn_accuracy': cnn_accy,
            'nme_accuracy': nme_accy
        })

    # Print summary of results
    logging.info("Training completed")
    logging.info(f"Final results: {all_results}")


def setup_parser():
    parser = argparse.ArgumentParser(
        description="Reproduce of multiple pre-trained incremental learning algorthms."
    )
    parser.add_argument(
        "--config",
        type=str,
        default="./exps/simplecil.json",
        help="Json file of settings.",
    )
    parser.add_argument(
        "--prefix",
        type=str,
        default=None,
        help="Json file of settings.",
    )

    # Incremental learning configs
    parser.add_argument(
        "--init_cls",
        type=int,
        default=None,
        help="init classes",
    )
    parser.add_argument(
        "--inc_cls",
        type=int,
        default=None,
        help="incremental classes",
    )

    # fc config
    parser.add_argument(
        "--fc_temperture",
        action="store_true",
        help="fc_temperture",
    )

    # training config
    parser.add_argument(
        "--interval",
        type=int,
        default=None,
        help="evaluation intervals",
    )
    parser.add_argument(
        "--batch_size",
        type=int,
        default=None,
        help="batch size",
    )
    parser.add_argument(
        "--init_epochs",
        type=int,
        default=None,
        help="first session epoch",
    )
    parser.add_argument(
        "--init_lr",
        type=float,
        default=None,
        help="first session learning rat",
    )
    parser.add_argument(
        "--inc_epochs",
        type=int,
        default=None,
        help="incremental session epoch",
    )
    parser.add_argument(
        "--inc_lr",
        type=float,
        default=None,
        help="incremental session learning rat",
    )
    parser.add_argument(
        "--ca_epochs",
        type=int,
        default=None,
        help="classifier alignment epoch",
    )
    parser.add_argument(
        "--device",
        type=int,
        nargs="+",
        help="devices, accepted in list",
    )
    parser.add_argument(
        "--train_seed",
        type=int,
        default=None,
        help="training seed",
    )
    parser.add_argument(
        "--seed",
        type=int,
        nargs="+",
        default=None,
        help="random seed",
    )
    parser.add_argument(
        "--verbose",
        action="store_true",
        help="show detailed logs",
    )
    parser.add_argument(
        "--early_stop",
        action="store_true",
        help="store the best model",
    )

    # loss config
    parser.add_argument(
        "--distill_alpha",
        type=float,
        default=None,
        help="distill_alpha",
    )
    parser.add_argument(
        "--scale",
        type=float,
        default=None,
        help="distill_alpha",
    )
    parser.add_argument(
        "--margin",
        type=float,
        default=None,
        help="distill_alpha",
    )
    parser.add_argument(
        "--projector",
        type=str,
        default=None,
        help="projector for contrastive learning",
    )

    # model config
    parser.add_argument(
        "--ffn_rank",
        type=int,
        default=None,
        help="Adapter Rank",
    )
    parser.add_argument(
        "--topk",
        type=int,
        default=None,
        help="top k adapter/lora/components",
    )
    parser.add_argument(
        "--align_alpha",
        type=float,
        default=None,
        help="Adapter Rank",
    )
    parser.add_argument(
        "--learner_type",
        type=str,
        default=None,
        help="Type of learner to use",
    )

    # Hook module configs
    parser.add_argument(
        "--feature_aug_strength",
        type=float,
        default=0.0,
        help="Strength of feature augmentation (0 to disable)",
    )
    parser.add_argument(
        "--mixup_alpha",
        type=float,
        default=0.0,
        help="Alpha parameter for mixup (0 to disable)",
    )
    parser.add_argument(
        "--grad_clip_norm",
        type=float,
        default=0.0,
        help="Max norm for gradient clipping (0 to disable)",
    )

    return parser


# Script entry point: run the full incremental-training pipeline.
if __name__ == "__main__":
    main()
