import torch as t
import torch.nn as nn
from torch.optim import AdamW
import wandb
from data import construct_dataset
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import CosineAnnealingLR
from engine import train_one_epoch, evalulate
from tqdm import tqdm
import wandb
from args import args
import argparse


def train(args: args):
    """Run the full training loop for an MoE model.

    Builds the dataset/dataloaders, constructs and compiles the model,
    then runs ``args.epoch`` epochs of train + eval, logging metrics to
    stdout and wandb after every epoch.

    Args:
        args: project config object providing dataset_name, data_root,
            batch_size, moe_model, model_kwargs, lr, weight_decay, epoch,
            loss_func, device, and expert_loss_coeff.
    """
    # 1. prepare data
    train_set, test_set = construct_dataset(args.dataset_name, args.data_root)
    # 2. construct dataloaders (shuffle only the training split)
    train_loader = DataLoader(
        train_set,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=8,
        pin_memory=True,
    )
    test_loader = DataLoader(
        test_set,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=8,
        pin_memory=True,
    )
    # 3. prepare model
    model: nn.Module = args.moe_model(**args.model_kwargs)
    # BUG FIX: torch.compile returns the optimized module and does NOT
    # modify the model in place; the original call discarded the return
    # value, so training ran on the uncompiled model.
    model = t.compile(model)
    # NOTE(review): the model is never moved to args.device here —
    # presumably train_one_epoch/evalulate handle device placement; verify.
    optimizer = AdamW(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    # Cosine decay over the full run (T_max = total epoch count).
    scheduler = CosineAnnealingLR(optimizer, args.epoch)
    loss_func = args.loss_func()
    # 4. begin the main loop
    for ep in tqdm(range(args.epoch)):
        train_loss, expert_loss = train_one_epoch(
            model=model,
            criterion=loss_func,
            data_loader=train_loader,
            optimizer=optimizer,
            device=args.device,
            expert_loss_coeff=args.expert_loss_coeff,
        )

        test_loss, test_matrix = evalulate(
            model=model, data_loader=test_loader, device=args.device
        )
        # Step the LR schedule once per epoch, after optimization.
        scheduler.step()
        print(
            "EP:[{}/{}]--> train_loss:{},expert_loss:{},test_loss:{}, test_matrix:{}".format(
                ep, args.epoch, train_loss, expert_loss, test_loss, test_matrix
            )
        )

        # commit=False holds this data so it lands in the same wandb step
        # as the test_matrix logged immediately below.
        wandb.log(
            {
                "train_loss": train_loss,
                "expert_loss": expert_loss,
                "test_loss": test_loss,
                "learning_rate": optimizer.param_groups[0]["lr"],
            },
            commit=False,
        )
        wandb.log(test_matrix)


if __name__ == "__main__":
    # Command line exposes a single option: the path to a YAML config.
    cli_parser = argparse.ArgumentParser(description="Training Config", add_help=False)
    cli_parser.add_argument(
        "-c",
        "--config",
        default="",
        type=str,
        metavar="FILE",
        help="YAML config file specifying default arguments",
    )
    cli = cli_parser.parse_args()

    # Load the training configuration from the YAML file and echo it.
    training_args = args()
    training_args.from_yaml_file(cli.config)
    print("Current config:", training_args.to_dict())

    # Start a wandb run in the "moe" project, recording the full config
    # as run metadata, then kick off training.
    wandb.init(project="moe", config=training_args.to_dict())
    train(training_args)
