from datasets import Dataset
import pandas as pd
import numpy as np
from torch.utils.data import DataLoader
import torch.nn as nn
import torch
import accelerate
import random
from argparse import ArgumentParser
from sklearn.metrics import ndcg_score


def main():
    """Train a 1-D linear model on the identity mapping (y == x) under
    🤗 Accelerate, to observe how the ordering of forward / backward /
    optimizer.step / optimizer.zero_grad interacts with gradient
    accumulation.

    The per-step bias gradient (and its device) is appended to
    ``{optimizer}-{order}.txt`` so runs can be diffed afterwards.
    """
    parser = ArgumentParser()
    parser.add_argument("--order", type=str, default="fbsz", choices=['fzbs', 'fbsz', 'zfbs', ],
                        help="The order of module.forward, accelerator.backward, optimizer.step, optimizer.zero_grad"
                        )
    parser.add_argument("--optimizer", type=str, default="Adam", choices=['Adam', 'AdamW', 'RMSprop', 'SGD'])

    args = parser.parse_args()

    def seed_everything(seed: int) -> int:
        """Seed every RNG in use and force deterministic cuDNN kernels."""
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        return seed

    seed_everything(42)

    # Toy data: y == x, so the optimum is weight=1, bias=0.
    num_samples = 32
    X = np.arange(num_samples)
    Y = np.arange(num_samples)
    df = pd.DataFrame({"x": X, "y": Y})
    toy_dataset = Dataset.from_pandas(df)

    toy_dataloader = DataLoader(
        toy_dataset,
        batch_size=4,
        shuffle=True,
        pin_memory=True,
        num_workers=4
    )

    # Model and optimizer
    model = nn.Linear(1, 1)
    model.train()

    # Resolve the optimizer class by name (choices are validated by argparse).
    optimizer_cls = getattr(torch.optim, args.optimizer)
    optimizer = optimizer_cls(model.parameters(), lr=1e-3)

    # Prepare: with gradient_accumulation_steps=2, optimizer.step() inside
    # accelerator.accumulate() is a no-op on every other batch.
    accelerator = accelerate.Accelerator(gradient_accumulation_steps=2)
    model, optimizer, toy_dataloader = accelerator.prepare(
        model, optimizer, toy_dataloader
    )

    # Hoisted out of the loop: the loss module is stateless and reusable.
    loss_fn = torch.nn.MSELoss()

    # BUGFIX: the original read `model.module.bias`, which only exists when
    # Accelerate wraps the model in DDP (multi-process). In a single-process
    # run `prepare` returns the bare nn.Linear and `.module` raises
    # AttributeError. `unwrap_model` handles both cases.
    unwrapped = accelerator.unwrap_model(model)

    def bias_grad_info() -> str:
        """Format the current bias gradient; only valid after backward()."""
        bias = unwrapped.bias
        return f"Grad of the bias: {bias.grad.item()} - {bias.device}"

    # Train loop
    for step, batch in enumerate(toy_dataloader):
        x, y = batch['x'].view(-1, 1).to(torch.float), batch['y'].to(torch.float)
        with accelerator.accumulate(model):
            if args.order == "fzbs":
                # Call the model via __call__ (not .forward) so Accelerate's
                # forward wrappers/hooks (e.g. autocast) are not bypassed.
                loss = loss_fn(model(x).flatten(), y)  # Forward
                optimizer.zero_grad()  # Zero grad
                accelerator.backward(loss)  # Backward
                info = bias_grad_info()
                optimizer.step()  # Step

            elif args.order == "fbsz":
                loss = loss_fn(model(x).flatten(), y)  # Forward
                accelerator.backward(loss)  # Backward
                info = bias_grad_info()
                optimizer.step()  # Step
                optimizer.zero_grad()  # Zero grad

            elif args.order == "zfbs":
                optimizer.zero_grad()  # Zero grad
                loss = loss_fn(model(x).flatten(), y)  # Forward
                accelerator.backward(loss)  # Backward
                info = bias_grad_info()
                optimizer.step()  # Step

            else:
                raise ValueError(f"Unknown order: {args.order}")

        # Append the gradient info to a per-configuration log file.
        # NOTE(review): in a multi-process run every rank appends here;
        # guard with accelerator.is_main_process if one file per run is wanted.
        with open(f"{args.optimizer}-{args.order}.txt", "a") as f:
            f.write(info + "\n")

        accelerator.wait_for_everyone()

# Script entry point: run the experiment only when executed directly.
if __name__ == "__main__":
    main()
