import os
import warnings
import datetime
from dataclasses import field
from argparse import ArgumentParser
import torch
import numpy as np
from lightning import Trainer
from lightning.pytorch import seed_everything

from rwkv_trainer import train_callback
from rwkv_dataset import get_dataset
from rwkv_model import load_rwkv_model
from lightning_utilities.core.rank_zero import rank_zero_info


def _str2bool(value):
    """Parse a CLI boolean string ('true'/'1'/'yes' vs 'false'/'0'/'no').

    argparse's `type=bool` is a footgun: bool("False") is True because any
    non-empty string is truthy, so boolean flags could never be disabled
    from the command line. This converter makes `--flag False` work.
    """
    if isinstance(value, bool):
        return value
    lowered = value.strip().lower()
    if lowered in ("true", "1", "yes", "y", "t"):
        return True
    if lowered in ("false", "0", "no", "n", "f"):
        return False
    raise ValueError(f"invalid boolean value: {value!r}")


def parse_args(argv=None):
    """Build and parse the training command line.

    Args:
        argv: Optional list of argument strings. Defaults to None, which
            makes argparse read sys.argv[1:] (original behavior).

    Returns:
        argparse.Namespace with all training configuration.
    """
    parser = ArgumentParser()

    # Project
    parser.add_argument("--swanlab", default="rwkv_grpo", type=str)  # swanlab project name. if "" then don't use swanlab
    parser.add_argument("--proj_dir", default="out", type=str)
    parser.add_argument("--random_seed", default=443, type=int)


    # Dataset
    parser.add_argument("--data_file", default="/nlp/yubin.tang/project/rwkv_grpo/data/all_st_500h_small.jsonl", type=str)
    parser.add_argument("--ctx_len", default=1024, type=int)
    parser.add_argument("--vocab_size", default=65536, type=int)  # vocab_size = 0 means auto (for char-level LM and .txt data)
    parser.add_argument("--data_type", default="jsonl", choices=["jsonl"], type=str)
    parser.add_argument("--data_shuffle", default=True, type=_str2bool, help="Whether to shuffle the data")
    parser.add_argument("--micro_bsz", default=1, type=int)  # micro batch size (batch size per GPU)
    parser.add_argument("--epoch_steps", default=100, type=int)  # a mini "epoch" has [epoch_steps] steps
    parser.add_argument("--epoch_count", default=2, type=int)  # if you load a model trained for x "epochs", set epoch_count = x
    parser.add_argument("--epoch_begin", default=0, type=int)  # if you load a model trained for x "epochs", set epoch_begin = x
    parser.add_argument("--max_epochs", type=int, default=500, help="Maximum number of epochs to train")
    parser.add_argument("--epoch_save", default=1, type=int)  # save the model every [epoch_save] "epochs"
    parser.add_argument("--my_exit", default=99999999, type=int)
    parser.add_argument("--check_val_every_n_epoch", type=int, default=int(1e20), help="Check validation every n epochs")


    # Model
    parser.add_argument("--load_model", default="", type=str)  # full path, with .pth
    parser.add_argument("--my_testing", default='x070', type=str)
    parser.add_argument("--head_size_a", default=64, type=int) # can try larger values for larger models
    parser.add_argument("--n_layer", default=12, type=int)
    parser.add_argument("--n_embd", default=768, type=int)
    parser.add_argument("--dim_att", default=0, type=int)
    parser.add_argument("--dim_ffn", default=0, type=int)
    parser.add_argument("--lr_init", default=6e-4, type=float)  # 6e-4 for L12-D768, 4e-4 for L24-D1024, 3e-4 for L24-D2048
    parser.add_argument("--lr_final", default=1e-5, type=float)
    parser.add_argument("--lr_schedule", default="cos", type=str, choices=["cos", "wsd"])
    parser.add_argument("--warmup_steps", default=-1, type=int)  # try 50 if you load a model
    parser.add_argument("--adam_eps", default=1e-8, type=float)
    parser.add_argument("--layerwise_lr", default=1, type=int)  # layerwise lr for faster convergence (but slower it/s)
    parser.add_argument("--head_size_divisor", default=8, type=int)
    parser.add_argument("--avg_loss", default=0, type=int)
    parser.add_argument("--beta1", default=0.9, type=float)
    parser.add_argument("--beta2", default=0.99, type=float)  # use 0.999 when your model is close to convergence
    parser.add_argument("--weight_decay", default=0, type=float) # try 0.1 / 0.01 / 0.001
    parser.add_argument("--weight_decay_final", default=-1, type=float)   # not implemented yet


    # Framework
    parser.add_argument("--accelerator", type=str, default="gpu", help="Accelerator type (e.g., 'cpu', 'gpu')")
    parser.add_argument("--strategy", type=str, default="deepspeed_stage_1", help="Distributed training strategy")
    parser.add_argument("--devices", type=int, default=1, help="Number of devices to use")
    parser.add_argument("--num_nodes", type=int, default=1, help="Number of nodes to use")
    parser.add_argument("--precision", type=str, default="bf16", choices=["bf16"], help="Precision for training")
    parser.add_argument("--num_sanity_val_steps", type=int, default=0, help="Number of sanity validation steps")
    parser.add_argument("--log_every_n_steps", type=int, default=int(1e20), help="Log every n steps")
    parser.add_argument("--enable_checkpointing", type=_str2bool, default=True, help="Enable checkpointing")
    parser.add_argument("--accumulate_grad_batches", type=int, default=1, help="Accumulate gradients over n batches")
    parser.add_argument("--gradient_clip_val", type=float, default=1.0, help="Value for gradient clipping")
    parser.add_argument("--op", default="cuda", choices=["fla", "triton", "cuda"], type=str, help="Operation type")
    parser.add_argument("--fused_kernel", type=_str2bool, default=True, help="Enable rwkv-fla fused kernel")
    parser.add_argument("--grad_cp", default=True, type=_str2bool)  # gradient checkpt: saves VRAM, but slower

    return parser.parse_args(argv)

def main():
    """Entry point: parse the CLI, configure the runtime environment, build
    the dataset/model, and launch RWKV training with Lightning's Trainer.
    """
    args = parse_args()

    # Seeding every rank identically affects which samples each GPU draws,
    # hence the loud warning. A negative seed disables seeding entirely.
    if args.random_seed >= 0:
        print(f"########## WARNING: GLOBAL SEED {args.random_seed} THIS WILL AFFECT MULTIGPU SAMPLING ##########\n" * 3)
        seed_everything(args.random_seed)
    np.set_printoptions(precision=4, suppress=True, linewidth=200)
    warnings.filterwarnings("ignore", ".*Consider increasing the value of the `num_workers` argument*")
    warnings.filterwarnings("ignore", ".*The progress bar already tracks a metric with the*")

    # Timestamp used for run identification / checkpoint naming downstream.
    args.my_timestamp = datetime.datetime.today().strftime("%Y-%m-%d-%H-%M-%S")

    args.betas = (args.beta1, args.beta2)
    # Effective global batch size across all nodes and devices.
    args.real_bsz = int(args.num_nodes) * int(args.devices) * args.micro_bsz

    # The RWKV model/kernel code reads its configuration from env vars,
    # so they must be set before the model is constructed.
    os.environ["RWKV_MY_TESTING"] = args.my_testing
    os.environ["RWKV_HEAD_SIZE_A"] = str(args.head_size_a)
    os.environ["RWKV_TRAIN_TYPE"] = ''
    print(f"########## WKV OP           {args.op}               ##########\n" * 3)
    print(f"########## FUSED OP    {args.fused_kernel}          ##########\n" * 3)
    os.environ["WKV"] = args.op
    os.environ["FUSED_KERNEL"] = '1' if args.fused_kernel else '0'

    # Derive attention/FFN dims from the embedding size when not given.
    if args.dim_att <= 0:
        args.dim_att = args.n_embd
    if args.dim_ffn <= 0:
        args.dim_ffn = int((args.n_embd * 3.5) // 32 * 32)  # default = 3.5x emb size, rounded down to a multiple of 32

    args.run_name = f"{args.vocab_size} ctx{args.ctx_len} L{args.n_layer} D{args.n_embd}"
    # exist_ok avoids the check-then-create race when several ranks start at once.
    os.makedirs(args.proj_dir, exist_ok=True)

    if args.lr_final == 0 or args.lr_init == 0:
        rank_zero_info("\n\nNote: lr_final = 0 or lr_init = 0. Using linear LR schedule instead.\n\n")

    os.environ["RWKV_FLOAT_MODE"] = args.precision

    torch.backends.cudnn.benchmark = True
    torch.backends.cudnn.enabled = True

    # TF32 is only disabled for true fp32 runs; with the current argparse
    # choices (bf16 only) the else branch always runs, but the fp32/fp16
    # handling is kept for when more precisions are re-enabled.
    if args.precision == "fp32":
        torch.backends.cudnn.allow_tf32 = False
        torch.backends.cuda.matmul.allow_tf32 = False
    else:
        torch.backends.cudnn.allow_tf32 = True
        torch.backends.cuda.matmul.allow_tf32 = True

    # Map the CLI precision string onto the value Lightning's Trainer expects.
    if "32" in args.precision:
        args.precision = 32
    elif args.precision == "fp16":
        args.precision = 16
    else:
        args.precision = "bf16"

    train_data = get_dataset(args=args)

    # load_rwkv_model may amend args (e.g. resolved checkpoint info).
    args, model = load_rwkv_model(args)

    trainer = Trainer(
        accelerator=args.accelerator,
        strategy=args.strategy,
        devices=args.devices,
        num_nodes=args.num_nodes,
        precision=args.precision,
        logger=False,
        callbacks=[train_callback(args)],
        max_epochs=args.max_epochs,
        check_val_every_n_epoch=args.check_val_every_n_epoch,
        num_sanity_val_steps=args.num_sanity_val_steps,
        log_every_n_steps=args.log_every_n_steps,
        enable_checkpointing=args.enable_checkpointing,
        accumulate_grad_batches=args.accumulate_grad_batches,
        gradient_clip_val=args.gradient_clip_val,
    )

    trainer.fit(model, train_data)

# Script entry point: run training only when executed directly, not on import.
if __name__ == "__main__":
    main()
    
