# pip install swanlab
# See the full documentation:
# https://docs.swanlab.cn/guide_cloud/general/quick-start.html
# Log in at https://swanlab.cn/@allragedbody
import swanlab
import random

# Run the code below after logging in to your account on the server.
# Create a SwanLab project. The equivalent LLaMA-Factory arguments are:
# project = finetuning_args.swanlab_project,
# workspace = finetuning_args.swanlab_workspace,
# experiment_name = finetuning_args.swanlab_run_name,
# mode = finetuning_args.swanlab_mode,
# config = {"Framework": "🦙LlamaFactory"},
# logdir = finetuning_args.swanlab_logdir,
# tags = ["🦙LlamaFactory"],

# Hyperparameters to be logged with the run, mirroring a LLaMA-Factory
# LoRA SFT configuration for Meta-Llama-3-8B-Instruct.
run_config = {
    "model_args": {
        "model_name_or_path": "meta-llama/Meta-Llama-3-8B-Instruct",
        "trust_remote_code": True,
        "stage": "sft",
        "do_train": True,
        "finetuning_type": "lora",
        "lora_rank": 8,
        "lora_target": "all",
    },
    "data_args": {
        "dataset": "identity",
        "alpaca_en_demo": True,
        "template": "llama3",
        "cutoff_len": 2048,
        "max_samples": 1000,
        "overwrite_cache": True,
        "preprocessing_num_workers": 16,
        "dataloader_num_workers": 4,
    },
    "training_args": {
        "per_device_train_batch_size": 1,
        "gradient_accumulation_steps": 8,
        "learning_rate": 1.0e-4,
        "num_train_epochs": 3.0,
        "lr_scheduler_type": "cosine",
        "warmup_ratio": 0.1,
        "bf16": True,
        "ddp_timeout": 180000000,
        "resume_from_checkpoint": None,
        "output_dir": "saves/llama3-8b/lora/sft",
        "logging_steps": 10,
        "save_steps": 500,
        "plot_loss": True,
        "overwrite_output_dir": True,
        "save_only_model": False,
        "report_to": "swanlab",
    },
    "finetuning_args": {},
    "generating_args": {},
}

# Start the SwanLab run for this experiment.
swanlab.init(
    # Project name shown in the SwanLab UI.
    project="test_training_001",
    # workspace="test",
    experiment_name="cloud",
    # Valid modes: 'disabled', 'cloud', 'offline', 'local'.
    mode="offline",
    logdir="saves",
    tags=["test0001"],
    config=run_config,
)

# swanlab.config.update(
#     {
#         "model_args": {
#             "model_name_or_path": "bigscience/bloom-560m",
#         },
#         "data_args": {
#             "dataset": "CIFAR-100",
#         },
#         "finetuning_args": {
#             "epochs": 10,
#         },
#         "generating_args": {
#             "max_new_tokens": 100,
#         },
#     }
# )

# # Simulate one training run
# epochs = 10
# offset = random.random() / 5
# for epoch in range(2, epochs):
#   acc = 1 - 2 ** -epoch - random.random() / epoch - offset
#   loss = 2 ** -epoch + random.random() / epoch + offset
#
#   # Log training metrics; use the log method
#   swanlab.log({"acc": acc, "loss": loss})

# [Optional] Mark the run as finished; this is required in notebook environments.
swanlab.finish()

###########################################################
# swanlab_public_config = swanlab.get_run().public.json()
# print(swanlab_public_config)
