import modelscope
import datasets
import transformers
import os
from accelerate import Accelerator
import torch


## execute cmd
# cd root_path
# /mnt/data02/anaconda3/envs/gputest/bin/accelerate launch ./scripts/pretrain/pretrain_with_transformers_qwen2_5_0_5B.py

## path define
# Repository root: three directory levels above this script (scripts/pretrain/<file>.py).
root_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Script name without extension; reused to name checkpoint/model/log directories.
filename = os.path.basename(__file__).split('.')[0]
# Chinese Wikipedia dump (JSON) used as the pretraining corpus.
dataset_path = os.path.join(root_path, "datasets/pretrain/wiki-datasets/wikipedia-zh-cn-20241020.json")
# ModelScope hub ID of the base model whose config/tokenizer are downloaded.
model_ID = 'Qwen/Qwen2.5-0.5B'

# Local cache directory for the downloaded config/tokenizer.
model_path = os.path.join(root_path, 'llms/' + model_ID)
# Intermediate training checkpoints written by the Trainer.
checkpoint_path = os.path.join(root_path,"checkpoints/"+filename)
# Final trained model output directory.
model_save_path = os.path.join(root_path,'llms/' + filename)
# Trainer / TensorBoard logging directory.
log_path = os.path.join(root_path,'logs/' + filename)

def train_qwen_with_transformers_on_distributed_env():
    """Pretrain a scaled-down Qwen2-architecture causal LM on a Chinese Wikipedia dump.

    Loads the JSON corpus, tokenizes it to fixed-length chunks, builds a
    small Qwen2 config from the downloaded base config, and trains with
    ``transformers.Trainer``. The Trainer manages the accelerate/distributed
    setup itself when launched via ``accelerate launch`` — no manual
    ``Accelerator`` wiring is needed (the original manual ``prepare()`` calls
    conflicted with it and its hand-built optimizer was dead code).
    """
    # load dataset
    raw_datasets = datasets.load_dataset("json", data_files=dataset_path)

    # 80/20 train/eval split with a fixed seed for reproducibility
    raw_datasets = raw_datasets["train"].train_test_split(test_size=0.2, seed=3000)
    print("dataset info")
    print(raw_datasets)

    # download base config + tokenizer from the ModelScope hub and cache locally
    modelscope.AutoConfig.from_pretrained(model_ID).save_pretrained(model_path)
    modelscope.AutoTokenizer.from_pretrained(model_ID).save_pretrained(model_path)

    context_length = 128
    tokenizer = transformers.AutoTokenizer.from_pretrained(model_path)
    # BUG FIX: the pad token must be set *before* tokenization, not after —
    # Qwen tokenizers ship without a pad token, so any padding performed
    # during the map() below would otherwise fail.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    # process dataset
    def tokenize(element):
        # Truncate to context_length; padding is deliberately left to the
        # data collator, which pads dynamically per batch (cheaper than
        # padding every example to max_length here).
        return tokenizer(
            element["text"],
            truncation=True,
            max_length=context_length,
        )

    tokenized_datasets = raw_datasets.map(
        tokenize, batched=True, remove_columns=raw_datasets['train'].column_names
    )
    # mlm=False -> causal-LM labels (inputs copied/shifted), not masked-LM
    data_collator = transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False)

    # These are Datasets, not DataLoaders — the Trainer builds the loaders.
    train_dataset = tokenized_datasets['train']
    eval_dataset = tokenized_datasets['test']

    # construct a scaled-down model config on top of the base Qwen2 config
    config = transformers.AutoConfig.from_pretrained(
        model_path,
        vocab_size=len(tokenizer),
        hidden_size=256,
        intermediate_size=2048,
        num_attention_heads=8,
        num_hidden_layers=12,
        # BUG FIX: Qwen2Config uses `max_position_embeddings`; the original
        # `n_ctx` is a GPT-2 field and was silently ignored.
        max_position_embeddings=context_length,
        bos_token_id=tokenizer.bos_token_id,
        eos_token_id=tokenizer.eos_token_id,
    )
    print("Model Config:")
    print(config)

    # train from scratch: random-initialized weights from the config
    model = transformers.Qwen2ForCausalLM(config)

    args = transformers.TrainingArguments(
        output_dir = checkpoint_path,
        per_device_train_batch_size = 8,  # train batch size per GPU
        per_device_eval_batch_size = 8,  # eval batch size per GPU
        eval_strategy = "steps",
        eval_steps = 5_00,
        logging_steps = 50,
        gradient_accumulation_steps = 8,  # effective batch = 8 * 8 per device
        num_train_epochs = 2,
        weight_decay = 0.1,
        warmup_steps = 2_00,
        optim = "adamw_torch",  # Trainer builds the AdamW optimizer from this
        lr_scheduler_type = "cosine",
        learning_rate = 5e-4,  # base learning rate
        save_steps = 5_00,
        save_total_limit = 10,
        fp16 = True,  # fp16 mixed precision; prefer bf16=True on Ampere or newer GPUs
        remove_unused_columns = False,
        save_safetensors = False,
        logging_dir = log_path,
        log_on_each_node = True
    )

    # train model
    trainer = transformers.Trainer(
        model=model,
        processing_class=tokenizer,
        args=args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
    )
    trainer.train()

    # save final weights + tokenizer for later from_pretrained() loading
    trainer.save_model(model_save_path)

if __name__ == "__main__":
    # BUG FIX: replaced leftover junk debug print (a placeholder-less
    # f-string of nonsense text) with a meaningful startup message.
    print("Starting Qwen2.5-0.5B pretraining")
    train_qwen_with_transformers_on_distributed_env()



