import os

os.environ['CUDA_VISIBLE_DEVICES'] = '0'
|
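# Training arguments shared by all tests below.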
kwargs = {
    'per_device_train_batch_size': 2,
    'save_steps': 5,
    'gradient_accumulation_steps': 4,
    'num_train_epochs': 1,
}
|
|
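# Reward-model training (rlhf_type='rm') on a preference dataset; the '#100'
# suffix subsamples 100 rows. Training defaults to LoRA, so inference reloads
# the checkpoint as an adapter and merges it via merge_lora=True.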
def test_rm():
    from swift.llm import rlhf_main, RLHFArguments, infer_main, InferArguments
    result = rlhf_main(
        RLHFArguments(
            rlhf_type='rm',
            model='Shanghai_AI_Laboratory/internlm2-1_8b-reward',
            dataset=['hjh0119/shareAI-Llama3-DPO-zh-en-emoji#100'],
            split_dataset_ratio=0.01,
            **kwargs))
    last_model_checkpoint = result['last_model_checkpoint']
    infer_main(InferArguments(adapters=last_model_checkpoint, load_data_args=True, merge_lora=True))
|
|
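# PPO training: the 1B Instruct policy is optimized against a separate 3B
# reward model that scores the sampled rollouts. A roughly equivalent CLI
# invocation (assumption): `swift rlhf --rlhf_type ppo --model ... --reward_model ...`.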
def test_ppo():
    from swift.llm import rlhf_main, RLHFArguments, infer_main, InferArguments
    result = rlhf_main(
        RLHFArguments(
            rlhf_type='ppo',
            model='LLM-Research/Llama-3.2-1B-Instruct',
            reward_model='AI-ModelScope/GRM-Llama3.2-3B-rewardmodel-ft',
            dataset=['AI-ModelScope/alpaca-gpt4-data-zh#100', 'AI-ModelScope/alpaca-gpt4-data-en#100'],
            **kwargs))
    last_model_checkpoint = result['last_model_checkpoint']
    infer_main(InferArguments(adapters=last_model_checkpoint, load_data_args=True, merge_lora=True))
|
|
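# Same PPO recipe, but the policy and the reward model start from the same
# Qwen2.5-7B-Instruct checkpoint.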
def test_ppo2():
    from swift.llm import rlhf_main, RLHFArguments, infer_main, InferArguments
    result = rlhf_main(
        RLHFArguments(
            rlhf_type='ppo',
            model='Qwen/Qwen2.5-7B-Instruct',
            reward_model='Qwen/Qwen2.5-7B-Instruct',
            dataset=['AI-ModelScope/alpaca-gpt4-data-zh#100', 'AI-ModelScope/alpaca-gpt4-data-en#100'],
            **kwargs))
    last_model_checkpoint = result['last_model_checkpoint']
    infer_main(InferArguments(adapters=last_model_checkpoint, load_data_args=True, merge_lora=True))
|
|
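# PPO on a vision-language model. MAX_PIXELS caps the image resolution used by
# Qwen2-VL preprocessing, bounding memory use during rollouts.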
def test_ppo_vl():
    os.environ['MAX_PIXELS'] = '1003520'
    from swift.llm import rlhf_main, RLHFArguments, infer_main, InferArguments
    result = rlhf_main(
        RLHFArguments(
            rlhf_type='ppo',
            model='Qwen/Qwen2-VL-2B-Instruct',
            reward_model='Qwen/Qwen2-VL-7B-Instruct',
            dataset=['modelscope/coco_2014_caption:validation#100'],
            **kwargs))
    last_model_checkpoint = result['last_model_checkpoint']
    infer_main(InferArguments(adapters=last_model_checkpoint, load_data_args=True, merge_lora=True))
|
|
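# Run one test at a time; each trains and then runs inference on the saved checkpoint.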
if __name__ == '__main__':
    test_ppo_vl()