from modelscope.msdatasets import MsDataset
import json
import random
import torch

# Split the 'krisfu/delicate_medical_r1_data' dataset into train/val JSONL files.

TRAIN_RATIO = 0.9  # fraction of shuffled records written to train.jsonl


def split_records(records, ratio=TRAIN_RATIO):
    """Partition *records* into (train, val) at the given ratio.

    The cut index is int(len(records) * ratio), matching the original
    truncating behavior. The input list is not modified; shuffling is
    the caller's responsibility.
    """
    cut = int(len(records) * ratio)
    return records[:cut], records[cut:]


def write_jsonl(path, items):
    """Write *items* to *path*, one JSON object per line.

    UTF-8 output with ensure_ascii=False so non-ASCII (e.g. Chinese
    medical text) is written verbatim rather than \\u-escaped.
    """
    with open(path, 'w', encoding='utf-8') as f:
        for item in items:
            json.dump(item, f, ensure_ascii=False)
            f.write('\n')


def main():
    """Download the dataset, shuffle deterministically, and emit the split files."""
    random.seed(42)  # fixed seed -> the shuffle (and thus the split) is reproducible

    ds = MsDataset.load('krisfu/delicate_medical_r1_data', subset_name='default', split='train')
    data_list = list(ds)
    random.shuffle(data_list)

    train_data, val_data = split_records(data_list)

    write_jsonl('train.jsonl', train_data)
    write_jsonl('val.jsonl', val_data)

    print("The dataset has been split successfully.")
    print(f"Train Set Size：{len(train_data)}")
    print(f"Val Set Size：{len(val_data)}")


# Guarded entry point: importing this module no longer triggers the
# network download and file writes; running it as a script behaves as before.
if __name__ == "__main__":
    main()



'''
Load the model (currently disabled)
'''

# model_folder = r"D:\models\qwen3-0.6b"
# from modelscope import snapshot_download, AutoTokenizer
# from transformers import AutoModelForCausalLM, TrainingArguments, Trainer, DataCollatorForSeq2Seq

# # Download the Qwen model from ModelScope into a local directory
# # model_dir = snapshot_download("Qwen/Qwen3-1.7B", cache_dir="./", revision="master")

# # Load the model weights with Transformers
# tokenizer = AutoTokenizer.from_pretrained(model_folder, use_fast=False, trust_remote_code=True)
# model = AutoModelForCausalLM.from_pretrained(model_folder, device_map="auto", torch_dtype=torch.bfloat16)
# print(tokenizer)
# print(model)

# from transformers import TrainingArguments, Trainer




