from transformers import AutoTokenizer
from src.llamafactory.hparams import read_args, get_ray_args, get_train_args
from transformers import HfArgumentParser
from src.llamafactory.hparams import ModelArguments, DataArguments, TrainingArguments, FinetuningArguments, \
    GeneratingArguments


# print(
#     ray_args)  # RayArguments(ray_run_name=None, ray_storage_path='./saves', ray_num_workers=1, resources_per_worker={'GPU': 1}, placement_strategy='PACK')

def _parse_args(
        parser, args=None, allow_extra_keys: bool = False
):
    """Parse ``args`` into the parser's dataclasses.

    Dict inputs go through ``parse_dict``; anything else is treated as
    command-line style arguments. Leftover (unrecognized) tokens raise a
    ``ValueError`` unless ``allow_extra_keys`` is set.
    """
    args = read_args(args)
    if isinstance(args, dict):
        return parser.parse_dict(args, allow_extra_keys=allow_extra_keys)

    # Last element of the parse result is the list of unrecognized tokens.
    parsed = parser.parse_args_into_dataclasses(args=args, return_remaining_strings=True)
    unknown_args = parsed[-1]

    if not allow_extra_keys and unknown_args:
        print(parser.format_help())
        print(f"Got unknown args, potentially deprecated arguments: {unknown_args}")
        raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {unknown_args}")

    return tuple(parsed[:-1])


_TRAIN_ARGS = [ModelArguments, DataArguments, TrainingArguments, FinetuningArguments, GeneratingArguments]


def _parse_train_args(args):
    """Parse ``args`` into the five training dataclasses, tolerating extra keys."""
    return _parse_args(HfArgumentParser(_TRAIN_ARGS), args, allow_extra_keys=True)


# Read CLI/config arguments once, then derive the Ray argument group from them.
args = read_args()
ray_args = get_ray_args(args)
# print(_parse_train_args(args))

# Unpack the five training-related dataclass instances; model_args and
# data_args are consumed further down in this script.
model_args, data_args, training_args, finetuning_args, generating_args = get_train_args(args)


# Load the tokenizer from a local model checkpoint.
model_dir = '/home/dengyunfei/deepseek-r1-8b'
tokenizer = AutoTokenizer.from_pretrained(model_dir)

# Example chat conversation (system + user turn).
messages = [
    {"role": "system", "content": "你是一个助手。"},
    {"role": "user", "content": "今天天气如何？"}
]

# Render the conversation with the tokenizer's chat template.
# BUG FIX: `tokenizer.chat_template` is a Jinja2 template string, so calling
# str.format() on it is wrong — Jinja braces ({{ ... }}, {% ... %}) collide
# with str.format placeholders and raise, and the messages would never be
# substituted anyway. apply_chat_template() renders the template correctly.
formatted_text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,              # return the formatted string, not token ids
    add_generation_prompt=True,  # append the assistant prompt for generation
)

# NOTE(review): these imports use the `llamafactory` package path while the top
# of the file imports from `src.llamafactory` — confirm both resolve to the
# same installed package.
from llamafactory.data import get_template_and_fix_tokenizer
from llamafactory.model import load_tokenizer
# Build the tokenizer from the parsed model arguments; load_tokenizer returns
# a dict of the form {"tokenizer": tokenizer, "processor": processor}.
tokenizer_module = load_tokenizer(model_args)
tokenizer = tokenizer_module["tokenizer"]
# Fetch the template matching this tokenizer (this call may also patch the
# tokenizer's special tokens, per the function name).
template = get_template_and_fix_tokenizer(tokenizer, data_args)
# NOTE(review): _get_jinja_template is a private API of llamafactory's
# Template class and may break across versions — check whether a public
# accessor exists before relying on this.
tokenizer.chat_template = template._get_jinja_template(tokenizer)