【获取模型的可训练参数】
# Count the total / trainable / non-trainable parameters of `model`.
import numpy as np

Total_params = 0
Trainable_params = 0
NonTrainable_params = 0

# model.parameters() yields every parameter tensor (weights and biases).
for param in model.parameters():
    # Use numel() to count elements. The previous np.prod(param.size()) is
    # subtly wrong: for a 0-dim parameter, param.size() is an empty tuple and
    # np.prod(()) returns the float 1.0, silently turning all totals into
    # floats (and into numpy scalars otherwise). numel() always returns int.
    mulValue = param.numel()
    Total_params += mulValue  # total parameter count
    if param.requires_grad:
        Trainable_params += mulValue  # updated by the optimizer
    else:
        NonTrainable_params += mulValue  # frozen (requires_grad=False)

print(f'Total params: {Total_params}')
print(f'Trainable params: {Trainable_params}')
print(f'Non-trainable params: {NonTrainable_params}')

【dataset处理】
# Draw `batch_size` indexes at a time from the dataset, then repeat each
# index `mini_repeat_count` times and replay the whole chunk `repeat_count`
# times (with the example config below: 2 prompts per chunk, repeated 4x).
RepeatSampler(
    data_source=dataset,
    mini_repeat_count=self.num_generations,  # each prompt repeated once per generation (4 in the example on the next lines)
    batch_size=self.args.generation_batch_size // self.num_generations,  # prompts per generation batch (8 // 4 = 2 in the example)
    repeat_count=self.num_iterations * self.args.steps_per_generation,  # replay the same chunk across optimizer steps (1 * 2 = 2)
    shuffle=self.shuffle_dataset,
    seed=self.args.seed,
)
self.args.generation_batch_size, self.num_generations, self.num_iterations, self.args.steps_per_generation,
(8, 4, 1, 2)
# Core of RepeatSampler iteration: for every chunk of indexes, replay the
# chunk `repeat_count` times, and inside each replay yield every index
# `mini_repeat_count` times in a row.
for chunk in indexes:
    for _ in range(self.repeat_count):
        for index in chunk:
            for _ in range(self.mini_repeat_count): # self.mini_repeat_count=4
                yield index
batch_samples[0][0].keys()
dict_keys(['problem', 'level', 'solution', 'type', 'prompt'])
batch_samples[0][0]['solution']
'In order for the expression to have a domain of all real numbers, the quadratic $x^2+bx+8 = 0$ must have no real roots.  The discriminant of this quadratic is $b^2 - 4 \\cdot 1 \\cdot 8 = b^2 - 32$.  The quadratic has no real roots if and only if the discriminant is negative, so $b^2 - 32 < 0$, or $b^2 < 32$.  The greatest integer $b$ that satisfies this inequality is $\\boxed{5}$.'
batch_samples[0][1]['solution']
'In order for the expression to have a domain of all real numbers, the quadratic $x^2+bx+8 = 0$ must have no real roots.  The discriminant of this quadratic is $b^2 - 4 \\cdot 1 \\cdot 8 = b^2 - 32$.  The quadratic has no real roots if and only if the discriminant is negative, so $b^2 - 32 < 0$, or $b^2 < 32$.  The greatest integer $b$ that satisfies this inequality is $\\boxed{5}$.'
print([batch_samples[0][0]==batch_samples[0][i] for i in range(1,4)])
[True, True, True]

【生成备选解&logits&loss】
# Sample the next token from the model's distribution, so each run can
# produce different completions. (Snippet from the file below.)
/home/zhangwei/anaconda3/envs/x_r1/lib/python3.12/site-packages/transformers/generation/utils.py
if do_sample:
    # Turn next-token scores into a probability distribution, then draw one
    # token id per sequence from it.
    probs = nn.functional.softmax(next_token_scores, dim=-1)
    # TODO (joao): this OP throws "skipping cudagraphs due to ['incompatible ops']", find solution
    next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
# 获取每个结果的logits
def _get_per_token_logps(self, model, input_ids, attention_mask, logits_to_keep):
    """Return per-token log-probabilities for the last `logits_to_keep` tokens.

    Runs a forward pass and scores each kept input token under the model's
    predictive distribution at the preceding position.
    """
    # Request one extra position: the logit at the final position predicts a
    # *future* token that is not part of `input_ids`, so it is dropped below.
    outputs = model(
        input_ids=input_ids,
        attention_mask=attention_mask,
        logits_to_keep=logits_to_keep + 1,
    )
    shifted_logits = outputs.logits[:, :-1, :]  # (B, L-1, V): drop next-token prediction at the end

    target_ids = input_ids[:, -logits_to_keep:]
    # For transformers<=4.48 the `logits_to_keep` argument isn't supported,
    # so trim to the completion span manually here.
    # See https://github.com/huggingface/trl/issues/2770
    kept_logits = shifted_logits[:, -logits_to_keep:]
    # Log-softmax gathered at the target ids: log p(token_t | tokens_<t).
    return selective_log_softmax(kept_logits, target_ids)

【XGRPOTrainer】
# Call-flow sketch (NOT runnable code): indentation mirrors the call depth
# inside XGRPOTrainer.training_step.
def training_step
  inputs = self._prepare_inputs(inputs)
    inputs = self._generate_and_score_completions(inputs)
        # Sample from the generation distribution so each run can produce different completions.
        prompts_text = [maybe_apply_chat_template(example, self.processing_class)["prompt"] for example in inputs]
        prompt_inputs = self.processing_class(
            prompts_text, return_tensors="pt", padding=True, padding_side="left", add_special_tokens=False
        )
        prompt_completion_ids = unwrapped_model.generate(
            prompt_ids, attention_mask=prompt_mask, generation_config=self.generation_config
        )
          # do_sample branch inside generate():
          probs = nn.functional.softmax(next_token_scores, dim=-1)
          # TODO (joao): this OP throws "skipping cudagraphs due to ['incompatible ops']", find solution
          next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)

【ref_per_token_logps】
inputs['ref_per_token_logps'] 是使用原始的QWEN模型计算？YES
print('is peft')
# Reference log-probs are computed with the *base* model weights:
# disable_adapter() temporarily disables the adapters loaded on the model
# (e.g. LoRA, Prefix Tuning), so the forward pass below sees the original
# pretrained weights.
with self.accelerator.unwrap_model(self.model).disable_adapter():
    ref_per_token_logps = self._get_per_token_logps(
        self.model, prompt_completion_ids, attention_mask, logits_to_keep
    )
【old_per_token_logps】
# Generate completions, then (only when doing multiple GRPO iterations per
# generation) snapshot the current policy's log-probs as "old" log-probs.
prompt_completion_ids = unwrapped_model.generate(
    prompt_ids, attention_mask=prompt_mask, generation_config=self.generation_config
)
# When using num_iterations == 1, old_per_token_logps == per_token_logps, so we can skip its
# computation here, and use per_token_logps.detach() instead.
if self.num_iterations > 1:
    old_per_token_logps = self._get_per_token_logps(
        self.model, prompt_completion_ids, attention_mask, logits_to_keep
    )
【per_token_logps】等价于【old_per_token_logps】
# Rebuild the full (prompt + completion) sequence, then score only the
# completion tokens with the current policy model.
prompt_ids, prompt_mask = inputs["prompt_ids"], inputs["prompt_mask"]
completion_ids, completion_mask = inputs["completion_ids"], inputs["completion_mask"]
input_ids = torch.cat([prompt_ids, completion_ids], dim=1)
attention_mask = torch.cat([prompt_mask, completion_mask], dim=1)
logits_to_keep = completion_ids.size(1)  # we only need to compute the logits for the completion tokens
per_token_logps = self._get_per_token_logps(model, input_ids, attention_mask, logits_to_keep)

【模型结构】finetune最后的lm_head
PeftModelForCausalLM(
  (base_model): LoraModel(
    (model): Qwen2ForCausalLM(
      (model): Qwen2Model(
        (embed_tokens): Embedding(151936, 896)
        (layers): ModuleList(
          (0-23): 24 x Qwen2DecoderLayer(
            (self_attn): Qwen2Attention(
              (q_proj): Linear(in_features=896, out_features=896, bias=True)
              (k_proj): Linear(in_features=896, out_features=128, bias=True)
              (v_proj): Linear(in_features=896, out_features=128, bias=True)
              (o_proj): Linear(in_features=896, out_features=896, bias=False)
            )
            (mlp): Qwen2MLP(
              (gate_proj): Linear(in_features=896, out_features=4864, bias=False)
              (up_proj): Linear(in_features=896, out_features=4864, bias=False)
              (down_proj): Linear(in_features=4864, out_features=896, bias=False)
              (act_fn): SiLU()
            )
            (input_layernorm): Qwen2RMSNorm((896,), eps=1e-06)
            (post_attention_layernorm): Qwen2RMSNorm((896,), eps=1e-06)
          )
        )
        (norm): Qwen2RMSNorm((896,), eps=1e-06)
        (rotary_emb): Qwen2RotaryEmbedding()
      )
      (lm_head): lora.Linear(
        (base_layer): Linear(in_features=896, out_features=151936, bias=False)
        (lora_dropout): ModuleDict(
          (default): Identity()
        )
        (lora_A): ModuleDict(
          (default): Linear(in_features=896, out_features=4, bias=False)
        )
        (lora_B): ModuleDict(
          (default): Linear(in_features=4, out_features=151936, bias=False)
        )
        (lora_embedding_A): ParameterDict()
        (lora_embedding_B): ParameterDict()
        (lora_magnitude_vector): ModuleDict()
      )
    )
  )
)


【】