# reference.py
import torch
import sentencepiece as spm
from config.config import paths_cfg,model_cfg
from scripts.load_model import load_model_and_optimizer

@torch.no_grad()
def generate_text(model,
                  tokenizer,
                  prompt,
                  max_new_tokens=256,
                  temperature=0.9,
                  top_k=50,
                  device="cuda",
                  repetition_penalty=1.2,
                  block_size=None,
                  ):
    """Autoregressively sample text from ``model``, seeded with ``prompt``.

    Args:
        model: callable returning logits of shape [1, seq_len, vocab_size];
            ``model.eval()`` is called before sampling.
        tokenizer: SentencePiece-style object exposing ``encode``, ``decode``
            and ``eos_id``.
        prompt: seed string to condition the generation on.
        max_new_tokens: maximum number of tokens to sample.
        temperature: softmax temperature (> 0); lower values are greedier.
        top_k: keep only the k most likely tokens each step (None disables).
        device: torch device the input tensor is moved to.
        repetition_penalty: values > 1.0 discourage re-sampling tokens that
            already appear in the sequence (Hugging Face convention);
            1.0 disables the penalty.
        block_size: context window fed to the model; when None, falls back
            to ``model_cfg.block_size`` (backward compatible).

    Returns:
        The decoded string: prompt tokens plus all generated tokens
        (including the EOS token when one is sampled).
    """
    if block_size is None:
        block_size = model_cfg.block_size
    model.eval()
    # [1, seq_len] tensor of token ids on the target device.
    input_ids = torch.tensor([tokenizer.encode(prompt)], dtype=torch.long).to(device)
    eos_id = tokenizer.eos_id()  # invariant — hoisted out of the sampling loop

    for step in range(max_new_tokens):
        # Crop the context so it never exceeds the model's window.
        input_cond = input_ids[:, -block_size:]
        logits = model(input_cond)  # [1, seq_len, vocab_size]
        # Distribution over the next token, sharpened by temperature.
        logits = logits[:, -1, :] / temperature  # [1, vocab_size]

        # Repetition penalty (Hugging Face convention): push already-seen
        # tokens away from re-selection. Vectorized gather/where/scatter_
        # replaces the original per-token Python loop; duplicate indices
        # write identical values, so the result matches the loop exactly.
        if repetition_penalty != 1.0:
            seen = torch.gather(logits, 1, input_ids)  # [1, seq_len]
            seen = torch.where(seen < 0,
                               seen * repetition_penalty,
                               seen / repetition_penalty)
            logits.scatter_(1, input_ids, seen)

        if top_k is not None:
            # Clamp k so torch.topk never fails on a small vocabulary.
            k = min(top_k, logits.size(-1))
            values, _ = torch.topk(logits, k=k)  # [1, k], sorted descending
            # Smallest surviving logit, shaped [1, 1] for broadcasting.
            min_values = values[:, -1].unsqueeze(-1)
            # Mask everything below the k-th logit with -inf so softmax
            # assigns it zero probability.
            logits = torch.where(
                logits < min_values,
                torch.full_like(logits, -float("Inf")),
                logits,
            )

        probs = torch.softmax(logits, dim=-1)  # [1, vocab_size]

        # Per-step EOS diagnostics (kept from the original implementation).
        eos_logit = logits[0, eos_id].item()
        eos_prob = probs[0, eos_id].item()
        print(f"Step {step + 1:3d} | EOS (id={eos_id}) | Logit: {eos_logit:6.2f} | Prob: {eos_prob:8.6f}")

        # Sample one token id from the distribution; shape [1, 1].
        next_id = torch.multinomial(probs, num_samples=1)
        input_ids = torch.cat([input_ids, next_id], dim=1)  # [1, seq_len + 1]

        if next_id.item() == eos_id:
            print("遇到eos结束生成~")
            break

    # input_ids[0] -> [seq_len]; decode the whole sequence back to text.
    return tokenizer.decode(input_ids[0].tolist())

def main():
    """Entry point: load the tokenizer and checkpoint, then run a sampling demo."""
    if torch.cuda.is_available():
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")
    print(f"Using device: {device}")

    # SentencePiece tokenizer trained alongside this model.
    tokenizer = spm.SentencePieceProcessor(
        model_file=paths_cfg.tokenizer_model_path
    )

    # Only the model is needed for inference; the optimizer, loss, step
    # counter and scheduler come along from the shared checkpoint loader.
    model, optimizer, loss_fn, start_step, scheduler = load_model_and_optimizer()

    prompt = "Artificial Intelligence is"
    print(f"Prompt: {prompt}")

    generated = generate_text(
        model=model,
        tokenizer=tokenizer,
        prompt=prompt,
        max_new_tokens=128,
        temperature=0.5,
        top_k=10,
        device=device,
        repetition_penalty=1.1,
    )
    print("\n--- Generated Text ---")
    print(generated)

# Script entry point: run the generation demo when executed directly.
if __name__ == "__main__":
    main()