import os
import sys
import torch
import torch.nn.functional as F
from transformers import GPT2LMHeadModel, BertTokenizerFast

# Add the project root to sys.path so that `from gpt2_chatbot import ...`
# resolves when this script is run directly rather than as a package module.
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
print(f'project_root-->\n{project_root}')
sys.path.append(project_root)
from gpt2_chatbot.config import ParameterConfig

def top_k_filtering(logits, top_k=10, filter_value=-float('Inf')):
    """Keep only the ``top_k`` highest-scoring tokens in ``logits``.

    Every other position is set to ``filter_value`` (default ``-inf``) so
    that a subsequent softmax assigns it zero probability.

    Args:
        logits: tensor of unnormalized scores; filtering is applied along
            the last dimension.
        top_k: number of tokens to keep; ``0`` disables filtering.
        filter_value: score written into the filtered-out positions.

    Returns:
        A new tensor of the same shape. The input tensor is left
        unmodified (the previous implementation mutated it in place,
        which was a surprising side effect for callers).
    """
    logits = logits.clone()  # work on a copy; do not mutate the caller's tensor
    if top_k > 0:
        top_k = min(top_k, logits.size(-1))  # guard against top_k > vocab size
        # The threshold is the k-th largest value; everything strictly
        # below it gets filtered out.
        indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
        logits[indices_to_remove] = filter_value
    return logits


def main():
    """Single-turn generation demo: sample a reply to a fixed prompt."""
    params = ParameterConfig()

    # Prefer the GPU when one is available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Tokenizer is built from the project's vocab file; the model is the
    # fine-tuned checkpoint saved under params.save_model_path.
    tokenizer = BertTokenizerFast(
        vocab_file=params.vocab_path,
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]"
    )
    model = GPT2LMHeadModel.from_pretrained(params.save_model_path)
    model.to(device)
    model.eval()

    # Fixed demo prompt; swap in `input(...)` for interactive use.
    text = '你好'
    print(f'text-->\n{text}')
    text_ids = tokenizer.encode(text, add_special_tokens=False)
    print(f'text_ids-->\n{text_ids}')
    # Wrap the prompt as [CLS] <tokens> [SEP], the format used in training.
    input_ids = [tokenizer.cls_token_id] + text_ids + [tokenizer.sep_token_id]
    print(f'input_ids-->\n{input_ids}')
    input_tensor = torch.tensor([input_ids], dtype=torch.long).to(device)
    print(f'input_tensor-->\n{input_tensor}')

    response = []
    max_len = 200
    top_k = 10
    unk_id = tokenizer.convert_tokens_to_ids('[UNK]')

    with torch.no_grad():
        for step in range(max_len):
            # Score the next token from the full context so far.
            logits = model(input_ids=input_tensor).logits[0, -1, :]
            logits[unk_id] = -float('Inf')  # never emit [UNK]

            # Top-k sampling: restrict to the k best candidates, then draw.
            probs = F.softmax(top_k_filtering(logits, top_k=top_k), dim=-1)
            next_token = torch.multinomial(probs, num_samples=1)

            # [SEP] marks the end of the reply.
            if next_token.item() == tokenizer.sep_token_id:
                break

            response.append(next_token.item())
            # Append the sampled token and feed the grown context back in.
            input_tensor = torch.cat((input_tensor, next_token.unsqueeze(0)), dim=1)
            print(f'idx-->{step}')
            print(tokenizer.convert_ids_to_tokens(input_tensor[0]))

    # Decode and show the sampled reply.
    print()
    print("Chatbot：" + "".join(tokenizer.convert_ids_to_tokens(response)))


# Script entry point: run the demo only when executed directly.
if __name__ == "__main__":
    main()
