import sys
import os
import torch
from transformers import GPT2Config, GPT2LMHeadModel
from transformers import AutoModelForCausalLM, BertTokenizerFast

# Put the project root on sys.path so `gpt2_chatbot` is importable as a
# package; without this, `from gpt2_chatbot.config import ...` fails when
# this script is run directly from its subdirectory.
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
print(f'project_root-->\n{project_root}')
sys.path.append(project_root)
from gpt2_chatbot.config import ParameterConfig



def create_model():
    """Build an untrained GPT-2 LM-head model from a JSON config and save it.

    Reads the architecture hyper-parameters from ``params.config_json``,
    instantiates a randomly initialized ``GPT2LMHeadModel`` (this is NOT a
    pretrained checkpoint), and persists it with ``save_pretrained`` so that
    ``load_model`` can restore it later.

    Returns:
        GPT2LMHeadModel: the freshly initialized model.
    """
    # 1. Project-level configuration (file paths etc.).
    params = ParameterConfig()

    # 2. Build the model purely from the JSON architecture description.
    model_config = GPT2Config.from_json_file(params.config_json)
    print(f'model的配置信息--->\n{model_config}')
    model = GPT2LMHeadModel(config=model_config)
    print(f'模型的结构--->\n{model}')

    # 3. Persist config + weights for later reloading via from_pretrained.
    model.save_pretrained(params.save_model_path)
    return model




def load_model():
    """Restore the saved causal-LM model and build its matching tokenizer.

    Builds a ``BertTokenizerFast`` from the local vocab file (the special
    tokens must match the ones used when the training data was encoded),
    then loads the model previously written by ``create_model``.

    Returns:
        tuple: ``(model, tokenizer)`` — the loaded causal-LM model and the
        tokenizer. (Previously the tokenizer was constructed and discarded;
        returning both makes the function usable by callers.)
    """
    # 1. Project-level configuration (file paths etc.).
    params = ParameterConfig()

    # 2. Tokenizer from the local vocab file with explicit special tokens.
    tokenizer = BertTokenizerFast(params.vocab_path,
                                  sep_token="[SEP]",
                                  pad_token="[PAD]",
                                  cls_token="[CLS]")

    # 3. Load the model saved under save_model_path.
    model = AutoModelForCausalLM.from_pretrained(params.save_model_path)
    print("✅ Pretrained model loaded successfully.")
    print(model)
    return model, tokenizer

if __name__ == '__main__':
    # Run create_model() once first to initialize and save a fresh model,
    # then load_model() can restore it.
    # create_model()
    load_model()
