from transformers import GPTNeoForCausalLM, GPT2TokenizerFast
import torch
import os
import shutil

def build_model_and_tokenizer():
    """Load the GPT-Neo 125M model and tokenizer, local cache first.

    Tries ``./model/gpt-neo125m`` first; if that fails for any reason,
    downloads from Hugging Face and saves a copy locally so the next
    call can load offline.

    Returns:
        tuple: (model, tokenizer) — a GPTNeoForCausalLM and a GPT2TokenizerFast.
    """
    local_path = "./model/gpt-neo125m"
    try:
        # Prefer the local cache to avoid a network download.
        tokenizer = GPT2TokenizerFast.from_pretrained(local_path)
        model = GPTNeoForCausalLM.from_pretrained(local_path)
        # Bug fix: print() does not apply %-style formatting to extra
        # arguments (that is a logging idiom), so the original printed a
        # literal "%s". Use f-strings instead.
        print(f"[INFO]成功从本地加载模型和分词器：{local_path}")
    except Exception as e:
        # Fix misspelled tag "WARNNING" -> "WARNING".
        print(f"[WARNING]从本地加载失败，改为从 Hugging Face 加载：{e}")
        tokenizer = GPT2TokenizerFast.from_pretrained("EleutherAI/gpt-neo-125M")
        model = GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-125M")
        print("[INFO]成功从 Hugging Face 加载模型和分词器")

        # Save a copy locally so the next load works offline.
        os.makedirs(local_path, exist_ok=True)
        tokenizer.save_pretrained(local_path)
        model.save_pretrained(local_path)
        print(f"[INFO]已将模型和分词器保存到本地：{local_path}")

    # GPT-Neo ships without a pad token; reuse EOS so batch padding works.
    tokenizer.pad_token = tokenizer.eos_token
    return model, tokenizer

def build_model():
    """Load the GPT-Neo 125M model, local cache first.

    Tries ``./model/gpt-neo125m`` first; if that fails, downloads from
    Hugging Face and saves a copy locally for subsequent offline loads.

    Returns:
        GPTNeoForCausalLM: the loaded model.
    """
    local_path = "./model/gpt-neo125m"
    try:
        # Prefer the local cache to avoid a network download.
        model = GPTNeoForCausalLM.from_pretrained(local_path)
        # Bug fix: print() ignores %-style lazy arguments (a logging
        # idiom); use f-strings so the path actually appears in the message.
        print(f"[INFO]成功从本地加载模型：{local_path}")
    except Exception as e:
        # Fix misspelled tag "WARNNING" -> "WARNING".
        print(f"[WARNING]从本地加载失败，改为从 Hugging Face 加载：{e}")
        model = GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-125M")
        print("[INFO]成功从 Hugging Face 加载模型")

        # Save a copy locally so the next load works offline.
        os.makedirs(local_path, exist_ok=True)
        model.save_pretrained(local_path)
        print(f"[INFO]已将模型保存到本地：{local_path}")
    return model

def build_tokenizer():
    """Load the GPT-Neo 125M tokenizer, local cache first.

    Tries ``./model/gpt-neo125m`` first; if that fails, downloads from
    Hugging Face and saves a copy locally for subsequent offline loads.

    Returns:
        GPT2TokenizerFast: the loaded tokenizer with pad_token set to EOS.
    """
    local_path = "./model/gpt-neo125m"
    try:
        # Prefer the local cache to avoid a network download.
        tokenizer = GPT2TokenizerFast.from_pretrained(local_path)
        # Bug fix: print() ignores %-style lazy arguments (a logging
        # idiom); use f-strings so the path actually appears in the message.
        print(f"[INFO]成功从本地加载分词器：{local_path}")
    except Exception as e:
        # Fix misspelled tag "WARNNING" -> "WARNING".
        print(f"[WARNING]从本地加载失败，改为从 Hugging Face 加载：{e}")
        tokenizer = GPT2TokenizerFast.from_pretrained("EleutherAI/gpt-neo-125M")
        print("[INFO]成功从 Hugging Face 加载分词器")
        # Save a copy locally so the next load works offline.
        os.makedirs(local_path, exist_ok=True)
        tokenizer.save_pretrained(local_path)
        print(f"[INFO]已将分词器保存到本地：{local_path}")

    # GPT-Neo ships without a pad token; reuse EOS so batch padding works.
    tokenizer.pad_token = tokenizer.eos_token
    return tokenizer

def delete_local_model():
    """Remove everything inside ./model/gpt-neo125m, keeping the directory itself.

    Prints a warning if the directory does not exist, and an error message
    (without raising) if any entry cannot be removed.
    """
    local_path = "./model/gpt-neo125m"
    if not os.path.exists(local_path):
        print(f"[WARN] 模型目录不存在: {local_path}")
        return
    try:
        # Remove each child entry; directories need rmtree, everything
        # else (regular files and symlinks) is unlinked directly.
        for entry in os.listdir(local_path):
            entry_path = os.path.join(local_path, entry)
            if os.path.isdir(entry_path) and not os.path.islink(entry_path):
                shutil.rmtree(entry_path)
            else:
                os.unlink(entry_path)
        print(f"[INFO] 成功清空目录内容: {local_path}")
    except Exception as e:
        print(f"[ERROR] 清空目录内容失败: {local_path}，错误信息: {e}")
