import torch
import os
from model.model import GPT
from gpt2_finetune import get_peft_model, LoraConfig, TaskType 
from model.dataloader import get_dataset_info
from safetensors.torch import load_file

# Paths to the three task-specific LoRA adapter checkpoints (PEFT `save_pretrained`
# layout: a directory containing `adapter_model.safetensors`).
lora_chatbot_dir = 'lora_64_weights.bin/adapter_model.safetensors'
lora_spm_dir = 'lora_spm_weights.bin/adapter_model.safetensors'
lora_correct_dir = 'lora_correct_weights.bin/adapter_model.safetensors'

# NOTE(review): unused placeholder — adapter selection happens inside
# run_moe_with_lora via `lora_file_path`; confirm before removing.
lora_weights_dir = ''

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Vocabulary mappings shared by tokenization below (word2id used for encoding).
word2id, id2word, vocab_size = get_dataset_info()

def load_gpt_pretrained_weights(gpt_model, pretrained_model_path):
    """Load base GPT weights from `pretrained_model_path` into `gpt_model`.

    Args:
        gpt_model: an `nn.Module` whose state dict matches the checkpoint
            (extra/missing keys are tolerated via strict=False so LoRA-wrapped
            models still load).
        pretrained_model_path: path to a `torch.save`d state dict.

    Raises:
        FileNotFoundError: if the checkpoint file does not exist.
    """
    if os.path.exists(pretrained_model_path):
        # map_location='cpu' so a GPU-saved checkpoint loads on CPU-only hosts;
        # the caller moves the model to the target device afterwards.
        state_dict = torch.load(pretrained_model_path, map_location='cpu')
        gpt_model.load_state_dict(state_dict, strict=False)
    else:
        raise FileNotFoundError(f"找不到 GPT 预训练权重文件: {pretrained_model_path}")
    
def load_lora_weights(gpt_model, lora_path):
    """Load LoRA adapter weights from `lora_path` into `gpt_model`.

    Supports both safetensors checkpoints (`*.safetensors`) and plain
    `torch.save`d state dicts. Loading uses strict=False because adapter
    checkpoints contain only the LoRA parameters, not the full model.

    Args:
        gpt_model: the (PEFT-wrapped) model to receive the adapter weights.
        lora_path: path to the adapter checkpoint file.

    Raises:
        FileNotFoundError: if `lora_path` does not exist.
    """
    if os.path.exists(lora_path):
        if lora_path.endswith('.safetensors'):
            # safetensors loads to CPU by default.
            state_dict = load_file(lora_path)
        else:
            # map_location='cpu' so GPU-saved adapters load on CPU-only hosts;
            # the model is already on the target device and torch will copy in.
            state_dict = torch.load(lora_path, map_location='cpu')
        gpt_model.load_state_dict(state_dict, strict=False)
    else:
        raise FileNotFoundError(f"LoRA weights file {lora_path} not found.")
    
def run_moe_with_lora(input_text):
    """Answer `input_text` with the base GPT model plus a selected LoRA adapter.

    Builds a fresh model on every call: loads the pretrained backbone, wraps it
    with a PEFT LoRA config, loads the chosen adapter weights, and generates an
    answer from the raw text via `gpt_model.answer`.

    Args:
        input_text: the user's question (plain string; may contain previous
            turns separated by tab characters — see trimming below).

    Returns:
        The model's generated answer string.
    """
    gpt_model = GPT()
    pretrained_model_path = 'ResGPT2.pt'
    load_gpt_pretrained_weights(gpt_model, pretrained_model_path)

    # LoRA applied to the attention projection matrices of the custom GPT.
    lora_config = LoraConfig(
        task_type=TaskType.CAUSAL_LM,
        r=8,
        lora_alpha=16,
        lora_dropout=0.1,
        target_modules=['W_Q', 'W_K', 'W_V']
    )
    gpt_model = get_peft_model(gpt_model, lora_config)
    gpt_model.to(device)
    gpt_model.eval()

    # Fix: removed dead tensor-construction code here (tokenize + pad + expand
    # into `input_tensor`) — the result was never used, since `answer` consumes
    # the raw string; it also concatenated float and long tensors, which can
    # raise a dtype error on some torch versions.

    # Select which adapter to apply (swap the assignment to change task).
    lora_file_path = lora_correct_dir
    # lora_file_path = lora_spm_dir
    # lora_file_path = lora_chatbot_dir

    load_lora_weights(gpt_model, lora_file_path)

    # '\t' marks the prompt/answer boundary expected by `answer`.
    input_text += '\t'
    if len(input_text) > 200:
        # Drop the oldest turn (everything up to and including the first tab)
        # to keep the context within 200 characters.
        t_index = input_text.find('\t')
        input_text = input_text[t_index + 1:]
    answer = gpt_model.answer(input_text)
    return answer

if __name__ == "__main__":
    # Interactive chat loop: prompt the user, run inference, print the reply.
    while True:
        question = input("User:")
        reply = run_moe_with_lora(question)
        print("MRGPT:", reply)
