import os
import torch
from transformers import AutoTokenizer, LlamaForCausalLM


from utils.model_path_getter import load_yaml
from read_word_dataset import WordsDataset

def save_proj(start, end, layer_nums=None, step=1, only_target=False):
    """Capture and save FFN (MLP) projection activations from a Llama model.

    Registers forward hooks on the up/gate/down projections of the selected
    decoder layers, runs the model over slices of ``WordsDataset`` texts,
    and saves the captured activations to disk under the configured
    ``ffn_path``.

    Args:
        start: First dataset index (inclusive) passed to ``ds.load_texts``.
        end: Last dataset index (exclusive).
        layer_nums: Set of decoder-layer indices to hook. Defaults to all
            layers 0..31 (assumes a 32-layer Llama — TODO confirm).
        step: Number of texts fed per forward pass (batch granularity).
        only_target: If True, save only the down_proj inputs
            (``second_line_input``); otherwise save all six streams.
    """
    if layer_nums is None:
        layer_nums = set(range(32))

    # Read the config once instead of re-parsing the YAML three times.
    cfg = load_yaml()
    model_dir = cfg['model_path']
    offload_path = cfg['offload_path']
    current_dir = cfg['ffn_path']

    # Llama tokenizers ship without a pad token; fall back to EOS so that
    # batched inputs can be padded.
    tokenizer = AutoTokenizer.from_pretrained(model_dir)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    model = LlamaForCausalLM.from_pretrained(
        model_dir,
        device_map="auto",
        offload_folder=offload_path,  # spill weights to disk when device memory is short
        torch_dtype=torch.float16,    # fp16 weights to halve memory use
    )

    # Activation buffers filled by the forward hooks below.
    first_line_input = []    # up_proj inputs
    first_line_output = []   # up_proj outputs
    second_line_input = []   # down_proj inputs
    second_line_output = []  # down_proj outputs
    gate_proj_input = []
    gate_proj_output = []

    def print_memory_usage(message=""):
        """Debug helper: print current CUDA memory usage in MB."""
        allocated = torch.cuda.memory_allocated() / (1024 ** 2)
        reserved = torch.cuda.memory_reserved() / (1024 ** 2)
        print(f"{message}\nAllocated: {allocated:.2f} MB, Reserved: {reserved:.2f} MB\n")

    # Hooks move activations to CPU immediately so GPU memory is released.
    def up_proj_hook(module, input, output):
        first_line_input.append(input[0].detach().cpu())
        first_line_output.append(output.detach().cpu())

    def down_proj_hook(module, input, output):
        second_line_input.append(input[0].detach().cpu())
        second_line_output.append(output.detach().cpu())

    def gate_proj_hook(module, input, output):
        gate_proj_input.append(input[0].detach().cpu())
        gate_proj_output.append(output.detach().cpu())

    def save_tensors(tensors, path, other):
        """Save each tensor as {ffn_path}{path}/{other}tensor{i}.pth."""
        path = f"{current_dir}{path}"
        os.makedirs(path, exist_ok=True)
        for i, tensor in enumerate(tensors):
            file_name = f"{path}/{other}tensor{i}.pth"
            torch.save(tensor, file_name)
            print(f"Saved {file_name}", end=" ")
            print(tensor.shape)

    # Keep the handles so the hooks can be removed once capture is done
    # (the original discarded them, making removal impossible).
    hook_handles = []
    for i, layer in enumerate(model.model.layers):
        if i in layer_nums:
            hook_handles.append(layer.mlp.up_proj.register_forward_hook(up_proj_hook))
            hook_handles.append(layer.mlp.down_proj.register_forward_hook(down_proj_hook))
            hook_handles.append(layer.mlp.gate_proj.register_forward_hook(gate_proj_hook))

    print(model.model.layers[0].mlp.act_fn)

    ds = WordsDataset()

    for i in range(start, end, step):
        prompts = ds.load_texts(i, i + step)
        inputs = tokenizer(prompts, return_tensors="pt", padding=True)
        inputs = {key: value.to(model.device) for key, value in inputs.items()}

        # Inference only — no gradients needed; the return value is unused
        # because the hooks collect everything we want.
        with torch.no_grad():
            model(**inputs)

        # first_line_output shape: [(step, seq_len, hidden_size)] per hooked
        # layer per forward pass — TODO confirm against the model config.
        if not only_target:
            save_tensors(first_line_output, "/ffn/first_line_output", other=i)
            save_tensors(second_line_output, "/ffn/second_line_output", other=i)
            save_tensors(first_line_input, "/ffn/first_line_input", other=i)
            save_tensors(gate_proj_input, "/ffn/gate_proj_input", other=i)
            save_tensors(gate_proj_output, "/ffn/gate_proj_output", other=i)

        save_tensors(second_line_input, "/ffn/second_line_input", other=i)

        # Reset the buffers so the next batch starts clean.
        first_line_input.clear()
        first_line_output.clear()
        second_line_input.clear()
        second_line_output.clear()
        gate_proj_input.clear()
        gate_proj_output.clear()

    # Detach the hooks so the model is clean if the caller reuses it.
    for handle in hook_handles:
        handle.remove()

if __name__ == "__main__":
    # Capture activations for dataset items 0..9, layer 0 only,
    # saving just the down_proj inputs.
    save_proj(0, 10, layer_nums={0}, only_target=True)




