from transformers import LlamaForCausalLM, LlamaTokenizer
import torch

from eval import show_without_plt
from quantize_util import QuantizationUtils
from read_data import WordsDataset
from utils.model_path_getter import load_yaml
from transformers import BitsAndBytesConfig

# Load model/offload paths from the YAML config.
# (Bug fix: load_yaml() was called twice — parse the config once and reuse it.)
config = load_yaml()
model_path = config["model_path"]
offload_path = config["offload_path"]

# Experiment switches:
#   change_on — patch layer-0 down_proj weight and register the input hook.
#   rot       — actually apply the rotation matrix (vs. pass-through).
change_on = True
rot = True

# Load the tokenizer.
tokenizer = LlamaTokenizer.from_pretrained(model_path)

# 4-bit quantization config (currently commented out at model load time;
# kept so it can be re-enabled for quantized runs).
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,               # load weights in 4-bit
    bnb_4bit_use_double_quant=True,  # double-quantize the quantization constants
    bnb_4bit_quant_type="nf4",       # NF4 quantization format
    bnb_4bit_compute_dtype=torch.float16,
)


# Load the model in float16; layers are sharded across available devices
# automatically, with overflow offloaded to disk.
model = LlamaForCausalLM.from_pretrained(
    model_path,
    device_map="auto",            # distribute layers across available devices
    offload_folder=offload_path,  # disk offload location for layers that do not fit
    # quantization_config=quant_config,  # enable to load in 4-bit (see quant_config above)
    torch_dtype=torch.float16,
)

# Extra scaling applied around the rotation (1 == no extra scaling).
scale_forward = 1
scale_back = 1

# Load the rotation matrix and move it to CUDA as float16 in one step
# (must match the model's compute dtype for the matmuls below).
# The original comment claimed float32, but the code converted to float16;
# the comment was wrong, not the code.
# NOTE(review): hard-coded relative checkpoint path — confirm it exists from the CWD.
rotateMatrix = torch.load('../../1-10-1 (1).pth', weights_only=True).to(
    device=torch.device('cuda'), dtype=torch.float16
)

if change_on:
    # Fold the rotation into layer 0's down_proj weight so the rotation applied
    # to its input (by the forward pre-hook registered below) cancels out.
    # Idiom fix: the original looped over every layer but only acted on
    # index 0 — index layer 0 directly instead.
    down_proj = model.model.layers[0].mlp.down_proj
    original_weight = down_proj.weight.data

    print(original_weight.dtype)
    # Record the weight distribution before patching.
    print(f"Original weight mean: {original_weight.mean().item()}, std: {original_weight.std().item()}")

    # Right-multiply by the rotation matrix (pass-through when rotation is disabled).
    if rot:
        modified_weight = torch.matmul(original_weight, rotateMatrix)
    else:
        modified_weight = original_weight
    down_proj.weight.data = modified_weight * scale_back

    # Record the weight distribution after patching.
    print(f"Modified weight mean: {down_proj.weight.data.mean().item()}, std: {down_proj.weight.data.std().item()}")


# Fake-quantizer with a fixed scale/zero-point, used to simulate activation
# quantization error before and after the rotation.
quanter = QuantizationUtils(scale=2 / 255, zero_point=0)


def _print_activation_stats(tensor):
    """Print small-magnitude ratios and the exact-zero fraction of `tensor`.

    Extracted helper: this diagnostic block appeared verbatim twice in the
    original hook (once for the raw input, once for the modified input).
    """
    stats = show_without_plt(tensor.to(torch.float32))
    print(stats["lower_ratio_1e_3"], stats["lower_ratio_1e_4"])
    # Fraction of elements that are exactly zero.
    print(torch.sum(tensor == 0).item() / torch.numel(tensor))


def modify_down_proj_input(module, input):
    """Forward pre-hook on down_proj: fake-quantize, then rotate, its input.

    The rotation here pairs with the rotation folded into the weight above, so
    the end-to-end computation is (approximately) unchanged apart from the
    injected quantization error.

    Args:
        module: the hooked down_proj layer (unused).
        input: tuple of positional inputs; input[0] is the activation tensor.

    Returns:
        A 1-tuple with the modified activation, replacing the original input.
    """
    original_input = input[0]
    print(original_input.dtype)
    print(original_input.shape)
    _print_activation_stats(original_input)

    # First fake-quantization pass: inject quantization error on the raw input.
    original_input = quanter.quantize(original_input)
    print(original_input.dtype)
    original_input = quanter.dequantize(original_input)
    print(original_input.dtype)

    # Rotate (matches the rotation folded into the weight) unless disabled.
    if rot:
        modified_input = torch.matmul(original_input, rotateMatrix) * scale_forward
    else:
        modified_input = original_input
    print('hook_input')
    # Compare input distributions before/after rotation.
    print(f"Original input mean: {original_input.mean().item()}, std: {original_input.std().item()}")
    print(f"Modified input mean: {modified_input.mean().item()}, std: {modified_input.std().item()}")

    # Second fake-quantization pass on the rotated activation.
    modified_input = quanter.quantize(modified_input)
    modified_input = quanter.dequantize(modified_input)

    _print_activation_stats(modified_input)

    return (modified_input,)

if change_on:
    # Install the forward pre-hook on layer 0's down_proj so its input is
    # rewritten before the (patched) weight is applied.
    hook_target = model.model.layers[0].mlp.down_proj
    hook_target.register_forward_pre_hook(modify_down_proj_input)

# Build the evaluation prompt(s).
# prompts = [
#     "Artificial intelligence is transforming the way we interact with technology."
# ]
dataset = WordsDataset()
prompts = dataset.load_texts(0, 1)

# Tokenize (truncated to 50 tokens) and move every tensor to the model's device.
inputs = tokenizer(prompts, return_tensors="pt", truncation=True, max_length=50)
inputs = {name: tensor.to(model.device) for name, tensor in inputs.items()}

# Reuse the input ids as labels so the model reports a language-modeling loss.
inputs["labels"] = inputs["input_ids"]

# Forward pass with gradients disabled.
with torch.no_grad():
    outputs = model(**inputs)

# Perplexity = exp(mean per-token cross-entropy loss).
loss = outputs.loss
ppl = torch.exp(loss).item()
print(f"Perplexity: {ppl}")

# # ✅ 生成文本
# with torch.no_grad():
#     generated_ids = model.generate(
#         inputs["input_ids"],
#     )
#
# # ✅ 解码生成的文本
# generated_texts = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
#
# print("prompts:", prompts)
# # ✅ 打印生成的文本
# print("Generated text:", generated_texts)
