import os

import numpy as np
import torch
from tqdm import tqdm
from transformers import AutoTokenizer, LlamaForCausalLM, BitsAndBytesConfig



from eval import show_without_plt
from quantize_util import QuantizationUtils
from read_word_dataset import WordsDataset
from utils.model_path_getter import load_yaml

class OnlineModel:
    """Evaluate a Llama model over a word dataset while instrumenting every
    MLP ``down_proj`` input.

    Depending on the flags, the harness can:
      * right-multiply the ``down_proj`` weights and inputs by a saved
        rotation matrix (``rot``),
      * fake-quantize (quantize + dequantize) the ``down_proj`` inputs
        (``act_quant``),
      * load the model with 4-bit nf4 weight quantization (``weight_quant``).

    Per-forward activation statistics (from ``show_without_plt``) and
    per-sample perplexity are accumulated and summarized by ``get_xishu`` /
    ``get_ppl``.
    """

    def __init__(self, dataset_start_end: tuple, rot: bool, act_quant: bool,
                 weight_quant: bool, rot_matrix_path: str = '../1-10-1 (1).pth'):
        """
        Args:
            dataset_start_end: ``(start, end)`` index window into the word dataset.
            rot: apply the rotation matrix to down_proj weights and inputs.
            act_quant: fake-quantize the down_proj inputs.
            weight_quant: load the model with 4-bit (nf4) weight quantization.
            rot_matrix_path: path of the saved rotation-matrix tensor
                (defaults to the previously hard-coded checkpoint path).
        """
        self.rot = rot
        self.act_quant = act_quant
        self.weight_quant = weight_quant
        self.rot_matrix_path = rot_matrix_path

        # Model / offload paths from the YAML config (read once, reused).
        config = load_yaml()
        self.model_dir = config['model_path']
        self.offload_path = config['offload_path']
        self.current_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

        # Tokenizer for the evaluated model.
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_dir)

        self.model = None
        self.quant_config = None
        self.rotateMatrix = None

        # Dataset window and iteration state.
        self.start, self.end = dataset_start_end
        self.current_dataset_num = self.start
        self.step = 1

        self.dataset = WordsDataset()
        self.quanter = QuantizationUtils(2 / 255, 0)

        # Statistics accumulated by the down_proj pre-hook / run_one.
        self.ppl_arr = []          # perplexity of each evaluated sample
        self.counts_pre_arr = []   # histogram counts before rotation/quantization
        self.radio_arr = []        # (ratio < 1e-3, ratio < 1e-4) before rotation
        self.counts_aft_arr = []   # histogram counts after rotation/quantization
        self.radio_aft_arr = []    # (ratio < 1e-3, ratio < 1e-4) after rotation

        self.init()

    def init(self):
        """Load the rotation matrix and the model, patch the down_proj
        weights, and register the activation hooks."""
        self.quant_config = BitsAndBytesConfig(
            load_in_4bit=True,                 # 4-bit weight quantization
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",         # nf4 quantization format
            bnb_4bit_compute_dtype=torch.float16,
        )
        # NOTE(review): assumes the saved matrix matches down_proj's input
        # dimension — confirm against the checkpoint.
        self.rotateMatrix = torch.load(
            self.rot_matrix_path, weights_only=True
        ).to(torch.float16)

        # The two load paths differed only in quantization_config — build the
        # kwargs once and add the config only when weight quantization is on.
        load_kwargs = dict(
            torch_dtype=torch.float16,
            device_map="auto",
            offload_folder=self.offload_path,
        )
        if self.weight_quant:
            load_kwargs['quantization_config'] = self.quant_config
        self.model = LlamaForCausalLM.from_pretrained(self.model_dir, **load_kwargs)

        self.change_second_line_weight()
        print("Weight changed.")

        self.add_modify_down_proj_input_hook()
        print("Hook added.")

    def change_second_line_weight(self):
        """Right-multiply every layer's ``mlp.down_proj`` weight by the
        rotation matrix; no-op when ``self.rot`` is False."""
        if not self.rot:
            return  # weights stay untouched
        for layer in self.model.model.layers:
            down_proj = layer.mlp.down_proj
            down_proj.weight.data = torch.matmul(down_proj.weight.data, self.rotateMatrix)

    def add_modify_down_proj_input_hook(self):
        """Register a forward pre-hook on every layer's ``mlp.down_proj`` that
        records activation statistics and applies rotation / fake
        quantization to the input."""
        def modify_down_proj_input(module, input):
            original_input = input[0]

            # Statistics on the untouched input.
            ans = show_without_plt(original_input.to(torch.float32))
            self.counts_pre_arr.append(ans["counts"])
            self.radio_arr.append((ans["lower_ratio_1e_3"], ans["lower_ratio_1e_4"]))

            # Fake quantization (quantize + dequantize) before rotation.
            if self.act_quant:
                original_input = self.quanter.dequantize(self.quanter.quantize(original_input))

            if self.rot:
                modified_input = torch.matmul(original_input, self.rotateMatrix)
            else:
                modified_input = original_input

            # Fake quantization again after rotation.
            if self.act_quant:
                modified_input = self.quanter.dequantize(self.quanter.quantize(modified_input))

            # Statistics on the modified input.
            ans = show_without_plt(modified_input.to(torch.float32))
            self.counts_aft_arr.append(ans["counts"])
            # Post-rotation ratios are only collected in the unquantized case;
            # get_xishu's report depends on this.
            if not self.act_quant:
                self.radio_aft_arr.append((ans["lower_ratio_1e_3"], ans["lower_ratio_1e_4"]))

            return (modified_input,)

        for layer in self.model.model.layers:
            layer.mlp.down_proj.register_forward_pre_hook(modify_down_proj_input)

    def run_one(self):
        """Run the current dataset slice through the model and record its
        perplexity."""
        prompts = self.dataset.load_texts(self.current_dataset_num,
                                          self.current_dataset_num + self.step)

        # Tokenize and move everything onto the model's device; labels equal
        # the inputs so the model returns the LM cross-entropy loss.
        inputs = self.tokenizer(prompts, return_tensors="pt")
        inputs = {key: value.to(self.model.device) for key, value in inputs.items()}
        inputs["labels"] = inputs["input_ids"]

        with torch.no_grad():
            outputs = self.model(**inputs)
        loss = outputs.loss

        print('----')
        # PPL = exp(mean cross-entropy loss). Do NOT divide by the token count
        # again — the loss is already averaged, dividing would shrink the PPL.
        ppl = torch.exp(loss).item()
        print(f"PPL (正确计算): {ppl}")
        self.ppl_arr.append(ppl)
        print('----')

    def run_all(self):
        """Evaluate every slice in ``[start, end)``.

        Bug fix: the dataset index is now advanced BEFORE ``run_one()``.
        Previously it was updated afterwards, so the first slice was evaluated
        twice and the last slice was never evaluated.
        """
        for i in tqdm(range(self.start, self.end, self.step)):
            self.current_dataset_num = i
            self.run_one()

    def get_xishu(self):
        """Aggregate the sparsity statistics and dump a report file named
        ``{rot}-{act_quant}-{weight_quant}.txt``.

        Returns:
            act_quant False: ``(low_1e_3_pre, low_1e_4_pre, low_1e_4_aft, low_1e_3_aft)``
            act_quant True:  ``(low_1e_3_pre, low_1e_4_pre, spar)``
        """
        radios = np.array(self.radio_arr)
        counts_aft = np.array(self.counts_aft_arr)
        counts_sum = np.sum(counts_aft, axis=1)
        # Fraction of activations falling in the first histogram bin,
        # averaged over all recorded forwards — used as the sparsity measure.
        spar_arr = counts_aft[:, 0] / counts_sum
        spar = sum(spar_arr) / len(spar_arr)

        low_value_counts_1e_3_pre = np.sum(radios[:, 0]) / len(radios)
        low_value_counts_1e_4_pre = np.sum(radios[:, 1]) / len(radios)

        report_path = f'{self.rot}-{self.act_quant}-{self.weight_quant}.txt'

        if not self.act_quant:
            # Post-rotation ratios are only recorded when act_quant is off.
            radios_aft = np.array(self.radio_aft_arr)
            low_value_counts_1e_4_aft = np.sum(radios_aft[:, 1]) / len(radios_aft)
            low_value_counts_1e_3_aft = np.sum(radios_aft[:, 0]) / len(radios_aft)

            with open(report_path, 'w') as f:
                f.write('---------------------------------')
                f.write(f"self.counts_pre_arr:{self.counts_pre_arr}\n")
                f.write(f"radios:{radios}\n")
                f.write(f"radios_aft:{radios_aft}\n")
                f.write(f"low_value_counts_1e_3_pre: {low_value_counts_1e_3_pre}\n")
                f.write(f"low_value_counts_1e_4_pre: {low_value_counts_1e_4_pre}\n")
                f.write(f"low_value_counts_1e_4_aft: {low_value_counts_1e_4_aft}\n")
                f.write(f"low_value_counts_1e_3_aft: {low_value_counts_1e_3_aft}\n")
                f.write(f"ppl: {self.get_ppl()}\n")
                f.write('---------------------------------')

            return (low_value_counts_1e_3_pre, low_value_counts_1e_4_pre,
                    low_value_counts_1e_4_aft, low_value_counts_1e_3_aft)

        with open(report_path, 'w') as f:
            f.write('---------------------------------')
            f.write(f"{self.rot} {self.act_quant} {self.weight_quant}\n")
            f.write(f"self.counts_pre_arr:{self.counts_pre_arr}\n")
            f.write(f"radios:{radios}\n")
            f.write(f"counts_aft:{counts_aft[:, 0]}\n")
            f.write(f"spar_arr:{spar_arr}\n")
            f.write(f"low_value_counts_1e_3_pre: {low_value_counts_1e_3_pre}\n")
            f.write(f"low_value_counts_1e_4_pre: {low_value_counts_1e_4_pre}\n")
            f.write(f"spar: {spar}\n")
            f.write(f"ppl: {self.get_ppl()}\n")
            f.write('---------------------------------')
        return low_value_counts_1e_3_pre, low_value_counts_1e_4_pre, spar

    def get_ppl(self):
        """Mean perplexity over all evaluated samples.

        Raises ZeroDivisionError if run_one/run_all was never executed.
        """
        return sum(self.ppl_arr) / len(self.ppl_arr)


if __name__ == '__main__':
    # Baseline run: no rotation, no activation quantization, no weight
    # quantization, over dataset slices [0, 10).
    model = OnlineModel((0, 10), rot=False, act_quant=False, weight_quant=False)
    model.run_all()
    print("xishu", model.get_xishu())

