import numpy as np
import torch
import transformers
from transformers import LlamaForCausalLM, LlamaTokenizer
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from safetensors import safe_open
import cv2
from safetensors.torch import save_file

# Global cache: parameter name -> torch tensor, filled by getModel()/store2ParDict().
par_dict={}
# Number of transformer layers in the LLaMA-2-7B checkpoints used below.
llamaLayersNum=32
# Short key -> parameter-name suffix for each weight matrix kind inside a layer.
llama_layer_name_suffix_dict = {
    "q": ".self_attn.q_proj.weight",
    "k": ".self_attn.k_proj.weight",
    "v": ".self_attn.v_proj.weight",
    "o": ".self_attn.o_proj.weight",
    'mlp_down': ".mlp.down_proj.weight",
    'mlp_gate': ".mlp.gate_proj.weight",
    'mlp_up': ".mlp.up_proj.weight",
}

# Same suffixes as above, in the fixed order used to index resList.
llama_layer_name_suffix_list = [
    ".self_attn.q_proj.weight",
    ".self_attn.k_proj.weight",
    ".self_attn.v_proj.weight",
    ".self_attn.o_proj.weight",
    ".mlp.down_proj.weight",
    ".mlp.gate_proj.weight",
    ".mlp.up_proj.weight",
]


resList=[]  # weight-mask images, ordered as in llama_layer_name_suffix_list



def store2ParDict(model_info):
    """Load every shard of a checkpoint into the global par_dict.

    model_info must provide 'FileNum' (shard count), 'PathPrefix' and
    'PathSuffix'; each shard path is PathPrefix + NN + PathSuffix, where
    NN is the 1-based shard index zero-padded to two digits.
    """
    global par_dict
    for i in range(model_info['FileNum']):
        # f"{...:02d}" replaces the manual "'0' + str(i + 1)" padding branch.
        path = model_info['PathPrefix'] + f"{i + 1:02d}" + model_info['PathSuffix']
        print(path)
        getModel(path)

def getModel(model_path):
    """Read one safetensors shard and merge all its tensors into par_dict."""
    global par_dict
    with safe_open(model_path, framework="pt", device='cpu') as shard:
        for tensor_name in shard.keys():
            par_dict[tensor_name] = shard.get_tensor(tensor_name)


def showParDict():
    """Print the name and shape of every tensor cached in par_dict."""
    global par_dict
    for tensor_name in par_dict:
        arr = par_dict[tensor_name].numpy()
        print(tensor_name, arr.shape)


def getLayerInfo(txtName,model_info):
    """Append per-matrix statistics for the selected layers to txtName + 'weight.txt'.

    For each weight matrix this records its shape, mean, standard deviation,
    max, min, and the count/share of elements within 1, 2 and 3 standard
    deviations of the mean. model_info describes the sharded checkpoint
    (see store2ParDict).
    """
    def eleNum(a):
        # Total number of elements; weight tensors here are 1-D or 2-D.
        if a.ndim == 1:
            return a.shape[0]
        return a.shape[0] * a.shape[1]

    def func(a, avg, std):
        # Count how many elements fall within 1/2/3 standard deviations of the mean.
        pd1 = avg + std
        pd2 = avg + 2 * std
        pd3 = avg + 3 * std
        nd1 = avg - std
        nd2 = avg - 2 * std
        nd3 = avg - 3 * std
        count3 = np.sum((a >= nd3) & (a <= pd3))
        count2 = np.sum((a >= nd2) & (a <= pd2))
        count1 = np.sum((a >= nd1) & (a <= pd1))
        return count1, count2, count3

    global par_dict
    # Reuse the shared loader instead of duplicating the shard-path loop.
    store2ParDict(model_info)

    # 'with' guarantees the output file is flushed and closed
    # (the original left the handle open).
    with open(txtName + 'weight.txt', 'a') as f:
        for layerCount in range(llamaLayersNum):
            for index, suffix_name in enumerate(llama_layer_name_suffix_list):
                if layerCount <= 0:  # only matrices passing this filter are reported
                    name = 'model.layers.' + str(layerCount) + suffix_name
                    parameter = par_dict[name].numpy()
                    avg = np.average(parameter)
                    std = np.std(parameter)
                    # renamed from max/min so the builtins are not shadowed
                    max_val = np.max(parameter)
                    min_val = np.min(parameter)
                    count1, count2, count3 = func(parameter, avg, std)
                    elesum = eleNum(parameter)
                    print(layerCount)  # progress indicator on stdout
                    print(name, parameter.shape, file=f)
                    print("avg:%f std:%f max:%f min:%f" % (avg, std, max_val, min_val), file=f)
                    print("一个方差内的数据个数:%d 占比:%f" % (count1, count1 / elesum), file=f)
                    print("两个方差内的数据个数:%d 占比:%f" % (count2, count2 / elesum), file=f)
                    print("三个方差内的数据个数:%d 占比:%f" % (count3, count3 / elesum), file=f)
                    print("", file=f)


def getLayerName(txtName,model_path):
    """Append the name and shape of every tensor in the shard at model_path
    to the file txtName + 'name.txt'.
    """
    getModel(model_path)
    # 'with' ensures the output file is closed (the original leaked the handle);
    # the unused eleNum/elesum computation was removed as dead work.
    with open(txtName + 'name.txt', 'a') as f:
        for i, name in enumerate(par_dict):
            parameter = par_dict[name].numpy()
            print(i)  # progress indicator on stdout
            print(name, parameter.shape, file=f)
            print("", file=f)


def getSingleImg(model_path):
    """Build one wide binary image from all o_proj weight matrices of a shard.

    Layer 0's mask initialises the global `res`; every later layer's mask is
    concatenated to the right. White (255) marks values outside +/- 1 std of
    the matrix mean, black (0) marks values within it.
    """
    global res
    getModel(model_path)
    for i, name in enumerate(par_dict):
        parameter = par_dict[name].numpy()
        # BUG FIX: this filtering previously sat OUTSIDE the loop, so only the
        # last tensor iterated was ever examined; it now runs per tensor.
        if parameter.ndim >= 2 and ('model.layers.0.self_attn.o_proj.weight' in name):
            avg = np.average(parameter)
            std = np.std(parameter)
            # white = outside one std of the mean, black = within it
            img = 255 - 255 * ((parameter >= avg - std) * (parameter <= avg + std))
            res = img
        elif parameter.ndim >= 2 and ('self_attn.o_proj.weight' in name):
            print(i)
            print(res.shape)
            avg = np.average(parameter)
            std = np.std(parameter)
            img = 255 - 255 * ((parameter >= avg - std) * (parameter <= avg + std))
            # axis=1 concatenates horizontally (axis=0 would stack vertically)
            res = np.concatenate([res, img], axis=1)


def initResList():
    """(Re)initialise resList with one 1x1 zero placeholder per weight kind,
    plus two extra slots (indices 7 and 8) used by getQKImg/getVOImg.
    """
    global resList
    # Rebuild the list instead of appending: the original grew resList on
    # every call, leaving stale entries behind when several image routines
    # ran in the same process.
    resList = [np.zeros([1, 1]) for _ in range(len(llama_layer_name_suffix_list) + 2)]

def getBinaryImg(parameter):
    """Map a weight matrix to a greyscale mask (255 = white, 0 = black).

    Values within one standard deviation of the matrix mean become white
    (255); outliers become black (0), so black highlights the "important"
    weights. Earlier experiments (sign-based masks, inverted one/two-std
    masks, lossless min-max scaling) were dropped in favour of this encoding.
    """
    mean = np.average(parameter)
    sigma = np.std(parameter)
    inside = (parameter >= mean - sigma) & (parameter <= mean + sigma)
    return 255 * inside

def getSingleImg2(model_info):
    """Build one wide binary image per matrix kind in llama_layer_name_suffix_list.

    Loads the checkpoint, then for every selected layer converts each weight
    matrix to a binary mask (getBinaryImg) and concatenates it horizontally
    into resList[index].
    """
    global resList, par_dict, llama_layer_name_suffix_list
    store2ParDict(model_info)
    showParDict()
    initResList()
    for layer_idx in range(llamaLayersNum):
        # Only layers passing this filter contribute to resList;
        # adjust the condition to select a different layer range.
        if layer_idx > 0:
            continue
        for slot, suffix in enumerate(llama_layer_name_suffix_list):
            full_name = 'model.layers.' + str(layer_idx) + suffix
            print(layer_idx)
            print(slot)
            print(full_name)
            matrix = par_dict[full_name].numpy()
            print(matrix)
            if layer_idx == 0:
                resList[slot] = getBinaryImg(matrix)
            else:
                # axis=1 concatenates horizontally (axis=0 would stack vertically)
                resList[slot] = np.concatenate([resList[slot], getBinaryImg(matrix)], axis=1)
                print(resList[slot].shape)


#
# llama_layer_name_suffix_list = [
#     ".self_attn.q_proj.weight",
#     ".self_attn.k_proj.weight",
#     ".self_attn.v_proj.weight",
#     ".self_attn.o_proj.weight",
#     ".mlp.down_proj.weight",
#     ".mlp.gate_proj.weight",
#     ".mlp.up_proj.weight",
# ]
def getZeroOneImg(parameter):
    """Return a 0/1 mask: 1 where the value lies outside one standard
    deviation of the matrix mean (the outlier weights), 0 inside it.
    """
    mean = np.average(parameter)
    sigma = np.std(parameter)
    within = (parameter >= mean - sigma) & (parameter <= mean + sigma)
    return 1 - within


def getQKImg(model_info):
    """For layers 6..10, multiply the binarised Q and K^T projection masks
    and concatenate the per-layer products horizontally into resList[7].
    """
    global resList, par_dict, llama_layer_name_suffix_list
    store2ParDict(model_info)
    showParDict()
    initResList()
    first_layer = 6
    last_layer = 10
    for layer_idx in range(llamaLayersNum):
        if not (first_layer <= layer_idx <= last_layer):
            continue
        w_q = par_dict['model.layers.' + str(layer_idx) + '.self_attn.q_proj.weight'].numpy()
        w_k = par_dict['model.layers.' + str(layer_idx) + '.self_attn.k_proj.weight'].numpy()
        print(w_q)
        print(getZeroOneImg(w_q))
        # Product of the 0/1 masks approximates where Q.K^T gets contributions
        # from outlier weights on both sides.
        mask_product = np.dot(getZeroOneImg(w_q), getZeroOneImg(w_k.T))
        print(mask_product)
        if layer_idx == first_layer:
            resList[7] = mask_product
        else:
            resList[7] = np.concatenate([resList[7], mask_product], axis=1)

def qkres(model_info):
    """Render the Q.K^T mask built by getQKImg to the image file 'qkT.png'."""
    global resList
    getQKImg(model_info)
    cv2.imwrite('qkT.png', resList[7].astype(np.uint8))

def getVOImg(model_info):
    """For layers 0..5, multiply the binarised V and O projection masks and
    concatenate the per-layer products horizontally into resList[8].
    """
    global resList, par_dict, llama_layer_name_suffix_list
    store2ParDict(model_info)
    showParDict()
    initResList()
    first_layer = 0
    last_layer = 5
    for layer_idx in range(llamaLayersNum):
        if not (first_layer <= layer_idx <= last_layer):
            continue
        w_o = par_dict['model.layers.' + str(layer_idx) + '.self_attn.o_proj.weight'].numpy()
        w_v = par_dict['model.layers.' + str(layer_idx) + '.self_attn.v_proj.weight'].numpy()
        print(w_o)
        print(getZeroOneImg(w_o))
        # Product of the 0/1 masks approximates where V.O gets contributions
        # from outlier weights on both sides.
        mask_product = np.dot(getZeroOneImg(w_v), getZeroOneImg(w_o))
        print(mask_product)
        if layer_idx == first_layer:
            resList[8] = mask_product
        else:
            resList[8] = np.concatenate([resList[8], mask_product], axis=1)

def vores(model_info):
    """Render the V.O mask built by getVOImg to the image file 'voT.png'."""
    global resList
    getVOImg(model_info)
    cv2.imwrite('voT.png', resList[8].astype(np.uint8))

def getImg(model_info):
    """Run getSingleImg2 and save each concatenated mask in resList to
    a PNG named '<index><suffix>.png' (one file per weight-matrix kind).
    """
    global resList
    getSingleImg2(model_info)
    for slot, suffix in enumerate(llama_layer_name_suffix_list):
        out_name = str(slot) + suffix + '.png'
        cv2.imwrite(out_name, resList[slot].astype(np.uint8))


# def doSomething(model_info):
#     for i in range(model_info['FileNum']):
#         if i < 9:
#             s = '0' + str(i + 1)
#         else:
#             s = str(i + 1)
#         path = model_info['PathPrefix'] + s + model_info['PathSuffix']


        # perform the desired operation here

        # getLayerName(model_info['ModelName'],path)  # get each layer's name info
        #
        #
        # getLayerInfo(model_info['ModelName'],path)  # get each layer's full statistics
        #
        # getSingleImg(res,path)

        # getSingleImg2(path)

def testSaveModel(model_info):
    """Load a checkpoint, set element [0, 0] of every selected weight matrix
    to 1, and write the (modified) tensors back out to ./0.safetensors.
    """
    global resList, par_dict, llama_layer_name_suffix_list
    store2ParDict(model_info)
    showParDict()
    initResList()
    for layerCount in range(llamaLayersNum):
        for index, suffix_name in enumerate(llama_layer_name_suffix_list):

            if layerCount <= 0:  # only matrices passing this filter are modified
                # Adjust this index/condition to target different matrices.
                name = 'model.layers.' + str(layerCount) + suffix_name
                print(layerCount)
                print(index)
                print(name)
                print(type(par_dict[name]))
                # NOTE: torch's .numpy() returns a view sharing memory with the
                # tensor, so this in-place write also mutates par_dict[name]
                # and therefore ends up in the saved file below.
                cur_matrix = par_dict[name].numpy()
                cur_matrix[0,0]=1

    save_file(par_dict,'./0.safetensors')

# def mytest():
#     tensors = {
#         "embedding": torch.zeros((2, 2)),
#         "attention": torch.zeros((2, 3))
#     }
#     save_file(tensors, "model.safetensors")

if __name__ == '__main__':
    # Each *ModelInfo dict describes one sharded safetensors checkpoint:
    # shard files are PathPrefix + NN + PathSuffix for NN in 01..FileNum.
    originLlamaModelInfo = {
        'PathPrefix': r"/gdata/hpdesktop/webui/models/Llama-2-7b-hf/model-000",
        'PathSuffix': "-of-00002.safetensors",
        'FileNum': 2,
        'ModelName': 'llama',
    }

    chatLlamaModelInfo = {
        'PathPrefix': r"/gdata/gpu/Llama-2-7b-chat-hf/model-000",
        'PathSuffix': "-of-00002.safetensors",
        'FileNum': 2,
        'ModelName': 'llamachat',
    }

    loraModelInfo = {
        'PathPrefix': r"/gdata/hpdesktop/webui/models/Llama-2-7b-hf-OA2000/model-000",
        'PathSuffix': "-of-00006.safetensors",
        'FileNum': 6,
        'ModelName': 'lora',
    }

    mixtralInstructInfo = {
        'PathPrefix': r"/gdata/gpu/textgen3/models/Mixtral-7B_8/model-000",
        'PathSuffix': "-of-00019.safetensors",
        'FileNum': 19,
        'ModelName': 'mixtralinstruct',
    }

    changeModelInfo = {
        'PathPrefix': r"/gdata/gpu8613/changeWeightTest/",
        'PathSuffix': ".safetensors",
        'FileNum': 1,
        'ModelName': 'changeModel',
    }

    lora10000ModelInfo = {
        'PathPrefix': r"/gdata/hpdesktop/webui/models/Llama-2-7b-hf-OAC10000/model-000",
        'PathSuffix': "-of-00006.safetensors",
        'FileNum': 6,
        'ModelName': 'lora',
    }

    lora2000ModelInfo = {
        'PathPrefix': r"/gdata/hpdesktop/zym/LoraModel/2000/model-000",
        'PathSuffix': "-of-00006.safetensors",
        'FileNum': 6,
        'ModelName': 'lora',
    }

    # Other available entry points (uncomment to run):
    # doSomething(model_info=originLlamaModelInfo)
    # getImg(model_info=lora2000ModelInfo)
    # qkres(model_info=originLlamaModelInfo)
    # vores(model_info=originLlamaModelInfo)
    # testSaveModel(model_info=changeModelInfo)
    # mytest()
    getLayerInfo("lora10000", model_info=lora10000ModelInfo)