import numpy as np

from init import *
from sklearn.decomposition import PCA
from skfuzzy.cluster import cmeans
from icmeans import *
import copy
from collections import Counter

from init import *
import pandas as pd
from math import exp

# Matplotlib defaults (`plt` comes from `from init import *`):
# use a CJK-capable font so Chinese labels render, and keep the minus
# sign displayable with that font.
plt.rcParams['font.sans-serif'] = ['Arial Unicode MS']
plt.rcParams['axes.unicode_minus'] = False


def get_feature(img_data, flag, ksize):
    '''
    Extract a ksize-dimensional texture feature for each interior pixel of
    the selected parity, reduce the features with PCA and cluster them with
    fuzzy c-means.

    :param img_data: 2-D grayscale image array
    :param flag: 0 -> process pixels where (i + j) is even, 1 -> odd pixels
    :param ksize: number of raw features per pixel (rows of the feature matrix)
    :return: (label, erromap) — label[k] is the cluster index of the k-th
             processed pixel in scan order; erromap holds the prediction
             error (pixel value minus the rounded 4-neighbour average) at
             each processed position
    '''
    row, col = img_data.shape
    size = int((row * col) / 2)
    # One feature column per candidate pixel.
    F = np.zeros((ksize, size))
    # Prediction-error map.  Was allocated as (row, row); fixed to
    # (row, col) so non-square images are handled correctly.
    erromap = np.zeros((row, col))
    index = 0
    for i in range(2, row - 2):
        for j in range(2, col - 2):
            # 4-neighbours: up, left, down, right.  (These were read from
            # the global `img` instead of the img_data parameter — fixed.)
            v1 = int(img_data[i - 1][j])
            v2 = int(img_data[i][j - 1])
            v3 = int(img_data[i + 1][j])
            v4 = int(img_data[i][j + 1])
            # Outer ring of eight knight-move neighbours.
            v5 = int(img_data[i - 2][j - 1])
            v6 = int(img_data[i - 1][j - 2])
            v7 = int(img_data[i + 1][j - 2])
            v8 = int(img_data[i + 2][j - 1])
            v9 = int(img_data[i + 2][j + 1])
            v10 = int(img_data[i + 1][j + 2])
            v11 = int(img_data[i - 1][j + 2])
            v12 = int(img_data[i - 2][j + 1])
            # Left and right gradient vectors.
            lg = np.array([[v5 - v1, v2 - v1], [v1 - v4, v3 - v4], [v2 - v3, v8 - v3], [v6 - v2, v7 - v2]])
            rg = np.array([[v12 - v1, v4 - v1], [v11 - v4, v10 - v4], [v4 - v3, v9 - v3], [v1 - v2, v3 - v2]])
            # f1/f2: extreme values of the 4-neighbourhood.
            f1 = np.max([v1, v2, v3, v4])
            f2 = np.min([v1, v2, v3, v4])
            # f3: sum of second-order differences around each neighbour.
            sod1 = abs(v2 + v4 + v5 + v12 - 4 * v1)
            sod2 = abs(v1 + v3 + v6 + v7 - 4 * v2)
            sod3 = abs(v2 + v4 + v8 + v9 - 4 * v3)
            sod4 = abs(v1 + v3 + v10 + v11 - 4 * v4)
            f3 = np.sum([sod1, sod2, sod3, sod4])
            # f4: total variation around the 4-neighbour rhombus.
            f4 = np.sum([abs(v1 - v2), abs(v2 - v3), abs(v3 - v4), abs(v4 - v1)])
            # p: 4-neighbour average rounded half-up (the predictor).
            mean4 = np.average([v1, v2, v3, v4])
            p = np.ceil(mean4) if (math.modf(mean4))[0] >= 0.5 else np.floor(mean4)
            # f5: absolute differences along the outer ring.
            f5 = np.sum([abs(v6 - v7), abs(v5 - v2), abs(v2 - v8), abs(v1 - v3), abs(v12 - v4), abs(v4 - v9),
                         abs(v11 - v10), abs(v5 - v12), abs(v6 - v1), abs(v1 - v11), abs(v2 - v4), abs(v7 - v3),
                         abs(v3 - v10), abs(v8 - v9)])
            # f6: deviation of all twelve neighbours from the prediction.
            f6 = np.sum([abs(p - v1), abs(p - v2), abs(p - v3), abs(p - v4), abs(p - v5), abs(p - v6), abs(p - v7),
                         abs(p - v8), abs(p - v9), abs(p - v10), abs(p - v11), abs(p - v12)])
            # f7/f8: total magnitude of the left/right gradient vectors.
            f7 = np.sum(np.sqrt(np.sum(np.square(lg), axis=1)))
            f8 = np.sum(np.sqrt(np.sum(np.square(rg), axis=1)))
            # f9/f10: magnitudes of pairwise gradient differences (the
            # q == s pairs contribute zero, as in the original).
            gradDiff = []
            gradDiff2 = []
            for s in range(3):
                for q in range(s, 4):
                    gradDiff.append(lg[s] - lg[q])
                    gradDiff2.append(rg[s] - rg[q])
            f9 = np.sum(np.sqrt(np.sum(np.square(np.array(gradDiff)), axis=1)))
            f10 = np.sum(np.sqrt(np.sum(np.square(np.array(gradDiff2)), axis=1)))
            feat = np.array([f1, f2, f4, f3, f10, f9, f8, f7, f5, f6])
            # Keep only pixels of the requested parity.
            if np.mod(i + j, 2) == flag:
                F[:, index] = feat
                index += 1
                erromap[i][j] = int(img_data[i][j]) - int(p)
    # Reduce to 3 principal components, then fuzzy c-means with 16 clusters
    # and fuzzifier m = 2.
    pca = PCA(n_components=3)
    data = np.array(pca.fit_transform(F.T))
    center, u, u0, d, jm, p, fpc = cmeans(data.T, 16, 2, error=0.00001, maxiter=10)
    # Hard label = cluster with the highest membership.
    label = np.argmax(u, axis=0)
    print('label', label)
    print(np.unique(label))
    return label, erromap


def complexity_map(img_data, label, flags):
    """Spread 1-based cluster labels back onto the image grid.

    Interior pixels (two-pixel border excluded) whose coordinate parity
    matches *flags* receive ``label[k] + 1`` in scan order; all other
    cells stay 0.
    """
    cm = np.zeros(img_data.shape)
    rows, cols = img_data.shape
    pos = 0
    for r in range(2, rows - 2):
        for c in range(2, cols - 2):
            if (r + c) % 2 != flags:
                continue
            cm[r][c] = int(label[pos]) + 1
            pos += 1
    return cm


def cal_entropy(images, label, ks, flages, W):
    """Compute weighted rhombus predictions and their errors per cluster.

    For each interior pixel of parity *flages*, the four rhombus
    neighbours are sorted and a weighted, ceil-rounded mean (weights W)
    yields two predictors: x1 from the descending order with max+1
    prepended, x2 from the ascending order with min-1 prepended.

    The unused plain-average error accumulator (`his`/`e`) from the
    original version was dead code — it was never returned — and has
    been removed.

    :param images: 2-D grayscale image array
    :param label: cluster index of each processed pixel, in scan order
    :param ks: number of clusters
    :param flages: 0 -> even-parity pixels, 1 -> odd-parity pixels
    :param W: weight vector (length 5 = one sentinel + four neighbours)
    :return: (his1, his2, errolist1, errolist2) — per-cluster error lists
             and per-pixel error maps for the two predictors
    """
    A, B = images.shape
    his1 = [[] for _ in range(ks)]
    his2 = [[] for _ in range(ks)]
    errolist1 = np.zeros((A, B))
    errolist2 = np.zeros((A, B))
    count = 0
    sumW = np.sum(W)  # loop invariant, hoisted
    for i in range(2, A - 2):
        for j in range(2, B - 2):
            # Only pixels of the requested parity are processed.
            if np.mod(i + j, 2) != flages:
                continue
            v1 = int(images[i - 1][j])
            v2 = int(images[i + 1][j])
            v3 = int(images[i][j + 1])
            v4 = int(images[i][j - 1])
            P1 = -np.sort([-v1, -v2, -v3, -v4])   # descending
            P2 = np.sort([v1, v2, v3, v4])        # ascending
            P1 = np.insert(P1, 0, P1[0] + 1)      # prepend max + 1
            P2 = np.insert(P2, 0, P2[0] - 1)      # prepend min - 1
            n = P1.shape[0]
            P1 = P1.reshape(n, -1)
            P2 = P2.reshape(n, -1)
            # Weighted, ceil-rounded predictions and their errors.
            e1 = int(np.ceil(np.dot(W, P1) / sumW)) - int(images[i][j])
            e2 = int(np.ceil(np.dot(W, P2) / sumW)) - int(images[i][j])
            his1[label[count]].append(e1)
            his2[label[count]].append(e2)
            errolist1[i][j] = e1
            errolist2[i][j] = e2
            count += 1
    return his1, his2, errolist1, errolist2


def cal_entropy2(his, ks, label):
    """Rank clusters by the Shannon entropy of their error lists.

    Cluster i is mapped to cor[i], its rank in ascending-entropy order
    (0 = lowest entropy); per-pixel labels are remapped accordingly and
    a bar chart of the population per cluster is shown.

    :param his: per-cluster lists of prediction errors
    :param ks: number of clusters
    :param label: original per-pixel cluster indices
    :return: (labels, cor) — remapped per-pixel labels and the
             cluster -> rank permutation
    """
    entropy = []
    for i in range(ks):
        e = 0
        counts = Counter(his[i])
        total = len(his[i])
        # NOTE(review): an empty cluster (total == 0) divides by zero
        # here, exactly as in the original code — confirm clusters are
        # always non-empty.
        # (The inner loop previously reused `i`, shadowing the cluster
        # index — renamed for clarity; behavior is unchanged.)
        for cnt in counts.values():
            pro = int(cnt) / total
            e = e + pro * math.log(1 / pro, 2)
        entropy.append(e)

    entropy_sort = sorted(entropy)
    cor = []
    labels = []
    # cor[i] = rank of cluster i's entropy; consumed ranks are marked -1
    # so equal entropies still get distinct ranks.
    for i in range(ks):
        for j in range(ks):
            if entropy[i] == entropy_sort[j]:
                cor.append(j)
                entropy_sort[j] = -1
                break
    for i in label:
        labels.append(cor[i])
    legend = []
    fig = []
    frequency = list(range(ks))
    new_name = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p']
    for i in range(ks):
        legend.append('l=' + str(cor[i]))
        fig.append(str(cor[i]))
        # NOTE(review): indexing `his` by cor[i] (the rank) instead of i
        # makes this equivalent to frequency[r] = len(his[r]) for every
        # rank r — verify the intended population source.
        frequency[cor[i]] = int(len(his[cor[i]]))
    print(frequency)
    plt.grid(ls=':')
    plt.bar(range(ks), frequency, tick_label=new_name, color='b')
    plt.title('The distribution of pixels')
    plt.xlabel('Neuron')
    plt.ylabel('Population')
    plt.show()
    return labels, cor


def cal_aux(checklist, data_length, scale, re, lcm, flags):
    '''
    Estimate the auxiliary-information length (in bits) for one pass.

    :param checklist: cluster -> entropy-rank permutation (0-based)
    :param data_length: number of overflow entries (8 bits each)
    :param scale: number of usable bins; reduced for saturated bins
    :param re: best bin per cluster; +/-250 marks an unusable bin
    :param lcm: overflow location map; all-zero means no overflow data
    :param flags: 0 -> first (even) pass, 1 -> second (odd) pass
    :return: auxiliary length in bits
    :raises ValueError: if flags is neither 0 nor 1 (the original code
            hit an UnboundLocalError on `aux` instead)
    '''
    cl = 0
    # Shift to 1-based ranks so log2 is defined for rank 0.
    checklist = np.array(checklist) + 1
    print('incheck', checklist)
    if flags == 0:
        for i in range(16):
            if re[i] == 250 or re[i] == -250:
                scale = scale - 1
            if checklist[i] != i:
                cl = cl + np.ceil(np.log2(checklist[i]))
        if np.sum(lcm) == 0:
            aux = scale * 3 + 4 + 1 + 17 + cl + 4
        else:
            aux = scale * 3 + 4 + 1 + data_length * 8 + 17 + 17 + cl + 4
    elif flags == 1:  # update the auxiliary length from the optimised bins
        for i in range(16):
            if re[i] < 250 and checklist[i] > i:
                cl = cl + np.ceil(np.log2(checklist[i]))
            if re[i] == 250 or re[i] == -250:
                scale = scale - 1
                print('scale', scale)
        if np.sum(lcm) == 0:
            aux = scale * 3 + 4 + 1 + 17 + cl + 4
        else:
            aux = scale * 3 + 4 + 1 + data_length * 8 + 17 + 17 + cl + 4
    else:
        raise ValueError('flags must be 0 or 1')
    print('The aux is ', flags, ' ', aux)
    return aux


def re_hs(k, row, col, cm, flag, em):
    """Build a 511-bin prediction-error histogram per cluster.

    Hs[e + 254][c - 1] counts the interior pixels of parity *flag* whose
    error (from em) is e and whose 1-based cluster id (from cm) is c.
    """
    Hs = np.zeros((511, k))
    for r in range(2, row - 2):
        for c in range(2, col - 2):
            if (r + c) % 2 == flag:
                Hs[int(em[r][c] + 254)][int(cm[r][c]) - 1] += 1
    return Hs


def insert_msg(em, cm, bin, img_hide3, secret, ec3, flag3, type):
    '''
    Embed secret bits by histogram shifting.

    em          prediction-error matrix
    cm          complexity/cluster map (1-based cluster ids)
    bin         best embedding bin per cluster (indexed by cluster id - 1)
    img_hide3   image being modified in place
    secret      matrix of secret bits
    ec3         remaining embedding capacity; decremented per embedded bit
    flag3       parity of the pixels processed (0 even, 1 odd)
    type        0 -> expand/shift upward (+), otherwise downward (-)

    Returns the modified image and the remaining capacity.

    NOTE(review): in the flag3 == 1 branches the counters `index` and
    `index2` are only partially updated (and not at all in the downward
    case) — they only feed the printed statistics, but confirm the
    asymmetry is intentional.
    '''
    # index:  number of shifted pixels (printed only)
    # index2: number of embedded 1-bits (printed only)
    index = 0
    index2 = 0
    row, col = img_hide3.shape
    for i in range(2, row - 2):
        for j in range(2, col - 2):
            # Even-parity pass; stop embedding once capacity is exhausted.
            if np.mod(i + j, 2) == 0 and flag3 == 0 and ec3 > 0:
                ks = cm[i][j] - 1
                if type == 0:
                    # Error equals the bin -> embed one bit by expansion.
                    if em[i][j] == bin[int(ks)]:
                        ec3 = ec3 - 1
                        img_hide3[i][j] += secret[i][j]
                        if secret[i][j] == 1:
                            index2 += 1
                    # Error beyond the bin -> shift to keep reversibility.
                    if em[i][j] > bin[int(ks)]:
                        img_hide3[i][j] += 1
                        index += 1
                else:
                    if em[i][j] == bin[int(ks)]:
                        ec3 = ec3 - 1
                        img_hide3[i][j] -= secret[i][j]
                        if secret[i][j] == 1:
                            index2 += 1
                    if em[i][j] < bin[int(ks)]:
                        img_hide3[i][j] -= 1
                        index += 1
            # Odd-parity pass: same scheme, but the shift/bit counters are
            # not (fully) maintained here — see the NOTE above.
            if np.mod(i + j, 2) == 1 and flag3 == 1 and ec3 > 0:
                ks = cm[i][j] - 1
                if type == 0:
                    if em[i][j] == bin[int(ks)]:
                        ec3 = ec3 - 1
                        img_hide3[i][j] += secret[i][j]
                        if secret[i][j] == 1:
                            index2 += 1
                    if em[i][j] > bin[int(ks)]:
                        img_hide3[i][j] += 1
                else:
                    if em[i][j] == bin[int(ks)]:
                        ec3 = ec3 - 1
                        img_hide3[i][j] -= secret[i][j]
                    if em[i][j] < bin[int(ks)]:
                        img_hide3[i][j] -= 1
    print("偏移数量", index)
    print('偏移点数量', index2)
    return img_hide3, ec3


def danci(danci_img, flags, ks, ecfinals, a=1):
    '''
    Run one embedding pass over the pixels of one parity.

    :param danci_img: image to embed into
    :param flags: 0 -> even-parity pixels, 1 -> odd-parity pixels
    :param ks: number of clusters (bins)
    :param ecfinals: total target embedding capacity
    :param a: shape parameter of cal_w's survival function; defaults to 1
              so older 4-argument callers (e.g. xo) keep working
    :return: (embedded image, remaining capacity of the first insertion)
    '''
    print(ecfinals)
    halfec = ecfinals / 2
    # Handle pixel over/underflow first; `data` records overflow locations.
    danci_img, lcm, halfsize, data = Over_flow(danci_img, flags)
    data_length = len(data)
    row, col = danci_img.shape
    # Cluster pixels by texture complexity and get the prediction errors.
    label, em = get_feature(danci_img, flags, 10)
    cm = complexity_map(danci_img, label, flags)
    hs = re_hs(16, row, col, cm, flags, em)
    hs = np.sum(hs, axis=1)
    # Histogram peak drives the weight decay in cal_w.
    Hm = np.max(hs)
    print(Hm)
    W = cal_w(ecfinals, Hm, a)
    print(W)
    his1, his2, errolist1, errolist2 = cal_entropy(danci_img, label, ks, flags, W)
    scio.savemat('his1.mat', {'his1': his1})
    scio.savemat('his2.mat', {'his2': his2})
    # Re-rank clusters by entropy for the two predictors.
    label1, check1 = cal_entropy2(his1, ks, label)
    label2, check2 = cal_entropy2(his2, ks, label)
    cm1 = complexity_map(danci_img, label1, flags)
    cm2 = complexity_map(danci_img, label2, flags)
    hs1 = re_hs(16, row, col, cm1, flags, errolist1)
    hs2 = re_hs(16, row, col, cm2, flags, errolist2)
    # First auxiliary-size estimate with no bin information yet.
    R = np.zeros(16)
    anx1 = cal_aux(check1, data_length, ks, R, lcm, flags)
    anx2 = cal_aux(check2, data_length, ks, R, lcm, flags)
    ec1 = halfec / 2 + anx1
    ec2 = halfec / 2 + anx2
    bins1 = searchbins(hs1, ec1, type=0)
    bins2 = searchbins(hs2, ec2, type=1)
    print('bin', bins1, bins2)
    # Recompute the auxiliary size once the optimal bins are known:
    # bins at +/-250 (i.e. "infinity") shrink the usable scale.
    scale = 16
    anx1 = cal_aux(check1, data_length, scale, bins1, lcm, flags)
    anx2 = cal_aux(check2, data_length, scale, bins2, lcm, flags)
    ec1 = halfec / 2 + anx1
    ec2 = halfec / 2 + anx2
    # img_hide1 holds the modified image.
    img_hide1 = copy.deepcopy(danci_img)
    secret_info = secret_msg(row, col)
    # Embed using both predictors; each call returns its remaining capacity.
    img_hide1, ec1 = insert_msg(errolist1, cm1, bins1, img_hide1, secret_info, ec1, flags, type=0)
    img_hide1, ec2 = insert_msg(errolist2, cm2, bins2, img_hide1, secret_info, ec2, flags, type=1)
    # LSB-embed the auxiliary information.  (These calls previously passed
    # the module-level global `flag` instead of this call's `flags` — fixed.)
    anx_secret = secret_msg(row, col)
    img_hide1 = Lsb_insert(row, col, anx1, img_hide1, flags, anx_secret)
    img_hide1 = Lsb_insert(row, col, anx2, img_hide1, flags, anx_secret)
    # NOTE(review): only ec1 is returned; ec2 is discarded — confirm that
    # callers only need the first predictor's remaining capacity.
    return img_hide1, ec1


def xo(imges, flags, k, ecfinals, a=1):
    """Run the two-pass embedding and return the resulting PSNR.

    :param imges: cover image
    :param flags: parity of the first pass (0 even, 1 odd)
    :param k: number of clusters
    :param ecfinals: target embedding capacity
    :param a: survival-function shape parameter forwarded to danci
              (new, defaulted — the original called danci with one
              argument too few and raised TypeError)
    :return: PSNR of the stego image, or None if the first pass could
             not exhaust its capacity (the original left `psnr` unbound
             and crashed with UnboundLocalError in that case)
    """
    # Keep an untouched copy for the PSNR comparison; the original
    # compared against the module-level global `img` instead.
    original = copy.deepcopy(imges)
    img_hide, ec = danci(imges, flags, k, ecfinals, a)
    new_img = copy.deepcopy(img_hide)
    print('ec', ec)
    psnr = None
    if int(ec) == 0:
        print('第二次插入')
        # Second pass embeds into the other parity set.
        k = 16
        flags = 1
        new_img, ec = danci(new_img, flags, k, ecfinals, a)
        pd.DataFrame(new_img).to_csv('./newimg.csv')
        psnr = cal_psnr(original, new_img, ec)
        print(psnr)
    return psnr


def plt_make(psnr, x_size):
    """Plot PSNR (y) against embedding capacity (x) and show the figure."""
    xs = x_size
    print(x_size)
    ys = psnr
    # Red line plus blue point markers over the same data.
    plt.plot(xs, ys, '-r', label='psnr')
    plt.plot(xs, ys, '.b')
    plt.title('lena psnr')
    plt.xlabel('ec', color='#1C2833')
    plt.ylabel('psnr', color='#1C2833')
    plt.legend(loc='upper left')
    plt.grid()
    plt.show()


def cal_w(ECfinal, Hm, a):
    """Return the five survival-function weights w1..w5.

    The decay rate shrinks as the requested capacity ECfinal approaches
    four times the histogram peak Hm; *a* scales the exponent of the
    survival function.
    """
    rate = 1.3 - (((ECfinal / 4) / Hm) * 0.9 + 0.2)
    shape = 3
    weights = []
    for step in range(5):          # steps 0..4 <=> original n = 1..5
        t = 0.5 * step
        survival = exp(-a * pow(exp(rate * t) - 1, shape))
        weights.append(survival)
    return weights


if __name__ == '__main__':
    # Sweep four capacity targets and record the PSNR obtained for each.
    # (The sweep used to run at module import time while `value` and
    # `flag` were defined only under this guard, so importing the module
    # raised NameError — the whole script body now lives under the guard.)
    value = 0.1  # shape parameter `a` forwarded to danci / cal_w
    flag = 0     # first pass embeds into the even-parity pixel set
    y_size = np.zeros(4)
    x_size = np.linspace(0.5, 2, 4)
    print(x_size)
    index = 0
    for i in range(len(x_size)):
        print("第{}论".format(i))
        # Target capacity (bits) for this round.
        ecfinal = np.ceil(10000 * x_size[i])
        k = 16
        Img = Load_img('test', flag)
        img = Img[0]
        imgs = copy.deepcopy(img)
        img_hide, ec = danci(imgs, flag, k, ecfinal, value)
        new_img = copy.deepcopy(img_hide)
        print('ec', ec)
        if int(ec) == 0:
            # First pass exhausted its budget: run the odd-parity pass.
            print('第二次插入')
            k = 16
            flags = 1
            new_img, ec = danci(new_img, flags, k, ecfinal, value)
            pd.DataFrame(new_img).to_csv('./newimg.csv')
            psnr = cal_psnr(img, new_img, ec)
            print('psnr', psnr)
            y_size[index] = psnr
            index += 1
    print(x_size)
    print(y_size)
    pd.DataFrame(y_size).to_csv('./psnr.csv')
    plt_make(y_size, x_size)
