import numpy as np

from init import *
from sklearn.preprocessing import MinMaxScaler
import copy
from collections import Counter
from scipy.optimize import minimize
from init import *
import pandas as pd
from gatest import *
import  math

# Configure matplotlib (plt comes from `from init import *`): use a font that
# can render CJK glyphs in titles/labels, and keep the minus sign rendering
# correct when a non-default font is active.
plt.rcParams['font.sans-serif'] = ['Arial Unicode MS']
plt.rcParams['axes.unicode_minus'] = False




def get_feature(img_data, flag, ksize):
    '''
    Extract a ksize-dimensional local-complexity feature vector for every
    pixel of one checkerboard parity, plus its prediction error under the
    ceil-of-4-neighbour-average predictor.

    :param img_data: 2-D grayscale image array
    :param flag: parity selector — 0 samples pixels with even i+j, 1 with odd
    :param ksize: number of features per pixel (this implementation fills 10)
    :return: (F, errolist, index) — F is (ksize, index) feature matrix,
             errolist the prediction errors, index the number of sampled pixels
    '''
    row, col = img_data.shape
    # Upper bound on the number of sampled pixels; trimmed to the real count
    # before returning.
    size = int((row * col) / 2)
    F = np.zeros((ksize, size))
    errolist = np.zeros(size)
    index = 0
    for i in range(2, row - 2):
        for j in range(2, col - 2):
            # Only the requested checkerboard parity is stored; skipping early
            # avoids computing features that would be discarded.
            if np.mod(i + j, 2) != flag:
                continue
            # Cross neighbours (up, left, down, right)
            v1 = int(img_data[i - 1][j])
            v2 = int(img_data[i][j - 1])
            v3 = int(img_data[i + 1][j])
            v4 = int(img_data[i][j + 1])
            # Outer ring pixels
            v5 = int(img_data[i - 2][j - 1])
            v6 = int(img_data[i - 1][j - 2])
            v7 = int(img_data[i + 1][j - 2])
            v8 = int(img_data[i + 2][j - 1])
            v9 = int(img_data[i + 2][j + 1])
            v10 = int(img_data[i + 1][j + 2])
            v11 = int(img_data[i - 1][j + 2])
            v12 = int(img_data[i - 2][j + 1])
            # Left and right gradient pairs
            lg = np.array([[v5 - v1, v2 - v1], [v1 - v4, v3 - v4], [v2 - v3, v8 - v3], [v6 - v2, v7 - v2]])
            rg = np.array([[v12 - v1, v4 - v1], [v11 - v4, v10 - v4], [v4 - v3, v9 - v3], [v1 - v2, v3 - v2]])
            # f1/f2: extremes of the 4-neighbour cross
            f1 = np.max([v1, v2, v3, v4])
            f2 = np.min([v1, v2, v3, v4])
            # f3: sum of Laplacian-like second-order differences at the neighbours
            sod1 = abs(v2 + v4 + v5 + v12 - 4 * v1)
            sod2 = abs(v1 + v3 + v6 + v7 - 4 * v2)
            sod3 = abs(v2 + v4 + v8 + v9 - 4 * v3)
            sod4 = abs(v1 + v3 + v10 + v11 - 4 * v4)
            f3 = np.sum([sod1, sod2, sod3, sod4])
            # f4: total variation along the cross ring
            f4_arr = [abs(v1 - v2), abs(v2 - v3), abs(v3 - v4), abs(v4 - v1)]
            f4 = np.sum(f4_arr)
            # p: round-half-up of the neighbour mean (used by f6);
            # p3: ceil of the neighbour mean — the prediction for the error channel.
            p1 = (math.modf(np.average([v1, v2, v3, v4])))[0]
            p = np.ceil(np.average([v1, v2, v3, v4])) if p1 >= 0.5 else np.floor(np.average([v1, v2, v3, v4]))
            p3 = np.ceil(np.average([v1, v2, v3, v4]))
            # f5: absolute differences across the extended neighbourhood
            f5_arr = [abs(v6 - v7), abs(v5 - v2), abs(v2 - v8), abs(v1 - v3), abs(v12 - v4), abs(v4 - v9),
                      abs(v11 - v10), abs(v5 - v12), abs(v6 - v1), abs(v1 - v11), abs(v2 - v4), abs(v7 - v3),
                      abs(v3 - v10), abs(v8 - v9)]
            f5 = np.sum(f5_arr)
            # f6: deviation of every neighbour from the rounded predictor p
            f6_arr = [abs(p - v1), abs(p - v2), abs(p - v3), abs(p - v4), abs(p - v5), abs(p - v6), abs(p - v7),
                      abs(p - v8), abs(p - v9), abs(p - v10), abs(p - v11), abs(p - v12)]
            f6 = np.sum(f6_arr)
            # f7/f8: total magnitudes of the left/right gradient vectors
            f7 = np.sum(np.sqrt(np.sum(np.square(lg), axis=1)))
            f8 = np.sum(np.sqrt(np.sum(np.square(rg), axis=1)))
            # f9/f10: magnitudes of pairwise differences between gradient vectors
            gradDiff = []
            gradDiff2 = []
            for s in range(3):
                for q in range(s, 4):
                    gradDiff.append(lg[s] - lg[q])
                    gradDiff2.append(rg[s] - rg[q])
            gradDiff = np.array(gradDiff)
            gradDiff2 = np.array(gradDiff2)
            f9 = np.sum(np.sqrt(np.sum(np.square(gradDiff), axis=1)))
            f10 = np.sum(np.sqrt(np.sum(np.square(gradDiff2), axis=1)))
            a = np.array([f1, f2, f4, f3, f10, f9, f8, f7, f5, f6])
            F[:, index] = a
            errolist[index] = int(img_data[i][j]) - int(p3)
            index += 1

    print('index', index)
    print(F.shape)
    # Trim the over-allocated buffers to the pixels actually sampled.
    F = F[:, :index]
    errolist = errolist[:index]
    return F, errolist, index


def func2(cm):
    """
    Build an objective over weighted combinations of the feature rows.

    :param cm: (10, n) feature matrix; x supplies the 10 row weights
    :return: callable v(x) -> -sum(c - mean(c)) / std(c)

    NOTE(review): np.sum(c - np.mean(c)) is the sum of deviations from the
    mean, which is identically ~0 for any x, so v(x) is always ~0 — likely a
    bug (compare the correlation-style objectives in func()/Cm()). This
    factory appears unused in this file; confirm intent before fixing.
    """
    def v(x):
        # c: weighted combination of the 10 feature rows
        c = np.dot(np.array([x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9]]).reshape(-1, 10), cm)
        avc = np.sum(c-np.mean(c))
        print(avc)  # debug output
        cstd = np.std(c)
        return -avc / cstd

    return v


def func(arg):
    """
    Build a correlation-style objective: v(x) is the dot product of the
    centred error vector with the centred weighted feature combination,
    normalised by the sample standard deviation (ddof=1) of that combination.

    :param arg: pair (cm, errolists) — (10, n) feature matrix and n errors
    :return: callable v(x) taking the 10 weights
    """
    cm, errolists = arg
    print('cm,em2', cm.shape, errolists.shape)
    centred = errolists - np.mean(errolists)
    n = centred.shape[0]
    centred = centred.reshape(-1, n)

    def v(x):
        weights = np.array([x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9]]).reshape(-1, 10)
        combo = np.dot(weights, cm)
        return np.dot(centred, (combo - np.mean(combo)).T) / np.std(combo, ddof=1)

    print(v)
    return v





def Cm(comlex, errolist):
    """
    Search the 10 feature weights that maximise the correlation between the
    prediction errors and the weighted feature combination (SLSQP, weights
    bounded to [-255, 255]), then return the resulting complexity values.

    :param comlex: (10, n) feature matrix
    :param errolist: n prediction errors
    :return: (result, x) — ceil of the weighted combination, and the weights
    """
    bond = tuple((-255, 255) for _ in range(10))
    # x0 = np.asarray((-131,119,-119,255,106,-141,42,44,59,50)).reshape(-1, 10)
    x0 = np.asarray((1, 1, 1, 1, 1, 1, 1, 1, 1, 1)).reshape(-1, 10)
    cm = comlex
    centred_err = errolist - np.mean(errolist)
    centred_err = centred_err.reshape(-1, centred_err.shape[0])
    # L2 norm of the centred error vector (denominator of the correlation)
    err_norm = np.sqrt(np.sum(np.square(errolist - np.mean(errolist))))

    def objective(x):
        # Negated correlation: minimising it maximises the correlation.
        weights = np.array([x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9]]).reshape(-1, 10)
        combo = np.dot(weights, cm)
        centred = combo - np.mean(combo)
        return -(np.dot(centred_err, centred.T)) / (err_norm * np.sqrt(np.sum(np.square(centred))))

    res = minimize(objective, x0, method='SLSQP', bounds=bond)
    print(res.fun)
    print(res.success)
    print(np.ceil(res.x))
    result = np.ceil(np.dot(res.x, comlex))
    return result, res.x



def complexity_map(img_data, label, flags):
    """
    Spread per-sample class labels back onto the pixel grid.

    Labels are consumed in scan order of the selected checkerboard parity and
    stored 1-based (label + 1); all other cells stay 0.

    :param img_data: 2-D image array (only its shape is used)
    :param label: per-sample class indices in scan order
    :param flags: parity selector (0 even i+j, 1 odd)
    :return: 2-D array of 1-based class values
    """
    grid = np.zeros(img_data.shape)
    rows, cols = img_data.shape
    pos = 0
    for r in range(2, rows - 2):
        for c in range(2, cols - 2):
            # Parity match (equivalent to the paired even/odd branches).
            if np.mod(r + c, 2) == flags:
                grid[r][c] = int(label[pos]) + 1
                pos += 1
    return grid


def erro_map(img_data, flag2):
    """
    Build the prediction-error map for one checkerboard parity using the
    ceil-of-4-neighbour-average predictor.

    :param img_data: 2-D image array
    :param flag2: parity selector (0 even i+j, 1 odd)
    :return: (erro, erro_list) — error image (0 elsewhere) and errors in
             scan order
    """
    rows, cols = img_data.shape
    err_img = np.zeros(img_data.shape)
    err_vals = []
    for i in range(2, rows - 2):
        for j in range(2, cols - 2):
            # Only the selected parity carries errors.
            if np.mod(i + j, 2) != flag2:
                continue
            neighbours = [int(img_data[i - 1][j]), int(img_data[i][j - 1]),
                          int(img_data[i + 1][j]), int(img_data[i][j + 1])]
            pred = np.ceil(np.average(neighbours))
            e = img_data[i][j] - pred
            err_img[i][j] = e
            err_vals.append(e)
    return err_img, err_vals



def cal_entropy(imgs, label, ks, flags):
    """
    Score each complexity class by the Shannon entropy of its prediction
    errors, remap the labels so that rank 0 is the lowest-entropy class, and
    plot the per-rank population histogram.

    :param imgs: 2-D image array
    :param label: per-sample class index (0..ks-1) in scan order of the
                  selected parity
    :param ks: number of classes (the bar chart assumes ks == 16 tick names)
    :param flags: parity selector (0 even i+j, 1 odd)
    :return: (labels, cor) — labels remapped to entropy ranks, and cor, the
             rank of each original class
    """
    A, B = imgs.shape
    his = [[] for _ in range(ks)]
    count = 0
    entropy = []
    for i in range(2, A - 2):
        for j in range(2, B - 2):
            v1 = int(imgs[i - 1][j])
            v2 = int(imgs[i + 1][j])
            v3 = int(imgs[i][j + 1])
            v4 = int(imgs[i][j - 1])
            # Prediction error of the ceil-average predictor (opposite sign
            # to get_feature's errolist).
            e = np.ceil(np.average([v1, v2, v3, v4])) - int(imgs[i][j])
            if flags == 0 and np.mod(i + j, 2) == 0:
                his[label[count]].append(e)
                count = count + 1
            elif flags == 1 and np.mod(i + j, 2) == 1:
                his[label[count]].append(e)
                count = count + 1

    # Shannon entropy (bits) of each class's error distribution.
    # NOTE(review): an empty class would raise ZeroDivisionError (total == 0).
    for k in range(ks):
        ent = 0
        c = Counter(his[k])
        total = len(his[k])
        # Renamed from `i`, which shadowed the class-loop index in the original.
        for freq in c.values():
            pro = int(freq) / total
            ent = ent + pro * math.log(1 / pro, 2)
        entropy.append(ent)
    print(entropy)
    entropy_sort = sorted(entropy)
    cor = []
    labels = []
    print(entropy_sort)
    # cor[i] = entropy rank of class i; consumed ranks are marked -1 so that
    # ties map to distinct ranks.
    for i in range(ks):
        for j in range(ks):
            if entropy[i] == entropy_sort[j]:
                cor.append(j)
                entropy_sort[j] = -1
                break
    print(cor)
    for lab in label:
        labels.append(cor[lab])
    legend = []
    fig = []
    frequency = list(range(ks))
    new_name = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p']
    for i in range(ks):
        legend.append('l=' + str(cor[i]))
        fig.append(str(cor[i]))
        # NOTE(review): indexes his by the *rank* value cor[i] — possibly
        # his[i] (population of class ranked cor[i]) was intended; confirm.
        frequency[cor[i]] = int(len(his[cor[i]]))
    plt.grid(ls=':')
    plt.bar(range(ks), frequency, tick_label=new_name, color='b')
    plt.title('The distribution of pixels')
    plt.xlabel('Neuron')
    plt.ylabel('Population')
    print(10086)
    plt.show()
    return labels, cor


def cal_aux(checklist, data_length, scale, re, lcm, flags):
    """
    Compute the auxiliary-information length (bits) needed for embedding.

    :param checklist: 16 class ranks (0-based; shifted to 1-based below so
                      log2 is defined for rank 0)
    :param data_length: number of overflow-location bytes
    :param scale: usable bin count; reduced by one per sentinel 250 in re
    :param re: chosen bin per class; 250 is a "no usable bin" sentinel
    :param lcm: overflow location map; all-zero means no overflow payload
    :param flags: 0 = initial estimate, 1 = refined estimate after bin search
    :return: auxiliary bit count

    NOTE(review): flags outside {0, 1} leaves `aux` unbound and raises
    UnboundLocalError at the final print; callers only pass 0/1.
    """
    cl = 0
    checklist = np.array(checklist) + 1
    print('incheck', checklist)
    if flags == 0:
        for i in range(16):
            if re[i] == 250:
                scale = scale - 1
            # Every rank stored out of place costs ceil(log2(rank)) bits.
            if checklist[i] != i:
                cl = cl + np.ceil(np.log2(checklist[i]))
        if np.sum(lcm) == 0:
            aux = scale * 3 + 4 + 1 + 17 + cl + 4
        else:
            aux = scale * 3 + 4 + 1 + data_length * 8 + 17 + 17 + cl + 4
    if flags == 1:  # update the auxiliary length according to the optimised bins
        for i in range(16):
            if re[i] < 250 and checklist[i] > i:
                cl = cl + np.ceil(np.log2(checklist[i]))
            if re[i] == 250:
                scale = scale - 1
                print('scale', scale)
        if np.sum(lcm) == 0:
            aux = scale * 3 + 4 + 1 + 17 + cl + 4
        else:
            aux = scale * 3 + 4 + 1 + data_length * 8 + 17 + 17 + cl + 4
    print('The aux is ', flags, ' ', aux)
    return aux


# def re_hs(ks, row, col, sp, flag, em):
#     Hs = np.zeros((511, ks))
#     index = 0
#     for i in range(2, row - 2):
#         for j in range(2, col - 2):
#             if np.mod(i + j, 2) == 0 and flag == 0:
#                 # Hs[int(em[i][j] + 254)][int(cm[i][j]) - 1] += 1
#                 Hs[int(em[index]+254)[]]+=1
#                 # index += 1
#
#             if np.mod(i + j, 2) == 1 and flag == 1:
#                 Hs[int(em[i][j] + 254)][int(int(cm[i][j])) - 1] += 1
#                 # index += 1
#     return Hs


def re_hs(ks, indexsize2, sp, em):
    """
    Build per-segment error histograms: 511 bins (error + 255) by ks segments.

    Errors are consumed from the high-complexity end (descending j); sp holds
    the segment split indices in ascending order, so segment seg-1 covers
    indices j >= sp[seg].

    :param ks: number of segments (histogram columns)
    :param indexsize2: number of errors in em
    :param sp: ascending split indices (sp[0] is the lowest index)
    :param em: 1-D error array, values in [-255, 255]
    :return: (511, ks) histogram matrix

    NOTE(review): the element at each boundary (first j < sp[seg]) only
    advances the segment counter and is never counted, and elements below the
    last split are dropped once seg reaches len(sp) — confirm this is intended.
    """
    Hs = np.zeros((511, ks))
    seg = 1
    print(em.shape)
    print(sp, indexsize2)
    for j in range(indexsize2 - 1, -1, -1):
        if seg < len(sp):
            if j >= sp[seg]:
                Hs[int(em[j] + 255)][seg - 1] += 1
            else:
                seg += 1
    return Hs

def insert_msg(em, cm, bin, img_hide3, secret, ec3, flag3):
    '''
    Embed secret bits via histogram shifting of prediction errors.

    em: prediction-error matrix
    cm: complexity-class matrix (1-based classes)
    bin: per-class optimal embedding bin
    img_hide3: image modified in place (also returned)
    secret: secret-bit matrix
    ec3: remaining embedding capacity
    flag3: checkerboard parity (0 or 1)
    '''
    shifted = 0        # pixels shifted without carrying payload
    ones_embedded = 0  # embedded bits equal to 1
    row, col = img_hide3.shape
    for i in range(2, row - 2):
        for j in range(2, col - 2):
            # The original even/odd branches were identical, so a single
            # parity comparison selects the active checkerboard.
            if np.mod(i + j, 2) != flag3 or ec3 <= 0:
                continue
            cls = int(cm[i][j] - 1)  # 0-based class index
            b = bin[cls]
            # Errors exactly on the embedding bins absorb one payload bit.
            if em[i][j] == b:
                ec3 = ec3 - 1
                img_hide3[i][j] += secret[i][j]
                if secret[i][j] == 1:
                    ones_embedded += 1
            if em[i][j] == -b - 1:
                ec3 = ec3 - 1
                img_hide3[i][j] -= secret[i][j]
                if secret[i][j] == 1:
                    ones_embedded += 1
            # Errors outside the bins are shifted outward to make room.
            if em[i][j] > b:
                img_hide3[i][j] += 1
                shifted += 1
            if em[i][j] < -b - 1:
                img_hide3[i][j] -= 1
                shifted += 1

    print("偏移数量", shifted)
    print('偏移点数量', ones_embedded)
    return img_hide3, ec3


def danci(danci_img, flags, ks, ecfinals):
    danci_img, lcm, halfsize, data = Over_flow(danci_img, flags)
    # 数组长度
    data_length = len(data)
    # 获取图片的坐标的长宽
    row, col = danci_img.shape
    # 初始图像数据,获取误差以及按照图像的复杂度进行分类
    label, em = get_feature(danci_img, flags, 10)
    label, check = cal_entropy(danci_img, label, ks, flags)
    # print(label)
    pd.DataFrame(em).to_csv('./em.csv')
    cm = complexity_map(danci_img, label, flags)
    hs = re_hs(16, row, col, cm, flags, em)
    pd.DataFrame(hs).to_csv('./his.csv')
    pd.DataFrame(cm).to_csv('./cm.csv')
    # check = cal_entropy(hs, ks)
    R = np.zeros(16)
    anx = cal_aux(check, data_length, ks, R, lcm, flags)
    # anx = test[0]
    ec1 = ecfinals / 2 + anx
    bins = searchbins(hs, ec1)
    print('bin', bins)
    '''
        通过最佳嵌入点判断是否存储了两个以上的250（即无穷大）
        如果超过两个以上需要重新计算辅助信息大小
    '''
    scale = 16
    anx = cal_aux(check, data_length, scale, bins, lcm, flags)
    # anx = test[1]
    ec1 = ecfinals / 2 + anx
    # # img_hide记录修改过后的图像坐标
    img_hide1 = copy.deepcopy(danci_img)
    # # 嵌入的数据secrt_msg[0][index]
    secret_info = secret_msg(row, col)
    # # 嵌入数据之后的误差图像
    img_hide1, ec1 = insert_msg(em, cm, bins, img_hide1, secret_info, ec1, flags)
    # # lsb插入辅助信息
    anx_secret = secret_msg(row, col)
    img_hide1 = Lsb_insert(row, col, anx, img_hide1, flag, anx_secret)
    return img_hide1, ec1


def cal_step(cm, k, step):
    """
    Choose split points over the sorted complexity values: walking from the
    highest value downwards, a new split is proposed whenever the value drops
    below the current split's value divided by `step`; a split is only kept
    (k decremented) once it covers more than 512*512/32 samples. The lowest
    slot is pinned to the minimum's index, and the array is flipped and cut
    at its first minimum.

    :param cm: 1-D complexity values (sorted internally)
    :param k: maximum number of splits (sp has k+1 slots)
    :param step: ratio threshold between consecutive segments
    :return: ascending split-index array
    """
    cm = np.sort(cm)
    splits = np.zeros(k + 1)
    peak = np.max(cm)
    low_idx = np.argmin(cm)
    file_name = 'cm.mat'
    scio.savemat(file_name, {'cm': cm})
    print(peak)
    splits[k] = np.argmax(cm)
    for pos in range(len(cm) - 1, -1, -1):
        if k > 0 and cm[pos] < cm[int(splits[k])] / step:
            splits[k - 1] = pos
            print(splits)
            # only accept the split once the segment is large enough
            if splits[k - 1] > 512 * 512 / 32:
                k -= 1
    splits[k] = low_idx
    splits = np.flip(splits)
    cut = np.argmin(splits)
    splits = splits[0:cut + 1]
    print(splits)
    return splits


def plt_make(psnr):
    """Plot PSNR against embedding capacity (9 samples from 0.01 to 0.13)."""
    rates = np.linspace(0.01, 0.13, 9)
    # red line through the measurements, blue dots on top of it
    plt.plot(rates, psnr, '-r', label='psnr')
    plt.plot(rates, psnr, '.b')
    plt.title('lena psnr')
    plt.xlabel('ec', color='#1C2833')
    plt.ylabel('psnr', color='#1C2833')
    plt.legend(loc='upper left')
    plt.grid()
    plt.show()


def div_capacity(sp, ec, indexsize):
    """
    Split the total capacity `ec` across segments proportionally to segment
    size, walking the split points from the top index downwards.

    :param sp: descending split indices (sp[0] is the highest boundary)
    :param ec: total capacity to distribute
    :param indexsize: total number of samples
    :return: list of per-segment (ceiled) capacities
    """
    prev = indexsize
    rates = []
    for boundary in sp[1:]:
        rates.append(np.ceil(((prev - boundary) / indexsize) * ec))
        prev = boundary
    return rates


def testdemo(complex,errolists,ak):
    em = errolists - np.mean(errolists)
    size = em.shape[0]
    em = em.reshape(-1, size)
    cm = np.dot(ak,complex)
    c = cm - np.mean(cm)
    print(cm.shape)
    size = len(c)
    result = np.dot(em,c.T)/(np.std(cm)*np.std(errolists)*np.sqrt(size)*np.sqrt(size))
    print(result)


def devideAndPick2(errs, rate, flags):
    """
    Binary-search style scan over candidate percentiles to find the
    parameters minimising distortion for the given rate.

    Evaluates Ga_one at the lower bound, the upper bound (99), then
    repeatedly at midpoints of the interval(s) adjacent to the current best,
    keeping the best (P, Z) seen.

    :param errs: prediction errors of one complexity segment
    :param rate: capacity assigned to this segment
    :param flags: parity flag forwarded to Ga_one
    :return: (P, Z, idx, dist) — best parameters, winning percentile index,
             and its distortion

    NOTE(review): the semantics of Ga_one (from gatest) are not visible here;
    the summary above is inferred from usage — confirm against gatest.
    """
    P = []
    Z = []
    # distortion per percentile, initialised to a large sentinel
    dist_lists = np.ones(100) * 100000000
    pl_size = np.zeros((100, 2))   # active search intervals
    tmpArea = np.zeros((100, 2))   # next-round candidate intervals
    Dist = 10000000000
    # smallest percentile that can still carry the requested rate
    miner = np.ceil(rate / len(errs) * 100)
    maxer = 99

    dist_lists, data = Ga_one(errs, miner, dist_lists, rate, flags)
    if dist_lists[int(miner)] != 0 and dist_lists[int(miner)] < Dist:
        Dist = dist_lists[int(miner)]
        P = data[0]
        Z = data[1]

    dist_lists, data = Ga_one(errs, maxer, dist_lists, rate, flags)
    if dist_lists[int(maxer)] != 0 and dist_lists[int(maxer)] < Dist:
        Dist = dist_lists[int(maxer)]
        P = data[0]
        Z = data[1]
    temp = 1
    pl_size[0, :] = [miner, maxer]
    if miner + 1 == maxer:
        temp = 0
    index = 0
    aa = []
    # Bisect every active interval; keep only intervals touching the current
    # global minimum for the next round.
    while temp > 0:
        tmpC = 0
        for i in range(temp):
            # print('plsize',pl_size)
            idx = np.floor((pl_size[i, 0] + pl_size[i, 1]) / 2)
            aa.append([idx, pl_size[i, 0], pl_size[i, 1]])
            print('这次的', idx, pl_size[i, 0], pl_size[i, 1])
            dist_lists, data = Ga_one(errs, idx, dist_lists, rate, flags)
            if dist_lists[int(idx)] != 0 and dist_lists[int(idx)] < Dist:
                Dist = dist_lists[int(idx)]
                P = data[0]
                Z = data[1]
            if idx > pl_size[i, 0] + 1:
                # print('tmc1',tmpC)
                tmpArea[tmpC, :] = [pl_size[i, 0], idx]
                tmpC += 1
            if idx + 1 < pl_size[i, 1]:
                print('tmc2', tmpC)
                tmpArea[tmpC, :] = [idx, pl_size[i, 1]]
                tmpC += 1
        temp = 0
        minindex = np.argmin(dist_lists)
        for i in range(tmpC):
            if tmpArea[i, 0] == minindex or tmpArea[i, 1] == minindex:
                pl_size[temp, :] = tmpArea[i, :]
                temp += 1
    idx = np.argmin(dist_lists)
    return P, Z, idx, dist_lists[idx]



if __name__ == '__main__':
    flag = 0  # parity: 0 processes pixels with even i+j
    # ecfinal = 10000

    # target embedding capacity: 5% of a 512x512 image
    ecfinal = np.ceil(0.05 * 512 * 512)
    k = 16  # number of complexity segments
    Img = Load_img('test', flag)
    img = Img[0]
    imgs = copy.deepcopy(img)
    danci_img, lcm, halfsize, data = Over_flow(imgs, flag)
    # extract per-pixel features and prediction errors for this parity
    F ,errolist,indexsize=get_feature(danci_img, flag, 10)
    erro  =errolist.copy()
    # normalise features to [0, 1000]
    scaler = MinMaxScaler(feature_range=(0, 1000))
    F = scaler.fit_transform(F)
    # print(scaled_features)
    # print('cm,em1',F.shape,errolist.shape)
    # reference weight vectors from earlier optimisation runs
    ak =np.array([-255,240,-81,213,202,-159,34,29,93,100])
    ak2 = np.array([70.0,153.0,-165.0,-45.0,63.0,191.0,28.0,222.0,-68.0,217.0])
    # file_name = 'F.mat'
    # scio.savemat(file_name, {'cm': F})
    # file_names = 'em.mat'
    # scio.savemat(file_names, {'em': errolist})
    # optimise the complexity weights against the (normalised) features
    cm,ak3=Cm(F,errolist)
    # testdemo(F, errolist, ak2)
    testdemo(F, erro, ak)
    testdemo(F, erro, ak3)

    # sort the error list by complexity
    idx = np.argsort(cm)
    erro = erro[idx]
    sp = cal_step(cm,k-1,step=1.28)
    capacity =div_capacity(sp,ecfinal/2,indexsize)
    ksize = len(capacity)
    H = re_hs(ksize,indexsize,sp,erro)
    test = []
    for i in range(ksize):
        print()
        # NOTE(review): cal_step returns float indices — slicing an ndarray
        # with sp[i+1]:sp[i] raises TypeError; likely int(...) casts intended.
        err = erro[sp[i+1]:sp[i]]
        # NOTE(review): `err` (the segment slice) is computed but the full
        # `erro` array is passed below — confirm which was intended.
        P, Z, idx, dist = devideAndPick2(erro,capacity[i],flag)
        test.append([P,Z])
    print(test)
    # print(H)
    # print(aa)
    # test_pz(H[:,0],ecfinal/2)
    # # testdemo(F,errolist,ak)



