import numpy as np

from mainmethod import *

if __name__ == '__main__':
    # Feature-weighting experiment for one test image:
    #   1. load image #5 from the 'test' set,
    #   2. run overflow pre-processing and gradient-based feature extraction,
    #   3. rescale features, regress feature weights against prediction errors,
    #   4. save the quantized weights and their importance ranking as a CSV.
    flag = 0         # overflow-handling mode forwarded to Over_flow / get_feature
    weightsize = 20  # number of features / weights produced by get_feature and Cm

    Img = Load_img('test')
    print(Img.img_path)
    img = Img[5]
    pathname = Img.img_path[5]
    # Output file name: "<image stem>9.csv" (written under ./dataset/ below).
    name = pathname.split('.', 1)[0] + '9.csv'
    # Deep copy so downstream processing never mutates the loaded original.
    imgs = copy.deepcopy(img)

    # Pre-process the image (overflow handling) before feature extraction.
    # lcm / halfsize / data are part of Over_flow's contract but unused here.
    imgs, lcm, halfsize, data = Over_flow(imgs, flag)

    # Initial image data: compute gradients, then prediction errors and
    # features classified by image complexity.
    v, h = get_grad(imgs)
    F, errolist, indexsize = get_feature(imgs, flag, v, h, weightsize)

    # Rescale each feature to (-1, 1) per column, then shift/scale the whole
    # matrix into (0, 1000); keep an untouched copy of the raw features.
    transfor = MinMaxScaler(feature_range=(-1, 1))
    original_F = F.copy()
    F = transfor.fit_transform(F.T)
    F = (F + 1) * 500
    F = F.T

    # Regress per-feature weights against the prediction errors, then
    # normalize by the largest magnitude and quantize to [-255, 255].
    weight = Cm(F, errolist, weightsize)
    weight = np.round(weight / np.max(abs(weight)) * 255)

    # Row 0: quantized weights; row 1: feature indices sorted by descending
    # |weight|. 999 is a sentinel marking cells never overwritten.
    weight_index = np.argsort(-abs(weight))
    dataset = np.ones((2, weightsize)) * 999
    dataset[0, :] = weight
    dataset[1, :] = weight_index
    pd.DataFrame(dataset).to_csv('./dataset/' + name)
