import os
import struct

import matplotlib.pyplot as plt
import tensorflow as tf
from tqdm import tqdm

from common.AttackMethod import TA, DL
from common.ImgPainter import ImgPainter
import random
import numpy as np
from sklearn.preprocessing import StandardScaler
import sys
import h5py
from sklearn.decomposition import PCA
import pickle


class DataProcessingStep(ImgPainter):
    """Base class for data-pipeline steps.

    Subclasses override ``execute`` and mutate the shared ``data`` object in
    place.  Inherits ``ImgPainter``, which presumably supplies the plotting
    helpers (``img_line``, ``tsne_plot``) some steps call — TODO confirm.
    """

    def execute(self, data):
        # No-op by default; concrete steps read and write attributes on `data`.
        pass


class LoadASCAD(DataProcessingStep):
    """Load the ASCAD dataset (profiling + attack traces) from an HDF5 file.

    Populates train_x/train_y from the profiling group, test_x/test_y from the
    attack group, and initializes the prediction bookkeeping arrays.
    """

    def __init__(self, data_path=r"Dataset/ASCAD"):
        # Directory that contains ASCAD.h5.
        self.data_path = data_path

    def execute(self, data):
        print("-------------------加载ASCAD数据集-------------------")
        try:
            in_file = h5py.File(os.path.join(self.data_path, "ASCAD.h5"), "r")
        except OSError:
            # h5py raises OSError (incl. FileNotFoundError) when the file is
            # missing; the original caught FileExistsError, which never fires,
            # so this error path was dead.
            print("找不到数据，检查input ...", self.data_path)
            sys.exit(-1)
        # Load profiling traces (stored as small ints, int8 is sufficient).
        data.train_x = np.array(in_file['Profiling_traces/traces'], dtype=np.int8)
        # Load profiling labels
        data.train_y = np.array(in_file['Profiling_traces/labels'])
        # Load attacking traces
        data.test_x = np.array(in_file['Attack_traces/traces'], dtype=np.int8)
        # Load attacking labels
        data.test_y = np.array(in_file['Attack_traces/labels'])
        # One empty prediction slot per attack trace; row/col index bookkeeping.
        data.pre_y = np.full((data.test_y.shape[0], 1), None)
        data.pre_y_row = np.arange(data.test_y.shape[0])
        data.pre_y_col = np.full(data.test_y.shape[0], 0)


class LoadJRMM1(DataProcessingStep):
    """Load the JRMM trace set, segment each trace into repeated operations and
    keep only the samples whose high-bit Hamming weight matches the current model.
    """

    def __init__(self, data_path=r"C:\Users\zhanghaojin\Desktop\XZ_SCA\Dataset\JRMM"):
        self.data_path = data_path

    def execute(self, data):
        print("-------------------加载JRMM数据集-------------------")
        try:
            x = np.load(os.path.join(self.data_path, "all_trace.npy"))
            y = np.load(os.path.join(self.data_path, "y_value_1.npy"))
            z = np.load(os.path.join(self.data_path, "z_value_1.npy"))
        except FileNotFoundError:
            # np.load raises FileNotFoundError for a missing file; the original
            # caught ImportError, which never fires, so the message below was
            # unreachable.
            print("找不到数据，检查input ...", self.data_path)
            sys.exit(-1)
        index, length = self.division_trace(x)
        # 80% of the raw traces for profiling, the remaining 20% for the attack.
        division_ratio = int(x.shape[0] * 0.8)
        if data.pre_y is None:
            data.pre_y = np.full(y[division_ratio:].shape, None)
        division_x, division_y, division_z, y_index_col = self.division_data(index, length, x[:division_ratio],
                                                                             y[:division_ratio], z[:division_ratio],
                                                                             data)
        data.train_x, data.train_y, _, _, _, _ = self.pick_data(division_x, division_y, division_z, y_index_col, data)
        division_x, division_y, division_z, y_index_col = self.division_data(index, length, x[division_ratio:],
                                                                             y[division_ratio:], z[division_ratio:],
                                                                             data)
        data.test_x, data.test_y, data.test_z, data.test_y32, data.pre_y_row, data.pre_y_col = self.pick_data(
            division_x, division_y, division_z, y_index_col, data)

    @staticmethod
    def pick_data(division_x, division_y, division_z, y_index_col, data):
        """Keep samples whose label bits above ``data.low_bit`` have Hamming
        weight equal to the current model class.

        Returns (traces, low-bit labels, z values, full labels, row indices, column indices).
        """
        pick_x = []
        pick_y = []
        pick_z = []
        pick_y_index_row = []
        pick_y_index_col = []
        for i in range(division_x.shape[0]):
            # Hamming weight of the 32-bit label's bits above `low_bit`.
            if bin((division_y[i] & 0xFFFFFFFF) >> data.low_bit).count('1') == data.model_array[data.model_index]:
                pick_x.append(division_x[i])
                pick_z.append(division_z[i])
                pick_y.append(division_y[i])
                pick_y_index_row.append(i)
                pick_y_index_col.append(y_index_col[i])
        pick_x = np.array(pick_x)
        pick_z = np.array(pick_z)
        pick_y = np.array(pick_y)
        # Map concatenated-segment indices back to original trace rows.
        # NOTE(review): assumes 64 label columns per trace — confirm against division_data.
        pick_y_index_row = np.array(pick_y_index_row) % int(len(division_y) / 64)
        pick_y_index_col = np.array(pick_y_index_col)
        # `pick_y & (2**low_bit - 1)` keeps only the low bits as the class label.
        return pick_x, pick_y & (2 ** data.low_bit - 1), pick_z, pick_y, pick_y_index_row, pick_y_index_col

    @staticmethod
    def division_data(index, length, x, y, z, data):
        """Cut each trace into the segment(s) selected by data.median/all_median
        and stack them with the matching label columns."""
        division_x = []
        division_y = []
        division_z = []
        y_index_col = []
        if data.all_median == 1 or data.all_median == 4:
            # Take every all_median-th label column starting at `median`;
            # segment start positions are shared per group of 4 columns.
            for i in range(data.median, y.shape[1], data.all_median):
                division_x.append(x[:, index[int(i / 4)]:index[int(i / 4)] + length])
                division_y.append(y[:, i])
                division_z.append(z[:, i])
                y_index_col.append(np.full(x.shape[0], i).tolist())
        if data.all_median == 256:
            # Single-column mode: only the `median` column is used.
            division_x.append(x[:, index[int(data.median / 4)]:index[int(data.median / 4)] + length])
            division_y.append(y[:, data.median])
            division_z.append(z[:, data.median])
            y_index_col.append(np.full(x.shape[0], data.median).tolist())
        division_x = np.concatenate(division_x, axis=0)
        division_y = np.concatenate(division_y, axis=0)
        division_z = np.concatenate(division_z, axis=0)
        y_index_col = np.concatenate(y_index_col, axis=0)
        return division_x, division_y, division_z, y_index_col

    def division_trace(self, x, first_length=700, interval=200, threshold=0.6):
        """Locate repeated operation segments by correlating the mean trace with
        its first `first_length` points.

        Returns (segment start indices, common segment length).
        """
        print("-------------------划分能量迹-------------------")
        mean_x = np.mean(x, axis=0)
        first_x = mean_x[:first_length]
        corr_arr = np.array(
            [np.corrcoef(first_x, mean_x[i:i + first_length])[0, 1] for i in range(0, x[0].shape[0] - first_length)])
        corr_arr[corr_arr < threshold] = 0
        # Non-maximum suppression: inside each sliding window keep only the
        # strongest correlation peak.
        for i in range(0, corr_arr.shape[0] - interval):
            if not np.all(corr_arr[i:i + interval] == 0):
                max_index = np.where(corr_arr[i:i + interval] == np.max(corr_arr[i:i + interval]))[0][0]
                corr_max = corr_arr[i + max_index]
                corr_arr[i:i + interval] = 0
                corr_arr[i + max_index] = corr_max
        # Use the configured threshold here too; the original hard-coded 0.6,
        # silently ignoring a non-default `threshold` argument (0.6 is the
        # default, so behavior at defaults is unchanged).
        index = np.where(corr_arr > threshold)[0]
        length = np.min(np.diff(index))
        print("划分为{}段".format(len(index)), "每段长{}".format(length))
        return index, length

class Loadxxx(DataProcessingStep):
    """Load a pre-computed attack dataset (attack_u.npy / state_u.npy) and
    perform an 80/20 train/test split along the trace axis."""

    def __init__(self, data_path=r"C:\Users\zhanghaojin\Desktop\师兄论文\code\dataset"):
        self.data_path = data_path

    def execute(self, data):
        # NOTE(review): banner says JRMM but the files differ from LoadJRMM —
        # confirm which dataset this actually is.
        print("-------------------加载JRMM数据集-------------------")
        try:
            x = np.load(os.path.join(self.data_path, "attack_u.npy"))
            y = np.load(os.path.join(self.data_path, "state_u.npy"))
        except FileNotFoundError:
            # Consistent with the other Load* steps: report and abort when the
            # data files are missing (the original crashed with a raw traceback).
            print("找不到数据，检查input ...", self.data_path)
            sys.exit(-1)

        # 80% train / 20% test split.
        division_ratio = int(x.shape[0] * 0.8)

        data.train_x = x[:division_ratio]
        data.train_y = y[:division_ratio]

        data.test_x = x[division_ratio:]
        data.test_y = y[division_ratio:]
class LoadJRMM(DataProcessingStep):
    """Like LoadJRMM1, but trace segmentation is optional: when
    ``data.division_trace`` is falsy the full trace is reused for every label column."""

    def __init__(self, data_path=r"C:\Users\zhanghaojin\Desktop\paper_code\dataset\jrmm"):
        self.data_path = data_path

    def execute(self, data):
        print("-------------------加载JRMM数据集-------------------")
        try:
            x = np.load(os.path.join(self.data_path, "all_trace.npy"))
            y = np.load(os.path.join(self.data_path, "y_value_1.npy"))
            z = np.load(os.path.join(self.data_path, "z_value_1.npy"))
        except FileNotFoundError:
            # np.load raises FileNotFoundError; the original caught ImportError,
            # which never fires, so this error path was dead.
            print("找不到数据，检查input ...", self.data_path)
            sys.exit(-1)
        if data.division_trace:
            index, length = self.division_trace(x)
        else:
            # No segmentation: every segment starts at 0 and spans the whole trace.
            index = np.full(int(y.shape[1] / 4), 0)
            length = x.shape[1]
        # 80% of the raw traces for profiling, the rest for the attack phase.
        division_ratio = int(x.shape[0] * 0.8)
        if data.pre_y is None:
            data.pre_y = np.full(y[division_ratio:].shape, None)
        division_x, division_y, division_z, y_index_col = self.division_data(index, length, x[:division_ratio], y[:division_ratio], z[:division_ratio], data)
        data.train_x, data.train_y, _, _, _, _ = self.pick_data(division_x, division_y, division_z, y_index_col, data)
        division_x, division_y, division_z, y_index_col = self.division_data(index, length, x[division_ratio:], y[division_ratio:], z[division_ratio:], data)
        data.test_x, data.test_y, data.test_z, data.test_y32, data.pre_y_row, data.pre_y_col = self.pick_data(division_x, division_y, division_z, y_index_col, data)

    @staticmethod
    def pick_data(division_x, division_y, division_z, y_index_col, data):
        """Keep samples whose label bits above ``data.low_bit`` have Hamming
        weight equal to the current model class.

        Returns (traces, low-bit labels, z values, full labels, row indices, column indices).
        """
        pick_x = []
        pick_y = []
        pick_z = []
        pick_y_index_row = []
        pick_y_index_col = []
        for i in range(division_y.shape[0]):
            # Hamming weight of the 32-bit label's bits above `low_bit`.
            if bin((division_y[i] & 0xFFFFFFFF) >> data.low_bit).count('1') == data.model_array[data.model_index]:
                if data.division_trace:
                    pick_x.append(division_x[i])
                else:
                    # NOTE(review): int(i / division_x.shape[0]) yields the
                    # column-group index, not the trace row (i % shape[0]
                    # would) — confirm this indexing is intended.
                    pick_x.append(division_x[int(i / division_x.shape[0])])
                pick_z.append(division_z[i])
                pick_y.append(division_y[i])
                pick_y_index_row.append(i)
                pick_y_index_col.append(y_index_col[i])
        pick_x = np.array(pick_x)
        pick_z = np.array(pick_z)
        pick_y = np.array(pick_y)
        # Map concatenated-segment indices back to original trace rows.
        # NOTE(review): assumes 64 label columns per trace — confirm against division_data.
        pick_y_index_row = np.array(pick_y_index_row) % int(len(division_y) / 64)
        pick_y_index_col = np.array(pick_y_index_col)
        # `pick_y & (2**low_bit - 1)` keeps only the low bits as the class label.
        return pick_x, pick_y & (2 ** data.low_bit - 1), pick_z, pick_y, pick_y_index_row, pick_y_index_col

    @staticmethod
    def division_data(index, length, x, y, z, data):
        """Cut each trace into the segment(s) selected by data.median/all_median
        and stack them with the matching label columns."""
        division_x = []
        division_y = []
        division_z = []
        y_index_col = []
        if data.all_median == 1 or data.all_median == 4:
            # Take every all_median-th label column starting at `median`;
            # segment start positions are shared per group of 4 columns.
            for i in range(data.median, y.shape[1], data.all_median):
                division_x.append(x[:, index[int(i / 4)]:index[int(i / 4)] + length])
                division_y.append(y[:, i])
                division_z.append(z[:, i])
                y_index_col.append(np.full(x.shape[0], i).tolist())
        if data.all_median == 256:
            # Single-column mode: only the `median` column is used.
            division_x.append(x[:, index[int(data.median / 4)]:index[int(data.median / 4)] + length])
            division_y.append(y[:, data.median])
            division_z.append(z[:, data.median])
            y_index_col.append(np.full(x.shape[0], data.median).tolist())
        if data.division_trace:
            division_x = np.concatenate(division_x, axis=0)
        else:
            # Without segmentation all entries are the same full-trace slice;
            # keep just the first to avoid duplicating it in memory.
            division_x = np.array(division_x[0])
        division_y = np.concatenate(division_y, axis=0)
        division_z = np.concatenate(division_z, axis=0)
        y_index_col = np.concatenate(y_index_col, axis=0)
        return division_x, division_y, division_z, y_index_col

    def division_trace(self, x, first_length=700, interval=200, threshold=0.6):
        """Locate repeated operation segments by correlating the mean trace with
        its first `first_length` points.

        Returns (segment start indices, common segment length).
        """
        print("-------------------划分能量迹-------------------")
        mean_x = np.mean(x, axis=0)
        first_x = mean_x[:first_length]
        corr_arr = np.array([np.corrcoef(first_x, mean_x[i:i + first_length])[0, 1] for i in range(0, x[0].shape[0] - first_length)])
        corr_arr[corr_arr < threshold] = 0
        # Non-maximum suppression: inside each sliding window keep only the
        # strongest correlation peak.
        for i in range(0, corr_arr.shape[0] - interval):
            if not np.all(corr_arr[i:i + interval] == 0):
                max_index = np.where(corr_arr[i:i + interval] == np.max(corr_arr[i:i + interval]))[0][0]
                corr_max = corr_arr[i + max_index]
                corr_arr[i:i + interval] = 0
                corr_arr[i + max_index] = corr_max
        # Use the configured threshold here too; the original hard-coded 0.6,
        # silently ignoring a non-default `threshold` argument (0.6 is the
        # default, so behavior at defaults is unchanged).
        index = np.where(corr_arr > threshold)[0]
        length = np.min(np.diff(index))
        print("划分为{}段".format(len(index)), "每段长{}".format(length))
        return index, length


class LoadDilithium(DataProcessingStep):
    """Load Dilithium traces plus Hamming-weight labels and segment the traces.

    Only train_x/train_y are populated; the pick_data/division_data helpers are
    kept for parity with the JRMM loaders but are not called from execute().
    """

    def __init__(self, data_path=r"../../dataset/dilithium/"):
        self.data_path = data_path

    def execute(self, data):
        # NOTE(review): banner says JRMM but this loads the Dilithium files —
        # confirm the wording.
        print("-------------------加载JRMM数据集-------------------")
        try:
            # os.path.join tolerates a missing trailing separator in data_path
            # (the original used raw string concatenation).
            x = np.load(os.path.join(self.data_path, "traces.npy"))
            y = np.load(os.path.join(self.data_path, "hm_y.npy"))
        except FileNotFoundError:
            # np.load raises FileNotFoundError; the original caught ImportError,
            # which never fires, so this error path was dead.
            print("找不到数据，检查input ...", self.data_path)
            sys.exit(-1)
        data.train_x = x
        # Column 1 of the label file carries the target value.
        data.train_y = y[:, 1]
        index, length = self.division_trace(x)

    @staticmethod
    def pick_data(division_x, division_y, division_z, y_index_col, data):
        """Keep samples whose label bits above ``data.low_bit`` have Hamming
        weight equal to the current model class.

        Returns (traces, low-bit labels, z values, full labels, row indices, column indices).
        """
        pick_x = []
        pick_y = []
        pick_z = []
        pick_y_index_row = []
        pick_y_index_col = []
        for i in range(division_x.shape[0]):
            # Hamming weight of the 32-bit label's bits above `low_bit`.
            if bin((division_y[i] & 0xFFFFFFFF) >> data.low_bit).count('1') == data.model_array[data.model_index]:
                pick_x.append(division_x[i])
                pick_z.append(division_z[i])
                pick_y.append(division_y[i])
                pick_y_index_row.append(i)
                pick_y_index_col.append(y_index_col[i])
        pick_x = np.array(pick_x)
        pick_z = np.array(pick_z)
        pick_y = np.array(pick_y)
        # Map concatenated-segment indices back to original trace rows.
        # NOTE(review): assumes 64 label columns per trace — confirm against division_data.
        pick_y_index_row = np.array(pick_y_index_row) % int(len(division_y) / 64)
        pick_y_index_col = np.array(pick_y_index_col)
        # `pick_y & (2**low_bit - 1)` keeps only the low bits as the class label.
        return pick_x, pick_y & (2 ** data.low_bit - 1), pick_z, pick_y, pick_y_index_row, pick_y_index_col

    @staticmethod
    def division_data(index, length, x, y, z, data):
        """Cut each trace into the segment(s) selected by data.median/all_median
        and stack them with the matching label columns."""
        division_x = []
        division_y = []
        division_z = []
        y_index_col = []
        if data.all_median == 1 or data.all_median == 4:
            # Take every all_median-th label column starting at `median`;
            # segment start positions are shared per group of 4 columns.
            for i in range(data.median, y.shape[1], data.all_median):
                division_x.append(x[:, index[int(i / 4)]:index[int(i / 4)] + length])
                division_y.append(y[:, i])
                division_z.append(z[:, i])
                y_index_col.append(np.full(x.shape[0], i).tolist())
        if data.all_median == 256:
            # Single-column mode: only the `median` column is used.
            division_x.append(x[:, index[int(data.median / 4)]:index[int(data.median / 4)] + length])
            division_y.append(y[:, data.median])
            division_z.append(z[:, data.median])
            y_index_col.append(np.full(x.shape[0], data.median).tolist())
        division_x = np.concatenate(division_x, axis=0)
        division_y = np.concatenate(division_y, axis=0)
        division_z = np.concatenate(division_z, axis=0)
        y_index_col = np.concatenate(y_index_col, axis=0)
        return division_x, division_y, division_z, y_index_col

    def division_trace(self, x, first_length=700, interval=400, threshold=0.4):
        """Locate repeated operation segments by correlating the mean trace with
        its first `first_length` points.

        Returns (segment start indices, common segment length).
        """
        print("-------------------划分能量迹-------------------")
        mean_x = np.mean(x, axis=0)
        first_x = mean_x[:first_length]
        corr_arr = np.array([np.corrcoef(first_x, mean_x[i:i + first_length])[0, 1] for i in range(0, x[0].shape[0] - first_length)])
        corr_arr[corr_arr < threshold] = 0
        # Non-maximum suppression: inside each sliding window keep only the
        # strongest correlation peak (tqdm shows progress; this loop is long).
        for i in tqdm(range(0, corr_arr.shape[0] - interval)):
            if not np.all(corr_arr[i:i + interval] == 0):
                max_index = np.where(corr_arr[i:i + interval] == np.max(corr_arr[i:i + interval]))[0][0]
                corr_max = corr_arr[i + max_index]
                corr_arr[i:i + interval] = 0
                corr_arr[i + max_index] = corr_max
        # NOTE(review): the final cut uses 0.6 although threshold=0.4 here, so
        # peaks between 0.4 and 0.6 survive suppression but are dropped — confirm
        # this second, stricter cut is intentional before unifying with threshold.
        index = np.where(corr_arr > 0.6)[0]
        length = np.min(np.diff(index))
        print("划分为{}段".format(len(index)), "每段长{}".format(length))
        return index, length


class CalculateHammingWeightStep(DataProcessingStep):
    """Replace train/test labels with the Hamming weight of their low `bit_length` bits."""

    def __init__(self, bit_length=8):
        # Mask selecting the lowest `bit_length` bits.
        self.max_value = (2 ** bit_length) - 1

    def execute(self, data):
        print("-------------------计算汉明重量-------------------")
        # Popcount of each masked label value.
        data.train_y = np.array([bin(int(label) & self.max_value).count('1') for label in data.train_y])
        data.test_y = np.array([bin(int(label) & self.max_value).count('1') for label in data.test_y])


class SOD(DataProcessingStep):
    """Select points of interest via the sum-of-differences statistic and crop
    both trace sets to those columns."""

    def __init__(self, poi_num=50):
        # Number of highest-scoring sample points to keep.
        self.poi_num = poi_num

    def execute(self, data):
        print("-------------------计算SOD兴趣点-------------------")
        sod_poi = self.sod_method(data.train_x, data.train_y)
        max_indices = sorted(range(len(sod_poi)), key=lambda i: sod_poi[i], reverse=True)[:self.poi_num]
        data.train_x = data.train_x[:, max_indices]
        data.test_x = data.test_x[:, max_indices]

    @staticmethod
    def sod_method(profiling_traces, profiling_labels):
        """Per sample point, sum of squared pairwise differences of the per-class mean traces."""
        classes = np.unique(profiling_labels)
        category_for_traces = [[] for _ in range(len(classes))]

        for i in range(len(profiling_traces)):
            label = profiling_labels[i]
            # Map the label to its position in `classes` so non-contiguous label
            # sets (e.g. {0, 3, 5} after SelectClass) work; the original indexed
            # by the raw label, which crashes or misgroups in that case.  This
            # also matches Ttest.ttest_method.
            classes_index = np.where(classes == label)[0][0]
            category_for_traces[classes_index].append(profiling_traces[i])

        mean_traces = [np.mean(category_for_traces[label], axis=0) for label in range(len(classes))]

        tem = []
        for i in range(len(classes)):
            for j in range(i + 1, len(classes), 1):
                tem.append(np.square(mean_traces[i] - mean_traces[j]))

        return np.sum(tem, axis=0)


class Ttest(DataProcessingStep):
    """Select points of interest via a pairwise t-test statistic and crop both
    trace sets to those columns."""

    def __init__(self, poi_num=50):
        # Number of highest-scoring sample points to keep.
        self.poi_num = poi_num

    def execute(self, data):
        print("-------------------计算Ttest兴趣点-------------------")
        scores = self.ttest_method(data.train_x, data.train_y)
        top = sorted(range(len(scores)), key=lambda idx: scores[idx], reverse=True)[:self.poi_num]
        data.train_x = data.train_x[:, top]
        data.test_x = data.test_x[:, top]

    @staticmethod
    def ttest_method(profiling_traces, profiling_labels):
        """Per sample point, sum over class pairs of (mean_i - mean_j)^2 / (v_i + v_j)."""
        classes = np.unique(profiling_labels)
        grouped = [[] for _ in classes]
        for trace, label in zip(profiling_traces, profiling_labels):
            # Group traces by the label's position in `classes`.
            grouped[np.where(classes == label)[0][0]].append(trace)

        means = [np.mean(group, axis=0) for group in grouped]

        # Per-class spread term: sum of squared deviations over n^2.
        v = [np.sum(np.square(grouped[c] - means[c]), axis=0) / np.square(len(grouped[c]))
             for c in range(len(classes))]

        terms = []
        for i in range(len(classes) - 1):
            for j in range(i + 1, len(classes)):
                if (v[i] + v[j] == 0).any():
                    print("Ttest V 为0")
                else:
                    terms.append(np.square(means[i] - means[j]) / (v[i] + v[j]))

        return np.sum(terms, axis=0)


class NormalizeDataStep(DataProcessingStep):
    """Standardize traces: fit a StandardScaler on the training set and apply it to both sets."""

    def execute(self, data):
        print("-------------------数据归一化-------------------")
        normalizer = StandardScaler()
        # Fit on training data only; reuse the same statistics for the test set.
        data.train_x = normalizer.fit_transform(data.train_x)
        data.test_x = normalizer.transform(data.test_x)


class plt_mean_var_Step(DataProcessingStep):
    """Plot kernel-density estimates of the first trace point for label classes 1
    and 2, with their mean/variance in the legend."""

    def execute(self, data):
        print("-------------------扩展维度-------------------")
        first_point = data.train_x[:, 0]

        classes = np.unique(data.train_y)
        grouped = [[] for _ in range(len(classes))]
        # Bucket the first-point values by raw label value.
        for value, label in zip(first_point, data.train_y):
            grouped[label].append(value)
        grouped = [np.array(bucket) for bucket in grouped]

        import matplotlib.pyplot as plt
        import seaborn as sns
        mean1, var1 = np.mean(grouped[1]), np.var(grouped[1])
        mean2, var2 = np.mean(grouped[2]), np.var(grouped[2])
        plt.figure(figsize=(8, 6))
        sns.kdeplot(grouped[1], color='blue', label='y[j-1:0]=1 Mean: %.2f, Var: %.2f' % (mean1, var1))
        sns.kdeplot(grouped[2], color='green', label='y[j-1:0]=2 Mean: %.2f, Var: %.2f' % (mean2, var2))
        plt.xlabel('Energy Value')
        plt.ylabel('Frequency')
        plt.legend()
        plt.tight_layout()
        plt.show()


class ExtendedDimensionStep(DataProcessingStep):
    """Insert a trailing axis at position 2 so traces fit channel-last model inputs."""

    def execute(self, data):
        print("-------------------扩展维度-------------------")
        data.train_x = np.expand_dims(data.train_x, 2)
        data.test_x = np.expand_dims(data.test_x, 2)


class PccStep(DataProcessingStep):
    """Plot the Pearson correlation between training labels and every trace point."""

    def __init__(self, title="PCC"):
        # Title shown on the plot.
        self.title = title

    def execute(self, data):
        print("-------------------PCC-------------------")
        # Row 0, columns 1..n of the correlation matrix: label vs each point.
        pcc = np.corrcoef(data.train_y, data.train_x.T)[0, 1:]

        from matplotlib import pyplot as plt
        plt.plot(pcc)
        plt.xlabel("Points")
        plt.ylabel("PCC")
        plt.title(self.title)
        plt.show()


class TSNEStep(DataProcessingStep):
    """Dump the label-{0,1,2,3} subset of the training data to .npy files and
    draw its t-SNE projection."""

    def __init__(self, title):
        # Used in both the output filenames and the plot.
        self.title = title

    def execute(self, data):
        print("-------------------TSNE降维-------------------")
        # Indices of all samples whose label is 0, 1, 2 or 3, in that label order.
        selected = np.concatenate([np.where(data.train_y == hm)[0] for hm in [0, 1, 2, 3]])
        np.save(r"C:\Users\zhanghaojin\Desktop\XZ_SCA\test\tsne-{}-Y0_0123-x.npy".format(self.title), data.train_x[selected])
        np.save(r"C:\Users\zhanghaojin\Desktop\XZ_SCA\test\tsne-{}-Y0_0123-y.npy".format(self.title), data.train_y[selected])
        self.tsne_plot(data.train_x[selected], data.train_y[selected], self.title)


class PCAStep(DataProcessingStep):
    """Fit PCA on train+test traces jointly and project both onto the leading components."""

    def __init__(self, poi_num=32):
        # Number of principal components to keep.
        self.n_components = poi_num

    def execute(self, data):
        print("-------------------PCA降维-------------------")
        projector = PCA(n_components=self.n_components)
        # Fit on the stacked train+test traces, then project each set.
        projector.fit(np.vstack([data.train_x, data.test_x]))
        data.train_x = projector.transform(data.train_x)
        data.test_x = projector.transform(data.test_x)


class MixupTaStep(DataProcessingStep):
    """Augment the training set with mixup: convex combinations of random trace pairs."""

    def execute(self, data):
        print("-------------------MixupTa-------------------")
        # Generate half as many synthetic samples as there are real ones.
        index = self.extract_unique_index_pairs(data.train_x.shape[0], int(data.train_x.shape[0] * 0.5))
        self.mixup(index, data)

    @staticmethod
    def mixup(index, data, lamda=0.9):
        """Append lamda-weighted blends of trace pairs; each blend inherits the
        label of the pair's first trace."""
        mixed_x = []
        mixed_y = []
        for a, b in index:
            mixed_x.append(lamda * data.train_x[a] + (1 - lamda) * data.train_x[b])
            mixed_y.append(data.train_y[a])

        data.train_x = np.concatenate((data.train_x, np.array(mixed_x)), axis=0)
        data.train_y = np.concatenate((data.train_y, np.array(mixed_y)), axis=0)

    @staticmethod
    def extract_unique_index_pairs(arr_len, repetitions):
        """Return `repetitions` distinct ordered pairs of distinct indices from range(arr_len).

        Raises ValueError when the request cannot be satisfied — the original
        looped forever if `repetitions` exceeded the number of possible pairs
        (or if arr_len < 2).
        """
        max_pairs = arr_len * (arr_len - 1)  # count of ordered pairs with distinct members
        if repetitions > max_pairs:
            raise ValueError(
                "cannot draw {} unique index pairs from {} indices".format(repetitions, arr_len))
        pairs = set()  # set guarantees uniqueness of sampled pairs
        while len(pairs) < repetitions:
            pairs.add(tuple(random.sample(range(arr_len), 2)))
        return list(pairs)


class ShuffledTrainStep(DataProcessingStep):
    """Shuffle the training set with a fixed seed so runs are reproducible."""

    def execute(self, data):
        print("-------------------打乱训练集-------------------")
        np.random.seed(42)  # fixed seed keeps the shuffle reproducible
        order = np.random.permutation(len(data.train_x))
        data.train_x = data.train_x[order]
        data.train_y = data.train_y[order]


class TrainDPAStep(DataProcessingStep):
    """Correlation DPA: for every key-byte guess, correlate hypothetical S-box
    output Hamming weights with each trace point and save the correlation plot."""

    def __init__(self, bit_length=8):
        # Mask for the low `bit_length` bits (255 for the default byte-wide attack).
        self.max_value = (2 ** bit_length) - 1
        self.train_plaintext = None
        self.test_plaintext = None
        self.key = None
        # Precomputed Hamming weight per value (not used by execute, kept for callers).
        self.hw_array = [(bin(int(value) & self.max_value).count('1')) for value in range(self.max_value + 1)]
        # Standard AES S-box lookup table.
        self.AES_Sbox = np.array([
            0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
            0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
            0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
            0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
            0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
            0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
            0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
            0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
            0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
            0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
            0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
            0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
            0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
            0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
            0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
            0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16
        ])

    def execute(self, data):
        print("-------------------训练DPA模型-------------------")
        self.load_plain_and_key(data)
        # Iterate over ALL key guesses 0..max_value inclusive; the original
        # range(self.max_value) skipped the last candidate (e.g. key 255).
        for key in range(self.max_value + 1):
            # Hypothetical intermediate: S-box(key XOR plaintext byte).
            v = self.AES_Sbox[key ^ self.train_plaintext]
            h = [(bin(int(value) & self.max_value).count('1')) for value in v]
            correlations = np.corrcoef(h, data.train_x, rowvar=False)[0, 1:]
            self.img_line(correlations, path=os.path.join(data.output_path, "Img", data.model_id + "_" + str(key) + ".png"))

    def load_plain_and_key(self, data):
        """Read plaintext bytes and the true key byte (index 2) from the ASCAD file."""
        try:
            in_file = h5py.File("Dataset/ASCAD/ASCAD.h5", "r")
            self.train_plaintext = in_file['Profiling_traces/metadata']["plaintext"][:, 2]
            self.test_plaintext = in_file['Attack_traces/metadata']["plaintext"][:, 2]
            self.key = in_file['Attack_traces/metadata']["key"][0, 2]
        except OSError:
            # h5py raises OSError for a missing file; the original caught
            # FileExistsError, which never fires here.
            print("找不到数据，检查input ...", data.data_path)
            sys.exit(-1)


class TrainTAStep(DataProcessingStep):
    """Fit a template-attack model and pickle it to model_C/<model_id>.pickle."""

    def execute(self, data):
        print("-------------------训练TA模型-------------------")
        ta_model = TA()
        ta_model.set_para(data)
        model_file = os.path.join(data.output_path, "model_C", data.model_id + ".pickle")
        with open(model_file, "wb") as fh:
            pickle.dump(ta_model, fh)


class TrainDLStep(DataProcessingStep):
    """Train a deep-learning model; with embedding=True, replace the traces with
    the trained model's score representation."""

    def __init__(self, get_model, loss=None, optimizer=None, learning_rate=5e-4, epochs=20, batch_size=32, output_shape=32, embedding=False):
        # Factory that builds the Keras model.
        self.get_model = get_model
        # Fall back to Keras defaults when no loss/optimizer class is supplied
        # (tf is already imported at module level).
        self.loss = tf.keras.losses.SparseCategoricalCrossentropy if loss is None else loss
        self.optimizer = tf.keras.optimizers.Adam if optimizer is None else optimizer
        self.learning_rate = learning_rate
        self.epochs = epochs
        self.batch_size = batch_size
        self.output_shape = output_shape
        self.embedding = embedding

    def execute(self, data):
        print("-------------------训练DL模型-------------------")
        model = DL()
        model.train(
            data=data,
            get_model=self.get_model,
            loss=self.loss,
            learning_rate=self.learning_rate,
            optimizer=self.optimizer,
            epochs=self.epochs,
            batch_size=self.batch_size,
            output_shape=self.output_shape,
            embedding=self.embedding,
        )
        if self.embedding:
            # Swap the raw traces for the embedding model's scores.
            data.train_x = model.get_score(data.train_x)
            data.test_x = model.get_score(data.test_x)


class TestTAStep(DataProcessingStep):
    """Load the pickled TA model and print its accuracy on the test set."""

    def execute(self, data):
        print("-------------------测试TA模型-------------------")
        model_file = os.path.join(data.output_path, "model_C", data.model_id + ".pickle")
        with open(model_file, "rb") as fh:
            ta_model = pickle.load(fh)
            scores = ta_model.get_score(data.test_x)
            predictions = np.argmax(scores, axis=1)
            print("Test Accuracy:", np.mean(predictions == data.test_y))


class TestDLStep(DataProcessingStep):
    """Load the saved Keras model and print its accuracy on the test set."""

    def execute(self, data):
        print("-------------------测试DL模型-------------------")
        model_path = os.path.join(data.output_path, "model_C", data.model_id + ".h5")
        dl_model = tf.keras.models.load_model(model_path)
        predictions = np.argmax(dl_model.predict(data.test_x), axis=1)
        print("Test low Accuracy:", np.mean(predictions == data.test_y))



class SelectClass(DataProcessingStep):
    """Keep only the training samples whose label is 0, 3 or 5."""

    def execute(self, data):
        print("-------------------选择分类-------------------")
        # Build the membership mask once (the original recomputed the same
        # three-way comparison for train_x and again for train_y).
        keep = np.isin(data.train_y, (0, 3, 5))
        data.train_x = data.train_x[keep]
        data.train_y = data.train_y[keep]


class Test(DataProcessingStep):
    """Print the fraction of predictions whose absolute error stays below
    several power-of-two bounds."""

    def execute(self, data):
        print("-------------------测试-------------------")
        error = np.abs(data.test_y32 - data.test_z)
        total = len(error)
        for power in (6, 5, 4, 3):
            print("{},".format(power), np.sum(error < 2 ** power) / total)
