from loguru import logger
import torch
import math
import torch.nn.functional as F
from torch import nn

# from loguru import logger


class Normalize(nn.Module):
    """L-p normalization along dim 1.

    Divides each slice along dimension 1 by its p-norm, so the result
    has unit p-norm along that dimension.
    """

    def __init__(self, power=2):
        super().__init__()
        self.power = power

    def forward(self, x):
        """Return `x` rescaled to unit p-norm along dim 1."""
        p = self.power
        scale = x.pow(p).sum(1, keepdim=True).pow(1.0 / p)
        return x.div(scale)


class LearnedPositionEncoding(nn.Embedding):
    """Learned positional encoding backed by an nn.Embedding table.

    The embedding weight matrix serves as a (max_len, d_model) table of
    position vectors; the first x.size(0) rows are broadcast-added to x
    and dropout is applied.

    NOTE(review): callers in this file pass batch-first tensors, so
    x.size(0) is the batch dimension and only the leading rows of the
    table are used — confirm this is the intended layout.
    """

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super().__init__(max_len, d_model)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x):
        # Detached position rows, shaped (x.size(0), 1, d_model) for broadcasting.
        positions = self.weight.data[:x.size(0)].unsqueeze(1)
        return self.dropout(x + positions)


class CLSNetV2(torch.nn.Module):
    """Hierarchical LSTM classifier, v2.

    Basic blocks are encoded by a bidirectional LSTM and a linear
    projection, block embeddings are averaged into one embedding per
    function, a second LSTM runs over the stacked function embeddings,
    and an MLP head produces the class scores.
    """

    def __init__(self, in_d=200, hiden=200, num_layers=2, out_classes=9, mid=50, is_norm=False):
        super().__init__()
        # Block-level encoder (bidirectional, batch-first).
        self.lstm_layer = torch.nn.LSTM(
            input_size=in_d,
            hidden_size=hiden,
            num_layers=num_layers,
            bidirectional=True,
            batch_first=True,
        )
        # Function-level encoder over the per-function embeddings.
        self.lstm_layer2 = torch.nn.LSTM(
            input_size=hiden,
            hidden_size=hiden,
            num_layers=num_layers,
            bidirectional=True,
            batch_first=True,
        )
        self.linear = torch.nn.Linear(hiden, hiden)
        # self.linear2 = torch.nn.Linear(mid, out_classes)
        self.max_length = 700  # not referenced in this class; kept for parity with siblings
        self.is_norm = is_norm
        self.norm = Normalize(2)
        self.mlp = MLP(in_c=hiden, out_c=out_classes)

    def forward(self, x, device):
        """Classify nested block data.

        Args:
            x: nested structure function -> block groups -> block tensors;
               each block tensor is assumed shaped (1, seq_len, in_d) from
               the batch-first LSTM usage — TODO confirm against callers.
            device: target device for the block tensors.

        Returns:
            (1, out_classes) logits, or None when `x` is empty.
        """
        out = []
        for idx, func in enumerate(x):
            mid_out = []
            for blocks in func:
                tmp_out = []

                for block in blocks:
                    # print(block.shape)
                    # hn[-1]: final hidden state of the top LSTM layer.
                    o, (hn, cn) = self.lstm_layer(block.to(device))
                    pred = self.linear(hn[-1])
                    if self.is_norm:
                        pred = self.norm(pred)

                    # Running sum of block embeddings (first iteration
                    # replaces the empty list with the tensor).
                    if len(tmp_out) == 0:
                        tmp_out = pred
                    else:
                        tmp_out += pred
                # NOTE(review): divides by len(func) (number of block
                # groups), not len(blocks); also raises TypeError when
                # `blocks` is empty, since tmp_out is still a list —
                # confirm intended.
                tmp_out /= len(func)

                mid_out.append(tmp_out)

            if idx == 0:
                out = [torch.mean(torch.stack(mid_out, dim=1).squeeze(0), dim=0)]
            else:
                out += [torch.mean(torch.stack(mid_out, dim=1).squeeze(0), dim=0)]
        if len(out) == 0:
            return None
        # out /= len(x)
        out = torch.stack(out)
        # Aggregate the per-function embeddings with the second LSTM.
        o, (hn, cn) = self.lstm_layer2(out.unsqueeze(0))
        pred = self.mlp(hn[-1])
        return pred


class CLSNet(torch.nn.Module):
    """Hierarchical LSTM classifier over basic-block sequences.

    Basic blocks are encoded by a bidirectional LSTM and averaged into a
    per-function embedding. Optional branches concatenate tensor-shape
    features (has_shape), an LSTM-encoded weight-shape embedding
    (has_weight_info) and a value-trace embedding (has_value) before the
    MLP classification head.
    """

    # Sentinel meaning "argument omitted" in forward(). This lets us
    # build fresh default tensors per call; the previous signature used
    # tensor default arguments, which are evaluated once at definition
    # time and shared across every call.
    _MISSING = object()

    def __init__(self,
                 in_d=200,
                 hiden=200,
                 num_layers=2,
                 out_classes=9,
                 mid=50,
                 is_norm=False,
                 has_shape=False,
                 has_value=False,
                 has_weight_info=False):
        super().__init__()
        # Block-level encoder (bidirectional, batch-first).
        self.lstm_layer = torch.nn.LSTM(
            input_size=in_d,
            hidden_size=hiden,
            num_layers=num_layers,
            bidirectional=True,
            batch_first=True,
        )
        self.linear = torch.nn.Linear(hiden, hiden)
        # self.linear2 = torch.nn.Linear(mid, out_classes)
        self.max_length = 700
        self.is_norm = is_norm
        self.norm = Normalize(2)
        self.mlp = MLP(in_c=hiden, out_c=out_classes)
        self.has_shape = has_shape
        self.has_value = has_value
        self.has_weight_info = has_weight_info

        # Width of the concatenated feature vector fed to the MLP head.
        in_channel = hiden
        if has_shape:
            in_channel += 12 * 5 * 2  # width of the in/out shape descriptor
        if has_weight_info:
            in_channel += hiden
            self.weight_lstm = torch.nn.LSTM(
                input_size=12 * 5,
                hidden_size=hiden,
                num_layers=num_layers,
                bidirectional=True,
                batch_first=True,
            )
            self.weight_linear = torch.nn.Linear(hiden, hiden)
        if has_value:
            in_channel += hiden
            self.trace_linear = torch.nn.Linear(hiden, hiden)
            self.value_lstm = torch.nn.LSTM(
                input_size=in_d,
                hidden_size=hiden,
                num_layers=num_layers,
                bidirectional=True,
                batch_first=True,
            )

        # Rebuild the head with the final feature width (replaces the
        # provisional MLP created above).
        self.mlp = MLP(in_c=in_channel, out_c=out_classes)

    def forward(self, x, device, shape_info=_MISSING, value_data=_MISSING):
        """Classify one sample.

        Args:
            x: nested structure function -> block groups -> block tensors,
               assumed shaped (1, seq_len, in_d) from the batch-first LSTM
               usage — TODO confirm against callers.
            device: target device for the input tensors.
            shape_info: shape-descriptor tensor. When omitted, a zero
                vector of length 120 is used. NOTE(review): the shape
                branches index shape_info[:, ...], which needs a 2-D
                tensor — the 1-D default would fail there; confirm callers
                pass a (1, N) tensor whenever has_shape/has_weight_info
                is set.
            value_data: value-trace data mirroring the structure of `x`,
                or None to skip the value branch. When omitted, a zeros
                tensor is used if the model has a value branch.

        Returns:
            (1, out_classes) logits, or None when no embedding was produced.
        """
        # Build per-call defaults. The old tensor defaults were shared
        # across calls, and the zeros default for value_data made the
        # `value_data is None` branch unreachable — crashing on
        # self.value_lstm whenever the model was built with
        # has_value=False (the constructor default).
        if shape_info is CLSNet._MISSING:
            shape_info = torch.tensor([0] * 120)
        if value_data is CLSNet._MISSING:
            # Only take the value path by default when the value branch exists.
            value_data = torch.zeros([1, 10, 15, 200]) if self.has_value else None

        out = torch.tensor([])
        if value_data is None:  # no value traces: encode the blocks only
            for idx, func in enumerate(x):
                mid_out = []
                for blocks in func:
                    tmp_out = []

                    for block in blocks:
                        o, (hn, cn) = self.lstm_layer(block.to(device))
                        pred = self.linear(hn[-1])
                        if self.is_norm:
                            pred = self.norm(pred)

                        # Running sum of block embeddings.
                        if len(tmp_out) == 0:
                            tmp_out = pred
                        else:
                            tmp_out += pred
                    # NOTE(review): divides by len(func) (block-group
                    # count), not len(blocks) — kept as-is; confirm.
                    tmp_out /= len(func)

                    mid_out.append(tmp_out)

                if idx == 0:
                    out = torch.mean(torch.stack(mid_out, dim=1).squeeze(0), dim=0)
                else:
                    out += torch.mean(torch.stack(mid_out, dim=1).squeeze(0), dim=0)
        else:
            # Value branch: each block comes with a value trace that is
            # encoded by a dedicated LSTM (value embedding).
            trace_out = torch.tensor([])
            for idx, (func, value_trace) in enumerate(zip(x, value_data)):
                mid_out = []
                mid_trace_out = []
                # One value trace per block; a function has several blocks.
                for blocks, traces in zip(func, value_trace):
                    tmp_out = torch.tensor([])
                    tmp_trace_out = torch.tensor([])
                    # Encode each block and its trace with the two bi-LSTMs.
                    for block, trace in zip(blocks, traces):
                        o, (hn, cn) = self.lstm_layer(block.to(device))
                        _, (t_hn, cn) = self.value_lstm(trace.to(device))

                        pred = self.linear(hn[-1])
                        tr_pred = self.trace_linear(t_hn[-1])

                        if self.is_norm:
                            pred = self.norm(pred)

                        if len(tmp_out) == 0:
                            tmp_out = pred
                        else:
                            tmp_out += pred
                        tmp_trace_out = tr_pred if len(tmp_trace_out) == 0 else tr_pred + tmp_trace_out

                    if isinstance(tmp_out, list):
                        continue
                    tmp_out /= len(func)
                    tmp_trace_out /= len(func)
                    mid_out.append(tmp_out)
                    mid_trace_out.append(tmp_trace_out)

                if idx == 0:  # first function initialises the accumulators
                    trace_out = torch.mean(torch.stack(mid_trace_out, dim=1).squeeze(0), dim=0)
                    out = torch.mean(torch.stack(mid_out, dim=1).squeeze(0), dim=0)
                else:  # later functions are summed on top
                    trace_out += torch.mean(torch.stack(mid_trace_out, dim=1).squeeze(0), dim=0)
                    out += torch.mean(torch.stack(mid_out, dim=1).squeeze(0), dim=0)

        if len(out) == 0:
            return None
        out /= len(x)

        if self.has_shape:  # append the in/out shape descriptor
            in_out_shape_info = shape_info[:, :12 * 5 * 2]
            out = torch.cat([out, in_out_shape_info.squeeze(0).float().to(device)])

        if self.has_weight_info:  # append the weight-shape embedding
            weight_shape_info = shape_info[:, 12 * 5 * 2:]
            weight_shape_info = weight_shape_info.reshape(1, -1, 12 * 5)
            o, (hn, cn) = self.weight_lstm(weight_shape_info.float().to(device))
            pred = self.weight_linear(hn[-1])
            out = torch.cat([out, pred.squeeze(0)])

        if self.has_value:  # append the averaged value-trace embedding
            trace_out /= len(x)
            out = torch.cat([out, trace_out])

        pred = self.mlp(out.unsqueeze(0))
        return pred


class CLSNet_Attention(torch.nn.Module):
    """LSTM + self-attention block classifier.

    Each basic block is run through a bidirectional LSTM; scaled
    dot-product self-attention pools the LSTM outputs into one block
    embedding, block embeddings are averaged per function, and an MLP
    head produces the class scores.
    """

    def __init__(self, in_d=200, hiden=200, num_layers=2, out_classes=9, mid=50, is_norm=False):
        super().__init__()
        self.lstm_layer = torch.nn.LSTM(
            input_size=in_d,
            hidden_size=hiden,
            num_layers=num_layers,
            bidirectional=True,
            batch_first=True,
        )
        # The bidirectional LSTM emits 2 * hiden features per step.
        self.linear = torch.nn.Linear(2 * hiden, hiden)
        self.max_length = 700
        self.is_norm = is_norm
        self.norm = Normalize(2)
        self.dropout = torch.nn.Dropout(0.5)
        self.mlp = MLP(in_c=hiden, out_c=out_classes)

    def attention(self, x, query, mask=None):
        """Scaled dot-product attention of `query` over `x`.

        Returns the context (summed over the sequence axis) and the
        attention weight matrix. `mask` is accepted for interface
        compatibility but not used.
        """
        scale = math.sqrt(query.size(-1))
        scores = torch.matmul(query, x.transpose(1, 2)) / scale
        weights = F.softmax(scores, dim=-1)
        context = torch.matmul(weights, x).sum(1)

        return context, weights

    def forward(self, x, device):
        """Classify nested block data; returns (1, out_classes) logits
        or None when `x` is empty."""
        out = []
        for idx, func in enumerate(x):
            group_embeddings = []
            for blocks in func:
                acc = []
                for block in blocks:
                    states, _ = self.lstm_layer(block.to(device))
                    # Query is a dropped-out copy of the LSTM outputs.
                    context, _ = self.attention(states, self.dropout(states))
                    emb = self.linear(context)
                    if self.is_norm:
                        emb = self.norm(emb)

                    # Running sum of block embeddings (first iteration assigns).
                    acc = emb if len(acc) == 0 else acc + emb
                # Average by the number of block groups in the function.
                acc /= len(func)
                group_embeddings.append(acc)

            func_embedding = torch.mean(torch.stack(group_embeddings, dim=1).squeeze(0), dim=0)
            out = func_embedding if idx == 0 else out + func_embedding
        if len(out) == 0:
            return None
        out /= len(x)
        pred = self.mlp(out.unsqueeze(0))
        return pred


class MLP(torch.nn.Module):
    """Two-layer perceptron: in_c -> mid_c (ReLU) -> out_c."""

    def __init__(self, in_c=128, out_c=10, mid_c=64):
        super().__init__()
        self.fc1 = torch.nn.Linear(in_c, mid_c)
        self.fc2 = torch.nn.Linear(mid_c, out_c)

    def forward(self, x):
        """Apply fc1 + ReLU, then fc2."""
        hidden = torch.relu(self.fc1(x))
        return self.fc2(hidden)


class CLSNetV3(torch.nn.Module):
    """Hierarchical LSTM classifier, v3.

    Like CLSNet, but each block embedding is taken from the last time
    step of the LSTM output sequence (o[-1][-1], 2 * hiden features)
    rather than the final hidden state, so the projections and the MLP
    head are sized in multiples of 2 * hiden.
    """

    def __init__(self,
                 in_d=200,
                 hiden=200,
                 num_layers=2,
                 out_classes=9,
                 mid=50,
                 is_norm=False,
                 has_shape=False,
                 has_value=False,
                 has_weight_info=False):
        super().__init__()
        # Block-level encoder (bidirectional, batch-first).
        self.lstm_layer = torch.nn.LSTM(
            input_size=in_d,
            hidden_size=hiden,
            num_layers=num_layers,
            bidirectional=True,
            batch_first=True,
        )
        self.linear = torch.nn.Linear(2 * hiden, 2 * hiden)
        # self.linear2 = torch.nn.Linear(mid, out_classes)
        self.max_length = 700
        self.is_norm = is_norm
        self.norm = Normalize(2)
        self.mlp = MLP(in_c=2 * hiden, out_c=out_classes)
        self.has_shape = has_shape
        self.has_value = has_value
        self.has_weight_info = has_weight_info

        # Width of the concatenated feature vector fed to the MLP head.
        in_channel = 2 * hiden
        if has_shape:
            in_channel += 12 * 5 * 2  # width of the in/out shape descriptor
        if has_weight_info:
            in_channel += 2 * hiden
            self.weight_lstm = torch.nn.LSTM(
                input_size=12 * 5,
                hidden_size=hiden,
                num_layers=num_layers,
                bidirectional=True,
                batch_first=True,
            )
            self.weight_linear = torch.nn.Linear(2 * hiden, hiden)
        if has_value:
            in_channel += hiden
            self.trace_linear = torch.nn.Linear(2 * hiden, hiden)
            self.value_lstm = torch.nn.LSTM(
                input_size=in_d,
                hidden_size=hiden,
                num_layers=num_layers,
                bidirectional=True,
                batch_first=True,
            )
        # Rebuild the head with the final feature width (replaces the
        # provisional MLP created above).
        self.mlp = MLP(in_c=in_channel, out_c=out_classes)

    def forward(self, x, device, shape_info=None, value_data=None):
        """Classify one sample.

        Args:
            x: nested structure function -> block groups -> block tensors,
               assumed shaped (1, seq_len, in_d) from the batch-first LSTM
               usage — TODO confirm against callers.
            device: target device for the input tensors.
            shape_info: optional 2-D shape-descriptor tensor (required
                when has_shape or has_weight_info is set).
            value_data: optional value-trace data mirroring `x`; None
                selects the block-only path.

        Returns:
            (1, out_classes) logits, or None when no embedding was produced.
        """
        out = []
        if value_data is None:
            for idx, func in enumerate(x):  # functions
                mid_out = []
                for blocks in func:  # block groups
                    tmp_out = []
                    for block in blocks:  # asm sequences within the block
                        o, (_, cn) = self.lstm_layer(block.to(device))
                        # o[-1][-1]: last time step of the output sequence.
                        pred = self.linear(o[-1][-1].unsqueeze(0))
                        if self.is_norm:
                            pred = self.norm(pred)
                        if len(tmp_out) == 0:
                            tmp_out = pred
                        else:
                            tmp_out += pred
                    # NOTE(review): divides by len(func), not len(blocks);
                    # raises TypeError when `blocks` is empty — confirm.
                    tmp_out /= len(func)
                    mid_out.append(tmp_out)

                if idx == 0:
                    out = torch.mean(torch.stack(mid_out, dim=1).squeeze(0), dim=0)
                else:
                    out += torch.mean(torch.stack(mid_out, dim=1).squeeze(0), dim=0)
        else:
            trace_out = []
            for idx, (func, value_trace) in enumerate(zip(x, value_data)):  # files
                mid_out = []
                mid_trace_out = []
                for blocks, traces in zip(func, value_trace):  # functions
                    tmp_out = []
                    tmp_trace_out = []
                    for block, trace in zip(blocks, traces):  # blocks
                        o, (hn, cn) = self.lstm_layer(block.to(device))
                        t_o, (t_hn, cn) = self.value_lstm(trace.to(device))
                        pred = self.linear(o[-1][-1].unsqueeze(0))
                        tr_pred = self.trace_linear(t_o[-1][-1].unsqueeze(0))

                        if self.is_norm:
                            pred = self.norm(pred)

                        if len(tmp_out) == 0:
                            tmp_out = pred
                        else:
                            tmp_out += pred
                        tmp_trace_out = tr_pred if len(tmp_trace_out) == 0 else tr_pred + tmp_trace_out

                    tmp_out /= len(func)
                    tmp_trace_out /= len(func)
                    mid_out.append(tmp_out)
                    mid_trace_out.append(tmp_trace_out)

                if idx == 0:
                    trace_out = torch.mean(torch.stack(mid_trace_out, dim=1).squeeze(0), dim=0)
                    # NOTE(review): original author asked whether this mean
                    # collapses to a single number — it yields a vector.
                    out = torch.mean(torch.stack(mid_out, dim=1).squeeze(0), dim=0)
                else:
                    trace_out += torch.mean(torch.stack(mid_trace_out, dim=1).squeeze(0), dim=0)
                    out += torch.mean(torch.stack(mid_out, dim=1).squeeze(0), dim=0)

        if len(out) == 0:
            return None
        # Average over the functions in the sample (original marked "???").
        out /= len(x)

        if self.has_shape:  # append the in/out shape descriptor
            in_out_shape_info = shape_info[:, :12 * 5 * 2]
            out = torch.cat([out, in_out_shape_info.squeeze(0).float().to(device)])

        if self.has_weight_info:  # append the weight-shape embedding
            weight_shape_info = shape_info[:, 12 * 5 * 2:]
            weight_shape_info = weight_shape_info.reshape(1, -1, 12 * 5)
            o, (hn, cn) = self.weight_lstm(weight_shape_info.float().to(device))
            pred = self.weight_linear(hn[-1])
            out = torch.cat([out, pred.squeeze(0)])

        if self.has_value:  # append the averaged value-trace embedding
            trace_out /= len(x)
            out = torch.cat([out, trace_out])

        pred = self.mlp(out.unsqueeze(0))
        return pred


class CLSNetV3Attr(torch.nn.Module):
    """CLSNetV3 variant that fuses a type vector into the tokens.

    Each block token sequence and each value sequence is concatenated
    with a repeated type vector, passed through a type-specific MLP,
    encoded by a bidirectional LSTM, and the averaged block / value
    embeddings are concatenated for the MLP head.

    NOTE(review): forward() indexes value_data[0] and always uses
    self.value_lstm_layer (created only when has_value=True), so it
    effectively requires has_value=True and non-None value_data. The
    type MLPs are sized hidden + type_number, which matches the token
    width only when in_d == hidden. Confirm against callers.
    """

    def __init__(self,
                 in_d=200,
                 hidden=200,
                 num_layers=2,
                 out_classes=9,
                 mid=50,
                 is_norm=False,
                 has_shape=False,
                 has_value=False,
                 has_weight_info=False):
        super().__init__()
        # Block-level encoder (bidirectional, batch-first).
        self.lstm_layer = torch.nn.LSTM(
            input_size=in_d,
            hidden_size=hidden,
            num_layers=num_layers,
            bidirectional=True,
            batch_first=True,
        )
        self.block_linear = torch.nn.Linear(2 * hidden, 2 * hidden)
        # self.linear2 = torch.nn.Linear(mid, out_classes)
        self.max_length = 700
        self.is_norm = is_norm
        self.norm = Normalize(2)
        self.mlp = MLP(in_c=2 * hidden, out_c=out_classes)
        self.has_shape = has_shape
        self.has_value = has_value
        self.has_weight_info = has_weight_info

        # Width of the concatenated feature vector fed to the MLP head.
        in_channel = hidden
        if has_shape:
            in_channel += 12 * 5 * 2  # width of the in/out shape descriptor
        if has_value:
            in_channel += 2 * hidden
            self.value_linear = torch.nn.Linear(2 * hidden, hidden)
            self.value_lstm_layer = torch.nn.LSTM(
                input_size=in_d,
                hidden_size=hidden,
                num_layers=num_layers,
                bidirectional=True,
                batch_first=True,
            )

        # Width of the type vector appended to every token.
        type_number = 40
        self.type_mlp = MLP(in_c=hidden + type_number, out_c=hidden)
        self.type_mlp_value = MLP(in_c=hidden + type_number, out_c=hidden)
        # Rebuild the head with the final feature width (replaces the
        # provisional MLP created above).
        self.mlp = MLP(in_c=in_channel, out_c=out_classes)

    def forward(self, x, device, type_vector, shape_info=None, value_data=None):
        """Classify one sample; see class docstring for requirements.

        Args:
            x: nested structure whose first element holds the block
               tensors; only x[0] is used.
            device: target device for the input tensors.
            type_vector: per-sample type vector repeated across tokens.
            shape_info: optional 2-D shape descriptor (needed when
                has_shape is set).
            value_data: value sequences mirroring x; only value_data[0]
                is used (must not be None).

        Returns:
            (1, out_classes) logits, or None when `out` is empty.
        """
        blocks = x[0]
        value_seq_arr = value_data[0]

        block_embeddings = []
        value_seq_embeddings = []
        for block, value_seq in zip(blocks, value_seq_arr):
            # logger.debug(block.shape)
            block = block.squeeze(0)
            value_seq = value_seq.squeeze(0)

            block = block.to(device)
            value_seq = value_seq.to(device)
            # Fuse the type vector into each op/operand token.
            block_length = block.shape[1]
            type_embedding_matrix = type_vector.repeat(1, block_length).view(1, block_length, -1).to(device)

            concat_matrix = torch.cat([block, type_embedding_matrix], dim=2)
            type_specific_block_embeddings = self.type_mlp(concat_matrix)
            # Fuse the type vector into each value-sequence token.
            value_seq_length = value_seq.shape[1]
            type_embedding_matrix_value = type_vector.repeat(1, value_seq_length).view(1, value_seq_length,-1).to(device)

            concat_matrix_value = torch.cat([value_seq, type_embedding_matrix_value], dim=2)
            type_sepcific_value_seq_embeddings = self.type_mlp_value(concat_matrix_value)

            # Encode the fused token sequences; o[-1][-1] is the last
            # time step of each output sequence.
            o, (_, _) = self.lstm_layer(type_specific_block_embeddings.to(device))
            v_o, (_, _) = self.value_lstm_layer(type_sepcific_value_seq_embeddings.to(device))
            pred = self.block_linear(o[-1][-1].unsqueeze(0))
            v_pred = self.value_linear(v_o[-1][-1].unsqueeze(0))
            block_embeddings.append(pred)
            value_seq_embeddings.append(v_pred)

        block_embeddings = torch.stack(block_embeddings, dim=1).squeeze(0)
        value_seq_embeddings = torch.stack(value_seq_embeddings, dim=1).squeeze(0)

        # Average over blocks / value sequences.
        blocks_embedding = torch.mean(block_embeddings, dim=0)
        value_seqs_embedding = torch.mean(value_seq_embeddings, dim=0)

        out = torch.cat([blocks_embedding, value_seqs_embedding])

        if len(out) == 0:
            return None

        if self.has_shape:  # append the in/out shape descriptor
            in_out_shape_info = shape_info[:, :12 * 5 * 2]
            out = torch.cat([out, in_out_shape_info.squeeze(0).float().to(device)])

        pred = self.mlp(out.unsqueeze(0))
        return pred


# Transformer for the aggregation of all basicblock embeddings
class CLSNetV4(torch.nn.Module):
    """LSTM block encoder with a Transformer aggregator.

    Every basic block is embedded from the last output step of a
    bidirectional LSTM; the embeddings, prefixed with a learnable CLS
    token and given learned position encodings, are aggregated by a
    Transformer encoder whose CLS output feeds the MLP head. Optional
    branches append shape features and a value-trace embedding built by
    a parallel LSTM + Transformer pipeline.
    """

    def __init__(self,
                 in_d=200,
                 hiden=200,
                 num_layers=2,
                 out_classes=9,
                 mid=50,
                 is_norm=False,
                 has_shape=False,
                 has_value=False,
                 has_weight_info=False):
        super().__init__()
        # Submodule construction order is kept stable so seeded
        # parameter initialisation stays reproducible.
        self.lstm_layer = torch.nn.LSTM(
            input_size=in_d,
            hidden_size=hiden,
            num_layers=num_layers,
            bidirectional=True,
            batch_first=True,
        )
        self.hiden = hiden
        self.linear = torch.nn.Linear(2 * hiden, 2 * hiden)
        self.max_length = 700
        self.is_norm = is_norm
        self.norm = Normalize(2)
        self.mlp = MLP(in_c=2 * hiden, out_c=out_classes)
        self.has_shape = has_shape
        self.has_value = has_value
        self.has_weight_info = has_weight_info

        in_channel = 2 * hiden

        self.head_num = 4
        self.transformer_layer_num = 4
        block_layer = nn.TransformerEncoderLayer(d_model=2 * hiden, nhead=self.head_num, dim_feedforward=2 * hiden)
        self.transformer_encoder = nn.TransformerEncoder(block_layer, num_layers=self.transformer_layer_num)
        self.position_embeddings = LearnedPositionEncoding(d_model=2 * hiden, max_len=200)
        self.cls_token = nn.Parameter(torch.zeros(1, 1, self.hiden * 2))

        if has_shape:
            in_channel += 12 * 5 * 2
        if has_weight_info:
            in_channel += 2 * hiden
            self.weight_lstm = torch.nn.LSTM(
                input_size=12 * 5,
                hidden_size=hiden,
                num_layers=num_layers,
                bidirectional=True,
                batch_first=True,
            )
            self.weight_linear = torch.nn.Linear(2 * hiden, hiden)
        if has_value:
            in_channel += hiden
            self.trace_linear = torch.nn.Linear(2 * hiden, hiden)
            self.value_lstm = torch.nn.LSTM(
                input_size=in_d,
                hidden_size=hiden,
                num_layers=num_layers,
                bidirectional=True,
                batch_first=True,
            )
            value_layer = nn.TransformerEncoderLayer(d_model=hiden, nhead=self.head_num, dim_feedforward=hiden)
            self.value_transformer_encoder = nn.TransformerEncoder(value_layer,
                                                                   num_layers=self.transformer_layer_num)
            self.value_position_embeddings = LearnedPositionEncoding(d_model=hiden, max_len=200)
            self.value_cls_token = nn.Parameter(torch.zeros(1, 1, self.hiden))

        # Final head sized to the concatenated feature width.
        self.mlp = MLP(in_c=in_channel, out_c=out_classes)

    def forward(self, x, device, shape_info=None, value_data=None):
        """Classify the first function in `x`; returns (1, out_classes)
        logits, or None when no embedding was produced."""
        if value_data is None:
            block_embs = []
            for blocks in x[0]:
                for block in blocks:
                    states, (_, _) = self.lstm_layer(block.to(device))
                    # Embed each block from the last output step.
                    block_embs.append(self.linear(states[-1][-1].unsqueeze(0)))
            out = torch.stack(block_embs, dim=1)
        else:
            block_embs = []
            trace_embs = []
            for blocks, traces in zip(x[0], value_data[0]):
                for block, trace in zip(blocks, traces):
                    states, (_, _) = self.lstm_layer(block.to(device))
                    t_states, (_, _) = self.value_lstm(trace.to(device))
                    block_embs.append(self.linear(states[-1][-1].unsqueeze(0)))
                    trace_embs.append(self.trace_linear(t_states[-1][-1].unsqueeze(0)))
            trace_out = torch.stack(trace_embs, dim=1)
            out = torch.stack(block_embs, dim=1)

        if len(out) == 0:
            return None

        # Prepend the CLS token; cap the sequence at the position-table size.
        out = torch.cat([self.cls_token.to(device), out], dim=1)
        if out.shape[1] > 200:
            out = out[:, :200, :]

        out = self.position_embeddings(out)
        # Keep only the CLS position as the aggregate embedding.
        out = self.transformer_encoder(out)[:, 0, :].squeeze(0)

        if self.has_shape:
            in_out_shape_info = shape_info[:, :12 * 5 * 2]
            out = torch.cat([out, in_out_shape_info.squeeze(0).float().to(device)])

        if self.has_value:
            trace_out = torch.cat([self.value_cls_token.to(device), trace_out], dim=1)
            if trace_out.shape[1] > 200:
                trace_out = trace_out[:, :200, :]
            trace_out = self.value_position_embeddings(trace_out)
            trace_out = self.value_transformer_encoder(trace_out)[:, 0, :].squeeze(0)
            out = torch.cat([out, trace_out])

        pred = self.mlp(out.unsqueeze(0))
        return pred


# Transformer based CLSNet, but we use mean operation to obtain basic block embeddings.
class CLSNetV5(torch.nn.Module):
    """Transformer classifier over mean-pooled block embeddings.

    Instead of an LSTM, each basic block is embedded by mean-pooling its
    token vectors and projecting; the embeddings, prefixed with a CLS
    token and given learned position encodings, are aggregated by a
    Transformer encoder whose CLS output feeds the MLP head.
    """

    def __init__(self,
                 in_d=200,
                 hiden=200,
                 num_layers=2,
                 out_classes=9,
                 mid=50,
                 is_norm=False,
                 has_shape=False,
                 has_value=False,
                 has_weight_info=False):
        super().__init__()

        # Submodule construction order is kept stable so seeded
        # parameter initialisation stays reproducible.
        self.hiden = hiden
        # NOTE(review): projects hiden -> 2*hiden but is applied to
        # mean-pooled in_d-wide tokens, so this works only when
        # in_d == hiden — confirm against callers.
        self.linear = torch.nn.Linear(hiden, 2 * hiden)
        self.max_length = 700
        self.is_norm = is_norm
        self.norm = Normalize(2)
        self.mlp = MLP(in_c=2 * hiden, out_c=out_classes)
        self.has_shape = has_shape
        self.has_value = has_value
        self.has_weight_info = has_weight_info

        in_channel = 2 * hiden

        self.head_num = 8
        self.transformer_layer_num = 6
        block_layer = nn.TransformerEncoderLayer(d_model=2 * hiden,
                                                 nhead=self.head_num,
                                                 dim_feedforward=2 * hiden,
                                                 dropout=0.)
        self.transformer_encoder = nn.TransformerEncoder(block_layer, num_layers=self.transformer_layer_num)
        self.position_embeddings = LearnedPositionEncoding(d_model=2 * hiden, max_len=200)
        self.cls_token = nn.Parameter(torch.zeros(1, 1, self.hiden * 2))

        if has_shape:
            in_channel += 12 * 5 * 2
        if has_weight_info:
            in_channel += 2 * hiden
            self.weight_lstm = torch.nn.LSTM(
                input_size=12 * 5,
                hidden_size=hiden,
                num_layers=num_layers,
                bidirectional=True,
                batch_first=True,
            )
            self.weight_linear = torch.nn.Linear(2 * hiden, hiden)
        if has_value:
            in_channel += hiden
            self.trace_linear = torch.nn.Linear(hiden, hiden)
            value_layer = nn.TransformerEncoderLayer(d_model=hiden,
                                                     nhead=self.head_num,
                                                     dim_feedforward=hiden,
                                                     dropout=0.)
            self.value_transformer_encoder = nn.TransformerEncoder(value_layer,
                                                                   num_layers=self.transformer_layer_num)
            self.value_position_embeddings = LearnedPositionEncoding(d_model=hiden, max_len=200)
            self.value_cls_token = nn.Parameter(torch.zeros(1, 1, self.hiden))

        # Final head sized to the concatenated feature width.
        self.mlp = MLP(in_c=in_channel, out_c=out_classes)

    def forward(self, x, device, shape_info=None, value_data=None):
        """Classify the first function in `x`; returns (1, out_classes)
        logits, or None when no embedding was produced."""
        if value_data is None:
            if len(x) > 1:
                logger.debug("There is a multi func data!")
            block_embs = []
            for blocks in x[0]:
                for block in blocks:
                    # Mean-pool the block's token vectors, then project.
                    block_embs.append(self.linear(torch.mean(block.to(device), dim=1)))
            out = torch.stack(block_embs, dim=1)
        else:
            block_embs = []
            trace_embs = []
            for blocks, traces in zip(x[0], value_data[0]):
                for block, trace in zip(blocks, traces):
                    block_embs.append(self.linear(torch.mean(block.to(device), dim=1)))
                    trace_embs.append(self.trace_linear(torch.mean(trace.to(device), dim=1)))
            trace_out = torch.stack(trace_embs, dim=1)
            out = torch.stack(block_embs, dim=1)

        if len(out) == 0:
            return None

        # Prepend the CLS token; cap the sequence at the position-table size.
        out = torch.cat([self.cls_token.to(device), out], dim=1)
        if out.shape[1] > 200:
            out = out[:, :200, :]

        out = self.position_embeddings(out)
        # Keep only the CLS position as the aggregate embedding.
        out = self.transformer_encoder(out)[:, 0, :].squeeze(0)

        if self.has_shape:
            in_out_shape_info = shape_info[:, :12 * 5 * 2]
            out = torch.cat([out, in_out_shape_info.squeeze(0).float().to(device)])

        if self.has_value:
            trace_out = torch.cat([self.value_cls_token.to(device), trace_out], dim=1)
            if trace_out.shape[1] > 200:
                trace_out = trace_out[:, :200, :]
            trace_out = self.value_position_embeddings(trace_out)
            trace_out = self.value_transformer_encoder(trace_out)[:, 0, :].squeeze(0)
            out = torch.cat([out, trace_out])

        pred = self.mlp(out.unsqueeze(0))
        return pred


# Mean based CLSNet, but we use mean operation to obtain basic block embeddings.
class CLSNetV6(torch.nn.Module):
    """Mean-pooling classifier.

    Basic-block embeddings are obtained by averaging token embeddings over
    dim 1 (no LSTM), projected with a linear layer, averaged again over all
    blocks into a single function embedding, optionally concatenated with
    shape / value-trace features, and classified by an MLP.
    """

    def __init__(self,
                 in_d=200,
                 hiden=200,
                 num_layers=2,
                 out_classes=9,
                 mid=50,
                 is_norm=False,
                 has_shape=False,
                 has_value=False,
                 has_weight_info=False):
        super().__init__()

        self.hiden = hiden
        # Projects a mean-pooled block embedding to 2*hiden features.
        # NOTE(review): input width is `hiden`, so blocks are assumed to have
        # feature size == hiden (true for the default in_d == hiden == 200)
        # — TODO confirm against callers if in_d != hiden is ever used.
        self.linear = torch.nn.Linear(hiden, 2 * hiden)
        self.max_length = 700
        self.is_norm = is_norm
        self.norm = Normalize(2)
        self.has_shape = has_shape
        self.has_value = has_value
        self.has_weight_info = has_weight_info

        # Classifier input width grows with each optional feature source.
        in_channel = 2 * hiden

        if has_shape:
            in_channel += 12 * 5 * 2
        if has_weight_info:
            in_channel += 2 * hiden
            self.weight_lstm = torch.nn.LSTM(
                input_size=12 * 5,
                hidden_size=hiden,
                num_layers=num_layers,
                bidirectional=True,
                batch_first=True,
            )
            self.weight_linear = torch.nn.Linear(2 * hiden, hiden)
        if has_value:
            in_channel += hiden
            self.trace_linear = torch.nn.Linear(hiden, hiden)

        # Fix: previously self.mlp was constructed twice (first with
        # in_c=2*hiden, then immediately overwritten here), wasting the
        # first allocation. Build it exactly once with the final width.
        self.mlp = MLP(in_c=in_channel, out_c=out_classes)

    def forward(self, x, device, shape_info=None, value_data=None):
        """Classify one function.

        Args:
            x: nested structure [func][block_group][block]; only x[0] is used.
               Each block tensor's dim-1 mean is taken as its embedding.
            device: torch device to move block/trace tensors onto.
            shape_info: optional (1, >=120) feature tensor; the first
                12*5*2 columns are input/output shape features (has_shape).
            value_data: optional value-trace data mirroring x's structure
                (required when has_value).

        Returns:
            Logits of shape (1, out_classes), or None when no blocks exist.
        """
        if value_data is None:
            if len(x) > 1:
                logger.debug("There is a multi func data!")
            func = x[0]
            mid_out = []
            for blocks in func:
                logger.debug("Block Num: {}".format(len(blocks)))
                for block in blocks:
                    block = block.to(device)
                    # Mean-pool the block's token embeddings, then project.
                    mid_out.append(self.linear(torch.mean(block, dim=1)))
            # Fix: guard BEFORE stacking — torch.stack([]) raises, so the
            # original `len(out) == 0` check could never fire.
            if not mid_out:
                return None
            out = torch.stack(mid_out, dim=1)
        else:
            func = x[0]
            value_trace = value_data[0]
            mid_out = []
            mid_trace_out = []
            for blocks, traces in zip(func, value_trace):
                for block, trace in zip(blocks, traces):
                    block = block.to(device)
                    trace = trace.to(device)
                    mid_out.append(self.linear(torch.mean(block, dim=1)))
                    mid_trace_out.append(self.trace_linear(torch.mean(trace, dim=1)))
            if not mid_out:
                return None
            trace_out = torch.stack(mid_trace_out, dim=1)
            out = torch.stack(mid_out, dim=1)

        # Average over all block embeddings to obtain the function embedding.
        out = torch.mean(out.squeeze(0), dim=0)
        if self.has_shape:
            in_out_shape_info = shape_info[:, :12 * 5 * 2]
            out = torch.cat([out, in_out_shape_info.squeeze(0).float().to(device)])

        if self.has_value:
            trace_out = torch.mean(trace_out.squeeze(0), dim=0)
            out = torch.cat([out, trace_out])

        pred = self.mlp(out.unsqueeze(0))
        return pred


# Use bi-direction output
class CLSNetV7(torch.nn.Module):
    """Bi-directional LSTM classifier.

    Each basic block is encoded by a bidirectional LSTM; the final hidden
    states of both directions are concatenated and projected to `hiden`
    features. Block embeddings are averaged into a function embedding,
    optionally extended with shape / weight / value-trace features, then
    classified by an MLP.
    """

    def __init__(self,
                 in_d=200,
                 hiden=200,
                 num_layers=2,
                 out_classes=9,
                 mid=50,
                 is_norm=False,
                 has_shape=False,
                 has_value=False,
                 has_weight_info=False):
        super().__init__()
        self.lstm_layer = torch.nn.LSTM(
            input_size=in_d,
            hidden_size=hiden,
            num_layers=num_layers,
            bidirectional=True,
            batch_first=True,
        )
        # Fuses forward+backward final hidden states (2*hiden) into hiden.
        self.linear = torch.nn.Linear(2 * hiden, hiden)
        self.max_length = 700
        self.is_norm = is_norm
        self.norm = Normalize(2)
        self.has_shape = has_shape
        self.has_value = has_value
        self.has_weight_info = has_weight_info

        # Classifier input width grows with each optional feature source.
        in_channel = hiden
        if has_shape:
            in_channel += 12 * 5 * 2
        if has_weight_info:
            in_channel += hiden
            self.weight_lstm = torch.nn.LSTM(
                input_size=12 * 5,
                hidden_size=hiden,
                num_layers=num_layers,
                bidirectional=True,
                batch_first=True,
            )
            self.weight_linear = torch.nn.Linear(2 * hiden, hiden)
        if has_value:
            in_channel += hiden
            self.trace_linear = torch.nn.Linear(2 * hiden, hiden)
            self.value_lstm = torch.nn.LSTM(
                input_size=in_d,
                hidden_size=hiden,
                num_layers=num_layers,
                bidirectional=True,
                batch_first=True,
            )

        # Fix: previously self.mlp was constructed twice (first with
        # in_c=hiden, then immediately overwritten here), wasting the
        # first allocation. Build it exactly once with the final width.
        self.mlp = MLP(in_c=in_channel, out_c=out_classes)

    def forward(self, x, device, shape_info=None, value_data=None):
        """Classify a (possibly multi-function) sample.

        Args:
            x: nested structure [func][block_group][block]; each block is a
               (1, seq_len, in_d) tensor fed to the LSTM with batch_first.
            device: torch device to move tensors onto.
            shape_info: optional feature tensor; first 12*5*2 columns are
                in/out shape features, the remainder weight-shape rows of
                width 12*5 (used when has_shape / has_weight_info).
            value_data: optional value-trace data mirroring x's structure.

        Returns:
            Logits of shape (1, out_classes), or None when no blocks exist.
        """
        out = []
        if value_data is None:
            for idx, func in enumerate(x):
                mid_out = []
                for blocks in func:
                    tmp_out = []

                    for block in blocks:
                        o, (hn, cn) = self.lstm_layer(block.to(device))
                        # hn[0]/hn[-1]: first and last layer-direction states.
                        pred = self.linear(torch.cat([hn[0], hn[-1]], dim=1))
                        if self.is_norm:
                            pred = self.norm(pred)

                        if len(tmp_out) == 0:
                            tmp_out = pred
                        else:
                            tmp_out += pred
                    # NOTE(review): the block sum is divided by len(func)
                    # (number of block groups), not len(blocks) — looks like
                    # a possible off-by-concept; behavior preserved, confirm
                    # against training results before changing.
                    tmp_out /= len(func)

                    mid_out.append(tmp_out)

                # Average embeddings across functions (running sum, divided
                # by len(x) after the loop).
                if idx == 0:
                    out = torch.mean(torch.stack(mid_out, dim=1).squeeze(0), dim=0)
                else:
                    out += torch.mean(torch.stack(mid_out, dim=1).squeeze(0), dim=0)
        else:
            trace_out = []
            for idx, (func, value_trace) in enumerate(zip(x, value_data)):
                mid_out = []
                mid_trace_out = []
                for blocks, traces in zip(func, value_trace):
                    tmp_out = []
                    tmp_trace_out = []
                    for block, trace in zip(blocks, traces):
                        # Skip empty blocks (zero-length sequences).
                        if block.shape[1] == 0:
                            continue
                        o, (hn, cn) = self.lstm_layer(block.to(device))
                        _, (t_hn, cn) = self.value_lstm(trace.to(device))

                        pred = self.linear(torch.cat([hn[0], hn[-1]], dim=1))
                        tr_pred = self.trace_linear(torch.cat([t_hn[0], t_hn[-1]], dim=1))

                        if self.is_norm:
                            pred = self.norm(pred)

                        if len(tmp_out) == 0:
                            tmp_out = pred
                        else:
                            tmp_out += pred
                        tmp_trace_out = tr_pred if len(tmp_trace_out) == 0 else tr_pred + tmp_trace_out

                    # All blocks in this group were empty — nothing to add.
                    if isinstance(tmp_out, list):
                        continue
                    tmp_out /= len(func)
                    tmp_trace_out /= len(func)
                    mid_out.append(tmp_out)
                    mid_trace_out.append(tmp_trace_out)

                if idx == 0:
                    trace_out = torch.mean(torch.stack(mid_trace_out, dim=1).squeeze(0), dim=0)
                    out = torch.mean(torch.stack(mid_out, dim=1).squeeze(0), dim=0)
                else:
                    trace_out += torch.mean(torch.stack(mid_trace_out, dim=1).squeeze(0), dim=0)
                    out += torch.mean(torch.stack(mid_out, dim=1).squeeze(0), dim=0)

        if len(out) == 0:
            return None
        out /= len(x)

        if self.has_shape:
            in_out_shape_info = shape_info[:, :12 * 5 * 2]
            out = torch.cat([out, in_out_shape_info.squeeze(0).float().to(device)])

        if self.has_weight_info:
            # Remaining shape_info columns: weight shapes, one 12*5 row each.
            weight_shape_info = shape_info[:, 12 * 5 * 2:]
            weight_shape_info = weight_shape_info.reshape(1, -1, 12 * 5)
            o, (hn, cn) = self.weight_lstm(weight_shape_info.float().to(device))
            pred = self.weight_linear(torch.cat([hn[0], hn[-1]], dim=1))
            out = torch.cat([out, pred.squeeze(0)])

        if self.has_value:
            trace_out /= len(x)
            out = torch.cat([out, trace_out])

        pred = self.mlp(out.unsqueeze(0))
        return pred


if __name__ == "__main__":
    # Smoke test: run CLSNet on random inputs and compute a loss.
    x = torch.rand(3, 200, 200)
    net = CLSNet()
    net.eval()
    out = net(x)
    print(out.shape)

    x = torch.rand(1, 100, 200)
    out = net(x)
    criterion = torch.nn.CrossEntropyLoss()
    # Fix: CrossEntropyLoss requires a LongTensor of class indices;
    # passing a plain Python list (`y = [1]`) raises a TypeError.
    y = torch.tensor([1])

    print(criterion(out, y))
    print(out.shape)

    # Predicted class index for the first sample.
    print(out[0].topk(1)[1][0])
