import torch
# import torch_npu
from utils import *
from statistical_analysis import behavioral_analysis


class LSTMModel(torch.nn.Module):
    """Binary domain classifier: embedding -> LSTM -> dropout/linear/sigmoid.

    Outputs a probability in [0, 1] per input sequence (last-timestep pooling).
    """

    def __init__(self, vocab_size, embedding_dim, hidden_dim, num_layers, dropout=0.5, requires_grad=True):
        """Build the network.

        Args:
            vocab_size: number of character indices (0 is used as padding).
            embedding_dim: size of each character embedding vector.
            hidden_dim: LSTM hidden state size.
            num_layers: number of stacked LSTM layers.
            dropout: dropout probability applied before the final linear layer.
            requires_grad: accepted for backward compatibility but unused —
                parameters stay trainable by default. NOTE(review): consider
                wiring this up or deprecating it.
        """
        super().__init__()
        # Attribute names (embedding / LSTM / classifier) are state-dict keys;
        # they must not be renamed or saved checkpoints will fail to load.
        self.embedding = torch.nn.Embedding(vocab_size, embedding_dim)
        self.LSTM = torch.nn.LSTM(embedding_dim, hidden_dim, num_layers=num_layers, batch_first=True)
        self.classifier = torch.nn.Sequential(
            torch.nn.Dropout(dropout),
            torch.nn.Linear(hidden_dim, 1),
            torch.nn.Sigmoid(),
        )

    def forward(self, inputs):
        """Score a batch of index sequences.

        Args:
            inputs: LongTensor of shape (batch, seq_len) with character indices.

        Returns:
            FloatTensor of shape (batch, 1) with sigmoid probabilities.
        """
        embedded = self.embedding(inputs)       # (batch, seq_len, embedding_dim)
        outputs, _ = self.LSTM(embedded)        # initial hidden state defaults to zeros
        last_step = outputs[:, -1, :]           # pool: keep the final timestep only
        return self.classifier(last_step)


class Model(object):
    """Inference wrapper: fuses LSTM domain scores with behavioral analysis."""

    def __init__(self):
        """Load the character vocabulary and model weights, move model to NPU.

        Reads './resources/charList.txt' and './resources/model0528.pth'.
        """
        embedding_dim = 256
        hidden_dim = 16
        num_layers = 1
        model_path = './resources/model0528.pth'
        confFilePath = './resources/charList.txt'
        # Parse the character list once (the original parsed the file twice).
        self.max_features, self.charList = char_list(confFilePath)
        vocab_size = len(self.charList) + 1  # +1 reserves index 0 for padding/unknown chars
        self.model = LSTMModel(vocab_size, embedding_dim, hidden_dim, num_layers, dropout=0.5)
        # map_location makes the load device-independent; the model is moved
        # to the NPU explicitly afterwards.
        self.model.load_state_dict(torch.load(model_path, map_location='cpu'))
        self.model.eval()
        self.model.to('npu')

    def process(self, arr):
        """Classify DNS log records as malicious (True) or benign (False).

        Args:
            arr: list of records shaped like
                [time, src_ip, dst_ip, domain, ...] — only indices 1-3 are read.

        Returns:
            List of booleans, one per record (also printed, as before).
        """
        domain_list, ip_list = behavioral_analysis(arr)
        domains = [[record[3]] for record in arr]
        batch = self.readfile(domains)  # renamed: `input` shadowed the builtin
        batch = batch.to('npu')
        outputs = self.model(batch)
        res = []
        for i, score in enumerate(outputs):
            domain = arr[i][3]
            ip_pair = [arr[i][1], arr[i][2]]
            # Behaviorally suspicious if either the domain or the IP pair
            # was flagged by the statistical analysis.
            suspicious = domain in domain_list or ip_pair in ip_list
            if score >= 0.9 or domain in domain_list:
                res.append(True)
            # NOTE: the original upper bound `0.99 > score` was unreachable —
            # the branch above already captured every score >= 0.9.
            elif score >= 0.5 and suspicious:
                res.append(True)
            else:
                res.append(False)
        print(res)
        return res

    def readfile(self, arr):
        """Encode domains into a zero-padded LongTensor of character indices.

        Args:
            arr: list of single-element lists, each holding a raw domain string.

        Returns:
            LongTensor of shape (len(arr), max_len); rows padded with 0.
        """
        encoded = []
        for row in arr:
            x_data = []
            domain = row[0].strip('\n').strip('\r').strip(' ')
            if domain != '':
                _, domain = extract_domain(domain)
                for char in domain:
                    try:
                        # assumes charList maps char -> index — TODO confirm
                        x_data.append(self.charList[char])
                    except KeyError:  # narrowed from bare except
                        print('unexpected char' + ':' + char)
                        x_data.append(0)
            encoded.append(x_data)
        # default/or-1 guard: the original crashed on an empty batch and
        # produced a zero-width tensor (unusable by the LSTM) when every
        # domain was empty.
        max_len = max((len(seq) for seq in encoded), default=1) or 1
        padded = [seq + [0] * (max_len - len(seq)) for seq in encoded]
        return torch.tensor(padded)


if __name__ == '__main__':
    # Sample DNS log records; each row follows
    # ['time', 'src_ip', 'dst_ip', 'domain', 'query_type', 'response_size']
    # (original note mentioned a DataFrame, but plain lists are used here).
    predict_data = [['2025-05-29 14:09:58', '1.1.1.1', '2.2.2.2', '', ''],
                    ['2025-05-29 14:09:58', '1.1.1.1', '2.2.2.2', 'AAAAACDmQA.=auth.a.friendsakka.xyz', '', ''],
                    ['2025-05-29 14:09:58', '1.1.1.1', '2.2.2.2', 'www.baidu.com', '', ''],
                    ['2025-05-29 14:09:58', '1.1.1.1', '2.2.2.2', 'github.com', '', ''],
                    ['2025-05-29 14:09:58', '1.1.1.1', '2.2.2.2', 'http://epitom.co/fk/mbkp.ph', '', '']]
    # Single-row smoke test, kept for convenience:
    # predict_data = [['2025-05-29 14:09:58', '1.1.1.1', '2.2.2.2', 'www.baidu.com', '', '']]
    detector = Model()
    detector.process(predict_data)
