from sklearn.utils import shuffle
import torch
from torch import nn
import random
import time
from collections import Counter
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
import torchtext.vocab as Vocab
import torch.utils.data as Data
import hiddenlayer as hl
from visdom import Visdom
import numpy as np

# Select GPU 0 when CUDA is available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


# device = "cpu"

def get_data():
    """Load opcode sequences and labels from the processed data files.

    Returns:
        (X, y): X is a list of opcode-token lists; y is the parallel list
        of labels (1 for the "white" file, 0 for the "black" file).
    """
    X = []
    y = []
    # Both files share the same format (one whitespace-separated opcode
    # sequence per line), so a single helper reads each of them.
    _read_opcodes("../processed_data/whiteopcode.txt", 1, X, y)
    _read_opcodes("../processed_data/blackopcode.txt", 0, X, y)
    return X, y


def _read_opcodes(path, label, X, y):
    """Append each line of *path* (split into tokens) to X, with *label* to y."""
    with open(path) as f:
        for line in f:
            X.append(line.split())
            y.append(label)


def get_vocab():
    """Build a torchtext Vocab over every token in the combined PHP corpus."""
    with open("../processed_data/all_php.txt") as f:
        tokens = f.read().split()
    token_counts = Counter(tokens)
    return Vocab.Vocab(token_counts)

def get_dict() -> dict:
    """Map every distinct corpus token to a unique integer index.

    Indices are assigned in order of first appearance, which matches the
    original Counter-based ordering (both Counter and dict preserve
    insertion order).

    Returns:
        dict mapping token -> index.
    """
    with open("../processed_data/all_php.txt") as f:
        tokens = f.read().split()
    # dict.fromkeys de-duplicates while keeping first-occurrence order.
    # This replaces the original fragile pattern of reassigning values
    # while iterating over the dict's own items() view.
    return {token: i for i, token in enumerate(dict.fromkeys(tokens))}

def preprocess(X, y, vocab):
    """Convert token sequences to fixed-length index tensors.

    Each sequence in *X* is mapped through ``vocab.stoi``, then truncated
    or zero-padded so every row has exactly ``max_l`` entries.

    Returns:
        (features, labels): LongTensor of shape (len(X), max_l) and a
        tensor of the labels in *y*.
    """
    max_l = 1000  # every opcode sequence is clipped/padded to this length

    def to_fixed_length(indices):
        clipped = indices[:max_l]
        return clipped + [0] * (max_l - len(clipped))

    rows = []
    for words in X:
        rows.append(to_fixed_length([vocab.stoi[word] for word in words]))
    features = torch.tensor(rows)
    labels = torch.tensor(y)
    return features, labels


def evaluate_accuracy(data_iter, net, device=None):
    """Return mean classification accuracy of *net* over *data_iter*.

    nn.Module models are temporarily switched to eval mode (disabling
    dropout) for the forward pass and restored to train mode afterwards.
    Plain-function models get ``is_training=False`` when they accept that
    keyword; otherwise they are called as-is.
    """
    if device is None and isinstance(net, torch.nn.Module):
        # Default to whatever device the model's parameters live on.
        device = list(net.parameters())[0].device
    correct, total = 0.0, 0
    with torch.no_grad():
        for X, y in data_iter:
            if isinstance(net, torch.nn.Module):
                net.eval()  # evaluation mode: turns off dropout
                preds = net(X.to(device)).argmax(dim=1)
                correct += (preds == y.to(device)).float().sum().cpu().item()
                net.train()  # restore training mode
            elif 'is_training' in net.__code__.co_varnames:
                # Custom model function that takes an is_training flag.
                correct += (net(X, is_training=False).argmax(dim=1) == y).float().sum().item()
            else:
                correct += (net(X).argmax(dim=1) == y).float().sum().item()
            total += y.shape[0]
    return correct / total


def train(train_iter, test_iter, net, loss, optimizer, device, num_epochs):
    """Train *net* on *train_iter*, printing loss/accuracy after each epoch.

    Args:
        train_iter, test_iter: iterables yielding (X, y) mini-batches.
        net: model to train (moved onto *device* here).
        loss: criterion mapping (y_hat, y) to a scalar loss tensor.
        optimizer: torch optimizer over net's parameters.
        device: torch.device to run on.
        num_epochs: number of full passes over train_iter.
    """
    net = net.to(device)
    print("training on ", device)
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n, start = 0.0, 0.0, 0, time.time()
        # BUG FIX: the batch counter must reset each epoch. The original
        # accumulated it across epochs while train_l_sum reset per epoch,
        # so the printed loss (train_l_sum / batch_count) was understated
        # from the second epoch onwards.
        batch_count = 0
        for X, y in train_iter:
            X = X.to(device)
            y = y.to(device)
            y_hat = net(X)
            l = loss(y_hat, y)
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            train_l_sum += l.cpu().item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().cpu().item()
            n += y.shape[0]
            batch_count += 1

        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, time %.1f sec'
              % (epoch + 1, train_l_sum / batch_count, train_acc_sum / n, test_acc, time.time() - start))


class BiRNN(nn.Module):
    """Bidirectional LSTM classifier with 2 output classes.

    The classifier input concatenates the encoder outputs at the first and
    last time steps; each is 2 * n_hiddens wide (bidirectional), giving a
    4 * n_hiddens feature vector.
    """

    def __init__(self, vocab, embedding_size, n_hiddens, n_layers):
        super(BiRNN, self).__init__()
        self.embedding = nn.Embedding(len(vocab), embedding_size)
        # Bidirectional recurrent encoder over the embedded sequence.
        self.encoder = nn.LSTM(input_size=embedding_size,
                               hidden_size=n_hiddens,
                               num_layers=n_layers,
                               bidirectional=True)
        # Maps the concatenated first/last-step states to 2 class scores.
        self.decoder = nn.Linear(4 * n_hiddens, 2)

    def forward(self, inputs):
        # inputs: (batch, seq_len). nn.LSTM wants seq_len first, so
        # transpose before embedding -> (seq_len, batch, embedding_size).
        embedded = self.embedding(inputs.permute(1, 0))
        # states: (seq_len, batch, 2 * n_hiddens); final (h, c) unused.
        states, _ = self.encoder(embedded)
        # Join first and last time steps -> (batch, 4 * n_hiddens).
        features = torch.cat((states[0], states[-1]), -1)
        return self.decoder(features)


def get_confusion_matrix(modelpath, y_test):
    """Load a saved model and print its confusion matrix and derived metrics.

    NOTE(review): depends on the module-level globals ``test_iter`` and
    ``device``; *y_test* must list the labels in the same order that
    test_iter yields its batches (test_iter must not shuffle).

    Args:
        modelpath: path to a model saved with ``torch.save(net, path)``.
        y_test: ground-truth labels for the test set.
    """
    # map_location lets a checkpoint saved on GPU load on a CPU-only host.
    net = torch.load(modelpath, map_location=device)
    if isinstance(net, torch.nn.Module):
        net.eval()  # inference mode: disable dropout
    y_pred = []
    with torch.no_grad():  # no gradients needed for evaluation
        for X, y in test_iter:
            X = X.to(device)
            y_hat = net(X)
            # BUG FIX: extend (not append) keeps y_pred flat. The original
            # appended per-batch lists and relied on np.reshape(-1), which
            # breaks when the final batch is smaller than batch_size
            # (ragged lists become an object array).
            y_pred.extend(y_hat.argmax(dim=1).tolist())
    y_pred = np.array(y_pred)
    y_test = np.array(y_test)
    matrix = confusion_matrix(y_test, y_pred)
    print(matrix)
    TP = matrix[0][0]
    FN = matrix[0][1]
    FP = matrix[1][0]
    TN = matrix[1][1]
    FPR = FP / (FP + TN)
    ACC = (TP + TN) / len(y_test)
    # BUG FIX: precision is TP / (TP + FP); the original used TP / (TP + FN),
    # which made PRE identical to REC (and F1 equal to both).
    PRE = TP / (TP + FP)
    REC = TP / (TP + FN)
    F1 = 2 * PRE * REC / (PRE + REC)
    print("误报率：%.4f，准确率：%.4f，精确率：%.4f，召回率：%.4f，F1值：%.4f" % (FPR, ACC, PRE, REC, F1))


# NOTE(review): this setup runs at import time — it reads the corpus files
# and builds the full dataset, so merely importing this module is expensive.
data, labels = get_data()
vocab = get_vocab()
features, labels = preprocess(data, labels, vocab)
X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.2, random_state=1)

batch_size = 4
# batch_size = 32
print_step = 20
train_iter = Data.DataLoader(Data.TensorDataset(X_train, y_train), batch_size, shuffle=True)
# test_iter deliberately does not shuffle, so predictions stay aligned with
# y_test (get_confusion_matrix relies on this ordering).
test_iter = Data.DataLoader(Data.TensorDataset(X_test, y_test), batch_size)

embedding_size, n_hiddens, n_layers = 128, 128, 2
net = BiRNN(vocab, embedding_size, n_hiddens, n_layers)

lr, n_epochs = 0.01, 3
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=lr)
loss = nn.CrossEntropyLoss()

if __name__ == '__main__':
    # Training appears to have been run once and the model saved; currently
    # only evaluation of the saved checkpoint is executed.
    # train(train_iter, test_iter, net, loss, optimizer, device, n_epochs)
    # torch.save(net, 'model.pkl')

    get_confusion_matrix('model.pkl', y_test)