import numpy as np
import pandas as pd
# from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
import torch
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset


class KddData(object):
    """Load KDD-style CSV data and expose train/test datasets and dataloaders.

    Each row's features are zero-padded to 100 columns and reshaped to
    (N, 1, 10, 10); the 'label' column is integer-encoded with a
    LabelEncoder.  With augmented=False a single file is split 70/30 into
    train/test; with augmented=True `file_name` is the training set and
    `test_filename` the test set.
    """

    def __init__(self, batch_size, file_name, augmented=False, test_filename=None):
        """Read the CSV file(s) and build datasets plus dataloaders.

        batch_size    -- batch size for both dataloaders
        file_name     -- training CSV (must contain a 'label' column)
        augmented     -- when True, read the test set from test_filename
                         instead of splitting file_name 70/30
        test_filename -- test CSV, required when augmented=True
        """
        self._encoder = {
            'label':    LabelEncoder()
        }
        self.batch_size = batch_size

        if augmented:
            # Train and test come from two separate files.
            if test_filename is None:
                raise ValueError("test_filename is required when augmented=True")
            X_train, y_train = self.__load_csv(file_name)
            self.train_dataset = TensorDataset(
                torch.from_numpy(X_train.astype(np.float32)),
                torch.from_numpy(y_train.astype(int))
            )
            X_test, y_test = self.__load_csv(test_filename)
            # Test labels are int-encoded like the training labels (the
            # original cast them to float32, inconsistently).
            self.test_dataset = TensorDataset(
                torch.from_numpy(X_test.astype(np.float32)),
                torch.from_numpy(y_test.astype(int))
            )
        else:
            data_X, data_y = self.__load_csv(file_name)
            self.train_dataset, self.test_dataset = self.__split_data_to_tensor(data_X, data_y)

        self.train_dataloader = DataLoader(self.train_dataset, self.batch_size, shuffle=True)
        self.test_dataloader = DataLoader(self.test_dataset, self.batch_size, shuffle=True)

    def __load_csv(self, file_name):
        """Read one CSV file and return (encoded features, encoded labels)."""
        data = pd.read_csv(file_name)
        target = np.array(data['label'])
        features = np.array(data.drop('label', axis=1))
        return self.__encode_data(features, target)

    def __encode_data(self, data_X, data_y):
        """Zero-pad features to 100 columns, reshape them to (N, 1, 10, 10),
        and integer-encode the labels."""
        encoder = self._encoder['label']
        # Fit only on the first call so train and test share one consistent
        # label mapping: re-fitting on the test file could silently remap
        # classes when it contains a subset of the training labels.
        # NOTE(review): transform() now raises on labels unseen at fit time.
        if not hasattr(encoder, 'classes_'):
            encoder.fit(list(set(data_y)))
        data_X = np.pad(data_X, ((0, 0), (0, 100 - len(data_X[0]))), 'constant').reshape(-1, 1, 10, 10)
        data_y = encoder.transform(data_y)
        return data_X, data_y

    def __split_data_to_tensor(self, data_X, data_y):
        """Split 70/30 into train/test and wrap both in TensorDatasets."""
        X_train, X_test, y_train, y_test = train_test_split(data_X, data_y, test_size=0.3)
        # np.int was removed in NumPy 1.24; the builtin int is the
        # documented replacement.
        train_dataset = TensorDataset(
            torch.from_numpy(X_train.astype(np.float32)),
            torch.from_numpy(y_train.astype(int))
        )
        test_dataset = TensorDataset(
            torch.from_numpy(X_test.astype(np.float32)),
            torch.from_numpy(y_test.astype(int))
        )
        return train_dataset, test_dataset

    def decode(self, data, label=False):
        """Inverse-transform an encoded record (label=False) or an array of
        encoded labels (label=True).

        NOTE(review): the label=False path needs 'protocal', 'service' and
        'flag' encoders that __init__ never registers -- callers must add
        them to self._encoder first, or this raises KeyError.
        """
        if not label:
            _data = list(data)
            _data[1] = self._encoder['protocal'].inverse_transform([_data[1]])[0]
            _data[2] = self._encoder['service'].inverse_transform([_data[2]])[0]
            # Fixed: the original wrote the flag value into _data[2],
            # clobbering the service field and leaving _data[3] encoded.
            _data[3] = self._encoder['flag'].inverse_transform([_data[3]])[0]
            return _data
        return self._encoder['label'].inverse_transform(data)

    def encode(self, data, label=False):
        """Transform a raw record (label=False) or a single label value
        (label=True) into its encoded form.  See decode() for the
        encoder-registration caveat on the label=False path."""
        if not label:
            _data = list(data)
            _data[1] = self._encoder['protocal'].transform([_data[1]])[0]
            _data[2] = self._encoder['service'].transform([_data[2]])[0]
            _data[3] = self._encoder['flag'].transform([_data[3]])[0]
            return _data
        return self._encoder['label'].transform([data])[0]


# Run configuration: loader batch size and data file locations.
batch_size = 128
data_file = "data/train-expon-nagle-open.csv"
test_file = "data/test-lossRate-0.csv"

# augmented=True -> train and test are read from two separate CSV files.
dataset = KddData(
    batch_size,
    file_name=data_file,
    augmented=True,
    test_filename=test_file,
)



import torch
from torch import nn
from Models import Transformer, Encoder, Decoder

class myModel(nn.Module):
    """Transformer-encoder classifier head.

    Runs the input sequence through the project `Encoder`, projects every
    position down to a scalar, then maps the resulting length-`src` vector
    to `n_classes` sigmoid scores.
    """

    def __init__(self, vocab_size, src, d_model, N, heads, dropout, n_classes):
        super().__init__()
        # NOTE(review): attribute names are part of the checkpoint format
        # (state_dict keys loaded from 'model.pth') -- do not rename them.
        self.encoder = Encoder(vocab_size, d_model, N, heads, dropout)
        self.linear1 = nn.Linear(d_model, 1)
        self.linear2 = nn.Linear(src, n_classes)
        self.fc = nn.Sigmoid()

    def forward(self, x):
        """Return sigmoid class scores, one set per batch element."""
        encoded = self.encoder(x, None)  # no attention mask
        # Collapse the model dimension: presumably (batch, src, d_model)
        # -> (batch, src) -- confirm against Encoder's output shape.
        per_token = self.linear1(encoded).squeeze(-1)
        return self.fc(self.linear2(per_token))


import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from tqdm import tqdm
import os

# Neural-network hyper-parameters.
learning_rate = 5e-3  # only referenced by the commented-out training loop below
num_epoches = 12      # only referenced by the commented-out training loop below
USE_GPU = torch.cuda.is_available()

print("cuda avail:", USE_GPU)

# myModel(vocab_size, src, d_model, N, heads, dropout, n_classes)
# NOTE(review): vocab_size=1500 assumes every padded feature value falls in
# [0, 1500) after the .long() cast at evaluation time -- confirm vs. the data.
model = myModel(1500, 100, 8, 6, 8, 0.1, 1)


# Restore a previous checkpoint when one exists, then evaluate on the test set.
if os.path.isfile('model.pth'):
    model.load_state_dict(torch.load('model.pth'))

# Respect the availability check computed above instead of calling .cuda()
# unconditionally (the original crashed on CPU-only machines).
device = torch.device('cuda' if USE_GPU else 'cpu')
model.to(device)
model.eval()

num_correct = 0
test_num = 0
# Inference only: no_grad() avoids building the autograd graph.  The
# deprecated Variable wrapper is gone -- tensors carry autograd state directly
# since PyTorch 0.4.
with torch.no_grad():
    for x, label in dataset.test_dataloader:
        # Flatten the (N, 1, 10, 10) inputs back to length-100 sequences and
        # cast to int64 for the encoder's embedding lookup.
        x = x.reshape(x.size(0), 100).long().to(device)
        # Binary labels as an (N, 1) float tensor to match the model output.
        label = label.reshape(label.size(0), 1).float().to(device)

        out = model(x)
        pred = torch.round(out)  # threshold the sigmoid output at 0.5
        num_correct += (pred == label).sum().item()
        test_num += label.size(0)

# max(..., 1) guards against division by zero on an empty test loader.
print(f"{num_correct}/{test_num}, {num_correct / max(test_num, 1):.2f}")
    

# model.cuda()
# criterion = nn.BCELoss()
# optimizer = optim.SGD(model.parameters(), lr=learning_rate)

# for epoch in range(num_epoches):
#     running_loss = 0.0
#     running_acc = 0.0
#     pbar = tqdm(enumerate(dataset.train_dataloader, 1), total=len(dataset.train_dataloader))
#     pbar.set_description(f'epoch{epoch + 1}')
#     for i, data in pbar:
#         model.train()
#         x, label = data
        
#         # Reshape x and convert to LongTensor
#         x = x.reshape(x.size(0),  100)
#         x = Variable(x)
#         x = x.long()
#         x = x.cuda()
        
#         label = label.reshape(label.size(0), 1)
#         label = label.float()
#         label = label.cuda()
        
#         out = model(x)

#         loss = criterion(out, label)
        
#         running_loss += loss.item() * label.size(0)
#         pred = torch.round(out)
#         accuracy = (pred == label).float().mean()
#         num_correct = (pred == label).sum()
#         running_acc += num_correct.item()
        
#         pbar.set_postfix({
#             # 'b_acc': f'{accuracy:.2f}',
#             'r_acc': f'{running_acc / ((i + 1) * label.size(0)):.2f}',
#             'r_loss': f'{running_loss / ((i + 1) * label.size(0)):.2f}',
#         })
        
#         # Back Propagation
#         optimizer.zero_grad()
#         loss.backward()
#         optimizer.step()

# torch.save(model.state_dict(), 'model.pth')