
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import pandas as pd
from sklearn.model_selection import train_test_split
from string import printable
import numpy as np
import random
# Fix all RNG sources for reproducible runs.
seed=40
torch.manual_seed(seed) # seed the CPU RNG
torch.cuda.manual_seed(seed) # seed the current GPU's RNG
torch.cuda.manual_seed_all(seed)  # if you are using multi-GPU, seed every GPU's RNG
np.random.seed(seed)  # Numpy module.
random.seed(seed)  # Python random module.	
# Disable cuDNN autotuning and force deterministic kernels so repeated runs
# with the same seed produce the same results (at some speed cost).
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True

class URLDataset(Dataset):
    """Dataset of URLs encoded as fixed-length sequences of character ids.

    Each URL character found in ``string.printable`` is mapped to its index
    plus one (id 0 is reserved for padding); characters outside ``printable``
    are silently dropped. Sequences are truncated/right-padded to ``max_len``.
    """

    # Precomputed char -> id table: O(1) dict lookup per character instead of
    # the original printable.index(), an O(len(printable)) scan per character.
    _CHAR_TO_ID = {ch: i + 1 for i, ch in enumerate(printable)}

    def __init__(self, urls, labels, max_len):
        self.urls = urls        # sequence of URL strings
        self.labels = labels    # sequence of 0/1 labels, parallel to urls
        self.max_len = max_len  # fixed output sequence length

    def __len__(self):
        return len(self.urls)

    def __getitem__(self, idx):
        """Return (LongTensor of shape (max_len,), scalar float32 label)."""
        url = self.urls[idx]
        label = self.labels[idx]
        table = self._CHAR_TO_ID
        tokens = [table[ch] for ch in url if ch in table]
        # Truncate to max_len, then right-pad with 0 up to max_len
        # (the pad term is empty when the URL already fills max_len).
        tokens = tokens[:self.max_len] + [0] * (self.max_len - len(tokens))
        return torch.tensor(tokens, dtype=torch.long), torch.tensor(label, dtype=torch.float32)


class DualPooling(nn.Module):
    """Concatenate max-pooled and average-pooled views of the input.

    Both pools use the same window size; their outputs are stacked along
    the channel dimension (dim 1), doubling the channel count.
    """

    def __init__(self, pool_size):
        super().__init__()
        self.maxpool = nn.MaxPool1d(pool_size)
        self.avgpool = nn.AvgPool1d(pool_size)

    def forward(self, x):
        pooled = (self.maxpool(x), self.avgpool(x))
        return torch.cat(pooled, dim=1)


class MaliciousURLDetector(nn.Module):
    """Character-level CNN + LSTM binary classifier for URLs.

    Pipeline: embedding -> dropout -> three (Conv1d, ELU, MaxPool1d, Dropout)
    stages -> unidirectional LSTM -> last time step -> linear head -> sigmoid.

    NOTE(review): ``dualpool``, ``bilstm`` and ``attention`` are registered in
    ``__init__`` but never used in ``forward`` (kept here so state_dict keys
    stay compatible); ``max_len`` is accepted but unused.
    """

    def __init__(self, max_len, vocab_size, emb_dim, lstm_hidden_dim, num_classes):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, emb_dim)
        self.conv1 = nn.Conv1d(emb_dim, 256, kernel_size=5, padding='same')
        self.conv2 = nn.Conv1d(256, 256, kernel_size=6, padding='same')
        self.conv3 = nn.Conv1d(256, 256, kernel_size=7, padding='same')
        self.dualpool = DualPooling(4)  # unused in forward
        self.dropout = nn.Dropout(0.25)
        self.dropout1 = nn.Dropout(0.5)
        self.maxpooling = nn.MaxPool1d(kernel_size=4)
        # unused in forward; its declared input size (1536) does not match the
        # 256-channel conv output — confirm before wiring it in
        self.bilstm = nn.LSTM(1536, lstm_hidden_dim, batch_first=True, bidirectional=True)
        self.lstm = nn.LSTM(256, lstm_hidden_dim, batch_first=True, bidirectional=False)
        self.attention = nn.Linear(2 * lstm_hidden_dim, 2 * lstm_hidden_dim)  # unused in forward
        self.fc = nn.Linear(lstm_hidden_dim, num_classes)

    def forward(self, x):
        # (batch, seq) ids -> (batch, seq, emb) -> (batch, emb, seq) for Conv1d.
        h = self.dropout(self.embedding(x)).permute(0, 2, 1)
        # Three structurally identical conv stages sharing one pool / dropout.
        for conv in (self.conv1, self.conv2, self.conv3):
            h = self.dropout1(self.maxpooling(nn.functional.elu(conv(h))))
        # Back to (batch, seq', channels) for the batch-first LSTM.
        h = h.permute(0, 2, 1)
        seq_out, _ = self.lstm(h)
        seq_out = self.dropout1(seq_out)
        last_step = seq_out[:, -1, :]  # keep only the final time step
        return torch.sigmoid(self.fc(last_step))


def train(model, dataloader, criterion, optimizer, device):
    """Run one training epoch; return the mean per-batch loss."""
    model.train()
    total_loss = 0.0
    for batch_inputs, batch_labels in dataloader:
        batch_inputs = batch_inputs.to(device)
        # BCELoss expects targets shaped like the (batch, 1) model output.
        batch_labels = batch_labels.to(device).unsqueeze(1)
        optimizer.zero_grad()
        batch_loss = criterion(model(batch_inputs), batch_labels)
        batch_loss.backward()
        optimizer.step()
        total_loss += batch_loss.item()
    return total_loss / len(dataloader)


def evaluate(model, dataloader, criterion, device):
    """Evaluate without gradients; return (mean per-batch loss, accuracy)."""
    model.eval()
    total_loss = 0.0
    n_correct = 0
    n_seen = 0
    with torch.no_grad():
        for batch_inputs, batch_labels in dataloader:
            batch_inputs = batch_inputs.to(device)
            # Match the (batch, 1) shape of the model's sigmoid output.
            batch_labels = batch_labels.to(device).unsqueeze(1)
            outputs = model(batch_inputs)
            total_loss += criterion(outputs, batch_labels).item()
            # Threshold probabilities at 0.5 for hard predictions.
            preds = (outputs > 0.5).float()
            n_seen += batch_labels.size(0)
            n_correct += (preds == batch_labels).sum().item()
    return total_loss / len(dataloader), n_correct / n_seen


if __name__ == '__main__':
    # --- Data loading -----------------------------------------------------
    # NOTE(review): hard-coded Windows path; the CSV must have 'url' and
    # 'isMalicious' columns.
    df = pd.read_csv('D:\\dataset.csv')
    urls = df['url'].tolist()
    labels = df['isMalicious'].tolist()

    # --- Hyperparameters --------------------------------------------------
    max_len = 75                     # URLs padded/truncated to 75 characters
    vocab_size = len(printable) + 1  # +1 for the padding id 0
    emb_dim = 32
    lstm_hidden_dim = 64
    num_classes = 1                  # single sigmoid output for binary labels
    batch_size = 32
    epochs = 10
    learning_rate = 0.0001

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # --- Train / test split and loaders ------------------------------------
    train_urls, test_urls, train_labels, test_labels = train_test_split(
        urls, labels, test_size=0.2, random_state=33)
    train_dataset = URLDataset(train_urls, train_labels, max_len)
    test_dataset = URLDataset(test_urls, test_labels, max_len)
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    test_dataloader = DataLoader(test_dataset, batch_size=batch_size)

    # --- Model, loss, optimizer --------------------------------------------
    model = MaliciousURLDetector(max_len, vocab_size, emb_dim, lstm_hidden_dim, num_classes).to(device)
    criterion = nn.BCELoss()
    # Fix: the optimizer was previously constructed twice back to back, the
    # first instance being dead code; build it once. The explicit betas/eps/
    # weight_decay of the second construction were Adam's defaults, so a
    # single Adam(lr=learning_rate) is behaviorally identical.
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    # --- Training loop ------------------------------------------------------
    for epoch in range(epochs):
        train_loss = train(model, train_dataloader, criterion, optimizer, device)
        test_loss, test_acc = evaluate(model, test_dataloader, criterion, device)
        print(
            f'Epoch [{epoch + 1}/{epochs}], Train Loss: {train_loss:.4f}, Test Loss: {test_loss:.4f}, Test Acc: {test_acc:.4f}')

    # NOTE(review): hard-coded output path.
    torch.save(model.state_dict(), 'F:\\yuanshimodel.pth')
