import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import time
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score, classification_report

# reading features list
# reading features list
# Echo the raw kddcup.names file so the feature list is visible in the run log.
with open("../data/kddcup.names") as names_file:
    print(names_file.read())

cols = """duration,
protocol_type,
service,
flag,
src_bytes,
dst_bytes,
land,
wrong_fragment,
urgent,
hot,
num_failed_logins,
logged_in,
num_compromised,
root_shell,
su_attempted,
num_root,
num_file_creations,
num_shells,
num_access_files,
num_outbound_cmds,
is_host_login,
is_guest_login,
count,
srv_count,
serror_rate,
srv_serror_rate,
rerror_rate,
srv_rerror_rate,
same_srv_rate,
diff_srv_rate,
srv_diff_host_rate,
dst_host_count,
dst_host_srv_count,
dst_host_same_srv_rate,
dst_host_diff_srv_rate,
dst_host_same_src_port_rate,
dst_host_srv_diff_host_rate,
dst_host_serror_rate,
dst_host_srv_serror_rate,
dst_host_rerror_rate,
dst_host_srv_rerror_rate"""

# Split the comma+newline separated feature names into an ordered list,
# then append the label column at the end (KDD rows end with the attack label).
columns = [name.strip() for name in cols.split(',\n') if name.strip()]
columns.append('target')
print(len(columns))

# Echo the attack-name -> category mapping file for reference in the run log.
with open("../data/training_attack_types") as mapping_file:
    print(mapping_file.read())

# Updated attack types dictionary with all known attack types
# Updated attack types dictionary with all known attack types.
# Built from a category -> member-labels table for readability; the resulting
# flat mapping is identical to listing every label individually.
_category_members = {
    'normal': ['normal'],
    'dos': ['back', 'land', 'neptune', 'pod', 'smurf', 'teardrop',
            'apache2', 'udpstorm', 'processtable', 'worm',
            'mailbomb'],  # mailbomb: newly added mail-bomb attack type
    'probe': ['ipsweep', 'nmap', 'portsweep', 'satan', 'mscan', 'saint'],
    'u2r': ['buffer_overflow', 'loadmodule', 'perl', 'rootkit',
            'httptunnel', 'ps', 'sqlattack', 'xterm'],
    'r2l': ['ftp_write', 'guess_passwd', 'imap', 'multihop', 'phf', 'spy',
            'warezclient', 'warezmaster', 'named', 'sendmail',
            'snmpgetattack', 'snmpguess', 'xlock', 'xsnoop'],
}
attacks_types = {label: category
                 for category, labels in _category_members.items()
                 for label in labels}

# Load the corrected KDD test set; pandas decompresses the .gz transparently.
data_path = "../data/corrected.gz"
df = pd.read_csv(data_path, names=columns)


# Adding Attack Type column with error handling
def map_attack_type(attack):
    """Translate a raw attack label into its broad category.

    Labels in the corrected KDD file carry a trailing dot (e.g. 'smurf.'),
    which is removed before the lookup. Labels missing from attacks_types
    are reported and mapped to 'unknown'.
    """
    label = attack.strip('.')  # drop surrounding dots: 'smurf.' -> 'smurf'
    # Known attack label: return its category directly.
    category = attacks_types.get(label)
    if category is not None:
        return category
    # Unknown attack label: warn and fall back to a catch-all bucket.
    print(f"Warning: Unknown attack type '{label}' found, mapping to 'unknown'")
    return 'unknown'


# Derive the coarse attack category for every row from the raw label.
df['Attack Type'] = df['target'].apply(map_attack_type)
# NOTE(review): the bare expressions below are notebook-style inspection;
# in a plain script their results are discarded (wrap in print(...) to see them).
df.head()

df.shape

df.isnull().sum()

# Finding categorical features.
# Use the public select_dtypes API rather than the private/deprecated
# DataFrame._get_numeric_data() helper; both yield the numeric column index.
num_cols = df.select_dtypes(include='number').columns

# Everything non-numeric is treated as categorical; the two label columns
# are handled separately, so exclude them from the categorical list.
cate_cols = list(set(df.columns) - set(num_cols))
cate_cols.remove('target')
cate_cols.remove('Attack Type')

# drop columns with NaN
df = df.dropna(axis='columns')

# Keep only numeric columns that take more than one distinct value:
# constant columns carry no information and make corr() produce NaNs.
informative_numeric = [
    col for col in df.columns
    if pd.api.types.is_numeric_dtype(df[col]) and df[col].nunique() > 1
]
ndf = df[informative_numeric]

# Pairwise correlation of the retained numeric features.
corr = ndf.corr()

plt.figure(figsize=(15, 12))
sns.heatmap(corr)
plt.show()

# Drop features that are highly correlated with a retained column
# (identified from the heatmap above) — one drop call instead of eight.
redundant_features = [
    'num_root',
    'srv_serror_rate',
    'srv_rerror_rate',
    'dst_host_srv_serror_rate',
    'dst_host_serror_rate',
    'dst_host_rerror_rate',
    'dst_host_srv_rerror_rate',
    'dst_host_same_srv_rate',
]
df.drop(columns=redundant_features, inplace=True)

# protocol_type feature mapping: encode the three protocols as small ints.
df['protocol_type'] = df['protocol_type'].map({'icmp': 0, 'tcp': 1, 'udp': 2})

# flag feature mapping: encode each TCP connection status flag as an int.
flag_codes = {'SF': 0, 'S0': 1, 'REJ': 2, 'RSTR': 3, 'RSTO': 4, 'SH': 5,
              'S1': 6, 'S2': 7, 'RSTOS0': 8, 'S3': 9, 'OTH': 10}
df['flag'] = df['flag'].map(flag_codes)

# service has too many distinct levels to encode this way; drop it.
df.drop(columns=['service'], inplace=True)

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler

# Drop the raw label column; 'Attack Type' is the prediction target from here on.
df = df.drop(['target'], axis=1)
print(df.shape)

# Separate the target column from the feature matrix.
y = df[['Attack Type']]
X = df.drop(['Attack Type'], axis=1)

# Scale every feature into [0, 1].
scaler = MinMaxScaler()
X = scaler.fit_transform(X)

# Hold out a third of the rows for testing; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
print(X_train.shape, X_test.shape)
print(y_train.shape, y_test.shape)


# Function to calculate all metrics
def evaluate_model(model, X_train, y_train, X_test, y_test):
    # Training
    start_time = time.time()
    model.fit(X_train, y_train.values.ravel())
    train_time = time.time() - start_time

    # Predictions
    start_time = time.time()
    y_train_pred = model.predict(X_train)
    y_test_pred = model.predict(X_test)
    test_time = time.time() - start_time

    # Calculate metrics
    metrics = {
        'train_time': train_time,
        'test_time': test_time,
        'train_accuracy': accuracy_score(y_train, y_train_pred),
        'test_accuracy': accuracy_score(y_test, y_test_pred),
        'precision': precision_score(y_test, y_test_pred, average='weighted', zero_division=0),
        'recall': recall_score(y_test, y_test_pred, average='weighted', zero_division=0),
        'f1': f1_score(y_test, y_test_pred, average='weighted', zero_division=0),
        'classification_report': classification_report(y_test, y_test_pred, zero_division=0)
    }
    return metrics


# Initialize models
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC

# Candidate classifiers, keyed by display name (insertion order is kept
# throughout the evaluation and reporting below).
models = {}
models['Naive Bayes'] = GaussianNB()
models['Decision Tree'] = DecisionTreeClassifier(criterion="entropy", max_depth=4)
models['Random Forest'] = RandomForestClassifier(n_estimators=30)
models['Logistic Regression'] = LogisticRegression(max_iter=1000)
models['SVM'] = SVC(kernel='linear', max_iter=1000)

# Evaluate all models, collecting each metrics dict under the model's name.
results = {}
for model_name, clf in models.items():
    print(f"\nEvaluating {model_name}...")
    results[model_name] = evaluate_model(clf, X_train, y_train, X_test, y_test)
    print(f"Completed {model_name} evaluation")

# Print results: one block of labeled metrics per model.
print("\n\n=== Model Evaluation Results ===")
# (display label, metrics-dict key, unit suffix) for each scalar metric.
_metric_rows = (
    ('Training Time', 'train_time', 's'),
    ('Testing Time', 'test_time', 's'),
    ('Train Accuracy', 'train_accuracy', ''),
    ('Test Accuracy', 'test_accuracy', ''),
    ('Precision', 'precision', ''),
    ('Recall', 'recall', ''),
    ('F1 Score', 'f1', ''),
)
for model_name, metrics in results.items():
    print(f"\n{model_name} Results:")
    for label, key, suffix in _metric_rows:
        print(f"{label}: {metrics[key]:.4f}{suffix}")
    print("\nClassification Report:")
    print(metrics['classification_report'])

# Visualization of comparison: six bar charts in a 3x2 grid, one per metric.
# The six near-identical subplot stanzas are collapsed into a data-driven
# loop; the rendered figure is unchanged.
plt.figure(figsize=(20, 15))

# (metrics key, subplot title, y-axis label, y-axis limits) for each panel.
_panels = [
    ('train_time', 'Training Time Comparison', 'Seconds', None),
    ('test_time', 'Testing Time Comparison', 'Seconds', None),
    ('train_accuracy', 'Training Accuracy Comparison', None, (0.7, 1.0)),
    ('test_accuracy', 'Testing Accuracy Comparison', None, (0.7, 1.0)),
    ('precision', 'Precision Comparison (weighted)', None, (0.7, 1.0)),
    ('f1', 'F1 Score Comparison (weighted)', None, (0.7, 1.0)),
]

names = list(results.keys())
for position, (key, title, ylabel, ylim) in enumerate(_panels, start=1):
    plt.subplot(3, 2, position)
    plt.bar(names, [results[name][key] for name in names])
    plt.title(title)
    if ylabel is not None:
        plt.ylabel(ylabel)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xticks(rotation=45)

plt.tight_layout()
plt.show()

# Print summary table: one aligned row per model (same layout as the
# original .format-based version, rewritten with f-strings).
print("\nModel Comparison Summary:")
print("{:<20} {:<15} {:<15} {:<15} {:<15} {:<15} {:<15}".format(
    'Model', 'Train Time', 'Test Time', 'Train Acc', 'Test Acc', 'Precision', 'F1 Score'))
print("-" * 110)
for model_name, m in results.items():
    print(f"{model_name:<20} {m['train_time']:<15.4f} {m['test_time']:<15.4f} "
          f"{m['train_accuracy']:<15.4f} {m['test_accuracy']:<15.4f} "
          f"{m['precision']:<15.4f} {m['f1']:<15.4f}")