#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.svm import OneClassSVM
from sklearn.metrics import classification_report, accuracy_score, confusion_matrix
import matplotlib.pyplot as plt
import joblib


class EncoderLSTM(nn.Module):
    """MLP feature encoder followed by a single-step LSTM classification head.

    The encoder funnels ``input_dim`` features down to a 16-dimensional code
    through a fixed 128 -> 64 -> 32 -> 16 ReLU stack.  The code is then fed
    to the LSTM as a length-1 sequence, and the LSTM's last hidden output is
    projected to ``output_dim`` logits.

    NOTE(review): ``hidden_dim`` is accepted for interface compatibility but
    is not used anywhere in the network.
    """

    def __init__(self, input_dim, hidden_dim, output_dim, lstm_hidden_dim, num_layers):
        super(EncoderLSTM, self).__init__()

        # Fixed-width funnel MLP: input_dim -> 128 -> 64 -> 32 -> 16,
        # each Linear followed by a ReLU.
        widths = (input_dim, 128, 64, 32, 16)
        stages = []
        for n_in, n_out in zip(widths[:-1], widths[1:]):
            stages.append(nn.Linear(n_in, n_out))
            stages.append(nn.ReLU())
        self.encoder = nn.Sequential(*stages)

        # LSTM consumes the 16-d code; batch_first keeps (batch, seq, feat).
        self.lstm = nn.LSTM(16, lstm_hidden_dim, num_layers, batch_first=True)
        self.fc = nn.Linear(lstm_hidden_dim, output_dim)

    def forward(self, x):
        """Return class logits of shape (batch, output_dim) for input x of
        shape (batch, input_dim)."""
        code = self.encoder(x)
        # Present the code as a one-step sequence: (batch, 1, 16).
        seq_out, _ = self.lstm(code.unsqueeze(1))
        # Last (and only) time step -> final projection.
        return self.fc(seq_out[:, -1, :])


def main():
    """Train the EncoderLSTM classifier, fit a OneClassSVM anomaly detector
    on the learned encoder features, and report test-set metrics.

    Side effects:
        Reads 'dataset/gear_dataset.csv'.
        Writes 'scaler.pkl', 'oc_svm_model.pkl', and 'lstm_encoder_model.pth'.
    """
    X_train, X_test, y_train, y_test = _load_and_split('dataset/gear_dataset.csv')

    model = _train_classifier(X_train, y_train)

    oc_svm = _fit_anomaly_detector(model, X_train)

    _evaluate(model, oc_svm, X_test, y_test)

    print("Saving model...")
    torch.save(model.state_dict(), 'lstm_encoder_model.pth')


def _load_and_split(file_path):
    """Load the gear dataset, binarize labels, standardize the 8 DATA
    columns, and return (X_train, X_test, y_train, y_test) torch tensors.

    Persists the fitted StandardScaler to 'scaler.pkl' so inference can
    apply the identical transform.
    """
    data = pd.read_csv(file_path)

    # Collapse every class above 1 into class 1, leaving 0 and 1 untouched.
    # NOTE(review): assumes raw labels are non-negative numbers — confirm.
    data['Label'] = data['Label'].apply(lambda x: 1 if x > 1 else x)

    X = data[['DATA_0', 'DATA_1', 'DATA_2', 'DATA_3',
              'DATA_4', 'DATA_5', 'DATA_6', 'DATA_7']].values
    y = data['Label'].values

    scaler = StandardScaler()
    X = scaler.fit_transform(X)
    joblib.dump(scaler, 'scaler.pkl')

    # Fixed random_state keeps the 70/30 split reproducible across runs.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.3, random_state=42)

    return (torch.tensor(X_train, dtype=torch.float32),
            torch.tensor(X_test, dtype=torch.float32),
            torch.tensor(y_train, dtype=torch.long),
            torch.tensor(y_test, dtype=torch.long))


def _train_classifier(X_train, y_train, num_epochs=100, lr=0.001):
    """Build an EncoderLSTM and train it with full-batch Adam +
    cross-entropy for `num_epochs` epochs; return the trained model."""
    model = EncoderLSTM(input_dim=X_train.shape[1], hidden_dim=64,
                        output_dim=2, lstm_hidden_dim=64, num_layers=1)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=lr)

    for epoch in range(num_epochs):
        model.train()
        optimizer.zero_grad()

        loss = criterion(model(X_train), y_train)
        loss.backward()
        optimizer.step()

        if (epoch + 1) % 10 == 0:
            print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.6f}')

    return model


def _fit_anomaly_detector(model, X_train):
    """Fit a OneClassSVM on the frozen encoder's training features and
    persist it to 'oc_svm_model.pkl'; return the fitted detector."""
    print("Training OneClassSVM anomaly detection model...")
    model.eval()
    with torch.no_grad():
        # Use only the MLP encoder output (16-d code), not the LSTM head.
        encoded = model.encoder(X_train).numpy()

    oc_svm = OneClassSVM(gamma=0.0005, nu=0.05)
    oc_svm.fit(encoded)

    joblib.dump(oc_svm, 'oc_svm_model.pkl')
    return oc_svm


def _evaluate(model, oc_svm, X_test, y_test):
    """Print classification report, accuracy, and confusion matrix for the
    OneClassSVM's predictions on the encoded test features."""
    model.eval()
    with torch.no_grad():
        encoded = model.encoder(X_test).numpy()

    # OneClassSVM emits -1 for outliers and +1 for inliers; remap to the
    # dataset's binary labels (-1 -> 0, +1 -> 1).
    y_pred = [0 if p == -1 else 1 for p in oc_svm.predict(encoded)]

    print("Classification Report:")
    print(classification_report(y_test, y_pred))

    print("Accuracy:", accuracy_score(y_test, y_pred))

    print("Confusion Matrix:")
    print(confusion_matrix(y_test, y_pred))


# Script entry point: run the full train/evaluate/save pipeline only when
# executed directly, not when imported as a module.
if __name__ == "__main__":
    main()
