from PyQt6.QtCore import pyqtSignal
from matplotlib import pyplot as plt
from torch.utils.data import Dataset
import numpy as np
import torch


def normalization(df, col, minmax_scale):
    """Scale the listed DataFrame columns in place with the given scaler.

    Each column named in `col` is reshaped to a ``(n, 1)`` array, passed
    through ``minmax_scale.fit_transform``, and written back as a flat
    1-D column.  The scaler is re-fit for every column, so each column is
    scaled against its own min/max independently.

    Args:
        df: pandas DataFrame; mutated in place and also returned.
        col: iterable of column labels to scale.
        minmax_scale: scaler object exposing ``fit_transform`` on a 2-D
            ``(n_samples, 1)`` array (e.g. sklearn's MinMaxScaler).

    Returns:
        The same DataFrame, with the listed columns scaled.
    """
    for name in col:
        values = np.asarray(df[name])
        # fit_transform expects a 2-D (n_samples, n_features) array and
        # returns one; ravel() flattens the (n, 1) result back to 1-D so
        # the column assignment is valid (modern pandas rejects assigning
        # a 2-D array to a single column).
        df[name] = minmax_scale.fit_transform(values.reshape(-1, 1)).ravel()
    return df


class LoadData(Dataset):
    """Thin torch Dataset over a pandas feature table and a label series.

    Rows are selected positionally with ``.iloc`` and converted to torch
    tensors on each access, so no copy of the data is made up front.
    """

    def __init__(self, X, y):
        self.X = X  # feature rows, indexed positionally
        self.y = y  # labels, aligned with the rows of X

    def __len__(self):
        # Dataset size == number of feature rows.
        return len(self.X)

    def __getitem__(self, index):
        row = self.X.iloc[index]
        label = self.y.iloc[index]
        return torch.tensor(row), torch.tensor(label)


def train(
    model,
    optimizer,
    loss_fn,
    epochs,
    train_dataloader,
    device,
    X_dimension,
    train_data,
    process: "pyqtSignal",
    process2: "pyqtSignal",
):
    """Run a training loop, streaming log text and progress via Qt signals.

    Every 100th batch the loss is recorded and a progress percentage is
    emitted on `process2`; human-readable log lines go to `process`.

    Args:
        model: callable model; fed input of shape (batch, 1, X_dimension).
        optimizer: torch optimizer over `model`'s parameters.
        loss_fn: loss taking (logits, long labels).
        epochs: number of passes over `train_dataloader`.
        train_dataloader: yields (X, y) batches.
        device: torch device the batches are moved to.
        X_dimension: feature count per sample; used to reshape X.
        train_data: full training set; only its len() is used for progress.
        process: signal for text log lines (annotation is a lazy string so
            the function can be defined without PyQt6 importable).
        process2: signal for integer progress percentages.

    Returns:
        (losses, logged_steps): loss values sampled every 100 batches and
        how many samples were taken.
    """
    losses = []
    logged_steps = 0  # renamed from `iter` to stop shadowing the builtin

    for epoch in range(epochs):
        process.emit(f"epoch:----- {epoch+1} ------")
        for i, (X, y) in enumerate(train_dataloader):
            # Move to the target device and normalize dtypes.
            X, y = X.to(device).to(torch.float32), y.to(device).to(torch.float32)
            # Model expects a (batch, 1, X_dimension) input.
            X = X.reshape(X.shape[0], 1, X_dimension)
            y_pred = model(X)
            loss = loss_fn(y_pred, y.long())

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if i % 100 == 0:
                process.emit(f"loss: ===>>>: {(loss.item()):.6}")
                logged_steps += 1
                losses.append(loss.item())
                # NOTE(review): progress multiplies the epoch fraction by the
                # in-epoch fraction, so it resets/undershoots between epochs;
                # kept as-is to preserve the existing progress-bar behavior.
                process2.emit(
                    int(
                        ((epoch + 1) / epochs)
                        * ((i + 1) * len(X) / len(train_data))
                        * 100
                    )
                )
    process2.emit(100)
    return losses, logged_steps


def test(
    model,
    loss_fn,
    test_dataloader,
    device,
    X_dimension,
    test_data,
    process: "pyqtSignal",
    process2: "pyqtSignal",
):
    """Evaluate `model` on `test_dataloader`, reporting via Qt signals.

    Computes running accuracy (argmax over logits vs. integer labels) and
    average loss under ``torch.no_grad()``.  Text lines go to `process`;
    integer progress percentages go to `process2` (a trailing sentinel
    value of 999 is emitted — presumably signals completion to the
    receiver; confirm against the slot it is connected to).

    Args:
        model: callable model; fed input of shape (batch, 1, X_dimension).
        loss_fn: loss taking (logits, long labels).
        test_dataloader: yields (X, y) batches.
        device: torch device the batches are moved to.
        X_dimension: feature count per sample; used to reshape X.
        test_data: full test set; only its len() is used for progress.
        process: signal for text log lines (lazy string annotation so the
            function can be defined without PyQt6 importable).
        process2: signal for integer progress values.
    """
    positive = 0  # correctly classified samples
    negative = 0  # misclassified samples
    with torch.no_grad():
        batches = 0  # renamed from `iter` to stop shadowing the builtin
        loss_sum = 0
        for i, (X, y) in enumerate(test_dataloader):
            X, y = X.to(device).to(torch.float32), y.to(device).to(torch.float32)
            X = X.reshape(X.shape[0], 1, X_dimension)
            y_pred = model(X)
            loss = loss_fn(y_pred, y.long())
            loss_sum += loss.item()
            batches += 1
            # Predicted class is the argmax over the logit row.
            for pred, label in zip(y_pred, y):
                if torch.argmax(pred) == label:
                    positive += 1
                else:
                    negative += 1
            process.emit(
                f"test acc ===>>>: {(positive / (positive + negative)) * 100:.6} %"
            )
            process2.emit(int(((i + 1) * len(X) / len(test_data)) * 100))
    process2.emit(100)
    if batches == 0:
        # Guard: an empty dataloader previously crashed with ZeroDivisionError
        # when computing the averages below.
        process.emit("No test batches; nothing to evaluate.")
        return
    acc = positive / (positive + negative)
    avg_loss = loss_sum / batches
    process2.emit(999)
    process.emit(f"Accuracy: {acc}")
    process.emit(f"Average Loss: {avg_loss}")


def loss_value_plot(losses, iter):
    """Draw the sampled training-loss curve on a fresh matplotlib figure.

    Args:
        losses: recorded loss values, one per logged step.
        iter: number of logged steps; the x-axis runs from 1 to `iter`.

    The figure is created but neither shown nor saved — the caller
    decides how to display it.
    """
    plt.figure(figsize=(6.4, 4.8))
    plt.plot(list(range(1, iter + 1)), losses)
    plt.xlabel("Iterations (×100)")
    plt.ylabel("Loss Value")
