import numpy as np
import torch
from torchvision.datasets import MNIST
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import cv2
import torch.nn as nn
from sklearn.metrics import f1_score

from nlp2024.linear_numpy import max_epoch
from nlp2024.rnn_say_hello import optimizer

# Select GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Hyperparameters.
batch_size = 32
learning_rate = 1e-3
max_epochs = 2

# Renamed from `transforms` so the torchvision.transforms module is not
# shadowed by the composed pipeline object.
# NOTE(review): mean/std look like rounded values; the canonical MNIST
# statistics are (0.1307,), (0.3081,) -- confirm intent before changing.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1,), (0.3,)),  # explicit 1-element tuples (single channel)
])

train_dataset = MNIST(
    "./data/mnist",
    train=True,
    transform=transform,
    download=True,
)

test_dataset = MNIST(
    "./data/mnist",
    train=False,
    transform=transform,
    download=True,
)

# Shuffle only the training split; evaluation order does not matter.
train_loader = DataLoader(
    dataset=train_dataset,
    batch_size=batch_size,
    shuffle=True
)

test_loader = DataLoader(
    dataset=test_dataset,
    batch_size=batch_size,
    shuffle=False
)

# Debug helper (disabled): display the first training image with OpenCV
# to sanity-check the data pipeline. Uncomment to use.
# for step, batch in enumerate(train_loader):
#     x, y = batch
#     image = x[0] # [1, 28, 28] torch tensor
#     image = image.numpy()
#     image = np.transpose(image, (1, 2, 0))
#     cv2.imshow("image", image)
#     cv2.waitKey(0)
#
#     break

class Model(nn.Module):
    """Three-layer MLP classifier for 28x28 grayscale digits (10 classes)."""

    def __init__(self):
        super(Model, self).__init__()
        self.fc1 = nn.Linear(28 * 28, 400)
        self.fc2 = nn.Linear(400, 100)
        self.fc3 = nn.Linear(100, 10)
        self.dropout = torch.nn.Dropout(p=0.5)

    def forward(self, x):
        # x: [B, 1, 28, 28] -> flatten to [B, 784]
        x = x.view(x.shape[0], -1)
        x = self.fc1(x)
        x = torch.relu(x)
        x = self.fc2(x)
        # FIX: without an activation here, fc2 and fc3 compose into a single
        # linear map at eval time (dropout is identity then), wasting the
        # 100-unit hidden layer.
        x = torch.relu(x)
        x = self.dropout(x)
        x = self.fc3(x)  # raw logits [B, 10]; CrossEntropyLoss applies softmax
        return x

def train(model, dataloader):
    """Train `model` on `dataloader` for `max_epochs` epochs with Adam.

    Uses the module-level `device`, `learning_rate`, and `max_epochs`.
    Mutates `model` in place; prints the loss every 100 steps.
    """
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    model.train()  # enable dropout for training

    for epoch in range(max_epochs):
        for step, batch in enumerate(dataloader):
            x, y = batch
            x = x.to(device)
            y = y.to(device)

            # logits: [B, 10], y: [B] integer class labels
            logits = model(x)
            loss = criterion(logits, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if step % 100 == 0:
                # FIX: report the epoch 1-based so "2/2" (not "1/2") is the
                # final epoch of a 2-epoch run.
                print("Epoch: {:d}/{:d}, Step: {:d}, Loss:{:.4f}".format(epoch + 1, max_epochs, step, loss.item()))

def test(model, dataloader):
    """Evaluate `model` on `dataloader` and return the macro-averaged F1.

    Uses the module-level `device`. Runs under `torch.no_grad()` so no
    autograd state is accumulated during inference.
    """
    model.eval()  # disable dropout for evaluation
    predicted_y = []
    golden_y = []

    with torch.no_grad():
        for step, batch in enumerate(dataloader):
            x, y = batch
            x = x.to(device)
            y = y.to(device)

            pred_y = model(x)  # logits [B, 10]
            # logits -> predicted class index [B]
            _, pred_max_index = torch.max(pred_y, dim=1)
            predicted_y.extend(pred_max_index.cpu().numpy().tolist())
            golden_y.extend(y.cpu().numpy().tolist())

    # FIX: sklearn's signature is f1_score(y_true, y_pred); the original
    # passed predictions as y_true. Also, the default average='binary'
    # raises ValueError on 10-class labels, so use macro averaging.
    f1 = f1_score(golden_y, predicted_y, average="macro")
    return f1

if __name__ == '__main__':
    # Build the classifier, train it, then report macro F1 on the test split.
    mnist_model = Model().to(device)

    train(mnist_model, train_loader)
    score = test(mnist_model, test_loader)

    print("f1: {:.4f}".format(score))













