import numpy as np
import torch
import cv2
import os
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
import torch.nn as nn
from sklearn.metrics import f1_score

# Run on GPU when one is available; inputs are moved to this device in train/test.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Training hyperparameters.
max_epoch = 2  # number of full passes over the training set
learning_rate = 1e-3  # Adam step size
batch_size = 32  # samples per DataLoader batch

# Preprocessing pipeline applied per image in CatDogDataset.__getitem__:
# HWC uint8 array -> float tensor in [0, 1] -> 224x224 -> scaled to [-1, 1].
# NOTE(review): images are read with cv2 (BGR channel order); the symmetric
# per-channel Normalize makes channel order irrelevant here.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Resize((224, 224)),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

# Dataset root; expects Cat/ and Dog/ subfolders (Kaggle PetImages layout).
root = "data/PetImages"

def read_data():
    """Collect (path, label) lists for the Cat/Dog image set.

    On the first run, scans root/Cat and root/Dog, keeps only files that
    cv2 can actually decode (corrupt images are printed and skipped), and
    caches the surviving paths/labels in image_files.txt / labels.txt.
    Later runs just load the cached index instead of re-scanning.

    Returns:
        (list[str], list[int]): image file paths and labels (0=Cat, 1=Dog).
    """
    index_path = os.path.join(root, "image_files.txt")
    labels_path = os.path.join(root, "labels.txt")

    if os.path.exists(index_path):
        # Cached index exists: read it back and return immediately.
        with open(index_path, "r") as f:
            valid_path_list = [line.strip("\n") for line in f.readlines()]
        with open(labels_path, "r") as f:
            valid_label_list = [int(line.strip("\n")) for line in f.readlines()]
        return valid_path_list, valid_label_list

    # First run: enumerate both class folders, cats first (label 0), dogs second (label 1).
    candidates = []
    for folder, label in (("Cat", 0), ("Dog", 1)):
        for name in os.listdir(os.path.join(root, folder)):
            candidates.append((os.path.join(root, folder, name), label))

    valid_path_list = []
    valid_label_list = []
    total = len(candidates)
    for i, (file_path, label) in enumerate(candidates):
        # Validate each file by decoding it; unreadable files are reported.
        if cv2.imread(file_path, cv2.IMREAD_UNCHANGED) is None:
            print(file_path)
        else:
            valid_path_list.append(file_path)
            valid_label_list.append(label)

        print("{:d}/{:d}".format(i, total))

    # Persist the validated index so the scan only ever happens once.
    with open(index_path, "w") as f:
        for file_path in valid_path_list:
            f.write(file_path + "\n")

    with open(labels_path, "w") as f:
        for label in valid_label_list:
            f.write(str(label) + "\n")

    return valid_path_list, valid_label_list

def split_data(path_list, label_list, train_ratio=0.8):
    """Shuffle the samples and split them into train/validation sets.

    Args:
        path_list: image file paths.
        label_list: integer labels aligned with path_list.
        train_ratio: fraction of samples assigned to the training split.

    Returns:
        (train_paths, train_labels, val_paths, val_labels) as numpy arrays.
    """
    n_train = int(len(label_list) * train_ratio)

    # One shared permutation keeps paths and labels aligned after shuffling.
    order = np.arange(len(path_list))
    np.random.shuffle(order)

    shuffled_paths = np.array(path_list)[order]
    shuffled_labels = np.array(label_list)[order]

    return (shuffled_paths[:n_train],
            shuffled_labels[:n_train],
            shuffled_paths[n_train:],
            shuffled_labels[n_train:])

# Dataset
class CatDogDataset(Dataset):
    """Dataset yielding (preprocessed image tensor, label tensor) pairs.

    Images are loaded lazily from disk with OpenCV and pushed through the
    module-level `transform` pipeline; labels come back as shape-(1,)
    LongTensors.
    """

    def __init__(self, path_list, label_list):
        super(CatDogDataset, self).__init__()
        self.path_list = path_list
        self.label_list = label_list

    def __len__(self):
        return len(self.path_list)

    def __getitem__(self, index):
        # Decode as a 3-channel image and apply the shared preprocessing.
        image = transform(cv2.imread(self.path_list[index], cv2.IMREAD_COLOR))
        return image, torch.LongTensor([self.label_list[index]])

# model
class MyModel(nn.Module):
    """Minimal MLP binary classifier: flatten -> 200 hidden units -> sigmoid.

    Input is expected as (batch, 3, 224, 224); output is a (batch, 1) tensor
    of probabilities in [0, 1].
    """

    def __init__(self):
        super(MyModel, self).__init__()
        self.fc1 = nn.Linear(224 * 224 * 3, 200)
        self.fc2 = nn.Linear(200, 1)

    def forward(self, x):
        # Flatten each image into a single 224*224*3 feature vector.
        flat = x.view(x.shape[0], -1)
        hidden = torch.relu(self.fc1(flat))
        # Sigmoid squashes the single logit into a probability.
        return torch.sigmoid(self.fc2(hidden))

def train(model, train_loader, val_loader):
    """Train the model with BCE loss and Adam, checkpointing the best model.

    After each epoch the model is scored on val_loader via test(); whenever
    the macro-F1 improves, the weights are saved to cat_dog_model.pt.
    """
    criterion = torch.nn.BCELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    best_f1 = 0
    for epoch in range(max_epoch):
        for step, (batch_image, batch_label) in enumerate(train_loader):
            # Re-enter train mode each step (test() flips the model to eval).
            model.train()
            batch_image = batch_image.to(device)
            batch_label = batch_label.to(device)

            # BCE expects float targets, so cast the integer labels.
            loss = criterion(model(batch_image), batch_label.to(torch.float32))

            # Standard backprop update.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if step % 10 == 0:
                print("Epoch: {:d}, Step: {:d}, Loss: {:.4f}".format(epoch, step, loss.item()))

        # Keep only the checkpoint with the best validation macro-F1 so far.
        f1 = test(model, val_loader)
        if f1 > best_f1:
            best_f1 = f1
            torch.save(model.state_dict(), "cat_dog_model.pt")

def test(model, val_loader):
    """Evaluate the model on val_loader and return the macro F1 score.

    The model emits one sigmoid probability per sample (shape (B, 1));
    probabilities are thresholded at 0.5 into hard 0/1 labels because
    sklearn's f1_score requires discrete class labels, not continuous
    scores.
    """
    model.eval()
    golden_labels = []
    predicted_labels = []

    # Inference only: disable autograd to avoid building graphs and wasting memory.
    with torch.no_grad():
        for step, batch in enumerate(val_loader):
            batch_image, batch_label = batch

            batch_image = batch_image.to(device)
            batch_label = batch_label.to(device)

            pred_label = model(batch_image)

            # BUG FIX: the original passed raw probabilities (and (B, 1)-shaped
            # nested lists) to f1_score, which rejects continuous targets.
            # Threshold at 0.5 and flatten both tensors to 1-D label lists.
            golden_labels.extend(batch_label.view(-1).cpu().numpy().tolist())
            predicted_labels.extend((pred_label >= 0.5).long().view(-1).cpu().numpy().tolist())

    f1 = f1_score(golden_labels, predicted_labels, average="macro")
    return f1

if __name__ == '__main__':
    # Build (or load) the validated file/label index, then split 80/20.
    valid_path_list, valid_label_list = read_data()
    train_path_list, train_label_list, val_path_list, val_label_list = split_data(valid_path_list, valid_label_list, 0.8)

    # Wrap the splits in datasets/loaders; only the training data is shuffled.
    train_dataset = CatDogDataset(train_path_list, train_label_list)
    val_dataset = CatDogDataset(val_path_list, val_label_list)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)

    # BUG FIX: the model must live on the same device as the input batches
    # (train/test move inputs to `device`); without .to(device) the forward
    # pass crashes with a device mismatch on any CUDA machine.
    model = MyModel().to(device)

    train(model, train_loader, val_loader)

