import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from PIL import Image
import glob
from torch.optim import Adam


# Data Handling
# Data Handling
class HKDataset(Dataset):
    """Paired image / segmentation-mask dataset.

    Parameters
    ----------
    images : list of file paths to the input images.
    annotations : list of file paths to the matching mask images,
        in the same order as ``images``.
    transform : optional callable applied to BOTH the image and the mask;
        expected to end in ``ToTensor`` (the mask is indexed and squeezed
        as a tensor below).
    """

    def __init__(self, images, annotations, transform=None):
        self.images = images
        self.annotations = annotations
        self.transform = transform

    def __len__(self):
        return len(self.images)

    def __getitem__(self, index):
        # Force modes explicitly: the model expects 3-channel input, and a
        # palette/RGB mask would otherwise come through as multi-channel.
        image = Image.open(self.images[index]).convert("RGB")
        annotation = Image.open(self.annotations[index]).convert("L")

        if self.transform:
            image = self.transform(image)
            annotation = self.transform(annotation)

        # Binarize: any non-background value becomes foreground class 1.
        # NOTE(review): a bilinear Resize in `transform` can blur mask edges
        # before this threshold — nearest interpolation for the mask would be
        # safer; confirm against the transform actually used.
        annotation[annotation > 0] = 1
        annotation = torch.squeeze(annotation).long()

        return image, annotation


# Neural Network Blocks
# Neural Network Blocks
class ConvBlock(nn.Module):
    """Two 3x3 convolutions, each followed by batch-norm and ReLU.

    Spatial size is preserved (padding=1); channel count goes from
    ``in_channels`` to ``out_channels`` after the first convolution.
    """

    def __init__(self, in_channels, out_channels):
        super(ConvBlock, self).__init__()
        layers = []
        channels = in_channels
        # Same module order as a hand-written Sequential, so parameter
        # names/indices are unchanged.
        for _ in range(2):
            layers.append(nn.Conv2d(channels, out_channels, 3, padding=1))
            layers.append(nn.BatchNorm2d(out_channels))
            layers.append(nn.ReLU(inplace=True))
            channels = out_channels
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.conv(x)


class AttentionBlock(nn.Module):
    """Additive attention gate: re-weights skip features ``x`` using gate ``g``.

    ``F_g``/``F_l`` are the channel counts of the gate and skip inputs;
    ``F_int`` is the shared intermediate width. Output has ``x``'s shape.
    """

    def __init__(self, F_g, F_l, F_int):
        super(AttentionBlock, self).__init__()
        # 1x1 projections of the gating signal and the skip connection
        # into a common intermediate space.
        self.W_g = nn.Sequential(
            nn.Conv2d(F_g, F_int, kernel_size=1),
            nn.BatchNorm2d(F_int),
        )
        self.W_x = nn.Sequential(
            nn.Conv2d(F_l, F_int, kernel_size=1),
            nn.BatchNorm2d(F_int),
        )
        # Collapse to a single-channel attention map in (0, 1).
        self.psi = nn.Sequential(
            nn.Conv2d(F_int, 1, kernel_size=1),
            nn.BatchNorm2d(1),
            nn.Sigmoid(),
        )

    def forward(self, g, x):
        combined = F.relu(self.W_g(g) + self.W_x(x))
        attention = self.psi(combined)
        return x * attention


# DAFNet Architecture
class DAFNet(nn.Module):
    def __init__(self, in_channel=3, out_channel=2):
        super(DAFNet, self).__init__()
        self.encoder1 = ConvBlock(in_channel, 64)
        self.encoder2 = ConvBlock(64, 128)
        self.encoder3 = ConvBlock(128, 256)
        self.encoder4 = ConvBlock(256, 512)
        self.pool = nn.MaxPool2d(2, 2)

        self.att1 = AttentionBlock(256, 256, 128)

        self.up1 = nn.ConvTranspose2d(512, 256, kernel_size=2, stride=2)
        self.decoder1 = ConvBlock(512, 256)
        self.decoder2 = ConvBlock(384, 128)
        self.decoder3 = ConvBlock(192, 64)
        self.final = nn.Conv2d(64, out_channel, kernel_size=1)

    def forward(self, x):
        e1 = self.encoder1(x)
        p1 = self.pool(e1)
        e2 = self.encoder2(p1)
        p2 = self.pool(e2)
        e3 = self.encoder3(p2)
        p3 = self.pool(e3)
        e4 = self.encoder4(p3)

        up1 = self.up1(e4)
        att1 = self.att1(up1, e3)
        d1 = self.decoder1(torch.cat([up1, att1], dim=1))

        up2 = F.interpolate(d1, scale_factor=2, mode="bilinear", align_corners=True)
        d2 = self.decoder2(torch.cat([up2, e2], dim=1))

        up3 = F.interpolate(d2, scale_factor=2, mode="bilinear", align_corners=True)
        d3 = self.decoder3(torch.cat([up3, e1], dim=1))

        out = self.final(d3)
        return out


# Main Training and Evaluation Loop
def fit(epoch, model, train_loader, test_loader, device, optimizer, loss_fn):
    """Run one training epoch followed by one evaluation pass.

    Parameters
    ----------
    epoch : 0-based epoch index (used only for the progress printout).
    model : network to train; its parameters are updated in place.
    train_loader / test_loader : DataLoaders yielding (images, labels) batches.
    device : torch.device each batch is moved to.
    optimizer : optimizer stepping ``model``'s parameters.
    loss_fn : criterion comparing model outputs to integer labels.

    Returns
    -------
    tuple : (train_loss, train_accuracy, test_loss, test_accuracy); losses
        are summed over batches, accuracies are percentages over elements.
    """
    model.train()
    train_loss, train_correct, train_total = 0, 0, 0

    for images, labels in train_loader:
        images, labels = images.to(device), labels.to(device)
        outputs = model(images)
        loss = loss_fn(outputs, labels)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # detach() instead of the legacy `.data` attribute: same values,
        # without silently bypassing autograd's correctness checks.
        predicted = outputs.detach().argmax(dim=1)
        train_total += labels.nelement()  # Total number of pixel elements in the batch
        train_correct += (predicted == labels).sum().item()
        train_loss += loss.item()

    # Guard against an empty loader to avoid ZeroDivisionError.
    train_accuracy = 100 * train_correct / train_total if train_total else 0.0

    model.eval()
    test_loss, test_correct, test_total = 0, 0, 0
    with torch.no_grad():
        for images, labels in test_loader:
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            loss = loss_fn(outputs, labels)

            predicted = outputs.argmax(dim=1)
            test_total += (
                labels.nelement()
            )  # Total number of pixel elements in the batch
            test_correct += (predicted == labels).sum().item()
            test_loss += loss.item()

    test_accuracy = 100 * test_correct / test_total if test_total else 0.0

    print(
        f"Epoch {epoch+1}: Train Loss: {train_loss:.4f}, Train Acc: {train_accuracy:.2f}%, Test Loss: {test_loss:.4f}, Test Acc: {test_accuracy:.2f}%"
    )

    return train_loss, train_accuracy, test_loss, test_accuracy


transform = transforms.Compose([transforms.Resize((512, 512)), transforms.ToTensor()])


def main():
    """Train DAFNet on the HK dataset for 10 epochs and save the weights."""
    # Masks are named `<stem>_matte.TIF`; derive each image path from its
    # mask path. sorted() makes the dataset order deterministic — glob
    # returns results in arbitrary, filesystem-dependent order.
    train_annotations = sorted(glob.glob("./train/*_matte.TIF"))
    train_images = [x.replace("_matte.TIF", ".TIF") for x in train_annotations]

    test_annotations = sorted(glob.glob("./test/*_matte.TIF"))
    test_images = [x.replace("_matte.TIF", ".TIF") for x in test_annotations]

    train_ds = HKDataset(train_images, train_annotations, transform)
    test_ds = HKDataset(test_images, test_annotations, transform)

    train_loader = DataLoader(train_ds, batch_size=8, shuffle=True)
    test_loader = DataLoader(test_ds, batch_size=8)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = DAFNet().to(device)
    optimizer = Adam(model.parameters(), lr=0.001)
    # CrossEntropyLoss over per-pixel logits; labels are 0/1 class indices.
    loss_fn = nn.CrossEntropyLoss()

    for epoch in range(10):
        fit(epoch, model, train_loader, test_loader, device, optimizer, loss_fn)

    torch.save(model.state_dict(), "./dafnet.pth")


if __name__ == "__main__":
    main()
