import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader

class SelfAttention(nn.Module):
    """Spatial self-attention (SAGAN-style) over a 2-D feature map.

    Attends between every pair of spatial positions, then blends the
    attended features back onto the input via a learnable scalar gate
    ``gamma`` initialised to zero, so the module starts as an identity
    mapping and attention is learned in gradually.

    Args:
        in_channels: number of channels of the input feature map; query/key
            are projected to ``in_channels // 8`` channels.
    """

    def __init__(self, in_channels):
        super(SelfAttention, self).__init__()
        # 1x1 convs project to query/key (reduced channels) and value.
        self.query_conv = nn.Conv2d(in_channels, in_channels // 8, kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels, in_channels // 8, kernel_size=1)
        self.value_conv = nn.Conv2d(in_channels, in_channels, kernel_size=1)
        self.softmax = nn.Softmax(dim=-1)
        # Fix: learnable residual gate (zero-init), as in SAGAN. Without it
        # the randomly-initialised attention output *replaces* the features
        # and destroys the signal from the convolutional path.
        self.gamma = nn.Parameter(torch.zeros(1))

    def forward(self, x):
        """Return ``x + gamma * attention(x)``; output shape equals input shape."""
        batch_size, channels, height, width = x.size()

        # Generate Query, Key, Value (N = H*W spatial positions)
        query = self.query_conv(x).view(batch_size, -1, height * width)  # (B, C//8, N)
        key = self.key_conv(x).view(batch_size, -1, height * width)      # (B, C//8, N)
        value = self.value_conv(x).view(batch_size, -1, height * width)  # (B, C, N)

        # Row i holds the weights position i places on every position.
        attention = self.softmax(torch.bmm(query.permute(0, 2, 1), key))  # (B, N, N)
        out = torch.bmm(value, attention.permute(0, 2, 1))  # (B, C, N)

        out = out.view(batch_size, channels, height, width)  # Reshape to original size
        # Gated residual: identity mapping at initialisation (gamma == 0).
        return self.gamma * out + x

class BasicBlock(nn.Module):
    """ResNet basic block (two 3x3 convs + shortcut) followed by self-attention.

    NOTE(review): the attention map is (H*W) x (H*W); on early high-resolution
    layers (e.g. 56x56 -> 3136x3136 per head) this is memory-hungry — confirm
    this is intended for every block.

    Args:
        in_channels: channels of the incoming feature map.
        out_channels: channels produced by both convs and the shortcut.
        stride: stride of the first conv (and of the projection shortcut).
    """

    def __init__(self, in_channels, out_channels, stride=1):
        super(BasicBlock, self).__init__()
        # Fix: bias=False on convs that feed a BatchNorm — BN's per-channel
        # shift makes the conv bias redundant (standard ResNet practice).
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)

        # Skip connection: identity when shapes match, else 1x1 projection.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_channels != out_channels:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels)
            )

        # Self-attention applied to the block's output.
        self.attention = SelfAttention(out_channels)

    def forward(self, x):
        out = torch.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)
        out = torch.relu(out)

        # Apply self-attention
        out = self.attention(out)
        return out


class ResNet18(nn.Module):
    """ResNet-18-style classifier built from attention-augmented BasicBlocks.

    Layout follows torchvision's ResNet-18: 7x7 stem, max-pool, four stages
    of two blocks each (64/128/256/512 channels), global average pool, FC.

    Args:
        num_classes: size of the final classification layer (Flowers102 -> 102).
    """

    def __init__(self, num_classes=102):
        super(ResNet18, self).__init__()
        self.in_channels = 64
        # Fix: bias=False — the following BatchNorm makes the conv bias redundant.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(64, 2, stride=1)
        self.layer2 = self._make_layer(128, 2, stride=2)
        self.layer3 = self._make_layer(256, 2, stride=2)
        self.layer4 = self._make_layer(512, 2, stride=2)
        self.fc = nn.Linear(512, num_classes)

    def _make_layer(self, out_channels, num_blocks, stride):
        """Stack ``num_blocks`` BasicBlocks; only the first may downsample."""
        # Fix: don't shadow the `stride` parameter with the loop variable.
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for block_stride in strides:
            layers.append(BasicBlock(self.in_channels, out_channels, block_stride))
            self.in_channels = out_channels  # next block consumes this stage's width
        return nn.Sequential(*layers)

    def forward(self, x):
        x = torch.relu(self.bn1(self.conv1(x)))
        x = self.pool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # Adaptive pooling makes the head independent of input resolution.
        x = torch.nn.functional.adaptive_avg_pool2d(x, (1, 1))
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x

# Hyperparameters
batch_size = 32
learning_rate = 0.001
EPOCH = 10  # number of training epochs

# Data preparation: resize to 224x224 (ImageNet-style input) and normalise
# with ImageNet channel statistics.
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])  # ImageNet normalization
])

# NOTE(review): dataset root is a hard-coded absolute path — adjust per machine.
# download=True fetches Flowers102 on first run (network I/O at import time).
train_dataset = datasets.Flowers102(root='/home/assist/data/Flowers102', split='train', transform=transform, download=True)
test_dataset = datasets.Flowers102(root='/home/assist/data/Flowers102', split='test', transform=transform, download=True)

# Shuffle only the training set; evaluation order does not matter.
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

# Initialize custom ResNet-18 model (Flowers102 has 102 classes)
model = ResNet18(num_classes=102)

# Move model to GPU if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Loss function and optimizer (train() and test() below read these globals)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# train
def train(epoch):
    """Run one training epoch over ``train_loader``, logging every 10 batches.

    Reads module-level globals: model, train_loader, optimizer, criterion,
    device. Loss and accuracy counters reset after each log line, so each
    printed figure covers only the last 10 batches.

    Args:
        epoch: zero-based epoch index, used only for log output.
    """
    model.train()
    running_loss = 0.0
    running_total = 0
    running_correct = 0
    for batch_idx, (inputs, target) in enumerate(train_loader):
        # Fix: move each batch to the device once — the original called
        # target.to(device) twice per batch (loss and accuracy paths).
        inputs = inputs.to(device)
        target = target.to(device)
        optimizer.zero_grad()

        # forward + backward + update
        outputs = model(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()

        # Accumulating loss for logging
        running_loss += loss.item()
        # Calculating accuracy (predicted class = arg-max over logits;
        # avoids the legacy `.data` attribute access).
        predicted = outputs.argmax(dim=1)
        running_total += target.size(0)
        running_correct += (predicted == target).sum().item()

        if batch_idx % 10 == 9:  # Print every 10 batches
            print('[Epoch %d, Batch %5d]: Loss: %.3f, Acc: %.2f %%' 
                  % (epoch + 1, batch_idx + 1, running_loss / 10, 100 * running_correct / running_total))
            running_loss = 0.0
            running_total = 0
            running_correct = 0

# test
def test(epoch):
    """Evaluate the model on ``test_loader``; print and return top-1 accuracy.

    Reads module-level globals: model, test_loader, device.

    Args:
        epoch: zero-based epoch index, used only for log output.

    Returns:
        Test-set accuracy as a percentage (float).
    """
    model.eval()
    num_correct = 0
    num_seen = 0
    # Gradients are not needed for evaluation.
    with torch.no_grad():
        for images, labels in test_loader:
            images = images.to(device)
            labels = labels.to(device)
            logits = model(images)
            _, predicted = torch.max(logits.data, dim=1)
            num_seen += labels.size(0)
            num_correct += (predicted == labels).sum().item()

    accuracy = 100 * num_correct / num_seen
    print('[Epoch %d]: Accuracy on test set: %.2f %%' % (epoch + 1, accuracy))
    return accuracy

# Training loop with accuracy logging
if __name__ == '__main__':
    # Train for EPOCH epochs, evaluating after each one and keeping
    # the per-epoch test accuracies.
    acc_list_test = []
    for epoch in range(EPOCH):
        # Release cached GPU memory between epochs (no-op on CPU).
        torch.cuda.empty_cache()
        train(epoch)
        acc_list_test.append(test(epoch))
