import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
import numpy as np
import json
import os
from sklearn.metrics import confusion_matrix, classification_report
import matplotlib.pyplot as plt

# Set the device for training
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f"Using device: {device}")

# Training transforms: resize plus light random augmentation (horizontal
# flip, small rotation) to reduce overfitting, then ImageNet normalization.
train_transforms = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotation(10),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# Validation transforms: deterministic (no augmentation) so reported
# validation metrics are stable and comparable across epochs/runs.
# Bug fix: the original applied the random augmentation to the validation
# set as well, which randomly perturbed the evaluation data.
val_transforms = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# Load datasets (ImageFolder infers class labels from subdirectory names)
train_dataset = ImageFolder('../brain-tumor-mri-dataset/Training', transform=train_transforms)
train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True)

val_dataset = ImageFolder('../brain-tumor-mri-dataset/Testing', transform=val_transforms)
val_loader = DataLoader(val_dataset, batch_size=16, shuffle=False)

print(f"Classes: {train_dataset.classes}")
print(f"Training samples: {len(train_dataset)}")
print(f"Validation samples: {len(val_dataset)}")

# Model Architecture
class TumorClassifier(nn.Module):
    """Small CNN for multi-class tumor classification.

    Expects 3x224x224 inputs. Two conv/ReLU/max-pool stages halve the
    spatial resolution twice (224 -> 112 -> 56), producing 32x56x56
    feature maps, which a two-layer MLP head maps to class logits.
    """

    def __init__(self, num_classes):
        super().__init__()
        # Feature extractor: two convolutional stages.
        self.features = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(16, 32, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # Classification head over the flattened 32*56*56 feature map.
        self.classifier = nn.Sequential(
            nn.Linear(32 * 56 * 56, 128),
            nn.ReLU(inplace=True),
            nn.Linear(128, num_classes),
        )

    def forward(self, x):
        """Return raw (unnormalized) class logits, shape (batch, num_classes)."""
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)

# Instantiate the model and move it to the selected device.
model = TumorClassifier(num_classes=4).to(device)

# Cross-entropy over raw logits; Adam with the default-ish 1e-3 step size.
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Training configuration and best-checkpoint tracker.
num_epochs = 5
best_val_accuracy = 0.0

# Per-epoch history, filled in by the training loop below.
train_losses, val_losses = [], []
train_accuracies, val_accuracies = [], []

print("Starting training...")
for epoch in range(num_epochs):
    # --- Training pass ---
    model.train()
    train_loss = 0.0
    correct = 0
    total = 0

    for inputs, labels in train_loader:
        inputs, labels = inputs.to(device), labels.to(device)

        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

    train_accuracy = correct / total
    # NOTE: train_losses stores the SUMMED loss per epoch; it is converted
    # to a per-batch mean later when the JSON payload is assembled.
    train_losses.append(train_loss)
    train_accuracies.append(train_accuracy)

    # --- Validation pass (no gradient tracking) ---
    model.eval()
    val_loss = 0.0
    correct = 0
    total = 0

    with torch.no_grad():
        for inputs, labels in val_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)
            loss = criterion(outputs, labels)

            val_loss += loss.item()
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    val_loss /= len(val_loader)
    val_accuracy = correct / total
    val_losses.append(val_loss)
    val_accuracies.append(val_accuracy)

    # Bug fix: report the per-batch MEAN training loss so it is directly
    # comparable to the (already averaged) validation loss. The original
    # printed the raw sum over all batches next to the validation mean.
    avg_train_loss = train_loss / len(train_loader)
    print(f'Epoch [{epoch+1}/{num_epochs}], '
          f'Training Loss: {avg_train_loss:.4f}, Training Accuracy: {train_accuracy:.2%}, '
          f'Validation Loss: {val_loss:.4f}, Validation Accuracy: {val_accuracy:.2%}')

    # Checkpoint whenever validation accuracy improves.
    if val_accuracy > best_val_accuracy:
        best_val_accuracy = val_accuracy
        torch.save(model.state_dict(), 'best_model.pth')

print("Training completed!")

# Final evaluation: collect predictions over the whole validation set.
model.eval()
all_predictions = []
all_labels = []

with torch.no_grad():
    for batch_inputs, batch_labels in val_loader:
        batch_inputs = batch_inputs.to(device)
        batch_labels = batch_labels.to(device)
        logits = model(batch_inputs)
        preds = logits.argmax(dim=1)
        all_predictions.extend(preds.cpu().numpy())
        all_labels.extend(batch_labels.cpu().numpy())

# Confusion matrix over predicted vs. true class indices.
cm = confusion_matrix(all_labels, all_predictions)
print("Confusion Matrix:")
print(cm)

# Dict-form classification report, reused later for the JSON export.
report = classification_report(all_labels, all_predictions,
                               target_names=train_dataset.classes,
                               output_dict=True)

print("\nClassification Report:")
print(classification_report(all_labels, all_predictions,
                            target_names=train_dataset.classes))

# Assemble the per-epoch training curves for the API payload.
training_data = {
    "epochs": list(range(1, num_epochs + 1)),
    # Stored training losses are per-epoch sums; convert to per-batch means.
    "train_loss": [epoch_sum / len(train_loader) for epoch_sum in train_losses],
    "val_loss": val_losses,
    "train_acc": train_accuracies,
    "val_acc": val_accuracies,
}

confusion_matrix_data = {
    "labels": train_dataset.classes,
    "matrix": cm.tolist(),
}

# Per-class precision/recall/F1 pulled out of the sklearn report dict.
class_metrics = [
    {
        "class": class_name,
        "precision": round(report[class_name]['precision'], 3),
        "recall": round(report[class_name]['recall'], 3),
        "f1_score": round(report[class_name]['f1-score'], 3),
    }
    for class_name in train_dataset.classes
    if class_name in report
]

# Static description of the model and training configuration.
model_info = {
    "architecture": "Custom CNN",
    "total_params": sum(p.numel() for p in model.parameters()),
    "input_size": "224x224x3",
    "num_classes": len(train_dataset.classes),
    "optimizer": "Adam",
    "learning_rate": 0.001,
    "batch_size": 16,
    "epochs": num_epochs,
    "best_val_accuracy": round(best_val_accuracy, 4),
}

# Combined payload written to disk below.
training_results = {
    "training_data": training_data,
    "confusion_matrix": confusion_matrix_data,
    "class_metrics": class_metrics,
    "model_info": model_info,
}

# Create results directory if it doesn't exist
os.makedirs('training_results', exist_ok=True)

# Save metrics to JSON file
with open('training_results/training_metrics.json', 'w') as f:
    json.dump(training_results, f, indent=2)

print("\nTraining results saved to training_results/training_metrics.json")
print(f"Best validation accuracy: {best_val_accuracy:.2%}")

# Bug fix: after the training loop, `model` holds the LAST epoch's weights,
# not the best ones. Restore the best checkpoint (written during training)
# before exporting, so the saved file actually matches its name.
if os.path.exists('best_model.pth'):
    model.load_state_dict(torch.load('best_model.pth', map_location=device))
torch.save(model.state_dict(), 'training_results/best_model.pth')
print("Model saved to training_results/best_model.pth")
