import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torchvision
import torchvision.transforms as transforms
from conv_next import Conv_Next 
from resnet_50 import ResNet_50
import matplotlib.pyplot as plt
import numpy as np

class AdaBoost:
    """Weighted ensemble of pre-trained classifiers, combined AdaBoost-style.

    Each model's weight is alpha = 0.5 * ln((1 - err) / err), where err is its
    error rate on a validation set; final predictions are the weighted sum of
    the models' softmax probability vectors.
    """

    def __init__(self, models, device='cpu'):
        """
        Args:
            models: list of (model, name) pairs; each model is a torch.nn.Module
                already loaded with trained weights.
            device: device string/object the models and data are run on.
        """
        self.models = models
        self.device = device
        self.num_models = len(models)
        # Uniform weights until compute_weights() is called.
        self.model_weights = np.ones(self.num_models) / self.num_models
        self.model_errors = np.zeros(self.num_models)

    def compute_weights(self, val_loader):
        """Measure each model's validation error and set normalized ensemble weights.

        Side effects: moves models to self.device, puts them in eval mode, and
        fills self.model_errors (raw, unclamped) and self.model_weights
        (normalized to sum to 1).
        """
        print("Computing model weights on validation set...")
        for idx, (model, name) in enumerate(self.models):
            model.eval()
            model.to(self.device)
            correct = 0
            total = 0

            with torch.no_grad():
                for images, labels in val_loader:
                    images, labels = images.to(self.device), labels.to(self.device)
                    outputs = model(images)
                    _, predictions = torch.max(outputs, 1)
                    total += labels.size(0)
                    correct += (labels == predictions).sum().item()

            accuracy = correct / total
            error = 1 - accuracy
            self.model_errors[idx] = error

            # Clamp to avoid log(0) / division by zero at perfect or zero accuracy.
            if error == 0:
                error = 1e-10
            elif error == 1:
                error = 1 - 1e-10

            weight = 0.5 * np.log((1 - error) / error)
            self.model_weights[idx] = weight
            print(f"{name} - Accuracy: {accuracy:.4f}, Error: {error:.4f}, Weight: {weight:.4f}")

        # NOTE(review): if any model has error > 0.5 its alpha is negative, and
        # this sum-normalization can produce odd weights — assumes all models
        # are better than chance.
        self.model_weights = self.model_weights / np.sum(self.model_weights)
        print(f"Normalized weights: {self.model_weights}")

    def predict(self, images):
        """Return the weighted sum of softmax probabilities over all models.

        Args:
            images: input batch tensor already on self.device.
        Returns:
            Tensor of shape (batch, num_classes) — the class count is inferred
            from the first model's output rather than hard-coded.
        """
        all_predictions = None
        with torch.no_grad():
            for idx, (model, _) in enumerate(self.models):
                model.eval()
                outputs = model(images)
                probs = F.softmax(outputs, dim=1)
                if all_predictions is None:
                    # Infer (batch, num_classes) from the first model's output
                    # instead of the previous hard-coded 100 classes.
                    all_predictions = torch.zeros_like(probs)
                all_predictions += self.model_weights[idx] * probs
        return all_predictions

    def evaluate(self, test_loader, classes):
        """Evaluate the ensemble on a test loader.

        Returns:
            (overall accuracy, list of per-batch prediction arrays,
             list of per-batch label arrays, per-class correct counts,
             per-class sample counts).
        """
        correct = 0
        total = 0
        all_predictions = []
        all_labels = []
        class_correct = list(0. for i in range(len(classes)))
        class_total = list(0. for i in range(len(classes)))
        with torch.no_grad():
            for images, labels in test_loader:
                images, labels = images.to(self.device), labels.to(self.device)
                outputs = self.predict(images)
                _, predictions = torch.max(outputs, dim=1)
                total += labels.size(0)
                correct += (labels == predictions).sum().item()
                all_predictions.append(predictions.cpu().numpy())
                all_labels.append(labels.cpu().numpy())
                c = (predictions == labels).squeeze()
                for i in range(labels.size(0)):
                    label = labels[i]
                    class_correct[label] += c[i].item()
                    class_total[label] += 1

        accuracy = correct / total
        return accuracy, all_predictions, all_labels, class_correct, class_total
    
def main():
    """Build a two-model AdaBoost ensemble on CIFAR-100 and report/plot results.

    Splits the CIFAR-100 test set in half: the first half is used as a
    validation set to compute model weights, the second half for the final
    ensemble evaluation. Requires './resnet.pth' and './conv_next.pth'
    checkpoints produced by the corresponding training scripts.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"Using device: {device}")
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    test_dataset = torchvision.datasets.CIFAR100(root = './data', train = False, download = True, transform = transform_test)
    # NOTE(review): random_split is unseeded, so the val/test partition (and
    # hence the reported accuracy) differs between runs — pass a seeded
    # generator if reproducibility matters.
    val_size = int(0.5 * len(test_dataset))
    test_size = len(test_dataset) - val_size
    val_dataset, test_dataset_final = torch.utils.data.random_split(test_dataset, [val_size, test_size])
    val_loader = DataLoader(val_dataset, batch_size = 32, shuffle = False, num_workers = 2)
    test_loader = DataLoader(test_dataset_final, batch_size = 32, shuffle = False, num_workers = 2)

    classes = test_dataset.classes

    resnet = ResNet_50(len(classes))
    resnet.load_state_dict(torch.load('./resnet.pth', map_location = device))
    resnet.to(device)

    conv_next = Conv_Next(len(classes))
    conv_next.load_state_dict(torch.load('./conv_next.pth', map_location = device))
    conv_next.to(device)

    models = [
        (resnet, "ResNet-50"),
        (conv_next, "Conv-Next")
    ]

    adaboost = AdaBoost(models, device)

    # Weight the models on the validation half, then score the held-out half.
    adaboost.compute_weights(val_loader)

    accuracy, all_predictions, all_labels, class_correct, class_total = adaboost.evaluate(test_loader, classes)

    print(f"Ensemble Test Accuracy: {accuracy * 100:.2f}%")

    # Per-class accuracy, skipping classes with no samples in the test half.
    class_accuracy = []
    for i in range(len(classes)):
        if class_total[i] > 0:
            class_accuracy.append((classes[i], ((100 * class_correct[i]) / class_total[i])))
    for idx, (class_name, class_acc) in enumerate(class_accuracy):
        print(f"{idx+1}. {class_name}: {class_acc:.2f}%")

    plt.figure(figsize=(12, 6))
    plt.subplot(1, 2, 1)
    model_names = [name for _, name in models]
    plt.bar(model_names, adaboost.model_weights)
    plt.title('Model Weights in Ensemble')
    plt.ylabel('Weight')
    plt.ylim(0, 1)

    class_accuracy.sort(key = lambda x: x[1], reverse = True)
    plt.subplot(1, 2, 2)
    top_classes = class_accuracy[:20]
    # Size the bar chart from the actual slice: the previous hard-coded
    # range(20) would crash if fewer than 20 classes had test samples.
    n_top = len(top_classes)
    class_names_top = [c[0] for c in top_classes]
    accuracies_top = [c[1] for c in top_classes]
    plt.bar(range(n_top), accuracies_top)
    plt.xticks(range(n_top), class_names_top, rotation=45, ha='right')
    plt.title('Top 20 Classes by Accuracy')
    plt.ylabel('Accuracy (%)')
    plt.tight_layout()
    plt.savefig('adaboost_ensemble_results.png')
    plt.show()

    # Saving a dict forces pickling; reload with np.load(..., allow_pickle=True).item()
    ensemble_info = {
        'model_weights': adaboost.model_weights,
        'model_errors': adaboost.model_errors,
        'ensemble_accuracy': accuracy
    }
    np.save('ensemble_info.npy', ensemble_info)
    print("\nEnsemble information saved to 'ensemble_info.npy'")

if __name__ == "__main__":
    main()