import random
import time
from collections import OrderedDict

import numpy as np
import torch
from math import ceil
from torch import nn, optim
from torch.utils.data import DataLoader, random_split, Subset
from torchvision import datasets, transforms


def model_size(model):
    """Return the total size of *model*'s parameters in bytes.

    Uses each parameter's actual element size instead of assuming 4-byte
    float32, so half- or double-precision models are reported correctly.
    (For float32 models the result is identical to the old numel * 4.)
    """
    return sum(p.numel() * p.element_size() for p in model.parameters())


def generate_client_features(num_clients):
    """Draw synthetic per-client features, each uniform in [0, 1).

    Returns a tuple of three lists of length ``num_clients``:
    (data-distribution score, compute power, communication bandwidth).
    """
    data_dist, compute, bandwidth = (
        [random.random() for _ in range(num_clients)] for _ in range(3)
    )
    return data_dist, compute, bandwidth


def GMBS(client_data_distribution, client_compute_power, client_communication_bandwidth, total_blocks):
    """Gradient Model Block Selection: allocate ``total_blocks`` across clients.

    Each client's share is proportional to a weighted score of its data
    distribution (0.5), compute power (0.3) and bandwidth (0.2).
    Returns an ndarray of (fractional) block allocations summing to
    ``total_blocks``.
    """
    # Fixed importance weights for the three feature channels.
    weights = np.array([0.5, 0.3, 0.2])
    features = np.vstack([
        client_data_distribution,
        client_compute_power,
        client_communication_bandwidth,
    ]).astype(float)

    scores = weights @ features
    return scores * total_blocks / scores.sum()


# Preprocessing for MNIST/FashionMNIST: tensor conversion followed by
# normalization with the commonly used MNIST channel statistics.
transform_mnist = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])

# Preprocessing for CIFAR-10/100: collapse RGB to one grayscale channel
# (so the flattened input is 32*32 = 1024, matching the SNN input size used
# below), apply light augmentation (flip + padded random crop), then
# normalize to roughly [-1, 1].
transform_cifar_gray = transforms.Compose([
    transforms.Grayscale(num_output_channels=1),
    transforms.RandomHorizontalFlip(),
    transforms.RandomCrop(32, padding=4),
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))
])


def calculate_communication_energy(model, rounds, energy_per_byte=1e-9):
    """Estimate the energy spent transmitting the full model once per round.

    Energy = (parameter bytes) * rounds * energy_per_byte.  Parameter bytes
    are computed from each tensor's actual element size rather than a
    hard-coded 4 bytes, consistent with ``model_size`` (identical for
    float32 models).
    """
    total_bytes = sum(p.numel() * p.element_size() for p in model.parameters())
    return total_bytes * rounds * energy_per_byte


def split_iid(dataset, num_clients):
    """Randomly split *dataset* into ``num_clients`` IID subsets.

    FIX: the old ``[len(dataset) // num_clients] * num_clients`` lengths did
    not sum to ``len(dataset)`` when the size is not evenly divisible, which
    makes ``random_split`` raise.  The remainder is now spread one extra
    sample at a time over the first clients.
    """
    base, extra = divmod(len(dataset), num_clients)
    lengths = [base + 1 if i < extra else base for i in range(num_clients)]
    return random_split(dataset, lengths)


def split_non_iid(dataset, num_clients, num_shards=200):
    """Partition *dataset* into per-client subsets via shard assignment.

    The dataset indices are shuffled, cut into ``num_shards`` equal-size
    shards, and each client receives ``num_shards // num_clients`` whole
    shards.

    FIX: the previous implementation sliced ``shards_per_client`` *indices*
    (not shards) per client, so with the defaults each client got only 40
    samples and virtually all data was discarded.

    NOTE(review): shards here are drawn from shuffled indices; a genuinely
    non-IID split would build shards from label-sorted indices — confirm the
    intended heterogeneity level.
    """
    indices = np.arange(len(dataset))
    np.random.shuffle(indices)

    shard_size = len(dataset) // num_shards
    shards_per_client = num_shards // num_clients
    samples_per_client = shards_per_client * shard_size

    client_subsets = []
    for c in range(num_clients):
        start = c * samples_per_client
        client_subsets.append(Subset(dataset, indices[start:start + samples_per_client]))
    return client_subsets


def add_noise(dataset, noise_type='gaussian', noise_level=0.1):
    """Add pixel noise to ``dataset.data`` in place and return the dataset.

    Only ``noise_type='gaussian'`` is implemented; any other value returns
    the dataset untouched.  Pixels are clamped to [0, 255] and stored back
    as uint8.

    FIX: the old version always wrote a numpy array back into
    ``dataset.data``, even when the dataset stored a torch tensor (MNIST),
    changing the attribute's type; it also called ``torch.tensor`` on an
    existing tensor, which copies and warns.  The original container type
    is now preserved.
    """
    if noise_type == 'gaussian':
        was_tensor = torch.is_tensor(dataset.data)
        data = torch.as_tensor(dataset.data).to(torch.float)
        noisy = (data + torch.randn_like(data) * noise_level).clamp(0, 255).byte()
        dataset.data = noisy if was_tensor else noisy.numpy()
    return dataset


def load_dataset(dataset_name, iid=True, add_noise_flag=False, noise_type='gaussian',
                 noise_level=0.1, num_clients=5, batch_size=32):
    """Download a training set and split it into per-client DataLoaders.

    Parameters
    ----------
    dataset_name : one of 'mnist', 'fashion_mnist', 'cifar10', 'cifar100'.
    iid : use an IID random split if True, else the shard-based split.
    add_noise_flag, noise_type, noise_level : optional pixel-noise injection.
    num_clients : number of client partitions (default 5, as before).
    batch_size : DataLoader batch size (default 32, as before).

    Returns a list of ``num_clients`` shuffled DataLoaders.
    Raises ValueError for an unknown dataset name.
    """
    # Table-driven dispatch keeps the dataset/transform pairing in one place.
    dataset_registry = {
        'mnist': (datasets.MNIST, transform_mnist),
        'fashion_mnist': (datasets.FashionMNIST, transform_mnist),
        'cifar10': (datasets.CIFAR10, transform_cifar_gray),
        'cifar100': (datasets.CIFAR100, transform_cifar_gray),
    }
    if dataset_name not in dataset_registry:
        raise ValueError(f"Unknown dataset: {dataset_name}")
    dataset_cls, transform = dataset_registry[dataset_name]
    dataset = dataset_cls(root='./data', train=True, download=True, transform=transform)

    if add_noise_flag:
        dataset = add_noise(dataset, noise_type, noise_level)

    splitter = split_iid if iid else split_non_iid
    client_datasets = splitter(dataset, num_clients)

    return [DataLoader(client_dataset, batch_size=batch_size, shuffle=True)
            for client_dataset in client_datasets]


class ALIC:
    """Adaptive client selection based on local gradient importance.

    A client's importance is the sum over its model parameters of the mean
    absolute gradient; clients whose importance exceeds the threshold are
    selected, falling back to every client when none qualifies.
    """

    def __init__(self, clients, importance_threshold=0.1):
        self.clients = clients
        self.importance_threshold = importance_threshold

    def calculate_importance(self, client):
        """Sum of mean |grad| across parameters; 0.0 when no grads exist."""
        grads = (p.grad for p in client.model.parameters() if p.grad is not None)
        return sum((g.abs().mean().item() for g in grads), 0.0)

    def select_clients(self):
        """Return indices of clients above the threshold (or all, if none)."""
        importances = {
            idx: self.calculate_importance(client)
            for idx, client in enumerate(self.clients)
        }
        chosen = [idx for idx, imp in importances.items()
                  if imp > self.importance_threshold]
        return chosen or list(importances)


class SNN(nn.Module):
    """Three-layer fully connected network with ReLU hiddens and a sigmoid
    output, operating on flattened inputs.

    Attribute names fc1/fc2/fc3 are part of the state_dict layout and are
    kept stable for checkpoint/aggregation compatibility.
    """

    def __init__(self, input_size, hidden_size1, hidden_size2, output_size):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden_size1)
        self.fc2 = nn.Linear(hidden_size1, hidden_size2)
        self.fc3 = nn.Linear(hidden_size2, output_size)

    def forward(self, x):
        # Flatten everything past the batch dimension before the MLP.
        flat = torch.flatten(x, start_dim=1)
        hidden = self.fc2(self.fc1(flat).relu()).relu()
        return self.fc3(hidden).sigmoid()


def init_weights(m):
    """Xavier-uniform weights and zero bias for Linear layers; no-op otherwise.

    Intended for use with ``module.apply(init_weights)``.
    """
    if not isinstance(m, nn.Linear):
        return
    nn.init.xavier_uniform_(m.weight)
    nn.init.zeros_(m.bias)


class Client:
    """A federated-learning participant holding a local model and data loader.

    ``train`` pulls the latest global weights, runs local SGD with a
    distillation term against the (frozen) global model, and reports the
    last epoch's loss/accuracy back to the server.
    """

    def __init__(self, client_id, model, train_loader, device='cuda', local_epochs=1):
        self.client_id = client_id
        # Overwritten every round by FederatedLearning.adjust_gmbs_rate.
        self.gmbs_rate = 0.5
        self.model = model.to(device)
        self.model.apply(init_weights)
        self.train_loader = train_loader
        self.device = device
        self.local_epochs = local_epochs
        self.optimizer = optim.SGD(self.model.parameters(), lr=0.01, momentum=0.9)
        self.criterion = nn.CrossEntropyLoss()

    def train(self, global_model, local_epochs):
        """Run ``local_epochs`` of local training starting from the global weights.

        Returns (loss, accuracy) where loss is the final batch's total loss
        averaged over the number of batches and accuracy is the cumulative
        percentage of correct predictions.  Both are 0.0 when no training
        occurs (local_epochs == 0 or an empty loader).
        """
        self.model.load_state_dict(global_model.state_dict())
        self.model.train()

        epoch_loss = 0.0
        correct = 0
        total = 0
        lambda_kd = 0.5  # weight of the knowledge-distillation term
        loss2 = 0.0
        # FIX: initialize so the return below cannot raise NameError when
        # local_epochs == 0 or the loader yields no batches.
        accuracy = 0.0
        for epoch in range(local_epochs):
            for data, target in self.train_loader:
                data, target = data.to(self.device), target.to(self.device)

                # HACK: remaps labels >= 10 into [0, 9] (e.g. CIFAR-100) so
                # the 10-way output head can be reused — verify this is the
                # intended coarse-labeling strategy.
                if target.max() >= 10:
                    target = target % 10

                self.optimizer.zero_grad()

                output = self.model(data.view(data.size(0), -1))
                loss = self.criterion(output, target)

                # Distill against the global model's (frozen) predictions.
                with torch.no_grad():
                    teacher_output = global_model(data.view(data.size(0), -1))
                kd_loss = lambda_kd * nn.functional.mse_loss(output, teacher_output)

                total_loss = loss + kd_loss
                total_loss.backward()
                nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1.0)
                self.optimizer.step()

                epoch_loss += total_loss.item()
                loss2 = total_loss.item() / len(self.train_loader)
                _, predicted = torch.max(output, 1)
                total += target.size(0)
                correct += (predicted == target).sum().item()

            # FIX: guard the division so an empty loader does not crash.
            if total:
                accuracy = 100 * correct / total
            print(f"Epoch {epoch + 1}: Loss = {loss2:.4f}, Accuracy = {accuracy:.2f}%")

        return loss2, accuracy


class FederatedLearning:
    """Server-side coordinator: selects clients per round via GMBS block
    allocation, trains them locally, averages their weights into the server
    model, and tracks communication/time costs plus the best round."""

    def __init__(self, server_model, clients, device='cuda'):
        self.server_model = server_model
        self.clients = clients
        self.device = device
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.SGD(self.server_model.parameters(), lr=0.001, momentum=0.9)
        self.alic = ALIC(clients)
        self.gmbs_rate = 0.5
        self.total_communication_cost = 0
        self.communication_rounds = 0
        self.total_time_cost = 0
        self.best_accuracy = 0.0
        self.best_model_state = None

        # Synthetic per-client features consumed by GMBS each round.
        (self.client_data_distribution,
         self.client_compute_power,
         self.client_communication_bandwidth) = generate_client_features(len(clients))

    def adjust_gmbs_rate(self, round_num, total_rounds):
        """Linearly decay the GMBS rate from 0.5 to 0 and push it to clients."""
        self.gmbs_rate = 0.5 * (1 - round_num / total_rounds)
        for client in self.clients:
            client.gmbs_rate = self.gmbs_rate

    def train(self, rounds, local_epochs=1):
        """Run the federated training loop for ``rounds`` rounds.

        Each round: allocate blocks via GMBS, train the selected clients,
        average their weights into the server model, update cost counters,
        and snapshot the server state whenever average client accuracy
        improves.  The best snapshot is saved to 'best_model.pth'.
        """
        total_blocks = 10

        for round_num in range(rounds):
            print(f"Round {round_num + 1}/{rounds}")
            start_time = time.time()

            block_allocation = GMBS(self.client_data_distribution,
                                    self.client_compute_power,
                                    self.client_communication_bandwidth,
                                    total_blocks)

            # A client participates when it is allocated any (fractional) block.
            selected_clients = [
                self.clients[i] for i in range(len(block_allocation))
                if ceil(block_allocation[i]) > 0
            ]

            client_losses, client_accuracies = [], []
            for client in selected_clients:
                print(f"Training client {client.client_id}")
                loss, accuracy = client.train(self.server_model, local_epochs)
                client_losses.append(loss)
                client_accuracies.append(accuracy)

            # FIX: guard against an empty selection before aggregating.
            if selected_clients:
                aggregated_params = self.aggregate_models(
                    [client.model for client in selected_clients])
                self.update_server_model(aggregated_params)

            self.adjust_gmbs_rate(round_num, rounds)

            communication_cost = model_size(self.server_model) * len(selected_clients)
            self.total_communication_cost += communication_cost
            self.communication_rounds += 1
            end_time = time.time()
            self.total_time_cost += (end_time - start_time)

            if client_accuracies:
                avg_accuracy = sum(client_accuracies) / len(client_accuracies)
                if avg_accuracy > self.best_accuracy:
                    self.best_accuracy = avg_accuracy
                    # FIX: state_dict() returns references to the live
                    # parameter tensors, which later load_state_dict calls
                    # overwrite in place — clone for a stable snapshot.
                    self.best_model_state = {
                        k: v.detach().clone()
                        for k, v in self.server_model.state_dict().items()
                    }

            print(f"Communication Cost after Round {round_num + 1}: {communication_cost / (1024 * 1024):.2f} MB")
            print(f"Total Communication Cost: {self.total_communication_cost / (1024 * 1024):.2f} MB")

        print(f"Total communication rounds: {self.communication_rounds}")
        print(f"Total time cost: {self.total_time_cost:.2f} seconds")
        print(f"Total communication cost: {self.total_communication_cost / (1024 * 1024):.2f} MB")

        # Save the best model
        if self.best_model_state:
            torch.save(self.best_model_state, 'best_model.pth')
            print(f"Best model saved with accuracy: {self.best_accuracy:.2f}%")

    def aggregate_models(self, models):
        """Element-wise average of the given models' state dicts (FedAvg)."""
        aggregated_params = OrderedDict()
        for key in models[0].state_dict().keys():
            aggregated_params[key] = torch.mean(
                torch.stack([model.state_dict()[key] for model in models]), dim=0)
        return aggregated_params

    def update_server_model(self, aggregated_params):
        """Load the aggregated parameters into the server model."""
        self.server_model.load_state_dict(aggregated_params)


if __name__ == '__main__':
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    dataset = 'cifar10'

    # Per-dataset configuration: (use IID split?, flattened input size).
    DATASET_SETTINGS = {
        'mnist': (True, 784),
        'fashion_mnist': (True, 784),
        'cifar10': (False, 1024),
        'cifar100': (True, 1024),
    }
    if dataset not in DATASET_SETTINGS:
        raise ValueError(f"Unknown dataset: {dataset}")
    iid, input_size = DATASET_SETTINGS[dataset]

    client_train_loaders = load_dataset(dataset, iid=iid, add_noise_flag=False,
                                        noise_type='gaussian', noise_level=0.1)

    # One server model plus five identically shaped client models.
    make_model = lambda: SNN(input_size=input_size, hidden_size1=512,
                             hidden_size2=256, output_size=10)
    server_model = make_model().to(device)
    clients = [
        Client(client_id=idx, model=make_model(),
               train_loader=client_train_loaders[idx], device=device)
        for idx in range(5)
    ]

    fl = FederatedLearning(server_model=server_model, clients=clients, device=device)
    fl.train(rounds=50, local_epochs=50)
