import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader,random_split
from torchvision import datasets, transforms
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
import numpy as np
import matplotlib.pyplot as plt
from multiprocessing import Manager, Lock, get_context
import multiprocessing

# Global configuration. Byzantine resilience requires n = 2f + 3 so that
# gamma can reach its maximum value.
sum_epochs = 30
node_num = 11 #25  # total number of nodes
f = 4 #11  # number of Byzantine (malicious) nodes
# Hyper-parameters used to compute gamma_m for the 'finite' attack.
p = 2.00        # exponent of the norm
q = 2.00        # adjustment factor for the Krum and GeoMed algorithms
delta = 0.03  # small offset


# Model definition
class Net(nn.Module):
    """Small CNN for MNIST classification (two conv layers + two FC layers).

    Input:  (batch, 1, 28, 28) images.
    Output: (batch, 10) log-probabilities (log_softmax), usable with NLLLoss.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        # Channel-wise dropout is appropriate here: the input is a 4-D conv
        # feature map.
        self.dropout1 = nn.Dropout2d(0.25)
        # BUG FIX: dropout2 is applied to a flattened 2-D tensor (output of
        # fc1), where nn.Dropout2d is ill-defined — PyTorch warns on 2-D
        # input and its behavior changed across versions. Element-wise
        # nn.Dropout is the correct layer (as in the official MNIST example).
        self.dropout2 = nn.Dropout(0.5)
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        # 28x28 -> 26x26 (conv1) -> 24x24 (conv2) -> 12x12 (pool);
        # 64 * 12 * 12 = 9216 matches fc1's input size.
        x = self.conv1(x)
        x = torch.relu(x)
        x = self.conv2(x)
        x = torch.relu(x)
        x = torch.max_pool2d(x, 2)
        x = self.dropout1(x)
        x = torch.flatten(x, 1)
        x = self.fc1(x)
        x = torch.relu(x)
        x = self.dropout2(x)
        x = self.fc2(x)
        return torch.log_softmax(x, dim=1)

# Data preprocessing and loading
def setup_data_loader(batch_size, rank, world_size):
    """Build the distributed train loader and a local test loader for MNIST.

    Args:
        batch_size: mini-batch size for both loaders.
        rank: this process's rank (used by the DistributedSampler).
        world_size: total number of processes.

    Returns:
        (train_loader, test_loader) tuple. The train loader shards the
        80% training split across ranks via DistributedSampler; the test
        loader iterates the remaining 20% sequentially.
    """
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    dataset = datasets.MNIST('./data', train=True, download=True, transform=transform)
    train_size = int(0.8 * len(dataset))
    test_size = len(dataset) - train_size
    # BUG FIX: each spawned worker calls this after re-seeding torch with a
    # rank-dependent seed, so the original random_split produced a DIFFERENT
    # train/test partition on every rank (one rank's test data leaked into
    # another rank's training shard). A dedicated fixed-seed generator makes
    # the split identical across all processes.
    split_generator = torch.Generator().manual_seed(0)
    train_dataset, test_dataset = random_split(
        dataset, [train_size, test_size], generator=split_generator
    )

    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, num_replicas=world_size, rank=rank)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, sampler=train_sampler)

    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
    return train_loader, test_loader

# Distributed setup
def setup(rank, world_size):
    """Join the distributed process group for this worker.

    Uses the gloo backend with a hard-coded localhost rendezvous, then
    seeds torch identically on every rank so model weights start equal.
    """
    os.environ.update({'MASTER_ADDR': 'localhost', 'MASTER_PORT': '12356'})
    dist.init_process_group("gloo", rank=rank, world_size=world_size)
    torch.manual_seed(0)

# Teardown
def cleanup():
    """Leave the distributed process group when training finishes."""
    dist.destroy_process_group()

class CustomGradientAggregator:
    """Central (virtual) aggregator for Byzantine-robust gradient combining.

    Collects per-worker flattened gradients (as numpy vectors, keyed by
    worker rank, assumed to be 0 .. num_workers-1) and aggregates them with
    one of three rules: plain average, Krum, or coordinate-wise median.
    """

    def __init__(self, model, num_workers, k=node_num-f-2):
        # model is kept only as a reference; aggregation works on raw vectors.
        self.model = model
        self.num_workers = num_workers
        # k: number of nearest neighbours scored by Krum (n - f - 2).
        self.k = k
        # rank -> flattened gradient as a numpy array.
        self.gradients = {}

    def collect_gradients(self, gradients_from_gpus):
        """Store each worker's flattened gradient, converted to numpy.

        Args:
            gradients_from_gpus: dict mapping rank -> 1-D torch tensor
                (entries that are None are skipped).
        """
        for gpu_idx, grad_tensor in gradients_from_gpus.items():
            if grad_tensor is not None:
                self.gradients[gpu_idx] = grad_tensor.detach().cpu().numpy()

    def calculate_distances(self):
        """Return {(i, j): ||g_i - g_j||_2} for all ordered pairs i != j.

        The L2 norm is symmetric, so each unordered pair is computed once
        and mirrored — the original computed every pair twice.
        """
        distances = {}
        for i in range(self.num_workers):
            for j in range(i + 1, self.num_workers):
                distance = np.linalg.norm(self.gradients[i] - self.gradients[j], ord=2)
                distances[(i, j)] = distance
                distances[(j, i)] = distance
        return distances

    def find_k_nearest_neighbors(self, distances):
        """Return {i: [k nearest worker indices to i, sorted by distance]}."""
        neighbors = {}
        for i in range(self.num_workers):
            sorted_distances = sorted([(j, distances[(i, j)]) for j in range(self.num_workers) if i != j], key=lambda x: x[1])
            neighbors[i] = [j for j, _ in sorted_distances[:self.k]]
        return neighbors

    def aggregate_gradients_average(self, device):
        """
        Perform average aggregation on the gradients.

        Returns:
        torch.tensor: Aggregated gradient using average.
        """
        all_gradients = np.array(list(self.gradients.values()))
        avg_gradient = np.mean(all_gradients, axis=0)
        return torch.tensor(avg_gradient).to(device)

    def aggregate_gradients_krum(self, device):
        """Krum aggregation: return the single gradient whose summed distance
        to its k nearest neighbours is smallest, or None if fewer than k
        gradients have been collected."""
        if len(self.gradients) < self.k:
            return None

        distances = self.calculate_distances()
        neighbors = self.find_k_nearest_neighbors(distances)

        selected_gradients = None
        min_score = float('inf')

        for i in range(self.num_workers):
            # Krum score: sum of distances to the k nearest neighbours.
            score = sum(distances[(i, j)] for j in neighbors[i])

            if score < min_score:
                min_score = score
                selected_gradients = self.gradients[i]

        return torch.tensor(selected_gradients).to(device)

    def aggregate_gradients_median(self, device):
        """
        Perform median aggregation on the gradients.

        Returns:
        torch.tensor: Aggregated gradient using median.
        """
        all_gradients = np.array(list(self.gradients.values()))
        median_gradient = np.median(all_gradients, axis=0)
        return torch.tensor(median_gradient).to(device)

def is_malicious_node(rank, num_byzantine=None):
    """Return True if *rank* is one of the Byzantine (malicious) workers.

    By convention ranks 0 .. num_byzantine-1 are malicious.

    Args:
        rank: worker rank to test.
        num_byzantine: number of malicious workers; defaults to the
            module-level constant ``f`` (kept for backward compatibility).
    """
    if num_byzantine is None:
        num_byzantine = f
    # The original scanned range(f) for a matching rank; a direct range
    # comparison is equivalent and O(1).
    return 0 <= rank < num_byzantine
# Generate a malicious gradient for attacker nodes (Seesaw attack)
def generate_malicious_gradient(non_byzantine_gradients_list, target_grad, rank):
    """Seesaw attack: fabricate a gradient that hides near an honest one.

    The honest gradients are ranked by L2 distance to this node's own
    gradient; the middle one is the anchor, and a tiny perturbation is
    subtracted from its first coordinate so the result is close to — but
    not identical with — an honest gradient.
    """
    # Order honest gradients by distance to this node's gradient.
    ordered = sorted(
        non_byzantine_gradients_list,
        key=lambda g: torch.norm(g - target_grad, p=2).item(),
    )
    # Middle element: index (n-1)//2 picks the lower middle for even n and
    # the exact middle for odd n, matching the original two-branch logic.
    anchor_grad = ordered[(len(ordered) - 1) // 2]

    nudge = torch.zeros_like(anchor_grad)
    nudge[0] = 0.001
    return anchor_grad - nudge


def generate_malicious_gradient_far(non_byzantine_gradients_list, target_grad, rank):
    """Fabricate a gradient anchored at the honest gradient FARTHEST (in L2
    distance) from this node's own gradient, plus a tiny perturbation on the
    first coordinate."""
    # max(...) returns the first maximum, matching the original stable
    # descending sort followed by taking element 0.
    anchor_grad = max(
        non_byzantine_gradients_list,
        key=lambda g: torch.norm(g - target_grad, p=2).item(),
    )

    # Perturb only the first coordinate by a small constant.
    nudge = torch.zeros_like(anchor_grad)
    nudge[0] = 0.001
    return anchor_grad + nudge


# NOTE(review): p, q and delta are re-declared here with the same values as
# at the top of the file — redundant but harmless; consider removing one copy.
p = 2.00        # exponent of the norm
q = 2.00        # adjustment factor for the Krum and GeoMed algorithms
delta = 0.03  # small offset


# Malicious-gradient generation designed to defeat Krum (the 'finite' attack).
# non_byzantine_gradients_list and grad_shape are computed by the caller;
# delta, p and q are hyper-parameters, d is the gradient dimensionality.
def generate_malicious_gradient_finite(rank, non_byzantine_gradients_list, grad_shape, delta, p, q, d, num_byzantine=None):
    """Return mean(honest gradients) + gamma_m * e0 where
    gamma_m = delta * f**(1/q) * d**(1/p) and e0 is the first unit vector.

    Args:
        rank: rank of the attacking node (debugging only).
        non_byzantine_gradients_list: flattened gradients of honest nodes.
        grad_shape: shape of a flattened gradient vector.
        delta, p, q: attack hyper-parameters.
        d: gradient dimensionality.
        num_byzantine: number of Byzantine nodes; defaults to the module
            constant ``f`` (kept for backward compatibility).
    """
    if num_byzantine is None:
        num_byzantine = f
    device = non_byzantine_gradients_list[0].device  # keep all tensors on one device
    # Attack direction: perturb only the first coordinate, for simplicity.
    E = torch.zeros(grad_shape, device=device)
    E[0] = 1
    # Mean of the honest gradients (stacked into one tensor first).
    mean_gradient = torch.mean(torch.stack(non_byzantine_gradients_list), dim=0)
    gamma_m = delta * (num_byzantine ** (1 / q)) * (d ** (1 / p))
    malicious_gradient = mean_gradient + gamma_m * E
    return malicious_gradient

# Training loop
def train(rank, world_size, krum, acc_step_rank, lock, attack_type='Seesaw',aggregate_type='krum',attack=1):
    """Per-worker training loop, launched once per rank via mp.spawn.

    Runs sum_epochs epochs of MNIST training with MANUAL gradient
    aggregation: DDP's automatic all-reduce is suppressed with no_sync(),
    gradients are exchanged via all_gather_object, malicious ranks replace
    theirs with a fabricated gradient, and the chosen robust rule
    (average/krum/median) produces the gradient actually applied.

    Args:
        rank: this worker's rank, 0 <= rank < world_size.
        world_size: total number of workers.
        krum: shared CustomGradientAggregator instance.
        acc_step_rank: manager list; rank 0 appends per-epoch test accuracy.
        lock: multiprocessing lock guarding acc_step_rank.
        attack_type: 'Seesaw', 'far' or 'finite' malicious-gradient scheme.
        aggregate_type: 'average', 'krum' or 'median' aggregation rule.
        attack: 1 to activate the attack on malicious ranks, 0 to disable.
    """
    setup(rank, world_size)

    from datetime import datetime
    # Give each rank its own random seed derived from the current
    # microsecond timestamp so local randomness differs across ranks.
    current_time = datetime.now()
    microsecond_timestamp = current_time.microsecond
    # print(f"rank={rank},microsecond_timestamp={microsecond_timestamp}")
    seed = microsecond_timestamp+ rank*11
    # print(f"------------rank={rank},seed={seed}-------------------------------\n")
    torch.manual_seed(seed)
    np.random.seed(seed)

    batch_size = 64
    #
    train_loader, test_loader = setup_data_loader(batch_size, rank, world_size)
    
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = Net().to(device)
    ddp_model = DDP(model, device_ids=[0] if torch.cuda.is_available() else None,find_unused_parameters=True)
    criterion = nn.CrossEntropyLoss().to(device)
    optimizer = optim.Adam(ddp_model.parameters(), lr=0.001)
    
    for epoch in range(sum_epochs):
        ddp_model.train()
        for batch_idx, (data, target) in enumerate(train_loader):
            '''data 是一个张量，包含一批次（64个）的MNIST图像，每个图像的形状为 [1, 28, 28]。
            打印 data[:2] 将显示批次中的前两个图像张量'''
            data, target = data.to(device), target.to(device)

            optimizer.zero_grad()
            # no_sync() disables DDP's automatic gradient all-reduce for this
            # backward pass; aggregation is done manually further below.
            with ddp_model.no_sync():
                output = ddp_model(data)
                loss = criterion(output, target)
                loss.backward()
            '''收集每个节点的梯度，包括自己的梯度
            功能：构建一个字典 gradients_from_gpus，键为节点的 rank，值为该节点所有模型参数的梯度向量。
            作用：对于每个节点 i，获取所有模型参数的梯度，将其展平并连接成一个一维向量，存储在字典中。'''
            # Build {rank: flattened gradient vector}. NOTE(review): at this
            # point every entry holds THIS process's local gradient; the true
            # per-rank values only arrive via all_gather_object further down.
            gradients_from_gpus = {}
            for i in range(world_size):
                gradient_list = []
                for param in ddp_model.parameters():
                    gradient_list.append(param.grad.data.flatten())
                gradients_from_gpus[i] = torch.cat(gradient_list)
            
            
            # Collect the gradients of the non-Byzantine (honest) nodes.
            non_byzantine_gradients_list = []
            for i in range(world_size):
                if not is_malicious_node(i):
                    non_byzantine_gradients_list.append(gradients_from_gpus[i])
            

            # Gradient dimensionality, needed by the 'finite' attack
            # (d = 1199882 for this model).
            d = gradients_from_gpus[rank].shape[0]
            # This rank's own gradient.
            target_grad = gradients_from_gpus[rank]
            # Fabricate the malicious gradient for the chosen attack type.
            # NOTE(review): malicious_gradient stays unbound for any other
            # attack_type value and would raise NameError below — confirm
            # callers only pass the three supported names.
            if attack_type == 'Seesaw':
                malicious_gradient = generate_malicious_gradient(non_byzantine_gradients_list, target_grad, rank)
            elif attack_type == 'far':
                malicious_gradient = generate_malicious_gradient_far(non_byzantine_gradients_list, target_grad, rank)
            elif attack_type == 'finite':
                malicious_gradient = generate_malicious_gradient_finite(rank,non_byzantine_gradients_list, gradients_from_gpus[rank].shape, delta, p, q, d)

            # print(f"rank={rank},生成的恶意梯度malicious_gradient为{malicious_gradient}\n")
            # On malicious ranks, overwrite each parameter's .grad with the
            # matching slice of the fabricated flat gradient.
            # NOTE(review): `&` is bitwise and binds tighter than `==`, so
            # this parses as (is_malicious_node(rank) & attack) == 1. It only
            # behaves like `and` because attack is 0 or 1 — prefer
            # `is_malicious_node(rank) and attack == 1`.
            if is_malicious_node(rank) & attack == 1:
                offset = 0
                for param in ddp_model.parameters():
                    param_grad_size = torch.numel(param.grad)
                    param.grad = malicious_gradient[offset:offset + param_grad_size].view(param.grad.shape)
                    offset += param_grad_size
                #print(f"坏节点的param.grad={param.grad}")

                # param.grad = torch.randn_like(param.grad)
            
            '''问题5：使用dist.all_gather_object收集所有节点的梯度信息————》问题出在这一行代码，为什么要在收集一次呢？
            问题6：这行代码有必要存在吗，因为不同节点之间不需要共享梯度信息，因为下面会通过collect_gradients和aggregate_gradients_krum
            计算出每个节点根据krum算法计算后的聚合梯度值，相当于有一个中心的虚拟计算器来专门计算最终每个rank要用来更新的梯度'''
            # NOTE(review): all_gather_object officially expects a LIST as its
            # first argument; passing this dict appears to work only because
            # its keys are exactly 0..world_size-1, so object_list[i] = ...
            # behaves like list indexing. After this call entry i holds rank
            # i's (possibly malicious) gradient — verify against the PyTorch
            # docs before upgrading.
            dist.all_gather_object(gradients_from_gpus, gradients_from_gpus[rank])
            '''-----------------------------------下面都没有问题---------------------------------------------'''
            # Feed all gathered gradients to the aggregator, then aggregate.
            krum.collect_gradients(gradients_from_gpus)

            print(f"rank={rank},所有节点的梯度为gradients_from_gpus={gradients_from_gpus}\n")

            # agg_grad = krum.aggregate_gradients_krum()

            # NOTE(review): agg_grad is unbound for any other aggregate_type
            # value — confirm callers only pass the three supported names.
            if aggregate_type == 'average':
                # mean aggregation
                agg_grad = krum.aggregate_gradients_average(device)
            elif aggregate_type == 'krum':
                # Krum aggregation
                agg_grad = krum.aggregate_gradients_krum(device)
            elif aggregate_type == 'median':
                # coordinate-wise median aggregation
                agg_grad = krum.aggregate_gradients_median(device)

            
            # if agg_grad is not None:
            #     agg_grad = agg_grad.to(device)
            #     shapes = [param.grad.shape for param in ddp_model.parameters()]
            #     offset = 0
            #     for param, shape in zip(ddp_model.parameters(), shapes):
            #         param_grad_size = torch.numel(param.grad)
            #         param.grad = agg_grad[offset:offset + param_grad_size].view(shape)
            #         offset += param_grad_size
                
            # optimizer.step()

            # # 记录每个rank的loss
            # with lock:
            #     acc_step_rank[rank].append(loss.item())

            # if batch_idx % 10 == 0:
            #     print(f'Rank {rank}, Epoch {epoch}, Batch {batch_idx}, Loss {loss.item()}')


            # Scatter the aggregated flat gradient back into each parameter's
            # .grad buffer slice by slice, then take an optimizer step.
            if agg_grad is not None:
                gradient_index = 0
                for param in ddp_model.parameters():
                    gradient_length = param.grad.data.numel()
                    param.grad.data = agg_grad[gradient_index:gradient_index + gradient_length].reshape(param.grad.data.shape)
                    gradient_index += gradient_length

            optimizer.step()

        # End-of-epoch evaluation on the held-out 20% split.
        ddp_model.eval()
        correct = 0
        total = 0
        with torch.no_grad():
            for data, target in test_loader:
                data, target = data.to(device), target.to(device)
                outputs = ddp_model(data)
                _, predicted = torch.max(outputs.data, 1)
                total += target.size(0)
                correct += (predicted == target).sum().item()

        accuracy = 100 * correct / total
        # Only rank 0 records accuracy into the shared manager list.
        if rank == 0:
            lock.acquire()
            acc_step_rank.append(accuracy)
            lock.release()
            print(f"Epoch [{epoch+1}/{sum_epochs}], Accuracy: {accuracy:.2f}%")


    cleanup()


#################################################################################################








#################################################################################################

def main():
    """Entry point: run each attack/aggregation combination and plot results.

    Each mp.spawn call launches world_size worker processes running train();
    rank 0 appends its per-epoch test accuracy to the shared manager list,
    which is plotted and saved under log/ after the runs complete.
    """
    world_size = node_num
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = Net().to(device)
    krum = CustomGradientAggregator(model, num_workers=node_num)
    # Use the same "spawn" context for the Manager and the Lock so the lock
    # can be pickled into the spawned workers.
    manager = get_context("spawn").Manager()
    lock = manager.Lock()

    # BUG FIX: plt.savefig does not create intermediate directories; without
    # this, every savefig below raises FileNotFoundError if log/ is missing.
    os.makedirs('log', exist_ok=True)

    ### Seesaw
    # Baselines: no attack (attack=0) under each aggregation rule.
    acc_step_rank_no = manager.list()
    print('acc_step_rank_no:', acc_step_rank_no)

    mp.spawn(train, args=(world_size, krum, acc_step_rank_no, lock, 'Seesaw', 'average',0), nprocs=world_size, join=True)
    print('range:', range(len(acc_step_rank_no)))
    print('acc_step_rank_no:', acc_step_rank_no)



    acc_step_rank_no_krum = manager.list()
    print('acc_step_rank_no_krum:', acc_step_rank_no_krum)

    mp.spawn(train, args=(world_size, krum, acc_step_rank_no_krum, lock, 'Seesaw', 'krum',0), nprocs=world_size, join=True)
    print('range:', range(len(acc_step_rank_no_krum)))
    print('acc_step_rank_no_krum:', acc_step_rank_no_krum)


    acc_step_rank_no_median = manager.list()
    print('acc_step_rank_no_median:', acc_step_rank_no_median)

    mp.spawn(train, args=(world_size, krum, acc_step_rank_no_median, lock, 'Seesaw', 'median',0), nprocs=world_size, join=True)
    print('range:', range(len(acc_step_rank_no_median)))
    print('acc_step_rank_no_median:', acc_step_rank_no_median)


    # Seesaw attack with Krum aggregation (train's defaults).
    acc_step_rank = manager.list()
    print('acc_step_rank:', acc_step_rank)

    mp.spawn(train, args=(world_size, krum, acc_step_rank, lock), nprocs=world_size, join=True)
    print('range:', range(len(acc_step_rank)))
    print('acc_step_rank:', acc_step_rank)

    # Seesaw attack with average aggregation
    acc_step_rank_avg = manager.list()
    mp.spawn(train, args=(world_size, krum, acc_step_rank_avg, lock, 'Seesaw', 'average'), nprocs=world_size, join=True)
    print('range:', range(len(acc_step_rank_avg)))
    print('acc_step_rank_avg:', acc_step_rank_avg)


    # Seesaw attack with median aggregation
    acc_step_rank_median = manager.list()
    mp.spawn(train, args=(world_size, krum, acc_step_rank_median, lock, 'Seesaw', 'median'), nprocs=world_size, join=True)
    print('range:', range(len(acc_step_rank_median)))
    print('acc_step_rank_median:', acc_step_rank_median)

    # Plot the accuracy curves for the Seesaw attack.
    plt.figure(figsize=(12, 8))
    plt.plot(range(len(acc_step_rank_no)), acc_step_rank_no, marker='.', label='No attack')
    plt.plot(range(len(acc_step_rank)), acc_step_rank, marker='o', label='Seesaw - Krum')
    plt.plot(range(len(acc_step_rank_avg)), acc_step_rank_avg, marker='x', label='Seesaw - Average')
    plt.plot(range(len(acc_step_rank_median)), acc_step_rank_median, marker='+', label='Seesaw - Median')
    plt.xlabel('Batch')
    plt.ylabel('Accuracy rate')
    plt.title('The contrast picture of Seesaw')
    plt.legend()
    plt.grid(True)
    plt.savefig(f'log/acc_step_rank_{world_size}_{f}_aggregation_compare_.png')
    plt.close()




    ### finite attack with Krum aggregation
    finite_acc_step_rank = manager.list()
    print('finite_acc_step_rank:', finite_acc_step_rank)

    mp.spawn(train, args=(world_size, krum, finite_acc_step_rank, lock, 'finite'), nprocs=world_size, join=True)
    print('range:', range(len(finite_acc_step_rank)))
    print('finite_acc_step_rank:', finite_acc_step_rank)

    # finite attack with average aggregation
    finite_acc_step_rank_avg = manager.list()
    mp.spawn(train, args=(world_size, krum, finite_acc_step_rank_avg, lock, 'finite', 'average'), nprocs=world_size, join=True)
    print('range:', range(len(finite_acc_step_rank_avg)))
    print('finite_acc_step_rank_avg:', finite_acc_step_rank_avg)


    # finite attack with median aggregation
    finite_acc_step_rank_median = manager.list()
    mp.spawn(train, args=(world_size, krum, finite_acc_step_rank_median, lock, 'finite', 'median'), nprocs=world_size, join=True)
    print('range:', range(len(finite_acc_step_rank_median)))
    print('finite_acc_step_rank_median:', finite_acc_step_rank_median)

    # Plot the accuracy curves for the finite attack.
    plt.figure(figsize=(12, 8))
    plt.plot(range(len(acc_step_rank_no)), acc_step_rank_no, marker='.', label='No attack')
    plt.plot(range(len(finite_acc_step_rank)), finite_acc_step_rank, marker='o', label='finite - Krum')
    plt.plot(range(len(finite_acc_step_rank_avg)), finite_acc_step_rank_avg, marker='x', label='finite - Average')
    plt.plot(range(len(finite_acc_step_rank_median)), finite_acc_step_rank_median, marker='+', label='finite - Median')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy rate')
    plt.title('The contrast picture of finite')
    plt.legend()
    plt.grid(True)
    plt.savefig(f'log/acc_step_rank_{world_size}_{f}_aggregation_compare_finite_.png')
    plt.close()

    # Per-aggregation-rule comparison: average.
    plt.plot(range(len(acc_step_rank_no)), acc_step_rank_no, marker='.', label='No attack')
    plt.plot(range(len(acc_step_rank_avg)), acc_step_rank_avg, marker='o', label='Seesaw - Average')
    plt.plot(range(len(finite_acc_step_rank_avg)), finite_acc_step_rank_avg, marker='+', label='finite - Average')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy rate')
    plt.title('Comparison diagram of average aggregation')
    plt.legend()
    plt.grid(True)
    plt.savefig(f'log/acc_step_rank_{world_size}_{f}_aggregation_compare_Average_.png')
    plt.close()

    # Per-aggregation-rule comparison: Krum.
    plt.plot(range(len(acc_step_rank_no_krum)), acc_step_rank_no_krum, marker='.', label='No attack')
    plt.plot(range(len(acc_step_rank)), acc_step_rank, marker='o', label='Seesaw - Krum')
    plt.plot(range(len(finite_acc_step_rank)), finite_acc_step_rank, marker='+', label='finite - Krum')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy rate')
    plt.title('Comparison diagram of krum aggregation')
    plt.legend()
    plt.grid(True)
    plt.savefig(f'log/acc_step_rank_{world_size}_{f}_aggregation_compare_krum_.png')
    plt.close()

    # Per-aggregation-rule comparison: median.
    plt.plot(range(len(acc_step_rank_no_median)), acc_step_rank_no_median, marker='.', label='No attack')
    plt.plot(range(len(acc_step_rank_median)), acc_step_rank_median, marker='o', label='Seesaw - Median')
    plt.plot(range(len(finite_acc_step_rank_median)), finite_acc_step_rank_median, marker='+', label='finite - Median')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy rate')
    plt.title('Comparison diagram of median aggregation')
    plt.legend()
    plt.grid(True)
    plt.savefig(f'log/acc_step_rank_{world_size}_{f}_aggregation_compare_median_.png')
    plt.close()

# Standard entry guard — required because mp.spawn re-imports this module
# in every child process.
if __name__ == '__main__':
    main()
