import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
import numpy as np
import matplotlib.pyplot as plt
from multiprocessing import Manager, Lock
import multiprocessing
import time
from datetime import datetime

# Global configuration. Original design note: the total must satisfy
# n = 2f + 3 so that gamma reaches its maximum value.
node_num = 11  # total number of nodes (world size)
f = 4  # number of Byzantine (malicious) nodes
malicious_rank_list_pre=[0,3,7,9]  # ranks that act as Byzantine nodes

# 定义模型
# Model definition
class Net(nn.Module):
    """Small CNN for MNIST; returns log-probabilities over the 10 digits."""

    def __init__(self):
        super(Net, self).__init__()
        # Two conv layers feeding two fully-connected layers.
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.dropout1 = nn.Dropout2d(0.25)
        self.dropout2 = nn.Dropout2d(0.5)
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        """Forward a batch of 1x28x28 images; output shape (batch, 10)."""
        h = torch.relu(self.conv1(x))
        h = torch.relu(self.conv2(h))
        h = self.dropout1(torch.max_pool2d(h, 2))
        h = torch.flatten(h, 1)
        h = self.dropout2(torch.relu(self.fc1(h)))
        return torch.log_softmax(self.fc2(h), dim=1)

# 数据预处理和加载
# Data preprocessing and loading
def setup_data_loader(batch_size, rank, world_size):
    """Return a DataLoader over the MNIST train split, sharded for this rank.

    A DistributedSampler partitions the dataset so each of the `world_size`
    processes iterates over a disjoint subset.
    """
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])
    train_set = datasets.MNIST('./data', train=True, download=True, transform=preprocess)
    shard_sampler = torch.utils.data.distributed.DistributedSampler(
        train_set, num_replicas=world_size, rank=rank)
    return DataLoader(train_set, batch_size=batch_size, sampler=shard_sampler)

# 初始化过程
# Process-group initialization
def setup(rank, world_size):
    """Join the 'gloo' process group and seed torch identically on every rank."""
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '12355'
    dist.init_process_group("gloo", rank=rank, world_size=world_size)
    # Same base seed everywhere so models start from identical weights.
    torch.manual_seed(0)

# 清理过程
# Tear down the process group created by setup().
def cleanup():
    dist.destroy_process_group()

class CustomGradientAggregator:
    """Krum-style Byzantine-robust gradient aggregator.

    Collects one flattened gradient vector per worker and selects the single
    gradient whose summed L2 distance to its k nearest neighbours is smallest.
    """

    def __init__(self, model, num_workers=node_num, k=node_num - f - 2):
        self.model = model            # kept for reference; not used in aggregation
        self.num_workers = num_workers
        self.k = k                    # neighbours counted in each Krum score
        self.gradients = {}           # worker index -> flattened CPU gradient

    def collect_gradients(self, gradients_from_gpus):
        """Store detached CPU copies of the gradients, keyed by worker index."""
        for gpu_idx, grad_tensor in gradients_from_gpus.items():
            if grad_tensor is not None:
                self.gradients[gpu_idx] = grad_tensor.detach().cpu()

    def calculate_distances(self):
        """Return pairwise L2 distances {(i, j): d} for all i != j.

        Distance is symmetric, so each pair is computed once and mirrored
        (the original computed every pair twice).
        """
        distances = {}
        for i in range(self.num_workers):
            for j in range(i + 1, self.num_workers):
                d = torch.norm(self.gradients[i] - self.gradients[j], p=2).item()
                distances[(i, j)] = d
                distances[(j, i)] = d
        return distances

    def find_k_nearest_neighbors(self, distances):
        """Return {i: [indices of the k workers closest to worker i]}.

        BUG FIX: the original sorted (index, distance) tuples, which orders by
        worker index — not by distance — so it picked the k lowest-indexed
        workers instead of the k nearest. Sort must key on the distance.
        """
        neighbors = {}
        for i in range(self.num_workers):
            others = [(j, distances[(i, j)]) for j in range(self.num_workers) if i != j]
            others.sort(key=lambda pair: pair[1])  # nearest first
            neighbors[i] = [j for j, _ in others[:self.k]]
        return neighbors

    def aggregate_gradients_krum(self):
        """Return the Krum-selected gradient, or None if too few were collected."""
        if len(self.gradients) < self.k:
            return None

        distances = self.calculate_distances()
        neighbors = self.find_k_nearest_neighbors(distances)

        selected_gradients = None
        min_score = float('inf')
        for i in range(self.num_workers):
            # Krum score: sum of distances to the k nearest neighbours.
            score = sum(distances[(i, j)] for j in neighbors[i])
            if score < min_score:
                min_score = score
                selected_gradients = self.gradients[i]

        return selected_gradients

def is_malicious_node(rank, malicious_rank_list):
    """Return True iff this rank is configured as a Byzantine (malicious) node."""
    for bad_rank in malicious_rank_list:
        if bad_rank == rank:
            return True
    return False
# Generate the gradient published by an attacking (Byzantine) node.
def generate_malicious_gradient(non_byzantine_gradients_list, target_grad, rank):
    """Craft a malicious gradient anchored near the 'median' honest gradient.

    Sorts the honest gradients by L2 distance to target_grad, takes the middle
    one as an anchor, and returns the anchor minus a tiny perturbation on its
    first coordinate.
    """
    dist_to_honest = [torch.norm(g - target_grad, p=2).item()
                      for g in non_byzantine_gradients_list]
    print(f"rank={rank}的梯度与其他非拜占庭节点点距离序列={dist_to_honest}\n")

    order = sorted(range(len(dist_to_honest)), key=lambda idx: dist_to_honest[idx])
    ranked_grads = [non_byzantine_gradients_list[idx] for idx in order]
    print(f"与rank{rank}的距离序列为{ranked_grads}\n")

    # Anchor on the middle element of the distance-sorted gradient list.
    mid = len(ranked_grads) // 2
    if len(ranked_grads) % 2 == 0:
        anchor_grad = ranked_grads[mid - 1]
    else:
        anchor_grad = ranked_grads[mid]

    # Nudge only the first coordinate so the fake gradient stays plausible.
    perturbation = torch.zeros_like(anchor_grad)
    perturbation[0] = 0.001
    return anchor_grad - perturbation


# 训练循环
# Training loop executed by each spawned process (one per rank).
def train(rank, world_size, krum, loss_step_rank, lock, malicious_rank_list):
    """Train for one epoch with manual Krum gradient aggregation.

    Honest ranks share their true gradients; ranks listed in
    `malicious_rank_list` publish a crafted malicious gradient instead.
    Every rank then applies the Krum-selected gradient and steps.
    """
    setup(rank, world_size)
    # Per-rank seed (microsecond clock + rank) so ranks differ in dropout
    # masks and shuffling.
    current_time = datetime.now()
    seed = current_time.microsecond + rank * 11
    torch.manual_seed(seed)

    batch_size = 64
    dataloader = setup_data_loader(batch_size, rank, world_size)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = Net().to(device)
    ddp_model = DDP(model, device_ids=[0] if torch.cuda.is_available() else None)
    # BUG FIX: Net.forward already applies log_softmax, so the matching loss
    # is NLLLoss; CrossEntropyLoss would apply log_softmax a second time.
    criterion = nn.NLLLoss().to(device)
    optimizer = optim.Adam(ddp_model.parameters(), lr=0.001)

    for epoch in range(1):
        ddp_model.train()
        for batch_idx, (data, target) in enumerate(dataloader):
            data, target = data.to(device), target.to(device)

            # Compute local gradients without DDP's automatic all-reduce;
            # aggregation is done manually with Krum below.
            with ddp_model.no_sync():
                optimizer.zero_grad()
                output = ddp_model(data)
                loss = criterion(output, target)
                loss.backward()

            # Flatten this rank's gradient into a single vector.
            local_grad = torch.cat([param.grad.data.flatten() for param in ddp_model.parameters()])

            # First collective: share every rank's honest gradient.
            gathered = [None for _ in range(world_size)]
            dist.all_gather_object(gathered, local_grad)
            gradients_from_gpus = {i: gathered[i] for i in range(world_size)}

            non_byzantine_gradients_list = [
                gradients_from_gpus[i] for i in range(world_size)
                if not is_malicious_node(i, malicious_rank_list)
            ]

            # BUG FIX: only Byzantine ranks craft a malicious gradient (the
            # original computed and printed it on every rank), and they must
            # publish it in their own slot so the second all-gather actually
            # shares it — the original re-shared the honest gradient.
            if is_malicious_node(rank, malicious_rank_list):
                malicious_gradient = generate_malicious_gradient(
                    non_byzantine_gradients_list, gradients_from_gpus[rank], rank)
                gradients_from_gpus[rank] = malicious_gradient
                # Also overwrite local param.grad so a failed aggregation
                # (agg_grad is None) still steps with the malicious gradient.
                offset = 0
                for param in ddp_model.parameters():
                    param_grad_size = torch.numel(param.grad)
                    param.grad = malicious_gradient[offset:offset + param_grad_size].view(param.grad.shape)
                    offset += param_grad_size

            # Second collective: re-share gradients, now including the
            # malicious ones. BUG FIX: all_gather_object requires a list
            # output buffer; the original passed a dict.
            regathered = [None for _ in range(world_size)]
            dist.all_gather_object(regathered, gradients_from_gpus[rank])
            gradients_from_gpus = {i: regathered[i] for i in range(world_size)}

            # Krum aggregation: every rank applies the same selected gradient.
            krum.collect_gradients(gradients_from_gpus)
            agg_grad = krum.aggregate_gradients_krum()

            if agg_grad is not None:
                agg_grad = agg_grad.to(device)
                offset = 0
                for param in ddp_model.parameters():
                    param_grad_size = torch.numel(param.grad)
                    param.grad = agg_grad[offset:offset + param_grad_size].view(param.grad.shape)
                    offset += param_grad_size

            optimizer.step()

            # Record this rank's loss for post-run averaging/plotting.
            with lock:
                loss_step_rank[rank].append(loss.item())

            if batch_idx % 10 == 0:
                print(f'Rank {rank}, Epoch {epoch}, Batch {batch_idx}, Loss {loss.item()}')

    cleanup()


def main():
    """Spawn one process per node, train with Krum aggregation, then plot loss."""
    world_size = node_num
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = Net().to(device)
    krum = CustomGradientAggregator(model, num_workers=node_num)

    manager = Manager()
    # BUG FIX: a raw multiprocessing.Lock() cannot be pickled into mp.spawn
    # workers (spawn start method); a Manager proxy lock can be shared freely.
    lock = manager.Lock()
    loss_step_rank = manager.list([manager.list() for _ in range(world_size)])

    # Configure which ranks behave as Byzantine nodes.
    malicious_rank_list = malicious_rank_list_pre

    mp.spawn(train, args=(world_size, krum, loss_step_rank, lock, malicious_rank_list),
             nprocs=world_size, join=True)
    print(f"loss_step_rank={loss_step_rank}")

    # Average the per-rank losses batch-by-batch, truncating to the shortest
    # rank so all ranks contribute to every averaged point.
    num_batches = min(len(loss_step_rank[rank]) for rank in range(world_size))
    loss_batch = [np.mean([loss_step_rank[rank][batch_idx] for rank in range(world_size)])
                  for batch_idx in range(num_batches)]
    print(f"loss_batch={loss_batch}")

    # Plot the averaged loss curve.
    plt.plot(range(len(loss_batch)), loss_batch, marker='o')
    plt.xlabel('Batch')
    plt.ylabel('Loss')
    plt.title('Average Loss over Batches')
    plt.grid(True)
    plt.show()

# Entry point: run only when executed as a script, not on import
# (required for multiprocessing spawn on all platforms).
if __name__ == '__main__':
    main()
