import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
import numpy as np
import matplotlib.pyplot as plt
from multiprocessing import Manager, Lock
import multiprocessing
import time
from datetime import datetime

# Global settings. The relation n = 2f + 3 must hold for gamma_m (the attack
# magnitude computed in generate_malicious_gradient) to reach its maximum.
node_num = 11  # total number of nodes
f = 4 # number of Byzantine (malicious) nodes
# Hyper-parameters used to compute gamma_m
p = 2.00        # norm exponent
q = 2.00        # adjustment factor used by the Krum / GeoMed attack formula
delta = 0.03  # small offset scaling the attack
malicious_rank_list_pre=[0,3,7,9]

# 定义模型
class Net(nn.Module):
    """Small CNN for MNIST: two conv layers, two dropouts, two linear layers.

    Input is a batch of 1x28x28 images; output is log-probabilities over
    the 10 digit classes.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        # NOTE(review): dropout2 receives a 2-D (N, C) tensor in forward();
        # nn.Dropout is the conventional module there — Dropout2d may warn
        # on non-4D input in recent torch versions. Behavior kept as-is.
        self.dropout1 = nn.Dropout2d(0.25)
        self.dropout2 = nn.Dropout2d(0.5)
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        """Return log-softmax class scores for a batch of images."""
        out = torch.relu(self.conv1(x))
        out = torch.relu(self.conv2(out))
        out = self.dropout1(torch.max_pool2d(out, 2))
        out = torch.flatten(out, 1)
        out = self.dropout2(torch.relu(self.fc1(out)))
        return torch.log_softmax(self.fc2(out), dim=1)

# 数据预处理和加载
# Data preprocessing and loading
def setup_data_loader(batch_size, rank, world_size):
    """Build a DataLoader over the MNIST training set, sharded for this rank.

    A DistributedSampler partitions the dataset so that each of the
    world_size processes iterates a disjoint subset.

    batch_size: per-rank batch size.
    rank / world_size: identify this process's shard.
    Returns: torch.utils.data.DataLoader yielding (image, label) batches.
    """
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    dataset = datasets.MNIST('./data', train=True, download=True, transform=transform)
    sampler = torch.utils.data.distributed.DistributedSampler(dataset, num_replicas=world_size, rank=rank)
    # Removed dead `indices = list(sampler)`: it materialized the full
    # ~60k-index permutation per call solely for a commented-out debug print.
    return DataLoader(dataset, batch_size=batch_size, sampler=sampler)

# 初始化过程
def setup(rank, world_size):
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '12355'
    dist.init_process_group("gloo", rank=rank, world_size=world_size)
    torch.manual_seed(0)

# 清理过程
def cleanup():
    # Tear down the process group created by setup().
    dist.destroy_process_group()

class CustomGradientAggregator:
    """Byzantine-robust gradient aggregation via the Krum selection rule.

    Collects one flattened gradient vector per worker, then picks the single
    gradient whose summed distance to its k nearest neighbors is smallest.

    model: the model whose gradients are aggregated (kept for reference).
    num_workers: total number of workers contributing gradients.
    k: neighbors scored per candidate (n - f - 2 in the Krum paper).
    """

    def __init__(self, model, num_workers=node_num, k=node_num - f - 2):
        self.model = model
        self.num_workers = num_workers
        self.k = k
        self.gradients = {}  # worker rank -> flattened gradient (CPU tensor)

    def collect_gradients(self, gradients_from_gpus):
        """Store each worker's gradient, detached and moved to the CPU."""
        for gpu_idx, grad_tensor in gradients_from_gpus.items():
            if grad_tensor is not None:
                self.gradients[gpu_idx] = grad_tensor.detach().cpu()

    def calculate_distances(self):
        """Return {(i, j): ||g_i - g_j||_2} for every ordered pair i != j."""
        distances = {}
        for i in range(self.num_workers):
            for j in range(i + 1, self.num_workers):
                # Distance is symmetric: compute each norm once, store both
                # orderings (previously every pair was computed twice).
                d = torch.norm(self.gradients[i] - self.gradients[j], p=2).item()
                distances[(i, j)] = d
                distances[(j, i)] = d
        return distances

    def find_k_nearest_neighbors(self, distances):
        """Return {i: [the k worker indices closest to worker i]}."""
        neighbors = {}
        for i in range(self.num_workers):
            # BUG FIX: sort by the distance value. The original sorted
            # (j, distance) tuples, which orders by neighbor index j, so the
            # "nearest neighbors" were simply the lowest-ranked workers.
            ranked = sorted(
                ((j, distances[(i, j)]) for j in range(self.num_workers) if i != j),
                key=lambda pair: pair[1],
            )
            neighbors[i] = [j for j, _ in ranked[:self.k]]
        return neighbors

    def aggregate_gradients_krum(self):
        """Run Krum; return the selected gradient, or None if not all arrived.

        BUG FIX: the readiness guard now requires all num_workers gradients
        (the old `< self.k` check still allowed calculate_distances to
        KeyError on missing workers).
        """
        if len(self.gradients) < self.num_workers:
            return None

        distances = self.calculate_distances()
        neighbors = self.find_k_nearest_neighbors(distances)

        selected_gradients = None
        min_score = float('inf')
        for i in range(self.num_workers):
            # Krum score: sum of distances to i's k closest peers.
            score = sum(distances[(i, j)] for j in neighbors[i])
            if score < min_score:
                min_score = score
                selected_gradients = self.gradients[i]

        return selected_gradients

def is_malicious_node(rank, malicious_rank_list):
    """Return True iff `rank` is configured as a Byzantine (malicious) node."""
    return any(bad == rank for bad in malicious_rank_list)

def generate_malicious_gradient(rank, non_byzantine_gradients_list, grad_shape, delta, p, q, d):
    """Craft a malicious gradient designed to defeat the Krum aggregator.

    Shifts the mean of the honest gradients by gamma_m along a single
    coordinate, where gamma_m = delta * f**(1/q) * d**(1/p) (f is the
    module-level Byzantine-node count, d the gradient dimensionality).

    non_byzantine_gradients_list and grad_shape are computed by the caller;
    delta, p and q are hyper-parameters.
    """
    # Keep everything on the same device as the honest gradients.
    device = non_byzantine_gradients_list[0].device
    # Unit attack direction: perturb only the first coordinate for simplicity.
    attack_dir = torch.zeros(grad_shape, device=device)
    attack_dir[0] = 1
    # Mean of the honest (non-Byzantine) gradients, stacked along a new axis.
    honest_mean = torch.mean(torch.stack(non_byzantine_gradients_list), dim=0)
    gamma_m = delta * (f ** (1 / q)) * (d ** (1 / p))
    malicious_gradient = honest_mean + gamma_m * attack_dir
    print(f"rank={rank},malicious_gradient={malicious_gradient}\n")
    return malicious_gradient

# 训练循环
def train(rank, world_size, krum, loss_step_rank, lock,malicious_rank_list):
    setup(rank, world_size)
    # 设置不同的随机数种子
    # 获取当前时间，精确到微秒
    current_time = datetime.now()
    microsecond_timestamp = current_time.microsecond
    #print(f"rank={rank},microsecond_timestamp={microsecond_timestamp}")
    seed = microsecond_timestamp+ rank*11
    #print(f"------------rank={rank},seed={seed}-------------------------------\n")
    torch.manual_seed(seed)
    
    batch_size = 64
    #
    dataloader = setup_data_loader(batch_size, rank, world_size)
    
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = Net().to(device)
    ddp_model = DDP(model, device_ids=[0] if torch.cuda.is_available() else None)
    criterion = nn.CrossEntropyLoss().to(device)
    optimizer = optim.Adam(ddp_model.parameters(), lr=0.001)
    
    for epoch in range(1):
        ddp_model.train()
        for batch_idx, (data, target) in enumerate(dataloader):
            '''data 是一个张量，包含一批次（64个）的MNIST图像，每个图像的形状为 [1, 28, 28]。
            打印 data[:2] 将显示批次中的前两个图像张量'''
            data, target = data.to(device), target.to(device)

            # 打印每个rank的输入图像
            '''if batch_idx == 0:  # 仅在第一个批次打印图像
                for i in range(2):  # 仅显示前2个样本
                    img = data[i].cpu().numpy().squeeze()
                    plt.imshow(img, cmap='gray')
                    plt.title(f'Label: {target[i].item()}')
                    plt.show()'''
            with ddp_model.no_sync():
                optimizer.zero_grad()
                output = ddp_model(data)
                loss = criterion(output, target)
                loss.backward()
            '''收集每个节点的梯度，包括自己的梯度
            功能：构建一个字典 gradients_from_gpus，键为节点的 rank，值为该节点所有模型参数的梯度向量。
            作用：对于每个节点 i，获取所有模型参数的梯度，将其展平并连接成一个一维向量，存储在字典中。'''
            gradients_from_gpus = {}
            for i in range(world_size):
                gradient_list = []
                
                for param in ddp_model.parameters():
                    gradient_list.append(param.grad.data.flatten())
                gradients_from_gpus[i] = torch.cat(gradient_list)
            print(f"-----------rank={rank},当前rank梯度为{gradients_from_gpus[rank]},gradients_from_gpus={gradients_from_gpus}\n")
            
            # 收集非恶意节点列表
            non_byzantine_gradients_list = []
            for i in range(world_size):
                if not is_malicious_node(i,malicious_rank_list):
                    non_byzantine_gradients_list.append(gradients_from_gpus[i])
            

            # 获取梯度维度数并存储到全局变量 d 中--generate_malicious_gradient函数需要使用;d=1199882 
            d = gradients_from_gpus[rank].shape[0]
            #生成恶意梯度
            malicious_gradient = generate_malicious_gradient(rank,non_byzantine_gradients_list, gradients_from_gpus[rank].shape, delta, p, q, d)
            #print(f"rank={rank},生成的恶意梯度malicious_gradient为{malicious_gradient}\n")
            #处理恶意rank上的梯度形状问题
            if is_malicious_node(rank,malicious_rank_list):
                offset = 0
                for param in ddp_model.parameters():
                    param_grad_size = torch.numel(param.grad)
                    param.grad = malicious_gradient[offset:offset + param_grad_size].view(param.grad.shape)
                    offset += param_grad_size
                #print(f"坏节点的param.grad={param.grad}")
            
            '''使用dist.all_gather_object收集所有节点的梯度信息'''
            dist.all_gather_object(gradients_from_gpus, gradients_from_gpus[rank])
            '''-----------------------------------下面都没有问题---------------------------------------------'''
            # 使用krum对象收集梯度并聚合
            krum.collect_gradients(gradients_from_gpus)
            agg_grad = krum.aggregate_gradients_krum()
            
            if agg_grad is not None:
                agg_grad = agg_grad.to(device)
                shapes = [param.grad.shape for param in ddp_model.parameters()]
                offset = 0
                for param, shape in zip(ddp_model.parameters(), shapes):
                    param_grad_size = torch.numel(param.grad)
                    param.grad = agg_grad[offset:offset + param_grad_size].view(shape)
                    offset += param_grad_size
            
            optimizer.step()

            # 记录每个rank的loss
            with lock:
                loss_step_rank[rank].append(loss.item())

            if batch_idx % 10 == 0:
                print(f'Rank {rank}, Epoch {epoch}, Batch {batch_idx}, Loss {loss.item()}')

    cleanup()


def main():
    """Spawn one training process per node, then plot the averaged loss curve."""
    world_size = node_num
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = Net().to(device)
    krum = CustomGradientAggregator(model, num_workers=node_num)

    manager = Manager()
    lock = Lock()
    # One shared loss history per rank, appended to by the workers.
    loss_step_rank = manager.list([manager.list() for _ in range(world_size)])

    # Ranks configured to behave maliciously.
    malicious_rank_list = malicious_rank_list_pre
    mp.spawn(
        train,
        args=(world_size, krum, loss_step_rank, lock, malicious_rank_list),
        nprocs=world_size,
        join=True,
    )
    print(f"loss_step_rank={loss_step_rank}")

    # Average losses across ranks, batch by batch (truncate to the shortest
    # per-rank history).
    num_batches = min(len(loss_step_rank[r]) for r in range(world_size))
    loss_batch = [
        np.mean([loss_step_rank[r][step] for r in range(world_size)])
        for step in range(num_batches)
    ]
    print(f"loss_batch={loss_batch}")

    # Plot the averaged loss curve.
    plt.plot(range(len(loss_batch)), loss_batch, marker='o')
    plt.xlabel('Batch')
    plt.ylabel('Loss')
    plt.title('Average Loss over Batches')
    plt.grid(True)
    plt.show()

# Script entry point: launch the distributed training run.
if __name__ == '__main__':
    main()
