import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
import numpy as np
import matplotlib.pyplot as plt
from multiprocessing import Manager, Lock

# Global configuration for the Byzantine-robustness experiment.
node_num = 11 # total number of worker processes (distributed world size)
f = 4  # number of Byzantine (malicious) nodes; ranks 0..f-1 are malicious

# 定义模型
class Net(nn.Module):
    """Small MNIST CNN: two conv layers, two FC layers, log-probability output.

    Expects input of shape (batch, 1, 28, 28); returns log-softmax scores
    of shape (batch, 10).
    """

    def __init__(self):
        super(Net, self).__init__()
        # Feature extractor: 1 -> 32 -> 64 channels, 3x3 kernels, stride 1.
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        # Regularization after pooling and after the first FC layer.
        self.dropout1 = nn.Dropout2d(0.25)
        self.dropout2 = nn.Dropout2d(0.5)
        # 9216 = 64 channels * 12 * 12 spatial after conv+pool on 28x28 input.
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        """Map a batch of images to per-class log-probabilities."""
        x = torch.relu(self.conv1(x))
        x = torch.relu(self.conv2(x))
        x = self.dropout1(torch.max_pool2d(x, 2))
        x = torch.flatten(x, 1)
        x = self.dropout2(torch.relu(self.fc1(x)))
        return torch.log_softmax(self.fc2(x), dim=1)

# 数据预处理和加载
# Data preprocessing and loading
def setup_data_loader(batch_size, rank, world_size):
    """Build this rank's MNIST training DataLoader.

    A DistributedSampler partitions the dataset so each of the
    world_size workers iterates a disjoint shard.
    """
    tfm = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
    )
    train_set = datasets.MNIST('./data', train=True, download=True, transform=tfm)
    shard_sampler = torch.utils.data.distributed.DistributedSampler(
        train_set, num_replicas=world_size, rank=rank
    )
    return DataLoader(train_set, batch_size=batch_size, sampler=shard_sampler)

# 初始化过程
# Distributed initialization
def setup(rank, world_size):
    """Join the gloo process group as `rank` of `world_size`.

    Uses a fixed localhost rendezvous, and seeds torch identically on
    every rank so all workers start from the same model weights.
    """
    os.environ.update(MASTER_ADDR='localhost', MASTER_PORT='12355')
    torch.manual_seed(0)
    dist.init_process_group("gloo", rank=rank, world_size=world_size)

# 清理过程
# Tear down the distributed process group when training finishes.
def cleanup():
    dist.destroy_process_group()

def is_malicious_node(rank, num_faulty=None):
    """Return True if `rank` is one of the Byzantine (malicious) workers.

    By convention the first `num_faulty` ranks (0 .. num_faulty-1) are the
    malicious ones.

    Args:
        rank: worker rank to test.
        num_faulty: number of faulty nodes; defaults to the module-level `f`.
    """
    if num_faulty is None:
        num_faulty = f
    # Equivalent to the original linear scan over range(num_faulty),
    # expressed as a direct range check.
    return 0 <= rank < num_faulty

def average_gradients(model):
    """All-reduce and average every parameter gradient across all workers.

    Gradients are summed in-place with dist.all_reduce and then divided by
    the world size, giving each worker the mean gradient.

    Requires an initialized process group (see setup()).
    """
    world = float(dist.get_world_size())
    for param in model.parameters():
        # Skip parameters that received no gradient (e.g. frozen/unused).
        if param.grad is None:
            continue
        # BUGFIX: dist.reduce_op is deprecated and removed in current
        # PyTorch; the supported enum is dist.ReduceOp.
        dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
        param.grad.data /= world

# 训练循环
# Per-worker training loop (entry point for mp.spawn)
def train(rank, world_size, loss_step_rank, lock):
    """Run one training epoch on this worker.

    Honest workers compute real gradients; malicious ranks (the first `f`)
    replace their local gradients with random noise before the manual
    all-reduce average, simulating Byzantine behaviour. Each batch loss is
    appended to the shared list `loss_step_rank[rank]`.

    Args:
        rank: this worker's rank in [0, world_size).
        world_size: total number of workers.
        loss_step_rank: Manager list of per-rank Manager lists of losses.
        lock: multiprocessing lock guarding writes to loss_step_rank.
    """
    setup(rank, world_size)

    batch_size = 64
    dataloader = setup_data_loader(batch_size, rank, world_size)

    # NOTE(review): every rank targets cuda:0 when a GPU is present, so
    # world_size processes would share one device — the CPU/gloo path is
    # the intended configuration here; confirm before running on GPU.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = Net().to(device)
    ddp_model = DDP(model, device_ids=[0] if torch.cuda.is_available() else None)
    # BUGFIX: Net.forward already returns log-probabilities (log_softmax),
    # so the matching loss is NLLLoss. CrossEntropyLoss would apply
    # log_softmax a second time, distorting the gradients.
    criterion = nn.NLLLoss().to(device)
    optimizer = optim.Adam(ddp_model.parameters(), lr=0.001)

    for epoch in range(1):
        ddp_model.train()
        for batch_idx, (data, target) in enumerate(dataloader):
            data, target = data.to(device), target.to(device)
            # no_sync keeps gradients local so DDP does not average them;
            # aggregation happens manually below, after the Byzantine
            # corruption step.
            with ddp_model.no_sync():
                optimizer.zero_grad()
                output = ddp_model(data)
                loss = criterion(output, target)
                loss.backward()

            if is_malicious_node(rank):
                # Byzantine behaviour: discard the true gradient and send noise.
                for param in ddp_model.parameters():
                    param.grad = torch.randn_like(param.grad)

            average_gradients(ddp_model)  # all-reduce + average across ranks
            optimizer.step()

            with lock:
                loss_step_rank[rank].append(loss.item())
            if batch_idx % 10 == 0:
                print(f'Rank {rank}, Epoch {epoch}, Batch {batch_idx}, Loss {loss.item()}')

    cleanup()

def main():
    """Spawn `node_num` workers, collect per-rank losses, plot the mean curve."""
    world_size = node_num
    manager = Manager()
    lock = Lock()
    # One shared list per rank; each worker appends its per-batch losses.
    loss_step_rank = manager.list([manager.list() for _ in range(world_size)])

    mp.spawn(train, args=(world_size, loss_step_rank, lock), nprocs=world_size, join=True)

    # Materialize the Manager proxies into plain lists — printing the raw
    # proxies would show proxy objects, not the collected values.
    losses_per_rank = [list(loss_step_rank[rank]) for rank in range(world_size)]
    print(f"loss_step_rank={losses_per_rank}")

    # Average the loss across ranks for each batch index, truncating to the
    # shortest per-rank series so every batch has all ranks' values.
    num_batches = min(len(losses) for losses in losses_per_rank)
    loss_batch = []
    for batch_idx in range(num_batches):
        batch_losses = [losses[batch_idx] for losses in losses_per_rank]
        loss_batch.append(np.mean(batch_losses))
    print(f"loss_batch={loss_batch}")

    # Plot the averaged loss curve over batches.
    plt.plot(range(len(loss_batch)), loss_batch, marker='o')
    plt.xlabel('Batch')
    plt.ylabel('Loss')
    plt.title('Average loss over Batches')
    plt.grid(True)
    plt.show()

if __name__ == '__main__':
    import multiprocessing
    # freeze_support() is a no-op outside frozen Windows executables but is
    # required there before spawning child processes.
    multiprocessing.freeze_support()
    main()
