#%%
import torch.multiprocessing as mp
def temp(i, a, b, c):
    # Worker entry point for mp.spawn: `i` is the process index injected by
    # spawn; the remaining positional arguments come from the `args` tuple.
    message = 'The {} process{}'.format(i, c)
    print(message)

mp.spawn(fn=temp,nprocs=3,args=(1,2,3))


#%% My own torch.distributed (DDP) multi-GPU training experiment
import time
import torch,torchvision
import torch.nn.parallel
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from torch.nn.parallel import DistributedDataParallel
batch = 128  # global batch size; split evenly across the worker processes
gpu_nums = torch.cuda.device_count()  # one worker process per visible GPU
def main_worker(local_rank: int):
    """Per-process DDP training worker, spawned once per GPU by mp.spawn.

    Args:
        local_rank: process index injected by mp.spawn; doubles as the GPU id
            and the global rank, since all workers live on one node.
    """
    # Every worker joins the same process group via a local TCP rendezvous.
    dist.init_process_group(backend='nccl'
                            , init_method='tcp://127.0.0.1:55555'
                            , world_size=gpu_nums
                            , rank=local_rank)
    torch.cuda.set_device(local_rank)

    # aux_logits=False so forward() returns a plain tensor in training mode;
    # with the default aux classifiers GoogLeNet returns a namedtuple, which
    # CrossEntropyLoss cannot consume.
    model = torchvision.models.googlenet(pretrained=False, aux_logits=False)
    model.fc = torch.nn.Linear(in_features=1024, out_features=10)  # 10 MNIST classes
    model.cuda(local_rank)
    # BUG FIX: the model was never wrapped, so gradients were not
    # all-reduced across workers. DDP makes backward() synchronize them.
    model = DistributedDataParallel(model, device_ids=[local_rank])

    criterion = torch.nn.CrossEntropyLoss().cuda(local_rank)
    # BUG FIX: torch.optim.Adam has no .cuda() method (the old code raised
    # AttributeError); optimizer state follows the parameters' device.
    optimizer = torch.optim.Adam(params=model.parameters())

    # datasets
    # BUG FIX: without a transform, MNIST yields PIL images that the default
    # collate cannot batch. MNIST is 1-channel, so replicate to 3 channels
    # for GoogLeNet's RGB stem.
    # NOTE(review): 28x28 inputs appear to survive GoogLeNet's ceil-mode
    # pooling; add a Resize if the spatial dims collapse — confirm.
    transform = torchvision.transforms.Compose([
        torchvision.transforms.Grayscale(num_output_channels=3),
        torchvision.transforms.ToTensor(),
    ])
    dataset = torchvision.datasets.MNIST('./MNIST', transform=transform, download=True)
    # DistributedSampler gives each rank a disjoint shard of the dataset.
    sampler = torch.utils.data.distributed.DistributedSampler(dataset=dataset)
    # BUG FIX: batch_size must be an int; `/` produces a float in Python 3.
    data_loader = torch.utils.data.DataLoader(dataset=dataset,
                                              batch_size=batch // gpu_nums,
                                              sampler=sampler,
                                              num_workers=2,
                                              pin_memory=True)
    for epoch in range(10):
        # Re-seed the sampler so every epoch gets a different shuffle
        # that is still consistent across ranks.
        sampler.set_epoch(epoch)
        for images, targets in data_loader:
            start_time = time.time()
            images, targets = images.cuda(local_rank), targets.cuda(local_rank)
            out = model(images)
            loss = criterion(out, targets)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            time_use = time.time() - start_time
            print('time:', time_use)
            print('loss:', loss.item())
    # Tear down the process group so workers exit cleanly.
    dist.destroy_process_group()
mp.spawn(fn=main_worker,args=(),nprocs=gpu_nums)
