import torch
from torch import nn
from torch.nn import functional as f
from d2l import torch as d2l

def get_params(params, device):
    """Return an independent, gradient-enabled copy of `params` on `device`.

    Bug fixed: `p.to(device)` returns the *same* tensor object when `p` is
    already on `device`, so `requires_grad_()` would mutate the caller's
    parameters and the per-device "copies" would alias each other.
    `clone()` guarantees a fresh tensor per device.

    Args:
        params: iterable of tensors (one model replica's parameters).
        device: torch.device to place the copies on.

    Returns:
        list of cloned tensors on `device` with requires_grad=True.
    """
    new_params = [p.clone().to(device) for p in params]
    for p in new_params:
        p.requires_grad_()
    return new_params

def allreduce(data):
    """Sum the tensors in `data` across devices and broadcast the result.

    The first tensor accumulates the total in place; every other slot is
    then replaced by a copy of that total on its original device.
    """
    host = data[0]
    # Gather: pull every other shard onto the host tensor's device and sum.
    for shard in data[1:]:
        host += shard.to(host.device)
    # Scatter: push the summed value back out to each shard's device.
    for idx in range(1, len(data)):
        data[idx] = host.to(data[idx].device)

# Demo: allreduce one tensor on the GPU (CPU fallback via d2l.try_gpu)
# and one on the CPU; afterwards both hold the elementwise sum.
data_gpu = torch.ones((1, 2), device=d2l.try_gpu())
data_cpu = torch.ones((1, 2), device=torch.device('cpu'))
print("before allreduce:\n", data_gpu, '\n', data_cpu)
shards = [data_gpu, data_cpu]
allreduce(shards)
print('after allreduce:\n', shards[0], '\n', shards[1])

def split_batch(X, y, devices):
    """Scatter a minibatch of features X and labels y across `devices`.

    Returns a pair (X_shards, y_shards), one shard per device.
    """
    assert X.shape[0] == y.shape[0]
    X_shards = nn.parallel.scatter(X, devices)
    y_shards = nn.parallel.scatter(y, devices)
    return (X_shards, y_shards)

import sys

# NOTE(review): hard-coded local Windows path to the LeNet module.
# Raw string fixes the invalid escape sequences '\A' and '\D', which
# emit SyntaxWarning on Python 3.12+ (the runtime value is unchanged).
sys.path.append(r'E:\AI\DL')
import LeNet

def train_batch(X, y, device_params, devices, lr):
    """Train one minibatch with manual data parallelism.

    Splits (X, y) across `devices`, runs forward/backward on each replica,
    allreduces the gradients, and applies SGD on every replica.

    Args:
        X, y: full minibatch of features and labels.
        device_params: per-device lists of parameter tensors.
        devices: list of torch.device, aligned with device_params.
        lr: learning rate passed to d2l.sgd.
    """
    X_shards, y_shards = split_batch(X, y, devices)
    # Bug fixed: the default reduction='mean' made `.sum()` a no-op on a
    # scalar, and d2l.sgd then divides gradients by the batch size again,
    # double-normalizing them. reduction='none' keeps per-example losses so
    # .sum() yields the shard's total loss, as in the d2l reference code.
    loss = nn.CrossEntropyLoss(reduction='none')
    ls = [loss(LeNet.net(X_shard, device_W), y_shard).sum()
          for X_shard, y_shard, device_W in zip(X_shards, y_shards, device_params)]
    for l in ls:
        l.backward()  # one backward per device's loss
    # Sum each parameter's gradient across devices and broadcast it back.
    with torch.no_grad():
        for i in range(len(device_params[0])):
            allreduce([device_params[c][i].grad for c in range(len(devices))])
    # Update every replica; d2l.sgd normalizes by the full batch size.
    for param in device_params:
        d2l.sgd(param, lr, X.shape[0])

from SoftmaxRegression import data_generate as dg

# Fashion-MNIST data pipeline configuration.
batch_size = 256
num_workers = 4
train_iter, test_iter = dg.load_data_fashion_mnist(batch_size, num_workers)

# Training hyperparameters (read as globals by train below).
num_epochs = 10
lr = 0.1
def train(net):
    """Train `net` on Fashion-MNIST with nn.DataParallel across all GPUs.

    Uses the module-level `lr`, `num_epochs`, `train_iter`, `test_iter`.

    Bugs fixed:
      * `device_ids` contained torch.device('cpu') — nn.DataParallel only
        accepts CUDA devices, so use every available GPU instead.
      * The original wrapped the global `LeNet.net`, silently ignoring the
        `net` argument; wrap the parameter itself.
    """
    devices = d2l.try_all_gpus()

    def init_weights(m):
        if type(m) in [nn.Linear, nn.Conv2d]:
            nn.init.normal_(m.weight, std=0.01)
    net.apply(init_weights)
    # DataParallel requires the module's parameters on device_ids[0].
    net = net.to(devices[0])
    net = nn.DataParallel(net, device_ids=devices)
    trainer = torch.optim.SGD(net.parameters(), lr)
    loss = nn.CrossEntropyLoss()

    animator = d2l.Animator('epoch', 'test_acc', xlim=[1, num_epochs])
    timer = d2l.Timer()
    for epoch in range(num_epochs):
        net.train()
        timer.start()
        for X, y in train_iter:
            trainer.zero_grad()
            # DataParallel scatters inputs from the first device.
            X, y = X.to(devices[0]), y.to(devices[0])
            l = loss(net(X), y)
            l.backward()
            trainer.step()
        timer.stop()
        animator.add(epoch + 1,
                     (d2l.evaluate_accuracy_gpu(net, test_iter, devices[0])))
    print('test_acc:', animator.Y[0][-1], timer.avg(), 'sec/epoch on', devices)

# Entry point: train the imported LeNet model with data parallelism.
train(LeNet.net)
