import torch
import torch.distributed
import torch.multiprocessing
import torch.optim
import torch.nn.parallel
# from torch.nn.parallel import DistributedDataParallel as DDP
import os
import torch.utils.data
import torch.nn.functional as F


def init_weights(m):
    """Per-module weight initializer, to be used via ``Module.apply``.

    Conv* layers get Xavier-normal weights; BatchNorm layers get
    N(1.0, 0.02) weights and zero bias.  All other module types are
    left untouched.

    Args:
        m: a ``torch.nn.Module`` (one submodule per ``apply`` callback).
    """
    classname = m.__class__.__name__
    if hasattr(m, 'weight') and classname.find('Conv') != -1:
        torch.nn.init.xavier_normal_(m.weight)
    # Match every BatchNorm variant (1d/2d/3d), not only 'BatchNorm2d':
    # the model defined elsewhere in this file uses BatchNorm1d, which
    # the old check silently skipped.  Guard against affine=False norms,
    # whose .weight/.bias are None.
    elif classname.find('BatchNorm') != -1 and getattr(m, 'weight', None) is not None:
        torch.nn.init.normal_(m.weight, 1.0, 0.02)
        torch.nn.init.constant_(m.bias, 0.0)


def f(x):
    """Target curve: return x**3 + x**2 + 1 (scalar or tensor input)."""
    cubed = x ** 3
    squared = x ** 2
    return cubed + squared + 1


def sign(x, y):
    """Return long labels shaped like *y*: 1 where y > f(x), else 0."""
    above_curve = (y - f(x)) > 0
    labels = torch.zeros(y.shape, dtype=torch.long)
    labels[above_curve] = 1
    return labels


class DDPTestModel(torch.nn.Module):
    """Tiny MLP classifier: 1 input feature -> 2 class logits."""

    def __init__(self):
        super().__init__()
        layers = [
            torch.nn.Linear(1, 10),
            torch.nn.BatchNorm1d(10),
            torch.nn.ReLU(),
            torch.nn.Linear(10, 2),
        ]
        self.mlp = torch.nn.Sequential(*layers)

    def forward(self, x):
        # x: (batch, 1) float tensor -> (batch, 2) raw logits
        logits = self.mlp(x)
        return logits


class DDPTestDataset(torch.utils.data.Dataset):
    """Synthetic binary-classification dataset of n points.

    Draws integer x values in [0, 1000), perturbs the curve f(x) with
    unit Gaussian noise, and labels each point by which side of the
    curve it lands on (via ``sign``).
    """

    def __init__(self, n):
        xs = torch.randint(0, 1000, (n,))
        noisy_ys = f(xs) + torch.normal(0, 1, (n,))
        # features as float (model input); targets as long labels
        self.x = xs.float()
        self.y = sign(xs, noisy_ys)

    def __len__(self):
        return len(self.x)

    def __getitem__(self, item):
        return self.x[item], self.y[item]


if __name__ == "__main__":
    # NOTE(review): each rank builds its own random dataset (no shared seed,
    # no DistributedSampler), so ranks train/evaluate on different data --
    # acceptable for a DDP smoke test, not for real training.
    train_dataset = DDPTestDataset(n=9000)
    test_dataset = DDPTestDataset(n=1000)

    # Launched via torchrun, which sets LOCAL_RANK / WORLD_SIZE.
    rank = int(os.environ['LOCAL_RANK'])
    world_size = int(os.environ['WORLD_SIZE'])
    torch.distributed.init_process_group("gloo", rank=rank, world_size=world_size)

    model = DDPTestModel()
    # assumes one CUDA device per local rank -- TODO confirm; on a CPU-only
    # host .to(rank) and device_ids=[rank] would fail even with gloo.
    model.to(rank)
    # Bug fix: init_weights is a per-module callback, but it was called
    # directly on the top-level model -- 'DDPTestModel' matches neither
    # branch, so initialization was a no-op.  Module.apply recurses into
    # every submodule.
    model.apply(init_weights)
    ddp_model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[rank])
    loss_func = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(ddp_model.parameters(), lr=0.01)

    # --- training ---
    ddp_model.train()
    epochs = 20
    for e in range(epochs):
        loss_arr = []
        for x, y in torch.utils.data.DataLoader(train_dataset, batch_size=16, shuffle=True):
            # add the feature dimension: (batch,) -> (batch, 1)
            x, y = x.to(rank).unsqueeze(1), y.to(rank)
            dt = ddp_model(x)
            loss = loss_func(dt, y)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            loss_arr.append(loss.detach())
        print(rank, e, torch.tensor(loss_arr).mean())

    # --- evaluation ---
    acc = []
    ddp_model.eval()
    with torch.no_grad():  # no gradients needed for evaluation
        for x, y in torch.utils.data.DataLoader(test_dataset, batch_size=16, shuffle=False):
            x, y = x.to(rank).unsqueeze(1), y.to(rank)
            dt = ddp_model(x)
            # argmax over softmax == argmax over logits; kept for parity
            dt = torch.argmax(F.softmax(dt, dim=1), dim=1)
            acc.append((dt == y).sum())
    print(torch.tensor(acc).sum() / len(test_dataset))
    torch.distributed.destroy_process_group()
