import os
from urllib import parse
import torch
from torch.distributed.distributed_c10d import get_rank, get_world_size 
import torch.nn as nn
import torch.nn.functional as F 
import torchvision

import argparse
import torch.distributed as dist
from torch.distributed.optim import DistributedOptimizer
import torch.distributed.autograd as dist_autograd
import torch.multiprocessing as mp
import torch.distributed.rpc as rpc
from torch.distributed.rpc import rpc_sync
from torch.utils.tensorboard import SummaryWriter
import dist_utils

# Toggle between distributed RPC pipeline training (True) and plain
# single-process local training (False).
parellel_mode = False

# TensorBoard writer: parallel runs log into the working directory,
# local runs into their own subdirectory.
writer = SummaryWriter(log_dir='.' if parellel_mode else './local_trainer')
    
class LocalNet(nn.Module):
    """LeNet-style CNN used for single-process MNIST training.

    Takes a (batch, in_channels, 28, 28) input and returns raw class
    logits of shape (batch, num_classes).
    """

    def __init__(self, in_channels=1, num_classes=10):
        super(LocalNet, self).__init__()

        # Conv stages: 28x28 -> pool -> 14x14 -> conv (no pad) -> 10x10 -> pool -> 5x5.
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=6,
                               kernel_size=5, stride=1, padding=2)
        self.conv2 = nn.Conv2d(in_channels=6, out_channels=16,
                               kernel_size=5, stride=1, padding=0)

        # Classifier head over the flattened 16x5x5 feature map.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, num_classes)

    def forward(self, x):
        """
        Args:
            x: (b, in_channels, 28, 28)
        """
        features = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        features = F.max_pool2d(F.relu(self.conv2(features)), (2, 2))
        # Flatten the feature map before the fully-connected head.
        flat = features.flatten(1)
        hidden = F.relu(self.fc1(flat))
        return self.fc2(hidden)

class SubNetConv(nn.Module):
    """Convolutional front half of the RPC pipeline.

    forward() receives an RRef to the input batch, materializes it on
    this worker, and returns the flattened conv feature map.
    """

    def __init__(self, in_channels):
        print(f'SubNetConv created, with in_channels[{in_channels}]')
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=6,
                               kernel_size=5, stride=1, padding=2)
        self.conv2 = nn.Conv2d(in_channels=6, out_channels=16,
                               kernel_size=5, stride=1, padding=0)

    def forward(self, x_rref):
        """Fetch the input tensor from its RRef and run conv+pool stages."""
        activ = x_rref.to_here()
        activ = F.max_pool2d(F.relu(self.conv1(activ)), (2, 2))
        activ = F.max_pool2d(F.relu(self.conv2(activ)), (2, 2))
        # Flatten to (batch, 16*5*5) for the remote FC half.
        return activ.flatten(1)

    def parameter_rrefs(self):
        """Wrap each local parameter in an RRef for the DistributedOptimizer."""
        rrefs = []
        for param in self.parameters():
            rrefs.append(rpc.RRef(param))
        return rrefs


class SubNetFC(nn.Module):
    """Fully-connected back half of the RPC pipeline.

    forward() receives an RRef to the flattened (batch, 16*5*5) conv
    features, fetches them locally, and returns (batch, num_classes)
    logits.
    """

    def __init__(self, num_classes):
        super().__init__()
        self.fc1 = nn.Linear(16*5*5, 120)
        self.fc2 = nn.Linear(120, num_classes)

    def forward(self, x_rref):
        """Materialize the feature tensor and apply the FC head.

        Args:
            x_rref: RRef to a (batch, 16*5*5) feature tensor.
        Returns:
            (batch, num_classes) logits.
        """
        x = x_rref.to_here()
        # Fixed: ReLU after fc1 was missing, diverging from the reference
        # LocalNet.forward (which this pipeline model is meant to replicate).
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x

    def parameter_rrefs(self):
        """Wrap each local parameter in an RRef for the DistributedOptimizer."""
        return [rpc.RRef(p) for p in self.parameters()]


class ParallelNet(nn.Module):
    def __init__(self, in_channels=1, num_classes=10):
        super().__init__()
        # 分别远程声明SubNetConv和SubNetFC
        """
        Write your code here!
        """
        self.m1_rref = rpc.remote(
            "worker1",
            SubNetConv,
            args=(in_channels,)
        )
        
        self.m2_rref = rpc.remote(
            "worker2",
            SubNetFC,
            args=(num_classes,)
        )

    def forward(self, x):
        """
        Write your code here!
        """
        x_rref = rpc.RRef(x)
        
        y = self.m1_rref.remote().forward(x_rref)
        # print(f'y--{y.shape}')
        
        z = self.m2_rref.rpc_sync().forward(y)
        
        return z

    def parameter_rrefs(self):
        """
        Write your code here!
        """
        # return self.m1_rref.remote().parameter_rrefs().extend(
        #     self.m2_rref.remote().parameter_rrefs() 
        # )
        remote_params = []
        remote_params.extend(self.m1_rref.remote().parameter_rrefs().to_here())
        remote_params.extend(self.m2_rref.remote().parameter_rrefs().to_here())
        return remote_params
        # return [self.m1_rref, self.m2_rref]


def train(model, dataloader, loss_fn, optimizer, num_epochs=3):
    """Train `model` using the distributed autograd engine.

    Args:
        model: the pipeline-parallel network; its forward() issues RPCs.
        dataloader: yields (inputs, labels) batches.
        loss_fn: criterion mapping (outputs, labels) to a scalar loss.
        optimizer: a DistributedOptimizer; step() takes the context id.
        num_epochs: number of passes over `dataloader`.
    """
    print("Device {} starts training ...".format(dist_utils.get_local_rank()))
    loss_total = 0.
    # Synchronize/initialize parameters across participants before training.
    dist_utils.init_parameters(model)
    
    j = 0  # global step counter across epochs (TensorBoard x-axis)
    for epoch in range(num_epochs):
        for i, batch_data in enumerate(dataloader):
            inputs, labels = batch_data

            # Forward, backward and optimizer.step must all happen inside
            # the same distributed autograd context.
            # Fixed: the forward pass previously ran twice per batch (once
            # outside the context, then again inside), and the loss was
            # recomputed three times; now each runs exactly once.
            with dist_autograd.context() as context_id:
                outputs = model(inputs)
                loss = loss_fn(outputs, labels)
                dist_autograd.backward(context_id, [loss])
                optimizer.step(context_id)

            loss_total += loss.item()
            writer.add_scalar(f'device-{dist_utils.get_local_rank()}-loss', loss.item(), j)
            j += 1
        print('Device: %d epoch: %d,loss: %.3f' % (dist_utils.get_local_rank(), epoch + 1, loss_total/len(dataloader)))
        loss_total = 0.0
    
    print("Training Finished!")


def test(model: nn.Module, test_loader):
    """Evaluate `model` on `test_loader` and print overall accuracy."""
    model.eval()
    total = len(test_loader.dataset)
    hits = 0
    print("testing ...")
    with torch.no_grad():
        for inputs, labels in test_loader:
            logits = model(inputs)
            # Index of the max logit per row = predicted class (column vector).
            predictions = logits.data.max(1, keepdim=True)[1]
            hits += predictions.eq(labels.data.view_as(predictions)).sum().item()
    print('\nTest set: Accuracy: {}/{} ({:.2f}%)\n'.format(
        hits, total,
        100 * hits / total))


def main():
    """Entry point: run local single-process training or RPC pipeline training.

    Selected by the module-level `parellel_mode` flag. In local mode a
    LocalNet is trained on MNIST in this process. In parallel mode,
    rank 0 ("worker0") drives a ParallelNet whose stages live on
    worker1/worker2; those ranks only serve RPC requests until shutdown.
    """
    if not parellel_mode:
        # Local-mode train/test shadow the module-level distributed
        # versions for the duration of this branch.
        def train(model, dataloader, optimizer, loss_fn, num_epochs=1):
            """Plain single-process SGD loop; logs per-step loss to TensorBoard."""
            print("Start training ...")
            loss_total = 0.
            model.train()
            j = 0  # global step counter across epochs (TensorBoard x-axis)
            for epoch in range(num_epochs):
                for i, batch_data in enumerate(dataloader):
                    inputs, labels = batch_data

                    outputs = model(inputs)
                    loss = loss_fn(outputs, labels)

                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()
                    loss_total += loss.item()
                    writer.add_scalar(tag='device-0-loss', scalar_value=loss.item(), global_step=j)
                    j += 1

                # NOTE(review): mean_loss is computed but never used —
                # consider printing or logging it per epoch.
                mean_loss = loss_total/len(dataloader)
                loss_total = 0.

            print("Training Finished!")

        def test(model: nn.Module, test_loader):
            """Evaluate on the test loader and print overall accuracy."""
            model.eval()
            size = len(test_loader.dataset)
            correct = 0
            print("testing ...")
            with torch.no_grad():
                for inputs, labels in test_loader:
                    output = model(inputs)
                    # Index of the max logit = predicted class (column vector).
                    pred = output.data.max(1, keepdim=True)[1]
                    correct += pred.eq(labels.data.view_as(pred)).sum().item()
            print('\nTest set: Accuracy: {}/{} ({:.2f}%)\n'.format(
                correct, size,
                100 * correct / size))
        
        model = LocalNet(in_channels=1, num_classes=10)
        DATA_PATH = "/data"
        transform = torchvision.transforms.Compose(
            [torchvision.transforms.ToTensor()]
        )

        train_set = torchvision.datasets.MNIST(
            DATA_PATH, train=True, download=True, transform=transform)
        test_set = torchvision.datasets.MNIST(
            DATA_PATH, train=False, download=True, transform=transform)

        train_loader = torch.utils.data.DataLoader(
            train_set, batch_size=32, shuffle=True)
        test_loader = torch.utils.data.DataLoader(
            test_set, batch_size=32, shuffle=False)

        loss_fn = nn.CrossEntropyLoss()
    
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        train(model, train_loader, optimizer, loss_fn)
        test(model, test_loader)
        return 
    # ------------------------------------------
    # ------------------------------------------

    
    args = parse_args()
    # Set up the default process group first; RPC is layered on top of it.
    dist_utils.dist_init(args.n_devices, args.rank, args.master_addr, args.master_port)
    DATA_PATH = "/data"
    if args.rank == 0:
        # Rank 0 is the master: it owns the data, loss and optimizer,
        # and drives the remote pipeline stages.
        rpc.init_rpc("worker0", rank=args.rank, world_size=args.n_devices)
        # construct the model
        model = ParallelNet(in_channels=1, num_classes=10)
        # construct the dataset
        transform = torchvision.transforms.Compose(
            [torchvision.transforms.ToTensor()]
        )
        train_set = torchvision.datasets.MNIST(DATA_PATH, train=True, download=True, transform=transform)
        test_set = torchvision.datasets.MNIST(DATA_PATH, train=False, download=True, transform=transform)

        train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True)
        test_loader = torch.utils.data.DataLoader(test_set, batch_size=32, shuffle=False)

        # construct the loss_fn and optimizer
        loss_fn = nn.CrossEntropyLoss()
        # DistributedOptimizer steps the remote parameters gathered via
        # ParallelNet.parameter_rrefs().
        dist_optimizer = DistributedOptimizer(torch.optim.SGD, model.parameter_rrefs(), lr=0.01)

        train(model, train_loader, loss_fn, dist_optimizer)
        test(model, test_loader)
    
    elif args.rank == 1:
        # Workers 1 and 2 only host their pipeline stage; they block in
        # rpc.shutdown() below until the master finishes.
        rpc.init_rpc("worker1", rank=args.rank, world_size=args.n_devices)
        print("Training on the worker1...")

    elif args.rank == 2:
        rpc.init_rpc("worker2", rank=args.rank, world_size=args.n_devices)
        print("Training on the worker2...")

    rpc.shutdown()


def parse_args():
    """Parse command-line options for a distributed run.

    Returns:
        argparse.Namespace with fields n_devices, rank, master_addr,
        master_port.
    """
    parser = argparse.ArgumentParser()
    # Fixed: "distributd" typo in the world-size help text.
    parser.add_argument("--n_devices", default=1, type=int, help="The distributed world size.")
    parser.add_argument("--rank", default=0, type=int, help="The local rank of device.")
    parser.add_argument('--master_addr', default='localhost', type=str, help='ip of rank 0')
    # Fixed: help text previously said "ip of rank 0" (copy-paste error).
    parser.add_argument('--master_port', default='12355', type=str, help='port of rank 0')
    args = parser.parse_args()
    return args

# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()