############################################################################
# 2019 - present Contributed by Apulis Technology (Shenzhen) Co. LTD
#
# This program and the accompanying materials are made available under the
# terms of the MIT License, which is available at
# https://www.opensource.org/licenses/MIT
#
# See the NOTICE file distributed with this work for additional
# information regarding copyright ownership.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# SPDX-License-Identifier: MIT
############################################################################

import argparse
import os

import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
# import nni
import torch.npu
import torch.optim as optim
# from nni.utils import merge_parameter
from torchvision import datasets, transforms


class Net(nn.Module):
    """LeNet-style CNN for 28x28 single-channel MNIST digits.

    Two conv+max-pool stages followed by two fully connected layers; the
    forward pass returns per-class log-probabilities (log_softmax).
    """

    def __init__(self, hidden_size):
        super(Net, self).__init__()
        # 1 input channel -> 20 feature maps -> 50 feature maps, 5x5 kernels.
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        # After two conv/pool stages a 28x28 input is reduced to 50 maps of 4x4.
        self.fc1 = nn.Linear(4 * 4 * 50, hidden_size)
        self.fc2 = nn.Linear(hidden_size, 10)

    def forward(self, x):
        """Return log-probabilities of shape (batch, 10) for input images x."""
        features = F.max_pool2d(F.relu(self.conv1(x)), 2, 2)
        features = F.max_pool2d(F.relu(self.conv2(features)), 2, 2)
        flat = features.view(-1, 4 * 4 * 50)
        hidden = F.relu(self.fc1(flat))
        logits = self.fc2(hidden)
        return F.log_softmax(logits, dim=1)


def main(args):
    """Run single-process NPU training of the MNIST CNN.

    args: dict of hyperparameters (the vars() of get_params()).
    Initializes an HCCL process group, builds the model and optimizer on one
    NPU, then trains for args['epochs'] epochs, logging the loss every
    args['log_interval'] batches.
    """
    # Rank 0 trains on the first device of the --device_list mapping.
    args["gpu"] = args["process_device_map"][0]
    torch.manual_seed(args['seed'])
    args["rank"] = 0
    if args["gpu"] is not None:
        print("[gpu id:", args["gpu"], "]", "Use GPU: {} for training".format(args["gpu"]))
    data_dir = args['data_dir']
    print("start")
    dist.init_process_group(backend='hccl',  # init_method=args.dist_url,
                            world_size=args["world_size"], rank=args["rank"])
    print("end")
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST(data_dir, train=True, download=False,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])), pin_memory=False, drop_last=True, num_workers=2,
        batch_size=args['batch_size'], shuffle=True)
    hidden_size = args['hidden_size']
    # BUG FIX: the original placed the model on 'npu:{args["npu"]}' but moved
    # batches to 'npu:{args["gpu"]}' inside the loop, crashing whenever --npu
    # differed from the first --device_list entry. Use one device throughout.
    loc = 'npu:{}'.format(args["gpu"])
    torch.npu.set_device(loc)
    model = Net(hidden_size=hidden_size).to(loc)
    model = torch.nn.DataParallel(model)
    # BUG FIX: Net.forward already returns log_softmax output, so
    # CrossEntropyLoss would apply log_softmax twice; NLLLoss is the correct
    # counterpart for log-probability inputs.
    criterion = nn.NLLLoss().to(loc)
    optimizer = optim.SGD(model.parameters(), lr=args['lr'],
                          momentum=args['momentum'])
    print("ok")
    for epoch in range(1, args['epochs'] + 1):
        model.train()
        for batch_idx, (data, target) in enumerate(train_loader):
            # Optional early cutoff for smoke tests / HPO trials.
            if (args['batch_num'] is not None) and batch_idx >= args['batch_num']:
                break
            # int32 targets: required by the NPU backend's loss kernels.
            target = target.to(torch.int32)
            data, target = data.to(loc, non_blocking=False), target.to(loc, non_blocking=False)
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            if batch_idx % args['log_interval'] == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, batch_idx * len(data), len(train_loader.dataset),
                           100. * batch_idx / len(train_loader), loss.item()))
        print('Pipe send intermediate result done.')


def device_id_to_process_device_map(device_list):
    """Build a {process_rank: device_id} map from a comma-separated id list.

    device_list: string such as "0,1"; ids are sorted ascending so that
    rank 0 always gets the lowest device id. The map is printed for logging
    and returned.
    """
    sorted_ids = sorted(int(item) for item in device_list.split(","))
    process_device_map = {rank: dev for rank, dev in enumerate(sorted_ids)}
    print(process_device_map)
    return process_device_map


def get_params():
    """Parse command-line training settings and return the argparse namespace.

    Unknown arguments are ignored (parse_known_args) so the script tolerates
    extra launcher flags. args.process_device_map is replaced by the rank->id
    dict derived from --device_list before returning.
    """
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument("--data_dir", type=str,
                        default='./', help="data directory")
    # BUG FIX: help text said "data directory" (copy-paste); this value is
    # overwritten below with the rank->device map built from --device_list.
    parser.add_argument("--process_device_map", type=str,
                        default='./',
                        help="placeholder; replaced by the rank-to-device map "
                             "derived from --device_list")
    parser.add_argument('--batch_size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument("--batch_num", type=int, default=None)
    parser.add_argument("--hidden_size", type=int, default=512, metavar='N',
                        help='hidden layer size (default: 512)')
    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    # BUG FIX: help text claimed default 10 but the default is 5.
    parser.add_argument('--epochs', type=int, default=5, metavar='N',
                        help='number of epochs to train (default: 5)')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--no_npu', action='store_true', default=False,
                        help='disables NPU training')
    # BUG FIX: help texts for --rank / --world_size were copy-pasted from
    # --no_npu ("disables NPU training").
    parser.add_argument('--rank', type=int, default=0,
                        help='rank of this process in the process group')
    parser.add_argument('--world_size', type=int, default=1,
                        help='number of processes in the process group')
    parser.add_argument('--log_interval', type=int, default=100, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--npu',
                        default=0,
                        type=int,
                        help='npu id to use.')
    parser.add_argument('--device_list',
                        default="0,1",
                        type=str,
                        help='comma-separated npu ids to use, e.g. "0,1"')
    args, _ = parser.parse_known_args()
    args.process_device_map = device_id_to_process_device_map(args.device_list)
    return args


if __name__ == '__main__':
    # Rendezvous endpoint for the single-node HCCL process group.
    os.environ.update({'MASTER_ADDR': "127.0.0.1",
                       'MASTER_PORT': '29688'})

    try:
        main(args=vars(get_params()))

    except Exception as err:
        # Surface the failure in the log, then let the launcher see it too.
        print(err)
        raise
