import os
import sys

import torch
import horovod.torch as hvd
import torch.nn.functional as F
from torchvision import transforms


from torch import optim
from tqdm import tqdm

from model import resnet34
from my_dataset import MyDataSet
from utils import read_split_data



# os.environ['CUDA_VISIBLE_DEVICES'] = '2,3'

# Initialize the Horovod runtime; must run before any other hvd.* call.
hvd.init()

# Pin this process to one GPU, indexed by its local rank on the node
# (the standard one-GPU-per-process Horovod layout).
torch.cuda.set_device(hvd.local_rank())

# Build the training dataset and its distributed loader.

# read_split_data returns (paths, labels) tuples for train/val plus the class
# count. NOTE: val_info, num_classes and the "val" transform below are defined
# but never used in this script.
train_info, val_info, num_classes = read_split_data("/root/mult_gpu/data/flower_data/flower_photos")
train_images_path, train_images_label = train_info

# Standard ImageNet-style augmentation/normalization pipelines.
data_transform = {
    "train": transforms.Compose([transforms.RandomResizedCrop(224),
                                 transforms.RandomHorizontalFlip(),
                                 transforms.ToTensor(),
                                 transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]),
    "val": transforms.Compose([transforms.Resize(256),
                               transforms.CenterCrop(224),
                               transforms.ToTensor(),
                               transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])}

train_dataset = MyDataSet(images_path=train_images_path,
                               images_class=train_images_label,
                               transform=data_transform["train"])

# Partition the dataset among workers: each rank sees a disjoint 1/hvd.size()
# slice of the data via torch.utils.data.distributed.DistributedSampler.
train_sampler = torch.utils.data.distributed.DistributedSampler(
    train_dataset, num_replicas=hvd.size(), rank=hvd.rank())

# batch_size=50 is the per-process batch; effective batch is 50 * hvd.size().
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=50, sampler=train_sampler)

# Build the model and move it to this process's GPU.
model = resnet34()
model.cuda()

# Optimize only parameters that require gradients (for a from-scratch
# resnet34 this is all of them; the filter matters if layers get frozen).
pg = [p for p in model.parameters() if p.requires_grad]

optimizer = optim.SGD(pg, lr=0.03, momentum=0.9, weight_decay=0.005)
# NOTE(review): Horovod recommends scaling lr by hvd.size() for large
# effective batches — left unchanged here to preserve training behavior.

# Make every worker start from identical state: broadcast the model weights
# AND the optimizer state (e.g. momentum buffers) from rank 0. Without the
# optimizer broadcast, ranks can diverge from the first step.
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)

# Wrap the optimizer so gradients are averaged across all workers with
# allreduce during backward/step.
optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters())

for epoch in range(100):
    # Re-seed the sampler's shuffle for this epoch; without set_epoch the
    # DistributedSampler yields the identical ordering every epoch.
    train_sampler.set_epoch(epoch)

    # Fresh progress bar per epoch (a tqdm wrapper created once outside the
    # loop is closed after the first pass), shown on rank 0 only so output
    # from multiple workers does not interleave.
    data_iter = train_loader
    if hvd.rank() == 0:
        data_iter = tqdm(train_loader, file=sys.stdout, desc=f"epoch {epoch}")

    for batch_idx, (data, target) in enumerate(data_iter):
        # Move the batch to this process's pinned GPU.
        data = data.cuda()
        target = target.cuda()

        optimizer.zero_grad()
        output = model(data)
        loss = F.cross_entropy(output, target)
        loss.backward()   # DistributedOptimizer allreduces gradients here
        optimizer.step()
