# ddp_demo.py
import torch
import torch.nn as nn
from torch.optim import SGD
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.distributed import DistributedSampler
import os
import argparse

from torchvision import models

from ai import run
from ai import store_model
from ai import store_data

from torch import distributed, optim

# Limit each process to one OpenMP thread so multiple DDP workers on the
# same machine do not oversubscribe the CPU.
os.environ['OMP_NUM_THREADS'] = "1"

parser = argparse.ArgumentParser()
# Physical GPU ids used by this job, indexed by process rank (see
# get_use_device below).
# BUG FIX: was `type=str` with a list default, so any value supplied on the
# command line parsed to a single string and rank-indexing would then pick
# out individual characters. Accept one or more ints instead; the default
# [1, 2, 3] is unchanged.
parser.add_argument('--gpu_id', type=int, nargs='+', default=[1, 2, 3])
parser.add_argument('--batchSize', type=int, default=64)
parser.add_argument('--epochs', type=int, default=15)
parser.add_argument('--dataset-size', type=int, default=1024)
parser.add_argument('--num-classes', type=int, default=10)
config = parser.parse_args()


def get_use_device(rank):
    """Map a distributed process rank to its configured physical GPU id.

    Looks up the ``--gpu_id`` list parsed into the module-level ``config``.
    """
    gpu_ids = config.gpu_id
    return gpu_ids[rank]


# Initialise the default process group; NCCL is the backend of choice for
# multi-GPU CUDA training.
torch.distributed.init_process_group(backend="nccl")

# NOTE(review): this uses the *global* rank as the per-node GPU index, which
# only works for single-node jobs; multi-node launches should read LOCAL_RANK
# from the environment instead — confirm the intended deployment.
local_rank = torch.distributed.get_rank()
# BUG FIX: the current CUDA device was set to `local_rank + 1`, which only
# agreed with the `device` computed below by coincidence of the default
# --gpu_id [1, 2, 3]. Use the same rank -> GPU mapping for both so a custom
# --gpu_id list works consistently.
torch.cuda.set_device(get_use_device(local_rank))
device = torch.device("cuda", get_use_device(local_rank))

# --------------------------------------------------------- start your code
feature_extract = True  # if True, freeze the pretrained backbone parameters
# Load the pretrained flower-classification model from the project model store.
model = store_model.store_model.load_model(store_model.store_model_name.FLOWER, feature_extract=feature_extract)
input_size = 64  # input resolution; adjust to your own configuration

# Collect the parameters that will actually be optimised. With feature
# extraction enabled, only layers left trainable by load_model have
# requires_grad=True; otherwise every parameter is trained.
if local_rank == 0:
    print("Params to learn:")
if feature_extract:
    params_to_update = []
    for name, param in model.named_parameters():
        if param.requires_grad:
            params_to_update.append(param)
            # BUG FIX: the rank-0 guard was duplicated (nested twice);
            # a single check is enough to print only from the main process.
            if local_rank == 0:
                print("\t", name)
else:
    params_to_update = model.parameters()
    for name, param in model.named_parameters():
        if param.requires_grad:
            if local_rank == 0:
                print("\t", name)
# Optimiser setup — you decide which parameters to train.
optimizer = optim.Adam(params_to_update, lr=1e-2)
# Decay the learning rate to 1/10 every 10 epochs (the original comment
# said 7, but step_size is 10).
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
criterion = nn.CrossEntropyLoss()

train_dataset, valid_dataset = store_data.store_data.DataStore(
    store_data.store_data_name.FLOWER_CLASSIFY).get_dataset()  # type: ignore
model_name = store_model.store_model_name.FLOWER

# -------------------------------------------------------- end your code


# DistributedSampler shards each dataset across the participating processes
# so every rank sees a disjoint slice per epoch.
sampler1 = DistributedSampler(train_dataset)
loader1 = DataLoader(train_dataset, batch_size=config.batchSize, sampler=sampler1)
# BUG FIX: validation data should not be shuffled — keep its order
# deterministic across epochs and ranks (DistributedSampler shuffles by
# default).
sampler2 = DistributedSampler(valid_dataset, shuffle=False)
loader2 = DataLoader(valid_dataset, batch_size=config.batchSize, sampler=sampler2)
loss_func = nn.CrossEntropyLoss()

if torch.cuda.is_available():
    model.cuda()  # moves the model to this process's current CUDA device
# Wrap the model for synchronous multi-GPU training. Passing device_ids
# pins this replica to the process's configured GPU explicitly instead of
# relying on the implicit current-device default.
model = torch.nn.parallel.DistributedDataParallel(
    model, device_ids=[get_use_device(local_rank)])

# Hand everything to the project runner, which owns the train/validate loop.
runner_o = run.runner.RunnerGxl(model, optimizer, loss_func, loader1, loader2, multi=True, local_rank=local_rank,
                                scheduler=scheduler, model_name=model_name)
runner_o.run(config.epochs)
