import argparse
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as Ddp
from torch.utils.data.dataloader import DataLoader

from dataset import KafkaDataSet, worker_init_fn1
from models import LogisticRegression
from train import Trainer
from util import setup_env, cleanup_env


def arg_parser() -> argparse.ArgumentParser:
    """Build the CLI parser for the Kafka-fed distributed training job.

    Returns:
        argparse.ArgumentParser: parser whose namespace carries
        ``bootstrap_servers``, ``topic``, ``batch_size``, ``window_size``,
        ``parallesim`` (sic — kept for backward compatibility) and
        ``ck_interval``. Omitted options default to ``None``.
    """
    parser = argparse.ArgumentParser(description="torch arg parser")
    parser.add_argument(
        "-b", "--bootstrap_servers", type=str,
        help="Kafka bootstrap servers, e.g. host1:9092,host2:9092",
    )
    parser.add_argument(
        "-t", "--topic", type=str,
        help="Kafka topic to consume training records from",
    )
    parser.add_argument(
        "-s", "--batch_size", type=int,
        help="mini-batch size for the DataLoader",
    )
    parser.add_argument(
        "-w", "--window_size", type=int,
        help="window size passed to the Trainer",
    )
    # "--parallesim" is a long-standing typo; keep it first so the namespace
    # attribute stays `parallesim` (argparse derives dest from the first long
    # option), and accept the correct spelling "--parallelism" as an alias.
    parser.add_argument(
        "-p", "--parallesim", "--parallelism", type=int,
        help="number of Kafka partitions consumed in parallel",
    )
    parser.add_argument(
        "-k", "--ck_interval", type=int,
        help="checkpoint interval (in training steps)",
    )
    return parser


def run() -> None:
    """Entry point: train a DDP-wrapped logistic regression on a Kafka stream.

    Flow: parse CLI args -> initialise the distributed environment
    (``setup_env``) -> build a Kafka-backed dataset/loader sharded by
    world size and rank -> wrap the model in DistributedDataParallel ->
    run ``Trainer.train()`` -> tear the environment down (``cleanup_env``).

    NOTE(review): every option defaults to None if omitted on the CLI;
    missing args will fail later inside the dataset/loader — consider
    marking them required.
    """
    args = arg_parser().parse_args()
    bootstrap_servers = args.bootstrap_servers
    topic = args.topic
    batch_size = args.batch_size
    window_size = args.window_size
    parallesim = args.parallesim  # number of Kafka partitions (typo'd CLI flag)
    ck_interval = args.ck_interval
    num_workers = 1       # single DataLoader worker per process
    num_features = 18     # input feature dimension expected by the model

    # Must run before dist.get_world_size()/get_rank(): presumably
    # initialises the process group — confirm in util.setup_env.
    setup_env()

    world_size = dist.get_world_size()
    rank = dist.get_rank()

    # Connect the data channel: each rank consumes its shard of the
    # topic's partitions (sharding logic lives in KafkaDataSet).
    kafka_dataset = KafkaDataSet(
        topic=topic,
        bootstrap_servers=bootstrap_servers,
        partitions=parallesim,
        world_size=world_size,
        rank=rank,
    )
    loader = DataLoader(
        kafka_dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        worker_init_fn=worker_init_fn1,
        pin_memory=True,
        prefetch_factor=2,
        drop_last=True,  # streaming source: drop ragged final batch
    )
    # Define the model. No device_ids given, so DDP runs on CPU (or the
    # default device) — TODO confirm intended for this deployment.
    net = LogisticRegression(num_features, 1)
    ddp_net = Ddp(net)
    loss_fn = torch.nn.BCELoss()  # model is expected to emit probabilities
    optimizer = torch.optim.SGD(ddp_net.parameters(), lr=1e-3)

    # Model training. Redis is used for checkpoints ("model_ck") and
    # throughput reporting ("ml_sp"); "redis-ip" looks like a placeholder
    # hostname — NOTE(review): should come from config/CLI.
    trainer = Trainer(
        ddp_net,
        loss_fn,
        optimizer,
        loader,
        window_type="count",
        window_size=window_size,
        window_threshold=batch_size,
        rank=rank,
        redis_addr="redis-ip",
        redis_port=6379,
        log_interval=10,
        ck_interval=ck_interval,
        ck_key="model_ck",
        report_speed_interval=1,
        speed_key="ml_sp",
    )
    trainer.train()

    cleanup_env()


# Script entry point: launch the distributed training run.
if __name__ == "__main__":
    run()
