import math

import torch.utils.data
from kafka import TopicPartition, KafkaConsumer


def csv_deserializer(record):
    """Decode a Kafka record of comma-separated floats into tensors.

    Returns a ``(features, target)`` pair of float32 tensors: every field
    but the last becomes the feature vector, the last field the target.
    """
    fields = record.decode("utf-8").split(",")
    features = torch.tensor(list(map(float, fields[:-1])), dtype=torch.float32)
    target = torch.tensor([float(fields[-1])], dtype=torch.float32)
    return features, target


def csv_deserializer2(record):
    """Decode a Kafka record of comma-separated values into tensors.

    Returns a ``(features, label)`` pair: all fields but the last as a
    float32 feature tensor, the last field truncated to an int64 scalar
    label (suitable for classification losses).
    """
    fields = record.decode("utf-8").split(",")
    features = torch.tensor([float(f) for f in fields[:-1]], dtype=torch.float32)
    label = torch.tensor(int(float(fields[-1])), dtype=torch.long)
    return features, label


def worker_init_fn1(worker_id):
    """DataLoader worker-init hook using the float-target CSV deserializer."""
    worker_init_fn(worker_id, csv_deserializer)


def worker_init_fn2(worker_id):
    """DataLoader worker-init hook using the integer-label CSV deserializer."""
    worker_init_fn(worker_id, csv_deserializer2)


def worker_init_fn(worker_id, des):
    """Attach a dedicated KafkaConsumer to the worker's dataset copy.

    Splits the topic's partitions first across distributed ranks, then
    across the DataLoader workers within this rank, and assigns this
    worker's slice to a fresh consumer stored on the dataset.

    Args:
        worker_id: index of this DataLoader worker within the rank.
        des: value deserializer passed to the KafkaConsumer.
    """
    info = torch.utils.data.get_worker_info()
    ds = info.dataset
    client_name = f"kafka_dataset_{info.id}"

    # Contiguous slice of partitions owned by this distributed rank.
    per_rank = math.ceil(ds.partitions / ds.world_size)
    rank_start = ds.rank * per_rank
    rank_end = min(ds.partitions, (ds.rank + 1) * per_rank)
    rank_count = rank_end - rank_start

    # Sub-slice owned by this worker within the rank's slice.
    per_worker = math.ceil(rank_count / info.num_workers)
    first = worker_id * per_worker
    last = min(rank_count, (worker_id + 1) * per_worker)
    tps = [TopicPartition(ds.topic, rank_start + p) for p in range(first, last)]

    consumer = KafkaConsumer(
        bootstrap_servers=ds.bootstrap_servers,
        auto_offset_reset="earliest",
        max_partition_fetch_bytes=2048576,
        fetch_max_bytes=52428800,
        enable_auto_commit=False,
        client_id=client_name,
        value_deserializer=des,
        fetch_max_wait_ms=800,
        request_timeout_ms=3000,
        fetch_min_bytes=524288,
        max_poll_records=2048,
    )
    consumer.assign(tps)

    # The dataset object is this worker's private copy, so stashing the
    # consumer here is safe.
    ds.consumer = consumer
    ds.worker_id = worker_id

    print(
        "Rank %d, worker %d initialized for tps: %s"
        % (ds.rank, worker_id, str(tps))
    )


class KafkaDataSet(torch.utils.data.IterableDataset):
    """Iterable dataset that streams deserialized records from Kafka.

    A per-worker KafkaConsumer is attached by ``worker_init_fn`` (which
    also records the worker id); iteration drains the worker's assigned
    partitions until a poll returns no records.
    """

    def __init__(self, topic, bootstrap_servers, partitions, world_size, rank):
        """
        Args:
            topic: Kafka topic to read from.
            bootstrap_servers: Kafka bootstrap server address(es).
            partitions: total number of partitions in the topic.
            world_size: number of distributed ranks sharing the topic.
            rank: index of this rank, in ``[0, world_size)``.
        """
        super().__init__()
        self.topic = topic
        self.bootstrap_servers = bootstrap_servers
        self.partitions = partitions
        self.world_size = world_size
        self.rank = rank
        # Populated per worker process by worker_init_fn.
        self.consumer = None
        self.worker_id = None

    def __iter__(self):
        """Yield deserialized record values until a poll comes back empty.

        Raises:
            RuntimeError: if no consumer was attached, i.e. the DataLoader
                was not run with ``num_workers >= 1`` and the matching
                ``worker_init_fn`` (previously an opaque AttributeError).
        """
        if self.consumer is None:
            raise RuntimeError(
                "No Kafka consumer attached; use a DataLoader with "
                "num_workers >= 1 and the provided worker_init_fn."
            )
        while True:
            batches = self.consumer.poll(timeout_ms=2000)
            if not batches:
                # Nothing arrived within the timeout: treat the assigned
                # partitions as exhausted and end the epoch.
                break
            for records in batches.values():
                for record in records:
                    yield record.value