import csv
import os
from collections.abc import Iterable
import io
import numpy as np
import torch

from fsspec import AbstractFileSystem
from zkl_aiutils_datasets import ChunkedBytesDatasetWriter, DelegatingDatasetWriter, MappedDatasetWriter
from zkl_pyutils_fsspec import FsLike, resolve_fs


def _encode_tensor(sample: list[torch.Tensor]) -> bytes:
    numpy_arrays = [s.cpu().numpy() for s in sample]
    combined_array = np.array(numpy_arrays)
    buffer = io.BytesIO()
    np.save(buffer, combined_array)
    return buffer.getvalue()


class StrEmbeddingsWriter(DelegatingDatasetWriter[torch.Tensor]):
    """Dataset writer that persists embedding tensors as chunked byte files.

    Each sample is encoded into an ``.npy`` blob via ``_encode_tensor`` and
    appended to size-bounded chunk files on the target filesystem.  On
    construction, the companion dataset script is copied into the dataset
    directory so the written data is self-describing.
    """

    def __init__(self,
        fs: FsLike, *,
        chunk_size: int = 500 * (2 ** 20),  # 500MB
    ):
        resolved = resolve_fs(fs)
        # Ensure the dataset root exists before any chunk is written.
        resolved.mkdir("", exists_ok=True)
        chunked = ChunkedBytesDatasetWriter(resolved, chunk_size=chunk_size, loader=None)
        super().__init__(MappedDatasetWriter(chunked, _encode_tensor))
        write_dataset_script(resolved)
        self._fs = resolved

    def write_columns_name(self, columns_name: Iterable[str]):
        """Record the dataset's column names alongside the data."""
        write_columns_name(self._fs, columns_name)


# Absolute path of the companion "str_embeddings.py" script that sits next to
# this module; write_dataset_script copies it into each dataset as "dataset.py".
dataset_script_file_path = os.path.join(os.path.dirname(__file__), "str_embeddings.py")


def write_dataset_script(fs: AbstractFileSystem):
    """Copy the bundled loader script into the dataset as ``dataset.py``.

    Args:
        fs: Target filesystem rooted at the dataset directory.
    """
    # Explicit encodings keep the copy byte-stable and independent of the
    # platform's locale default (PEP 597).
    with open(dataset_script_file_path, "rt", encoding="utf-8") as fp:
        dataset_script = fp.read()
    with fs.open("dataset.py", "wt", encoding="utf-8") as fp:
        fp.write(dataset_script)


def write_columns_name(fs: AbstractFileSystem, columns_name: Iterable[str]):
    """Write the column names, one per row, to ``columns.csv`` on *fs*.

    Args:
        fs: Target filesystem rooted at the dataset directory.
        columns_name: Column names in order; each becomes a one-cell CSV row.
    """
    # newline="" is the documented way to open a text stream for csv.writer:
    # it stops the text layer from translating the "\r\n" row terminators
    # (which would otherwise become "\r\r\n" on Windows).
    with fs.open("columns.csv", "wt", newline="") as fp:
        writer = csv.writer(fp)
        writer.writerows([column_name] for column_name in columns_name)
