import re
from collections import deque
from collections.abc import Iterator

import pyzstd
from zkl_pyutils_fsspec import FsLike, resolve_fs

# Chunk files are named 'chunk_{head}-{last}.zst' with inclusive item indices.
# The dot before 'zst' must be escaped: an unescaped '.' matches any character,
# so names like 'chunk_1-2Xzst' would wrongly be treated as chunk files.
chunk_file_name_pattern = re.compile(r'^chunk_(\d+)-(\d+)\.zst$')


def parse_chunk_file_name(file_name: str) -> tuple[int, int] | None:
    """Parse a chunk file name into a half-open ``(head, tail)`` item range.

    Returns None when the name does not look like a chunk file.
    """
    m = chunk_file_name_pattern.match(file_name)
    if not m:
        return None
    # File names carry an inclusive last index; convert to an exclusive tail.
    return int(m.group(1)), int(m.group(2)) + 1


def dump_chunk_file_name(head: int, tail: int) -> str:
    """Build a chunk file name from a half-open ``(head, tail)`` item range."""
    # File names carry the inclusive last index, hence tail - 1.
    last = tail - 1
    return f'chunk_{head}-{last}.zst'


def iter_chunks(fs: FsLike) -> Iterator[tuple[int, int]]:
    """Yield the ``(head, tail)`` item range of every chunk file at the store root.

    Non-chunk entries returned by ``fs.ls`` are silently skipped.
    NOTE(review): assumes ``fs.ls("", detail=False)`` yields bare file names
    (no directory prefix), otherwise the name pattern never matches — confirm.
    """
    for entry in fs.ls("", detail=False):
        parsed = parse_chunk_file_name(entry)
        if parsed is not None:
            yield parsed


class HomoBytesSequenceWriter:
    """Append-only writer for a sequence of fixed-size byte items.

    Items are packed into zstd-compressed chunk files named
    ``chunk_{head}-{last}.zst`` (inclusive item indices). The chunk
    currently being appended to is kept decompressed in memory and its
    file is rewritten on every ``write``; once its compressed size
    reaches ``chunk_size``, a new chunk is started.
    """

    def __init__(self,
        fs: FsLike, *,
        item_size: int,
        chunk_size: int = 8 * 1024 * 1024,  # 8MB
    ):
        self._fs = resolve_fs(fs)
        self._item_size = item_size
        self._chunk_size = chunk_size

        # Decompressed contents of the chunk being appended to; None until
        # the first write() lazily restores state from disk.
        self._last_chunk_buffer: bytearray | None = None
        # Half-open item index range [head, tail) covered by the buffer.
        self._last_chunk_item_head: int = 0
        self._last_chunk_item_tail: int = 0

    def _load_last_chunk(self):
        """Restore the in-memory buffer from the newest chunk file on disk.

        Raises ValueError if the newest chunk's decompressed size disagrees
        with the item range encoded in its file name.
        """
        # Find the chunk file with the greatest item tail.
        last_chunk_file_name = None
        last_chunk_item_head = None
        last_chunk_item_tail = None
        for chunk_head_i, chunk_tail_i in iter_chunks(self._fs):
            chunk_file_name = dump_chunk_file_name(chunk_head_i, chunk_tail_i)
            if last_chunk_item_tail is None or chunk_tail_i > last_chunk_item_tail:
                last_chunk_file_name = chunk_file_name
                last_chunk_item_head = chunk_head_i
                last_chunk_item_tail = chunk_tail_i

        # If no chunks found, initialize an empty buffer
        if last_chunk_file_name is None:
            self._last_chunk_buffer = bytearray()
            self._last_chunk_item_head = 0
            self._last_chunk_item_tail = 0
            return

        # If the last chunk is already full (per stat), start a new, empty
        # chunk right after it instead of reopening it.
        last_chunk_file_size = self._fs.stat(last_chunk_file_name).get('size', 0)
        if last_chunk_file_size >= self._chunk_size:
            self._last_chunk_buffer = bytearray()
            self._last_chunk_item_head = last_chunk_item_tail
            self._last_chunk_item_tail = last_chunk_item_tail
            return

        # Read and decompress the last chunk
        with self._fs.open(last_chunk_file_name, 'rb') as fp:
            last_chunk_compressed = fp.read()

        # Re-check fullness against the actual byte count, in case stat()
        # reported no 'size' (the .get default above is 0) or a stale value.
        if len(last_chunk_compressed) >= self._chunk_size:
            self._last_chunk_buffer = bytearray()
            self._last_chunk_item_head = last_chunk_item_tail
            self._last_chunk_item_tail = last_chunk_item_tail
            return

        # Verify chunk size matches the indices from filename
        last_chunk_data = pyzstd.decompress(last_chunk_compressed)
        expected_items_n = last_chunk_item_tail - last_chunk_item_head
        expected_bytes_n = expected_items_n * self._item_size
        actual_bytes_n = len(last_chunk_data)
        if actual_bytes_n != expected_bytes_n:
            raise ValueError(
                f'Unexpected file size of chunk {last_chunk_file_name}: '
                f'expected {expected_items_n} * {self._item_size} bytes, '
                f'got {actual_bytes_n} bytes!')

        # Store the decompressed data in bytearray
        self._last_chunk_buffer = bytearray(last_chunk_data)
        self._last_chunk_item_head = last_chunk_item_head
        self._last_chunk_item_tail = last_chunk_item_tail

    def write(self, item: bytes):
        """Append one item and persist the updated chunk to disk.

        Raises ValueError if the item is not exactly ``item_size`` bytes,
        or if a single item alone compresses to ``chunk_size`` or more.
        """
        if len(item) != self._item_size:
            raise ValueError(f'Expected {self._item_size} bytes, got {len(item)}')

        # Load last chunk on first write
        if self._last_chunk_buffer is None:
            self._load_last_chunk()

        # Try compressing last_chunk_data + item
        last_chunk_data = bytes(self._last_chunk_buffer)
        next_chunk_compressed = pyzstd.compress(last_chunk_data + item)
        if len(next_chunk_compressed) < self._chunk_size:
            # Write the compressed data to disk immediately
            next_chunk_file_name = dump_chunk_file_name(self._last_chunk_item_head, self._last_chunk_item_tail + 1)
            with self._fs.open(next_chunk_file_name, 'wb') as fp:
                fp.write(next_chunk_compressed)
            # Remove the previous file of this same chunk (one item shorter).
            # Such a file exists only when the chunk already held items
            # (head < tail). The former condition (tail > 0) wrongly tried to
            # remove a nonexistent file 'chunk_N-(N-1).zst' on the first write
            # after a fresh chunk was started at a nonzero offset N.
            if self._last_chunk_item_tail > self._last_chunk_item_head:
                last_chunk_file_name = dump_chunk_file_name(self._last_chunk_item_head, self._last_chunk_item_tail)
                self._fs.rm(last_chunk_file_name)
            # Add the item to buffer
            self._last_chunk_buffer.extend(item)
            self._last_chunk_item_tail += 1
            return

        # Single item exceeds threshold
        if not last_chunk_data:
            raise ValueError(
                f"Single item (size: {len(item)} bytes) compresses to {len(next_chunk_compressed)} bytes, "
                f"which exceeds chunk_size ({self._chunk_size} bytes)")

        # Current chunk is full: write a new chunk containing only the item.
        # The previous chunk's file stays on disk untouched.
        new_chunk_file_name = dump_chunk_file_name(self._last_chunk_item_tail, self._last_chunk_item_tail + 1)
        with self._fs.open(new_chunk_file_name, 'wb') as fp:
            fp.write(pyzstd.compress(item))

        # The new single-item chunk becomes the in-memory chunk
        self._last_chunk_buffer = bytearray(item)
        self._last_chunk_item_head = self._last_chunk_item_tail
        self._last_chunk_item_tail = self._last_chunk_item_head + 1


class HomoBytesSequenceReader:
    """Reader for a sequence of fixed-size byte items produced by
    ``HomoBytesSequenceWriter``.

    All chunk files are decompressed into one contiguous in-memory buffer
    on first access; ``refresh`` picks up chunks appended since then.
    """

    def __init__(self,
        fs: FsLike, *,
        item_size: int,
    ):
        self._fs = resolve_fs(fs)
        self._item_size = item_size

        # Contiguous decompressed items; None until the first lazy load.
        self._items_buffer: bytearray | None = None
        self._items_n = 0

    def _load_chunks(self):
        """Extend the buffer with every item not yet loaded.

        Raises ValueError when the chunk files leave a gap in the item
        sequence, or when a chunk's size disagrees with its file name.
        """
        if self._items_buffer is None:
            self._items_buffer = bytearray()
            self._items_n = 0

        # Chunks sorted by (head, tail); each one is either already covered,
        # extends the buffer, or reveals a gap in the sequence.
        for chunk_item_head, chunk_item_tail in sorted(iter_chunks(self._fs)):
            if chunk_item_tail <= self._items_n:
                # Entirely covered by what is already loaded (stale file).
                continue
            chunk_file_name = dump_chunk_file_name(chunk_item_head, chunk_item_tail)
            if chunk_item_head > self._items_n:
                # Sorted order: no remaining chunk can start at or before
                # self._items_n either, so the sequence is incomplete.
                # (The previous code re-queued such chunks and spun forever.)
                raise ValueError(
                    f'Missing items {self._items_n}..{chunk_item_head - 1} '
                    f'before chunk {chunk_file_name}!')
            with pyzstd.open(self._fs.open(chunk_file_name, 'rb'), 'rb') as fp:
                # Skip the prefix of the chunk that overlaps already-loaded items.
                skip_items_n = self._items_n - chunk_item_head
                skip_bytes_n = skip_items_n * self._item_size
                fp.seek(skip_bytes_n)
                append_bytes = fp.read()

            append_items_n = chunk_item_tail - self._items_n
            append_bytes_n = append_items_n * self._item_size
            if len(append_bytes) != append_bytes_n:
                # Report the actual decompressed size; the old message summed
                # the *expected* counts, so it always echoed the expected size.
                raise ValueError(
                    f'Unexpected file size of chunk {chunk_file_name}: '
                    f'expected {chunk_item_tail - chunk_item_head} * {self._item_size} bytes, '
                    f'got {skip_bytes_n + len(append_bytes)} bytes!')

            self._items_buffer.extend(append_bytes)
            self._items_n += append_items_n

    def refresh(self, clean: bool):
        """Re-scan the store for new chunks.

        With ``clean=True`` the buffer is discarded and reloaded from scratch
        (needed if already-loaded chunks may have been rewritten).
        """
        if clean:
            self._items_buffer = None
            self._items_n = 0
        self._load_chunks()

    def __len__(self) -> int:
        """Number of items currently loaded (triggers the initial load)."""
        if self._items_buffer is None:
            self._load_chunks()
        return self._items_n

    def __iter__(self) -> Iterator[bytes]:
        """Yield each item as an immutable ``bytes`` copy, in order."""
        if self._items_buffer is None:
            self._load_chunks()
        for head in range(0, len(self._items_buffer), self._item_size):
            tail = head + self._item_size
            yield bytes(self._items_buffer[head:tail])

    def __getitem__(self, item_i: int) -> bytes:
        """Return item ``item_i``; negative indices count from the end.

        Raises IndexError when the index is out of range.
        """
        # len(self) triggers the initial load when needed.
        if item_i < 0:
            item_i = len(self) + item_i
        if item_i < 0 or item_i >= len(self):
            raise IndexError(f"Expected {-len(self)}<=item_i<{len(self)}, got {item_i=}")

        if self._items_buffer is None:
            self._load_chunks()

        # read item from buffer
        item_byte_head = item_i * self._item_size
        item_byte_tail = item_byte_head + self._item_size
        return bytes(self._items_buffer[item_byte_head:item_byte_tail])
