import functools
import io
import json
import operator
import os
import threading
from contextlib import nullcontext
from mmap import mmap, ACCESS_WRITE
from multiprocessing.managers import Value
from os import PathLike
from pathlib import Path
from typing import Callable, AnyStr, Literal, List, TypedDict

from rich.progress import Progress

from storage.blobstore.compression import compress_blob, decompress_blob
from storage.blobstore.fileblob import CompressMethod
from storage.utils import mmap_view, common_progress

# Signature of a compression function: raw bytes in, compressed bytes out.
CompressFunc = Callable[[bytes], bytes]
# Signature of a decompression function: compressed bytes in, raw bytes out.
DecompressFunc = Callable[[bytes], bytes]

# Allowed binary open() modes; NOTE(review): unused in this chunk — presumably
# consumed by sequential read/write helpers elsewhere, confirm before removing.
SequentialReadWriteMode = Literal['rb', 'wb']


class CompressFileMeta(TypedDict):
    """Top-level metadata describing a compressed file and all of its part files."""
    # Original (uncompressed) file name.
    filename: str
    # Block size in bytes used to chunk the data before compression.
    block_size: int
    # Compression method applied to every block.
    compression: CompressMethod
    # One entry per part n (0-based), matching the on-disk files
    #   {filename}.{n}.part
    #   {filename}.{n}.meta
    # Each entry is that part's size BEFORE compression.
    split: List[int]


class CompressFileRangeMeta(TypedDict):
    """Metadata for one part file: the manifest of compressed blocks it contains."""
    # Blocks in write order; offsets are relative to the start of the part file.
    blocks: List['CompressFileBlockMeta']


class CompressFileBlockMeta(TypedDict):
    """Location of a single compressed block inside a part file."""
    # Byte offset of the block within the part file.
    offset: int
    # Compressed length of the block in bytes.
    length: int

def _compress_file_range(src_view: memoryview, range_path: AnyStr | PathLike, range_meta_path: AnyStr | PathLike,
                         start: int, end: int,
                         compress: CompressMethod, block_size: int = 128 * 1024, progress: Progress = None,
                         discard_output=False):
    """
    Compress one byte range of the source file into a part file.

    The range ``[start, end)`` is read from ``src_view`` in blocks of
    ``block_size`` bytes; each block is compressed independently and appended
    to the part file, while its offset and compressed length inside the part
    file are recorded in the accompanying range metadata file.

    :param src_view: memory-mapped view of the source file to read from
    :param range_path: path of the part file receiving the compressed blocks
    :param range_meta_path: path of the JSON block-manifest file for this range
    :param start: start offset (inclusive) within the source file
    :param end: end offset (exclusive) within the source file
    :param compress: compression algorithm applied to each block
    :param block_size: uncompressed block size, defaults to 128 KiB
    :param progress: optional progress bar, advanced by uncompressed bytes
    :param discard_output: when True, compress but write neither part data nor
        metadata (benchmark mode)
    :returns: None
    """
    range_path = Path(range_path)
    meta_path = Path(range_meta_path)
    range_meta: CompressFileRangeMeta = dict(blocks=[])

    task = None
    if progress:
        task = progress.add_task(range_path.name, total=end - start)

    with (nullcontext() if discard_output else range_path.open('wb')) as dst:
        offset = 0
        for src_from in range(start, end, block_size):
            src_to = min(src_from + block_size, end)
            with src_view[src_from:src_to] as data:
                compressed = compress_blob(compress, data)
                uncompressed_length = len(data)
            # BUG FIX: with discard_output=True, nullcontext() yields None, so
            # the original unconditional dst.write(compressed) always raised
            # AttributeError; only write when a real file is open.
            if dst is not None:
                dst.write(compressed)
            range_meta['blocks'].append(dict(offset=offset, length=len(compressed)))
            offset += len(compressed)

            if progress is not None and task is not None:
                progress.update(task, advance=uncompressed_length)

    if not discard_output:
        with meta_path.open('w', encoding='utf-8') as meta_file:
            json.dump(range_meta, meta_file)


def compress_file(path: AnyStr | PathLike, output: AnyStr | PathLike,
                  *,
                  compress: CompressMethod = CompressMethod.LZ4,
                  split: int = 4,
                  show_progress=False,
                  discard_output=False):
    """
    Compress a file into roughly ``split`` part files plus a metadata file.

    The source is divided into equal ranges that are compressed concurrently,
    one thread per range, all sharing a single read-only mmap of the source.
    Each range ``i`` produces ``{name}.{i}.part`` and ``{name}.{i}.meta`` in
    the output directory, and an overall ``{name}.meta`` describes the parts.

    :param path: source file to compress
    :param output: directory that receives the part and metadata files
    :param compress: compression algorithm, defaults to LZ4
    :param split: target number of concurrent ranges / part files
    :param show_progress: display a progress bar while compressing
    :param discard_output: compress but write nothing (benchmark mode)
    """
    progress = common_progress(show_progress)
    output_dir: Path = Path(output)
    src_path = Path(path)
    total = src_path.stat().st_size

    # Per-worker range size. BUG FIX: clamp to at least 1 — with a file smaller
    # than `split` bytes the original computed a zero step and range() raised
    # ValueError. Fewer than `split` parts are then simply produced.
    workload = max(1, total // split)

    metadata: CompressFileMeta = dict(filename=src_path.name, block_size=workload, compression=compress, split=[])
    workers = []
    with mmap_view(src_path, readonly=True) as src_view:
        for i, start in enumerate(range(0, total, workload)):
            end = min(start + workload, total)

            # Output paths for this worker's range.
            range_path = str(output_dir / src_path.name) + f'.{i}.part'
            range_meta_path = str(output_dir / src_path.name) + f'.{i}.meta'

            # Record the uncompressed size of this range in the metadata.
            metadata["split"].append(end - start)

            # Prepare (but do not start) the worker thread.
            worker = threading.Thread(target=_compress_file_range,
                                      args=(src_view, range_path, range_meta_path, start, end, compress),
                                      kwargs={'progress': progress if show_progress else None,
                                              'discard_output': discard_output})
            workers.append(worker)

        # Run every worker and wait for completion while the progress display
        # is active.
        with progress:
            for worker in workers:
                worker.start()
            for worker in workers:
                worker.join()

    # Benchmark mode: skip the metadata file as well.
    if discard_output:
        return

    meta_path = Path(str(output_dir / src_path.name) + '.meta')
    with meta_path.open('w', encoding='utf-8') as meta_file:
        json.dump(metadata, meta_file)


def decompress_file(meta_path: AnyStr | PathLike, output: AnyStr | PathLike, show_progress=False, dry=False):
    """
    Decompress a file previously produced by :func:`compress_file`.

    Reads the top-level metadata, then walks every part file and its block
    manifest, decompressing block by block and concatenating the results into
    ``output / original_filename``.

    :param meta_path: path of the top-level ``.meta`` file written by compress_file
    :param output: directory that receives the reconstructed file
    :param show_progress: display a progress bar while decompressing
    :param dry: when True, decompress but do not create or write the output file
    :return: None
    """
    meta_path = Path(meta_path)
    output = Path(output)

    # Load the top-level metadata that lists the parts and the method used.
    with meta_path.open('r', encoding='utf-8') as f:
        meta: CompressFileMeta = json.load(f)
    output = output / meta["filename"]

    progress = common_progress(show_progress)
    task = None
    if show_progress:
        # Total uncompressed size is the sum of the per-part sizes.
        task = progress.add_task(output.name, total=sum(meta["split"]))

    # BUG FIX: a dry run must not create/truncate the output file, so only
    # open it when actually writing. Also enter the progress display (as
    # compress_file does) so the bar renders while work is in flight.
    with (nullcontext() if dry else output.open('wb')) as dst, progress:
        for i in range(len(meta["split"])):
            part_path = meta_path.parent / f'{meta["filename"]}.{i}.part'
            # '.part' is the last suffix, so with_suffix yields '{name}.{i}.meta'.
            part_meta_path = part_path.with_suffix('.meta')

            part_meta: CompressFileRangeMeta = json.loads(part_meta_path.read_bytes())
            with mmap_view(part_path) as part_view:
                for block in part_meta['blocks']:
                    src_from = block['offset']
                    src_to = src_from + block['length']

                    decompressed = decompress_blob(meta["compression"], part_view[src_from:src_to])
                    if dst is not None:
                        dst.write(decompressed)

                    if show_progress:
                        progress.update(task, advance=len(decompressed))