import os
import re
from enum import Enum, auto, unique
from itertools import product
from math import prod
from typing import Any, Dict, Iterable, Iterator, List, Literal, NamedTuple, Optional, Sequence, Set, Tuple, Union

import fire
import openpyxl as xl

######## Common Data Structures ########

# Size Format Conversions

# Matches "<digits>[optional unit]", e.g. '4096', '4K', '16MiB'; a missing
# unit means plain bytes.
__human_readable_size_regex = re.compile(
    r'(?P<scale>\d+)\s*(?P<unit>[KMGT]i?B?)?', re.IGNORECASE)
# Binary unit prefixes, ascending: K=2^10 .. T=2^40.
__human_readable_size_units: List[Tuple[str, int]] = [
    ('K', 2 ** 10), ('M', 2 ** 20), ('G', 2 ** 30), ('T', 2 ** 40)
]

def human_readable_size_to_bytes(s: str) -> int:
    '''Convert a size string such as "4K" or "16MiB" to a byte count.

    Units are interpreted as binary (K = 1024); a bare number is bytes.
    @raise ValueError if `s` or its unit is unrecognizable
    '''
    match = __human_readable_size_regex.match(s)
    if match is None:
        raise ValueError(f'{s} is not a recognizable size')
    scale = int(match.group('scale'))
    unit = match.group('unit')
    if unit is None:
        # no unit suffix: the number is already in bytes
        return scale
    for unit_char, unit_scale in __human_readable_size_units:
        if unit_char in unit or unit_char.lower() in unit:
            return scale * unit_scale
    raise ValueError(f'unit {unit} is not recognizable')

def size_bytes_to_human_readable(b: int, with_byte=False, exact_unit=False) -> str:
    '''Render a byte count with the largest fitting binary unit.

    @param b size in bytes
    @param with_byte append a trailing "B" to the result
    @param exact_unit use the binary-prefix spelling ("Ki" vs "K")
    '''
    byte_suffix = 'B' if with_byte else ''
    binary_infix = 'i' if exact_unit else ''
    # Try units largest-first; a unit is used once b exceeds 90% of it,
    # so e.g. 1000 renders as "0.9766K" rather than "1000".
    for unit_char, unit_scale in reversed(__human_readable_size_units):
        if b > .9 * unit_scale:
            if b % unit_scale == 0:
                value = f'{b//unit_scale}'
            else:
                value = f'{b/unit_scale:.4}'
            return f'{value}{unit_char}{binary_infix}{byte_suffix}'
    return f'{b}{byte_suffix}'


# Table Items

class EnumStringConvertible(Enum):
    '''Mixin base: members render as their lowercase name and can be
    parsed back from free-form text.'''

    def __str__(self) -> str:
        return self.name.lower()

    @classmethod
    def from_str(cls, s: str):
        '''Return the first member whose name occurs (case-insensitively)
        as a substring of `s`.

        Members are scanned in definition order, so earlier members win
        when one name is a substring of another (e.g. PMEM before MEM).
        @raise ValueError if no member name occurs in `s`
        '''
        for name, member in cls.__members__.items():
            if name.upper() in s.upper():
                return member
        # Report the concrete subclass: this mixin is shared by several
        # enums (DeviceType, WorkloadType), not only device types.
        raise ValueError(f'cannot find {cls.__name__} from {s}')

@unique
class DeviceType(EnumStringConvertible):
    '''Storage backend under test; values double as the sort key used
    when ordering report rows (see `dump_data`).'''
    # NOTE: keep PMEM declared before MEM — `from_str` matches member
    # names as substrings in definition order, and "MEM" is a substring
    # of "PMEM".
    PMEM = 1
    NVME = 2
    SATA_SSD = 3
    HDD = 4
    MEM = 5

@unique
class WorkloadType(EnumStringConvertible):
    '''Benchmark access pattern; values double as the sort key used when
    ordering report rows (see `dump_data`).'''
    SEQ_WRITE = 1   # never emitted by parse_zxg_rados_log, kept for completeness
    RAND_WRITE = 2  # first workload of each run in the zxg log layout
    SEQ_READ = 3
    RAND_READ = 4


class RunIdentifier(NamedTuple):
    '''Table header item: identifies one benchmark run.

    As a NamedTuple it hashes and compares by the four field values, so
    instances can key a `DataTable` directly (no custom __hash__/__eq__
    needed).
    '''
    device_type: DeviceType
    block_size_bytes: int
    num_threads: int
    workload_type: WorkloadType

    @property
    def block_size(self) -> str:
        '''Block size rendered human-readable, e.g. "4KB".'''
        return size_bytes_to_human_readable(self.block_size_bytes, with_byte=True)

    def __str__(self) -> str:
        return (
            f'{self.device_type} bs={self.block_size} thrd={self.num_threads} '
            f'{self.workload_type}'
        )

class Metrics(NamedTuple):
    '''table data item: averaged measurements for one benchmark run'''
    run: RunIdentifier              # the run these metrics belong to
    latency_avg: Optional[float]    # in seconds
    iops_avg: Optional[float]       # average I/O operations per second
    bandwidth_avg: Optional[float]  # in bytes/second (parser converts from MB/s)


# The whole parsed result: run identity -> its averaged metrics.
DataTable = Dict[RunIdentifier, Metrics]

def get_all_device_types(dt: DataTable) -> Set[DeviceType]:
    '''Distinct device types present in the table.'''
    return {ident.device_type for ident in dt}

def get_all_block_sizes(dt: DataTable) -> List[int]:
    '''Distinct block sizes (in bytes) present in the table, ascending.'''
    return sorted({ident.block_size_bytes for ident in dt})

def get_all_num_threads(dt: DataTable) -> List[int]:
    '''Distinct thread counts present in the table, ascending.'''
    return sorted({ident.num_threads for ident in dt})

def get_all_workload_types(dt: DataTable) -> Set[WorkloadType]:
    '''Distinct workload types present in the table.'''
    return {ident.workload_type for ident in dt}


# output generator

def nested(*iters: Iterable) -> Iterator[Tuple]:
    '''Yield the cartesian product of the given iterables as tuples.

    Thin wrapper over `itertools.product` that keeps this module's
    historical contract of rejecting an empty argument list (plain
    `product()` would instead yield one empty tuple). Unlike the previous
    hand-rolled recursion, `product` snapshots its inputs first, so
    one-shot iterators are also handled correctly.

    @raise ValueError (on first iteration) if no iterables are given
    '''
    if not iters:
        raise ValueError('at least one iterable is required')
    yield from product(*iters)

def dump_data(dt: DataTable,
        outfile: Optional[str]=None,
        format: Literal['repr', 'repr+table', 'json', 'xlsx']='json'):
    '''dump data table to stdout or file
    @param dt
    @param outfile output file path, or `None` for stdout
    @param format output file / string format
    '''
    if format == 'repr':
        print(dt)
        return

    if format == 'repr+table':
        for h, m in dt.items():
            print(
                f'{h.device_type:8} '
                f'bs={h.block_size:<6} thrd={h.num_threads:<6} '
                f'{h.workload_type:12}: '
                f'bw(m)={m.bandwidth_avg/2**20:<10.4} '
                f'iops={m.iops_avg:<8} '
                f'lat(s)={m.latency_avg:<12} '
            )
        return

    if format == 'json':
        raise NotImplementedError

    def col_num_in_alpha(i: int) -> str:
        '''
        @param i 1-indexed
        '''
        if i <= 0: raise ValueError
        a = ''
        while i != 0:
            i -= 1
            a = chr(ord('A') + i % 26) + a
            i //= 26
        return a

    if format == 'xlsx':
        if outfile is None or not os.path.isdir(os.path.dirname(outfile)):
            raise FileNotFoundError
        wb = xl.Workbook()
        ws = wb.active
        ws.title = os.path.splitext(os.path.basename(outfile))[0]
        # dump data
        header = (
            'dev', 'bs', 'bs_hr', 'thrd',           # A B C D
            'type', 'lat. (s)', 'bw (MB/s)', 'iops' # E F G H I
        )
        DATA_MAX_COL = len(header)
        ws.append(header)
        DATA_MIN_ROW = 2
        device_types = sorted(list(get_all_device_types(dt)), key=lambda d: d.value)
        block_sizes = get_all_block_sizes(dt)
        nums_threads = get_all_num_threads(dt)
        workload_types = sorted(list(get_all_workload_types(dt)), key=lambda w: w.value)
        DATA_SIZE = prod(map(len, (device_types, block_sizes, nums_threads, workload_types)))
        for d, bs, t, w in nested(device_types, block_sizes, nums_threads, workload_types):
            id = RunIdentifier(d, bs, t, w)
            m = dt.get(id)
            if m is None:
                ws.append(tuple())
                continue
            ws.append((
                str(d), bs, size_bytes_to_human_readable(bs), t,
                str(w), m.latency_avg, m.bandwidth_avg / 2 ** 20, m.iops_avg
            ))
        # TODO: add sprinkles
        # TODO: figure concurrency comparison
        CONC_FIG_MIN_COL = DATA_MAX_COL + 2
        # TODO: figure block size comparision
        # TODO: figure device (backend) advantage, if multiple device types
        wb.save(outfile)
        print(f'Excel dumped to {outfile}')


######## Log Format Specific Parser ########

# Header line of one run, e.g. "=== pmem_xxx -b=4K -O=4K -t=16 ===".
__zxg_rados_identifier_regex = re.compile(
    r'^=+\s*'
    r'(?P<device_type>(?:pmem|nvme|sata_ssd|hdd|mem))\w*\s+'
    r'-b=(?P<block_size>\S+)\s+'
    r'-O=(?P<object_size>\S+)\s+'
    r'-t=(?P<num_threads>\d+)'
    r'\s*=+$'
)
def get_rados_identifier_from_str(s: str, format: Literal['zxg']='zxg') \
        -> Optional[Dict[str, Any]]:
    '''Try to interpret `s` as a run header line.

    @param s one log line
    @param format header dialect; only 'zxg' is supported
    @return `None` if `s` is not a header line, otherwise a dict of
        keyword arguments to later fill {@link RunIdentifier}, complete
        except for `workload_type` (only known once metrics follow)
    @raise ValueError on an unknown `format`, or when the header's block
        size and object size disagree
    '''
    m = None
    if format == 'zxg':
        m = __zxg_rados_identifier_regex.match(s)
    else:
        raise ValueError(f'unknown format {format}')
    # line not identifier
    if m is None:
        return None
    gd = m.groupdict()
    # Sanity check: the two sizes are compared textually, so '4K' and
    # '4096' would (intentionally) also be rejected.
    if gd['block_size'] != gd['object_size']:
        raise ValueError(f'block size {gd["block_size"]} and object size '
                        f'{gd["object_size"]} do not match')
    gd['device_type'] = DeviceType.from_str(gd['device_type'])
    gd['block_size_bytes'] = human_readable_size_to_bytes(gd['block_size'])
    del gd['block_size']
    del gd['object_size']
    gd['num_threads'] = int(gd['num_threads'])
    return gd


def parse_zxg_rados_log(f: str) -> DataTable:
    '''Parse a `rados bench` console log ("zxg" layout) into a DataTable.

    Each run opens with a header line (see
    `get_rados_identifier_from_str`) and is expected to report one
    Bandwidth / Average IOPS / Average Latency summary block per
    workload, in the fixed order rand-write, seq-read, rand-read
    (the order produced by `run-rados-test.sh`).

    @param f log file path
    @raise FileNotFoundError if no file exists at `f`
    @raise RuntimeError if the log deviates from the state machine below
        (e.g. a new header appears before the previous run completed)
    '''
    if not os.path.isfile(f):
        raise FileNotFoundError(f'no log file found at {f}')

    dt: DataTable = {}

    @unique
    class ParseState(Enum):
        '''
                       (unused, jump right through)
        +===========+     + - - - - - - - -+
        | NO_TARGET | --> | PARSING_CONFIG | --.
        +===========+     +- - - - - - - - +   |
        ,--------------------------------------'
        |   +--------------+     +-----------------+     +------------+
        `-> | FOUND_HEADER | --> | FOUND_BANDWIDTH | --> | FOUND_IOPS | --.
            +--------------+     +-----------------+     +------------+   |
                   ^ (new run)           ^                                |
        ,----------)---------------------)--------------------------------'
        |   +------+--------+            |
        `-> | FOUND_LATENCY | -----------' (workload type changed)
            +---------------+
          (got all metrics, push to table, next workload)
        '''
        NO_TARGET = auto()
        PARSING_CONFIG = auto() # @unused
        FOUND_HEADER = auto()
        FOUND_BANDWIDTH = auto()
        FOUND_IOPS = auto()
        FOUND_LATENCY = auto()

    # Summary-line patterns printed by `rados bench`; compiled once,
    # outside the per-line loop.
    bandwidth_regex = re.compile(r'Bandwidth \(MB/sec\):\s*(\d+(?:\.\d+)?)')
    iops_regex = re.compile(r'Average IOPS:\s*(\d+)')
    latency_regex = re.compile(r'Average Latency\(s\):\s*(\d+(?:\.\d+)?)')

    with open(f, 'rt') as log:
        state = ParseState.NO_TARGET
        curr_header: RunIdentifier      # identity of the run being parsed
        fill_metrics: Dict[str, Any]    # Metrics kwargs collected so far

        for line in log:
            maybe_header = get_rados_identifier_from_str(line)
            if maybe_header is not None:
                # A header may only start a run while idle, or right after
                # the previous run's final metric was collected.
                if state in (ParseState.NO_TARGET, ParseState.FOUND_LATENCY):
                    state = ParseState.FOUND_HEADER
                    # First workload after a header is always rand-write
                    # (order fixed by the generating script).
                    maybe_header['workload_type'] = WorkloadType.RAND_WRITE
                    curr_header = RunIdentifier(**maybe_header)
                    fill_metrics = {'run': curr_header}
                else:
                    raise RuntimeError('parser state machine error')
            else:
                #assert maybe_header is None
                if state == ParseState.NO_TARGET:
                    # not inside a run yet; skip the line
                    pass
                elif state in (ParseState.FOUND_HEADER, ParseState.FOUND_LATENCY):
                    # search for bandwidth
                    maybe_bandwidth = bandwidth_regex.match(line)
                    if maybe_bandwidth is not None:
                        state = ParseState.FOUND_BANDWIDTH
                        # the log reports MB/s; store bytes/s
                        fill_metrics['bandwidth_avg'] = float(maybe_bandwidth.group(1)) * 2 ** 20
                elif state == ParseState.FOUND_BANDWIDTH:
                    # search for IOPS
                    maybe_iops = iops_regex.match(line)
                    if maybe_iops is not None:
                        state = ParseState.FOUND_IOPS
                        fill_metrics['iops_avg'] = int(maybe_iops.group(1))
                elif state == ParseState.FOUND_IOPS:
                    # search for latency
                    maybe_latency = latency_regex.match(line)
                    if maybe_latency is not None:
                        state = ParseState.FOUND_LATENCY
                        fill_metrics['latency_avg'] = float(maybe_latency.group(1))
                        # got all metrics, push to table
                        dt[curr_header] = Metrics(**fill_metrics)
                        # prepare for next workload
                        if curr_header.workload_type == WorkloadType.RAND_WRITE:
                            next_workload_type = WorkloadType.SEQ_READ
                        elif curr_header.workload_type == WorkloadType.SEQ_READ:
                            next_workload_type = WorkloadType.RAND_READ
                        elif curr_header.workload_type == WorkloadType.RAND_READ:
                            # The last workload type, by order in `run-rados-test.sh`.
                            # The next workload belongs to a new run, so any
                            # value works: it is overwritten when that run's
                            # header arrives.
                            next_workload_type = curr_header.workload_type
                        curr_header = curr_header._replace(workload_type=next_workload_type)
                else:
                    raise RuntimeError('parser state machine error')

    return dt


######## Command-Line Interface ########

def main(input_file: str,
        output_file: Optional[str]=None,
        output_format: Literal['repr', 'xlsx']='xlsx'):
    '''Parse a rados benchmark log and emit it in the requested format.

    @param input_file path of the log file to parse
    @param output_file destination path; derived from `input_file` when
        omitted (ignored by the 'repr' format, which prints to stdout)
    @param output_format 'repr' for a plain-text table, 'xlsx' for Excel
    @raise FileNotFoundError if `input_file` does not exist
    '''
    if not os.path.isfile(input_file):
        raise FileNotFoundError(input_file)
    # File-based formats need a destination; default to the input path
    # with its extension swapped.
    if output_format != 'repr' and output_file is None:
        stem, _ = os.path.splitext(input_file)
        output_file = f'{stem}.{output_format}'

    table = parse_zxg_rados_log(input_file)

    if output_format == 'repr':
        dump_data(table, format='repr+table')
    elif output_format == 'xlsx':
        dump_data(table, output_file, format='xlsx')


if __name__ == '__main__':
    # python-fire exposes `main`'s signature as the command-line
    # interface (positional INPUT_FILE plus --output-file/--output-format
    # flags).
    fire.Fire(main)
