#!/usr/bin/env python3

'''
Environment Variables
---------------------

TOTAL_OBJECTS
    Number of objects loaded by YCSB, default 10**4
TARGET_MAX_OBJECTS
    `target_max_objects` set on cache pool, default 10**2
OPERATION_COUNT
    `operationcount` supplied to YCSB, default 10**5
REQUEST_DISTRIBUTION
    `requestdistribution` supplied to YCSB, default `zipfian`

CEPH_BUILD_ROOT
    Root directory of the Ceph build tree, default `/mnt/zxg/ceph/build`
YCSB_ROOT
    Root directory of YCSB, default `/mnt/zxg/ycsb-0.17.0`
WITH_TESTS
    With debug tests, default off
LEAVE_TEST_POOLS
    Don't delete test pools after run, default off

Additional cache-tier tuning knobs (FULL_RATIO, DIRTY_RATIO, DIRTY_HIGH_RATIO,
HIT_SET_COUNT, HIT_SET_PERIOD, MIN_RECENCY) are read from the environment in
the configuration section below.
'''

import errno
import json
import logging
import os
import re
import subprocess
import sys
import uuid
from time import sleep
from typing import Any, Callable, Dict, Iterable, Literal, Tuple, Union

import rados
from tqdm import tqdm

#### configurations ####

# Number of objects YCSB loads into the base pool (env: TOTAL_OBJECTS).
TOTAL_OBJECTS = int(os.getenv('TOTAL_OBJECTS', 10**4))
# `target_max_objects` applied to the cache pool below (env: TARGET_MAX_OBJECTS).
TARGET_MAX_OBJECTS = int(os.getenv('TARGET_MAX_OBJECTS', 10**2))
# `operationcount` handed to YCSB for the statistics run (env: OPERATION_COUNT).
OPERATION_COUNT = int(os.getenv('OPERATION_COUNT', 10**5))
# YCSB `requestdistribution` parameter (env: REQUEST_DISTRIBUTION).
REQUEST_DISTRIBUTION = os.getenv('REQUEST_DISTRIBUTION', 'zipfian')

# Cache-tier tuning knobs; each is applied to the like-named pool setting below.
FULL_RATIO = float(os.getenv('FULL_RATIO', .8))              # cache_target_full_ratio
DIRTY_RATIO = float(os.getenv('DIRTY_RATIO', .4))            # cache_target_dirty_ratio
DIRTY_HIGH_RATIO = float(os.getenv('DIRTY_HIGH_RATIO', .6))  # cache_target_dirty_high_ratio
HIT_SET_COUNT = int(os.getenv('HIT_SET_COUNT', 4))           # hit_set_count
HIT_SET_PERIOD = int(os.getenv('HIT_SET_PERIOD', 1200))      # hit_set_period
MIN_RECENCY = int(os.getenv('MIN_RECENCY', 1))               # min_read/write_recency_for_promote

####

# Shared logger for the whole script; INFO by default, so LOG.debug lines are
# suppressed unless the level is lowered.
logging.basicConfig()
LOG = logging.getLogger('test-cache-tier')
LOG.setLevel(logging.INFO)

def _ignore(fn: Callable):
    try:
        fn()
    except Exception as e:
        LOG.warning(f'Ignoring exception {e}')

# Locate the Ceph build tree; bail out early if the binaries are missing.
CEPH_BUILD_ROOT = os.getenv('CEPH_BUILD_ROOT', '/mnt/zxg/ceph/build')
CEPH_EXECUTABLE = os.path.join(CEPH_BUILD_ROOT, 'bin', 'ceph')
RADOS_EXECUTABLE = os.path.join(CEPH_BUILD_ROOT, 'bin', 'rados')
if not os.path.isdir(CEPH_BUILD_ROOT) or not os.path.isfile(CEPH_EXECUTABLE) or \
        not os.path.isfile(RADOS_EXECUTABLE):
    LOG.critical('Ceph not found!')
    # sys.exit() instead of the site-module `exit` helper: the latter is only
    # installed by the `site` module and is absent under `python -S` / frozen
    # interpreters.  Exit code kept as errno.ENFILE for compatibility.
    sys.exit(errno.ENFILE)

# Locate the YCSB installation.
YCSB_ROOT = os.getenv('YCSB_ROOT', '/mnt/zxg/ycsb-0.17.0')
YCSB_BIN = os.path.join(YCSB_ROOT, 'bin', 'ycsb')
YCSB_WORKLOAD_DIR = os.path.join(YCSB_ROOT, 'workloads')
if not os.path.isdir(YCSB_ROOT) or not os.path.isfile(YCSB_BIN) or \
        not os.path.isdir(YCSB_WORKLOAD_DIR):
    LOG.critical('YCSB not found!')
    sys.exit(errno.ENFILE)

# Connect to the already-running cluster using the default client config.
LOG.debug('Connecting to existing Ceph...')
cluster = rados.Rados(conffile='/etc/ceph/ceph.conf')
cluster.connect()

def do_mon_cli(pref: str, **kwargs) -> Tuple[int, bytes, str]:
    '''Run a MON CLI command against the connected cluster.

    @param pref command prefix, e.g. 'osd pool set'
    @param **kwargs remaining command arguments
    @returns (ret, outbuf, outs)
    @raises RuntimeError when the command reports a non-zero return code
    '''
    command = dict(kwargs, prefix=pref)
    result = cluster.mon_command(json.dumps(command), b'')
    if result[0] != 0:
        raise RuntimeError(result)
    return result

# Pools created by this script are named TEST_POOL_PREFIX + <hex id> + '_base'
# or '_cache'; the regex recovers the hex group id from either member.
TEST_POOL_PREFIX = 'test_zxg_cache-tier_'
__test_pool_regex = re.compile(TEST_POOL_PREFIX+r'(?P<id>[0-9a-fA-F]+)_(?:base|cache)')
def cleanup_test_pools():
    '''Tear down base/cache pool pairs left behind by previous runs.

    Every step is wrapped in `_ignore` since the tier relationship may already
    be partially dismantled, or one pool of a pair may be missing.
    '''
    group_ids = {
        m.group('id')
        for m in map(__test_pool_regex.match, cluster.list_pools())
        if m is not None
    }
    for gid in group_ids:
        prefix = f'{TEST_POOL_PREFIX}{gid}_'
        base_name, cache_name = prefix + 'base', prefix + 'cache'
        # order matters: overlay first, then tier link, then the pools
        _ignore(lambda: do_mon_cli('osd tier rm-overlay', pool=base_name))
        _ignore(lambda: do_mon_cli('osd tier rm', pool=base_name, tierpool=cache_name))
        _ignore(lambda: cluster.delete_pool(cache_name))
        _ignore(lambda: cluster.delete_pool(base_name))

LOG.info('Cleaning previous test pools...')
cleanup_test_pools()

# build a fresh base/cache pool pair, tagged with a unique per-run group id
LOG.info('Creating test env...')
POOL_GROUP_ID = uuid.uuid1().hex

base_pool = f'{TEST_POOL_PREFIX}{POOL_GROUP_ID}_base'
cache_pool = f'{TEST_POOL_PREFIX}{POOL_GROUP_ID}_cache'

for __pool in (base_pool, cache_pool):
    do_mon_cli('osd pool create', pool=__pool, pg_num=32, autoscale_mode='warn')

# attach the cache pool in front of the base pool as a cache tier, then apply
# the tuning knobs from the configuration section (order preserved)
do_mon_cli('osd tier add-cache', pool=base_pool, tierpool=cache_pool, size=0)
# do_mon_cli('osd pool set-quota', pool=cache_pool, field='max_objects', val=f'{TARGET_MAX_OBJECTS}')
for __var, __val in (
        ('target_max_objects', TARGET_MAX_OBJECTS),
        ('cache_target_full_ratio', FULL_RATIO),
        ('cache_target_dirty_ratio', DIRTY_RATIO),
        ('cache_target_dirty_high_ratio', DIRTY_HIGH_RATIO),
        ('hit_set_count', HIT_SET_COUNT),
        ('hit_set_period', HIT_SET_PERIOD),
        ('min_read_recency_for_promote', MIN_RECENCY),
        ('min_write_recency_for_promote', MIN_RECENCY)):
    do_mon_cli('osd pool set', pool=cache_pool, var=__var, val=f'{__val}')

## check default cache tier configs
for __var in ('target_max_bytes', 'target_max_objects',
            'cache_target_full_ratio', 'cache_target_dirty_ratio',
            'cache_target_dirty_high_ratio',
            'hit_set_count', 'hit_set_period',
            'min_read_recency_for_promote', 'min_write_recency_for_promote',
            'osd_tier_promote_max_bytes_sec', 'osd_tier_promote_max_objects_sec',
            'hit_set_grade_search_last_n', 'hit_set_grade_decay_rate',
            'cache_min_flush_age', 'cache_min_evict_age'):
    LOG.debug(do_mon_cli('osd pool get', pool=cache_pool, var=__var))
LOG.debug(do_mon_cli('osd dump')[1].decode())

######## debug test ########

def pool_has_object(pool: Union[str, rados.Ioctx], okey: str) -> bool:
    '''Test if `pool` has object named `okey`

    DEPRECATED: use op_cache_hit perf counter instead

    Probing is done with unstable API `Ioctx.operate_read_op()`: it essentially
    performs an actual read operation on the object within the specified pool.
    Unlike the stable `Ioctx.read()` public API, it allows setting of flags,
    which disables cache behavior within the operation, and treats cache tier
    and base tier as separate pools.
    However, every probing I/O still goes through HitSet logic in
    `PrimaryLogPG::do_op()`, which in turn affects cache promote behavior. That
    is, of course, unless you modified the source code.

    (An earlier implementation traversed `Ioctx.list_objects()` instead; it
    was correct but slow, and the dead code path has been removed.)

    @param pool pool name or an already-open ioctx
    @param okey object key to probe
    @returns if `pool` has `okey`
    '''
    if isinstance(pool, str):
        pool = cluster.open_ioctx(pool)
    with rados.ReadOpCtx() as op:
        # NOTE: DO NOT DELETE! This line presumably fills some private `OpCtx`
        # variables and `operation_[aio_]read/write_op()`-family will not work
        # without it!
        op.cmpext(b'', 0)
        try:
            pool.operate_read_op( # type: ignore
                op, okey,
                rados.LIBRADOS_OPERATION_IGNORE_CACHE | \
                rados.LIBRADOS_OPERATION_IGNORE_OVERLAY
            )
        except rados.ObjectNotFound:
            return False
    return True

def test_probe_method():
    '''Test if probe method works, i.e. for cache miss, object can be observed in
    base pool, but not in cache pool

    Self-test for `pool_has_object()`: write a handful of objects, promote a
    few by reading them, remove one, and verify at each step that base and
    cache pools report exactly the expected object sets.
    '''
    base_ctx = cluster.open_ioctx(base_pool)
    cache_ctx = cluster.open_ioctx(cache_pool)
    object_keys = [f'test-obj_{i}' for i in range(10)]
    # (left for experiments: shrink the cache to force early eviction)
    # do_mon_cli('osd pool set', pool=cache_pool, var='target_max_bytes', val='0')
    # do_mon_cli('osd pool set', pool=cache_pool, var='target_max_objects', val='4')

    LOG.debug('Writing objects to base pool...')
    for o in object_keys:
        LOG.debug(f'Writing object {o} to {base_pool}...')
        base_ctx.write_full(o, o.encode())
    # NOTE: for `min_read/write_recency_for_promote` is non-zero, cache tier
    # should be empty
    for o in object_keys:
        assert pool_has_object(base_ctx, o)
        assert not pool_has_object(cache_ctx, o)

    # try promote something to cache
    promote_objects = {f'test-obj_{i}' for i in (2, 4, 6)}
    LOG.debug(f'Reading and promoting {promote_objects}...')
    for o in promote_objects:
        LOG.debug(f"Reading object '{o}': {base_ctx.read(o)}")
    # only the objects just read should have been promoted
    for o in object_keys:
        assert pool_has_object(cache_ctx, o) == (o in promote_objects)

    # try removing a promoted object, should no longer be in any pool
    remove_object = 'test-obj_4'
    assert remove_object in promote_objects
    LOG.debug(f"Removing object '{remove_object}'...")
    assert pool_has_object(cache_ctx, remove_object)
    LOG.debug(base_ctx.read(remove_object))
    LOG.debug(base_ctx.stat(remove_object))
    r = base_ctx.remove_object(remove_object)
    assert r
    # the removal must be visible through the normal (cache-aware) path:
    # stat() should now raise ObjectNotFound
    try:
        r = base_ctx.stat(remove_object)
        LOG.debug(r)
    except rados.ObjectNotFound:
        pass
    else:
        raise AssertionError(f"object '{remove_object}' still readable after removed")
    # wait before probing; presumably lets the removal propagate to both
    # tiers -- TODO confirm the delay is actually required
    sleep(5)
    LOG.debug(sorted(o.key for o in base_ctx.list_objects()))
    LOG.debug(sorted(o.key for o in cache_ctx.list_objects()))
    for o in object_keys:
        LOG.debug(f"Probing object '{o}' in cache...")
        assert pool_has_object(cache_ctx, o) == (o in promote_objects and o != remove_object)

    # cleanup
    for o in object_keys:
        # NOTE: `_ignore` invokes the lambda immediately, so late binding of
        # `o` is not a problem here
        _ignore(lambda: base_ctx.remove_object(o))
    LOG.debug(f'Listing objects in {base_pool}: {[o.key for o in base_ctx.list_objects()]}')
    LOG.debug(f'Listing objects in {cache_pool}: {[o.key for o in cache_ctx.list_objects()]}')
    # do_mon_cli('osd pool set', pool=cache_pool, var='target_max_objects', val='0')

def reset_perf():
    '''Reset OSD performance counters

    Output is captured (and discarded) rather than leaked to the terminal.

    TODO: adapt to multiple OSDs
    '''
    # explicit argv list instead of `'...'.split()`: the old form broke when
    # CEPH_EXECUTABLE contained whitespace
    subprocess.run(
            [CEPH_EXECUTABLE, 'daemon', 'osd.0', 'perf', 'reset', 'all'],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def get_perf() -> Dict[str, Any]:
    '''Get OSD performance counter

    @returns parsed JSON dict from `ceph daemon osd.0 perf dump`

    TODO: adapt to multiple OSDs
    '''
    # explicit argv list instead of `'...'.split()`: the old form broke when
    # CEPH_EXECUTABLE contained whitespace
    cp = subprocess.run(
            [CEPH_EXECUTABLE, 'daemon', 'osd.0', 'perf', 'dump'],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return json.loads(cp.stdout)
def get_perf_cache_hit() -> int:
    '''Get OSD performance counter for op_cache_hit'''
    perf = get_perf()
    return perf['osd']['op_cache_hit']

# Run the (slow) probe-method self-test only when explicitly requested via
# WITH_TESTS; the trailing commented `or True` is a dev toggle to force it on.
if os.getenv('WITH_TESTS'):# or True:
    test_probe_method()

######## ycsb ########

# One trace line of the YCSB `basic` client: "<OP> <table> <key> [ <fields> ]"
__ycsb_entry_regex = re.compile(
    r'(?P<op>INSERT|READ|UPDATE) (?P<table>\S+) (?P<okey>\S+) '
    r'\[ (?P<fields>.+?) ?\]'
)
def __parse_ycsb_fields(fields: str, fieldnameprefix: str) -> Iterable[Tuple[int, bytes]]:
    '''Parse the field list printed by the YCSB `basic` client.

    @param fields raw fields string
    @param fieldnameprefix the `fieldnameprefix` YCSB argument
    @returns generator of (field_id, new_data) pairs
    '''
    prefix = fieldnameprefix
    # drop the leading prefix, then split on ' <prefix>' boundaries so every
    # chunk has the shape '<id>=<data>'
    for chunk in fields[len(prefix):].split(f' {prefix}'):
        field_id, data = chunk.split('=', 1)
        yield int(field_id), data.encode('utf-8')
def test_ycsb(base_workload_profile: str='workloada', **kwargs):
    '''Test cache with YCSB workload

    The same YCSB-generated trace drives two phases:
    1. prefill -- replay operations (promote throttle opened) until the cache
       agent starts flushing, taken as the sign that the cache has filled;
    2. effective run -- replay up to `operationcount` further operations and
       collect cache-hit statistics for those only.

    @param [base_workload_profile='workloada'] profile file under `workloads/`
    @param **kwargs supplied to YCSB as `-p key=value`
    '''
    LOG.info(f'Running YCSB ({base_workload_profile} {kwargs})...')
    base_ctx = cluster.open_ioctx(base_pool)
    cache_ctx = cluster.open_ioctx(cache_pool)

    # mirror the YCSB parameters we need locally to reconstruct object layout
    fpx: str = kwargs.get('fieldnameprefix', 'field')
    if len(fpx) == 0:
        # an empty prefix would make the field-splitting in
        # __parse_ycsb_fields degenerate
        raise ValueError('fieldnameprefix must not be empty')
    fcnt: int = kwargs.get('fieldcount', 10)
    flen: int = kwargs.get('fieldlength', 100)
    opcnt: int = kwargs.get('operationcount', 1000)

    def __get_ycsb_cli_args(args=kwargs): # type: ignore
        # NOTE: the mutable default is intentional -- `args` merely defaults
        # to the captured `kwargs` and is never mutated here
        return (
            f'-P {os.path.join(YCSB_WORKLOAD_DIR, base_workload_profile)} ' +
            ' '.join(f'-p {k}={v}' for k, v in args.items())
        )

    def __load():
        # each matched INSERT line becomes one RADOS object of fcnt*flen
        # bytes, each field written at offset field_id*flen
        cp = subprocess.run(f'{YCSB_BIN} load basic {__get_ycsb_cli_args(kwargs)}'.split(),
                stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
        #LOG.debug(f'Got generated load:====\n{cp.stdout}\n====')
        for l in tqdm(cp.stdout.splitlines(), desc='Inserting YCSB load'):
            m = __ycsb_entry_regex.match(l)
            if m is None:
                continue
            #LOG.debug(f'Matched load line: {m.groupdict()}')
            #assert m.group('op') == 'INSERT'
            odata = bytearray(fcnt * flen)
            for i, d in __parse_ycsb_fields(m.group('fields'), fpx):
                off = int(i) * flen
                #assert(len(d) == flen)
                odata[off : off+flen] = d
            #LOG.debug(f'Matched load data: {odata}')
            base_ctx.write_full(m.group('okey'), bytes(odata))

    # ycsb load, push objects to pool
    __load()

    def __run() -> Tuple[int, int, int, int]:
        '''
        NOTE: starts a new perf counter cycle, previous stats are reset
        @returns (total_cache_hits, total_ops, effective_cache_hits, effective_ops)
        '''
        args = kwargs.copy()
        # HACK: create more than enough trace for we don't know how much prefill
        # is gonna cost, but not too much otherwise YCSB may take a long while
        trace_samples = int(1e7)
        args['operationcount'] = trace_samples
        cp = subprocess.run(f'{YCSB_BIN} run basic {__get_ycsb_cli_args(args)}'.split(),
                stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
        #LOG.debug(f'Got generated run:====\n{cp.stdout}\n====')
        total_ops = 0
        # noneff_ops == 0 doubles as "still in prefill phase"; once prefill
        # ends it records how many ops the prefill consumed
        noneff_ops = 0
        noneff_cache_hits = 0
        reset_perf()
        # HACK: unlocked promote throttle for shortest possible prefill stage
        do_mon_cli('config set', who='osd.0', name='osd_tier_promote_max_objects_sec', value='0')
        pbar = tqdm(desc='Prefilling cache', total=TARGET_MAX_OBJECTS*DIRTY_HIGH_RATIO)
        for l in cp.stdout.splitlines():
            m = __ycsb_entry_regex.match(l)
            if m is None:
                continue
            total_ops += 1
            #LOG.debug(f'Matched run line: {m.groupdict()}')
            op: Literal['READ', 'UPDATE'] = m.group('op') # type: ignore
            okey, fds = m.group('okey'), m.group('fields')
            if op == 'READ':
                #assert fds == '<all fields>'
                base_ctx.read(okey)
            elif op == 'UPDATE':
                # replay the single-field update at the field's offset
                update_data = list(__parse_ycsb_fields(fds, fpx))
                #assert len(update_data) == 1
                off, d = update_data[0][0] * flen, update_data[0][1]
                base_ctx.write(okey, d, off)
            else:
                raise ValueError(f'unhandled op {op}')
            # prefill phase
            if noneff_ops == 0:
                # update pbar
                if total_ops % 1000 == 0:
                    pbar.n = cache_ctx.get_stats()['num_objects']
                    pbar.refresh()
                # check if cache is filled: a non-zero agent_flush counter
                # means the cache agent has started flushing
                if total_ops % 5000 == 0 and get_perf()['osd']['agent_flush'] != 0:
                    # NOTE: 100 is a nice balance with agent ops 10/20
                    do_mon_cli('config set', who='osd.0', name='osd_tier_promote_max_objects_sec', value='100')
                    noneff_ops = total_ops
                    noneff_cache_hits = get_perf_cache_hit()
                    pbar.close()
                    pbar = tqdm(desc='Running effective trace', total=min(opcnt, trace_samples-noneff_ops))
            # stat run phase
            else:
                pbar.update()
                # stop if collected enough effective ops
                if total_ops - noneff_ops == opcnt:
                    pbar.close()
                    break
        total_cache_hits = get_perf_cache_hit()
        if noneff_ops == 0:
            # the trace ran out before prefill completed: every op counts as
            # non-effective, so the effective numbers below come out as zero
            noneff_ops = total_ops
            noneff_cache_hits = total_cache_hits
        return (
            total_cache_hits, total_ops,
            total_cache_hits - noneff_cache_hits, total_ops - noneff_ops
        )

    # actual run for statistics
    total_hits, total_ops, eff_hits, eff_ops = __run()
    LOG.info(f'Total raw cache hit count is {total_hits} / {total_ops} '
            f'({total_hits/total_ops*100:.4f}%)')
    if eff_ops == 0:
        LOG.warning(f'Trace ended before cache is prefilled!')
    else:
        LOG.info(f'Total effective cache hit count is {eff_hits} / {eff_ops} '
                f'({eff_hits/eff_ops*100:.4f}%)')
    cache_fill = cache_ctx.get_stats()['num_objects']
    LOG.info(f'Final cache fill ratio is {cache_fill} / {TARGET_MAX_OBJECTS} '
            f'({cache_fill/TARGET_MAX_OBJECTS*100:.4f}%)')

# Run the actual experiment: load TOTAL_OBJECTS records, then replay the
# configured request distribution and report cache-hit statistics.
test_ycsb(
    base_workload_profile='workloada',
    requestdistribution=REQUEST_DISTRIBUTION,
    recordcount=TOTAL_OBJECTS,
    operationcount=OPERATION_COUNT,
)

# cleanup: dump final OSD state for debugging, then delete the test pools
# unless LEAVE_TEST_POOLS asks to keep them for inspection
LOG.debug(do_mon_cli('osd dump'))
if not os.getenv('LEAVE_TEST_POOLS'):
    LOG.info('Cleaning up...')
    cleanup_test_pools()
