'''

Development util functions
=

Variables
-

:var:`parent` - `str` - the parent path of the development repository
:var:`github_dir` - `str` - the github directory path under the development repository
:var:`github_urls` - `dict[str, str]` - the github repositories' urls

'''

import os
import re
import sys
import glob
import queue
import shutil
import subprocess

import cv2


# Resolve the development parent directory for the current runtime:
# Google Colab mounts user code under /content, Kaggle under /kaggle/working.
if os.path.exists('/content'):
    parent = '/content'
elif os.path.exists('/kaggle/working'):
    parent = '/kaggle/working'
else:
    # local checkout: make this file's directory importable and assume the
    # development root is three levels above this file
    sys.path.append(os.path.dirname(__file__))
    parent = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))

# directory under `parent` where third-party repositories are cloned
github_dir = os.path.join(parent, '.github')
# supported upstream repositories, keyed by short project name
github_urls = {
    'timm': 'https://github.com/rwightman/pytorch-image-models',
    'mmcv': 'https://github.com/open-mmlab/mmcv',
    'mmpose': 'https://github.com/open-mmlab/mmpose',
    'mmdet': 'https://github.com/open-mmlab/mmdetection',
    'mmcls': 'https://github.com/open-mmlab/mmclassification',
    'mmdeploy': 'https://github.com/open-mmlab/mmdeploy',
    'yolov5': 'https://github.com/ultralytics/yolov5',
    'yolov7': 'https://github.com/WongKinYiu/yolov7',
    'insightface': 'https://github.com/deepinsight/insightface',
}

def save_config(configs, task, root):
    '''
    Sort `configs[task]` by FLOPs and write all configs to `_configs.py`.

    :param:`configs` - `dict` - mapping of task name to {config: attrs}
    :param:`task` - `str` - the task whose configs are sorted in place
    :param:`root` - `str` - directory where `_configs.py` is written
    '''
    import os
    import re
    import json

    def flops_key(item):
        # Each config's attrs may contain a key like 'flops_640x640' whose
        # value ends in a unit suffix (7 chars, e.g. 'GFLOPs' + digit) that
        # is stripped before the float conversion. Entries without a flops
        # key sort last (1e9 sentinel).
        match = re.search(r'flops_\d+x\d+', str(dict(item[1]).keys()))
        return float(item[1][match[0]][:-7]) if match else 1e9

    # renamed the sort key function (was `key`, shadowed by the former dict
    # comprehension's loop variable)
    configs[task] = dict(sorted(dict(configs[task]).items(), key=flops_key))

    # context manager ensures the file handle is closed (was a bare open().write())
    with open(os.path.join(root, '_configs.py'), 'w') as f:
        f.write(f'configs = {json.dumps(configs, indent=4)}')

def get_configs(task):
    '''
    Return the config names for `task` from the sibling `_configs` module.

    Names are stripped of a leading directory prefix ('dir/') and a trailing
    file extension ('.py').

    :param:`task` - `str` - key into the `configs` dict of `_configs`
    :return: `list[str]` - cleaned config names
    '''
    # temporarily make this file's directory importable for `_configs`
    sys.path.append(os.path.dirname(__file__))
    from _configs import configs
    sys.path.remove(os.path.dirname(__file__))

    # raw strings for the regex patterns (were plain strings with '\w', '\.')
    keys = [re.sub(r'^\w+/', '', re.sub(r'\.\w+$', '', c)) for c in configs[task]]
    return keys

def get_project(cfg_id):
    '''
    Map a config id to the project it belongs to.

    :param:`cfg_id` - `str` - config identifier
    :return: `str` - 'yolov5' or 'timm' when matched, otherwise `None`
    '''
    for project in ('yolov5', 'timm'):
        if project in cfg_id:
            return project
    return None

def get_projects():
    '''Return the project names currently supported by this toolkit.'''
    supported = ('yolov5',)
    return list(supported)

def get_formats():
    '''Return the supported model export formats.'''
    formats = ('torchscript', 'onnx')
    return list(formats)

def get_run(action='', task='', data_dir='', cfg_id='', github_dir='', 
    device='', weights='', out_dir='', logger=None,
    main_queue=None, sub_queue=None, im_sz='',
    split='1::', epochs=0, batch_size=0, workers='8', 
    infer_dir='', conf_thres=0, frameworks='', **kwargs):
    '''
    Dispatch `action` ('train' | 'infer' | 'export' | 'config') to the
    `run` function of the matching `project.*` module, forwarding the
    relevant subset of keyword arguments. Unknown actions are silently
    ignored. See `main` for parameter documentation.
    '''

    # NOTE(review): leftover debug output
    print("get_run")
    path1 = os.path.dirname(__file__)
    print(path1)

    # make the `project` package (one level above this file) importable
    sys.path.append(os.path.dirname(os.path.dirname(__file__)))
    print(os.path.dirname(os.path.dirname(__file__)))

    # NOTE(review): `yolov5` is imported but only printed — presumably the
    # import itself has required side effects; confirm before removing
    import project.train as train
    import project.infer as infer
    import project.export as export
    import project.config as config
    import project.yolov5 as yolov5
    print(yolov5)

    if action == 'train':
        train.run(task=task, data_dir=data_dir, cfg_id=cfg_id, github_dir=github_dir,
            device=device, weights=weights, out_dir=out_dir, logger=logger,
            main_queue=main_queue, sub_queue=sub_queue, im_sz=im_sz,
            split=split, epochs=epochs, batch_size=batch_size, workers=workers, **kwargs)

    if action == 'infer':
        infer.run(task=task, data_dir=data_dir, cfg_id=cfg_id, github_dir=github_dir,
            device=device, weights=weights, out_dir=out_dir, logger=logger,
            main_queue=main_queue, sub_queue=sub_queue, im_sz=im_sz,
            infer_dir=infer_dir, conf_thres=conf_thres)

    if action == 'export':
        export.run(task=task, data_dir=data_dir, cfg_id=cfg_id, github_dir=github_dir,
            device=device, weights=weights, out_dir=out_dir, logger=logger,
            frameworks=frameworks)

    if action == 'config':
        config.run(task=task, data_dir=data_dir, cfg_id=cfg_id, github_dir=github_dir)       

def get_device(all_=False):
    '''
    Pick a CUDA device (or all of them) for running a job.

    :param:`all_` - `bool` - `False` - return every device id as '0,1,...'
        instead of the single device with the most free memory
    :return: comma-joined `str` ids when `all_`, an `int` device id when a
        GPU with more than 1 GiB free memory exists, otherwise `'cpu'`
    '''
    import torch  # BUGFIX: torch was referenced without being imported anywhere

    max_available_memory = 0
    max_device_id = -1

    device = ''
    for device_id in range(torch.cuda.device_count()):
        if all_:
            device += f'{device_id},'
            continue

        available_memory, _ = torch.cuda.mem_get_info(device_id)
        if available_memory > max_available_memory:
            max_available_memory = available_memory
            max_device_id = device_id

    device = device[:-1] if all_ else device  # drop the trailing comma
    # prefer the best GPU only when it has more than 1 GiB free; otherwise
    # fall back to the comma list (all_) or 'cpu'
    device = max_device_id if max_device_id >= 0 and max_available_memory / 2 ** 30 > 1 else device or 'cpu'
    return device

def get_devices(device=''):
    '''
    List the selectable compute devices.

    :param:`device` - a preselected device; when truthy it is returned as a
        string unchanged
    :return: `str` when `device` is given, otherwise `list[str]` of 'CPU'
        plus 'GPU:i' for every CUDA device with more than 1 GiB free memory
    '''
    import torch

    if device:
        return str(device)

    gpu_ids = range(torch.cuda.device_count())
    usable = [i for i in gpu_ids if torch.cuda.mem_get_info(i)[0] / 2 ** 30 > 1]
    return ['CPU'] + [f'GPU:{i}' for i in usable]

def get_batch_device(model, imgsz:str=''):
    '''
    Estimate the optimal batch size and GPU set for `model`.

    Runs forward passes at batch sizes 1 and 2, fits a first-degree
    polynomial of reserved GPU memory vs batch size, extrapolates each
    visible GPU's maximal batch size from its free memory, then picks the
    device subset that maximizes the total batch size.

    :param:`model` - a torch module; it is moved onto the selected GPU
    :param:`imgsz` - `str` - input size as 'HxW', e.g. '640x640'
    :return: `(batch_size, devices)` tuple ('devices' is a comma-joined
        `str` of ids), or the single int `0` when the model cannot be moved
        to the GPU
    '''
    import torch  # BUGFIX: torch was referenced without being imported anywhere
    import numpy

    device = torch.device(f'cuda:{get_device()}')
    try:
        model.to(device)
    except Exception:  # was a bare except; model too large / no CUDA
        return 0

    gb = 1 << 30  # bytes to GiB (1024 ** 3)
    y = []
    batch_sizes = [1, 2]
    for bs in batch_sizes:
        try:
            height, width = int(imgsz.split('x')[0]), int(imgsz.split('x')[1])
            inputs = torch.zeros(bs, 3, height, width).to(device)
            inputs.requires_grad = True
            # some models expose a lightweight forward_dummy for profiling
            if hasattr(model, 'forward_dummy'):
                model.forward_dummy(inputs)
            else:
                model(inputs)
            y.append(torch.cuda.memory_reserved(device) / gb)
            inputs = inputs.to(torch.device('cpu'))
            del inputs
        except Exception:  # was a bare except; keep best-effort semantics
            print(f'forward {bs} failed')
            torch.cuda.empty_cache()

    batch_sizes = batch_sizes[:len(y)]
    # reserved_mem ≈ slope * batch_size + intercept
    polynomial_fit = numpy.polyfit(batch_sizes, y, deg=1)

    max_batch_sizes = []
    for device_id in range(torch.cuda.device_count()):
        torch.cuda.empty_cache()
        device = torch.device(f'cuda:{device_id}')
        available_memory, _ = torch.cuda.mem_get_info(device)
        # invert the fit: batch size that would fill this GPU's free memory
        batch_size = int((available_memory / gb - polynomial_fit[1]) / polynomial_fit[0])
        max_batch_sizes.append(batch_size)

    print(max_batch_sizes)

    final_devices = []
    final_batch_size = 0
    max_total_batch_size = 0
    for current_max_batch_size in max_batch_sizes:
        # GPUs at least as capable as the current candidate
        devices = [j for j, max_batch_size in enumerate(max_batch_sizes)
                   if max_batch_size >= current_max_batch_size]

        # NOTE(review): dividing by len(devices)+1 keeps headroom below the
        # extrapolated maximum — confirm this margin is intended
        divided_batch_size = current_max_batch_size // (len(devices) + 1) * len(devices)
        total_batch_size = divided_batch_size * len(devices)

        if total_batch_size > max_total_batch_size:
            max_total_batch_size = total_batch_size
            final_devices = devices
            final_batch_size = divided_batch_size

    final_devices = ','.join(map(str, final_devices))

    return final_batch_size, final_devices

def main(action='', task='', data_dir='', cfg_id='', github_dir='',
    device='', weights='', out_dir='', logger=None,
    main_queue:queue.Queue=None, sub_queue:queue.Queue=None, im_sz='',
    split='1::', epochs=0, batch_size=0, workers='8',
    infer_dir='', conf_thres=0, frameworks='', root='', **kwargs) -> None:
    '''

    Development main function
    =

    Parameters
    -

    :param:`action` - `str` - train infer export configs
    :param:`task` - `str` - classification detection segmentation
    :param:`data_dir` - `str` - dataset directory
    :param:`cfg_id` - `str` - config id
    :param:`infer_dir` - `str` - inference directory

    :param:`device` - `str` - cpu or 0 or 0,1...
    :param:`out_dir` - `str` - save action results
    :param:`weights` - `str` - weights path
    :param:`workers` - `int` - `8` - pytorch dataloader workers
    :param:`batch_size` - `int` - `1` - batch_size
    :param:`epochs` - `int` - `1` - epochs
    :param:`split` - `str` - train val test split, like 8:1:1

    :param:`sub_queue` - `queue.Queue` - queue for sending data from sub thread to main thread, like "progress"
    :param:`main_queue` - `queue.Queue` - queue for sending data from main thread to sub thread, like "stop"
    :param:`logger` - `logs.Logger` - custom logger object for logging
    :param:`root` - `str` - development root path

    :param:`exist` - `bool` - `True` - yolov5 save_dir increment or not
    :param:`conf_thres` - `float` - `0.5` - yolov5 detection confident threshold

    '''

    root = root or os.path.dirname(os.path.dirname(__file__))
    basename = os.path.basename(data_dir)
    device = device or get_device(all_=True) # TODO get batch device
    out_dir = os.path.join(out_dir or os.path.join(root, 'downloads', 'out'), cfg_id, basename)
    os.makedirs(out_dir, exist_ok=True)

    # drop any stale CLI args and make the project / github code importable
    sys.argv = sys.argv[:1]
    sys.path.append(os.path.join(root, 'project', get_project(cfg_id)))
    sys.path.append(github_dir)

    try:
        get_run(action=action, task=task, data_dir=data_dir, cfg_id=cfg_id, github_dir=github_dir,
            device=device, weights=weights, out_dir=out_dir, logger=logger,
            main_queue=main_queue, sub_queue=sub_queue, im_sz=im_sz,
            split=split, epochs=epochs, batch_size=batch_size, workers=workers,
            infer_dir=infer_dir, conf_thres=conf_thres, frameworks=frameworks, **kwargs)

    except Exception as e:
        # best-effort: report the error to the UI thread when a queue is
        # available; otherwise the exception is swallowed (original contract)
        if sub_queue:
            sub_queue.put({'err': str(e)})

    # remove previous json, log, pth files, keeping only the most recently
    # accessed one of each kind
    for ext in ['json', 'log', 'pth']:
        filename = 'epoch' if ext == 'pth' else ''
        # BUGFIX: use the computed `filename` prefix (the pattern previously
        # contained a hard-coded placeholder and `filename` was unused)
        pathname = os.path.join(out_dir, f"{filename}*.{ext}")
        files = glob.glob(pathname)
        for extra in sorted(files, key=lambda x: os.stat(x).st_atime)[:-1]:
            os.remove(extra)

def pull_git(repo_name, force=False, checkout=True):
    '''
    Clone (or force-pull) one of the repositories in `github_urls` into
    `github_dir` and return its local path, or `None` on failure.

    :param:`repo_name` - `str` - key into `github_urls`
    :param:`force` - `bool` - `False` - pull and retry even if a clone exists
    :param:`checkout` - `bool` - `True` - run `git checkout .` afterwards

    NOTE(review): changes the process working directory via `os.chdir` and
    does not restore it.
    '''
    os.makedirs(github_dir, exist_ok=True)
    repo_url = github_urls[repo_name]
    save_dir = os.path.join(github_dir, repo_url.split('/')[-1])

    # TODO stderr
    # placeholder result with a non-empty stderr so the retry loop below
    # runs at least once
    class Class:
        stderr = b'...'
    pull_result = Class()

    # a directory with fewer than 2 entries is treated as a broken clone
    if os.path.exists(save_dir) and len(os.listdir(save_dir)) < 2:
        shutil.rmtree(save_dir)

    if not os.path.exists(save_dir):
        os.chdir(github_dir)
        input_ = f'git clone {repo_url}'
    elif force:
        os.chdir(save_dir)
        os.system('git checkout .')
        input_ = 'git pull'

    # retry the git command while it reports errors; stop when stderr is
    # empty, the clone exists (without force), or git says the target
    # directory already exists and is non-empty
    while pull_result.stderr and (not os.path.exists(save_dir) or force) and \
        'already exists and is not an empty directory' not in pull_result.stderr.decode():
        print(input_, repo_url, pull_result.stderr.decode())
        pull_result = subprocess.run(
            input_, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    else:
        # while/else: runs on normal loop exit (there is no break, so always)
        # and clears stderr so the success check below passes
        pull_result.stderr = ''

    if checkout:
        os.chdir(save_dir)
        os.system('git checkout .')

    # return the clone path only when it exists and no stderr remains
    save_dir = save_dir if os.path.exists(
        save_dir) and not pull_result.stderr else None

    return save_dir

def add_arg(args='', value=''):
    '''
    Add or update command-line flags in `sys.argv`.

    :param:`args` - `str` - space-separated flag names; `None` is a no-op
    :param:`value` - `str` - for each flag already present (with a truthy
        value) the following argv entry is replaced; absent flags are
        appended together with `value` when it is truthy. `None` is a no-op.
    '''
    if args is None or value is None:
        return
    for flag in args.split(' '):
        present = flag in sys.argv
        if present and value:
            sys.argv[sys.argv.index(flag) + 1] = value
        elif not present:
            extra = [flag]
            if value:
                extra.append(value)
            sys.argv.extend(extra)

def replace_content(path, func):
    '''
    Rewrite the UTF-8 text file at `path` in place as `func(old_content)`.

    Best-effort: I/O and decoding errors (and errors raised by `func`) are
    silently ignored, preserving the original contract.

    :param:`path` - `str` - path of the file to rewrite
    :param:`func` - `Callable[[str], str]` - content transformation
    '''
    try:
        # context managers close the handles (were bare open() calls);
        # except narrowed from a bare `except:` which also caught
        # KeyboardInterrupt/SystemExit
        with open(path, encoding='utf-8') as f:
            content = f.read()
        content = func(content)
        with open(path, 'w', encoding='utf-8') as f:
            f.write(content)
    except Exception:
        pass

def get_time(number):
    '''
    Format a duration in seconds as a compact string like '1d2h3m4s'.

    :param:`number` - `int` | `float` - duration in seconds
    :return: `str` - zero-valued units are omitted; `get_time(0)` is `''`
    '''
    day = number // 86400
    hour = (number - (day * 86400)) // 3600
    minute = (number - (day * 86400) - (hour * 3600)) // 60
    second = number - (day * 86400) - (hour * 3600) - (minute * 60)

    time_ = ''
    # BUGFIX: thresholds were `> 1`, which dropped singular units —
    # e.g. get_time(3600) and get_time(61) both returned ''
    time_ += f'{int(day)}d' if day >= 1 else ''
    time_ += f'{int(hour)}h' if hour >= 1 else ''
    time_ += f'{int(minute)}m' if minute >= 1 else ''
    time_ += f'{int(second)}s' if second >= 1 else ''

    return time_

def get_drawing_params(x1, y1, x2, y2, img_w, img_h):
    '''
    Derive cv2 drawing parameters from a box's area relative to the image.

    :param:`x1`,`y1`,`x2`,`y2` - box corner coordinates in pixels
    :param:`img_w`,`img_h` - image dimensions in pixels
    :return: `(thickness, fontScale)` - fontScale is floored at 0.7
    '''
    area_ratio = (x2 - x1) * (y2 - y1) / img_w / img_h
    font_scale = max(area_ratio, 0.7)
    pixel_size = int(font_scale * 20)
    line_thickness = pixel_size // 5 + 1
    return line_thickness, font_scale

def draw_object(img, x1, y1, x2, y2, img_w, img_h, label, color, score=0):
    '''
    Draw a bounding box with a filled label banner on `img` (in place).

    :param:`img` - the cv2 image (numpy array) to draw on
    :param:`x1`,`y1`,`x2`,`y2` - box corners; `img_w`,`img_h` - image size
    :param:`label` - `str` - object label; `color` - BGR tuple
    :param:`score` - `float` - `0` - when truthy, its first two decimals
        are appended to the label text
    '''
    line_width, scale = get_drawing_params(x1, y1, x2, y2, img_w, img_h)
    font = cv2.FONT_HERSHEY_SIMPLEX

    # e.g. score 0.87 -> '87' (two chars after '0.')
    text = f'{label} {str(score)[2:4] if score else ""}'

    img = cv2.rectangle(img, (x1, y1), (x2, y2), color, line_width)

    text_size, _ = cv2.getTextSize(text, font, scale, line_width)

    # filled banner behind the text, then the text in white
    cv2.rectangle(img, (x1, y1), (x1 + text_size[0], y1 + text_size[1]), color, -1)
    cv2.putText(img, text, (x1, y1 + text_size[1]), font, scale, (255, 255, 255))

def setup():
    '''
    Install requirements pinned to the local torch/CUDA build.

    Temporarily replaces the 'cu/torch' placeholder in requirements.txt with
    the installed CUDA and torch versions, runs `pip install -r`, then
    restores the original file content.
    '''
    import torch  # BUGFIX: torch was referenced without being imported anywhere

    root = os.path.dirname(os.path.dirname(__file__))
    requirements_path = os.path.join(root, 'requirements.txt')
    with open(requirements_path) as f:
        requirements = f.read()

    # NOTE(review): assumes a local version tag like '1.13.1+cu117'; a build
    # without '+' (CPU-only wheel) raises IndexError here — confirm upstream
    torch_version = torch.__version__.split('+')[0]
    cuda_version = torch.__version__.split('+')[1]

    patched = re.sub('cu/torch', f'{cuda_version}/torch{torch_version}', requirements)
    with open(requirements_path, 'w') as f:
        f.write(patched)

    os.system(f'pip install -r {requirements_path}')

    # restore the placeholder so the file stays environment-independent
    with open(requirements_path, 'w') as f:
        f.write(requirements)

# Undo the sys.path entry added for the local-development branch at the top
# of this file. BUGFIX: guard the removal — on the Colab/Kaggle branches the
# path was never appended and the unconditional remove raised ValueError.
if os.path.dirname(__file__) in sys.path:
    sys.path.remove(os.path.dirname(__file__))