
import os
import re
import sys
import glob

import cv2

from project.data import update_labels, setup
from util_my import replace_content

# Directory two levels above this file (presumably the repository root);
# append it so sibling packages resolve when this module runs directly.
# NOTE(review): this runs AFTER the `project` / `util_my` imports above,
# so those must already be importable some other way -- confirm.
parent = os.path.dirname(os.path.dirname(__file__))
sys.path.append(parent)

def refactor(task='', data_dir='', github_dir=''):
    """Patch a local yolov5 checkout so its scripts cooperate with this app.

    Regex-rewrites yolov5's ``train.py``, ``val.py`` and
    ``detect.py``/``predict.py`` in place so that they:

    - poll a queue (``opt.main`` / ``main_``) for a stop request and bail
      out early, releasing the model;
    - push per-batch progress and metrics into a ``sub`` queue;
    - free the model and empty the CUDA cache when a run finishes.

    Every substitution is guarded with a ``... not in content`` check so
    re-running on an already patched checkout is a no-op.  Returns
    ``(git_dir, code_dir)`` and appends ``code_dir`` to ``sys.path``.
    """

    git_name = 'yolov5'

    git_dir, code_dir = setup(git_name=git_name, github_dir=github_dir)

    # NOTE(review): `True or ...` short-circuits the existence check and
    # always redirects to the .github checkout -- confirm this is meant to
    # stay and is not leftover debugging.
    if True or not os.path.exists(code_dir):
        code_dir = os.path.join(os.path.dirname(parent), '.github', git_name)
        git_dir = code_dir

        # Task chosen explicitly or inferred from the data directory name.
        is_cls = task == 'classification' or 'cls' in data_dir
        is_seg = task == 'segmentation' or 'seg' in data_dir
        is_det = task == 'detection' or 'det' in data_dir

        # Snippet injected into yolov5: non-blocking poll of the main queue
        # for a stop request; on stop, echo it back, free the model and
        # return placeholder metrics.
        stop_content = '''
        try:
            if opt.main.get(block=False).get('stop', None):
                opt.main.put({'stop':True})
                release_model(model)
                return [0,0,0,0], [], []
        except:
            pass'''

        # Helper injected at module level of the patched scripts: delete the
        # model, force a GC pass and release cached CUDA memory.
        release_model = '''
        def release_model(model):
            try:
                del model
                import gc
                gc.collect()
                torch.cuda.empty_cache()
            except:
                pass
        '''.replace('\n' + ' ' * 8, '\n')

        def replace_stop(content, pattern='(\n( *)callbacks.run)'):
            # Insert `stop_content` (re-indented to the matched line's
            # indent) in front of every `pattern` match; idempotent.

            def match_pattern(m):
                stop_ = re.sub(f'\n' + ' ' * 8, f'\n{m[2]}', stop_content)
                return stop_ + f'\n{m[1]}' if stop_ not in content else m[0]

            if stop_content not in content:
                content = re.sub(pattern, match_pattern, content)
            return content

        def replace_train(content):
            # Rewrite train.py: stop polling, opt plumbing, progress queue.

            content = replace_stop(content)
            content = replace_stop(content, pattern='(\n( *)# (Dataloaders|Model))')

            # Comment out environment checks / working-directory handling
            # and keep hyp as a plain path instead of re-validating it.
            content = re.sub(', WorkingDirectory\(', ':#, #WorkingDirectory(', content)
            content = re.sub(' check_requirements\(', ' #check_requirements(', content)
            content = re.sub('check_yaml\(opt.hyp\)', r'opt.hyp', content)
            # Log/save a queue-free copy `opt_` of opt (created below),
            # since queue objects are not serializable.
            content = re.sub('([:,]) vars\(opt\)', r'\1 vars(opt_)', content)
            content = re.sub('\(\)((\n *)return results)', r'()\2release_model(model)\1', content)
            content = re.sub('(\n\ndef)', release_model + r'\1', content, 1) if 'def release_model' not in content else content
            content = re.sub('((not evolve:|run settings)(\n *))(?=yaml_save)', 
                r'\2\3opt_ = argparse.Namespace(**vars(opt))\3opt_.main = None\3opt_.sub = None\3', content)

            # Per-task training-loss fields pushed to the sub queue.
            if is_cls:
                loss = '''
                'tloss': tloss,
                'vloss': vloss,'''
            elif is_det:
                loss = '''
                'train/box_loss': loss_items[0].item(),
                'train/obj_loss': loss_items[1].item(),
                'train/cls_loss': loss_items[2].item(),
                'targets': targets.shape[0],'''
            elif is_seg:
                loss = '''
                'train/box_loss': loss_items[0].item(),
                'train/seg_loss': loss_items[1].item(),
                'train/obj_loss': loss_items[2].item(),
                'train/cls_loss': loss_items[3].item(),
                'targets': targets.shape[0],'''

            # TODO tqdm [<47:18, 405.50s/it] [<35:49, 358.27s/it]
            # Progress snippet: report losses plus tqdm timing/batch state.
            progress = f'''
            if opt.sub:
                opt.sub.put({{{loss}
                    'epoch': epoch,
                    'train_time': pbar.last_print_t - pbar.start_t,
                    'total_batch': pbar.total,
                    'current_batch': pbar.n,
                    'gpu_mem': mem,
                }})'''
            # Ensure the patched val() call receives opt.
            if not re.search('opt=opt,\n(( *)dataloader=)', content):
                content = re.sub('(( *)dataloader=)', r'\2opt=opt,\n\1', content)
            # Drop single_cls keyword lines entirely.
            content = re.sub('.*=single_cls,\n', '', content)
            # Disable AMP when free GPU memory is below 4 GiB.
            # NOTE(review): the injected lowercase `false` is not a Python
            # name -- this likely should be `False`; confirm and fix the
            # template.
            content = re.sub('(check_amp\(model\)) *#', r"\1 if torch.cuda.mem_get_info(torch.device('cuda:0'))[0] > 2 ** 30 * 4 else false #", content)
            
            content = re.sub("(\n( {8})# Scheduler)", 
                stop_content.replace('\n', '\n' + ' ' * 4) + progress + r'\n\1', content) if progress not in content else content

            return content

        def replace_val(content):
            # Rewrite val.py: stop polling, opt parameter, metric reporting.

            content = replace_stop(content)
            # Per-task validation metrics pushed to the sub queue.
            if is_cls:
                val = '''
                'loss': loss,
                'top1': top1,
                'top5': top5,'''
            elif is_det:
                val = '''
                'seen': seen,
                'instances': nt.sum(),
                'precision': mp,
                'recall': mr,
                'map50': map50,
                'map': map,'''
            elif is_seg:
                val = '''
                'seen': seen,
                'instances': nt.sum(),
                'mp_bbox': mp_bbox,
                'mr_bbox': mr_bbox,
                'map50_bbox': map50_bbox,
                'map_bbox': map_bbox,
                'mp_mask': mp_mask,
                'mr_mask': mr_mask,
                'map50_mask': map50_mask,
                'map_mask': map_mask,'''

            progress = f'''
            if opt.sub:
                opt.sub.put({{{val}
                    'val_time': pbar.last_print_t - pbar.start_t,
                }})'''.replace(' ' * 12, ' ' * 4)

            content = re.sub(' check_requirements\(', ' #check_requirements(', content)
            # Add an `opt=None` parameter between model= and dataloader=.
            content = re.sub('(( *)model=None,\n)(( *)dataloader=None,\n)', r'\1\2opt=None,\n\3', content)
            content = re.sub('(\n\ndef)', release_model + r'\1', content, 1) if 'def release_model' not in content else content
            content = re.sub("(\n *return.*?\n*def)", 
                stop_content.replace('\n' + ' ' * 4, '\n') + progress + r'\n\1', content) if progress not in content else content

            return content

        def replace_infer(content):
            # Rewrite detect.py/predict.py: main_/sub args, result reporting.

            # Per-task payload; seg additionally ships polygon lists.
            if is_cls:
                out = "'prob': prob" 
            elif is_det:
                out = "'det': det"
            elif is_seg:
                out = "'segj': [seg.tolist() for seg in segments] if len(det) else [],\n" + ' ' *16 + "'det': det"
    
            progress = f'''
            if sub:
                sub.put({{
                    {out}.to(torch.device('cpu')).numpy().tolist(),
                    'idx': s[6:].split('/')[0],
                    'names': names,
                    'path': path,
                    'im': im0s,
                }})'''.replace(' ' * 12, ' ' * 8)

            content = re.sub(' check_requirements\(', ' #check_requirements(', content)
            # Add main_/sub keyword parameters after vid_stride.
            content = re.sub('(( *)vid_stride=1.*\n)', r'\1\2main_=None,\n\2sub=None\n', content) if 'sub=None' not in content else content

            content = re.sub('((\n *)if update:)', r'\2release_model(model)\1', content) if 'release_model(model)' not in content else content

            content = re.sub("(\n *# Print time)", 
                stop_content + progress + r'\n\1', content) if progress not in content else content

            content = re.sub('(\n\ndef)', release_model + r'\1', content, 1) if 'def release_model' not in content else content

            return content

        # Patch one task sub-directory ('' means detection at the repo root).
        for name in (['classify'] if is_cls else []) + \
                    (['segment'] if is_seg else []) + \
                    ([''] if is_det else []):

            # train/val read the queue via opt.main; detect/predict take a
            # bare `main_` argument, so toggle the injected snippet's name
            # before and after each group of files.
            stop_content = stop_content.replace('main_', 'opt.main')

            # replace_content presumably reads the file, applies the
            # callback and writes the result back -- see util_my.
            replace_content(os.path.join(git_dir, name, 'train.py'), replace_train)

            replace_content(os.path.join(git_dir, name, 'val.py'), replace_val)

            stop_content = stop_content.replace('opt.main', 'main_')

            replace_content(os.path.join(git_dir, name, 'detect.py' if is_det else 'predict.py'), replace_infer)

    # Make the (patched) yolov5 package importable.
    sys.path.append(code_dir)

    return git_dir, code_dir

def create_labels(data_dir='', names='', split='1::'):
    """Convert .xml/.json annotations under *data_dir* to YOLO .txt labels
    and write a yolov5 ``data.yaml``.

    Parameters
    ----------
    data_dir : dataset root; if it ends with ``images`` the labels are
        written to the sibling ``labels`` directory (yolov5 layout).
    names : iterable of known class names; names discovered in annotation
        files are added to this set.
    split : train/val split spec forwarded to ``update_labels``.
    """
    # Mirror yolov5's images/ -> labels/ directory convention.
    if data_dir.endswith(os.path.sep + 'images'):
        labels_dir = data_dir.replace(os.path.sep + 'images', os.path.sep + 'labels')
        os.makedirs(labels_dir, exist_ok=True)
    else:
        labels_dir = data_dir

    names = set(names)  # `set(x) or set()` was redundant  #TODO move file
    for xml_path in glob.glob(os.path.join(data_dir, '**', '*.xml'), recursive=True):
        label_path = os.path.join(labels_dir, xml_path.replace('.xml', '.txt').replace(labels_dir+os.path.sep, ''))
        # Skip labels that are newer than their annotation file.
        if os.path.exists(label_path) and os.stat(xml_path).st_mtime < os.stat(label_path).st_mtime:
            continue
        # xml2txt copies its `names` argument and returns the extended set
        # (or None on parse failure); previously the return was discarded,
        # so discovered class names never reached data.yaml.
        names = xml2txt(xml_path, label_path, names) or names

    for json_path in glob.glob(os.path.join(data_dir, '**', '*.json'), recursive=True):
        label_path = os.path.join(labels_dir, json_path.replace('.json', '.txt').replace(labels_dir+os.path.sep, ''))
        if os.path.exists(label_path) and os.stat(json_path).st_mtime < os.stat(label_path).st_mtime:
            continue
        names = json2txt(json_path, label_path, names) or names

    train_dir, val_dir = update_labels(data_dir=data_dir, split=split)

    yaml_path = os.path.join(data_dir, 'data.yaml')
    # NOTE(review): class ids are derived from list(set) ordering, which is
    # not stable across processes -- confirm ids are re-derived identically
    # wherever these labels are consumed.
    data = {
        'names': list(names),
        'nc': len(names),
        'train': train_dir,
        'val': val_dir,
    }

    import yaml
    with open(yaml_path, 'w') as f:  # was an unclosed file handle
        yaml.dump(data, f)

def xml2txt(xml_path:str, label_path:str, names):
    '''
    Convert one Pascal-VOC style .xml annotation to a YOLO .txt label file.

    ``<object>/<bndbox>`` entries become normalized ``id xc yc w h`` rows;
    ``<objects>/<points>`` entries ("x,y;x,y;...") become normalized polygon
    rows ``id x1 y1 x2 y2 ...``.  Returns the class-name set extended with
    any names found, or None when the image size cannot be parsed.

    Expected layout:
    <annotation>
        <filename>1.jpg</filename>
        <size>
            <width></width>
            <height></height>
            <depth></depth>
        </size>
        <segmented>0</segmented>
        <object>
            <name></name>
            <bndbox>
                <xmin></xmin>
                <ymin></ymin>
                <xmax></xmax>
                <ymax></ymax>
            </bndbox>
        </object>
    </annotation>
    '''

    from xml.dom.minidom import Element, Node, CDATASection, parse
    tree:Element = parse(xml_path)

    def get_data(tree:Element, name):
        # Text content of the first matching tag (first child node's data).
        node:Node = tree.getElementsByTagName(name)[0]
        section:CDATASection = node.childNodes[0]
        return section.data

    try:
        width = int(get_data(tree, 'width'))
        height = int(get_data(tree, 'height'))
    except (IndexError, ValueError, AttributeError):  # was a bare except
        print('Could not parse size in ', xml_path)
        return

    txt_content = ''
    names = set(names)  # copy: callers must use the returned set
    for obj in tree.getElementsByTagName('object'):
        name = get_data(obj, 'name')
        names.add(name)
        # NOTE(review): ids from set ordering are only stable within this
        # process -- confirm consumers re-derive them identically.
        id_ = list(names).index(name)
        xmin = int(get_data(obj, 'xmin'))
        ymin = int(get_data(obj, 'ymin'))
        xmax = int(get_data(obj, 'xmax'))
        ymax = int(get_data(obj, 'ymax'))
        # Normalized center/size, YOLO convention.
        xc = (xmax + xmin) / 2 / width
        yc = (ymax + ymin) / 2 / height
        w = (xmax - xmin) / width
        h = (ymax - ymin) / height
        # Crude duplicate suppression: skip a box only when all four of its
        # numbers already appear somewhere in the output text.
        if not (str(xc) in txt_content and str(yc) in txt_content and
            str(w) in txt_content and str(h) in txt_content):
            txt_content += f'{id_} {xc} {yc} {w} {h}\n'

    for obj in tree.getElementsByTagName('objects'):
        name = get_data(obj, 'name')
        names.add(name)
        id_ = list(names).index(name)
        txt_content += str(id_)
        points = str(get_data(obj, 'points')).split(';')

        for point in points:
            if not point: continue
            txt_content += ' ' + str(float(str(point).split(',')[0]) / width)
            txt_content += ' ' + str(float(str(point).split(',')[1]) / height)

        txt_content += '\n'

    with open(label_path, 'w') as f:  # was an unclosed file handle
        f.write(txt_content)

    return names

def json2txt(json_path:str, label_path:str, names):
    '''
    Convert one labelme-style .json annotation to a YOLO polygon label file.

    Each shape becomes a ``id x1 y1 x2 y2 ...`` row with coordinates
    normalized by the image size.  Returns the class-name set extended with
    any labels found, or None when the image size keys are missing.

    Expected layout:
    {
        "shapes": [
            {
                "label": "",
                "points": [
                    [1, 1],
                ],
                "shape_type": "polygon",
            }
        ],
        "imagePath": "1.jpg",
        "imageHeight": 1,
        "imageWidth": 1
    }
    '''
    import json
    with open(json_path) as f:  # was an unclosed file handle
        label = json.load(f)

    try:
        width = label['imageWidth']
        height = label['imageHeight']
    except KeyError:  # was a bare except
        print('Could not parse size in ', json_path)
        return

    txt_content = ''
    names = set(names)  # copy: callers must use the returned set
    for obj in label['shapes']:
        names.add(obj['label'])
        id_ = list(names).index(obj['label'])
        txt_content += str(id_)
        points = obj['points']

        for point in points:
            if not point: continue
            txt_content += ' ' + str(float(point[0]) / width)
            txt_content += ' ' + str(float(point[1]) / height)

        txt_content += '\n'

    with open(label_path, 'w') as f:  # was an unclosed file handle
        f.write(txt_content)

    return names

def txt2obj(txt_path, shape=(1, 1)):
    """Parse a YOLO label/result .txt file into a list of dicts.

    The first line may be a ``width height path`` header (detected by the
    presence of a lowercase letter); otherwise the image size falls back to
    *shape* (``(height, width)``).  Each following line is
    ``idx xc yc w h [score]`` in normalized coordinates, converted here to
    pixel xmin/ymin/xmax/ymax.

    Returns [] for a missing, unreadable or empty file; otherwise the first
    dict holds width/height/path and the rest describe one object each.
    """
    results = []
    try:
        labels = open(txt_path).readlines()
    except OSError:  # was a bare except
        return results
    if not labels:
        # An empty file previously crashed with IndexError on labels[0].
        return results

    height, width = shape
    items = labels[0].split(' ')
    has_header = bool(re.search('[a-z]', labels[0]))
    if has_header:
        width = int(items[0])
        height = int(items[1])
        path = ' '.join(items[2:]).strip()
    else:
        # No header: keep the caller-supplied shape (it was previously
        # discarded in favor of a hard-coded 1x1).
        path = ''
    # NOTE(review): the first line is always consumed as the header even
    # when no header is present -- confirm writers always emit one.
    results.append({
        'width': width,
        'height': height,
        'path': path,
    })
    for label in labels[1:]:
        items = label.strip().split(' ')
        idx, xc, yc, w, h = items[:5]

        # Optional sixth column: detection confidence.
        score = float(items[5]) if len(items) > 5 else 0

        xmin = int((float(xc) - float(w) / 2) * width)
        ymin = int((float(yc) - float(h) / 2) * height)
        xmax = int((float(xc) + float(w) / 2) * width)
        ymax = int((float(yc) + float(h) / 2) * height)

        results.append({
            'idx': idx,
            'xmin': xmin,
            'ymin': ymin,
            'xmax': xmax,
            'ymax': ymax,
            'score': score,
            'width': width,
            'height': height,
            'path': path,
        })
    return results

def txt2poly(txt_path, im_path:str, color=(255, 255, 255)):
    """Fill the polygons from a YOLO-segmentation label file onto the image
    at *im_path* and return the resulting array.

    Each label line is ``class x1 y1 x2 y2 ...`` with coordinates normalized
    to the image size; the class column is ignored here.
    """
    import numpy

    canvas = cv2.imread(im_path)
    for line in open(txt_path).read().strip().split('\n'):
        coords = line.split(' ')[1:]
        # Alternating columns: even indices are x (scale by width, shape[1]),
        # odd indices are y (scale by height, shape[0]).
        pixels = [
            int(canvas.shape[(idx + 1) % 2] * float(value))
            for idx, value in enumerate(coords)
        ]
        if coords:
            cv2.fillPoly(canvas, pts=[numpy.array(pixels).reshape(-1, 2)], color=color)
    return canvas

def mask2txt(path:str, thredsholds=[[0,1]]):
    """Vectorize a grayscale mask image into YOLO-segmentation polygon rows.

    For each ``[low, high]`` pair in *thredsholds* (name kept for interface
    compatibility; the default list is never mutated) the mask is binarized
    and its contours are emitted as ``class_idx x1 y1 x2 y2 ...`` rows with
    coordinates normalized to the image size.  The text is written next to
    the image as ``<name>.txt`` and also returned.
    """
    im = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    content = ''
    for i, threshold in enumerate(thredsholds):
        # NOTE(review): `im` is overwritten each iteration, so later
        # thresholds act on the previous binary image, not the original
        # mask -- confirm this chaining is intended.
        _, im = cv2.threshold(im, threshold[0], threshold[1], cv2.THRESH_BINARY)
        contours, _ = cv2.findContours(im, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        for contour in contours:
            content += str(i)
            for point in contour:
                content += ' ' + str(round(point[0][0] / im.shape[1], 9))
                content += ' ' + str(round(point[0][1] / im.shape[0], 9))
            content += '\n'
    # Swap only the extension: the previous str.replace on the extension
    # substring also corrupted paths like 'png/a.png' -> 'txt/a.txt'.
    txt_path = os.path.splitext(path)[0] + '.txt'
    with open(txt_path, 'w') as f:  # was an unclosed file handle
        f.write(content)
    return content