import json
import os
import random
import shutil
import sys
from functools import reduce
from pathlib import Path

import cv2
import numpy as np
import yaml
from PIL import Image
from labelme import utils as U
from tqdm import tqdm


def view(data_dir='output', hikvision=False, hikvision_size=(1280, 720)):
    """Preview converted images with their CULane lane annotations drawn on top.

    Parameters
    ----------
    data_dir : str
        Folder (relative to cwd) containing `image/` and `annotation/` sub-folders.
    hikvision : bool
        If True, resize each preview to `hikvision_size` before display
        (original hikvision camera size).
    hikvision_size : tuple
        (width, height) used when `hikvision` is True.
    """
    root = Path('.')
    img_dir = root / data_dir / 'image'
    ann_dir = root / data_dir / 'annotation'
    assert os.path.isdir(img_dir), f'ERROR - {img_dir} not found'
    assert os.path.isdir(ann_dir), f'ERROR - {ann_dir} not found'

    for img_name in os.listdir(img_dir):
        # Annotation file is the image base name with extension `.lines.txt`
        ann_name = os.path.splitext(img_name)[0] + '.lines.txt'

        with open(ann_dir / ann_name, encoding='utf8') as f:
            raw_lines = f.readlines()
        # Each annotation line is "x0 y0 x1 y1 ..."; a non-positive x marks an
        # invalid sample point and is skipped
        lanes = [[(int(float(tok[k])), int(float(tok[k + 1])))
                  for k in range(0, len(tok), 2) if float(tok[k]) > 0]
                 for tok in (ln.split() for ln in raw_lines)]

        image = cv2.imread(str(img_dir / img_name))
        overlay = image.copy()

        # Draw lines for lanes
        for lane in lanes:
            cv2.polylines(overlay, np.int64([lane]), isClosed=False, color=(0, 255, 0), thickness=20)

        print(img_dir / img_name, image.shape)

        if hikvision:
            overlay = cv2.resize(overlay, hikvision_size, interpolation=cv2.INTER_AREA)
        cv2.imshow(img_name, overlay)

    cv2.waitKey(0)
    cv2.destroyAllWindows()


def labelbox2CULane() -> None:
    """Convert a labelbox export into CULane-style training data.

    Reads the single json file under ./labelbox/annotation and the matching
    images under ./labelbox/image, then writes into ./output:
      - image/         images resized to the CULane size
      - annotation/    per-image `<basename>.lines.txt` lane coordinates
      - segmentation/  per-image png masks painted with lane class ids 1..4
      - list/          train.txt and train_gt.txt index files
    """
    # The default size of CULane images
    culane_size = (1640, 590)  # width, height
    # Pixel value painted for lane class N is (N, N, N)
    colors = [(1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4)]

    # The path of labelbox-style annotation & images should be available and not empty
    project = Path('.')
    annotation = project / 'labelbox' / 'annotation'
    image = project / 'labelbox' / 'image'
    folder_assertion(annotation, image)

    # The labelbox annotation file (a single json export)
    labelbox_json = annotation / os.listdir(annotation)[0]
    print('LOADING -', labelbox_json)

    # Create new directory tree for converted CULane-style annotation & images
    output = project / 'output'
    converted_image = output / 'image'
    converted_annotation = output / 'annotation'
    segmentation = output / 'segmentation'
    list_folder = output / 'list'
    create_folder(output, converted_image, converted_annotation, segmentation, list_folder)

    # Open labelbox json annotation
    with open(labelbox_json, 'r', encoding='utf8') as f:
        data = json.load(f)
    print('num of annotation -', len(data))

    # In case some images or annotations are missing
    assert len(data) == len(os.listdir(image)), f'ERROR - Files in {image} mismatch with {labelbox_json}'

    ground_truth = list_folder / 'train_gt.txt'
    train_txt = list_folder / 'train.txt'

    with open(ground_truth, 'w', encoding='utf8') as gt, open(train_txt, 'w', encoding='utf8') as train, \
            tqdm(total=len(data), file=sys.stdout) as pbar:

        for i, c in enumerate(data):
            # Image, mask and CULane annotation files share the basename
            name = c['External ID']
            basename = os.path.splitext(name)[0]
            mask = basename + '.png'
            ann_file = basename + '.lines.txt'

            abs_name = image / name
            assert os.path.exists(abs_name), f'ERROR - {abs_name} does not exist'
            im0 = cv2.imread(str(abs_name))
            # Scale factors from the raw image size to the CULane size
            h_ratio, w_ratio = culane_size[1] / im0.shape[0], culane_size[0] / im0.shape[1]

            # Ground truth line parts for this image
            gt_line = []

            with open(converted_annotation / ann_file, 'w', encoding='utf8') as f_dataset:
                # Presence flags of the (at most) 4 lane classes
                fourlines = [0] * 4

                # Segmentation mask at CULane size.
                # BUG FIX: must start from zeros - np.empty_like left
                # uninitialized garbage that polylines then drew on top of.
                seg_resized = np.zeros((culane_size[1], culane_size[0], 3), dtype=np.uint8)

                # Please refer to the labelbox json format
                for j, obj in enumerate(c['Label']['objects']):
                    new_line = []

                    # class value, expected '1'..'4'
                    cls = obj['value']
                    assert isinstance(cls, str), 'ERROR - cls is not str'

                    # Set fourlines with class value
                    fourlines[int(cls) - 1] = 1

                    # Each annotated coord on a line, rescaled to CULane size
                    for point in obj['line']:
                        new_line.append(point['x'] * w_ratio)
                        new_line.append(point['y'] * h_ratio)

                    # Resample at the fixed CULane y rows
                    new_line = cal_x_from_y(new_line)
                    f_dataset.write(' '.join(map(str, new_line)))

                    # Integer (x, y) pairs for drawing the lane on the mask
                    pts = [(int(x), int(y)) for x, y in zip(new_line[0::2], new_line[1::2])]
                    cv2.polylines(seg_resized, np.int32([pts]), isClosed=False, color=colors[int(cls) - 1],
                                  thickness=20)

                    # The last line should end without `\n`
                    if j != len(c['Label']['objects']) - 1:
                        f_dataset.write('\n')

                fourlines = ' '.join(map(str, fourlines))

            gt_line.append('/image/' + name)
            gt_line.append('/segmentation/' + mask)
            gt_line.append(fourlines)

            # Write ground truth
            gt.write(' '.join(gt_line))
            if i != len(data) - 1:
                gt.write('\n')

            # Write image names to train.txt
            train.write('/image/' + name)
            if i != len(data) - 1:
                train.write('\n')

            # Resize images from raw to CULane size: raw -> (1640, 590)
            im_resized = cv2.resize(im0, culane_size, interpolation=cv2.INTER_AREA)
            cv2.imwrite(str(converted_image / name), im_resized)
            # BUG FIX: save the mask under the `.png` name that train_gt.txt
            # references (previously written under the raw image name, which
            # mismatched the list file and could jpeg-corrupt the class ids)
            cv2.imwrite(str(segmentation / mask), seg_resized)

            pbar.update(1)


def labelme2culane(dataset: Path, output: Path) -> None:
    """Convert a labelme dataset into CULane layout with a 70/15/15 split.

    Parameters
    ----------
    dataset : Path
        Folder containing `annotation/` (labelme json files) and `image/`.
    output : Path
        Destination root; recreated with image/, segmentation/, annotation/,
        list/ and bundle_not_for_training/ sub-folders.
    """
    bundle, image, seg, li, ann = 'bundle_not_for_training', 'image', 'segmentation', 'list', 'annotation'
    raw_ann_folder = dataset / 'annotation'

    # Input dataset folder assertion
    folder_assertion(dataset, dataset / 'annotation', dataset / 'image')

    # Create `output` and 5 sub-folders
    create_folder(output, output / bundle, output / image, output / seg, output / li, output / ann)

    # Shuffle so the train/val/test split is random
    shuffled = os.listdir(raw_ann_folder)
    random.shuffle(shuffled)

    # Split boundaries
    ratio = (0.7, 0.15, 0.15)  # train, val, test
    train_num = int(len(shuffled) * ratio[0])
    val_num = int(len(shuffled) * (ratio[0] + ratio[1]))

    with open(str(output / li / 'train.txt'), 'w', encoding='utf8') as train, \
            open(str(output / li / 'train_gt.txt'), 'w', encoding='utf8') as train_gt, \
            open(str(output / li / 'val.txt'), 'w', encoding='utf8') as val, \
            open(str(output / li / 'val_gt.txt'), 'w', encoding='utf8') as val_gt, \
            open(str(output / li / 'test.txt'), 'w', encoding='utf8') as test, \
            tqdm(total=len(shuffled), file=sys.stdout) as pbar:

        for i, c in enumerate(shuffled):
            some_json = raw_ann_folder / c
            # BUG FIX: close the json file handle (was json.load(open(...)))
            with open(some_json, encoding='utf8') as jf:
                data = json.load(jf)
            img = U.img_b64_to_arr(data['imageData'])
            # NOTE(review): labelme_shapes_to_label is deprecated in newer
            # labelme releases - confirm the pinned version still provides it
            lbl, lbl_names = U.labelme_shapes_to_label(img.shape, data['shapes'])

            captions = list(lbl_names)
            viz = U.draw_label(lbl, img, captions)
            basename = os.path.splitext(os.path.basename(some_json))[0]

            # Per-image debug bundle (not used for training)
            item = output / bundle / basename
            create_folder(item)
            Image.fromarray(img).save(item / (basename + '_image.png'))
            Image.fromarray(lbl).save(item / (basename + '_seg.png'))
            Image.fromarray(viz).save(item / (basename + '_viz.png'))

            # Training copies of the image and its segmentation mask
            Image.fromarray(img).save(output / image / f'{basename}.png')
            Image.fromarray(lbl).save(output / seg / f'{basename}.png')

            # Presence flags of lane classes '1'..'4'
            gt = [0] * 4
            helper = {'1': 0, '2': 1, '3': 2, '4': 3}
            with open(str(item / 'label_names.txt'), 'w') as f:
                for lbl_name in lbl_names:
                    f.write(lbl_name + '\n')

                    if lbl_name in helper:
                        gt[helper[lbl_name]] = 1

            info = dict(label_names=lbl_names)
            with open(os.path.join(item, 'info.yaml'), 'w') as f:
                yaml.safe_dump(info, f, default_flow_style=False)

            # Split dataset & write to `output/list/*.txt`
            gt_str = ' '.join(map(str, gt))
            if i < train_num:
                train.write(f'/image/{basename}.png\n')
                train_gt.write(f'/image/{basename}.png /segmentation/{basename}.png {gt_str}\n')
            elif i < val_num:
                val.write(f'/image/{basename}.png\n')
                val_gt.write(f'/image/{basename}.png /segmentation/{basename}.png {gt_str}\n')
            else:
                test.write(f'/image/{basename}.png\n')

            # Write `output/annotation/*.lines.txt`: lanes ordered by class
            # label, points within a lane ordered bottom-to-top (descending y)
            shapes = data['shapes']
            shapes.sort(key=lambda s: int(s['label']))
            with open(str(output / ann / (basename + '.lines.txt')), 'w', encoding='utf8') as f:
                for idx, shape in enumerate(shapes):
                    points = sorted(shape['points'], key=lambda p: p[1], reverse=True)
                    flat = [v for point in points for v in point]
                    f.write(' '.join(map(str, flat)))
                    if idx != len(shapes) - 1:
                        f.write('\n')

            pbar.update(1)


def cal_x_from_y(line: list) -> list:
    """Resample a polyline at fixed CULane y rows by linear interpolation.

    Parameters
    ----------
    line : list
        Flat list [x0, y0, x1, y1, ...] with points sorted by descending y
        (image bottom to top), as produced by the converters in this file.

    Returns
    -------
    list
        Flat [x, y, ...] list where each y is one of the fixed sample rows
        and x is the interpolated value rounded to 3 decimals; rows outside
        the polyline's vertical span are skipped.
    """
    assert isinstance(line, list), 'ERROR - Line is not list type'

    new_line = []
    # Fewer than two points cannot define a segment to interpolate on
    if len(line) < 4:
        return new_line

    # Fixed sample rows: bottom of a 720-high frame upwards in steps of 10.
    # Rows above/below the polyline are filtered by the range check below.
    culane_y = [y for y in range(720, 0, -10)]
    for y in culane_y:
        # Only sample rows strictly inside the polyline's vertical span
        # (line[1] is the first/lowest point's y, line[-1] the last/highest)
        if line[-1] < y < line[1]:
            # Stop at len - 3 so indices i .. i + 3 always stay in bounds
            # (the original loop could index past the end of the list)
            for i in range(0, len(line) - 3, 2):
                if line[i + 3] <= y <= line[i + 1]:
                    # Skip degenerate horizontal segments (zero y-span)
                    # instead of dividing by zero
                    if line[i + 1] == line[i + 3]:
                        continue
                    x = line[i + 2] + (y - line[i + 3]) * (line[i] - line[i + 2]) / (line[i + 1] - line[i + 3])
                    new_line.append(round(x, 3))
                    new_line.append(int(y))
                    break
    return new_line


def create_folder(*args) -> None:
    """Recreate every given directory from scratch.

    Each argument (str or Path) that already exists is removed first, so
    every directory is guaranteed to be empty after the call. Missing
    parent directories are created as needed.
    """
    for target in map(Path, args):
        if target.exists():
            shutil.rmtree(target)
        target.mkdir(parents=True)


def folder_assertion(*args) -> None:
    """Assert that every argument is an existing, non-empty directory."""
    checks = (
        (os.path.isdir, 'is not dir'),
        (os.listdir, 'is empty'),
    )
    for candidate in args:
        for probe, reason in checks:
            assert probe(candidate), f'ERROR - {candidate} {reason}'


def png2jpg(folder: Path, output: Path) -> None:
    """Convert every non-JPEG image in *folder* to a `.jpg` file in *output*.

    Parameters
    ----------
    folder : Path
        Source directory of images.
    output : Path
        Existing directory where the converted `.jpg` files are written.
    """
    assert os.path.isdir(folder), f'ERROR - {folder} is not a folder'
    print('converting...')

    names = os.listdir(folder)
    with tqdm(total=len(names), file=sys.stdout) as pbar:
        for name in names:
            stem, ext = os.path.splitext(name)
            # BUG FIX: splitext keeps the dot ('.jpg'); the old comparison
            # against 'jpg' never matched, so jpgs were re-encoded
            if ext.lower() in ('.jpg', '.jpeg'):
                pbar.update(1)
                continue

            img = Image.open(folder / name)
            # JPEG cannot store alpha/palette modes; normalize to RGB first
            # so e.g. RGBA pngs do not crash the save
            if img.mode != 'RGB':
                img = img.convert('RGB')
            img.save(output / (stem + '.jpg'))
            pbar.update(1)


def get_test(folder: Path) -> None:
    """Write `output/list/test_split/video.txt` listing every file in *folder*.

    Each entry is written as `/video_test/<filename>`, one per line, with no
    trailing newline after the final entry.
    """
    assert os.path.isdir(folder), f'ERROR - {folder} is not a folder'
    print('preparing test...')

    # Hoist the listing; the original called os.listdir on every iteration
    names = os.listdir(folder)
    create_folder(Path('.') / 'output' / 'list' / 'test_split')
    with open(Path('.') / 'output' / 'list' / 'test_split' / 'video.txt', 'w', encoding='utf8') as f, \
            tqdm(total=len(names), file=sys.stdout) as pbar:
        for idx, name in enumerate(names):
            f.write('/video_test/' + name)
            # BUG FIX: the original compared the filename string to an int
            # (always true), so a trailing newline was always emitted
            if idx != len(names) - 1:
                f.write('\n')
            pbar.update(1)


def resize_ims(folder: Path, size: tuple = (1640, 590)) -> None:
    """Resize every image in *folder* and write the results to ./output/resized.

    Parameters
    ----------
    folder : Path
        Source directory of images.
    size : tuple
        Target (width, height); defaults to the CULane size used elsewhere
        in this file. Generalized from the previously hard-coded constant.
    """
    output = Path('.') / 'output' / 'resized'
    create_folder(output)

    # os.listdir already yields bare filenames, so no basename step is needed
    for name in os.listdir(folder):
        print(name)
        im = cv2.imread(str(folder / name), cv2.IMREAD_UNCHANGED)
        im_resized = cv2.resize(im, size, interpolation=cv2.INTER_AREA)
        cv2.imwrite(str(output / name), im_resized)
