# -*- coding: utf-8 -*-
from __future__ import annotations

import os
import os.path as osp
import json
import shutil
from copy import deepcopy
import logging
from PIL import Image
import tifffile as tif
import numpy as np
import cv2
from pycocotoolse.coco import COCO
from pathlib import Path
import concurrent.futures
from io_util import getSize
from task.util import create_dir, listdir, image_extensions, match_by_base_name
from task.base import BaseTask
from task.util.color import hex_to_rgb
from task.util import copy
from rpc.seg import polygon2points
import concurrent.futures

from task.util.labelme import write_ann

log = logging.getLogger("paddlelabel")

def hex_to_rgb(hex_color):
    """Convert a hex color string to an ``(r, g, b)`` tuple of ints.

    Accepts an optional leading ``#`` and both 6-digit ("#ff0000") and
    3-digit shorthand ("#f00") forms.

    NOTE(review): this shadows ``task.util.color.hex_to_rgb`` imported above —
    consider keeping only one of the two definitions.
    """
    hex_color = hex_color.lstrip('#')
    if len(hex_color) == 3:
        # expand CSS-style shorthand, e.g. "f00" -> "ff0000"
        hex_color = "".join(c * 2 for c in hex_color)
    return tuple(int(hex_color[i:i + 2], 16) for i in (0, 2, 4))

def polygon2points(polygon):
    """Convert a flat coordinate list ``[x0, y0, x1, y1, ...]`` to ``(x, y)`` tuples.

    Fix: the previous index-based loop raised ``IndexError`` on an odd-length
    input (``polygon[i + 1]`` past the end); a trailing unpaired value is now
    silently dropped instead.
    """
    return list(zip(polygon[0::2], polygon[1::2]))


def draw_mask(data, annotations, labels_dict, mask_type="grayscale"):
    """Rasterize one data record's annotations into a numpy mask.

    Parameters
    ----------
    data : record whose ``size`` is a "channels,height,width" string
    annotations : iterable of annotation records (``type``, ``label_id``, ``result``);
        ``result`` is "line_width,flag,x0,y0,x1,y1,..." — width 0 marks point
        annotations, flag 0 (with nonzero width) marks eraser ("rubber") strokes.
        NOTE: ``ann.type`` may be rewritten in place based on those two values.
    labels_dict : maps ``label_id`` to a label object with ``.id`` and ``.color``
    mask_type : "grayscale" (category-id mask), "pesudo"/"pseudo" (3-channel BGR
        color mask) or "instance" (returns stacked [instance ids, category ids])

    Returns
    -------
    ``np.ndarray`` (2D for grayscale, HxWx3 for pseudo, 2xHxW for instance),
    or ``None`` when ``data.size`` cannot be parsed.
    """
    try:
        height, width = map(int, data.size.split(",")[1:3])
    except (ValueError, IndexError):
        log.error(f"Invalid data size: {data.size}")
        return None

    # Fix: accept both the historical misspelling "pesudo" and the correct
    # "pseudo". Previously mask allocation checked "pesudo" while color
    # selection checked "pseudo", so the colored branch was unreachable and
    # pseudo masks were painted with a scalar label id instead of the label color.
    pseudo = mask_type in ("pesudo", "pseudo")

    instance_id = 0

    if pseudo:
        catg_mask = np.zeros((height, width, 3), dtype=np.uint8)
    elif mask_type == "grayscale":
        catg_mask = np.zeros((height, width), dtype=np.uint8)
    elif mask_type == "instance":
        catg_mask = np.zeros((height, width), dtype=np.uint8)
        instance_mask = np.zeros((height, width), dtype=np.uint8)
    else:
        raise RuntimeError(f"Unsupported mask type: {mask_type}")

    for ann in annotations:
        if ann.type not in ["brush", "polygon", "points", "rubber"]:
            continue
        label_obj = labels_dict.get(ann.label_id, None)
        if not label_obj:
            continue
        label_id = label_obj.id
        result = ann.result.strip().split(",")
        result = [r for r in result if r != ""]
        if len(result) < 2:
            log.warning(f"Invalid result format for annotation {ann}: {result}")
            continue

        instance_id += 1

        try:
            result = [int(float(p)) for p in result]
        except ValueError:
            log.error(f"Annotation {ann} conversion failed: {result}, please open an issue for this")
            continue

        # line width 0 => individual points, not a stroke
        if result[0] == 0:
            ann.type = "points"

        # nonzero width with flag 0 => eraser stroke
        if result[0] != 0 and result[1] == 0:
            ann.type = "rubber"

        if ann.type == "rubber":
            color = [0, 0, 0]  # erase back to background
        elif pseudo:
            color = hex_to_rgb(label_obj.color)[::-1]  # reversed to BGR for OpenCV
        else:
            color = int(label_id)

        if ann.type in ["brush", "rubber"]:
            points = result[2:]
            line_width = max(result[0], 1)
            if len(points) % 2 != 0:
                log.warning(f"Odd number of points for annotation {ann}: {points}")
                continue

            prev_w, prev_h = points[0:2]
            for idx in range(2, len(points), 2):
                w, h = points[idx: idx + 2]
                if 0 <= w < width and 0 <= h < height and 0 <= prev_w < width and 0 <= prev_h < height:
                    cv2.line(catg_mask, (prev_w, prev_h), (w, h), color, line_width)
                    if mask_type == "instance":
                        cv2.line(instance_mask, (prev_w, prev_h), (w, h), instance_id, line_width)
                else:
                    log.warning(f"Point ({w}, {h}) or ({prev_w}, {prev_h}) out of bounds for annotation {ann}")
                prev_w, prev_h = w, h

        else:
            if ann.type == "points":
                # Fix: pair up the flat [x0, y0, x1, y1, ...] list. The previous
                # code iterated the flat int list directly, so `for x, y in points`
                # raised TypeError (the older commented-out version reshaped to
                # (-1, 2), confirming pairs were intended).
                coords = result[2:]
                if len(coords) % 2 != 0:
                    log.warning(f"Odd number of points for annotation {ann}: {coords}")
                    continue
                points = list(zip(coords[0::2], coords[1::2]))
            elif ann.type == "polygon":
                points = polygon2points(result[2:])
                if len(points) < 3:
                    log.warning(f"Polygon must have at least 3 points for annotation {ann}: {points}")
                    continue

            for x, y in points:
                if 0 <= x < width and 0 <= y < height:
                    catg_mask[y, x] = color
                    if mask_type == "instance":
                        instance_mask[y, x] = instance_id
                else:
                    log.warning(f"Point ({x}, {y}) out of bounds for annotation {ann}")

    if mask_type == "instance":
        return np.stack([instance_mask, catg_mask], axis=0)
    return catg_mask


def parse_instance_mask(annotation_path, labels, image_path=None):
    """Read a 2-channel instance-mask tiff and rebuild brush annotations.

    Channel 0 holds per-instance ids, channel 1 holds category (label) ids.
    When ``image_path`` is given, the mask shape is validated against the
    image; on mismatch ``(None, None)`` is returned.

    Returns ``(size, anns)`` where ``size`` is a "1,height,width" string and
    ``anns`` is a list of annotation dicts whose ``result`` is
    "0,<instance_id>,x0,y0,x1,y1,...".
    """
    mask = tif.imread(annotation_path)
    if image_path is not None:
        img = Image.open(image_path)
        # PIL size is (w, h); reversed it must match the mask's (h, w)
        if img.size[::-1] != mask.shape[1:]:
            logging.info(
                f"Image {img.size[::-1]} and annotation {mask.shape[1:]} has different shape, please check image {image_path} and annotation {annotation_path}")
            return None, None

    instance_channel = mask[0]
    label_channel = mask[1]
    anns = []

    for label in labels:
        # instance ids that overlap this label's pixels
        ids_for_label = np.unique(instance_channel[label_channel == label.id])
        for iid in ids_for_label:
            ys, xs = np.where(instance_channel == iid)
            coord_str = ",".join(f"{x},{y}" for x, y in zip(xs, ys))
            anns.append(
                {
                    "label_name": label.name,
                    "result": f"{0},{iid}," + coord_str,
                    "type": "brush",
                    "frontend_id": str(iid),
                }
            )

    shape = (1,) + tuple(instance_channel.shape[:2])
    size = ",".join(str(dim) for dim in shape)
    return size, anns


def osp_join(*args):
    """Join path components; thin convenience wrapper over ``os.path.join``."""
    return osp.join(*args)


class InstanceSegmentation(BaseTask):
    """Import/export handlers for instance segmentation projects.

    Registered formats:
      import: "mask" (images + paired .tif/.tiff instance masks), "coco", "eiseg"
      export: "mask", "coco" (default)

    NOTE(review): ``labelme_exporter`` is implemented below but not registered
    in ``self.exporters`` — confirm whether that is intentional.
    """

    def __init__(self, session, project, data_dir=None, is_export=False):
        # skip_label_import=True: each importer below creates labels itself
        super(InstanceSegmentation, self).__init__(session, project, skip_label_import=True, data_dir=data_dir,
                                                   is_export=is_export)
        self.importers = {
            "mask": self.mask_importer,
            "coco": self.coco_importer,
            "eiseg": self.eiseg_importer,
        }
        self.exporters = {
            "mask": self.mask_exporter,
            "coco": self.coco_exporter,
        }
        self.default_exporter = self.coco_exporter
        self.db_session = session

    def mask_importer(
            self,
            data_dir: Path | None = None,
            filters={"exclude_prefix": ["."], "include_postfix": image_extensions},
    ):
        """Import images and their paired .tif/.tiff instance masks.

        A mask is matched to an image by identical base file name. The
        background label line from labels.txt is stashed in
        ``project.other_settings`` so mask_exporter can round-trip it.
        Returns False if any mask's shape disagrees with its image.

        NOTE(review): mutable default ``filters`` is shared across calls —
        safe only while no caller mutates it.
        """
        # 1. set params
        project = self.project

        data_dir = project.data_dir if data_dir is None else data_dir

        # first line of labels.txt may be a background label; keep it for export
        background_line = self.import_labels(ignore_first=True)
        # other_settings = project._get_other_settings()
        other_settings = project.other_settings
        other_settings["background_line"] = background_line
        project.other_settings = json.dumps(other_settings)

        # base file name (no extension) -> tiff annotation path
        ann_dict = {
            osp.basename(p).split(".")[0]: p
            for p in listdir(data_dir, {"exclude_prefix": ["."], "include_postfix": [".tiff", ".tif"]})
        }

        # 2. import records
        self.db_session.begin(subtransactions=True)
        for data_path in listdir(data_dir, filters):
            # NOTE(review): `id` shadows the builtin
            id = osp.basename(data_path).split(".")[0]
            data_path = osp.join(data_dir, data_path)
            if id in ann_dict.keys():
                ann_path = osp.join(data_dir, ann_dict[id])
                size, anns = parse_instance_mask(ann_path, project.labels, data_path)
                if size is None and anns is None:
                    # shape mismatch between image and mask; abort the import
                    return False
            else:
                # image without annotation: add an empty task
                anns = []
                size, _, _ = getSize(data_path)
                # mask = tif.imread(data_path)

            self.add_task([{"path": data_path, "size": size}], [anns])
        # self.commit()
        self.db_session.commit()

    def mask_exporter(self, export_dir,finalValues=False,db_session=None):
        """Export the project as JPEGImages/ + Annotations/*.tiff masks.

        Each tiff is the 2-channel [instance ids, category ids] stack produced
        by ``draw_mask(..., mask_type="instance")``. Also writes split files
        and labels.txt. Returns False when any record's size string cannot be
        parsed, True otherwise.
        """
        # 1. set params
        project = self.project
        finalValues = finalValues  # NOTE(review): no-op self-assignment
        # 2. create export destinations
        export_data_dir = osp.join(export_dir, "JPEGImages")
        export_label_dir = osp.join(export_dir, "Annotations")
        create_dir(export_data_dir)
        create_dir(export_label_dir)
        if db_session is None:
            db_session = self.db_session

        datas = self._get_project_data(finalValues,db_session=db_session)
        datas_id_list = [t.data_id for t in datas]
        datas_annotations_dict = self._get_datas_annotations(datas_id_list,db_session=db_session)
        label_dict = self._get_labels_id_dict(db_session=db_session)
        export_data_paths = []
        # NOTE(review): export_label_paths is accumulated but never used
        export_label_paths = []

        for data in datas:
            if data:
                annotations_list = datas_annotations_dict.get(data.data_id, [])
                data_path = osp.join(project.data_dir, data.path)
                export_data_path = osp.join("JPEGImages", osp.basename(data.path))
                export_label_path = osp.join(export_label_dir, osp.basename(data_path).split(".")[0] + ".tiff")
                copy(data_path, export_data_dir)
                # height, width = map(int, data.size.split(",")[1:3])
                mask = draw_mask(data, annotations_list,label_dict, mask_type="instance")
                if mask is None:
                    # unparsable data.size; abort the export
                    return False
                try:
                    # low version tifffile doen't have compression setting
                    tif.imwrite(export_label_path, mask, compression="zlib")
                except TypeError:
                    tif.imwrite(export_label_path, mask)
                export_data_paths.append(export_data_path)
                export_label_paths.append([export_label_path])
        self.export_split(
            export_dir,
            datas,
            export_data_paths,
            with_labels=False,
            annotation_ext=".tiff",
        )
        # other_setting = project.other_settings if project.other_settings else {}
        # NOTE(review): the project-settings read above is disabled, so
        # background_line is always None here — confirm this is intended.
        other_setting = {}

        background_line = other_setting.get("background_line")
        if not 0 in [l.id for l in label_dict.values()] and (background_line is None or len(background_line) == 0):
            background_line = "background"

        self.export_labels(osp.join(export_dir, "labels.txt"), background_line, with_id=True)
        return True

    def coco_importer(
            self,
            data_dir: Path | None = None,
            filters: dict[str, list] = {"exclude_prefix": ["."], "include_postfix": image_extensions},
    ):
        """Import a COCO-format dataset (also covers EasyData / LabelMe / EISeg
        json layouts, see ``label_file_paths`` below).

        Polygon coordinates are stored relative to the image center
        (``coco_exporter`` adds the half-width/half-height offset back).
        Images not referenced by any json are added as label-less tasks.
        Returns False when an image referenced in a json cannot be resolved
        to exactly one file on disk.
        """
        # 1. set params
        project = self.project
        data_dir = Path(project.data_dir if data_dir is None else data_dir)
        self.split = [set()] * 3  # disable xx_list.txt support
        self.create_warning(data_dir)

        # candidate annotation files and the train/val/test split they map to
        label_file_paths = [
            (["train.json"], 0),
            (["val.json"], 1),
            (["test.json"], 2),
            (["Annotations", "coco_info.json"], 0),  # EasyData format
            (["annotations.json"], 0),  # LabelMe format
            (["label", "annotations.json"], 0),  # EISeg format
        ]
        label_file_paths = [(data_dir / Path(*p), split) for p, split in label_file_paths]

        def _coco_importer(data_paths, label_file_path, set=0):
            """Import one COCO json; returns (remaining data_paths, info/licenses json)
            or (None, None) on image-resolution failure."""
            coco = COCO(label_file_path)
            info = coco.dataset.get("info", {})
            licenses = coco.dataset.get("licenses", [])

            # 1. create all labels
            self.create_coco_labels(coco.cats.values())

            ann_by_task = {}
            # 2. get image full path and size
            for idx, img in coco.imgs.items():
                file_name = img["file_name"]
                # match by path suffix so json may reference nested/relative paths
                img_path = filter(
                    lambda p: osp.normpath(p)[-len(osp.normpath(file_name)):] == osp.normpath(file_name), data_paths
                )
                img_path = list(img_path)
                if len(img_path) != 1:
                    logging.info(
                        f"{'No' if len(img_path) == 0 else 'Multiple'} image(s) with path ending with {file_name} found under {data_dir}")
                    return None, None

                img_path = img_path[0]
                data_paths.remove(img_path)
                coco.imgs[idx]["img_path"] = img_path
                coco.imgs[idx]["size"], _, _ = getSize(Path(data_dir) / img_path)
                ann_by_task[img["id"]] = []

            # 3. get ann by image
            for ann_id in coco.getAnnIds():
                ann = coco.anns[ann_id]
                if coco.imgs.get(ann["image_id"]) is None:
                    print(f"No image with id {ann['image_id']} found, skipping this annotation.")
                    continue

                label_name = coco.cats[ann["category_id"]]["name"]
                res = ann["segmentation"][0]
                width, height = (
                    coco.imgs[ann["image_id"]].get("width", None),
                    coco.imgs[ann["image_id"]].get("height", None),
                )
                # shift coordinates so the image center is the origin
                for idx in range(0, len(res), 2):
                    res[idx] -= width / 2
                    res[idx + 1] -= height / 2

                res = [str(r) for r in res]
                res = ",".join(res)
                ann_by_task[ann["image_id"]].append(
                    {
                        "label_name": label_name,
                        "result": res,
                        "type": "polygon",
                        "frontend_id": len(ann_by_task[ann["image_id"]]) + 1,
                    }
                )

            # 4. add tasks
            for img_id, annotations in list(ann_by_task.items()):
                data_path = coco.imgs[img_id]["img_path"]
                self.add_task([{"path": data_path, "size": coco.imgs[img_id]["size"]}], [annotations], split=set)
            return data_paths, json.dumps({"info": info, "licenses": licenses})

        # 2. find all images under data_dir
        data_paths = listdir(data_dir, filters=filters)
        coco_others = {}
        for label_file_path, split_idx in label_file_paths:
            if label_file_path.exists():
                data_paths, others = _coco_importer(data_paths, label_file_path, split_idx)
                if data_paths is None and others is None:
                    return False
                coco_others[split_idx] = others

        # keep COCO info/licenses so coco_exporter can round-trip them
        # other_settings = project._get_other_settings()
        other_settings = project.other_settings
        other_settings["coco_others"] = coco_others
        project.other_settings = json.dumps(other_settings)
        self.db_session.begin(subtransactions=True)
        # 3. add tasks without label
        for data_path in data_paths:
            size, _, _ = getSize(data_dir / data_path)
            self.add_task([{"path": data_path, "size": size}])
        self.db_session.commit()
        # self.commit()

    def coco_exporter(self, export_dir,finalValues=False,seg_mask_type=None,db_session=None):
        """Export the project as COCO json (train/val/test.json + image/ dir).

        Only polygon annotations are exported; stored center-origin coordinates
        are shifted back to top-left origin (inverse of ``coco_importer``).
        Always returns True.
        """
        # 1. set params
        if db_session is None:
            db_session = self.db_session
        project = self.project
        finalValues = finalValues  # NOTE(review): no-op self-assignment
        # 2. create coco with all tasks
        coco = COCO()
        # 2.1 add categories
        labels = self._get_project_labels(db_session=db_session)
        labels_dict = {l.label_id: l for l in labels}
        for label in labels:
            if label.super_category_id is None:
                super_category_name = "none"
            else:
                super_category_name = self.label_id2name(label.super_category_id)
            coco.addCategory(label.id, label.name, label.color, super_category_name)

        # 2.2 add images
        split = [set(), set(), set()]
        datas = self._get_project_data(finalValues,db_session=db_session)
        data_dir = osp.join(export_dir, "image")
        create_dir(data_dir)
        for data in datas:
            if data:
                # data.size is "channels,height,width"
                size = data.size.split(",")
                export_path = osp.join("image", osp.basename(data.path))
                coco.addImage(export_path, int(size[1]), int(size[2]), data.data_id)
                copy(osp.join(project.data_dir, data.path), data_dir)
                split[data.set].add(data.data_id)

        # 2.3 add annotations
        annotations = self._get_project_id_annotations(db_session=db_session)
        for ann in annotations:
            label_obj = labels_dict.get(ann.label_id, None)
            if not label_obj:
                print(f"Label object not found for annotation {ann.annotation_id}")
                continue
            if ann.type != "polygon":
                continue
            if ann.data_id not in coco.imgs:
                print(f"Image not found for annotation {ann.annotation_id} with data_id {ann.data_id}")
                continue
            r = ann.result.split(",")
            r = [float(t) for t in r]
            width, height = (
                coco.imgs[ann.data_id]["width"],
                coco.imgs[ann.data_id]["height"],
            )
            width = int(width)
            height = int(height)
            # shift center-origin coordinates back to top-left origin
            for idx in range(0, len(r), 2):
                r[idx] += width / 2
                r[idx + 1] += height / 2
            coco.addAnnotation(
                ann.data_id,
                label_obj.id,
                segmentation=[r],
                id=ann.annotation_id,
            )
        # 3. write coco json
        # other_setting = project.other_settings if project.other_settings else {}
        # NOTE(review): the project-settings read above is disabled, so the
        # info/licenses saved by coco_importer are never restored — confirm.
        other_setting = {}
        coco_others = other_setting.get("coco_others", {})
        for split_idx, fname in enumerate(["train.json", "val.json", "test.json"]):
            outcoco = deepcopy(coco)
            outcoco.dataset["images"] = [img for img in coco.dataset["images"] if img["id"] in split[split_idx]]
            outcoco.dataset["annotations"] = [
                ann for ann in coco.dataset["annotations"] if ann["image_id"] in split[split_idx]
            ]

            coco_others_split = coco_others.get(str(split_idx), "{}")
            coco_others_split = json.loads(coco_others_split)

            outcoco.dataset["info"] = coco_others_split.get("info", "")
            outcoco.dataset["licenses"] = coco_others_split.get("licenses", [])

            with open(osp.join(export_dir, fname), "w") as outf:
                json.dump(outcoco.dataset,outf)
                # print(json.dumps(outcoco.dataset), file=outf)
        return True

    # NOTE(review): a commented-out, superseded copy of coco_exporter (using
    # ThreadPoolExecutor) previously lived here; removed as dead code.

    def eiseg_importer(
            self,
            data_dir: Path | None = None,
            filters={"exclude_prefix": ["."], "include_postfix": image_extensions},
    ):
        """Import an EISeg dataset: images plus per-image polygon .json files.

        Json files are matched to images by base name; matched coordinates are
        shifted to center-origin like ``coco_importer``. Images without a json
        become label-less tasks.
        """
        project = self.project
        if data_dir is None:
            data_dir = project.data_dir
        data_dir = Path(data_dir)
        data_paths = [Path(p) for p in listdir(data_dir, filters=filters)]
        json_paths = listdir(data_dir, filters={"exclude_prefix": ["."], "include_postfix": [".json"]})
        json_paths = [Path(p) for p in json_paths]
        self.db_session.begin(subtransactions=True)
        for data_path in data_paths:
            size, height, width = getSize(data_dir / data_path)
            json_path = match_by_base_name(data_path, json_paths)

            if len(json_path) == 0:
                self.add_task([{"path": str(data_path), "size": size}])
            else:
                json_path = json_path[0]
                anns_d = json.loads((data_dir / json_path).read_text())
                anns = []
                for ann in anns_d:
                    # each EISeg record has keys ['name', 'labelIdx', 'color', 'points']
                    res = [wh for p in ann["points"] for wh in p]
                    # shift coordinates so the image center is the origin
                    for idx in range(0, len(res), 2):
                        res[idx] -= width / 2
                        res[idx + 1] -= height / 2
                    anns.append(
                        {
                            "label_name": ann["name"],
                            "result": ",".join(str(r) for r in res),
                            "type": "polygon",
                            "frontend_id": len(anns),
                        }
                    )
                self.add_task([{"path": str(data_path), "size": size}], [anns])
                json_paths.remove(json_path)
        self.db_session.commit()

    def labelme_exporter(self, export_dir , finalValues=False,db_session=None):
        """Export the project in labelme layout: JPEGImages/ + Annotations/*.json.

        Writes one json per image that has annotations (via ``write_ann``),
        then the split files. Always returns True.

        NOTE(review): not registered in ``self.exporters`` — unreachable through
        the normal export dispatch; confirm whether it should be added.
        """
        # 1. set params, prep output folders
        project = self.project
        finalValues = finalValues  # NOTE(review): no-op self-assignment
        export_data_dir = os.path.join(export_dir, "JPEGImages")
        export_label_dir = os.path.join(export_dir, "Annotations")
        create_dir(export_data_dir)
        create_dir(export_label_dir)

        if db_session is None:
            db_session = self.db_session

        # 2. move images and write ann json
        datas = self._get_project_data(finalValues, db_session=db_session)
        datas_id_list = [t.data_id for t in datas]
        task_id_annotations_dict = self._get_datas_annotations(datas_id_list, db_session=db_session)
        labels_dict = self._get_labels_id_dict(db_session=db_session)

        new_paths = []
        for data in datas:
            if data:
                # data.size is "channels,height,width"
                _, height, width = map(int, data.size.split(","))
                data_path = os.path.join(project.data_dir, data.path)
                export_img_path = os.path.join(export_data_dir, os.path.basename(data.path))
                export_ann_path = os.path.join(export_label_dir, os.path.basename(data.path).split(".")[0] + ".json")

                shutil.copy(data_path, export_img_path)

                annotations = task_id_annotations_dict.get(data.data_id, [])
                if annotations:
                    write_ann(
                        Path(export_ann_path),
                        Path(export_img_path),
                        height,
                        width,
                        labels_dict,
                        annotations,
                        with_data=False
                    )

                new_paths.append([str(Path(export_img_path).relative_to(export_dir))])

        # 3. write split files
        self.export_split(export_dir, datas, new_paths, with_labels=False, annotation_ext=".json")

        return True
