from __future__ import annotations

import hashlib
import logging
import os
import os.path as osp
import threading
from collections import deque
from pathlib import Path
from PIL import Image, ImagePath
from concurrent.futures import ThreadPoolExecutor
from config import REDIS_DATABASE_KEY
from libs.redis_wrapper import RedisWrapper
from models.task_models.task_model import Task
from models.project_models.project_model import Project
from models.data_models.data_model import Data
from models.annotation_models.annotation_model import Annotation
from models.label_models.label import Label
from task.util import create_dir, image_extensions, listdir
from task.util.color import name_to_hex, rand_hex_color, rgb_to_hex
from enumobjects.general_enum import YesOrNoEnum
from utils.common_util import CommonUtil


class BaseTask:
    def __init__(
            self,
            session,
            project: int | Project,
            data_dir: None | Path = None,
            skip_label_import: bool = False,
            is_export: bool = False
    ):
        """
        Base class for dataset import/export tasks.

        :param session: DB session (SQLAlchemy-style) used for all queries.
        :param project: a project_id (int) or an already-loaded Project.
        :param data_dir: dataset root; defaults to project.data_dir when None.
        :param skip_label_import: skip importing labels from labels.txt.
        :param is_export: export mode skips split reading and label import.
        :raises RuntimeError: when a project_id is given but no matching
            (non-deleted) project exists.
        """
        self.db_session = session
        self.task_cache: list[Task] = []
        # 1. resolve the project object
        if isinstance(project, int):
            project_id = project
            project = self._get_project_obj(project_id)
            if project is None:
                # BUGFIX: `project` was already overwritten with None here,
                # so the original message printed "project_id None".
                raise RuntimeError(f"No project with project_id {project_id}")
        self.project = project

        self.db_session.begin(subtransactions=True)
        self.project.data_dir = self.project.data_dir.strip()
        self.db_session.commit()
        if data_dir is None:
            data_dir = self.project.data_dir
        # BUGFIX: the original asserted Path().exists() — the current working
        # directory, which always exists. Check the dataset dir instead.
        assert data_dir is not None and Path(data_dir).exists()

        # 2. Track the current maximum label id.
        # The next added label gets label_max_id + 1, so label ids start at 1.
        self.label_max_id = 0
        for label in self._get_project_labels():
            self.label_max_id = max(self.label_max_id, label.id)
        # 3. read the dataset split files
        if not is_export:
            self.split = self.read_split()
        # 4. create the labels listed in labels.txt
        if not skip_label_import and not is_export:
            self.import_labels()

        # 5. assign colors to labels that lack one
        self.populate_label_colors()

    def _get_project_obj(self, project_id):
        """Return the non-deleted Project with the given id, or None."""
        query = self.db_session.query(Project).filter(
            Project.project_id == project_id,
            Project.is_delete == YesOrNoEnum.NO.value,
        )
        return query.first()

    def _get_project_labels(self, db_session=None):
        """Return all non-deleted labels belonging to this project."""
        session = self.db_session if db_session is None else db_session
        return session.query(Label).filter(
            Label.project_id == self.project.project_id,
            Label.is_delete == YesOrNoEnum.NO.value,
        ).all()

    def _get_labels_id_dict(self, db_session=None):
        """Return a {label_id: Label} mapping for this project's labels."""
        session = self.db_session if db_session is None else db_session
        project_labels = session.query(Label).filter(
            Label.project_id == self.project.project_id,
            Label.is_delete == YesOrNoEnum.NO.value,
        ).all()
        return {label.label_id: label for label in project_labels}

    def _get_project_data(self, finalValues=False, db_session=None):
        """Return this project's non-deleted Data rows.

        When finalValues is not True, only rows with conform == 1 are kept.
        """
        session = self.db_session if db_session is None else db_session
        conditions = [
            Data.is_delete == YesOrNoEnum.NO.value,
            Data.project_id == self.project.project_id,
        ]
        if finalValues is not True:
            conditions.append(Data.conform == 1)
        return session.query(Data).filter(*conditions).all()

    def _get_tasks_datas(self, data_id_list):
        """Group non-deleted, conform Data rows (data_id in data_id_list) by task_id.

        NOTE(review): the result dict is keyed by the values of data_id_list,
        but rows are bucketed by d.task_id — a row is only kept when its
        task_id happens to appear in data_id_list. This looks like the
        parameter was meant to be a task id list (compare
        _get_tasks_annotations); confirm against callers before changing.
        """
        datas = self.db_session.query(Data).filter(
            Data.is_delete == YesOrNoEnum.NO.value,
            Data.data_id.in_(data_id_list),
            Data.conform == 1
        ).all()
        task_id_datas_dict = {t: [] for t in data_id_list}
        for d in datas:
            if d.task_id in task_id_datas_dict.keys():
                task_id_datas_dict[d.task_id].append(d)
        return task_id_datas_dict

    def _get_tasks_annotations(self, task_id_list):
        """Return {task_id: [Annotation, ...]} for the given task ids."""
        grouped = {task_id: [] for task_id in task_id_list}
        annotations = self.db_session.query(Annotation).filter(
            Annotation.is_delete == YesOrNoEnum.NO.value,
            Annotation.task_id.in_(task_id_list)
        ).all()
        for ann in annotations:
            bucket = grouped.get(ann.task_id)
            if bucket is not None:
                bucket.append(ann)
        return grouped

    def _get_datas_annotations(self, data_id_list, db_session=None):
        """Return {data_id: [Annotation, ...]} for the given data ids."""
        session = self.db_session if db_session is None else db_session
        grouped = {data_id: [] for data_id in data_id_list}
        annotations = session.query(Annotation).filter(
            Annotation.is_delete == YesOrNoEnum.NO.value,
            Annotation.data_id.in_(data_id_list)
        ).all()
        for ann in annotations:
            bucket = grouped.get(ann.data_id)
            if bucket is not None:
                bucket.append(ann)
        return grouped


    def _get_data_annotations(self, data_id, db_session=None):
        """Return all non-deleted annotations attached to one data record."""
        session = self.db_session if db_session is None else db_session
        query = session.query(Annotation).filter(
            Annotation.data_id == data_id,
            Annotation.is_delete == YesOrNoEnum.NO.value,
        )
        return query.all()

    def _get_project_id_annotations(self, db_session=None):
        """Return every non-deleted annotation belonging to this project."""
        session = self.db_session if db_session is None else db_session
        query = session.query(Annotation).filter(
            Annotation.project_id == self.project.project_id,
            Annotation.is_delete == YesOrNoEnum.NO.value,
        )
        return query.all()

    def _add_project(self, project):
        """Persist a new Project record; return it, or None when the insert fails."""
        try:
            self.db_session.begin(subtransactions=True)
            self.db_session.add(project)
            self.db_session.commit()
            return project
        except Exception:
            # BUGFIX: failures were logged at INFO with eager %-formatting;
            # log the full traceback at ERROR so insert problems surface.
            logging.exception("Failed to add project %s", project)
            return None

    def label_id2name(self, label_id: int | float):
        """Map a numeric label id to its name; None when no label matches."""
        target = int(label_id)
        matches = (lab.name for lab in self._get_project_labels() if lab.id == target)
        return next(matches, None)

    def read_split(self, separator: str = " ") -> list[set[str]]:
        """Read the train/val/test split files under the project data dir.

        Each line's first separator-delimited token is taken as a sample
        path; a missing split file yields an empty set.
        """
        data_dir = Path(self.project.data_dir)
        split_sets: list[set[str]] = []
        for file_name in ("train_list.txt", "val_list.txt", "test_list.txt"):
            split_path = data_dir / file_name
            entries: set[str] = set()
            if split_path.exists():
                for line in split_path.read_text(encoding="utf-8").split("\n"):
                    line = line.strip()
                    if line:
                        entries.add(line.split(separator)[0])
            split_sets.append(entries)
        return split_sets

    def import_labels(self, delimiter: str = " ", ignore_first: bool = False):
        """Import labels from labels.txt (or classes.txt / classes.names).

        Line formats (fields separated by `delimiter`):
            1 field : label name
            2 fields: label name | label id
            3 fields: label name | label id | hex color / color name / grayscale
            5 fields: label name | label id | r | g | b
        Text after // is stored as the label's comment; '-' skips a field.

        :param delimiter: field separator within a line.
        :param ignore_first: treat the first line as background and skip it.
        :return: the background line when ignore_first is True, else None.
        :raises RuntimeError: on an invalid field count or non-numeric id.
        """
        project = self.project
        data_dir = Path(project.data_dir)
        # 1.1 prefer data_dir / labels.txt
        label_names_path = data_dir / "labels.txt"
        # 1.2 fall back to classes.txt / classes.names (yolo format)
        if not label_names_path.exists():
            classes_path = listdir(data_dir, exact_match_one_of=["classes.txt", "classes.names"])
            if len(classes_path) == 0:
                return
            if len(classes_path) > 1:
                logging.info(f"Found {len(classes_path)} classes files at {','.join(classes_path)}")
                return
            label_names_path = data_dir / classes_path[0]

        # 1.3 nothing to import when no label file exists
        if not label_names_path.exists():
            return

        # 2. read and normalize label lines
        labels = label_names_path.read_text(encoding="utf-8").split("\n")
        labels = [l.strip() for l in labels if len(l.strip()) != 0]

        # BUGFIX: an empty label file used to raise IndexError on labels[0]
        if len(labels) == 0:
            return

        # 2.1 optionally skip the first (background) label
        background_line = labels[0]
        if ignore_first:
            labels = labels[1:]

        # 2.2 parse name/id/color fields and the optional // comment
        # BUGFIX: split("//", 1) keeps any further "//" inside the comment
        labels = [l.split("//", 1) for l in labels]
        comments = [None if len(l) == 1 else l[1].strip() for l in labels]
        labels = [l[0].strip().split(delimiter) for l in labels]
        current_labels = [l.name for l in self._get_project_labels()]
        self.db_session.begin(subtransactions=True)
        for label, comment in zip(labels, comments):
            valid_lengths = [1, 2, 3, 5]
            if len(label) not in valid_lengths:
                raise RuntimeError(f"After split got {label}. It's not in valid lengths {valid_lengths}")
            if label[0] not in current_labels:
                logging.info(f"Adding label {label}")
                if len(label) == 5:
                    # collapse the trailing r,g,b into one hex color field
                    label[2] = rgb_to_hex(label[2:])
                    # BUGFIX: `del label[3]` only removed g, leaving b to be
                    # passed as super_category_id; drop both leftovers.
                    del label[3:]
                label = [None if v == "-" else v for v in label]
                if len(label) > 1 and label[1] is not None:
                    try:
                        int(label[1])
                    except ValueError:
                        raise RuntimeError(
                            f"Got '{label[1]}' as label id which should be a number. PaddleLabel expects label name to be written before label id. e.g: 'Cat 1' is accepted, while '1 Cat' isn't"
                        )
                self.add_label(*label, comment=comment)
        self.db_session.commit()

        if ignore_first:
            return background_line

    def add_label(
            self,
            name: str,
            id: int | None = None,
            color: str | None = None,
            super_category_id: int | None = None,
            comment: str | None = None,
            commit=False,
    ):
        """Create a new Label record for this project.

        Color and id are auto-assigned when omitted. Raises RuntimeError
        when the name is missing, or the name/color/id already exists.
        Updates self.label_max_id after the insert and returns the Label.
        """
        # 1. validate the name
        if name is None or len(name) == 0:
            raise RuntimeError(f"Label name is required, got {name}")
        existing = self._get_project_labels()
        name = name.strip()
        if name in {l.name for l in existing}:
            raise RuntimeError(f"Trying to add label {name} which already exists.")

        # 2. validate (or randomly pick) a unique color
        used_colors = set(l.color for l in existing)
        if color is None:
            color = rand_hex_color(used_colors)
        else:
            if color[0] != "#":
                # a common color name: convert to its hex value
                color = name_to_hex(color)
            if color in used_colors:
                raise RuntimeError(f"Label color {color} is not unique")

        # 3. validate (or assign) a unique numeric id
        used_ids = set(int(l.id) for l in existing)
        if id is None:
            id = self.label_max_id + 1
        else:
            id = int(id)
            if id in used_ids:
                raise RuntimeError(f"Label id {id} is not unique")

        # 4. insert the record (spaces in names become underscores)
        self.db_session.begin(subtransactions=True)
        label = Label(
            project_id=self.project.project_id,
            id=id,
            name=name.replace(" ", "_"),
            color=color,
            comment=comment,
            super_category_id=super_category_id,
        )
        self.db_session.add(label)
        if commit:
            self.db_session.commit()
        # refresh label_max_id from the current label table contents
        self.label_max_id = max(l.id for l in self._get_project_labels())
        return label

    def populate_label_colors(self):
        """Assign a random, unused color to every label that lacks one."""
        labels = self._get_project_labels()
        self.db_session.begin(subtransactions=True)
        for label in labels:
            if label.color is not None:
                continue
            # re-collect colors each time so freshly assigned ones count
            taken = [l.color for l in labels]
            label.color = rand_hex_color(taken)
        self.db_session.commit()

    def export_labels(
            self,
            label_names_path: str,
            background_line: str | None = None,
            with_id: bool = False,
            db_session = None
    ) -> dict[int, int]:
        """Write one 'name: id' line per project label to label_names_path.

        :param background_line: optional first line (consumes exported id 0).
        :param with_id: export each label's stored id instead of a
            sequential 0-based id.
        :return: mapping from stored label id to the id written out.
        """
        if db_session is None:
            db_session = self.db_session
        labels = sorted(self._get_project_labels(db_session=db_session), key=lambda l: l.id)
        id_mapping: dict[int, int] = {}
        next_id = 0
        with open(label_names_path, "w", encoding="utf-8") as f:
            if background_line is not None:
                f.write(f"{background_line}\n")
                next_id += 1
            for label in labels:
                exported_id = label.id if with_id else next_id
                f.write(f"{label.name}: {exported_id}\n")
                id_mapping[label.id] = exported_id
                if not with_id:
                    next_id += 1
        return id_mapping

    def export_split(
            self,
            export_dir: Path,
            datas,
            new_paths,
            delimiter=" ",
            with_labels=True,
            annotation_ext=None,
    ):
        """Write train_list/val_list/test_list files under export_dir.

        With with_labels, each line is "<path> <0-based label ids...>" and
        data without annotations is skipped; otherwise each line pairs the
        image path with its Annotations-folder counterpart using
        annotation_ext. data.set selects which list file a row goes to.
        """
        # only used in file-file split, not in file-class split
        if annotation_ext is not None and annotation_ext[0] == ".":
            annotation_ext = annotation_ext[1:]
        label_dict = self._get_labels_id_dict()
        set_names = ["train_list", "val_list", "test_list"]
        create_dir(export_dir)
        set_files = [open(osp.join(export_dir, f"{n}.txt"), "w", encoding="utf-8") for n in set_names]
        # BUGFIX: close the list files even when an exception escapes the loop
        try:
            for data, new_path in zip(datas, new_paths):
                if not isinstance(new_path, str):
                    new_path = str(new_path[0])
                if with_labels:
                    label_ids = []
                    annotations = self._get_data_annotations(data.data_id)  # fetch this data's annotations
                    for ann in annotations:
                        label_obj = label_dict.get(ann.label_id, None)
                        if not label_obj:
                            continue
                        label_ids.append(label_obj.id - 1)  # exported ids are 0-based
                    if len(label_ids) == 0:
                        continue
                    label_ids = [str(id) for id in label_ids]
                    line = new_path + delimiter + delimiter.join(label_ids)
                    print(line, file=set_files[data.set])
                else:
                    annotation_path = new_path.replace("JPEGImages", "Annotations")
                    annotation_path = annotation_path[: annotation_path.rfind(".") + 1] + annotation_ext
                    print(new_path + delimiter + annotation_path, file=set_files[data.set])
        finally:
            for f in set_files:
                f.close()

    # def export_split(
    #         self,
    #         export_dir: Path,
    #         datas,
    #         new_paths,
    #         delimiter=" ",
    #         with_labels=True,
    #         annotation_ext=None,
    # ):
    #     # only used in file-file split, not in file-class split
    #     if annotation_ext is not None and annotation_ext[0] == ".":
    #         annotation_ext = annotation_ext[1:]
    #
    #     label_dict = self._get_labels_id_dict()
    #     set_names = ["train_list", "val_list", "test_list"]
    #     create_dir(export_dir)
    #
    #     # Prepare lists to store lines for each set
    #     set_lines = {name: [] for name in set_names}
    #
    #     def process_data(data, new_path):
    #         if type(new_path) is not str:
    #             new_path = str(new_path[0])
    #
    #         if with_labels:
    #             label_ids = []
    #             annotations = self._get_data_annotations(data.data_id)  # 获取对应的注释信息
    #             print('1',annotations)
    #             for ann in annotations:
    #                 label_obj = label_dict.get(ann.label_id, None)
    #                 if not label_obj:
    #                     continue
    #                 label_ids.append(label_obj.id - 1)
    #
    #             if len(label_ids) == 0:
    #                 return
    #
    #             label_ids = [str(id) for id in label_ids]
    #             print('2',label_ids)
    #             line = new_path + delimiter + delimiter.join(label_ids)
    #             print('3',line)
    #             set_lines[data.set].append(line)
    #         else:
    #             annotation_path = new_path.replace("JPEGImages", "Annotations")
    #             print('else_1',annotation_path)
    #             annotation_path = annotation_path[: annotation_path.rfind(".") + 1] + annotation_ext
    #             print('else_2',annotation_path)
    #             line = new_path + delimiter + annotation_path
    #             print('else_3',line)
    #             set_lines[data.set].append(line)
    #
    #     # Use ThreadPoolExecutor to process data in parallel
    #     with ThreadPoolExecutor() as executor:
    #         executor.map(process_data, datas, new_paths)
    #
    #     # Write lines to files
    #     for name in set_names:
    #         print('写入',name)
    #         with open(osp.join(export_dir, f"{name}.txt"), "w", encoding="utf-8") as f:
    #             print('写入',set_lines)
    #             f.write("\n".join(set_lines[name]))

    # Helper function to create directory if it doesn't exist

    from concurrent.futures import ThreadPoolExecutor


    @staticmethod
    def create_dir(dir_path: Path):
        """Create dir_path (and any missing parents) if it doesn't exist.

        BUGFIX: this was defined at class scope without self or
        @staticmethod, so calling it on an instance passed the instance as
        dir_path; marked @staticmethod (backward compatible for class-level
        calls). NOTE(review): it also shadows task.util.create_dir as a
        class attribute — confirm which one callers intend.
        """
        if not dir_path.exists():
            dir_path.mkdir(parents=True, exist_ok=True)

    @staticmethod
    def getSize(img_path: Path) -> tuple[str, int, int]:
        """Return ("1,height,width", height, width) for the image at img_path.

        BUGFIX: this was defined at class scope without self or
        @staticmethod; marked @staticmethod so instance calls work too.
        """
        # BUGFIX: use the context manager so the image file handle is closed
        with Image.open(img_path) as im:
            width, height = im.size
        s = ",".join(map(str, (1, height, width)))
        return s, height, width

    def add_task(
            self,
            datas: list[dict[str, str | None]],
            annotations: list[list[dict[str, str]]] | None = None,
            split: int | None = None,
    ):
        """Add Data records (and their annotations) for one task.

        :param datas: dicts of Data constructor kwargs; "path" is rewritten
            relative to project.data_dir (the input dicts are mutated).
        :param annotations: per-data lists of annotation dicts, each with a
            "label_name" (unknown names get created on the fly) — the
            "label_name" key is removed from each dict.
        :param split: force a split index; otherwise inferred from
            self.split by the first data path (default 0).
        """
        project = self.project
        assert len(datas) != 0, "Can't add task without data"
        # normalize all data paths to strings relative to the project dir
        for d in datas:
            if osp.isabs(d["path"]):
                d["path"] = osp.relpath(d["path"], project.data_dir)
            d["path"] = str(d["path"])

        # 1. find the task split
        if split is None:
            split_idx = 0
            for idx, split_paths in enumerate(self.split):
                if datas[0]["path"] in split_paths:
                    split_idx = idx
                    break
        else:
            split_idx = split

        def get_label(name):
            # linear scan is fine: project label counts are small
            for lab in self._get_project_labels():
                if lab.name == name:
                    return lab
            return None

        if annotations is None:
            annotations = []
        while len(annotations) < len(datas):
            annotations.append([])
        data_list, ann_list = [], []
        for anns, data in zip(annotations, datas):
            # 2. build the data record
            data['project_id'] = project.project_id
            data['set'] = split_idx
            data = Data(**data)
            data_list.append(data)
            total_anns = 0
            # 3. build the data's annotations
            for ann in anns:
                if len(ann.get("label_name", "")) == 0:
                    continue
                label = get_label(ann["label_name"])
                if label is None:
                    # BUGFIX: the color was passed positionally into
                    # add_label's `id` parameter; pass it by keyword.
                    label = self.add_label(ann["label_name"], color=ann.get("color"), commit=True)
                del ann["label_name"]
                # NOTE(review): data.data_id may still be None until the new
                # Data row is flushed — confirm the ORM assigns it here.
                ann = Annotation(label_id=label.label_id, project_id=project.project_id, data_id=data.data_id,
                                 **ann)
                # BUGFIX: the original did a_list.append(a_list) — appending
                # the list to itself — so no annotation was ever persisted.
                ann_list.append(ann)
                total_anns += 1
            logging.debug(f"{data.path} with {total_anns} annotation(s) under set {split_idx} discovered")
        self.db_session.add_all(data_list)
        self.db_session.add_all(ann_list)

    def label_name2label_id(self, label_name: str):
        """Return the DB primary key (label_id) for a label name, or None."""
        return next(
            (lab.label_id for lab in self.project.labels if lab.name == label_name),
            None,
        )

    def label_name2id(self, label_name: str):
        """Return the numeric label id for a label name, or None."""
        return next(
            (lab.id for lab in self.project.labels if lab.name == label_name),
            None,
        )

    def create_coco_labels(self, labels):
        """Create project labels from COCO category dicts, resolving
        supercategory dependencies.

        A category whose supercategory label doesn't exist yet is re-queued
        once (tracked via tried_names); the loop is bounded at
        2 * len(catgs) iterations to guard against an invalid dependency
        graph. The session is committed after every processed category so
        later lookups can see earlier inserts.

        :param labels: iterable of COCO category dicts with keys "name",
            "id" and optionally "color" and "supercategory".
        """
        catgs = deque()

        for catg in labels:
            catgs.append(catg)

        tried_names = []  # guard against invalid dependency graph
        for _ in range(len(catgs) * 2):
            if len(catgs) == 0:
                break
            catg = catgs.popleft()
            # skip categories that already exist as project labels
            if self.label_name2id(catg["name"]) is not None:
                continue

            # normalize color: [r, g, b] list or a common color name → hex
            color = catg.get("color", None)
            if color is not None:
                color = rgb_to_hex(color) if isinstance(color, list) else name_to_hex(color)

            if (
                    "supercategory" not in catg.keys()
                    or catg["supercategory"] == "none"
                    or catg["supercategory"] is None
                    or len(catg["supercategory"]) == 0
            ):
                # no meaningful supercategory: create as a top-level label
                self.add_label(
                    name=catg["name"],
                    id=catg["id"],
                    super_category_id=None,
                    color=color,
                )
            else:
                super_category_id = self.label_name2label_id(catg["supercategory"])
                if super_category_id is None and catg["name"] not in tried_names:
                    # parent not created yet: retry this category once later
                    catgs.append(catg)
                    tried_names.append(catg["name"])
                else:
                    self.add_label(
                        name=catg["name"],
                        id=catg["id"],
                        super_category_id=super_category_id,
                        color=color,
                    )
            self.db_session.commit()
    @staticmethod
    def calculate_file_hash(file_path):
        """Return the MD5 hex digest of the file at file_path."""
        digest = hashlib.md5()
        with open(file_path, "rb") as f:
            while chunk := f.read(4096):
                digest.update(chunk)
        return digest.hexdigest()


    def default_importer(
            self,
            data_dir: Path | None = None,
            filters=None,
            with_size: bool = True,
    ):
        """Discover image files under data_dir, register new ones as Data
        rows, then push freshly imported data ids onto this project's
        redis list.

        :param data_dir: dataset root; defaults to project.data_dir.
        :param filters: listdir filters; defaults to hidden-file exclusion
            plus the known image extensions.
        :param with_size: also open each image to record its size string.
        :return: (True, '') on success.
        """
        # BUGFIX: the filter dict was a mutable default argument shared
        # across calls; build a fresh one per call instead.
        if filters is None:
            filters = {"exclude_prefix": ["."], "include_postfix": image_extensions}
        logging.info("进入default_importer")
        redis_key = REDIS_DATABASE_KEY + str(self.project.project_id)
        # ids already queued in redis for this project
        ex_list = [elem.decode('utf-8') for elem in RedisWrapper.lrange(redis_key, 0, -1)]
        data_dir = self.project.data_dir if data_dir is None else data_dir
        assert data_dir is not None

        existing = self.db_session.query(Data).filter(
            Data.project_id == self.project.project_id,
        ).all()
        origin_image_lists = [i.path for i in existing]
        origin_md5_lists = [i.md5 for i in existing]

        seen_hashes = {}

        # import new files in parallel
        self._process_files_multithreaded(data_dir, filters, with_size, origin_image_lists, origin_md5_lists, seen_hashes)

        # queue system-created, not-yet-queued data ids
        for i in self.db_session.query(Data).filter(
            Data.project_id == self.project.project_id,
            Data.is_delete == YesOrNoEnum.NO.value,
            Data.create_user_id == None,
            Data.data_id.notin_(ex_list)
        ).all():
            RedisWrapper.r.rpush(redis_key, i.data_id)
        logging.info("redis插入数据成功")

        return True, ''


    def _process_files_multithreaded(self, data_dir, filters, with_size, origin_image_lists, origin_md5_lists, seen_hashes):
        """Scan data_dir for files matching `filters` and add_task() each new
        one (skipping known paths and duplicate md5 hashes), processing
        contiguous chunks of the file list on up to 8 worker threads.

        NOTE(review): every worker shares self.db_session; SQLAlchemy
        sessions are generally not thread-safe — confirm the session/engine
        configuration before relying on this.
        """
        img_list = listdir(data_dir, filters)
        # BUGFIX: an empty directory caused ZeroDivisionError (len // 0)
        if not img_list:
            return
        num_threads = min(len(img_list), 8)  # cap at 8 worker threads
        chunk_size = len(img_list) // num_threads
        hash_lock = threading.Lock()  # guards check-then-insert on seen_hashes
        threads = []

        def process_files(start, end):
            self.db_session.begin(subtransactions=True)
            for data_path in img_list[start:end]:
                if data_path in origin_image_lists:
                    continue  # path already imported

                img_path = Path(data_dir) / data_path
                if with_size:
                    # record image size as "1,height,width"
                    # BUGFIX: close the image handle instead of leaking it
                    with Image.open(img_path) as im:
                        width, height = im.size
                    size = ",".join(map(str, (1, height, width)))
                else:
                    size = None

                md5 = self.calculate_file_hash(img_path)
                # BUGFIX: the duplicate check and insert on the shared dict
                # were not atomic across threads; guard them with a lock.
                with hash_lock:
                    if md5 in seen_hashes or md5 in origin_md5_lists:
                        continue
                    seen_hashes[md5] = img_path

                self.add_task([{"path": data_path, "size": size, "md5": md5}])
            self.db_session.commit()

        for i in range(num_threads):
            start = i * chunk_size
            end = (i + 1) * chunk_size if i < num_threads - 1 else len(img_list)
            thread = threading.Thread(target=process_files, args=(start, end))
            threads.append(thread)
            thread.start()

        for thread in threads:
            thread.join()




    def create_warning(self, data_dir: Path) -> None:
        """Drop a paddlelabel.warning marker file into the dataset folder."""
        data_dir = Path(data_dir)
        if not data_dir.exists():
            raise FileNotFoundError(f"Dataset Path specified {data_dir} doesn't exist.")

        marker = data_dir / "paddlelabel.warning"
        if marker.exists():
            return
        marker.write_text(
            "PP Label is using files stored under this folder!\nChanging file in this folder may cause issues."
        )
