import base64
import logging
import os
import json
import cv2
import numpy as np
from pycocotools import mask as MaskUtils
from pathlib import Path
from PIL import Image
from tqdm import tqdm


def convert_mask_to_coco(classes, masks_dir, type="train", suffix=".png"):
    """Convert class-index mask images into a COCO instance-segmentation JSON.

    Every ``.png`` file in ``masks_dir`` is read as a grayscale image whose
    pixel values are class indices.  One COCO annotation is produced per
    contour of each class, and the result is written next to ``masks_dir``
    as ``coco-seg-{type}.json``.

    :param classes: list of class names; each element's index is used as its
        COCO category id (index 0 included).
    :param masks_dir: path of the directory containing the mask images.
    :param type: dataset split name ("train", "val" or "test"); only used in
        the output file name.
    :param suffix: suffix of the corresponding source images; the mask's
        ".png" extension is replaced by it in each image's ``file_name``.
    :return: None.  Side effect: writes the COCO JSON file to disk.
    """
    output = {
        "info": {
            "description": "coco dataset",
            "version": "1.0",
            "year": 2023
        },
        "licenses": [],
        "images": [],
        "annotations": [],
        "categories": [
            {"id": index, "name": name, "supercategory": "none"}
            for index, name in enumerate(classes)
        ],
    }

    image_id = 1
    annotation_id = 1
    # Only .png files are converted; filtering up-front keeps the [i/total] log accurate.
    mask_file_list = [f for f in os.listdir(masks_dir) if f.endswith(".png")]
    file_sum = len(mask_file_list)
    for filename in mask_file_list:
        mask_path = Path(masks_dir) / filename
        # todo send this log to server
        logging.info(f"converting mask file to coco [{image_id}/{file_sum}]:{mask_path}")
        # str(): older OpenCV builds reject pathlib.Path arguments.
        mask_data_cv = cv2.imread(str(mask_path), cv2.IMREAD_GRAYSCALE)
        if mask_data_cv is None:
            # Unreadable/corrupt file — skip it instead of crashing on .shape below.
            logging.warning(f"unreadable mask file skipped: {mask_path}")
            continue
        height, width = mask_data_cv.shape[:2]
        output["images"].append({
            "id": image_id,
            "file_name": filename.replace(".png", suffix),
            "width": width,
            "height": height
        })
        for index, cls in enumerate(classes):
            # Binary mask of the pixels belonging to this class index.
            binary_mask = np.uint8(mask_data_cv == index) * 255
            contours, _ = cv2.findContours(binary_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            for contour in contours:
                # Degenerate (zero-area) contours would produce invalid polygons.
                if cv2.contourArea(contour) > 0:
                    segmentation = [contour.flatten().tolist()]
                    rle = MaskUtils.frPyObjects(segmentation, height, width)
                    area = MaskUtils.area(rle)[0]
                    bbox = MaskUtils.toBbox(rle).flatten()
                    output["annotations"].append({
                        "id": annotation_id,
                        "image_id": image_id,
                        "category_id": index,
                        "segmentation": segmentation,
                        "area": area.tolist(),
                        "bbox": bbox.tolist(),
                        "iscrowd": 0
                    })
                    annotation_id += 1
        image_id += 1
    # Path.parent yields "." (not "") for a bare directory name, unlike
    # os.path.dirname which produced "/coco-seg-*.json" in that case.
    out_file = Path(masks_dir).parent / f"coco-seg-{type}.json"
    with open(out_file, "w") as f:
        json.dump(output, f)


def convert_coco_to_yolo(coco_file_path, type="train", use_segments=False):
    """Convert a COCO annotation JSON into per-image YOLO label files.

    One ``.txt`` file is written per annotated image under
    ``<coco dir>/labels/<type>/``; each line is either ``cls cx cy w h``
    (normalized) or, with ``use_segments``, ``cls x1 y1 x2 y2 ...``
    (normalized polygon).

    :param coco_file_path: path of the COCO annotation JSON file.
    :param type: dataset split name; used as the labels sub-directory name.
    :param use_segments: write polygon segments instead of bounding boxes.
    :return: None.  Side effect: creates the label files.
    """
    if not os.path.exists(coco_file_path):
        return
    with open(coco_file_path) as f:
        data = json.load(f)
    images_dict = {"{:g}".format(x["id"]): x for x in data["images"]}
    label_dir = Path(os.path.dirname(coco_file_path)) / "labels" / type
    # Fix: plain mkdir() raised if the directory (or a parent) already existed.
    label_dir.mkdir(parents=True, exist_ok=True)
    imgToAnns = {}
    for ann in data["annotations"]:
        # Fix: the original appended to a missing key and raised KeyError.
        imgToAnns.setdefault(ann["image_id"], []).append(ann)
    for img_id, anns in imgToAnns.items():
        img = images_dict[f"{img_id:g}"]
        height, width, file_name = img["height"], img["width"], img["file_name"]
        bboxes = []
        segments = []
        for ann in anns:
            # COCO xywh (top-left) -> normalized center-x, center-y, w, h.
            box = np.array(ann["bbox"], dtype=np.float64)
            box[:2] += box[2:] / 2
            box[[0, 2]] /= width
            box[[1, 3]] /= height
            if box[2] <= 0 or box[3] <= 0:
                continue  # degenerate box: skip the annotation entirely
            cls = ann["category_id"]
            box = [cls] + box.tolist()
            if box not in bboxes:
                bboxes.append(box)
            if use_segments:
                # Fix: the original built `s` only inside this branch but used
                # it unconditionally, raising NameError when use_segments was
                # False; all segment handling now stays inside the branch.
                s = ann["segmentation"][0]
                s = (np.array(s).reshape(-1, 2) / np.array([width, height])).reshape(-1).tolist()
                s = [cls] + s
                if s not in segments:
                    segments.append(s)

        # Fix: the output file must be opened for writing (mode was missing).
        with open((label_dir / file_name).with_suffix(".txt"), "w") as file:
            for entry in (segments if use_segments else bboxes):
                line = tuple(entry)  # cls, then box or segment coordinates
                file.write(("%g " * len(line)).rstrip() % line + "\n")


def convert_labelmeJson_to_yolo(classes, labels_dir, use_segment):
    """Convert labelme JSON annotation files into YOLO label files.

    Output ``.txt`` files are written to a sibling "yoloLabels" directory,
    one per input JSON; each line is either a normalized polygon
    (``cls x1 y1 ...``) or a normalized ``cls cx cy w h`` box.

    :param classes: list of class names; ``classes.index(label)`` is the
        class id.  Raises ValueError for labels not present in the list.
    :param labels_dir: directory containing the labelme JSON files.
    :param use_segment: True to emit polygons; False to emit boxes (the
        shape's points are then read as two corner points).
    :return: None.  Side effect: writes the YOLO label files.
    """
    path = Path(labels_dir)
    output = path.parent / "yoloLabels"
    if not path.is_dir():
        return
    # Fix: the output directory was never created, so open() below failed.
    output.mkdir(parents=True, exist_ok=True)
    # Fix: rglob("*") also fed non-JSON files to json.load and crashed.
    for file in path.rglob("*.json"):
        if not file.is_file():
            continue
        with file.open("r", encoding="utf-8") as f:
            labelme_json = json.load(f)
        height = labelme_json["imageHeight"]
        width = labelme_json["imageWidth"]
        segments = []
        for anno in labelme_json["shapes"]:
            points = anno["points"]
            cls = classes.index(anno["label"])
            if use_segment:
                # Normalize every polygon vertex by the image size.
                s = (np.array(points) / np.array([width, height])).reshape(-1).tolist()
            else:
                # Two corner points -> normalized center-x, center-y, w, h.
                points = np.array(points, dtype=np.float64).reshape(-1)
                points[2] = points[2] - points[0]
                points[3] = points[3] - points[1]
                points[:2] += points[2:] / 2
                points[[0, 2]] /= width
                points[[1, 3]] /= height
                s = points.tolist()
            s = [cls] + s
            if s not in segments:
                segments.append(s)
        # Fix: the file must be opened for writing (mode was missing).
        with open((output / file.name).with_suffix(".txt"), "w") as f:
            for seg in segments:
                line = tuple(seg)
                f.write(("%g " * len(line)).rstrip() % line + "\n")


def transform(start, length):
    """Clip a normalized, centered box extent so it fits inside [0, 1].

    ``start`` is the box's normalized center coordinate along one axis and
    ``length`` its normalized extent.  If the box overflows either border it
    is shrunk (keeping the center) to the largest extent that still fits; a
    box that would lose more than 25% of its extent is dropped.

    :param start: normalized center coordinate, expected in [0, 1].
    :param length: normalized box extent.
    :return: the (possibly clipped) extent, or 0 if the box should be dropped.
    """
    half = length / 2
    if half > start:
        # Box overflows the left/top border (start - half < 0).
        # Fix: the original returned `start` (a coordinate) here; the
        # symmetric right-border branch below returns a clipped *length*
        # ((1 - start) * 2 - eps), so this branch now mirrors it.
        clipped = start * 2 - 0.0001
        if clipped < length * 0.75:  # >25% would be cut off — drop the box
            return 0
        return clipped
    if start + half > 1:
        # Box overflows the right/bottom border.
        clipped = (1 - start) * 2 - 0.0001
        if clipped < length * 0.75:
            return 0
        return clipped
    # Fully inside the image: keep the original extent.
    return length


def convert_crowdHuman_to_yolo(label_path, data_type, img_suffix):
    """Convert a CrowdHuman annotation (.odgt) file into YOLO label files.

    Each line of the annotation file is a JSON record with an image "ID" and
    its "gtboxes".  Head boxes ("hbox") are normalized against the matching
    image under ``<root>/images/<data_type>/`` and written as class 0 to
    ``<root>/labels/<data_type>/<ID>.txt``.

    :param label_path: path of the CrowdHuman annotation file.
    :param data_type: dataset split name ("train", "val", ...).
    :param img_suffix: image file extension without the dot (e.g. "jpg").
    :return: None.  Side effect: writes the YOLO label files.
    """
    p = Path(label_path)
    images_dir = p.parent / "images" / data_type
    labels_dir = p.parent / "labels" / data_type
    labels_dir.mkdir(parents=True, exist_ok=True)
    if not p.exists():
        return
    # `with` closes the handle — the original also called f.close() redundantly.
    with open(p, "r", encoding="utf-8") as f:
        lines = f.readlines()
    for line in tqdm(lines, ascii=True):
        labels = json.loads(line.strip())
        filename = labels["ID"]
        # Fix: the f-string had no placeholder; log the record being processed.
        logging.info(f"converting boxes in:{filename}")
        label_path_yolo = (labels_dir / filename).with_suffix(".txt")
        img_path = (images_dir / filename).with_suffix(f".{img_suffix}")
        # `with` releases the PIL file handle (the original leaked it).
        with Image.open(img_path) as img:
            img_width, img_height = img.size
        contents = []
        for box in labels["gtboxes"]:
            # Skip non-person ("mask") boxes and those flagged as ignore.
            if box["tag"] == "mask" or box.get("extra", {}).get("ignore", 0) == 1:
                continue
            # fbox: full-body box; vbox: visible body box; hbox: head box
            x, y, w, h = box["hbox"]
            # Normalized center coordinates.
            x = (x + w / 2) / img_width
            y = (y + h / 2) / img_height
            # Normalized width/height.
            w = w / img_width
            h = h / img_height
            # Clip boxes overflowing the image; transform() returns 0 when
            # more than 25% of the box would be cut off — then drop it.
            w = transform(x, w)
            if w == 0:
                continue
            h = transform(y, h)
            if h == 0:
                continue
            # --- normalized center x, center y, width, height --- #
            contents.append([0, x, y, w, h])
        with open(label_path_yolo, "w") as f:
            for cont in contents:
                f.write(f"{cont[0]} {cont[1]:.6f} {cont[2]:.6f} {cont[3]:.6f} {cont[4]:.6f}\n")


def view_yolo(src_img="E:/data/CrowdHuman20250110/images/train/273271,1a02900084ed5ae8.jpg",
              labels_path="E:/data/CrowdHuman20250110/labels/train/273271,1a02900084ed5ae8.txt",
              out_path="view.png"):
    """Render YOLO labels onto their image for a visual sanity check.

    Generalized: the original hard-coded the image/label/output paths; they
    are now parameters whose defaults preserve the old behavior.

    :param src_img: path of the source image.
    :param labels_path: path of the YOLO label file (``cls cx cy w h`` lines).
    :param out_path: where the annotated image is written.
    :return: None.  Side effect: writes the annotated image.
    """
    img_data_cv = cv2.imread(src_img)
    # Image size read from the loaded array — no separate PIL open needed.
    img_height, img_width = img_data_cv.shape[:2]
    with open(labels_path, "r") as file:
        for line in file:
            points = np.array(line.split(" ")[1:], dtype=np.float64).reshape(-1)
            # Denormalize, then shift the center to the top-left corner.
            points[[0, 2]] *= img_width
            points[[1, 3]] *= img_height
            points[:2] -= (points[2:] / 2)
            points = points.astype(int)
            cv2.rectangle(img_data_cv, (points[0], points[1]),
                          (points[0] + points[2], points[1] + points[3]),
                          [220, 20, 60], 2)
    # Fix: the image was rewritten once per box; write it once after drawing.
    cv2.imwrite(out_path, img_data_cv)

def base64encode(img_path="E:/data/test/273275,6af2200061012bb2.jpg",
                 out_path="predictBody.json",
                 model_id="yolo11n-WsVZ_taCesoohBv4daeJq"):
    """Write a ``cv_predict_image`` request body with a base64-encoded image.

    Generalized: the image path, output path and model id were hard-coded;
    they are now parameters whose defaults preserve the old behavior.

    :param img_path: path of the image file to encode.
    :param out_path: path of the JSON file to write the request body to.
    :param model_id: model identifier placed in the request parameters.
    :return: None.  Side effect: writes the JSON request body.
    """
    with open(img_path, "rb") as f:
        img_data = f.read()
    body = {
        "cmd": "cv_predict_image",
        "params": {
            "ret_type": "image",
            "task_type": "object_detection",
            "image": base64.b64encode(img_data).decode("utf-8"),
            "model_id": model_id,
        },
    }
    with open(out_path, "w") as f2:
        json.dump(body, f2)


# Script entry point: build the prediction request body from the sample image.
if __name__ == "__main__":
    base64encode()
