import requests
import config
import glob
import numpy as np
import torch
import cv2
import os
import datetime
import shutil
from utils import nms_3d, write_json, change_image_anno_format, change_pcd_anno_format, change_fuse_image_anno_format
from view_anno import view_vendor_image_anno, view_vendor_pcd_anno, view_own_image_anno, view_own_pcd_anno
from pathlib import Path
import zipfile
from pcdet.config import cfg
from pcdet.datasets import DatasetTemplate
from pcdet.models import load_data_to_gpu
from fuse_detection import FuseDetection
import json
# Own YOLO image model: class id (string) -> class name.
own_image_class_map = {"0":"car", "1":"truck", "2":"motorcycle", "3":"bicycle", "4":"tricycle", "5":"bus", "6":"pedestrain"}
#own pcd 1car 2truck 3construction_vehicle 4bus 5trailer 6barrier 7motorcycle 8bicycle 9pedestrain 10traffic_cone type, label is string
#vendor pcd 1pedestrain 2motorcycle 3vehicle 4tricycle, type is int
pcd_map_own_to_vendor = {1:"3",2:"3",3:"3",4:"3",5:"3",7:"2",8:"2",9:"1"}#vendor is better, 6barrier 10traffic_cone is deleted

# Sensor-specific file-name fragments used to recognize which OBS zip holds which modality.
image_basename = "_GMSLCam_2"
pcd_basename = "_pandarat128_2372"
image_zip_name = "jpg" + image_basename + ".zip"
fuse_image_zip_name = "jpg" + pcd_basename + image_basename + ".zip"
pcd_zip_name = "pcd" + pcd_basename + ".zip"

# Local working directories and OBS upload prefixes.
# NOTE(review): "visualizd" is misspelled, but the key is used consistently below, so it is kept.
data_path_dict = {
    "downloaded_data_save_path": "./downloaded_data",
    "generated_data_save_path": "./generated_data",
    "visualizd_data_save_path": "./visualized_data",
    "pre_annotation_upload_path": "pre_annotation_result/",
    "mining_upload_path": "ai_label_result/"
}

# When True, intermediate data is kept on disk and annotation previews are rendered.
IF_DEBUG = False

class DemoDataset(DatasetTemplate):
    """Minimal pcdet dataset that feeds point-cloud files to the model.

    ``root_path`` may be a directory (every ``*{ext}`` file inside is used)
    or a single file. Supported extensions: ``.bin``, ``.npy`` and ASCII ``.pcd``.
    """

    def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None, ext=".bin"):
        """
        Args:
            dataset_cfg: pcdet DATA_CONFIG node.
            class_names: detection class names.
            training: forwarded to DatasetTemplate.
            root_path: pathlib.Path to one point-cloud file or a directory of them.
            logger: forwarded to DatasetTemplate.
            ext: file extension to load (".bin", ".npy" or ".pcd").
        """
        super().__init__(
            dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
        )
        self.root_path = root_path
        self.ext = ext
        data_file_list = glob.glob(str(root_path / f"*{self.ext}")) if self.root_path.is_dir() else [self.root_path]
        data_file_list.sort()
        self.sample_file_list = data_file_list

    def __len__(self):
        return len(self.sample_file_list)

    def __getitem__(self, index):
        """Load one point cloud as an (N, 5) array and run pcdet's prepare_data on it."""
        if self.ext == ".bin":
            # points = np.fromfile(self.sample_file_list[index], dtype=np.float32).reshape(-1, 4)  # original 4-column layout
            points = np.fromfile(self.sample_file_list[index], dtype=np.float32).reshape(-1, 5)  # for nuscenes (5 columns per point)
        elif self.ext == ".npy":
            points = np.load(self.sample_file_list[index])
        elif self.ext == ".pcd":
            # NOTE(review): only ASCII "DATA" sections are handled here; a binary
            # PCD file would not parse with float() below — confirm inputs are ASCII.
            with open(self.sample_file_list[index], "r", encoding="utf-8") as f:
                lines = f.readlines()
            # Locate the field list and the first data line in the PCD header.
            data_start_idx = 0
            fields = []
            for i, line in enumerate(lines):
                if line.startswith("FIELDS"):
                    fields = line.split()[1:]
                elif line.startswith("DATA"):
                    data_start_idx = i + 1
                    break

            if "x" not in fields or "y" not in fields or "z" not in fields or "intensity" not in fields:
                raise ValueError("PCD file does not contain x, y, z, and intensity fields.")

            # Column indices of the required fields.
            x_idx = fields.index("x")
            y_idx = fields.index("y")
            z_idx = fields.index("z")
            intensity_idx = fields.index("intensity")

            # Parse x, y, z, intensity per point, skipping nan and empty lines.
            data = []
            for line in lines[data_start_idx:]:
                if "nan" in line:
                    continue
                if line.strip():  # skip empty lines
                    values = line.split()
                    x = float(values[x_idx])
                    y = float(values[y_idx])
                    z = float(values[z_idx])
                    intensity = float(values[intensity_idx])
                    data.append([x, y, z, intensity])

            # BUG FIX: reshape(-1, 4) keeps the array 2-D even when every point was
            # filtered out (e.g. an all-nan file); np.array([]) alone is 1-D and
            # would make the hstack below raise a dimension mismatch.
            data = np.array(data, dtype=np.float64).reshape(-1, 4)
            # Append a zero fifth column so the layout matches the 5-column .bin path.
            zeros = np.zeros((data.shape[0], 1))
            points = np.hstack((data, zeros))
        else:
            raise NotImplementedError

        input_dict = {
            "points": points,
            "frame_id": index,
        }

        data_dict = self.prepare_data(data_dict=input_dict)
        return data_dict

def post_process(pred_dict, score_th=0.3, iou_th=0.1):
    """Confidence-filter and NMS one frame of raw model predictions.

    Args:
        pred_dict: dict with "pred_boxes"/"pred_scores"/"pred_labels" tensors.
        score_th: minimum confidence to keep a detection.
        iou_th: IoU threshold passed to nms_3d.

    Returns:
        {} when nothing survives, otherwise a dict with the filtered
        "pred_boxes"/"pred_scores"/"pred_labels" tensors on CPU.
    """
    boxes = pred_dict["pred_boxes"].cpu()
    scores = pred_dict["pred_scores"].cpu()
    labels = pred_dict["pred_labels"].cpu()

    # Drop low-confidence detections before running NMS.
    conf_mask = scores > score_th
    boxes = boxes[conf_mask]
    scores = scores[conf_mask]
    labels = labels[conf_mask]

    keep = nms_3d(boxes, scores, iou_th)
    if keep.numel() == 0:
        return {}  # no detection
    return {
        "pred_boxes": boxes[keep],
        "pred_scores": scores[keep],
        "pred_labels": labels[keep],
    }

def generate_image_annotation_json(labels, coordinates, confs, image_name, image_w, image_h):
    """Build a labelme-style annotation dict for one image.

    Args:
        labels: list of class-id strings, one per detected box.
        coordinates: list of [x1, y1, x2, y2] boxes, parallel to labels.
        confs: list of detection confidences, parallel to labels.
        image_name: image path; only its basename is stored in the output.
        image_w: image width in pixels.
        image_h: image height in pixels.

    Returns:
        dict ready to be serialized as the annotation json.
    """
    shapes = []
    for label, points, conf in zip(labels, coordinates, confs):
        shapes.append({
            "label": label,  # class id as string
            "points": points,
            "conf": conf,
            "shape_type": "rectangle",
            "group_id": "null",
            "flags": {}
        })
    # Count boxes per label in a single pass (the original set + labels.count
    # comprehension was O(n^2) over the detections).
    summary = {}
    for label in labels:
        summary[label] = summary.get(label, 0) + 1
    summary["rect"] = len(shapes)
    data = {
        "version": "4.6.0",
        "flags": {},
        "shapes": shapes,
        "summary": summary,
        "imagePath": os.path.basename(image_name),
        "imageHeight": image_h,
        "imageWidth": image_w
    }
    return data

def detect_one_image(yolo_model, image_path, imageConfTh, generated_path, visualized_path, has_truth, logger):
    """Run the YOLO model on one image and dump detections as a labelme-style json.

    Boxes with confidence <= imageConfTh are discarded. In debug mode the own
    (and, when truth exists, the vendor) annotations are also rendered.
    """
    image = cv2.imread(image_path)
    image_h, image_w = image.shape[:2]
    labels, coordinates, confs = [], [], []
    for result in yolo_model(image):
        for box in result.boxes:
            conf = box.conf.item()
            if conf <= imageConfTh:
                continue
            labels.append(str(int(box.cls.item())))
            coordinates.append(box.xyxy.tolist()[0])
            confs.append(conf)
    logger.info("{} detection done".format(image_path))
    detection_result = generate_image_annotation_json(labels, coordinates, confs, image_path, image_w, image_h)
    json_name = image_path.split("/")[-1].split(".")[0] + ".json"
    write_json(detection_result, os.path.join(generated_path, json_name))
    if IF_DEBUG:
        view_own_image_anno(image_path, generated_path, visualized_path)
        if has_truth:
            view_vendor_image_anno(image_path, visualized_path)

def generate_pcd_annotation_json(labels, rotations, scales, positions, class_map=None):
    """Build the vendor-format cube annotation dict for one point cloud.

    Own labels 6 (barrier) and 10 (traffic_cone) have no vendor equivalent and
    are dropped; the rest are mapped to the vendor's 4-class ids.

    Args:
        labels: list of own int class ids, one per box.
        rotations: list of z-rotations, parallel to labels.
        scales: list of [sx, sy, sz], parallel to labels.
        positions: list of [x, y, z], parallel to labels.
        class_map: own-id -> vendor-id mapping; defaults to pcd_map_own_to_vendor.

    Returns:
        dict with "annotations" (vendor cubes) and a per-vendor-label "summary".
    """
    if class_map is None:
        class_map = pcd_map_own_to_vendor
    annotations = []
    summary = {}
    for i in range(len(labels)):  # 10 classes reduced to 4 vendor classes
        if labels[i] in (6, 10):  # barrier / traffic_cone: not in vendor label set
            continue
        vendor_label = str(class_map[labels[i]])
        annotation = {
            "label": vendor_label,
            "cube": {
                "rotation": {"x": 0, "y": 0, "z": rotations[i]},
                "scale": {"x": scales[i][0], "y": scales[i][1], "z": scales[i][2]},
                "position": {"x": positions[i][0], "y": positions[i][1], "z": positions[i][2]}
            },
            "group_id": "null",
            "flags": {}
        }
        annotations.append(annotation)
        # BUG FIX: several own classes map to the same vendor class (e.g. 1..5 -> "3").
        # The old {map[label]: labels.count(label) for label in set(labels)} summary
        # overwrote counts on key collisions instead of summing them.
        summary[vendor_label] = summary.get(vendor_label, 0) + 1

    summary["cube"] = len(annotations)

    data = {
        "annotations": annotations,
        "summary": summary
    }
    return data

def detect_one_pcd(voxelnext_model, pcd_path, pcdConfTh, generated_path, visualized_path, has_truth, logger):
    """Run the VoxelNeXt model on one .pcd file and dump vendor-format annotations.

    pcd_path points at a single file, so the dataset loop below runs at most
    once and final_ret holds that frame's filtered result.
    """
    demo_dataset = DemoDataset(dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, training=False, root_path=Path(pcd_path), ext=".pcd", logger=logger)
    # BUG FIX: final_ret was only assigned inside the loop, so an empty dataset
    # left it undefined and crashed at the len() check below.
    final_ret = {}
    with torch.no_grad():
        for idx, data_dict in enumerate(demo_dataset):
            data_dict = demo_dataset.collate_batch([data_dict])
            load_data_to_gpu(data_dict)
            pred_dicts, _ = voxelnext_model.forward(data_dict)
            final_ret = post_process(pred_dicts[0], score_th=pcdConfTh)
    if len(final_ret) == 0:  # no detection
        labels = []
        rotations = []
        scales = []
        positions = []
    else:
        labels = final_ret["pred_labels"].numpy().tolist()
        # NOTE(review): assumes box column 8 is the z-rotation and columns 3:6 /
        # 0:3 are size / center — confirm against the model's box layout.
        rotations = final_ret["pred_boxes"][:, 8].tolist()
        scales = final_ret["pred_boxes"][:, 3:6].tolist()
        positions = final_ret["pred_boxes"][:, 0:3].tolist()
    logger.info("{} detection done".format(pcd_path))
    detection_result = generate_pcd_annotation_json(labels, rotations, scales, positions)
    write_json(detection_result, os.path.join(generated_path, pcd_path.split("/")[-1].split(".")[0] + ".json"))
    if IF_DEBUG:
        view_own_pcd_anno(pcd_path, generated_path, visualized_path)
        if has_truth:
            view_vendor_pcd_anno(pcd_path, visualized_path)


def download_from_obs_and_check(obs_client, bucket_name, downloaded_path, data, task_type, logger):
    """Download the zips required for the given task type from OBS and unzip them.

    task_type: "1" image only, "2" pcd only, "3" joint (pcd + fused image +
    calibration.json). A vendor truth archive ("json.zip") is downloaded too
    when present. Failed downloads are retried up to 5 times; if files are
    still missing after that, an Exception is raised.
    """
    logger.info("start download data")
    response = obs_client.listObjects(bucket_name, prefix=data.get("datasetPath"))
    if response.status < 300:
        pass
    else:
        logger.error("{} listObjects failed".format(bucket_name + "/" + data.get("datasetPath")))
    objects = []
    if task_type == "1":  # only need image
        for obj in response.body.contents:
            if image_zip_name in obj.key:
                objects.append(obj.key)
        if len(objects) < 1:
            raise Exception("no {} file in {}".format(image_zip_name, bucket_name + "/" + data.get("datasetPath")))
    if task_type == "2":  # only need pcd
        for obj in response.body.contents:
            if pcd_zip_name in obj.key:
                objects.append(obj.key)
        if len(objects) < 1:
            raise Exception("no {} file in {}".format(pcd_zip_name, bucket_name + "/" + data.get("datasetPath")))
    if task_type == "3":  # joint anno, need image, pcd, calibration
        for obj in response.body.contents:
            if pcd_zip_name in obj.key or fuse_image_zip_name in obj.key or "calibration.json" in obj.key:
                objects.append(obj.key)
        if len(objects) < 3:
            raise Exception("no enough necesary files in {}".format(bucket_name + "/" + data.get("datasetPath")))

    for obj in response.body.contents:  # download truth zip when the vendor provided one
        if "json.zip" in obj.key:
            objects.append(obj.key)
            logger.info("json.zip in {}".format(bucket_name + "/" + data.get("datasetPath")))

    logger.info("totally {} needed zip/json files in {}".format(len(objects), bucket_name + "/" + data.get("datasetPath")))
    failed_list = []  # download failed names, retry later
    for obj in objects:
        #if "ft120" in obj:#currently we don"t use ft120 lidar
        #    continue
        response = obs_client.getObject(bucket_name, obj, downloadPath=os.path.join(downloaded_path, obj.split("/")[-1]))
        if response.status < 300:
            pass
        else:
            logger.error("{} download failed".format(obj))
            failed_list.append(obj)
    if len(failed_list) == 0:  # all files downloaded on the first pass
        logger.info("all {} files download success".format(len(objects)))
    else:
        retry_times = 1
        while len(failed_list) > 0:
            logger.info("retry downloading times:{}".format(retry_times))
            # BUG FIX: the original removed entries from failed_list while
            # iterating it, which skips elements; rebuild the list each round.
            still_failed = []
            for obj in failed_list:
                response = obs_client.getObject(bucket_name, obj, downloadPath=os.path.join(downloaded_path, obj.split("/")[-1]))
                if response.status < 300:
                    pass
                else:
                    # BUG FIX: this is the download path; the message wrongly said "upload failed".
                    logger.error("retry downloading times:{}, {} download failed".format(retry_times, obj))
                    still_failed.append(obj)
            failed_list = still_failed
            retry_times += 1
            # BUG FIX: only raise when files are actually still missing; the
            # original raised after the 5th retry even if it had succeeded.
            if retry_times == 6 and len(failed_list) > 0:  # retried 5 times
                raise Exception("{}/{} files download failed".format(len(failed_list), len(objects)))
    for obj in objects:
        if ".zip" not in obj:
            continue
        with zipfile.ZipFile(os.path.join(downloaded_path, obj.split("/")[-1]), "r") as zip_ref:
            zip_ref.extractall(downloaded_path)
            logger.info("{} unzip done".format(obj))


def change_format_and_zip(generated_path, output_zip_file, datasetType, logger):
    """Convert generated annotation files to the upload format, then zip the folder.

    datasetType "1" converts image annotations, "2" converts pcd annotations,
    "3" does nothing here because the fuse-detection step already converted them.
    The whole generated_path tree is packed into output_zip_file with paths
    relative to generated_path.
    """
    generated_files = os.listdir(generated_path)
    if datasetType == "1":  # image fused anno update
        for name in generated_files:
            if image_basename in name:
                change_image_anno_format(os.path.join(generated_path, name))  # own and fused share the 7-class format
    if datasetType == "2":  # pcd fused anno update
        for name in generated_files:
            if pcd_basename in name:
                change_pcd_anno_format(os.path.join(generated_path, name))  # 4-class format
    if datasetType == "3":
        pass  # format change already done in fuse detection
    # Pack every generated file into a fresh ZIP archive.
    with zipfile.ZipFile(output_zip_file, "w", zipfile.ZIP_DEFLATED) as archive:
        for folder, _, names in os.walk(generated_path):
            for name in names:
                full_path = os.path.join(folder, name)
                archive.write(full_path, os.path.relpath(full_path, generated_path))
    logger.info("result zip done")

def upload_to_obs(obs_client, bucket_name, generated_path, data, upload_path, logger):
    """Zip the generated annotations and upload result.zip to OBS.

    The zip is first created in the current working directory by
    change_format_and_zip and then moved into generated_path. A failed upload
    is retried up to 5 times before raising.
    """
    datasetPath = data.get("datasetPath")
    if not datasetPath.endswith("/"):
        datasetPath = datasetPath + "/"
    file_name = "result.zip"
    datasetType = data.get("datasetType")
    change_format_and_zip(generated_path, file_name, datasetType, logger)
    shutil.move("result.zip", generated_path)
    failed_list = []  # upload failed names, retry later
    response = obs_client.putFile(bucket_name, upload_path + datasetPath + file_name, file_path=os.path.join(generated_path, file_name))
    if response.status < 300:
        logger.info("{} file upload success".format(file_name))
    else:
        logger.error("{} upload failed".format(file_name))
        failed_list.append(file_name)
    retry_times = 1
    while len(failed_list) > 0:
        logger.info("retry uploading times:{}".format(retry_times))
        # BUG FIX: the original removed entries from failed_list while iterating
        # it (which skips elements) and reused the name file_name as loop
        # variable, clobbering the outer variable; rebuild the list each round.
        still_failed = []
        for name in failed_list:
            response = obs_client.putFile(bucket_name, upload_path + datasetPath + name, file_path=os.path.join(generated_path, name))
            if response.status < 300:
                pass
            else:
                logger.error("retry uploading times:{}, {} upload failed".format(retry_times, name))
                still_failed.append(name)
        failed_list = still_failed
        retry_times += 1
        # BUG FIX: only raise when the upload is actually still failing; the
        # original raised after the 5th retry even if it had just succeeded.
        if retry_times == 6 and len(failed_list) > 0:  # retried 5 times
            raise Exception("{} file upload failed".format(failed_list[0]))

def delete_download_generated_data(downloaded_path, generated_path, visualized_path, logger):
    """Remove the per-task download/generated/visualized directories if they exist."""
    for path in (downloaded_path, generated_path, visualized_path):
        if os.path.exists(path):
            shutil.rmtree(path)
    logger.info("{} and {} and {} removed".format(downloaded_path, generated_path, visualized_path))

def background_task_data_annotation(voxelnext_model, yolo_model, obs_client, data, logger):
    """End-to-end pre-annotation task for one dataset.

    Downloads the dataset from OBS, runs image and/or point-cloud detection
    depending on data["datasetType"] ("1" image, "2" pcd, "3" joint), fuses
    with the vendor truth when a json.zip is present, uploads result.zip back
    to OBS and notifies the platform. Any failure is reported through the
    callback URL instead of propagating.
    """
    # BUG FIX: pre-initialize so the except-branch cleanup cannot hit a
    # NameError when the failure happens before these paths are created.
    # os.path.exists("") is False, so cleaning up "" is a safe no-op.
    downloaded_path = generated_path = visualized_path = ""
    try:
        logger.info("start data annotation")
        if data.get("datasetType") == "1":  # image annotation
            logger.info("task type is image annotation")
        elif data.get("datasetType") == "2":  # pcd annotation
            logger.info("task type is pcd annotation")
        else:  # "3" for joint annotation
            logger.info("task type is joint annotation")
        current_timestamp = datetime.datetime.now().timestamp()
        # BUG FIX: the original only assigned datasetPath inside the endswith("/")
        # branch, leaving it undefined for paths without a trailing slash.
        datasetPath = data.get("datasetPath")
        if datasetPath.endswith("/"):
            datasetPath = datasetPath[:-1]
        downloaded_path = os.path.join(data_path_dict["downloaded_data_save_path"], datasetPath + "_" + str(current_timestamp))
        os.makedirs(downloaded_path, exist_ok=True)
        # download data
        download_from_obs_and_check(obs_client, config.annotation_bucket_name, downloaded_path, data, data.get("datasetType"), logger)
        file_path_list = os.listdir(downloaded_path)
        has_truth = "json.zip" in file_path_list
        pcd_path_list = [os.path.join(downloaded_path, x) for x in file_path_list if (x.endswith(".pcd") and "at128" in x)]
        pcd_path_list.sort()
        image_path_list = [os.path.join(downloaded_path, x) for x in file_path_list if (x.endswith(".jpg") or x.endswith(".png"))]
        image_path_list.sort()
        # mkdir generated/visualized data save paths
        generated_path = os.path.join(data_path_dict["generated_data_save_path"], datasetPath + "_" + str(current_timestamp))
        visualized_path = os.path.join(data_path_dict["visualizd_data_save_path"], datasetPath + "_" + str(current_timestamp))
        os.makedirs(generated_path, exist_ok=True)
        os.makedirs(visualized_path, exist_ok=True)
        progress = 0.1
        if data.get("datasetType") == "1":  # image annotation
            for seq, image_path in enumerate(image_path_list):
                detect_one_image(yolo_model, image_path, data.get("imageConfTh"), generated_path, visualized_path, has_truth, logger)
                if seq / len(image_path_list) > progress:
                    notify_platform_data_annotation_progress(data, progress, logger)
                    progress += 0.1
            logger.info("image detection done")
            if has_truth:
                # check with vendor image annotation result, get final result
                detection_fuser = FuseDetection(downloaded_path, data.get("datasetType"))
                detection_fuser.fuse_detection(downloaded_path, generated_path, visualized_path, IF_DEBUG, data.get("datasetType"), logger)
                logger.info("fuse detection result done")

        elif data.get("datasetType") == "2":  # pcd annotation
            for seq, pcd_path in enumerate(pcd_path_list):
                detect_one_pcd(voxelnext_model, pcd_path, data.get("pcdConfTh"), generated_path, visualized_path, has_truth, logger)
                if seq / len(pcd_path_list) > progress:
                    notify_platform_data_annotation_progress(data, progress, logger)
                    progress += 0.1
            logger.info("pcd detection done")
            if has_truth:
                # check with vendor annotation result, get final result
                detection_fuser = FuseDetection(downloaded_path, data.get("datasetType"))
                detection_fuser.fuse_detection(downloaded_path, generated_path, visualized_path, IF_DEBUG, data.get("datasetType"), logger)
                logger.info("fuse detection result done")

        else:  # "3" joint annotation: image pass reports 0-50%, pcd pass 50-100%
            for seq, image_path in enumerate(image_path_list):
                detect_one_image(yolo_model, image_path, data.get("imageConfTh"), generated_path, visualized_path, has_truth, logger)
                if seq / len(image_path_list) > progress:
                    notify_platform_data_annotation_progress(data, progress / 2, logger)
                    progress += 0.1
            logger.info("image detection done")
            progress = 0.1
            for seq, pcd_path in enumerate(pcd_path_list):
                detect_one_pcd(voxelnext_model, pcd_path, data.get("pcdConfTh"), generated_path, visualized_path, has_truth, logger)
                if seq / len(pcd_path_list) > progress:
                    notify_platform_data_annotation_progress(data, 0.5 + progress / 2, logger)
                    progress += 0.1
            logger.info("pcd detection done")
            if has_truth:
                # fused image result and fused pcd result multi-domain fusing
                detection_fuser = FuseDetection(downloaded_path, data.get("datasetType"))
                detection_fuser.fuse_detection(downloaded_path, generated_path, visualized_path, IF_DEBUG, data.get("datasetType"), logger)
                logger.info("fuse detection result done")

        # upload to obs
        logger.info("start result uploading")
        upload_to_obs(obs_client, config.annotation_bucket_name, generated_path, data, data_path_dict["pre_annotation_upload_path"], logger)
        if not IF_DEBUG:
            delete_download_generated_data(downloaded_path, generated_path, visualized_path, logger)
        # notify platform
        status_code = 200
        message = "data annotation result upload to {} success".format(data_path_dict["pre_annotation_upload_path"] + data.get("datasetPath"))
        logger.info(message)
        notify_platform_data_annotation_result(status_code, message, data, logger)
        logger.info("finish data annotation")
    except Exception as e:
        logger.error("data annotation failed because of {}".format(e))
        if not IF_DEBUG:
            delete_download_generated_data(downloaded_path, generated_path, visualized_path, logger)
        status_code = 500
        message = "data annotation failed"
        notify_platform_data_annotation_result(status_code, message, data, logger)

def notify_platform_data_annotation_progress(data, progress, logger):
    """POST the current task progress (as a percentage) to the platform.

    Network or HTTP errors are logged and swallowed — progress reporting
    must never break the annotation task itself.
    """
    payload = {"datasetId": data.get("datasetId"), "process": progress * 100}
    headers = {"Content-Type": "application/json"}
    try:
        response = requests.post(data.get("processUrl"), json=payload, headers=headers)
        if response.status_code == 200:
            logger.info("notify_platform_data_annotation_progress success")
        else:
            logger.error("notify_platform_data_annotation_progress failed")
    except Exception as e:
        logger.error(e)


def notify_platform_data_annotation_result(status_code, message, data, logger):
    """POST the final task status and result.zip location to the platform callback URL.

    Network or HTTP errors are logged and swallowed so the caller never sees them.
    """
    datasetPath = data.get("datasetPath")
    if not datasetPath.endswith("/"):
        datasetPath = datasetPath + "/"
    result_path = data_path_dict["pre_annotation_upload_path"] + datasetPath + "result.zip"
    payload = {
        "datasetId": data.get("datasetId"),
        "resultCode": status_code,
        "resultMessage": message,
        "resultPath": result_path,
    }
    headers = {"Content-Type": "application/json"}
    try:
        response = requests.post(data.get("callbackUrl"), json=payload, headers=headers)
        if response.status_code == 200:
            logger.info("notify_platform_data_annotation_result success")
        else:
            logger.error("notify_platform_data_annotation_result failed")
    except Exception as e:
        logger.error(e)


