import datetime
import glob
import os
import shutil
import zipfile
from collections import Counter
from pathlib import Path

import cv2
import numpy as np
import requests
import torch

import config
from fuse_detection import FuseDetection
from pcdet.config import cfg
from pcdet.datasets import DatasetTemplate
from pcdet.models import load_data_to_gpu
from utils import nms_3d, write_json
from view_anno import view_vendor_image_anno, view_vendor_pcd_anno, view_own_image_anno, view_own_pcd_anno

# Local working directories and OBS upload prefixes used throughout the
# pipeline (local paths are relative to the process working directory).
data_path_dict = {
    "downloaded_data_save_path": "./downloaded_data",
    "generated_data_save_path": "./generated_data",
    # NOTE(review): "visualizd" is a typo, but the key is used consistently
    # elsewhere in this file -- renaming would require touching every lookup.
    "visualizd_data_save_path": "./visualized_data",
    "pre_annotation_upload_path": "pre_annotation_result/",
    "mining_upload_path": "ai_label_result/"
}

# When True: debug visualizations are written (view_* calls) and the
# downloaded/generated data is kept on disk instead of being cleaned up.
IF_DEBUG = False

class DemoDataset(DatasetTemplate):
    """Minimal pcdet dataset that feeds point-cloud files to the model.

    Args:
        dataset_cfg: pcdet DATA_CONFIG node.
        class_names: detection class names.
        training: kept for DatasetTemplate compatibility; inference passes False.
        root_path: Path to a single file or a directory of files.
        logger: optional logger forwarded to DatasetTemplate.
        ext: file extension to load ('.bin', '.npy' or '.pcd').
    """

    def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None, ext='.bin'):
        super().__init__(
            dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
        )
        self.root_path = root_path
        self.ext = ext
        data_file_list = glob.glob(str(root_path / f'*{self.ext}')) if self.root_path.is_dir() else [self.root_path]
        data_file_list.sort()
        self.sample_file_list = data_file_list

    def __len__(self):
        return len(self.sample_file_list)

    @staticmethod
    def _load_pcd_points(pcd_path):
        """Parse an ASCII .pcd file into an (N, 5) array.

        Columns are x, y, z, intensity plus a zero padding column (the model
        expects 5-channel nuScenes-style points). Rows containing 'nan' are
        dropped.

        Raises:
            ValueError: if the PCD header lacks x/y/z/intensity fields.
        """
        with open(pcd_path, 'r', encoding='utf-8') as f:
            lines = f.readlines()

        # Locate the FIELDS header line and the first data row.
        data_start_idx = 0
        fields = []
        for i, line in enumerate(lines):
            if line.startswith('FIELDS'):
                fields = line.split()[1:]
            elif line.startswith('DATA'):
                data_start_idx = i + 1
                break

        required = ('x', 'y', 'z', 'intensity')
        if any(name not in fields for name in required):
            raise ValueError("PCD file does not contain x, y, z, and intensity fields.")

        # Column positions of the required fields within each data row.
        col_indices = [fields.index(name) for name in required]

        data = []
        for line in lines[data_start_idx:]:
            if "nan" in line:
                continue  # drop incomplete returns
            if line.strip():  # skip empty lines
                values = line.split()
                data.append([float(values[c]) for c in col_indices])

        # Guard the empty case: np.hstack on a 0-d array would raise.
        if not data:
            return np.zeros((0, 5), dtype=np.float32)

        data = np.array(data)
        zeros = np.zeros((data.shape[0], 1))
        return np.hstack((data, zeros))

    def __getitem__(self, index):
        if self.ext == '.bin':
            # points = np.fromfile(self.sample_file_list[index], dtype=np.float32).reshape(-1, 4)#original
            points = np.fromfile(self.sample_file_list[index], dtype=np.float32).reshape(-1, 5)#for nuscenes
        elif self.ext == '.npy':
            points = np.load(self.sample_file_list[index])
        elif self.ext == '.pcd':
            points = self._load_pcd_points(self.sample_file_list[index])
        else:
            raise NotImplementedError

        input_dict = {
            'points': points,
            'frame_id': index,
        }

        data_dict = self.prepare_data(data_dict=input_dict)
        return data_dict

def post_process(pred_dict, score_th=0.3, iou_th=0.1):
    """Confidence-filter and 3D-NMS one frame's raw predictions.

    Args:
        pred_dict: dict with 'pred_boxes', 'pred_scores', 'pred_labels' tensors.
        score_th: minimum confidence to keep a detection.
        iou_th: IoU threshold for nms_3d suppression.

    Returns:
        dict with the surviving CPU tensors under the same keys, or {} when
        nothing survives.
    """
    boxes = pred_dict['pred_boxes'].cpu()
    scores = pred_dict['pred_scores'].cpu()
    labels = pred_dict['pred_labels'].cpu()

    # Drop everything below the confidence threshold before NMS.
    confident = scores > score_th
    boxes = boxes[confident]
    scores = scores[confident]
    labels = labels[confident]

    survivors = nms_3d(boxes, scores, iou_th)
    if survivors.numel() == 0:
        return {}  # no detection

    return {
        'pred_boxes': boxes[survivors],
        'pred_scores': scores[survivors],
        'pred_labels': labels[survivors],
    }

def generate_image_annotation_json(labels, coordinates, confs, image_name, image_w, image_h):
    """Build a labelme-style annotation dict for one image.

    Args:
        labels: class label per detection (str).
        coordinates: xyxy box per detection.
        confs: confidence score per detection.
        image_name: image path; only the basename is stored in the output.
        image_w: image width in pixels.
        image_h: image height in pixels.

    Returns:
        dict ready to be serialized as the image's annotation JSON.
    """
    shapes = [
        {
            "label": label,
            "points": points,
            "conf": conf,
            "shape_type": "rectangle",
            "group_id": "null",
            "flags": {}
        }
        for label, points, conf in zip(labels, coordinates, confs)
    ]
    # Counter is O(n), versus an O(n^2) labels.count() per distinct label.
    summary = dict(Counter(labels))
    summary["rect"] = len(labels)
    data = {
        "version": "4.6.0",
        "flags": {},
        "shapes": shapes,
        "summary": summary,
        "imagePath": image_name.split("/")[-1],
        "imageHeight": image_h,
        "imageWidth": image_w
    }
    return data

def detect_one_image(yolo_model, image_path, imageConfTh, generated_path, visualized_path, logger):
    """Run YOLO on one image and write its "<frame-id>_own_image.json" result.

    Args:
        yolo_model: callable YOLO model returning results with .boxes.
        image_path: path of the image to annotate.
        imageConfTh: minimum confidence for a box to be kept.
        generated_path: directory the annotation JSON is written into.
        visualized_path: directory for debug visualizations (IF_DEBUG only).
        logger: logger for progress messages.

    Raises:
        ValueError: when the image cannot be read.
    """
    image = cv2.imread(image_path)
    if image is None:
        # cv2.imread returns None (no exception) for missing/corrupt files;
        # fail loudly instead of crashing on image.shape below.
        raise ValueError("failed to read image {}".format(image_path))
    image_h, image_w = image.shape[:2]
    results = yolo_model(image)
    labels = []
    coordinates = []
    confs = []
    for result in results:
        for box in result.boxes:
            if box.conf.item() > imageConfTh:
                labels.append(str(int(box.cls.item())))
                coordinates.append(box.xyxy.tolist()[0])
                confs.append(box.conf.item())
    logger.info("{} detection done".format(image_path))
    detection_result = generate_image_annotation_json(labels, coordinates, confs, image_path, image_w, image_h)
    # Output name is derived from the file stem's leading "_"-separated token.
    write_json(detection_result, os.path.join(generated_path, image_path.split("/")[-1].split(".")[0].split("_")[0]+"_own_image.json"))
    if IF_DEBUG:
        view_vendor_image_anno(image_path, visualized_path)
        view_own_image_anno(image_path, generated_path, visualized_path)

def generate_pcd_annotation_json(labels, rotations, scales, positions):
    """Build the cube-annotation dict for one point cloud.

    Args:
        labels: class label per detection (stringified into the annotation;
            left as-is in the summary keys).
        rotations: yaw per detection (only the z rotation is populated).
        scales: (dx, dy, dz) per detection.
        positions: (x, y, z) center per detection.

    Returns:
        dict with 'annotations' and a per-label 'summary' (plus total 'cube').
    """
    annotations = [
        {
            "label": str(label),
            "cube": {
                "rotation": {"x": 0, "y": 0, "z": rotation},
                "scale": {"x": scale[0], "y": scale[1], "z": scale[2]},
                "position": {"x": position[0], "y": position[1], "z": position[2]}
            },
            "group_id": "null",
            "flags": {}
        }
        for label, rotation, scale, position in zip(labels, rotations, scales, positions)
    ]

    # Counter is O(n), versus an O(n^2) labels.count() per distinct label.
    summary = dict(Counter(labels))
    summary["cube"] = len(labels)

    data = {
        "annotations": annotations,
        "summary": summary
    }
    return data

def detect_one_pcd(voxelnext_model, pcd_path, pcdConfTh, generated_path, visualized_path, logger):
    """Run the VoxelNeXt model on one .pcd file and write its annotation JSON.

    Args:
        voxelnext_model: pcdet model with a forward() returning pred dicts.
        pcd_path: path of the point-cloud file to annotate.
        pcdConfTh: minimum confidence passed to post_process.
        generated_path: directory the annotation JSON is written into.
        visualized_path: directory for debug visualizations (IF_DEBUG only).
        logger: logger for progress messages.
    """
    demo_dataset = DemoDataset(dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, training=False, root_path=Path(pcd_path), ext=".pcd", logger=logger)
    # Stays {} if the dataset yields no frame; previously final_ret was only
    # assigned inside the loop, raising NameError below for an empty dataset.
    final_ret = {}
    with torch.no_grad():
        for idx, data_dict in enumerate(demo_dataset):
            data_dict = demo_dataset.collate_batch([data_dict])
            load_data_to_gpu(data_dict)
            pred_dicts, _ = voxelnext_model.forward(data_dict)
            final_ret = post_process(pred_dicts[0], score_th=pcdConfTh)
    if len(final_ret) == 0:  # no detection
        labels = []
        rotations = []
        scales = []
        positions = []
    else:
        labels = final_ret['pred_labels'].numpy().tolist()
        # NOTE(review): column 8 as yaw assumes 9-dim boxes
        # (x, y, z, dx, dy, dz, ..., col 8) -- confirm the box layout.
        rotations = final_ret["pred_boxes"][:, 8].tolist()
        scales = final_ret["pred_boxes"][:, 3:6].tolist()
        positions = final_ret["pred_boxes"][:, 0:3].tolist()
    logger.info("{} detection done".format(pcd_path))
    detection_result = generate_pcd_annotation_json(labels, rotations, scales, positions)
    write_json(detection_result, os.path.join(generated_path, pcd_path.split("/")[-1].split(".")[0].split("_")[0] + "_own_pcd.json"))
    if IF_DEBUG:
        view_vendor_pcd_anno(pcd_path, visualized_path)
        view_own_pcd_anno(pcd_path, generated_path, visualized_path)



def download_from_obs_and_check(obs_client, bucket_name, downloaded_path, data, task_type, logger):
    """Download the task's required archives from OBS, retrying failures, then unzip.

    Args:
        obs_client: OBS SDK client.
        bucket_name: source bucket name.
        downloaded_path: local directory the files are saved into.
        data: task tuple; data[3] is the dataset path (OBS key prefix).
        task_type: "1" image only, "2" pcd only, "3" joint annotation.
        logger: logger for progress and errors.

    Raises:
        Exception: when the listing fails, required files are missing, or
            downloads still fail after 5 retries.
    """
    datasetPath = data[3]
    logger.info("start download data")
    response = obs_client.listObjects(bucket_name, prefix=datasetPath)
    if response.status >= 300:
        # Without the listing we cannot know what to download; fail fast
        # instead of dereferencing the failed response's body below.
        logger.error('{} listObjects failed'.format(bucket_name + datasetPath))
        raise Exception("{} listObjects failed".format(bucket_name + datasetPath))
    objects = []
    if task_type == "1":  # only need image
        objects = [obj.key for obj in response.body.contents if "jpg_GMSLCam_2.zip" in obj.key]
        if len(objects) < 1:
            raise Exception("no jpg_GMSLCam_2.zip file in {}".format(bucket_name + datasetPath))
    if task_type == "2":  # only need pcd
        objects = [obj.key for obj in response.body.contents if "pcd_pandarat128_2372.zip" in obj.key]
        if len(objects) < 1:
            raise Exception("no pcd_pandarat128_2372.zip file in {}".format(bucket_name + datasetPath))
    if task_type == "3":  # joint anno: need vendor anno json, image, pcd, calibration
        needed = ("json.zip", "pcd_pandarat128_2372.zip", "jpg_GMSLCam_2.zip", "calibration.json")
        objects = [obj.key for obj in response.body.contents if any(marker in obj.key for marker in needed)]
        if len(objects) < 4:
            raise Exception("no enough necesary files in {}".format(bucket_name + datasetPath))

    logger.info("totally {} needed zip/json files in {}".format(len(objects), bucket_name + "/" + datasetPath))
    failed_list = []  # object keys whose download failed; retried below
    for obj in objects:
        response = obs_client.getObject(bucket_name, obj, downloadPath=os.path.join(downloaded_path, obj.split("/")[-1]))
        if response.status >= 300:
            logger.error('{} download failed'.format(obj))
            failed_list.append(obj)
    if not failed_list:  # all files download done
        logger.info("all {} files download success".format(len(objects)))
    else:
        retry_times = 1
        while failed_list:
            logger.info("retry downloading times:{}".format(retry_times))
            # Rebuild the list instead of remove()-while-iterating, which
            # silently skipped the entry after each successful retry.
            still_failed = []
            for obj in failed_list:
                response = obs_client.getObject(bucket_name, obj, downloadPath=os.path.join(downloaded_path, obj.split("/")[-1]))
                if response.status >= 300:
                    logger.error('retry downloading times:{}, {} download failed'.format(retry_times, obj))
                    still_failed.append(obj)
            failed_list = still_failed
            retry_times += 1
            if retry_times == 6:  # retry 5 times
                raise Exception("{}/{} files download failed".format(len(failed_list), len(objects)))
    for obj in objects:
        if ".zip" not in obj:
            continue
        with zipfile.ZipFile(os.path.join(downloaded_path, obj.split("/")[-1]), 'r') as zip_ref:
            zip_ref.extractall(downloaded_path)
            logger.info("{} unzip done".format(obj))
    
def upload_to_obs(obs_client, bucket_name, generated_path, data, upload_path, logger):
    """Upload every file in generated_path to OBS under upload_path + dataset path.

    Args:
        obs_client: OBS SDK client.
        bucket_name: destination bucket name.
        generated_path: local directory whose files are uploaded.
        data: task tuple; data[3] is the dataset path appended to upload_path.
        upload_path: OBS key prefix for the results.
        logger: logger for progress and errors.

    Raises:
        Exception: when uploads still fail after 5 retries.
    """
    datasetPath = data[3]
    if not datasetPath.endswith("/"):
        datasetPath = datasetPath + "/"
    file_names = os.listdir(generated_path)
    failed_list = []  # file names whose upload failed; retried below
    for file_name in file_names:
        response = obs_client.putFile(bucket_name, upload_path + datasetPath + file_name, file_path=os.path.join(generated_path, file_name))
        if response.status >= 300:
            logger.error('{} upload failed'.format(file_name))
            failed_list.append(file_name)
    if not failed_list:  # all files upload done
        logger.info("all {} files upload success".format(len(file_names)))
    else:
        retry_times = 1
        while failed_list:
            logger.info("retry uploading times:{}".format(retry_times))
            # Rebuild the list instead of remove()-while-iterating, which
            # silently skipped the entry after each successful retry.
            still_failed = []
            for file_name in failed_list:
                response = obs_client.putFile(bucket_name, upload_path + datasetPath + file_name, file_path=os.path.join(generated_path, file_name))
                if response.status >= 300:
                    logger.error('retry uploading times:{}, {} upload failed'.format(retry_times, file_name))
                    still_failed.append(file_name)
            failed_list = still_failed
            retry_times += 1
            if retry_times == 6:  # retry 5 times
                raise Exception("{}/{} files upload failed".format(len(failed_list), len(file_names)))

def delete_download_generated_data(downloaded_path, generated_path, visualized_path, logger):
    """Remove the per-task working directories (if they exist) and log it."""
    for work_dir in (downloaded_path, generated_path, visualized_path):
        if os.path.exists(work_dir):
            shutil.rmtree(work_dir)
    logger.info("{} and {} and {} removed".format(downloaded_path, generated_path, visualized_path))

def annotate(voxelnext_model, yolo_model, obs_client, data, logger):
    """End-to-end annotation of one task: download, detect, upload, notify.

    data layout used here: data[2] dataset type ("1" image / "2" pcd / else
    joint), data[3] dataset path, data[4] image confidence threshold,
    data[5] pcd confidence threshold; data[1]/data[6] are consumed by
    notify_platform_data_annotation_result.
    """
    # Defined up-front so the except-branch cleanup cannot raise NameError
    # (masking the original error) when the failure happens before the
    # working paths are computed.
    downloaded_path = generated_path = visualized_path = None
    try:
        datasetType = data[2]
        datasetPath = data[3]
        imageConfTh = float(data[4])
        pcdConfTh = float(data[5])
        logger.info("start data annotation")
        if datasetType == "1":  # image annotation
            logger.info("task type is image annotation")
        elif datasetType == "2":  # pcd annotation
            logger.info("task type is pcd annotation")
        else:  # "3" for joint annotation
            logger.info("task type is joint annotation")
        current_timestamp = datetime.datetime.now().timestamp()
        if datasetPath.endswith("/"):
            datasetPath = datasetPath[:-1]
        # Compute all three working paths together so cleanup always sees a
        # consistent set once any of them has been created.
        run_dir = datasetPath + "_" + str(current_timestamp)
        downloaded_path = os.path.join(data_path_dict["downloaded_data_save_path"], run_dir)
        generated_path = os.path.join(data_path_dict["generated_data_save_path"], run_dir)
        visualized_path = os.path.join(data_path_dict["visualizd_data_save_path"], run_dir)
        os.makedirs(downloaded_path, exist_ok=True)
        # download data
        download_from_obs_and_check(obs_client, config.annotation_bucket_name, downloaded_path, data, datasetType, logger)
        file_path_list = os.listdir(downloaded_path)
        pcd_path_list = [os.path.join(downloaded_path, x) for x in file_path_list if (x.endswith(".pcd") and "at128" in x)]
        pcd_path_list.sort()
        image_path_list = [os.path.join(downloaded_path, x) for x in file_path_list if (x.endswith(".jpg") or x.endswith(".png"))]
        image_path_list.sort()
        # mkdir generated/visualized data save paths
        os.makedirs(generated_path, exist_ok=True)
        os.makedirs(visualized_path, exist_ok=True)
        if datasetType == "1":  # image annotation
            # image detect
            for image_path in image_path_list:
                detect_one_image(yolo_model, image_path, imageConfTh, generated_path, visualized_path, logger)
            logger.info("image detection done")
        elif datasetType == "2":  # pcd annotation
            # pcd detect
            for pcd_path in pcd_path_list:
                detect_one_pcd(voxelnext_model, pcd_path, pcdConfTh, generated_path, visualized_path, logger)
            logger.info("pcd detection done")
        else:  # "3" for joint annotation
            # image detect
            for image_path in image_path_list:
                detect_one_image(yolo_model, image_path, imageConfTh, generated_path, visualized_path, logger)
            logger.info("image detection done")
            # pcd detect
            for pcd_path in pcd_path_list:
                detect_one_pcd(voxelnext_model, pcd_path, pcdConfTh, generated_path, visualized_path, logger)
            logger.info("pcd detection done")
            # check with vendor annotation result, get final result
            detection_fuser = FuseDetection(os.path.join(downloaded_path, "calibration.json"))
            detection_fuser.fuse_detection(downloaded_path, generated_path, visualized_path, IF_DEBUG, logger)
            logger.info("fuse detection result done")
        # upload to obs
        logger.info("start result uploading")
        upload_to_obs(obs_client, config.annotation_bucket_name, generated_path, data, data_path_dict["pre_annotation_upload_path"], logger)
        if not IF_DEBUG:
            delete_download_generated_data(downloaded_path, generated_path, visualized_path, logger)
        # notify platform
        status_code = 200
        message = "data annotation result upload to {} success".format(data_path_dict["pre_annotation_upload_path"] + datasetPath)
        logger.info(message)
        notify_platform_data_annotation_result(status_code, message, data, logger)
        logger.info("finish data annotation")
    except Exception as e:
        logger.error("data annotation failed because of {}".format(e))
        # Only clean up when the working paths were actually computed.
        if not IF_DEBUG and downloaded_path is not None:
            delete_download_generated_data(downloaded_path, generated_path, visualized_path, logger)
        status_code = 500
        message = "data annotation failed"
        notify_platform_data_annotation_result(status_code, message, data, logger)

def notify_platform_data_annotation_result(status_code, message, data, logger):
    """POST the annotation outcome back to the platform's callback URL.

    Args:
        status_code: result code reported to the platform (200 / 500).
        message: human-readable result message.
        data: task tuple; data[1] dataset id, data[3] dataset path,
            data[6] callback URL.
        logger: logger for the notification outcome.
    """
    datasetId = data[1]
    datasetPath = data[3]
    callbackUrl = data[6]
    payload = {"datasetId":datasetId, "resultCode":status_code, "resultMessage": message, "resultPath":data_path_dict["pre_annotation_upload_path"] + datasetPath}
    headers = {'Content-Type': 'application/json'}
    response = requests.post(callbackUrl, json=payload, headers=headers)
    # requests.Response has no .code attribute; the previous
    # `response.code != "200"` check raised AttributeError on every call.
    if response.status_code != 200:
        logger.error("notify_platform_data_annotation_result failed")
    else:
        logger.info("notify_platform_data_annotation_result success")

