import requests
import config
import glob
import numpy as np
import os
import datetime
import shutil
from utils import write_json, get_labelId_from_phrase
from view_anno import view_own_image_anno
from pathlib import Path
import zipfile
from groundingdino.util.inference import load_image, predict


# Local working directories and OBS upload prefixes for one mining task.
data_path_dict = {
    "downloaded_data_save_path": "./downloaded_data",  # raw images/zips pulled from OBS
    "generated_data_save_path": "./generated_data",  # annotation JSON output
    "visualizd_data_save_path": "./visualized_data",  # debug renderings (NOTE: key typo "visualizd" kept — it is looked up by this exact spelling elsewhere)
    "pre_annotation_upload_path": "pre_annotation_result/",  # OBS prefix for pre-annotation results
    "mining_upload_path": "ai_label_result/"  # OBS prefix for mining results
}

# When True: keep intermediate files on disk and write per-image debug
# annotation JSON + visualizations instead of cleaning up after the task.
IF_DEBUG = False

def generate_image_annotation_json(labels, coordinates, confs, image_name, image_w, image_h):
    """Build a labelme-style (version 4.6.0) annotation dict for one image.

    Args:
        labels: list of label ids (one per detected box).
        coordinates: list of [xmin, ymin, xmax, ymax] boxes, parallel to labels.
        confs: list of detection confidences, parallel to labels.
        image_name: image path; only the basename is stored as imagePath.
        image_w: image width in pixels.
        image_h: image height in pixels.

    Returns:
        dict ready to be serialized as a labelme-compatible JSON file,
        including a per-label count summary plus total box count under "rect".
    """
    shapes = [
        {
            "label": label,
            "points": points,
            "conf": conf,
            "shape_type": "rectangle",
            # labelme expects JSON null here; the original emitted the
            # string "null", which serializes as "group_id": "null"
            "group_id": None,
            "flags": {}
        }
        for label, points, conf in zip(labels, coordinates, confs)
    ]
    summary = {label: labels.count(label) for label in set(labels)}
    summary["rect"] = len(labels)  # total rectangle count
    data = {
        "version": "4.6.0",
        "flags": {},
        "shapes": shapes,
        "summary": summary,
        "imagePath": image_name.split("/")[-1],
        "imageHeight": image_h,
        "imageWidth": image_w
    }
    return data



def mine_one_image(groundingdino_model, image_path, labelsId, prompt, boxTh, textTh, generated_path, visualized_path, logger):
    """Detect objects in one image with GroundingDINO.

    Returns a dict with the image's frame number (parsed from the file-name
    prefix before the first "_") and the unique integer label ids detected.
    In debug mode also writes the per-image annotation JSON and a
    visualization.
    """
    _, image, image_w, image_h = load_image(image_path)
    startFrameNo = int(image_path.split("/")[-1].split("_")[0])
    boxes, logits, phrases = predict(
        model=groundingdino_model,
        image=image,
        caption=prompt,
        box_threshold=boxTh,
        text_threshold=textTh,
    )
    # Convert normalized (cx, cy, w, h) boxes into absolute xyxy pixel coords.
    coordinates = []
    for cx, cy, bw, bh in boxes.numpy().tolist():
        center_x = cx * image_w
        center_y = cy * image_h
        box_w = bw * image_w
        box_h = bh * image_h
        coordinates.append([
            center_x - box_w / 2,
            center_y - box_h / 2,
            center_x + box_w / 2,
            center_y + box_h / 2,
        ])
    confs = logits.numpy().tolist()
    labels = get_labelId_from_phrase(phrases, labelsId, prompt)
    labels_int = list({int(x) for x in labels})
    logger.info("{} groundingdino detection done".format(image_path))
    if IF_DEBUG:
        detection_result = generate_image_annotation_json(labels, coordinates, confs, image_path, image_w, image_h)
        write_json(detection_result, os.path.join(generated_path, image_path.split("/")[-1].split(".")[0].split("_")[0] + "_own_image.json"))
        view_own_image_anno(image_path, generated_path, visualized_path)
    return {"startFrameNo": startFrameNo, "labels": labels_int}


def download_from_obs_and_check(obs_client, bucket_name, downloaded_path, data, logger):
    """Download all jpg_GMSLCam_2.zip archives under data[2] from OBS and unzip them.

    Args:
        obs_client: OBS SDK client exposing listObjects/getObject.
        bucket_name: OBS bucket to read from.
        downloaded_path: local directory where archives are saved and extracted.
        data: task record; data[2] is the dataset prefix inside the bucket.
        logger: logger for progress/error reporting.

    Raises:
        Exception: when listing fails, no matching zip exists, or downloads
            still fail after 5 retry rounds.
    """
    logger.info("start download data")
    datasetPath = data[2]
    response = obs_client.listObjects(bucket_name, datasetPath)
    if response.status >= 300:
        # original only logged and then dereferenced response.body anyway;
        # fail loudly with the real cause instead
        logger.error('{} listObjects failed'.format(bucket_name + datasetPath))
        raise Exception('{} listObjects failed'.format(bucket_name + datasetPath))
    objects = [obj.key for obj in response.body.contents if "jpg_GMSLCam_2.zip" in obj.key]
    if len(objects) < 1:
        raise Exception("no jpg_GMSLCam_2.zip file in {}".format(bucket_name + datasetPath))

    logger.info("totally {} needed zip/json files in {}".format(len(objects), bucket_name + "/" + datasetPath))
    failed_list = []  # download-failed object keys, retried below
    for obj in objects:
        response = obs_client.getObject(bucket_name, obj, downloadPath=os.path.join(downloaded_path, obj.split("/")[-1]))
        if response.status >= 300:
            logger.error('{} download failed'.format(obj))
            failed_list.append(obj)
    if not failed_list:  # all files downloaded on the first pass
        logger.info("all {} files download success".format(len(objects)))
    else:
        retry_times = 1
        while failed_list:
            logger.info("retry downloading times:{}".format(retry_times))
            # Build a fresh failure list instead of removing from failed_list
            # while iterating it (the original skipped every other entry).
            still_failed = []
            for obj in failed_list:
                response = obs_client.getObject(bucket_name, obj, downloadPath=os.path.join(downloaded_path, obj.split("/")[-1]))
                if response.status >= 300:
                    logger.error('retry downloading times:{}, {} download failed'.format(retry_times, obj))
                    still_failed.append(obj)
            failed_list = still_failed
            retry_times += 1
            # give up after 5 retries — but only if something is actually
            # still failing (the original raised even after a clean last retry)
            if retry_times == 6 and failed_list:
                raise Exception("{}/{} files download failed".format(len(failed_list), len(objects)))
    for obj in objects:
        if ".zip" not in obj:
            continue
        with zipfile.ZipFile(os.path.join(downloaded_path, obj.split("/")[-1]), 'r') as zip_ref:
            zip_ref.extractall(downloaded_path)
            logger.info("{} unzip done".format(obj))
    
def upload_to_obs(obs_client, bucket_name, generated_path, data, upload_path, logger):
    """Upload every file in generated_path to OBS under upload_path + data[2].

    Args:
        obs_client: OBS SDK client exposing putFile.
        bucket_name: target OBS bucket.
        generated_path: local directory whose files are uploaded.
        data: task record; data[2] is the dataset prefix appended to upload_path.
        upload_path: OBS key prefix (e.g. "ai_label_result/").
        logger: logger for progress/error reporting.

    Raises:
        Exception: when uploads still fail after 5 retry rounds.
    """
    datasetPath = data[2]
    if not datasetPath.endswith("/"):
        datasetPath = datasetPath + "/"
    file_names = os.listdir(generated_path)
    failed_list = []  # upload-failed file names, retried below
    for file_name in file_names:
        response = obs_client.putFile(bucket_name, upload_path + datasetPath + file_name, file_path=os.path.join(generated_path, file_name))
        if response.status >= 300:
            logger.error('{} upload failed'.format(file_name))
            failed_list.append(file_name)
    if not failed_list:  # all files uploaded on the first pass
        logger.info("all {} files upload success".format(len(file_names)))
        return
    retry_times = 1
    while failed_list:
        logger.info("retry uploading times:{}".format(retry_times))
        # Build a fresh failure list instead of removing from failed_list
        # while iterating it (the original skipped every other entry).
        still_failed = []
        for file_name in failed_list:
            response = obs_client.putFile(bucket_name, upload_path + datasetPath + file_name, file_path=os.path.join(generated_path, file_name))
            if response.status >= 300:
                logger.error('retry uploading times:{}, {} upload failed'.format(retry_times, file_name))
                still_failed.append(file_name)
        failed_list = still_failed
        retry_times += 1
        # give up after 5 retries — but only if something is actually
        # still failing (the original raised even after a clean last retry)
        if retry_times == 6 and failed_list:
            raise Exception("{}/{} files upload failed".format(len(failed_list), len(file_names)))

def delete_download_generated_data(downloaded_path, generated_path, visualized_path, logger):
    """Remove the task's downloaded/generated/visualized directories if they exist."""
    for path in (downloaded_path, generated_path, visualized_path):
        if os.path.exists(path):
            shutil.rmtree(path)
    logger.info("{} and {} and {} removed".format(downloaded_path, generated_path, visualized_path))


def mine(groundingdino_model, obs_client, data, logger):
    """Run one end-to-end mining task: download images, detect, upload, notify.

    data layout (by index): 1 taskId, 2 datasetPath, 3 startFrameNo,
    4 endFrameNo, 5 labelsId, 6 prompts, 7 boxTh, 8 textTh, 9 callbackUrl.
    On any failure the platform is notified with status 500 and the local
    working directories are cleaned up (unless IF_DEBUG).
    """
    # Pre-declare the working paths so the except-branch cleanup cannot hit a
    # NameError when the failure happens before these paths are created.
    downloaded_path = generated_path = visualized_path = None
    try:
        taskId = data[1]
        datasetPath = data[2]
        startFrameNo = int(data[3])
        endFrameNo = int(data[4])
        labelsId = data[5]
        prompts = data[6]
        boxTh = float(data[7])
        textTh = float(data[8])
        logger.info("start image mining")
        # timestamp suffix keeps concurrent/repeated tasks in separate dirs
        current_timestamp = datetime.datetime.now().timestamp()
        if datasetPath.endswith("/"):
            datasetPath = datasetPath[:-1]
        downloaded_path = os.path.join(data_path_dict["downloaded_data_save_path"], datasetPath + "_" + str(current_timestamp))
        os.makedirs(downloaded_path, exist_ok=True)
        # download data
        download_from_obs_and_check(obs_client, config.bucket_name, downloaded_path, data, logger)
        file_path_list = os.listdir(downloaded_path)
        image_path_list = [os.path.join(downloaded_path, x) for x in file_path_list if x.endswith((".jpg", ".png"))]
        image_path_list.sort()
        # frame range is inclusive on both ends
        image_path_list = image_path_list[startFrameNo: endFrameNo + 1]
        # mkdir generated data save paths
        generated_path = os.path.join(data_path_dict["generated_data_save_path"], datasetPath + "_" + str(current_timestamp))
        visualized_path = os.path.join(data_path_dict["visualizd_data_save_path"], datasetPath + "_" + str(current_timestamp))
        os.makedirs(generated_path, exist_ok=True)
        os.makedirs(visualized_path, exist_ok=True)
        # run detection on each image
        mining_result = {"data": [{"taskId": taskId, "labelApplicationType": 1}]}
        label = []
        for image_path in image_path_list:
            ret = mine_one_image(groundingdino_model, image_path, labelsId, prompts, boxTh, textTh, generated_path, visualized_path, logger)
            label.append(ret)
        mining_result["data"][0].update({"label": label})
        write_json(mining_result, os.path.join(generated_path, taskId + "_ai_label.json"))
        logger.info("all image groundingdino detect done")
        # upload to obs
        logger.info("start result uploading")
        upload_to_obs(obs_client, config.bucket_name, generated_path, data, data_path_dict["mining_upload_path"], logger)
        if not IF_DEBUG:
            delete_download_generated_data(downloaded_path, generated_path, visualized_path, logger)
        # notify platform
        status_code = 200
        message = "data mining result upload to {} success".format(data_path_dict["mining_upload_path"] + datasetPath)
        logger.info(message)
        notify_platform_data_mining_result(status_code, message, data, logger)
        logger.info("finish data mining")
    except Exception as e:
        logger.error("data mining failed because of {}".format(e))
        if not IF_DEBUG and downloaded_path is not None:
            # later paths may not have been created yet; "" is safely skipped
            # by the os.path.exists check inside the cleanup helper
            delete_download_generated_data(downloaded_path, generated_path or "", visualized_path or "", logger)
        status_code = 500
        message = "data mining failed"
        notify_platform_data_mining_result(status_code, message, data, logger)


def notify_platform_data_mining_result(status_code, message, data, logger):
    """POST the mining result summary back to the platform callback URL.

    Args:
        status_code: result code reported to the platform (200 ok / 500 failed).
        message: human-readable result message.
        data: task record; data[1] taskId, data[2] datasetPath, data[9] callbackUrl.
        logger: logger for reporting notification success/failure.
    """
    taskId = data[1]
    datasetPath = data[2]
    callbackUrl = data[9]
    payload = {"taskId": taskId, "resultCode": status_code, "resultMessage": message, "resultPath": data_path_dict["mining_upload_path"] + datasetPath}
    headers = {'Content-Type': 'application/json'}
    response = requests.post(callbackUrl, json=payload, headers=headers)
    # requests.Response carries the HTTP status in the int attribute
    # `status_code`; the original read a nonexistent `code` attribute and
    # compared it to the string "200", so this check always crashed
    if response.status_code != 200:
        logger.error("notify_platform_data_mining_result failed")
    else:
        logger.info("notify_platform_data_mining_result success")


