import base64
import io
import os
import sys

import gradio as gr

from sdk.httpclient import CTClientBuilder, CTClient

# Base endpoint of the CTyun global AI API gateway; every handler below
# builds its request URL from this prefix.
base_url = "https://ai-global.ctapi.ctyun.cn"

# Shared authenticated HTTP client. Declared here, assigned in run() before
# the Gradio app launches, so all handlers can reach it as a global.
client: CTClient


def _get_image_base64(img):
    """Encode an image as base64 PNG.

    Args:
        img: image object exposing ``save(fp, format=...)`` (e.g. a
            ``PIL.Image`` as delivered by ``gr.Image(type="pil")``).

    Returns:
        tuple[float, str]: (size of the base64 string in MB, base64 string).
    """
    byte_arr = io.BytesIO()
    img.save(byte_arr, format='PNG')
    base64_str = base64.b64encode(byte_arr.getvalue()).decode('utf-8')
    # len() is the actual payload length; sys.getsizeof would also count the
    # str object's header and inflate the size reported to the user.
    base64_mb_size = len(base64_str) / (1024 * 1024)
    return base64_mb_size, base64_str


def _get_video_base64(video_file):
    """Encode a video file as base64.

    Args:
        video_file: path to the video file (as delivered by ``gr.Video``).

    Returns:
        tuple[float, str]: (size of the base64 string in MB, base64 string).
    """
    with open(video_file, 'rb') as f:
        video_data = f.read()
    base64_str = base64.b64encode(video_data).decode('utf-8')
    # len() is the actual payload length; sys.getsizeof would also count the
    # str object's header and inflate the size reported to the user.
    base64_mb_size = len(base64_str) / (1024 * 1024)
    return base64_mb_size, base64_str


def hat_detect(img):
    """Detect safety-helmet wearing via the CTyun SafeHat API.

    Args:
        img: PIL image uploaded through the Gradio UI.

    Returns:
        The API's ``returnObj`` payload on success; otherwise a dict with an
        ``errMsg`` (oversized upload) or the raw response JSON (unexpected
        response shape).

    Raises:
        Propagates HTTP errors from ``response.raise_for_status()``.
    """
    base64_size, base64_str = _get_image_base64(img)
    if base64_size > 2.0:
        # The interface has a single output component, so every branch must
        # return exactly one value (the original returned a 2-tuple here).
        return {"errMsg": f"上传的图片进行base64编码后大小为 {base64_size:.2f} MB，超过 2.00 MB"}

    url = f"{base_url}/v1/aiop/api/2f3uquanbrpc/SafeHat/base/hat_detect"
    response = client.request("POST", url, body={"image": base64_str})
    response.raise_for_status()
    response_json = response.json()
    try:
        return response_json['returnObj']
    except KeyError as e:
        # Only a missing 'returnObj' key is expected here; show the raw
        # response so the failure is visible in the UI.
        print(f"error: {e}")
        return response_json


def predict(action_type, video):
    """Liveness action detection: verify the person in the video performs
    the requested action (blink, open mouth, head turn, nod).

    Args:
        action_type: API action code, e.g. "Blink" or "FaceLeft".
        video: path to the uploaded video file.

    Returns:
        The API's ``returnObj`` payload on success; otherwise a dict with an
        ``errMsg`` (oversized upload) or the raw response JSON (unexpected
        response shape).

    Raises:
        Propagates HTTP errors from ``response.raise_for_status()``.
    """
    base64_size, base64_str = _get_video_base64(video)
    if base64_size > 8.0:
        # The interface has a single output component, so every branch must
        # return exactly one value. Wording fixed: this endpoint takes a
        # video ("视频"), not an image ("图片").
        return {"errMsg": f"上传的视频进行base64编码后大小为 {base64_size:.2f} MB，超过 8.00 MB"}

    url = f"{base_url}/v1/aiop/api/3j8ogbxxpce8/action-fas/predict"
    response = client.request("POST", url, body={"Action": "ActionFas", "VideoBase64": base64_str, "ActionType": action_type})
    response.raise_for_status()
    response_json = response.json()
    try:
        return response_json['returnObj']
    except KeyError as e:
        # Only a missing 'returnObj' key is expected here; show the raw
        # response so the failure is visible in the UI.
        print(f"error: {e}")
        return response_json


def falldown(img):
    """Detect fall-down actions in an image via the CTyun API.

    Args:
        img: PIL image uploaded through the Gradio UI.

    Returns:
        The API's ``returnObj`` payload on success; otherwise a dict with an
        ``errMsg`` (oversized upload) or the raw response JSON (unexpected
        response shape).

    Raises:
        Propagates HTTP errors from ``response.raise_for_status()``.
    """
    base64_size, base64_str = _get_image_base64(img)
    if base64_size > 2.0:
        # The interface has a single output component, so every branch must
        # return exactly one value (the original returned a 2-tuple here).
        return {"errMsg": f"上传的图片进行base64编码后大小为 {base64_size:.2f} MB，超过 2.00 MB"}

    url = f"{base_url}/v1/aiop/api/2gm0y369n30g/action/falldown/images"
    response = client.request("POST", url, body={"images": [base64_str]})
    response.raise_for_status()
    response_json = response.json()
    try:
        return response_json['returnObj']
    except KeyError as e:
        # Only a missing 'returnObj' key is expected here; show the raw
        # response so the failure is visible in the UI.
        print(f"error: {e}")
        return response_json


def fight(img):
    """Detect fighting behavior in an image via the CTyun API.

    Args:
        img: PIL image uploaded through the Gradio UI.

    Returns:
        The API's ``returnObj`` payload on success; otherwise a dict with an
        ``errMsg`` (oversized upload) or the raw response JSON (unexpected
        response shape).

    Raises:
        Propagates HTTP errors from ``response.raise_for_status()``.
    """
    base64_size, base64_str = _get_image_base64(img)
    if base64_size > 2.0:
        # The interface has a single output component, so every branch must
        # return exactly one value (the original returned a 2-tuple here).
        return {"errMsg": f"上传的图片进行base64编码后大小为 {base64_size:.2f} MB，超过 2.00 MB"}

    url = f"{base_url}/v1/aiop/api/2gm1xzigc2kg/action/fight/images"
    response = client.request("POST", url, body={"images": [base64_str]})
    response.raise_for_status()
    response_json = response.json()
    try:
        return response_json['returnObj']
    except KeyError as e:
        # Only a missing 'returnObj' key is expected here; show the raw
        # response so the failure is visible in the UI.
        print(f"error: {e}")
        return response_json


# Footer HTML shared by every demo page, linking to the product site.
product_intro = "进一步了解天翼云安全生产产品：<a href='https://www.ctyun.cn/products/workplacesafety'>https://www.ctyun.cn/products/workplacesafety</a>"
# Maps the value of the ext_cf_app_type environment variable to the single
# Gradio Interface that run() launches. Each interface renders its result
# into one Textbox.
gr_config = {
    # Safety-helmet wearing detection on a single uploaded image.
    "hat_detect": gr.Interface(fn=hat_detect,
                                inputs=gr.Image(type="pil", label="上传图像"),
                                outputs=[
                                    gr.Textbox(label="识别结果")
                                ],
                                examples=[],
                                title="安全生产 - 安全帽正确佩戴识别",
                                description="用于自动检测图片中的安全帽穿戴情况，并给出图像中安全帽的颜色及位置信息。",
                                article=product_intro),
    # Liveness action detection: the user picks an action type (the dropdown
    # shows a Chinese label but submits the English API code) and uploads a
    # video to verify against it.
    "predict": gr.Interface(fn=predict,
                                inputs=[
                                    gr.Dropdown([("眨眼", "Blink"), ("张嘴", "OpenMouth"), ("左摇头", "FaceLeft"), ("右摇头", "FaceRight"), ("上下点头", "FaceUpdown"), ], label="动作类型"),
                                    gr.Video(label="上传视频")
                                ],
                                outputs=[
                                    gr.Textbox(label="识别结果")
                                ],
                                examples=[],
                                title="安全生产 - 动作活体识别",
                                description="用于判断视频中的人物动作与传入动作类型是否一致来识别视频中人物是否眨眼、左摇头、右摇头、上下点头或张嘴，以及是否活体人脸。",
                                article=product_intro),
    # Fall-down detection on a single uploaded image.
    "falldown": gr.Interface(fn=falldown,
                                inputs=gr.Image(type="pil", label="上传图像"),
                                outputs=[
                                    gr.Textbox(label="识别结果")
                                ],
                                examples=[],
                                title="安全生产 - 摔倒识别",
                                description="用于判断图片中是否存在摔倒动作。",
                                article=product_intro),
    # Fighting-behavior detection on a single uploaded image.
    "fight": gr.Interface(fn=fight,
                                inputs=gr.Image(type="pil", label="上传图像"),
                                outputs=[
                                    gr.Textbox(label="识别结果")
                                ],
                                examples=[],
                                title="安全生产 - 打架识别",
                                description="用于判断图片中是否存在打架行为。",
                                article=product_intro),
}


def get_not_empty_env(key):
    """Return the value of environment variable *key*, stripped of whitespace.

    Args:
        key: name of the environment variable.

    Returns:
        The stripped, non-empty value.

    Raises:
        ValueError: if the variable is unset or blank. (ValueError is a
            subclass of Exception, so any existing ``except Exception``
            callers keep working.)
    """
    value = os.getenv(key, "").strip()
    if not value:
        raise ValueError(f"env {key} is not set or empty")
    return value


def run():
    """Read credentials from the environment, build the shared API client,
    and launch the Gradio demo selected by ``ext_cf_app_type``.

    Raises:
        Exception: if any required environment variable is missing or blank.
        KeyError: if ``ext_cf_app_type`` names no configured demo.
    """
    ak = get_not_empty_env("ext_cf_ctyun_ak")
    sk = get_not_empty_env("ext_cf_ctyun_sk")
    ai_app_key = get_not_empty_env("ext_cf_ctyun_ai_app_key")
    selected_app = get_not_empty_env("ext_cf_app_type")

    # Populate the module-level client before any Gradio handler can fire.
    global client
    builder = CTClientBuilder().with_ak(ak).with_sk(sk)
    client = builder.with_ai_app_key(ai_app_key).build()

    demo = gr_config[selected_app]
    demo.launch(server_name="0.0.0.0", server_port=9000)


# Script entry point: start the Gradio demo selected via environment variables.
if __name__ == '__main__':
    run()
