import os
import signal
import sys
import time

import grpc
from typing import Iterable
from huawei_osi.datahub_pb2 import StreamInfo
from huawei_osi.datahub_pb2_grpc import DataHubStub
# from huawei_osi.eva_pb2 import Evaluation
from huawei_osi.octopus import (
    Evaluation,
    Source, EvaluationMetric, EvaluationMetricAnomaly,EvaluationMetricResult
)
from osi3.osi_groundtruth_pb2 import GroundTruth


# NOTE(review): defined but unused — the gRPC channel in evaluate() hardcodes
# port 8001; confirm which endpoint is intended.
DATAHUB_PORT = 20080

# Names of the metrics to evaluate.
# Each top-level metric may contain 0-n sub-metrics.
# When a metric has no more than one level of sub-metrics, it is recommended
# to give the sub-metric the same name as its parent metric.
METRICS = {
    "acc": ("acc_x", "acc_y"),
    "metric1": ("metric1",)  # name as appropriate for your use case; Chinese names are supported
}


def write_evaluation_result(eva: Evaluation, output: str):

    if output:
        with open(output, 'wb') as f:
            f.write(bytes(eva))
    else:
        dict_eva = eva.to_dict(include_default_values=True)
        del dict_eva["vis"]
        print(eva)


def init_metrics(eva: Evaluation):
    """Populate ``eva.metrics`` from the module-level METRICS table.

    Creates one EvaluationMetric per top-level entry, each holding one
    EvaluationMetricAnomaly per configured sub-metric name.
    """
    for metric_name, sub_names in METRICS.items():
        entry = EvaluationMetric(display_name=metric_name)
        entry.anomalies.extend(
            EvaluationMetricAnomaly(display_name=sub) for sub in sub_names
        )
        eva.metrics.append(entry)


def handle_ground_truth(eva: Evaluation, stub: DataHubStub):
    """Consume the GroundTruth stream and keep *eva* up to date.

    Subscribes to the DataHub ground-truth stream and, for every received
    frame, recomputes the current per-metric results and writes them into
    ``eva`` so the SIGINT handler can flush an up-to-date result at any time.
    """
    gt_stream: Iterable[GroundTruth] = stub.SubscribeGroundTruth(StreamInfo())

    # Latest per-sub-metric result; keys must mirror the METRICS table.
    cur_result = {
        "acc": {
            "acc_x": EvaluationMetricResult.RESULT_UNSPECIFIED,
            "acc_y": EvaluationMetricResult.RESULT_UNSPECIFIED
        },
        "metric1": {
            "metric1": EvaluationMetricResult.RESULT_PASSED
        }
    }

    for gt in gt_stream:
        # Add custom evaluation logic here.
        for mv_obj in gt.moving_object:
            # e.g. check whether the ego vehicle's acceleration exceeds 5 m/s^2.
            # acceleration.x is used directly below; when OSI reports world
            # coordinates, the longitudinal acceleration must first be derived
            # via a coordinate transform.
            # Bug fix: MovingObject.id is an OSI Identifier message, so the
            # original `mv_obj.id == gt.host_vehicle_id.value` compared a
            # message to an int and was always False — compare .value fields.
            if mv_obj.id.value == gt.host_vehicle_id.value:
                if mv_obj.base.acceleration.x > 5:
                    cur_result["acc"]["acc_x"] = EvaluationMetricResult.RESULT_FAILED
        # Bug fix: update eva on EVERY frame, as the original comment intends.
        # Previously this ran only after the stream ended, so a Ctrl+C exit
        # (the normal termination path) wrote stale/empty results.
        update_eva(eva, cur_result)


def update_eva(eva: Evaluation, cur_result):
    """Copy the latest results into *eva* and recompute the overall score.

    ``cur_result`` maps metric display_name -> {anomaly display_name -> result}.
    Names missing from ``cur_result`` fall back to RESULT_UNSPECIFIED.
    """
    for metric in eva.metrics:
        subs = cur_result.get(metric.display_name, {})
        for anomaly in metric.anomalies:
            anomaly.status = subs.get(anomaly.display_name, EvaluationMetricResult.RESULT_UNSPECIFIED)
        # A metric passes only when every one of its anomalies passed.
        # Bug fix: assign a proper EvaluationMetricResult instead of a bool —
        # depending on the enum numbering a bool could never compare equal to
        # RESULT_FAILED below, so the score was never deducted.
        if all(a.status == EvaluationMetricResult.RESULT_PASSED for a in metric.anomalies):
            metric.status = EvaluationMetricResult.RESULT_PASSED
        else:
            metric.status = EvaluationMetricResult.RESULT_FAILED

    # Scoring: deduct an equal share of 100 points per failed metric.
    if not eva.metrics:
        # Guard: avoid ZeroDivisionError when no metrics are configured;
        # leave eva.score unchanged in that case.
        return
    penalty = 100 / len(eva.metrics)
    score = 100
    for metric in eva.metrics:
        if metric.status == EvaluationMetricResult.RESULT_FAILED:
            score -= penalty
    eva.score = score


def evaluate(output: str):
    """Run the real-time evaluation loop until interrupted with Ctrl+C.

    *output*: file path to write the serialized result to on exit, or a
    falsy value to print the result to stdout instead.
    """
    eva = Evaluation(
        score=0,
        avg_speed=25,
        distance=1000,
        reach_time=59,
        source=Source.SOURCE_CUSTOMIZED_REALTIME
    )

    def on_sigint(signum, stack):
        # Flush whatever has been accumulated so far, then terminate.
        print("receive ctrl+c, exiting")
        write_evaluation_result(eva, output)
        sys.exit(0)

    signal.signal(signal.SIGINT, on_sigint)
    init_metrics(eva)

    # Reconnect forever: the DataHub may not be up yet or the stream may drop.
    # NOTE(review): port 8001 is hardcoded here while DATAHUB_PORT (20080) is
    # defined above but unused — confirm which endpoint is intended.
    while True:
        try:
            with grpc.insecure_channel('localhost:8001') as channel:
                print("init client stub")
                handle_ground_truth(eva, DataHubStub(channel))
        except Exception as e:
            print(f"cannot connect to server, exc is {e}")
            time.sleep(1)


if __name__ == '__main__':

    # EVA_PATH: optional output file path for the serialized result;
    # when unset, the result is printed to stdout on exit.
    evaluate(os.environ.get("EVA_PATH"))
