#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2024/6/19 13:04
# @Author  : huidong.bai
# @File    : HawkDecderBaseSuite.py
# @Software: PyCharm
# @Mail    : MasterBai2018@outlook.com
import os
import pdb
import json
import time
import pytest
import allure
import shutil
from queue import Queue
import concurrent.futures
from threading import Event
from conftest import logging
from src.core.Case import TestCase
from src.utils.jsonUtil import JsonUtil
from src.core.Assertion import AssertionFactory
from src.core.Common.Bean.Conveyor import Conveyor
from src.core.Service.TSASession import TSASession
from src.core.ParamEventParse import ParamEventParse
from src.utils.common import uuid, load_yaml_config, get_timestamp
from src.core.Service.ClientSession import ClientSession
from src.core.Common.Bean.XMLReporter import XMLReporter
from src.core.Common.Bean.RuntimeRecorder import RuntimeRecorder
from src.core.Service.HawkDecoderClient import HawkDecoderClient
from src.core.NluEventParser import NluEventParser
from src.core.Status.HawkDecoderStatus import SpeechEngineParam, MongoCode
from src.utils.common import read_config, async_color_print, is_available_file
from src.utils.TxtTranslateExcel import TxtTranslateExcel


class HawkDecderBaseSuite:
    """Shared base for HawkDecoder pytest suites.

    Holds the decoder client, reporters, assertion machinery and per-case
    scratch state used by the fixtures and callback handlers below.

    NOTE(review): every attribute here is class-level, so it is shared by all
    subclasses and instances; setup_class rebinds most of them per suite run.
    The mutable containers (lists/Queue) are shared objects — confirm that is
    intentional before running suites concurrently.
    (Class name spelling "Decder" matches the filename HawkDecderBaseSuite.py.)
    """
    dataBus = Queue()                       # bus queue that receives callback data
    xmlReporter = XMLReporter()             # JSON reporter backing the XML report
    runTimeRecoder = RuntimeRecorder()      # runtime result collector
    conveyor: Conveyor = None               # carrier of externally supplied parameters
    yaml_config = None                      # parsed yaml configuration object
    hawk_session: HawkDecoderClient = None  # SpeechEngine (HawkDecoder) client
    suiteDir = ""                           # root working directory of the running suite
    resultPath = ""                         # path of the result file (result.txt)
    assert_finish_cond = Event()            # event signalling that assertion finished
    testcase = TestCase()                   # current test case
    asserter = AssertionFactory()           # assertion engine
    thread_pool_executor = None             # thread pool for callback-data processing
    report_pool = []                        # buffered callback payloads for reporting
    calc_recrate = []                       # list used to compute error rate
    callback_result = []                    # accumulated callback results
    voice_time_list = []                    # audio duration statistics
    voiceRegionEndList = []                 # end boundaries of audio voice regions
    txtTranslateExcel = TxtTranslateExcel()  # excel report generator

    @classmethod
    def setup_class(cls, suite_name, hawk_callback=None):
        """Initialize shared suite state before any test in the class runs.

        Wires the conveyor (external parameters), yaml config, decoder client,
        XML reporter and assertion engine together, and creates the callback
        thread pool.

        Args:
            suite_name: name pushed to the XML reporter as the suite-start marker.
            hawk_callback: optional listener installed on the decoder client.
        """
        from conftest import cmder
        cls.conveyor = cmder
        cls.suiteDir = cls.conveyor.suite_dir
        cls.yaml_config = load_yaml_config(cls.conveyor.parse_yaml)
        cls.resultPath = os.path.join(cls.suiteDir, "result.txt")
        cls.xmlReporter.set_param(cls.conveyor)

        cls.hawk_session = HawkDecoderClient(cls.conveyor.hawk_decoder_so)

        if hawk_callback:
            cls.hawk_session.set_listener(hawk_callback)

        cls.xmlReporter.push_start(suite_name)
        cls.asserter.load_path_yaml(cls.yaml_config)
        cls.asserter.set_reporter(cls.xmlReporter)
        cls.asserter.set_case(cls.testcase)
        # FIX: reuse the path computed above instead of re-joining the identical
        # conveyor.suite_dir + "result.txt" a second time.
        cls.asserter.setResultPath(cls.resultPath)
        cls.asserter.set_finish_event(cls.assert_finish_cond)
        cls.asserter.set_recorder(cls.runTimeRecoder)
        cls.asserter.set_databus(cls.dataBus)
        cls.hawk_session.set_recorder(cls.runTimeRecoder)
        cls.thread_pool_executor = concurrent.futures.ThreadPoolExecutor(max_workers=20)

    @classmethod
    def teardown_class(cls):
        """Release the decoder client and flush shared state after the suite.

        Frees the native decoder session, clears the data bus and stops the
        XML reporter.
        """
        cls.hawk_session.free()
        cls.dataBus.queue.clear()
        del cls.hawk_session
        cls.xmlReporter.push_stop()
        # FIX: removed dead `del cls` — it only unbound the local name `cls`
        # at the end of the method and had no effect on the class object.

    @pytest.fixture(scope="class", autouse=True, name="MongoTestSuiteFixture")
    def mongo_testsuite_fixture(self):
        """Class-scoped banner fixture: prints suite start/end markers around
        the whole test class."""
        start_banner = "\n" + "=" * 31 + " test suit start " + "=" * 31
        async_color_print((start_banner, None))
        yield
        end_banner = "=" * 32 + " test suit end " + "=" * 32
        async_color_print((end_banner, None))
    
    @pytest.fixture(scope="function", autouse=True, name="MongoPreTestFixture")
    def mongo_pretest_fixture(self, request):
        """Per-test fixture: prints case banners, fills in allure metadata,
        resets per-case state, then (after the test) records the reproduction
        command and, on failure with log-saving enabled, attaches logs and
        the case's audio file to the allure report."""
        async_color_print(("\n" + "=" * 31 + " test case start " + "=" * 31, "green"))
        allure.dynamic.id(request.node._nodeid)  # NOTE(review): uses pytest's private _nodeid attribute
        allure.dynamic.parent_suite(f"[ Suite ] [{self.conveyor.suite_id}] {self.conveyor.suite_abstract}")
        allure.dynamic.suite(f"[ Suite ] [{self.conveyor.suite_id}]")
        allure.dynamic.epic(f"[ Solution ] {self.conveyor.solution_space}")
        allure.dynamic.feature(f"[ Scene ] {self.conveyor.workspace}")
        allure.dynamic.story(f"[ Suite ] [{self.conveyor.suite_id}] {self.conveyor.suite_abstract}")
        self.clear()
        yield
        # Command line that reproduces exactly this case (same suite, config and line index).
        run_cmd = f"python3 Run_Mongo.py -f {self.conveyor.suite_name} -C {self.conveyor.config} -a {self.conveyor.case_list} -p {self.conveyor.source_lcs_config} -F {self.testcase.index}"
        with allure.step("执行命令："): pass
        with allure.step(run_cmd): pass

        # When the case failed and the log-save switch is on, save the logs.
        if (not self.asserter.result) and (self.conveyor.log_save_option == "True"):
            self.attach_zip_log()
            # Also attach the case's audio to the allure report.
            voice_path = self.testcase.input.get("voice")
            self.attach_voice(voice_path)

        async_color_print(("\n" + "="*32 + " test case end " + "=" * 32, "green"))
    
    @pytest.fixture(scope="function", autouse=False, name="testCaseLoader")
    def mongo_testcase_load_fixture(self, request):
        """Opt-in fixture that loads the parametrized "testcase" value into
        self.testcase, skips the test when the case cannot be parsed or its
        audio file is unusable, and sets the allure title after the test."""
        testcase = None
        try:
            # NOTE(review): reads pytest internals (request._pyfuncitem.callspec) —
            # fragile across pytest versions; verify on upgrade.
            testcase = request._pyfuncitem.callspec.params.get("testcase")
            self.testcase.load_case(testcase)
        except Exception as e:
            logging.error(f"加载Case失败: {e}, testcase:{testcase}")
            self.xmlReporter.skip_case_infos.append(f"加载Case失败: {e}, testcase:{testcase}")
            pytest.skip(f"加载Case失败: {e}, testcase:{testcase}")

        # Surface the parsed case fields as allure steps for the report.
        with allure.step(f"解析Case：{self.conveyor.case_list}"):
            with allure.step(f"[ Line ] {self.testcase.index}"): pass
            with allure.step(f"[ 输入 ] {self.testcase.input}"): pass
            with allure.step(f"[ 参数 ] {self.testcase.param}"): pass
            with allure.step(f"[ 预期 ] {self.testcase.expect}"): pass

        # Skip early if the case references an unusable audio file.
        voice = self.testcase.input.get('voice')
        if voice and (not is_available_file(voice)):
            logging.error(f"错误的音频: {voice}")
            self.xmlReporter.skip_case_infos.append(f"错误的音频: {voice}")
            pytest.skip(f"错误的音频: {voice}")
        yield
        allure.dynamic.title(f"[{self.testcase.index}] {self.testcase.input}")

    def attach_voice(self, voice_path):
        """Attach the case's audio file to the allure report.

        Does nothing when the path is None, missing on disk, or has an
        unsupported extension (only .wav and .pcm are attached).
        """
        if voice_path is None or not os.path.exists(voice_path):
            return
        suffix_to_type = {
            '.wav': allure.attachment_type.WAVE,
            '.pcm': allure.attachment_type.PCM,
        }
        for suffix, attachment_type in suffix_to_type.items():
            if voice_path.endswith(suffix):
                allure.attach.file(source=voice_path, name=voice_path, attachment_type=attachment_type)
                break

    def attach_zip_log(self):
        """Zip the suite's log directory and attach the archive to allure.

        The archive lands under <solution_space>/allure_attach/<uuid>.zip.
        """
        archive_base = os.path.join(self.conveyor.solution_space, "allure_attach", uuid())
        suite_log_dir = os.path.join(self.suiteDir, "log")
        shutil.make_archive(base_name=archive_base, format="zip", base_dir=suite_log_dir)
        allure.attach.file(archive_base + ".zip", name="运行日志文件", attachment_type=allure.attachment_type.ZIP)

    def clear(self):
        """Reset all per-case state before each test function runs."""
        for resettable in (self.runTimeRecoder, self.testcase, self.asserter):
            resettable.reset()
        self.dataBus.queue.clear()
        self.report_pool.clear()

    def set_parameters_pre(self) -> int:
        """Apply all per-case engine parameters (from self.testcase.param)
        before audio is fed to the engine.

        Returns:
            0 on completion, -1 if the case's expect section cannot be
            serialized to JSON. Individual parameter failures are logged but
            do not abort the run.
        """
        try:
            expects = json.dumps(self.testcase.expect)
        except Exception as e:
            logging.error(f"Parse testcase expects error: {e}")
            return -1

        # Delay mode used when streaming audio into the engine.
        delay = self.testcase.param.get('delay')
        if delay:
            self.hawk_session.set_delay(float(delay))

        # Work mode defaults to 2 for compatibility with LCS freeWakeup cases.
        workMode = self.testcase.param.get('freeWakeup')
        ret = self.hawk_session.set_speech_engine_work_mode(workMode if workMode else "2")
        if ret != 0:
            logging.error(f"Set speech engine work mode error: {ret}")

        # Full-time mode switch: 1 = on, 0 = off. Kept inline because failure
        # additionally prints a console notification.
        fullTimeOption = self.testcase.param.get('fullTimeOption')
        if fullTimeOption:
            ret = self.hawk_session.set_speech_engine_param(SpeechEngineParam.SPEECH_ENGINE_PARAM_FULLTIME_OPTION, int(fullTimeOption))
            if ret != 0:
                print(f"设置全时模式失败")
                logging.error(f"Set speech engine fullTime mode error: {ret}")

        # Language info, e.g. vrLang:cmn
        vrLang = self.testcase.param.get('vrLang')
        if vrLang:
            ret = self.hawk_session.speech_engine_set_language_info(vrLang)
            if ret != 0:
                logging.error(f"Set speech engine lang error: {ret}")

        # Wakeup scene; defaults to all scenes enabled (bitmask 1048575).
        wakeupScene = self.testcase.param.get('wakeupScene') or '1048575'
        ret = self.hawk_session.set_speech_engine_param(SpeechEngineParam.SPEECH_ENGINE_PARAM_WAKEUP_SCENE, int(wakeupScene))
        if ret != 0:
            logging.error(f"Set speech engine wakeup scene error: {ret}")

        # Optional int parameters that share the same set-and-log pattern.
        # Local on-screen realtime-result switch.
        self._set_optional_engine_param('offlineButterfly', SpeechEngineParam.SPEECH_ENGINE_PARAM_REAL_TIME_RESULT, "offline butterfly")
        # OneShot delay duration.
        self._set_optional_engine_param('delayOneshotDuration', SpeechEngineParam.SPEECH_ENGINE_PARAM_WAKEUP_DELAY_ONESHOT_DURATION, "delay oneshot duration")
        # Silence timeout.
        self._set_optional_engine_param('silenceDuration', SpeechEngineParam.SPEECH_ENGINE_PARAM_SILENCE_DURATION, "silence duration")
        # Digit conversion switch.
        self._set_optional_engine_param('digitOption', SpeechEngineParam.SPEECH_ENGINE_PARAM_DIGIT_CONVERT_RESULT, "digit option")

        # Custom wakeup word with detection threshold (default "0.1").
        vrWakeupAlias = self.testcase.param.get('vrWakeupAlias')
        threshold = self.testcase.param.get('threshold')
        if threshold is None:
            threshold = "0.1"
        if vrWakeupAlias and threshold:
            ret = self.hawk_session.add_wakeup_word(vrWakeupAlias, threshold)
            if ret != 0:
                logging.error(f"Set speech engine alis wakeup word error: {ret}")

        # Capture the engine's current wakeup-word list when the case expects it.
        if "wakeupWordList" in expects:
            wakeupWords = self.hawk_session.get_wakeup_term()
            if wakeupWords:
                all_wakeup_words = [val["word"] for val in json.loads(wakeupWords)["wakeup"]["data"]["fix_data"]]
                # BUGFIX: the recorder assignment used to sit inside the loop
                # and was re-executed on every iteration; the non-empty guard
                # preserves the original "never assigned when fix_data is
                # empty" behavior.
                if all_wakeup_words:
                    self.runTimeRecoder.wakeupWordList = all_wakeup_words

        # Enable a specific wakeup word.
        vrWakeupWord = self.testcase.param.get('vrWakeupWord')
        if vrWakeupWord:
            ret = self.hawk_session.set_wakeup_word_enable(vrWakeupWord, 1)
            if ret != 0:
                logging.error(f"Set speech engine wakeup word enable error: {ret}")

        # Link type.
        self._set_optional_engine_param('linkType', SpeechEngineParam.SPEECH_ENGINE_PARAM_LINK_TYPE, "link type")
        # Active sound zones; when absent the engine keeps its own default.
        self._set_optional_engine_param('vrSeatSignal', SpeechEngineParam.SPEECH_ENGINE_PARAM_SEAT_SIGNAL, "vr seat signal")
        # Scene wakeup-word switch, e.g. vrSceneOption:1
        self._set_optional_engine_param('vrSceneOption', SpeechEngineParam.SPEECH_ENGINE_PARAM_SR_WAKEUP_SCENE_ENABLE, "vr wakeup scene option")

        return 0

    def _set_optional_engine_param(self, key, engine_param, label):
        """Read an optional case parameter and apply it as an int engine param.

        Does nothing when the param is absent or falsy; failures are logged
        with the given human-readable label (message text unchanged from the
        pre-refactor per-parameter copies).
        """
        value = self.testcase.param.get(key)
        if value:
            ret = self.hawk_session.set_speech_engine_param(engine_param, int(value))
            if ret != 0:
                logging.error(f"Set speech engine {label} error: {ret}")

    def set_parameters_after(self):
        """Apply parameters that must be set after engine start.

        Currently only the visible-and-speakable (HMI) page info is handled;
        failures are logged but not propagated.

        Returns:
            0 always.
        """
        hmi_file = self.testcase.param.get('hmi')
        if hmi_file:
            ret = self.hawk_session.set_hmi(file=hmi_file)
            if ret != 0:
                logging.error(f"Set speech engine hmi event error: {ret}, event: {hmi_file}")

        # TODO: personalization (grammar) setup — previously routed through
        # tsa_session.set_event(grammar, is_file=True); not wired up here.

        return 0

    @classmethod
    def voice_region_parse(cls):
        """Collect the end timestamp of every expected voice region, sort the
        list, and hand it to the TSA session for boundary tracking.

        NOTE(review): relies on cls.tsa_session, which is not defined in this
        base class — presumably assigned by a subclass; verify before use.
        """
        # FIX: first parameter of a @classmethod renamed from the misleading
        # `self` to the conventional `cls` (binding semantics are unchanged).
        for item in cls.testcase.expect:
            # voiceRegion is treated as a (start, end) pair; index 1 is the end.
            cls.voiceRegionEndList.append(item.get("voiceRegion")[1])
        cls.voiceRegionEndList.sort()
        cls.tsa_session.set_voice_region_end_list(cls.voiceRegionEndList)

    @classmethod
    def process_result(cls, status, data):
        """Dispatch one decoder callback payload onto the data bus.

        Extracts fields per the yaml_config mapping for the payload's message
        type, prints a colored summary for known result types, and enqueues
        the normalized result for the assertion engine.

        NOTE(review): cls.callback_event_type, cls.callback_event_id and
        cls.nluEvent are not declared among this class's visible attributes —
        presumably provided by a subclass; confirm before standalone use.
        """
        # ASSERTED is the assertion-trigger signal: forward a minimal record and stop.
        message_type = JsonUtil.parse(data, 'type')
        if message_type == "ASSERTED":
            _data = {"status": status, "resultType": message_type, "voiceRegionEnd": data.get("voiceRegionEnd"), "recordTime": get_timestamp()}
            cls.dataBus.put(_data)
            return

        # Only message types configured in yaml_config are processed further.
        if message_type in cls.yaml_config:
            type_config = cls.yaml_config[message_type]
            result = {"status": status, "resultType": message_type, "recordTime": get_timestamp()}

            # Project each configured json-path out of the raw payload.
            for key, value in type_config.items():
                data_value = JsonUtil.parse(data, value)
                result[key] = data_value
            channel_id = str(result.get("channelId"))

            if message_type == "NLPResult":
                source = result.get("source")
                text = result.get("text")
                skill = result.get("skill")
                intention = result.get("intention")
                dirCallbackType = result.get("dirCallbackType")
                # NOTE(review): key "dirCallbacketId" (lowercase "et") — presumably
                # matches the yaml_config key; verify it is not a typo.
                dirCallbackEtId = result.get("dirCallbacketId")
                # Filter out empty-text VPA understanding results.
                if not text and skill == "VPA_ACTION" and intention == "VPA_ACTION":
                    return
                async_color_print((f"[ 理解结果 ] channel:{channel_id}\tsource:{source}\ttext:{text}\tskill:{skill}\tintention:{intention}\tcallbackType:{dirCallbackType}", "yellow"))
                async_color_print((json.dumps(data, ensure_ascii=False, separators=(',', ':')), None))
                cls.callback_event_type = dirCallbackType
                cls.callback_event_id = dirCallbackEtId
                cls.dataBus.put(result)
                cls.nluEvent.set()  # wake any waiter blocked on the NLU event
            elif message_type == "ASRResult":
                asr = result.get("asr")
                source = result.get("source")
                async_color_print((f"[ 识别结果 ] channel:{channel_id}\tsource:{source}\ttext:{asr}", "yellow"))
                async_color_print((json.dumps(data, ensure_ascii=False, separators=(',', ':')), None))
                cls.dataBus.put(result)
            elif message_type == "ASRResultTemp":
                # Intermediate ASR hypothesis: print text only, no summary line.
                tmp_asr = result.get("asr")
                async_color_print((tmp_asr, None))
                cls.dataBus.put(result)
            elif message_type == "ASRInputResult":
                voiAsr = result.get("voiAsr")
                async_color_print((f"[ VOI Asr ] channel:{channel_id}\ttext:{voiAsr}", "blue"))
                async_color_print((json.dumps(data, ensure_ascii=False, separators=(',', ':')), None))
                cls.dataBus.put(result)
            elif message_type == "ASRInputResultTemp":
                voiAsrTemp = result.get("voiAsrTemp")
                async_color_print((f"{voiAsrTemp}", "blue"))
                cls.dataBus.put(result)

    @classmethod
    def tsa_callback_handler(cls, status, data):
        """Handle callback events from the TSA assistant / LCSEngine platform.

        Mirrors process_result but additionally buffers every raw payload in
        cls.report_pool and every normalized result in cls.callback_result,
        and handles the extra HICARWakeup message type.

        NOTE(review): cls.callback_event_type, cls.callback_event_id and
        cls.nluEvent are not declared among this class's visible attributes —
        presumably provided by a subclass; confirm before standalone use.
        """
        # ASSERTED is the assertion-trigger signal: forward a minimal record and stop.
        message_type = JsonUtil.parse(data, 'type')
        if message_type == "ASSERTED":
            _data = {"status": status, "resultType": message_type, "voiceRegionEnd": data.get("voiceRegionEnd"),
                     "recordTime": get_timestamp()}
            cls.dataBus.put(_data)
            return
        # Buffer the raw payload for the report before any filtering below.
        callback_data = json.dumps(data, ensure_ascii=False, separators=(',', ':'))
        cls.report_pool.append(callback_data)
        if message_type in cls.yaml_config:
            type_config = cls.yaml_config[message_type]
            result = {"status": status, "resultType": message_type, "recordTime": get_timestamp()}
            # Project each configured json-path out of the raw payload.
            for key, value in type_config.items():
                data_value = JsonUtil.parse(data, value)
                result[key] = data_value
            channel_id = str(result.get("channelId"))
            cls.callback_result.append(result)
            if message_type == "NLPResult":
                source = result.get("source")
                text = result.get("text")
                skill = result.get("skill")
                intention = result.get("intention")
                dirCallbackType = result.get("dirCallbackType")
                # NOTE(review): key "dirCallbacketId" (lowercase "et") — presumably
                # matches the yaml_config key; verify it is not a typo.
                dirCallbackEtId = result.get("dirCallbacketId")
                # Filter out empty-text VPA understanding results.
                if not text and skill == "VPA_ACTION" and intention == "VPA_ACTION":
                    return
                async_color_print((f"[ 理解结果 ] channel:{channel_id}\tsource:{source}\ttext:{text}\tskill:{skill}\tintention:{intention}\tcallbackType:{dirCallbackType}", "yellow"))
                async_color_print((callback_data, None))
                cls.callback_event_type = dirCallbackType
                cls.callback_event_id = dirCallbackEtId
                cls.dataBus.put(result)
                cls.nluEvent.set()  # wake any waiter blocked on the NLU event
            elif message_type == "ASRResult":
                asr = result.get("asr")
                source = result.get("source")
                async_color_print((f"[ 识别结果 ] channel:{channel_id}\tsource:{source}\ttext:{asr}", "yellow"))
                async_color_print((callback_data, None))
                cls.dataBus.put(result)
            elif message_type == "ASRResultTemp":
                # Intermediate ASR hypothesis: print text only, no summary line.
                tmp_asr = result.get("asr")
                async_color_print((tmp_asr, None))
                cls.dataBus.put(result)
            elif message_type == "ASRInputResult":
                voiAsr = result.get("voiAsr")
                async_color_print((f"[ VOI Asr ] channel:{channel_id}\ttext:{voiAsr}", "blue"))
                async_color_print((callback_data, None))
                cls.dataBus.put(result)
            elif message_type == "ASRInputResultTemp":
                voiAsrTemp = result.get("voiAsrTemp")
                async_color_print((f"{voiAsrTemp}", "blue"))
                cls.dataBus.put(result)
            elif message_type == "HICARWakeup":
                source = result.get("source")
                channel_id = result.get("channelId")  # overwrites the stringified id above with the raw value
                types = result.get("type")
                text = result.get("text")
                cls.dataBus.put(result)
                async_color_print((f"[ HiCar唤醒结果 ][{channel_id}]\tsource:{source}\ttype:{types}\ttext:{text}", "red"))