# -*- coding:UTF-8 -*-

from __future__ import print_function
from speech.ibm.speech_recognition import IBMSpeechRecognizer
from translate.youdao.translate_recognition import YouDaoTranslateRecognizer
import threading
import librosa
from vad.speech_segmentation import compute_bic, vad
import speech_recognition


class TextData:
    """A single detected speech segment and its (eventual) transcriptions.

    Attributes:
        start_time: segment start, in seconds.
        end_time: segment end, in seconds.
        en_text: English transcription (filled in later by Recognizer), or None.
        zh_text: Chinese translation (filled in later by Translation), or None.
    """

    def __init__(self, start_time, end_time, en_text=None, zh_text=None):
        self.start_time = start_time
        self.end_time = end_time
        self.en_text = en_text
        self.zh_text = zh_text

    def __repr__(self):
        # Added for debuggability; purely additive, existing callers unaffected.
        return ('TextData(start_time=%r, end_time=%r, en_text=%r, zh_text=%r)'
                % (self.start_time, self.end_time, self.en_text, self.zh_text))


class WaveAudio(threading.Thread):
    """Audio-segmentation thread.

    Loads an audio file, computes MFCC features, splits the signal at
    BIC-detected change points, and pushes the resulting (start, end)
    time spans onto ``times_queue`` as :class:`TextData` items.
    """

    def __init__(self, times_queue, sr, frame_size, frame_shift):
        """
        :param times_queue: queue that receives TextData(start, end) spans.
        :param sr: target sample rate passed to librosa.load.
        :param frame_size: FFT window size (n_fft), in samples.
        :param frame_shift: hop length, in samples; also used to map MFCC
            frame indices back to sample positions.
        """
        super(WaveAudio, self).__init__()
        self.times_queue = times_queue
        self.sr = sr
        self.frame_size = frame_size
        self.frame_shift = frame_shift
        self.file_path = None

    def set_file_path(self, file_path):
        """Set the audio file to segment; must be called before start()."""
        self.file_path = file_path

    def run(self):
        # BUG FIX: the original `if not self.file_path: pass` was a no-op
        # guard, so librosa.load(None) would raise.  Bail out instead.
        if not self.file_path:
            return

        # Audio time series and actual sample rate.
        y, sr = librosa.load(self.file_path, sr=self.sr)

        # MFCC feature matrix, normalized by its global maximum.
        mfccs = librosa.feature.mfcc(y, sr, n_mfcc=12, hop_length=self.frame_shift, n_fft=self.frame_size)
        mfccs = mfccs / mfccs.max()

        w_start = 0      # sliding-window start (MFCC frame index)
        w_end = 200      # sliding-window end (MFCC frame index)
        w_grow = 200     # growth step when no change point is found
        delta = 25       # BIC penalty parameter passed to compute_bic

        m, n = mfccs.shape

        # Segment-wise BIC: grow the window until a change point is
        # detected, emit the span up to it, then restart past it.
        temp_point = 0   # start of the current segment (MFCC frame index)

        while w_end < n:
            feature_seg = mfccs[:, w_start:w_end]
            det_bic = compute_bic(feature_seg, delta)  # change-point offset, or <= 0 when none

            if det_bic > 0:
                temp = w_start + det_bic

                start_time, end_time = self.time_quantum(y, temp_point, temp)
                if start_time and end_time:
                    self.times_queue.put(TextData(start_time, end_time))
                temp_point = temp

                w_start = w_start + det_bic + 200
                w_end = w_start + w_grow
            else:
                w_end = w_end + w_grow

        # Flush the trailing segment, if any frames remain past the last
        # detected change point.
        if w_start < n <= w_end and temp_point < n:
            start_time, end_time = self.time_quantum(y, temp_point, n)
            if start_time and end_time:
                self.times_queue.put(TextData(start_time, end_time))

    def time_quantum(self, y, start_point, end_point):
        """Map an MFCC frame span to a time span, keeping it only when the
        VAD finds voice activity inside it.

        :param y: audio time series (as returned by librosa.load).
        :param start_point: segment start, as an MFCC frame index.
        :param end_point: segment end, as an MFCC frame index.
        :return: (start_time, end_time) in seconds, or (None, None) when
            no speech is detected in the span.
        """
        # MFCC frame index -> sample index via the hop length.
        start_point = start_point * self.frame_shift
        end_point = end_point * self.frame_shift
        temp_seg = y[start_point:end_point]

        start_time = end_time = None

        # NOTE(review): `vad` is imported as a name from
        # vad.speech_segmentation yet is used here as `vad.vad(...)` —
        # presumably it exposes a `vad` callable; confirm against the
        # vad package, this may have been meant as a plain `vad(...)`.
        x1, x2 = vad.vad(temp_seg, sr=self.sr, frame_size=self.frame_size, frame_shift=self.frame_shift)
        if len(x1) != 0 and len(x2) != 0:
            start_time = start_point / float(self.sr)
            end_time = end_point / float(self.sr)

        return start_time, end_time


class Recognizer(threading.Thread):
    """Speech-recognition thread.

    Consumes TextData time spans from ``times_queue``, extracts the
    matching audio segment, runs IBM speech-to-text on it, and pushes
    items that produced text onto ``recognitions_queue``.
    """

    def __init__(self, times_queue, recognitions_queue):
        """
        :param times_queue: input queue of TextData spans (from WaveAudio).
        :param recognitions_queue: output queue of TextData with en_text set.
        """
        super(Recognizer, self).__init__()
        self.times_queue = times_queue
        self.recognitions_queue = recognitions_queue
        self.file_path = None

        self.__sr = IBMSpeechRecognizer()          # holds the IBM credentials
        self.__r = speech_recognition.Recognizer()
        self.__audio = None                        # AudioData of the whole file

        # Cleared by stop() to request thread shutdown.
        self.__status = threading.Event()
        self.__status.set()

    def run(self):
        # BUG FIX: the original `if not self.__audio: pass` was a no-op
        # guard, so the loop below would crash on None.get_segment.
        if not self.__audio:
            return

        # NOTE(review): times_queue.get() blocks without a timeout, so
        # stop() only takes effect after the next item arrives.
        while self.__status.is_set():  # is_set(): isSet() is a deprecated alias
            data = self.times_queue.get()
            segment_audio = self.__audio.get_segment(data.start_time*1000, data.end_time*1000)
            username, password = self.__sr.username, self.__sr.password

            # Speech recognition.  A single failed request must not kill
            # the worker thread, so per-item API errors are skipped.
            try:
                en_text = self.__r.recognize_ibm(segment_audio, username, password, 'en-US')
            except (speech_recognition.UnknownValueError, speech_recognition.RequestError):
                continue
            if en_text:
                data.en_text = en_text.replace('\n', ',')
                self.recognitions_queue.put(data)

    def set_file_path(self, file_path):
        """Load the whole audio file into memory for later segmentation."""
        self.file_path = file_path
        with speech_recognition.AudioFile(file_path) as source:
            self.__audio = self.__r.record(source)

    def stop(self):
        """Ask run() to exit before processing the next queue item."""
        self.__status.clear()


class Translation(threading.Thread):
    """Translation thread.

    Consumes recognized TextData items from ``recognitions_queue``,
    translates en_text via YouDao, and pushes the completed items onto
    ``results_queue``.
    """

    def __init__(self, recognitions_queue, results_queue):
        """
        :param recognitions_queue: input queue of TextData with en_text set.
        :param results_queue: output queue of TextData with zh_text filled in.
        """
        super(Translation, self).__init__()
        self.recognitions_queue = recognitions_queue
        self.results_queue = results_queue

        self.__tr = YouDaoTranslateRecognizer()

        # Cleared by stop() to request thread shutdown.
        self.__status = threading.Event()
        self.__status.set()

    def run(self):
        # NOTE(review): get() blocks without a timeout, so stop() only
        # takes effect after the next item arrives.
        while self.__status.is_set():  # is_set(): isSet() is a deprecated alias
            data = self.recognitions_queue.get()
            # Text translation.
            data.zh_text = self.__tr.translate(data.en_text)
            self.results_queue.put(data)

    def stop(self):
        """Ask run() to exit before processing the next queue item."""
        self.__status.clear()
