# -*- coding: utf-8 -*-

"""
DOA
"""

import collections
import os
import sys
import threading
if sys.version_info[0] < 3:
    import Queue as queue
else:
    import queue

import numpy as np
from webrtcvad import Vad
from voice_engine.element import Element



MIC_DISTANCE_4 = 0.08127

eps = np.finfo(float).eps


class DOA(Element):
    """Direction of arrival (DOA) estimator for a 4-mic circular array.

    Consumes interleaved 16-bit PCM audio, estimates the time difference of
    arrival on the two opposite microphone pairs with GCC-PHAT, and keeps a
    short, VAD-gated history of per-frame direction estimates.
    :meth:`get_direction` reports the dominant voice direction in degrees
    at 30-degree resolution.
    """

    def __init__(self, rate=16000, channels=4):
        """
        Args:
            rate: sample rate in Hz (the TDOA math assumes 16000).
            channels: number of interleaved channels in the input stream.
        """
        super(DOA, self).__init__()

        self.rate = rate
        self.channels = channels
        self.mask = [0, 1, 2, 3]        # channel indices used for DOA
        self.pair = [[0, 2], [1, 3]]    # two opposite microphone pairs

        self.frame_size = 160           # 10 ms at 16 kHz (a webrtcvad frame)
        self.frame_bytes = self.frame_size * self.channels * 2  # int16 samples

        self.vad = Vad(3)               # mode 3: most aggressive VAD

        self.queue = queue.Queue()
        self.done = False

        # rolling history of [direction_index, offset, has_voice] per frame
        self.collections = collections.deque(maxlen=50)

        # hanning window applied before the FFT
        self.window = np.hanning(self.frame_size)

        # FFT length: next power of two >= 2 * frame_size - 1 so circular
        # correlation of two frames matches their linear correlation
        self.nfft = 1 << (self.frame_size * 2 - 1).bit_length()
        print('fft size: {}'.format(self.nfft))

        # maximum delay, in samples at 16 kHz, between two opposite mics
        # (speed of sound taken as 340 m/s)
        self.margin_f = MIC_DISTANCE_4 * 16000 / 340.0

        self.interp = 2                 # interpolation factor for sub-sample delays
        self.margin = int(self.margin_f * self.interp)

        self.cc_baseline = [0] * len(self.pair)

    def put(self, data):
        """Queue a chunk of interleaved PCM bytes for processing."""
        self.queue.put(data)

    def start(self):
        """Start the background processing thread (daemon)."""
        self.done = False
        thread = threading.Thread(target=self.run)
        thread.daemon = True
        thread.start()

    def stop(self):
        """Ask the processing loop to exit after its current chunk."""
        self.done = True

    def run(self):
        """Worker loop: split queued audio into frames, run VAD + DOA."""
        # Must be bytes, not str: queued audio chunks are byte strings, and
        # '' + bytes raises TypeError on Python 3 (b'' also works on Python 2).
        buffer = b''

        while not self.done:
            data = self.queue.get()
            buffer += data

            while len(buffer) >= self.frame_bytes:
                data = buffer[:self.frame_bytes]
                buffer = buffer[self.frame_bytes:]

                # np.frombuffer / tobytes replace the deprecated
                # np.fromstring / tostring numpy APIs
                data = np.frombuffer(data, dtype='int16')
                mono = data[0::self.channels].tobytes()

                has_voice = self.vad.is_speech(mono, self.rate)

                offset, direction_index = self._process(data)

                self.collections.append([direction_index, offset, has_voice])

                # forward the mono channel to the next pipeline element
                super(DOA, self).put(mono)

    def set_callback(self, callback):
        """Register a detection callback.

        Raises:
            ValueError: if ``callback`` is not callable.
        """
        if callable(callback):
            self.on_detected = callback
        else:
            # previously the exception was constructed but never raised,
            # silently accepting a non-callable
            raise ValueError('The callback parameter is not callable')

    def get_direction(self):
        """Return the dominant voice direction in degrees (30-degree bins).

        Votes over the recent frame history, counting only frames where the
        VAD flagged speech.  With an empty history this returns 0.
        """
        counting = [0] * 12
        voice = 0
        for d in self.collections:
            if d[2]:  # has_voice
                voice += 1
                counting[d[0]] += 1

        direction_index = np.argmax(counting)
        self.direction = direction_index * 30

        return self.direction

    def _process(self, data):
        """Run GCC-PHAT on each mic pair.

        Args:
            data: interleaved int16 samples for one frame.

        Returns:
            (offset, direction): per-pair sub-sample delays and the
            quantized direction bin in [0, 11].
        """
        X = [0] * self.channels
        for channel in self.mask:
            x = data[channel::self.channels]
            if self.window is not None:
                x = x * self.window
            X[channel] = np.fft.rfft(x, self.nfft)

        offset = [0] * len(self.pair)
        theta = [0] * len(self.pair)

        for i, v in enumerate(self.pair):
            # generalized cross correlation with phase transform (GCC-PHAT)
            CC = X[v[1]] * np.conj(X[v[0]])
            CC /= np.abs(CC) + eps
            cc = np.fft.irfft(CC, n=self.nfft * self.interp)

            # keep only the physically possible delays around zero lag
            cc = np.concatenate((cc[-self.margin:], cc[:self.margin + 1]))
            cc = np.abs(cc)

            # delay (in original-rate samples) at the correlation peak
            offset_max = np.argmax(cc) - self.margin
            offset[i] = offset_max / float(self.interp)
            theta[i] = np.arcsin(offset[i] / self.margin_f) * 180 / np.pi

        # Combine the two pair angles into one bearing: prefer the pair with
        # the smaller |theta| and use the other pair's sign to resolve the
        # front/back ambiguity of arcsin.
        if np.abs(theta[0]) < np.abs(theta[1]):
            if theta[1] > 0:
                best_guess = (theta[0] + 360) % 360
            else:
                best_guess = (180 - theta[0])
        else:
            if theta[0] < 0:
                best_guess = (theta[1] + 360) % 360
            else:
                best_guess = (180 - theta[1])

            best_guess = (best_guess + 90 + 180) % 360

        # rotate into the board's reference frame and quantize to 30 degrees
        best_guess = (-best_guess + 300) % 360
        direction = int((best_guess + 15) // 30 % 12)

        return offset, direction


def main():
    """Demo: light the pixel ring toward the speaker on keyword detection."""
    import time

    import gpiozero
    from pixel_ring import pixel_ring
    from voice_engine.kws import KWS
    from voice_engine.source import Source

    # enable power for the microphone array / LED ring
    power = gpiozero.LED(5)
    power.on()

    pixel_ring.change_pattern('echo')

    src = Source(channels=4, rate=16000, frames_size=160)
    doa = DOA(rate=src.rate, channels=4)
    kws = KWS()

    def on_detected(keyword):
        # point the LED ring at the estimated speaker direction
        print('detected {}'.format(keyword))
        direction = doa.get_direction()
        pixel_ring.wakeup(direction)
        print(direction)

    kws.set_callback(on_detected)

    # source -> DOA -> keyword spotter
    src.pipeline(doa, kws)
    src.pipeline_start()

    # idle until interrupted, then shut the pipeline down
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        pass

    src.pipeline_stop()


# Run the demo pipeline when executed as a script.
if __name__ == '__main__':
    main()
