import pyaudio
import torchaudio
import torch
import numpy as np
import webrtcvad
import multiprocessing
import threading
from queue import SimpleQueue
import logging
from transformers import Wav2Vec2ForSequenceClassification, Wav2Vec2FeatureExtractor
from worker import Worker
from config import *


logger = logging.getLogger(__name__)


class SpeechCommandDetector(Worker):
    """Detect spoken keyword commands from the default microphone.

    Three cooperating pipelines run on daemonless threads:

    1. audio capture  -> raw int16 frames on ``_buffer_audio``
    2. VAD detection  -> whole utterances (bytes) on ``_buffer_speech``
    3. classification -> recognised commands on ``buffer_commands`` as
       strings of the form ``'voice <label>'``
    """

    # Number of parallel threads running model inference on captured speech.
    _NUM_PROCESSORS = 3
    # Subset of the model's keyword labels that we act on.
    _RECOGNISED_COMMANDS = [
        'up',
        'down',
        'left',
        'right',
        'yes',
        'no'
    ]

    def __init__(self, buffer_commands: multiprocessing.SimpleQueue) -> None:
        """Load the keyword-spotting model and prepare the worker threads.

        :param buffer_commands: inter-process queue on which recognised
            commands are published.
        """
        # NOTE(review): Worker.__init__ is never called here — confirm the
        # base class has no required initialisation of its own.
        self._buffer_commands = buffer_commands
        self._buffer_audio = SimpleQueue()
        self._buffer_speech = SimpleQueue()
        self._model = Wav2Vec2ForSequenceClassification.from_pretrained(
            'superb/wav2vec2-base-superb-ks')
        self._feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
            'superb/wav2vec2-base-superb-ks')
        self._audio_capturing_thread = threading.Thread(
            target=self._audio_capturing_pipeline)
        self._speech_detection_thread = threading.Thread(
            target=self._speech_detection_pipeline)
        self._speech_processing_threads = [
            threading.Thread(target=self._speech_processing_pipeline)
            for _ in range(self._NUM_PROCESSORS)
        ]

    def _audio_capturing_pipeline(self) -> None:
        """Continuously read fixed-size mono int16 frames from the default
        input device and push the raw bytes onto ``_buffer_audio``.
        """
        p = pyaudio.PyAudio()
        # Samples per frame; FRAME_SIZE is presumably in milliseconds
        # (WebRTC VAD accepts 10/20/30 ms frames) — TODO confirm in config.
        chunk = SAMPLE_RATE * FRAME_SIZE // 1000
        stream = p.open(format=pyaudio.paInt16,
                        channels=1,
                        rate=SAMPLE_RATE,
                        input=True,
                        frames_per_buffer=chunk)
        while True:
            data = stream.read(chunk)
            self._buffer_audio.put(data)

    def _speech_detection_pipeline(self) -> None:
        """Segment the audio stream into utterances.

        Runs WebRTC VAD at its most aggressive setting (mode 3).  Frames are
        accumulated from the first voiced frame until the first non-voiced
        one, then the whole utterance is queued on ``_buffer_speech``.
        """
        vad = webrtcvad.Vad(3)
        while True:
            data = self._buffer_audio.get()
            is_speech = vad.is_speech(data, SAMPLE_RATE)
            if is_speech:
                # Continue reading until silence.
                speech = b''
                while is_speech:
                    speech += data
                    data = self._buffer_audio.get()
                    is_speech = vad.is_speech(data, SAMPLE_RATE)
                logger.info('Captured speech: %d bytes.', len(speech))
                self._buffer_speech.put(speech)

    def _speech_processing_pipeline(self) -> None:
        """Classify queued utterances and publish recognised commands.

        Each utterance is converted int16 bytes -> tensor, passed through the
        configured sox effect chain, then through the wav2vec2 keyword model.
        The first predicted label found in ``_RECOGNISED_COMMANDS`` is pushed
        onto ``_buffer_commands`` as ``'voice <label>'``.
        """
        while True:
            speech = self._buffer_speech.get()
            speech = np.frombuffer(speech, dtype=np.int16)
            # np.frombuffer returns a read-only view; copy to make it writable.
            speech = speech.copy()
            speech = torch.from_numpy(speech)
            # sox effects expect a 2D (channels, samples) tensor.
            speech = speech.unsqueeze(0)
            speech, _ = torchaudio.sox_effects.apply_effects_tensor(
                speech, SAMPLE_RATE, SOX_EFFECT)
            inputs = self._feature_extractor(speech.squeeze(0),
                                             sampling_rate=SAMPLE_RATE,
                                             padding=True,
                                             return_tensors='pt')
            # Inference only — disable autograd to avoid building a graph.
            with torch.no_grad():
                logits = self._model(**inputs).logits
            predicted_ids = torch.argmax(logits, dim=-1)
            labels = [self._model.config.id2label[_id]
                      for _id in predicted_ids.tolist()]
            logger.info('Predicted labels: %s', labels)
            for label in labels:
                if label in self._RECOGNISED_COMMANDS:
                    logger.info('Detected voice command: %s', label)
                    self._buffer_commands.put(f'voice {label}')
                    break

    def run_forever(self) -> None:
        """Start all pipeline threads and block indefinitely.

        The joins never return in normal operation because every pipeline
        loops forever.
        """
        self._audio_capturing_thread.start()
        self._speech_detection_thread.start()
        for thread in self._speech_processing_threads:
            thread.start()
        self._audio_capturing_thread.join()
        self._speech_detection_thread.join()
        for thread in self._speech_processing_threads:
            thread.join()


if __name__ == '__main__':
    # Standalone entry point: configure logging, then run the detector,
    # which blocks forever servicing its pipeline threads.
    logging.basicConfig(level=logging.INFO)
    command_queue = multiprocessing.SimpleQueue()
    SpeechCommandDetector(command_queue).run_forever()
