import base64
import os
from datetime import datetime
import cv2
from cv2 import VideoCapture, imencode
from PIL import Image
from speech_recognition import Microphone, Recognizer, AudioData, UnknownValueError
import asyncio
import edge_tts
import pygame
from openai import OpenAI
import logging
import wave
import torchaudio
import torchaudio.compliance.kaldi as kaldi
from ais_bench.infer.interface import InferSession
import numpy as np
import IPython
# Set the log level to WARNING so INFO-level messages are suppressed.
logging.basicConfig(level=logging.WARNING)

# System prompt sent with every chat request. It tells the model it "sees"
# through webcam snapshots attached to user messages, must describe them
# without using the word for "image", avoid emojis and questions, stay
# casual, and always reply in Chinese.
# NOTE: this text is runtime data sent verbatim to the API — do not translate.
SYSTEM_PROMPT = """
    你是一个有眼睛的助手，我会发送图片给你，让你看到周围的景象，将使用用户提供的聊天历史和图片来回答其问题。
    不要提到“图片”这个单词，直接描述图片的内容，不要使用emojis，不要问用户问题。
    保持友好的态度。展示一些个性。不要太正式。
    用中文回复
"""
class WeNetASR:
    """Offline speech recognizer backed by a WeNet encoder exported as an .om model."""

    def __init__(self, model_path, vocab_path):
        """Load the token vocabulary and create the inference session."""
        self.vocabulary = load_vocab(vocab_path)
        self.model = InferSession(0, model_path)
        # Maximum feature length (in frames) accepted by the model's input.
        self.max_len = self.model.get_inputs()[0].shape[1]

    def transcribe(self, wav_file):
        """Run inference on a WAV recording and return the decoded text."""
        features, lengths = self.preprocess(wav_file)
        outputs = self.model.infer([features, lengths])
        return self.post_process(outputs)

    def preprocess(self, wav_file):
        """Turn a WAV file into padded fbank features plus their true frame count."""
        waveform, sample_rate = torchaudio.load(wav_file)
        # Resample to the 16 kHz rate the model was trained on.
        waveform, sample_rate = resample(waveform, sample_rate, resample_rate=16000)
        fbank = compute_fbank(waveform, sample_rate)
        lengths = np.array([fbank.shape[0]]).astype(np.int32)
        # Pad (or truncate) so the feature tensor matches the model input size.
        padded = pad_sequence(fbank,
                              batch_first=True,
                              padding_value=0,
                              max_len=self.max_len)
        return padded.numpy().astype(np.float32), lengths

    def post_process(self, output):
        """Greedy CTC decode: take the top token per frame, drop repeats and blanks, join to text."""
        encoder_out_lens, probs_idx = output[1], output[4]
        best_path = probs_idx[0, :, 0][:encoder_out_lens[0]]
        best_path = remove_duplicates_and_blank(best_path)
        return ''.join(self.vocabulary[best_path])


def remove_duplicates_and_blank(token_idx_list):
    """Collapse consecutive repeats and drop CTC blank tokens (id 0)."""
    BLANK_ID = 0
    collapsed = []
    previous = None
    for token in token_idx_list:
        # Keep only the first token of each run, and never keep blanks.
        if token != previous:
            if token != BLANK_ID:
                collapsed.append(token)
            previous = token
    return collapsed


def pad_sequence(seq_feature, batch_first=True, padding_value=0, max_len=966):
    """Pad or truncate a 2-D feature tensor to `max_len` frames and add a batch dim.

    Args:
        seq_feature: tensor of shape (frames, feat_dim).
        batch_first: if True the result is (1, max_len, feat_dim),
            otherwise (max_len, 1, feat_dim).
        padding_value: fill value used for padded frames.
        max_len: target number of frames (the model's input length).

    Returns:
        The padded/truncated tensor with an explicit batch dimension of 1.
    """
    feat_len = seq_feature.shape[0]
    if feat_len > max_len:
        # Truncate to the model input length. BUGFIX: the original always
        # inserted the batch dim at position 0 here, ignoring `batch_first`;
        # honor it so both layouts match the padding path below.
        truncated = seq_feature[:max_len]
        return truncated.unsqueeze(0) if batch_first else truncated.unsqueeze(1)

    batch_size = 1
    trailing_dims = seq_feature.shape[1:]
    if batch_first:
        out_dims = (batch_size, max_len) + trailing_dims
    else:
        out_dims = (max_len, batch_size) + trailing_dims

    # Allocate on the same device/dtype as the input, pre-filled with padding.
    out_tensor = seq_feature.data.new(*out_dims).fill_(padding_value)
    if batch_first:
        out_tensor[0, :feat_len, ...] = seq_feature
    else:
        out_tensor[:feat_len, 0, ...] = seq_feature
    return out_tensor


def resample(waveform, sample_rate, resample_rate=16000):
    """Resample `waveform` from `sample_rate` to `resample_rate`; return (waveform, rate)."""
    resampler = torchaudio.transforms.Resample(orig_freq=sample_rate,
                                               new_freq=resample_rate)
    return resampler(waveform), resample_rate


def compute_fbank(waveform,
                  sample_rate,
                  num_mel_bins=80,
                  frame_length=25,
                  frame_shift=10,
                  dither=0.0):
    """Extract Kaldi-style log-mel filter-bank features from a waveform."""
    # Scale float samples up to 16-bit integer range — presumably because the
    # Kaldi frontend expects int16-scaled input; confirm against model training.
    scaled = waveform * (1 << 15)
    return kaldi.fbank(scaled,
                       num_mel_bins=num_mel_bins,
                       frame_length=frame_length,
                       frame_shift=frame_shift,
                       dither=dither,
                       energy_floor=0.0,
                       sample_frequency=sample_rate)


def load_vocab(txt_path):
    """Load the ASR token vocabulary (one "token id" pair per line).

    Args:
        txt_path: path to the vocabulary text file.

    Returns:
        numpy array of token strings, indexable by token id
        (ids are assumed to follow line order).

    Raises:
        ValueError: if a line does not contain exactly two fields.
    """
    LEN_OF_VALID_FORMAT = 2
    vocabulary = []
    # BUGFIX: explicit UTF-8 — the vocabulary contains Chinese tokens, and the
    # platform default encoding (e.g. on Windows) would corrupt or reject them.
    with open(txt_path, 'r', encoding='utf-8') as fin:
        for line in fin:
            arr = line.strip().split()
            # Expected format per line: "token id"
            if len(arr) != LEN_OF_VALID_FORMAT:
                raise ValueError(f"Invalid line: {line}. Expect format: token id")
            vocabulary.append(arr[0])
    return np.array(vocabulary)
class WebcamStream:
    """Thin wrapper around an OpenCV capture device."""

    def __init__(self):
        self.stream = None    # cv2.VideoCapture once started
        self.frame = None     # first frame grabbed at start()
        self.running = False  # guards double start/stop

    def start(self):
        """Open the first working camera and grab an initial frame. Idempotent."""
        if not self.running:
            self.running = True
            self.stream = VideoCapture(find_camera_index())
            _, self.frame = self.stream.read()
        return self

    def read(self, encode=False):
        """Grab a fresh frame; optionally return it base64-encoded as JPEG."""
        if self.stream is None:
            return None

        _, frame = self.stream.read()
        if not encode:
            return frame

        _, buffer = imencode(".jpeg", frame)
        return base64.b64encode(buffer).decode("utf-8")

    def stop(self):
        """Release the capture device if it was started."""
        if not self.running:
            return
        self.running = False
        if self.stream:
            self.stream.release()

class Assistant:
    """Wake-word gated voice assistant.

    A transcribed prompt activates the assistant on a wake word, optionally
    attaches a webcam snapshot when the user asks to look ("看" in the
    prompt), queries an OpenAI-compatible chat API, and speaks the reply
    via edge-tts.
    """

    # Wake words: homophone variants the ASR may emit for the wake word.
    TRIGGER_KEYWORDS = ['小翼', "小易", "小艺", '小亦', '小椅', '小蚁', '小姨', '小义', '小一', '你好', "小藝", '小蟻', '小義', '小叶']
    # Words that end the conversation and clear the chat history.
    EXIT_KEYWORDS = ["谢谢", "退出", "再见", '结束', '結束', '再見', '謝謝']
    # Words that shut the whole system down.
    SHUTDOWN_KEYWORDS = ['关机', '關機']

    def __init__(self):
        # SECURITY: a hard-coded API key was committed here. Prefer the
        # environment; keep the old value only as a fallback so existing
        # deployments continue to work. The leaked key should be rotated.
        self.api_key = os.environ.get(
            "OPENAI_API_KEY",
            "sk-PsRQJ46NWQMpy8xPFeDd2eC642424a4485202fB956Ca862d1")
        self.api_base = os.environ.get("OPENAI_API_BASE",
                                       "https://api.zetatechs.com/v1")
        self.client = OpenAI(api_key=self.api_key, base_url=self.api_base)
        self.active = False  # True between a wake word and an exit word
        self.history = []    # prior turns as OpenAI chat message dicts

    def answer(self, prompt):
        """Process one transcribed prompt.

        Returns:
            'shutdown' when a shutdown keyword was heard, otherwise None.
        """
        if any(keyword in prompt for keyword in self.SHUTDOWN_KEYWORDS):
            asyncio.run(self._tts('系统即将关闭，再见！'))
            return 'shutdown'
        # Ignore everything until a wake word arrives.
        if not self.active and not any(keyword in prompt for keyword in self.TRIGGER_KEYWORDS):
            return
        # Drop blank prompts and a known ASR hallucination string.
        if prompt.strip() == '' or "字幕by索兰娅" in prompt:
            return
        if any(keyword in prompt for keyword in self.TRIGGER_KEYWORDS):
            self.active = True
        if any(keyword in prompt for keyword in self.EXIT_KEYWORDS):
            asyncio.run(self._tts('再见，如果你还需要帮助，请叫我小艺小艺，我随时都在！'))
            self.active = False
            self.history = []  # forget the conversation
            return
        print("Prompt:", prompt)

        base64_image = None
        if "看" in prompt:  # the user asked the assistant to "look"
            webcam_stream = WebcamStream().start()
            base64_image = self._save_image_locally(webcam_stream.read())
            webcam_stream.stop()

        response = self._get_response_from_openai(prompt, base64_image)
        print("Response:", response)

        if response:
            # Record both turns. BUGFIX: the original stored only wake-word
            # prompts (duplicating them in the request payload) and never
            # recorded ordinary user turns.
            self.history.append({"role": "user", "content": prompt})
            self.history.append({"role": "assistant", "content": response})
            asyncio.run(self._tts(response))

    @staticmethod
    async def _tts(response):
        """Synthesize `response` with edge-tts and play it; returns when playback ends."""
        VOICE = "zh-CN-XiaoxiaoNeural"
        OUTPUT_FILE = "temp.mp3"
        communicate = edge_tts.Communicate(response, VOICE)
        await communicate.save(OUTPUT_FILE)

        pygame.mixer.init()
        pygame.mixer.music.load(OUTPUT_FILE)
        pygame.mixer.music.play()

        # Poll rather than block so the event loop stays responsive.
        while pygame.mixer.music.get_busy():
            await asyncio.sleep(0.1)

        pygame.mixer.music.stop()
        pygame.mixer.quit()

        # Brief grace period so the OS releases the file handle before unlink.
        await asyncio.sleep(0.1)
        os.remove(OUTPUT_FILE)

    @staticmethod
    def _save_image_locally(image):
        """Save a BGR frame as a timestamped PNG under ./images; return it base64-encoded."""
        img = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        os.makedirs("./images", exist_ok=True)
        file_name = f"./images/image_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png"
        img.save(file_name)
        with open(file_name, "rb") as image_file:
            return base64.b64encode(image_file.read()).decode("utf-8")

    def _get_response_from_openai(self, prompt, base64_image):
        """Query the chat API with system prompt + history + current turn.

        Retries once on failure. BUGFIX: the original used a bare `except:`
        and a blind duplicate request whose second failure propagated
        uncaught; now both attempts are guarded and a spoken apology string
        is returned when both fail or the model returns empty content.
        """
        user_content = prompt
        if base64_image:
            user_content = [
                {"type": "text", "text": prompt},
                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{base64_image}"}}
            ]
        messages = ([{"role": "system", "content": SYSTEM_PROMPT}]
                    + self.history
                    + [{"role": "user", "content": user_content}])
        for attempt in range(2):
            try:
                response = self.client.chat.completions.create(
                    model="gpt-4o",
                    messages=messages,
                )
                content = response.choices[0].message.content
                if content:
                    return content
            except Exception:
                logging.exception("chat completion failed (attempt %d)", attempt + 1)
        return '对不起，请你再说一遍，我没有听清楚'
                
        
def find_camera_index():
    """Return the index (0-9) of the first camera that can deliver a frame.

    Returns:
        int: index of the first working camera.

    Raises:
        ValueError: if no working camera is found.
    """
    max_index_to_check = 10  # Maximum index to check for camera
    for index in range(max_index_to_check):
        cap = cv2.VideoCapture(index)
        try:
            ok = cap.read()[0]
        finally:
            # BUGFIX: the original released the device only on success,
            # leaking a capture handle for every failed probe.
            cap.release()
        if ok:
            return index

    # If no camera is found
    raise ValueError("No camera found.")
def audio_data_to_wav(audio_data, output_file_path):
    """Write captured audio to an uncompressed mono WAV file.

    `audio_data` must expose `sample_width`, `sample_rate` and `frame_data`
    (as speech_recognition's AudioData does).
    """
    wav_file = wave.open(output_file_path, 'wb')
    try:
        wav_file.setnchannels(1)  # mono
        wav_file.setsampwidth(audio_data.sample_width)
        wav_file.setframerate(audio_data.sample_rate)
        wav_file.setcomptype('NONE', 'not compressed')  # uncompressed PCM
        wav_file.writeframes(audio_data.frame_data)
    finally:
        wav_file.close()

def recognize_speech_from_mic(recognizer, microphone):
    """Capture one utterance from the microphone and transcribe it.

    Uses the module-level `model` (WeNetASR) for transcription; returns the
    recognized text, or "" when recognition fails.
    """
    if not isinstance(recognizer, Recognizer):
        raise TypeError("`recognizer` must be `Recognizer` instance")
    if not isinstance(microphone, Microphone):
        raise TypeError("`microphone` must be `Microphone` instance")

    with microphone as source:
        print("正在调整麦克风，请保持安静...")
        # Calibrate the energy threshold against ambient noise.
        recognizer.adjust_for_ambient_noise(source)
        print("请开始说话...")
        audio = recognizer.listen(source)

    try:
        print("正在识别...")
        wav_path = 'output_audio.wav'
        audio_data_to_wav(audio, wav_path)
        # `model` is the global WeNetASR instance defined at module scope.
        text = model.transcribe(wav_path)
        print(f"您说的内容是: {text}")
        return text
    except UnknownValueError:
        print("无法理解你说的话。")
        return ""
# Paths to the offline ASR encoder (.om) model and its token vocabulary.
model_path = "offline_encoder.om"
vocab_path = 'vocab.txt'

# Kept at module level on purpose: recognize_speech_from_mic() reads the
# global `model`.
model = WeNetASR(model_path, vocab_path)
assistant = Assistant()
recognizer = Recognizer()
microphone = Microphone()


def main():
    """Listen/transcribe/answer loop; exits on the shutdown keyword or Ctrl-C."""
    try:
        while True:
            recognized_text = recognize_speech_from_mic(recognizer, microphone)
            if recognized_text and assistant.answer(recognized_text) == 'shutdown':
                print("System shutdown.")
                break
    except KeyboardInterrupt:
        pass


# Guard the run loop so importing this module for its helpers does not
# immediately start listening.
if __name__ == "__main__":
    main()
