# -*- coding: utf-8 -*-
import logging
import os
import pickle
import time
import timeit
import warnings
import wave
from typing import List
from urllib.parse import quote

import numpy as np
import pyaudio
import requests

from main.feature_engineer import FeatureEngineer
from main.majority_voter import MajorityVoter
from main.baby_cry_predictor import BabyCryPredictor

# 录音参数
CHUNK = 1024
FORMAT = pyaudio.paFloat32
CHANNELS = 1
RATE = 44100
THRESHOLD = 10 #去抖动

class StreamPrediction:
    """Record microphone audio, classify it as baby crying / not crying, and
    report state changes via the log file, ``prediction.txt`` and Bark push
    notifications.
    """

    # Bark push endpoint; the device token is embedded in the path.
    _PUSH_URL = "https://api.day.app/oSiy4Mtp7WGTydYEvL7UtN/{}"

    def __init__(self):
        # Previous prediction state: -1 = unknown (startup), otherwise the
        # majority vote from the last window (1 = crying).
        self.last_cry_state = -1
        # Kept for backward compatibility; start_segment takes its own `sleep`.
        self.sleep = 5
        # Epoch seconds at which the current state began.
        self.last_cry_start = int(round(time.time()))

        # Input/output locations.
        self.load_path_model = os.path.normpath("./output/model")
        self.save_path = os.path.normpath("./output/prediction")
        self.log_path = os.path.normpath("./logger")

        # Make sure every directory we write to exists up front.
        for path in (self.save_path, self.log_path, os.path.normpath("./output/audios")):
            os.makedirs(path, exist_ok=True)

        # Load the classifier ONCE and keep it. The original loaded it here
        # into a discarded local, then re-read the pickle from disk on every
        # prediction. NOTE(review): pickle.load executes arbitrary code —
        # only load model files you produced yourself.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=UserWarning)
            with open(os.path.join(self.load_path_model, 'model.pkl'), 'rb') as fp:
                self.model = pickle.load(fp)

        # Set up logging
        logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
                            datefmt='%Y-%m-%d %I:%M:%S %p',
                            filename=os.path.join(self.log_path, 'logs_prediction_test_test_model.log'),
                            filemode='w',
                            level=logging.INFO)

        # PyAudio handle and shared capture buffer.
        self.frames = []
        self.p = pyaudio.PyAudio()

    # Segmented capture: record `second`-long windows every `sleep` seconds.
    def start_segment(self, second = 5, sleep = 5):
        """Loop forever: sleep, record one window, classify it.  Stops on
        Ctrl+C; always releases the stream and terminates PyAudio."""
        print('* 开始录音')
        stream = None
        try:
            while True:
                time.sleep(sleep)
                # One fresh stream per window.  (The original also opened a
                # stream before the loop and leaked it, and its `finally`
                # re-closed an already-closed stream.)
                stream = self.p.open(format=FORMAT,
                                     channels=CHANNELS,
                                     rate=RATE,
                                     input=True,
                                     frames_per_buffer=CHUNK)
                frames = [stream.read(CHUNK)
                          for _ in range(int(RATE / CHUNK * second))]
                stream.stop_stream()
                stream.close()
                stream = None
                print("* 录制结束，开始识别")
                # Raw float32 bytes -> numpy array for feature extraction.
                audio_data = np.frombuffer(b''.join(frames), dtype=np.float32)
                # Replace NaN/inf so downstream feature code cannot blow up.
                if not np.isfinite(audio_data).all():
                    audio_data = np.nan_to_num(audio_data)
                try:
                    self.make_prediction(audio_data=audio_data)
                except Exception as e:
                    # Best effort: one bad window must not kill the monitor.
                    print(f"Error: {e}")
        except KeyboardInterrupt:
            print("* 停止录音")
        finally:
            # Only close a stream that is actually open.
            if stream is not None:
                stream.stop_stream()
                stream.close()
            self.p.terminate()

    # Continuous capture: classify every CHUNK (~23 ms at 44.1 kHz).
    def start_realtime(self):
        """Read the stream chunk by chunk and classify each chunk.  Stops on
        Ctrl+C; always releases the stream and terminates PyAudio."""
        logging.info('* 开始录音')
        stream = self.p.open(format=FORMAT,
                             channels=CHANNELS,
                             rate=RATE,
                             input=True,
                             frames_per_buffer=CHUNK)
        try:
            while True:
                data = stream.read(CHUNK)
                audio_data = np.frombuffer(data, dtype=np.float32)
                # Replace NaN/inf so downstream feature code cannot blow up.
                if not np.isfinite(audio_data).all():
                    audio_data = np.nan_to_num(audio_data)
                try:
                    self.make_prediction(audio_data=audio_data)
                except Exception as e:
                    print(f"Error: {e}")
        except KeyboardInterrupt:
            print("* 停止录音")
        finally:
            stream.stop_stream()
            stream.close()
            self.p.terminate()

    # Feature extraction + classification for one audio window.
    def make_prediction(self, audio_data):
        """Extract features from `audio_data` (float32 ndarray), classify with
        the pre-loaded model, and hand the votes to getResult()."""
        logging.info('Starting feature engineering')
        start = timeit.default_timer()

        engineer = FeatureEngineer()
        play_list_processed = [engineer.feature_engineer(audio_data)]

        stop = timeit.default_timer()
        logging.info('Time taken for feature engineering: {0}'.format(stop - start))

        logging.info('Predicting...')
        # Use the model loaded in __init__ instead of re-reading the pickle
        # from disk on every call.
        predictor = BabyCryPredictor(self.model)
        predictions = [predictor.classify(signal)
                       for signal, _ in play_list_processed]
        self.getResult(p=predictions, audio_data=audio_data)

    @staticmethod
    def _format_duration(duration):
        """Format a duration in whole seconds as 'H时M分S秒'."""
        hours = duration // 3600
        minutes = (duration % 3600) // 60  # no longer shadows builtin `min`
        seconds = duration % 60
        return f"{hours}时{minutes}分{seconds}秒"

    def _push(self, message):
        """Best-effort Bark push notification; never raises.

        The message is URL-quoted (the original embedded raw text including a
        trailing newline in the URL) and the request gets a timeout so a
        network hiccup cannot stall or crash detection."""
        try:
            requests.get(url=self._PUSH_URL.format(quote(message)), timeout=10)
        except requests.RequestException as e:
            logging.warning('Push notification failed: %s', e)

    # Turn the votes for one window into state, files and notifications.
    def getResult(self, p, audio_data):
        """Majority-vote over predictions `p`; on a crying window save the
        audio; on a state change record and (debounced) push the event."""
        majority_voter = MajorityVoter(p)
        majority_vote, label = majority_voter.vote()

        # Keep a recording of every window that was voted "crying".
        if majority_vote == 1:
            self.save_audio_to_file(audio_data=audio_data)

        # State changed since the previous window: log/notify the transition.
        if self.last_cry_state != majority_vote:
            now = int(round(time.time()))
            duration = now - self.last_cry_start  # how long the old state lasted
            cry_date = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(now))
            duration_str = self._format_duration(duration)

            if majority_vote == 1:
                # Long quiet stretch before crying = praise, short = sigh.
                descr = "好宝👍👍👍" if duration > 60 * 30 else "😒😒😒"
                pre_result = f"{cry_date} 开始哭闹({label})😭，安静时长:{duration_str} {descr}\n"
            else:
                pre_result = f"{cry_date} 停止哭闹😊，哭闹时长:{duration_str}\n"

            # Debounce: only notify when the previous state lasted long enough.
            if duration > THRESHOLD:
                self._push(pre_result.strip())

            print(pre_result)

            # Append the event to prediction.txt.
            with open(os.path.join(self.save_path, 'prediction.txt'), 'a', encoding='utf-8') as text_file:
                text_file.write(pre_result)

            # The new state starts now.
            self.last_cry_start = int(round(time.time()))

        self.last_cry_state = majority_vote

    # Persist one crying window as a WAV file.
    def save_audio_to_file(self, audio_data: np.ndarray) -> str:
        """Write `audio_data` (float32 samples in [-1, 1]) to a timestamped
        WAV file and return its path.

        Fixes vs. original: the float32 ndarray is converted to 16-bit PCM
        (``b"".join`` over a float ndarray raised TypeError), the timestamp
        contains no ':' (invalid in Windows filenames), and the file is
        written at the actual capture rate instead of a hard-coded 16000."""
        file_name = time.strftime('cry_%Y%m%d_%H%M%S.wav', time.localtime())
        file_path = os.path.join("./output/audios", file_name)

        # float32 [-1, 1] -> int16 PCM for the 2-byte sample width below.
        pcm = (np.clip(audio_data, -1.0, 1.0) * 32767).astype(np.int16)

        with wave.open(file_path, "wb") as wf:
            wf.setnchannels(CHANNELS)
            wf.setsampwidth(2)
            wf.setframerate(RATE)
            wf.writeframes(pcm.tobytes())
        return file_path

if __name__ == '__main__':
    p = StreamPrediction()
    p.start_segment()
    # p.start_realtime()