import paho.mqtt.client as mqtt
import json
import json
import pyaudio
import numpy as np
import librosa
import time
import wave
import array
from adpcm  import encode_file, decode_file
import numpy as np
#from pydub import AudioSegment
import threading
from xf_websocket import SpeechRecognition
from xf_tts_websocket import TextToSpeech

from xf_websocket import EventAdder

from xf_websocket import STATUS_FIRST_FRAME
from xf_websocket import STATUS_CONTINUE_FRAME
from xf_websocket import STATUS_LAST_FRAME


from sparkdesk_api.core import SparkAPI


# Default API version is 3.1; pass the Version parameter (2.1 or 1.1)
# to select another version.
# NOTE(review): app_id/api_key/api_secret are hard-coded here (and again
# below) -- consider loading them from the environment or a config file.
sparkAPI = SparkAPI(
    app_id="21fa1ed5",
    api_secret="YzFlNmFiMjJkYTIwMTFhNWZkYjg4NDlm",
    api_key="b23ea50fd4cbaf35a9a502c47cb8cd4c",
    # version=2.1
)

# Global flag: set True by on_message() once a TTS reply (audio.pcm) is
# ready; the publish_message() thread streams it back and clears the flag.
publish_flag = False

# Global flag: 0 -> the next chunk published is the first frame of an
# utterance, 1 -> a continuation frame (see publish_message()).
voice_flag = 0

# MQTT broker address and credentials.
MQTT_URL =  "112.74.97.118"
MQTT_USER_NAME = "gateway_mqtt"
MQTT_USER_PASSWD = "gateway_mqtt"

# Downlink topic (we publish audio to the gateway) and uplink topic
# (gateway data we subscribe to in on_connect()).
SUBSCRIBE_SET_TOPIC = "GATEWAY/SetData"
SUBSCRIBE_RSP_TOPIC = "GATEWAY/UpLoadData"

# Message "type" tags used in the JSON payloads (dispatched in on_message()).
PAIRED_TYPE     = "PAIRED_TYPE"
UNPAIRED_TYPE   = "UNPAIRED_TYPE"
AUDIO_TYPE = "AUDIO_TYPE"

# Energy threshold -- only referenced from commented-out code below.
threshold=10000

# Accumulation buffer -- only referenced from commented-out code below.
TEMP = b''

_frame_len = 0      # frame counter -- only used by commented-out code below
output_file = None  # audio file handle for the upload capture in progress



# Playback configuration for the local PyAudio output stream.
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 8000
CHUNK = 256

# Initialize the audio output stream.
# NOTE(review): the active code never writes to `stream`; only
# commented-out playback code uses it (it is closed at file bottom).
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
                channels=CHANNELS,
                rate=RATE,
                output=True,
                frames_per_buffer=CHUNK)  

# Test credentials / parameters for the iFlytek speech services.
app_id = "21fa1ed5"
api_key = "b23ea50fd4cbaf35a9a502c47cb8cd4c"
api_secret = "YzFlNmFiMjJkYTIwMTFhNWZkYjg4NDlm"
audio_file = "iat_pcm_8k.pcm"

frame_len = 8000
frame_rate = 8000
frame_intervel = 0.04

##speech_obj = SpeechRecognition(app_id, api_key, api_secret,frame_len,frame_rate,frame_intervel)
# Create an EventAdder instance, passing it the SpeechRecognition instance.
##event_adder = EventAdder(speech_obj)





def pcm_to_adpcm(input_file, output_file):
    """Convert a raw PCM file (16 kHz, mono, 16-bit) to MS-ADPCM via pydub.

    NOTE(review): `AudioSegment` is undefined in this module -- the
    `from pydub import AudioSegment` import is commented out at the top
    of the file -- so calling this function raises NameError. Restore
    the pydub import before using it. The `output_file` parameter also
    shadows the module-level global of the same name.
    """
    # Load the raw PCM file with pydub.
    sound = AudioSegment.from_file(input_file, format="pcm", frame_rate=16000, channels=1, sample_width=2)

    # Export it as Microsoft ADPCM.
    sound.export(output_file, format="adpcm-ms", codec="adpcm_ms")











def detect_voice(audio_data, frame_size=256, threshold_energy=5000, threshold_zcr=50):
    """Classify a raw PCM buffer as voice or silence.

    Parameters
    ----------
    audio_data : bytes
        Raw little-endian int16 PCM samples.
    frame_size : int
        Number of samples per energy frame.
    threshold_energy : int
        A frame whose sum-of-squares energy exceeds this counts as voice.
    threshold_zcr : int
        Alternatively, more zero crossings than this counts as voice.

    Returns
    -------
    (str, int, int)
        Human-readable label, peak per-frame energy, zero-crossing count.

    Fixes vs. the original:
      * squaring was done in int16 and silently overflowed for samples
        above ~181 in magnitude; samples are widened to int64 first;
      * a buffer shorter than one frame made ``energy.max()`` raise on
        an empty array; such buffers are now scored as a single frame;
      * the returned labels contained an unformatted ``%d`` placeholder.
    """
    samples = np.frombuffer(audio_data, dtype=np.int16).astype(np.int64)

    # Per-frame sum-of-squares energy over whole frames only.
    usable = len(samples) - len(samples) % frame_size
    if usable:
        frames = samples[:usable].reshape(-1, frame_size)
        peak_energy = int(np.max(np.sum(np.square(frames), axis=1)))
    else:
        # Shorter than one frame (or empty): score the whole buffer as one frame.
        peak_energy = int(np.sum(np.square(samples)))

    zero_crossings = int(np.count_nonzero(np.diff(np.sign(samples))))

    if peak_energy > threshold_energy or zero_crossings > threshold_zcr:
        return f"Voice Detected {peak_energy}", peak_energy, zero_crossings
    return f"Silence Detected {peak_energy}", peak_energy, zero_crossings

# Read a binary file and yield its contents as hex strings.
def read_binary_file(file_path, chunk_size=64):
    """Yield the contents of *file_path* as lowercase hex strings.

    The file is read *chunk_size* bytes at a time; every chunk is
    converted with bytes.hex() and yielded, so callers can stream large
    files without loading them whole.
    """
    with open(file_path, 'rb') as fp:
        # iter() with a b'' sentinel stops cleanly at end of file.
        for chunk in iter(lambda: fp.read(chunk_size), b''):
            yield chunk.hex()

            
    
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, reason_code, properties):
    """Paho v2 CONNACK callback: log the result and subscribe to uplink.

    Subscribing from inside on_connect() means the subscription is
    re-established automatically whenever the client reconnects after a
    dropped link.
    """
    print(f"Connected with result code {reason_code}")
    client.subscribe(SUBSCRIBE_RSP_TOPIC)

# The callback for when a PUBLISH message is received from the server.
def _hex_frame_to_pcm(hex_str):
    """Decode one hex-encoded audio frame to raw PCM bytes.

    The payload is a hex string of little-endian int16 samples; pad it
    to a multiple of 4 hex characters (one 2-byte sample) so that
    bytes.fromhex() succeeds and any int16 view of the result is whole.
    """
    remainder = len(hex_str) % 4
    if remainder != 0:
        hex_str += "0" * (4 - remainder)
    return bytes.fromhex(hex_str)


def on_message(client, userdata, msg):
    """Handle gateway uplink MQTT messages (JSON payloads).

    On SUBSCRIBE_RSP_TOPIC three message types are handled:
      * PAIRED_TYPE / UNPAIRED_TYPE -- just logged.
      * AUDIO_TYPE -- hex-encoded audio arriving in numbered frames:
        frame 0 starts a new 'audio_upload.speex' capture, frame 1
        appends to it, and frame 2 appends the final chunk, closes the
        file, then runs speech recognition -> SparkDesk chat -> TTS
        (writing the reply to 'audio.pcm') and arms the publisher
        thread by setting publish_flag (see publish_message()).

    Bug fix vs. the original: the global file handle `output_file` was
    clobbered with the TTS output *path* string after frame 2, so a
    later stray frame-1 message crashed on `output_file.write`. The
    path is now passed to TextToSpeech directly and `output_file`
    stays None between captures.
    """
    global output_file, publish_flag, voice_flag

    json_msg = json.loads(msg.payload.decode())  # payload is UTF-8 JSON
    if msg.topic != SUBSCRIBE_RSP_TOPIC:
        return

    msg_type = json_msg["type"]
    if msg_type == PAIRED_TYPE:
        print("paired\r\n")
    elif msg_type == UNPAIRED_TYPE:
        print("unpaired\r\n")
    elif msg_type == AUDIO_TYPE:
        frame_no = json_msg["data"]["audio_frame"]
        audio_bytes = _hex_frame_to_pcm(json_msg["data"]["audio_data"])
        if frame_no == 0:
            # First frame: start a fresh capture file.
            output_file = open('audio_upload.speex', 'wb')
            output_file.write(audio_bytes)
        elif frame_no == 1:
            # Continuation frame: append only if a capture is in progress.
            if output_file is not None:
                output_file.write(audio_bytes)
        elif frame_no == 2:
            # Final frame: append, close, then run the voice pipeline.
            if output_file is not None:
                output_file.write(audio_bytes)
                output_file.close()
            output_file = None

            speech_obj = SpeechRecognition(app_id, api_key, api_secret,
                                           frame_len, frame_rate, frame_intervel)
            result = speech_obj.detect_voice("audio_upload.speex")
            print("Speech recognition result:", result)

            AI_result = sparkAPI.chat(result)
            print(AI_result)

            # Synthesize the chat reply into audio.pcm for the publisher thread.
            tts = TextToSpeech(app_id, api_key, api_secret, AI_result, "audio.pcm")
            tts.tts()

            # Arm the publisher: restart frame numbering at 0 and let
            # publish_message() stream audio.pcm back to the gateway.
            voice_flag = 0
            publish_flag = True

# Publisher thread body: stream the TTS reply back to the gateway.
def publish_message():
    """Run forever; whenever publish_flag is set, stream 'audio.pcm'
    to the gateway over MQTT in 64-byte hex-encoded frames.

    Frame protocol (mirrors on_message()): audio_frame 0 marks the
    first chunk of an utterance, 1 the continuation chunks, and a final
    frame 2 with dummy data "00" terminates the stream.

    Fixes vs. the original:
      * it used to open a second MQTT connection that was never used
        (a leaked socket) -- all publishing goes through the global
        `mqttc` client, so that connection is removed;
      * the idle loop used to busy-spin at 100% CPU while waiting for
        publish_flag -- it now sleeps briefly between polls.
    """
    global publish_flag
    global voice_flag

    while True:
        if not publish_flag:
            time.sleep(0.05)  # idle poll; avoids a busy-wait
            continue

        with open("audio.pcm", 'rb') as f:
            while True:
                chunk = f.read(64)
                if not chunk:
                    # End of file: send the terminating frame and disarm.
                    publish_flag = False
                    data = {
                        "type": "AUDIO_TYPE",
                        "data": {
                            "audio_frame": 2,
                            "audio_data": "00"
                        }
                    }
                    mqttc.publish(SUBSCRIBE_SET_TOPIC, json.dumps(data))
                    break

                # Frame 0 for the first chunk of an utterance, 1 afterwards.
                frame_no = 0 if voice_flag == 0 else 1
                voice_flag = 1
                data = {
                    "type": "AUDIO_TYPE",
                    "data": {
                        "audio_frame": frame_no,
                        "audio_data": chunk.hex()
                    }
                }
                mqttc.publish(SUBSCRIBE_SET_TOPIC, json.dumps(data))
                time.sleep(0.03)  # pace frames so the gateway can keep up

# Create and start the background publisher thread.
# NOTE(review): this starts before `mqttc` is created below; it only
# works because publish_flag starts False, so the thread does not touch
# mqttc until after the main client exists. Consider starting it after
# mqttc.connect() instead.
publish_thread = threading.Thread(target=publish_message)
publish_thread.start()   


          
        

# Main MQTT client: receives gateway uplink (on_message) and is also
# used by the publisher thread to send audio downlink.
mqttc = mqtt.Client(mqtt.CallbackAPIVersion.VERSION2)

mqttc.username_pw_set(MQTT_USER_NAME,MQTT_USER_PASSWD)
mqttc.on_connect = on_connect
mqttc.on_message = on_message

mqttc.connect(MQTT_URL, 1883, 60)

# Blocking call that processes network traffic, dispatches callbacks and
# handles reconnecting.
# Other loop*() functions are available that give a threaded interface and a
# manual interface.
mqttc.loop_forever()
# Stop the audio stream (only reached if loop_forever() returns).
stream.stop_stream()
stream.close()

# Release the PyAudio instance.
p.terminate()
