import dashscope
import json
import base64

from zhipuai import ZhipuAI
from http import HTTPStatus
from dashscope import Application
from dashscope.audio.tts_v2 import *
from dashscope.audio.asr import (Recognition, RecognitionCallback,
                                 RecognitionResult)


class Callback(RecognitionCallback):
    """Logging callback for streaming speech-recognition events."""

    def on_complete(self) -> None:
        # Recognition session finished.
        print("done")

    def on_error(self, result: RecognitionResult) -> None:
        # Log the failure payload for debugging.
        print(result)

    def on_event(self, result: RecognitionResult) -> None:
        # Log each partial/final recognition result as it arrives.
        print(result)


class MultiAgent(object):
    """Voice/vision agent: ASR -> LLM action routing -> optional TTS/VLM reply.

    Wraps DashScope (speech recognition, application LLM, speech synthesis)
    and ZhipuAI (GLM-4V vision) clients behind a single ``infer`` entry point.
    """

    def __init__(self, api_key=None, zhipu_api_key=None):
        """Create the ASR and vision clients.

        Args:
            api_key: DashScope API key; falls back to the built-in default.
            zhipu_api_key: ZhipuAI API key; falls back to the built-in default.

        SECURITY NOTE: the hard-coded default keys are kept only for backward
        compatibility — move them to environment variables or a secret store.
        """
        self.api_key = api_key or 'sk-a406b36a8e0041c0bee92d1f17355332'
        dashscope.api_key = self.api_key

        callback = Callback()
        # Real-time ASR over raw PCM sampled at 44.1 kHz.
        self.asr = Recognition(
                    model='paraformer-realtime-v2',
                    format='pcm',
                    sample_rate=44100,
                    callback=callback)

        self.vl = ZhipuAI(api_key=zhipu_api_key
                          or "47416fa79fbbc3a3adcba19505281bdd.9ECLp1uNc26UUxYS")

    def audio2text(self, audio_file):
        """Transcribe ``audio_file`` and return the first recognized sentence."""
        text = self.asr.call(audio_file).output['sentence'][0]['text']

        print(f"[audio2text]text={text}")
        return text

    def text2action(self, text):
        """Map free-form text to an action string via the DashScope app LLM."""
        response = Application.call(
                                app_id='99e310ee5b7549e3903566ca2124848f',
                                prompt=text,
                                stream=False,
                                api_key=self.api_key,)
        action = response.output.text

        print(f"[text2action]action={action}")
        return action

    def text2audio(self, text, audio_file="output.mp3"):
        """Synthesize ``text`` to speech, save it to ``audio_file``, return the path.

        ``audio_file`` now has a default so callers that omit it (see ``infer``)
        no longer raise TypeError; returning the path (instead of None) lets
        callers locate the generated audio.
        """
        tts = SpeechSynthesizer(model="cosyvoice-v1", voice="longxiaochun")
        audio_out = tts.call(text)

        with open(audio_file, 'wb') as f:
            f.write(audio_out)
        print(f"text2audio: saved={audio_file}")
        return audio_file

    def vision(self, image_file, question):
        """Ask GLM-4V ``question`` about ``image_file``; return a one-sentence answer."""
        with open(image_file, 'rb') as img_file:
            # GLM-4V accepts a base64-encoded image in the image_url field.
            img_base = base64.b64encode(img_file.read()).decode('utf-8')

        response = self.vl.chat.completions.create(
            model="glm-4v-plus",
            messages=[
              {
                "role": "user",
                "content": [
                  {
                    "type": "image_url",
                    "image_url": {
                        "url": img_base,
                    }
                  },
                  {
                    "type": "text",
                    "text": f"{question}, 使用一句话描述。",
                  }
                ]
              }
            ]
        )
        content = response.choices[0].message.content

        print(f"[vision]content={content}")
        return content

    def infer(self, audio_file, image_file):
        """Full pipeline: audio -> text -> action -> (optional) spoken reply.

        Returns:
            (text, action, audio_out) where ``audio_out`` is the path of the
            synthesized reply file, or None when no speech reply is needed.
        """
        print(f"[infer]audio_file={audio_file}; image_file={image_file}")

        # Speech to text.
        text = self.audio2text(audio_file)

        # Text to action code.
        action = self.text2action(text)

        # Decide what (if anything) to speak back, based on the action code.
        # The branches are mutually exclusive, so an elif chain is equivalent
        # to the original independent ifs.
        audio_text = ""
        if action == "h":
            # Greeting.
            audio_text = "大家好，我是鼎桥机器人大脑，超智。"
        elif action == "v":
            # Vision question: the answer comes from the VLM.
            audio_text = self.vision(image_file, text)
        elif action and action[0] == "n":
            # Navigation: "n-<destination>".
            audio_text = f"好的，目的地-{action[2:]}，让我们出发吧~"
        elif not action or action[0] not in ['u', 'd', 's', 'f', 'b', 'l', 'r', 'v', 'h', 'n']:
            # Unknown action codes are echoed verbatim; an empty action
            # (previously an IndexError on action[0]) now yields no speech.
            audio_text = action

        # BUG FIX: the original called self.text2audio(audio_text) although
        # text2audio required an explicit output path, raising TypeError at
        # runtime; text2audio now defaults the path and returns it.
        audio_out = None if audio_text == "" else self.text2audio(audio_text)

        return text, action, audio_out

        
# Module-level singleton: constructing the agent here runs at import time,
# creating the ASR and vision clients as a side effect of importing this module.
agent = MultiAgent()
