{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "945e562e",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-05-24T10:53:44.480057Z",
     "start_time": "2022-05-24T10:53:44.473077Z"
    }
   },
   "outputs": [],
   "source": [
    "API_KEY = 'LxG7I4xdAFLmsX43wKggBbFD'\n",
    "SECRET_KEY = 'WoT77EWlrhWC41m8YUrvn47WydEXK5IG'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "f283331e",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-05-24T10:53:45.055146Z",
     "start_time": "2022-05-24T10:53:45.041250Z"
    }
   },
   "outputs": [],
   "source": [
     "# coding=utf-8\n",
     "\n",
     "import sys\n",
     "import json\n",
     "import base64\n",
     "import time\n",
     "\n",
     "# Python 2/3 compatibility: select the right urllib names and the best wall-clock timer.\n",
     "IS_PY3 = sys.version_info.major == 3\n",
     "\n",
     "if IS_PY3:\n",
     "    from urllib.request import urlopen\n",
     "    from urllib.request import Request\n",
     "    from urllib.error import URLError\n",
     "    from urllib.parse import urlencode\n",
     "    timer = time.perf_counter\n",
     "else:\n",
     "    from urllib2 import urlopen\n",
     "    from urllib2 import Request\n",
     "    from urllib2 import URLError\n",
     "    from urllib import urlencode\n",
     "    if sys.platform == \"win32\":\n",
     "        # Under Python 2 on Windows, time.clock has the highest resolution.\n",
     "        timer = time.clock\n",
     "    else:\n",
     "        # On most other platforms the best timer is time.time()\n",
     "        timer = time.time\n",
     "\n",
    "\n",
     "# Audio file to recognize\n",
     "AUDIO_FILE = './audio/16k.pcm'  # only pcm/wav/amr are supported; the pro (fast) tier also accepts m4a\n",
     "# File format, taken from the file suffix\n",
     "FORMAT = AUDIO_FILE[-3:]  # suffix must be pcm/wav/amr (m4a on the pro tier only)\n",
     "\n",
     "CUID = '123456PYTHON'\n",
     "# Sample rate\n",
     "RATE = 16000  # fixed value required by the API\n",
     "\n",
     "# Standard tier\n",
     "\n",
     "DEV_PID = 1537  # 1537 = Mandarin, input-method model; pick the PID per the docs for language/model\n",
     "ASR_URL = 'http://vop.baidu.com/server_api'\n",
     "SCOPE = 'audio_voice_assistant_get'  # this scope grants ASR; enable it in the console if missing (very old apps may lack it)\n",
     "\n",
     "# Self-training platform: after your model goes live the console shows e.g.\n",
     "# \"pid:8001, modelid:1234\" -- use those as dev_pid=8001, lm_id=1234 and uncomment:\n",
     "# DEV_PID = 8001 ;   \n",
     "# LM_ID = 1234 ;\n",
     "\n",
     "# Pro (fast) tier: fill in your own appkey/appSecret and enable it in the console (may incur charges)\n",
     "\n",
     "# DEV_PID = 80001\n",
     "# ASR_URL = 'http://vop.baidu.com/pro_api'\n",
     "# SCOPE = 'brain_enhanced_asr'  # this scope grants the pro tier; enable it in the console if missing\n",
     "\n",
     "# Skip the scope check entirely (very old apps may have no scope field)\n",
     "# SCOPE = False\n",
     "\n",
     "class DemoError(Exception):\n",
     "    \"\"\"Raised for demo-level failures: bad credentials, wrong scope, empty audio file.\"\"\"\n",
     "    pass\n",
     "\n",
     "\n",
     "\"\"\"  TOKEN start \"\"\"\n",
     "\n",
     "TOKEN_URL = 'http://aip.baidubce.com/oauth/2.0/token'\n",
    "\n",
    "\n",
    "def fetch_token(API_KEY,SECRET_KEY):\n",
    "    params = {'grant_type': 'client_credentials',\n",
    "              'client_id': API_KEY,\n",
    "              'client_secret': SECRET_KEY}\n",
    "    post_data = urlencode(params)\n",
    "    if (IS_PY3):\n",
    "        post_data = post_data.encode( 'utf-8')\n",
    "    req = Request(TOKEN_URL, post_data)\n",
    "    try:\n",
    "        f = urlopen(req)\n",
    "        result_str = f.read()\n",
    "    except URLError as err:\n",
    "        print('token http response http code : ' + str(err.code))\n",
    "        result_str = err.read()\n",
    "    if (IS_PY3):\n",
    "        result_str =  result_str.decode()\n",
    "\n",
    "    print(result_str)\n",
    "    result = json.loads(result_str)\n",
    "    print(result)\n",
    "    if ('access_token' in result.keys() and 'scope' in result.keys()):\n",
    "        print(SCOPE)\n",
    "        if SCOPE and (not SCOPE in result['scope'].split(' ')):  # SCOPE = False 忽略检查\n",
    "            raise DemoError('scope is not correct')\n",
    "        print('SUCCESS WITH TOKEN: %s  EXPIRES IN SECONDS: %s' % (result['access_token'], result['expires_in']))\n",
    "        return result['access_token']\n",
    "    else:\n",
    "        raise DemoError('MAYBE API_KEY or SECRET_KEY not correct: access_token or scope not found in token response')\n",
    "\n",
    "\"\"\"  TOKEN end \"\"\"\n",
    "\n",
    "\n",
    "# if __name__ == '__main__':\n",
    "#     token = fetch_token()\n",
    "def asr(token,AUDIO_FILE):\n",
    "    speech_data = []\n",
    "    with open(AUDIO_FILE, 'rb') as speech_file:\n",
    "        speech_data = speech_file.read()\n",
    "\n",
    "    length = len(speech_data)\n",
    "    if length == 0:\n",
    "        raise DemoError('file %s length read 0 bytes' % AUDIO_FILE)\n",
    "    speech = base64.b64encode(speech_data)\n",
    "    if (IS_PY3):\n",
    "        speech = str(speech, 'utf-8')\n",
    "    params = {'dev_pid': DEV_PID,\n",
    "             #\"lm_id\" : LM_ID,    #测试自训练平台开启此项\n",
    "              'format': FORMAT,\n",
    "              'rate': RATE,\n",
    "              'token': token,\n",
    "              'cuid': CUID,\n",
    "              'channel': 1,\n",
    "              'speech': speech,\n",
    "              'len': length\n",
    "              }\n",
    "    post_data = json.dumps(params, sort_keys=False)\n",
    "    # print post_data\n",
    "    req = Request(ASR_URL, post_data.encode('utf-8'))\n",
    "    req.add_header('Content-Type', 'application/json')\n",
    "    try:\n",
    "        begin = timer()\n",
    "        f = urlopen(req)\n",
    "        result_str = f.read()\n",
    "        print (\"Request time cost %f\" % (timer() - begin))\n",
    "    except URLError as err:\n",
    "        print('asr http response http code : ' + str(err.code))\n",
    "        result_str = err.read()\n",
    "\n",
    "    if (IS_PY3):\n",
    "        result_str = str(result_str, 'utf-8')\n",
    "#     print(result_str)\n",
    "    with open(\"result.txt\",\"w\") as of:\n",
    "        of.write(result_str)\n",
    "    return result_str"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "705be431",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-05-24T10:53:45.979428Z",
     "start_time": "2022-05-24T10:53:45.918104Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{\"refresh_token\":\"25.7f25cb828ffc2de6462cfb8370c0f9d2.315360000.1968749625.282335-26255993\",\"expires_in\":2592000,\"session_key\":\"9mzdXvagIcC+o4hAfq1PJ4c0bboBlD7uxreMBcdHOcUyEvCuvlyVqf7OD7Gwwc5DPqreivSQroxxFff4l2NyiXU\\/jyrb5Q==\",\"access_token\":\"24.e358cf0a7b1e9dbee7546f1ab5225088.2592000.1655981625.282335-26255993\",\"scope\":\"audio_voice_assistant_get brain_enhanced_asr audio_tts_post brain_speech_realtime public brain_all_scope brain_asr_async wise_adapt lebo_resource_base lightservice_public hetu_basic lightcms_map_poi kaidian_kaidian ApsMisTest_Test\\u6743\\u9650 vis-classify_flower lpq_\\u5f00\\u653e cop_helloScope ApsMis_fangdi_permission smartapp_snsapi_base smartapp_mapp_dev_manage iop_autocar oauth_tp_app smartapp_smart_game_openapi oauth_sessionkey smartapp_swanid_verify smartapp_opensource_openapi smartapp_opensource_recapi fake_face_detect_\\u5f00\\u653eScope vis-ocr_\\u865a\\u62df\\u4eba\\u7269\\u52a9\\u7406 idl-video_\\u865a\\u62df\\u4eba\\u7269\\u52a9\\u7406 smartapp_component smartapp_search_plugin avatar_video_test b2b_tp_openapi b2b_tp_openapi_online smartapp_gov_aladin_to_xcx\",\"session_secret\":\"1a8361c129116f724f1c5a069fce2455\"}\n",
      "\n",
      "{'refresh_token': '25.7f25cb828ffc2de6462cfb8370c0f9d2.315360000.1968749625.282335-26255993', 'expires_in': 2592000, 'session_key': '9mzdXvagIcC+o4hAfq1PJ4c0bboBlD7uxreMBcdHOcUyEvCuvlyVqf7OD7Gwwc5DPqreivSQroxxFff4l2NyiXU/jyrb5Q==', 'access_token': '24.e358cf0a7b1e9dbee7546f1ab5225088.2592000.1655981625.282335-26255993', 'scope': 'audio_voice_assistant_get brain_enhanced_asr audio_tts_post brain_speech_realtime public brain_all_scope brain_asr_async wise_adapt lebo_resource_base lightservice_public hetu_basic lightcms_map_poi kaidian_kaidian ApsMisTest_Test权限 vis-classify_flower lpq_开放 cop_helloScope ApsMis_fangdi_permission smartapp_snsapi_base smartapp_mapp_dev_manage iop_autocar oauth_tp_app smartapp_smart_game_openapi oauth_sessionkey smartapp_swanid_verify smartapp_opensource_openapi smartapp_opensource_recapi fake_face_detect_开放Scope vis-ocr_虚拟人物助理 idl-video_虚拟人物助理 smartapp_component smartapp_search_plugin avatar_video_test b2b_tp_openapi b2b_tp_openapi_online smartapp_gov_aladin_to_xcx', 'session_secret': '1a8361c129116f724f1c5a069fce2455'}\n",
      "audio_voice_assistant_get\n",
      "SUCCESS WITH TOKEN: 24.e358cf0a7b1e9dbee7546f1ab5225088.2592000.1655981625.282335-26255993  EXPIRES IN SECONDS: 2592000\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'24.e358cf0a7b1e9dbee7546f1ab5225088.2592000.1655981625.282335-26255993'"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Fetch an OAuth token once; it is reused by the ASR calls below.\n",
     "jiang_token = fetch_token(API_KEY,SECRET_KEY)\n",
     "jiang_token"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "e8d3c3b1",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-05-24T10:53:47.429160Z",
     "start_time": "2022-05-24T10:53:46.645079Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Request time cost 0.765280\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'{\"corpus_no\":\"7101254372553460972\",\"err_msg\":\"success.\",\"err_no\":0,\"result\":[\"北京科技馆。\"],\"sn\":\"57542268271653389626\"}\\n'"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Recognize the bundled 16 kHz PCM sample; returns the raw JSON reply string.\n",
     "asr(jiang_token,'audio/16k.pcm')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "fde5619e",
   "metadata": {},
   "source": [
    "# 语音识别自动回复文本机器人\n",
    "\n",
     "> 1. 准备音频文件\n",
     "> 2. 调用语音识别，将音频转成文本（已完成）\n",
     "> 3. 文本自动回复（原理：问和答，即 I/O 输入和输出 —— 特点：key:value 映射）"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6f0f06da",
   "metadata": {},
   "source": [
    "## 准备音频文件"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "8f6a5caf",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-05-24T10:53:53.273984Z",
     "start_time": "2022-05-24T10:53:50.928213Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Requirement already satisfied: SpeechRecognition in c:\\programdata\\anaconda3\\lib\\site-packages (3.8.1)\n"
     ]
    }
   ],
   "source": [
    "!pip install SpeechRecognition"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "d8e7fec0",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-05-24T10:54:02.458173Z",
     "start_time": "2022-05-24T10:53:53.292586Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Collecting PyAudio\n",
      "  Using cached PyAudio-0.2.11.tar.gz (37 kB)\n",
      "Building wheels for collected packages: PyAudio\n",
      "  Building wheel for PyAudio (setup.py): started\n",
      "  Building wheel for PyAudio (setup.py): finished with status 'error'\n",
      "  Running setup.py clean for PyAudio\n",
      "Failed to build PyAudio\n",
      "Installing collected packages: PyAudio\n",
      "    Running setup.py install for PyAudio: started\n",
      "    Running setup.py install for PyAudio: finished with status 'error'\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  ERROR: Command errored out with exit status 1:\n",
      "   command: 'C:\\ProgramData\\Anaconda3\\python.exe' -u -c 'import sys, setuptools, tokenize; sys.argv[0] = '\"'\"'C:\\\\Users\\\\86137\\\\AppData\\\\Local\\\\Temp\\\\pip-install-6ebxq66d\\\\pyaudio_f7c05f98b21d49388ad97c81689803c5\\\\setup.py'\"'\"'; __file__='\"'\"'C:\\\\Users\\\\86137\\\\AppData\\\\Local\\\\Temp\\\\pip-install-6ebxq66d\\\\pyaudio_f7c05f98b21d49388ad97c81689803c5\\\\setup.py'\"'\"';f=getattr(tokenize, '\"'\"'open'\"'\"', open)(__file__);code=f.read().replace('\"'\"'\\r\\n'\"'\"', '\"'\"'\\n'\"'\"');f.close();exec(compile(code, __file__, '\"'\"'exec'\"'\"'))' bdist_wheel -d 'C:\\Users\\86137\\AppData\\Local\\Temp\\pip-wheel-fwvbhx5v'\n",
      "       cwd: C:\\Users\\86137\\AppData\\Local\\Temp\\pip-install-6ebxq66d\\pyaudio_f7c05f98b21d49388ad97c81689803c5\\\n",
      "  Complete output (9 lines):\n",
      "  running bdist_wheel\n",
      "  running build\n",
      "  running build_py\n",
      "  creating build\n",
      "  creating build\\lib.win-amd64-3.8\n",
      "  copying src\\pyaudio.py -> build\\lib.win-amd64-3.8\n",
      "  running build_ext\n",
      "  building '_portaudio' extension\n",
      "  error: Microsoft Visual C++ 14.0 or greater is required. Get it with \"Microsoft C++ Build Tools\": https://visualstudio.microsoft.com/visual-cpp-build-tools/\n",
      "  ----------------------------------------\n",
      "  ERROR: Failed building wheel for PyAudio\n",
      "    ERROR: Command errored out with exit status 1:\n",
      "     command: 'C:\\ProgramData\\Anaconda3\\python.exe' -u -c 'import sys, setuptools, tokenize; sys.argv[0] = '\"'\"'C:\\\\Users\\\\86137\\\\AppData\\\\Local\\\\Temp\\\\pip-install-6ebxq66d\\\\pyaudio_f7c05f98b21d49388ad97c81689803c5\\\\setup.py'\"'\"'; __file__='\"'\"'C:\\\\Users\\\\86137\\\\AppData\\\\Local\\\\Temp\\\\pip-install-6ebxq66d\\\\pyaudio_f7c05f98b21d49388ad97c81689803c5\\\\setup.py'\"'\"';f=getattr(tokenize, '\"'\"'open'\"'\"', open)(__file__);code=f.read().replace('\"'\"'\\r\\n'\"'\"', '\"'\"'\\n'\"'\"');f.close();exec(compile(code, __file__, '\"'\"'exec'\"'\"'))' install --record 'C:\\Users\\86137\\AppData\\Local\\Temp\\pip-record-_4yp8zs5\\install-record.txt' --single-version-externally-managed --compile --install-headers 'C:\\ProgramData\\Anaconda3\\Include\\PyAudio'\n",
      "         cwd: C:\\Users\\86137\\AppData\\Local\\Temp\\pip-install-6ebxq66d\\pyaudio_f7c05f98b21d49388ad97c81689803c5\\\n",
      "    Complete output (9 lines):\n",
      "    running install\n",
      "    running build\n",
      "    running build_py\n",
      "    creating build\n",
      "    creating build\\lib.win-amd64-3.8\n",
      "    copying src\\pyaudio.py -> build\\lib.win-amd64-3.8\n",
      "    running build_ext\n",
      "    building '_portaudio' extension\n",
      "    error: Microsoft Visual C++ 14.0 or greater is required. Get it with \"Microsoft C++ Build Tools\": https://visualstudio.microsoft.com/visual-cpp-build-tools/\n",
      "    ----------------------------------------\n",
      "ERROR: Command errored out with exit status 1: 'C:\\ProgramData\\Anaconda3\\python.exe' -u -c 'import sys, setuptools, tokenize; sys.argv[0] = '\"'\"'C:\\\\Users\\\\86137\\\\AppData\\\\Local\\\\Temp\\\\pip-install-6ebxq66d\\\\pyaudio_f7c05f98b21d49388ad97c81689803c5\\\\setup.py'\"'\"'; __file__='\"'\"'C:\\\\Users\\\\86137\\\\AppData\\\\Local\\\\Temp\\\\pip-install-6ebxq66d\\\\pyaudio_f7c05f98b21d49388ad97c81689803c5\\\\setup.py'\"'\"';f=getattr(tokenize, '\"'\"'open'\"'\"', open)(__file__);code=f.read().replace('\"'\"'\\r\\n'\"'\"', '\"'\"'\\n'\"'\"');f.close();exec(compile(code, __file__, '\"'\"'exec'\"'\"'))' install --record 'C:\\Users\\86137\\AppData\\Local\\Temp\\pip-record-_4yp8zs5\\install-record.txt' --single-version-externally-managed --compile --install-headers 'C:\\ProgramData\\Anaconda3\\Include\\PyAudio' Check the logs for full command output.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Collecting pipwin\n",
      "  Using cached pipwin-0.5.2-py2.py3-none-any.whl\n",
      "Requirement already satisfied: six in c:\\programdata\\anaconda3\\lib\\site-packages (from pipwin) (1.15.0)\n",
      "Collecting js2py\n",
      "  Using cached Js2Py-0.71-py3-none-any.whl (1.0 MB)\n",
      "Requirement already satisfied: requests in c:\\programdata\\anaconda3\\lib\\site-packages (from pipwin) (2.25.1)\n",
      "Collecting pySmartDL>=1.3.1\n",
      "  Using cached pySmartDL-1.3.4-py3-none-any.whl (20 kB)\n",
      "Requirement already satisfied: packaging in c:\\programdata\\anaconda3\\lib\\site-packages (from pipwin) (20.9)\n",
      "Collecting pyprind\n",
      "  Using cached PyPrind-2.11.3-py2.py3-none-any.whl (8.4 kB)\n",
      "Requirement already satisfied: beautifulsoup4>=4.9.0 in c:\\programdata\\anaconda3\\lib\\site-packages (from pipwin) (4.9.3)\n",
      "Collecting docopt\n",
      "  Using cached docopt-0.6.2-py2.py3-none-any.whl\n",
      "Requirement already satisfied: soupsieve>1.2 in c:\\programdata\\anaconda3\\lib\\site-packages (from beautifulsoup4>=4.9.0->pipwin) (2.2.1)\n",
      "Collecting tzlocal>=1.2\n",
      "  Using cached tzlocal-4.2-py3-none-any.whl (19 kB)\n",
      "Collecting pyjsparser>=2.5.1\n",
      "  Using cached pyjsparser-2.7.1-py3-none-any.whl\n",
      "Collecting pytz-deprecation-shim\n",
      "  Using cached pytz_deprecation_shim-0.1.0.post0-py2.py3-none-any.whl (15 kB)\n",
      "Collecting backports.zoneinfo\n",
      "  Using cached backports.zoneinfo-0.2.1-cp38-cp38-win_amd64.whl (38 kB)\n",
      "Requirement already satisfied: tzdata in c:\\programdata\\anaconda3\\lib\\site-packages (from tzlocal>=1.2->js2py->pipwin) (2022.1)\n",
      "Requirement already satisfied: pyparsing>=2.0.2 in c:\\programdata\\anaconda3\\lib\\site-packages (from packaging->pipwin) (2.4.7)\n",
      "Requirement already satisfied: certifi>=2017.4.17 in c:\\programdata\\anaconda3\\lib\\site-packages (from requests->pipwin) (2020.12.5)\n",
      "Requirement already satisfied: chardet<5,>=3.0.2 in c:\\programdata\\anaconda3\\lib\\site-packages (from requests->pipwin) (4.0.0)\n",
      "Requirement already satisfied: idna<3,>=2.5 in c:\\programdata\\anaconda3\\lib\\site-packages (from requests->pipwin) (2.10)\n",
      "Requirement already satisfied: urllib3<1.27,>=1.21.1 in c:\\programdata\\anaconda3\\lib\\site-packages (from requests->pipwin) (1.26.4)\n",
      "Installing collected packages: backports.zoneinfo, pytz-deprecation-shim, tzlocal, pyjsparser, pySmartDL, pyprind, js2py, docopt, pipwin\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "ERROR: Could not install packages due to an OSError: [WinError 5] 拒绝访问。: 'C:\\\\ProgramData\\\\Anaconda3\\\\Lib\\\\site-packages\\\\backports\\\\__init__.py'\n",
      "Consider using the `--user` option or check the permissions.\n",
      "\n",
      "'pipwin' 不是内部或外部命令，也不是可运行的程序\n",
      "或批处理文件。\n"
     ]
    }
   ],
   "source": [
    "!pip install PyAudio\n",
    "\n",
    "!pip install pipwin\n",
    "!pipwin instll PyAudio"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bf21b0c8",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-05-17T12:36:28.771504Z",
     "start_time": "2022-05-17T12:36:28.733700Z"
    }
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "09ec4362",
   "metadata": {},
   "source": [
    "## 文本自动回复"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "3b0a8f24",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-05-24T10:54:14.966492Z",
     "start_time": "2022-05-24T10:54:14.962923Z"
    }
   },
   "outputs": [],
   "source": [
     "# Canned Q->A table: the recognized question text must match a key exactly.\n",
     "qa = {\n",
     "    \"你好\":\"你好呀，有什么事么?\",\n",
     "    \"你叫什么名字\":\"我是人见人爱，花见花开的小小度呀\",\n",
     "    \"你多大了\":\"这是很私密的问题，我今年18岁了。\"\n",
     "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "37e81ea0",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-05-24T10:54:16.965447Z",
     "start_time": "2022-05-24T10:54:16.948246Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'你好呀，有什么事么?'"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Exact-match lookup demo; returns None for unknown questions.\n",
     "qa.get('你好')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "5affc7e0",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-05-24T10:54:18.123853Z",
     "start_time": "2022-05-24T10:54:18.115874Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'我是人见人爱，花见花开的小小度呀'"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Exact-match lookup demo; returns None for unknown questions.\n",
     "qa.get('你叫什么名字')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "16ac2ea7",
   "metadata": {},
   "source": [
    "## 实践"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "b4cb9692",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-05-24T10:54:28.819072Z",
     "start_time": "2022-05-24T10:54:28.806514Z"
    }
   },
   "outputs": [
    {
     "ename": "SyntaxError",
     "evalue": "invalid character in identifier (<ipython-input-13-a6a8204a9954>, line 9)",
     "output_type": "error",
     "traceback": [
      "\u001b[1;36m  File \u001b[1;32m\"<ipython-input-13-a6a8204a9954>\"\u001b[1;36m, line \u001b[1;32m9\u001b[0m\n\u001b[1;33m    f.write(audio.get_wav_data(convert_rate=16000))、\u001b[0m\n\u001b[1;37m                                                   ^\u001b[0m\n\u001b[1;31mSyntaxError\u001b[0m\u001b[1;31m:\u001b[0m invalid character in identifier\n"
     ]
    }
   ],
   "source": [
    "# 1.录制音频文件\n",
    "import speech_recognition\n",
    "r = speech_recognition.Recognizer()\n",
    "# 通过电脑的麦克风进行音频的获取\n",
    "with speech_recognition.Microphone() as source:\n",
    "    audio = r.listen(source)\n",
    "# 将数据保存到wav文件中\n",
    "with open(\"1.wav\", \"wb\") as f: \n",
    "    f.write(audio.get_wav_data(convert_rate=16000))、\n",
    "    \n",
    "# 2.调用百度asr\n",
    "jiang_token = fetch_token(API_KEY,SECRET_KEY)\n",
    "\n",
    "asr_result = eval(asr(jiang_token,\"1.wav\"))['result'][0][:-1]\n",
    "\n",
    "# 3.文本自动回复\n",
    "qa.get(asr_result)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "9f21fd0d",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-05-17T12:47:44.788687Z",
     "start_time": "2022-05-17T12:47:44.760736Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Help on package speech_recognition:\n",
      "\n",
      "NAME\n",
      "    speech_recognition - Library for performing speech recognition, with support for several engines and APIs, online and offline.\n",
      "\n",
      "PACKAGE CONTENTS\n",
      "    __main__\n",
      "\n",
      "CLASSES\n",
      "    builtins.Exception(builtins.BaseException)\n",
      "        RequestError\n",
      "        UnknownValueError\n",
      "        WaitTimeoutError\n",
      "    builtins.object\n",
      "        AudioData\n",
      "        AudioSource\n",
      "            AudioFile\n",
      "            Microphone\n",
      "            Recognizer\n",
      "        PortableNamedTemporaryFile\n",
      "    \n",
      "    class AudioData(builtins.object)\n",
      "     |  AudioData(frame_data, sample_rate, sample_width)\n",
      "     |  \n",
      "     |  Creates a new ``AudioData`` instance, which represents mono audio data.\n",
      "     |  \n",
      "     |  The raw audio data is specified by ``frame_data``, which is a sequence of bytes representing audio samples. This is the frame data structure used by the PCM WAV format.\n",
      "     |  \n",
      "     |  The width of each sample, in bytes, is specified by ``sample_width``. Each group of ``sample_width`` bytes represents a single audio sample.\n",
      "     |  \n",
      "     |  The audio data is assumed to have a sample rate of ``sample_rate`` samples per second (Hertz).\n",
      "     |  \n",
      "     |  Usually, instances of this class are obtained from ``recognizer_instance.record`` or ``recognizer_instance.listen``, or in the callback for ``recognizer_instance.listen_in_background``, rather than instantiating them directly.\n",
      "     |  \n",
      "     |  Methods defined here:\n",
      "     |  \n",
      "     |  __init__(self, frame_data, sample_rate, sample_width)\n",
      "     |      Initialize self.  See help(type(self)) for accurate signature.\n",
      "     |  \n",
      "     |  get_aiff_data(self, convert_rate=None, convert_width=None)\n",
      "     |      Returns a byte string representing the contents of an AIFF-C file containing the audio represented by the ``AudioData`` instance.\n",
      "     |      \n",
      "     |      If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match.\n",
      "     |      \n",
      "     |      If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match.\n",
      "     |      \n",
      "     |      Writing these bytes directly to a file results in a valid `AIFF-C file <https://en.wikipedia.org/wiki/Audio_Interchange_File_Format>`__.\n",
      "     |  \n",
      "     |  get_flac_data(self, convert_rate=None, convert_width=None)\n",
      "     |      Returns a byte string representing the contents of a FLAC file containing the audio represented by the ``AudioData`` instance.\n",
      "     |      \n",
      "     |      Note that 32-bit FLAC is not supported. If the audio data is 32-bit and ``convert_width`` is not specified, then the resulting FLAC will be a 24-bit FLAC.\n",
      "     |      \n",
      "     |      If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match.\n",
      "     |      \n",
      "     |      If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match.\n",
      "     |      \n",
      "     |      Writing these bytes directly to a file results in a valid `FLAC file <https://en.wikipedia.org/wiki/FLAC>`__.\n",
      "     |  \n",
      "     |  get_raw_data(self, convert_rate=None, convert_width=None)\n",
      "     |      Returns a byte string representing the raw frame data for the audio represented by the ``AudioData`` instance.\n",
      "     |      \n",
      "     |      If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match.\n",
      "     |      \n",
      "     |      If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match.\n",
      "     |      \n",
      "     |      Writing these bytes directly to a file results in a valid `RAW/PCM audio file <https://en.wikipedia.org/wiki/Raw_audio_format>`__.\n",
      "     |  \n",
      "     |  get_segment(self, start_ms=None, end_ms=None)\n",
      "     |      Returns a new ``AudioData`` instance, trimmed to a given time interval. In other words, an ``AudioData`` instance with the same audio data except starting at ``start_ms`` milliseconds in and ending ``end_ms`` milliseconds in.\n",
      "     |      \n",
      "     |      If not specified, ``start_ms`` defaults to the beginning of the audio, and ``end_ms`` defaults to the end.\n",
      "     |  \n",
      "     |  get_wav_data(self, convert_rate=None, convert_width=None)\n",
      "     |      Returns a byte string representing the contents of a WAV file containing the audio represented by the ``AudioData`` instance.\n",
      "     |      \n",
      "     |      If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match.\n",
      "     |      \n",
      "     |      If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match.\n",
      "     |      \n",
      "     |      Writing these bytes directly to a file results in a valid `WAV file <https://en.wikipedia.org/wiki/WAV>`__.\n",
      "     |  \n",
      "     |  ----------------------------------------------------------------------\n",
      "     |  Data descriptors defined here:\n",
      "     |  \n",
      "     |  __dict__\n",
      "     |      dictionary for instance variables (if defined)\n",
      "     |  \n",
      "     |  __weakref__\n",
      "     |      list of weak references to the object (if defined)\n",
      "    \n",
      "    class AudioFile(AudioSource)\n",
      "     |  AudioFile(filename_or_fileobject)\n",
      "     |  \n",
      "     |  Creates a new ``AudioFile`` instance given a WAV/AIFF/FLAC audio file ``filename_or_fileobject``. Subclass of ``AudioSource``.\n",
      "     |  \n",
      "     |  If ``filename_or_fileobject`` is a string, then it is interpreted as a path to an audio file on the filesystem. Otherwise, ``filename_or_fileobject`` should be a file-like object such as ``io.BytesIO`` or similar.\n",
      "     |  \n",
      "     |  Note that functions that read from the audio (such as ``recognizer_instance.record`` or ``recognizer_instance.listen``) will move ahead in the stream. For example, if you execute ``recognizer_instance.record(audiofile_instance, duration=10)`` twice, the first time it will return the first 10 seconds of audio, and the second time it will return the 10 seconds of audio right after that. This is always reset to the beginning when entering an ``AudioFile`` context.\n",
      "     |  \n",
      "     |  WAV files must be in PCM/LPCM format; WAVE_FORMAT_EXTENSIBLE and compressed WAV are not supported and may result in undefined behaviour.\n",
      "     |  \n",
      "     |  Both AIFF and AIFF-C (compressed AIFF) formats are supported.\n",
      "     |  \n",
      "     |  FLAC files must be in native FLAC format; OGG-FLAC is not supported and may result in undefined behaviour.\n",
      "     |  \n",
      "     |  Method resolution order:\n",
      "     |      AudioFile\n",
      "     |      AudioSource\n",
      "     |      builtins.object\n",
      "     |  \n",
      "     |  Methods defined here:\n",
      "     |  \n",
      "     |  __enter__(self)\n",
      "     |  \n",
      "     |  __exit__(self, exc_type, exc_value, traceback)\n",
      "     |  \n",
      "     |  __init__(self, filename_or_fileobject)\n",
      "     |      Initialize self.  See help(type(self)) for accurate signature.\n",
      "     |  \n",
      "     |  ----------------------------------------------------------------------\n",
      "     |  Data and other attributes defined here:\n",
      "     |  \n",
      "     |  AudioFileStream = <class 'speech_recognition.AudioFile.AudioFileStream...\n",
      "     |  \n",
      "     |  \n",
      "     |  ----------------------------------------------------------------------\n",
      "     |  Data descriptors inherited from AudioSource:\n",
      "     |  \n",
      "     |  __dict__\n",
      "     |      dictionary for instance variables (if defined)\n",
      "     |  \n",
      "     |  __weakref__\n",
      "     |      list of weak references to the object (if defined)\n",
      "    \n",
      "    class AudioSource(builtins.object)\n",
      "     |  Methods defined here:\n",
      "     |  \n",
      "     |  __enter__(self)\n",
      "     |  \n",
      "     |  __exit__(self, exc_type, exc_value, traceback)\n",
      "     |  \n",
      "     |  __init__(self)\n",
      "     |      Initialize self.  See help(type(self)) for accurate signature.\n",
      "     |  \n",
      "     |  ----------------------------------------------------------------------\n",
      "     |  Data descriptors defined here:\n",
      "     |  \n",
      "     |  __dict__\n",
      "     |      dictionary for instance variables (if defined)\n",
      "     |  \n",
      "     |  __weakref__\n",
      "     |      list of weak references to the object (if defined)\n",
      "    \n",
      "    class Microphone(AudioSource)\n",
      "     |  Microphone(device_index=None, sample_rate=None, chunk_size=1024)\n",
      "     |  \n",
      "     |  Creates a new ``Microphone`` instance, which represents a physical microphone on the computer. Subclass of ``AudioSource``.\n",
      "     |  \n",
      "     |  This will throw an ``AttributeError`` if you don't have PyAudio 0.2.11 or later installed.\n",
      "     |  \n",
      "     |  If ``device_index`` is unspecified or ``None``, the default microphone is used as the audio source. Otherwise, ``device_index`` should be the index of the device to use for audio input.\n",
      "     |  \n",
      "     |  A device index is an integer between 0 and ``pyaudio.get_device_count() - 1`` (assume we have used ``import pyaudio`` beforehand) inclusive. It represents an audio device such as a microphone or speaker. See the `PyAudio documentation <http://people.csail.mit.edu/hubert/pyaudio/docs/>`__ for more details.\n",
      "     |  \n",
      "     |  The microphone audio is recorded in chunks of ``chunk_size`` samples, at a rate of ``sample_rate`` samples per second (Hertz). If not specified, the value of ``sample_rate`` is determined automatically from the system's microphone settings.\n",
      "     |  \n",
      "     |  Higher ``sample_rate`` values result in better audio quality, but also more bandwidth (and therefore, slower recognition). Additionally, some CPUs, such as those in older Raspberry Pi models, can't keep up if this value is too high.\n",
      "     |  \n",
      "     |  Higher ``chunk_size`` values help avoid triggering on rapidly changing ambient noise, but also makes detection less sensitive. This value, generally, should be left at its default.\n",
      "     |  \n",
      "     |  Method resolution order:\n",
      "     |      Microphone\n",
      "     |      AudioSource\n",
      "     |      builtins.object\n",
      "     |  \n",
      "     |  Methods defined here:\n",
      "     |  \n",
      "     |  __enter__(self)\n",
      "     |  \n",
      "     |  __exit__(self, exc_type, exc_value, traceback)\n",
      "     |  \n",
      "     |  __init__(self, device_index=None, sample_rate=None, chunk_size=1024)\n",
      "     |      Initialize self.  See help(type(self)) for accurate signature.\n",
      "     |  \n",
      "     |  ----------------------------------------------------------------------\n",
      "     |  Static methods defined here:\n",
      "     |  \n",
      "     |  get_pyaudio()\n",
      "     |      Imports the pyaudio module and checks its version. Throws exceptions if pyaudio can't be found or a wrong version is installed\n",
      "     |  \n",
      "     |  list_microphone_names()\n",
      "     |      Returns a list of the names of all available microphones. For microphones where the name can't be retrieved, the list entry contains ``None`` instead.\n",
      "     |      \n",
      "     |      The index of each microphone's name is the same as its device index when creating a ``Microphone`` instance - indices in this list can be used as values of ``device_index``.\n",
      "     |  \n",
      "     |  ----------------------------------------------------------------------\n",
      "     |  Data and other attributes defined here:\n",
      "     |  \n",
      "     |  MicrophoneStream = <class 'speech_recognition.Microphone.MicrophoneStr...\n",
      "     |  \n",
      "     |  \n",
      "     |  ----------------------------------------------------------------------\n",
      "     |  Data descriptors inherited from AudioSource:\n",
      "     |  \n",
      "     |  __dict__\n",
      "     |      dictionary for instance variables (if defined)\n",
      "     |  \n",
      "     |  __weakref__\n",
      "     |      list of weak references to the object (if defined)\n",
      "    \n",
      "    class PortableNamedTemporaryFile(builtins.object)\n",
      "     |  PortableNamedTemporaryFile(mode='w+b')\n",
      "     |  \n",
      "     |  Limited replacement for ``tempfile.NamedTemporaryFile``, except unlike ``tempfile.NamedTemporaryFile``, the file can be opened again while it's currently open, even on Windows.\n",
      "     |  \n",
      "     |  Methods defined here:\n",
      "     |  \n",
      "     |  __enter__(self)\n",
      "     |  \n",
      "     |  __exit__(self, exc_type, exc_value, traceback)\n",
      "     |  \n",
      "     |  __init__(self, mode='w+b')\n",
      "     |      Initialize self.  See help(type(self)) for accurate signature.\n",
      "     |  \n",
      "     |  flush(self, *args, **kwargs)\n",
      "     |  \n",
      "     |  write(self, *args, **kwargs)\n",
      "     |  \n",
      "     |  writelines(self, *args, **kwargs)\n",
      "     |  \n",
      "     |  ----------------------------------------------------------------------\n",
      "     |  Data descriptors defined here:\n",
      "     |  \n",
      "     |  __dict__\n",
      "     |      dictionary for instance variables (if defined)\n",
      "     |  \n",
      "     |  __weakref__\n",
      "     |      list of weak references to the object (if defined)\n",
      "    \n",
      "    class Recognizer(AudioSource)\n",
      "     |  Method resolution order:\n",
      "     |      Recognizer\n",
      "     |      AudioSource\n",
      "     |      builtins.object\n",
      "     |  \n",
      "     |  Methods defined here:\n",
      "     |  \n",
      "     |  __init__(self)\n",
      "     |      Creates a new ``Recognizer`` instance, which represents a collection of speech recognition functionality.\n",
      "     |  \n",
      "     |  adjust_for_ambient_noise(self, source, duration=1)\n",
      "     |      Adjusts the energy threshold dynamically using audio from ``source`` (an ``AudioSource`` instance) to account for ambient noise.\n",
      "     |      \n",
      "     |      Intended to calibrate the energy threshold with the ambient energy level. Should be used on periods of audio without speech - will stop early if any speech is detected.\n",
      "     |      \n",
      "     |      The ``duration`` parameter is the maximum number of seconds that it will dynamically adjust the threshold for before returning. This value should be at least 0.5 in order to get a representative sample of the ambient noise.\n",
      "     |  \n",
      "     |  listen(self, source, timeout=None, phrase_time_limit=None, snowboy_configuration=None)\n",
      "     |      Records a single phrase from ``source`` (an ``AudioSource`` instance) into an ``AudioData`` instance, which it returns.\n",
      "     |      \n",
      "     |      This is done by waiting until the audio has an energy above ``recognizer_instance.energy_threshold`` (the user has started speaking), and then recording until it encounters ``recognizer_instance.pause_threshold`` seconds of non-speaking or there is no more audio input. The ending silence is not included.\n",
      "     |      \n",
      "     |      The ``timeout`` parameter is the maximum number of seconds that this will wait for a phrase to start before giving up and throwing an ``speech_recognition.WaitTimeoutError`` exception. If ``timeout`` is ``None``, there will be no wait timeout.\n",
      "     |      \n",
      "     |      The ``phrase_time_limit`` parameter is the maximum number of seconds that this will allow a phrase to continue before stopping and returning the part of the phrase processed before the time limit was reached. The resulting audio will be the phrase cut off at the time limit. If ``phrase_timeout`` is ``None``, there will be no phrase time limit.\n",
      "     |      \n",
      "     |      The ``snowboy_configuration`` parameter allows integration with `Snowboy <https://snowboy.kitt.ai/>`__, an offline, high-accuracy, power-efficient hotword recognition engine. When used, this function will pause until Snowboy detects a hotword, after which it will unpause. This parameter should either be ``None`` to turn off Snowboy support, or a tuple of the form ``(SNOWBOY_LOCATION, LIST_OF_HOT_WORD_FILES)``, where ``SNOWBOY_LOCATION`` is the path to the Snowboy root directory, and ``LIST_OF_HOT_WORD_FILES`` is a list of paths to Snowboy hotword configuration files (`*.pmdl` or `*.umdl` format).\n",
      "     |      \n",
      "     |      This operation will always complete within ``timeout + phrase_timeout`` seconds if both are numbers, either by returning the audio data, or by raising a ``speech_recognition.WaitTimeoutError`` exception.\n",
      "     |  \n",
      "     |  listen_in_background(self, source, callback, phrase_time_limit=None)\n",
      "     |      Spawns a thread to repeatedly record phrases from ``source`` (an ``AudioSource`` instance) into an ``AudioData`` instance and call ``callback`` with that ``AudioData`` instance as soon as each phrase are detected.\n",
      "     |      \n",
      "     |      Returns a function object that, when called, requests that the background listener thread stop. The background thread is a daemon and will not stop the program from exiting if there are no other non-daemon threads. The function accepts one parameter, ``wait_for_stop``: if truthy, the function will wait for the background listener to stop before returning, otherwise it will return immediately and the background listener thread might still be running for a second or two afterwards. Additionally, if you are using a truthy value for ``wait_for_stop``, you must call the function from the same thread you originally called ``listen_in_background`` from.\n",
      "     |      \n",
      "     |      Phrase recognition uses the exact same mechanism as ``recognizer_instance.listen(source)``. The ``phrase_time_limit`` parameter works in the same way as the ``phrase_time_limit`` parameter for ``recognizer_instance.listen(source)``, as well.\n",
      "     |      \n",
      "     |      The ``callback`` parameter is a function that should accept two parameters - the ``recognizer_instance``, and an ``AudioData`` instance representing the captured audio. Note that ``callback`` function will be called from a non-main thread.\n",
      "     |  \n",
      "     |  recognize_bing(self, audio_data, key, language='en-US', show_all=False)\n",
      "     |      Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Microsoft Bing Speech API.\n",
      "     |      \n",
      "     |      The Microsoft Bing Speech API key is specified by ``key``. Unfortunately, these are not available without `signing up for an account <https://azure.microsoft.com/en-ca/pricing/details/cognitive-services/speech-api/>`__ with Microsoft Azure.\n",
      "     |      \n",
      "     |      To get the API key, go to the `Microsoft Azure Portal Resources <https://portal.azure.com/>`__ page, go to \"All Resources\" > \"Add\" > \"See All\" > Search \"Bing Speech API > \"Create\", and fill in the form to make a \"Bing Speech API\" resource. On the resulting page (which is also accessible from the \"All Resources\" page in the Azure Portal), go to the \"Show Access Keys\" page, which will have two API keys, either of which can be used for the `key` parameter. Microsoft Bing Speech API keys are 32-character lowercase hexadecimal strings.\n",
      "     |      \n",
      "     |      The recognition language is determined by ``language``, a BCP-47 language tag like ``\"en-US\"`` (US English) or ``\"fr-FR\"`` (International French), defaulting to US English. A list of supported language values can be found in the `API documentation <https://docs.microsoft.com/en-us/azure/cognitive-services/speech/api-reference-rest/bingvoicerecognition#recognition-language>`__ under \"Interactive and dictation mode\".\n",
      "     |      \n",
      "     |      Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response <https://docs.microsoft.com/en-us/azure/cognitive-services/speech/api-reference-rest/bingvoicerecognition#sample-responses>`__ as a JSON dictionary.\n",
      "     |      \n",
      "     |      Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.\n",
      "     |  \n",
      "     |  recognize_google(self, audio_data, key=None, language='en-US', show_all=False)\n",
      "     |      Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Google Speech Recognition API.\n",
      "     |      \n",
      "     |      The Google Speech Recognition API key is specified by ``key``. If not specified, it uses a generic key that works out of the box. This should generally be used for personal or testing purposes only, as it **may be revoked by Google at any time**.\n",
      "     |      \n",
      "     |      To obtain your own API key, simply following the steps on the `API Keys <http://www.chromium.org/developers/how-tos/api-keys>`__ page at the Chromium Developers site. In the Google Developers Console, Google Speech Recognition is listed as \"Speech API\".\n",
      "     |      \n",
      "     |      The recognition language is determined by ``language``, an RFC5646 language tag like ``\"en-US\"`` (US English) or ``\"fr-FR\"`` (International French), defaulting to US English. A list of supported language tags can be found in this `StackOverflow answer <http://stackoverflow.com/a/14302134>`__.\n",
      "     |      \n",
      "     |      Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the raw API response as a JSON dictionary.\n",
      "     |      \n",
      "     |      Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.\n",
      "     |  \n",
      "     |  recognize_google_cloud(self, audio_data, credentials_json=None, language='en-US', preferred_phrases=None, show_all=False)\n",
      "     |      Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Google Cloud Speech API.\n",
      "     |      \n",
      "     |      This function requires a Google Cloud Platform account; see the `Google Cloud Speech API Quickstart <https://cloud.google.com/speech/docs/getting-started>`__ for details and instructions. Basically, create a project, enable billing for the project, enable the Google Cloud Speech API for the project, and set up Service Account Key credentials for the project. The result is a JSON file containing the API credentials. The text content of this JSON file is specified by ``credentials_json``. If not specified, the library will try to automatically `find the default API credentials JSON file <https://developers.google.com/identity/protocols/application-default-credentials>`__.\n",
      "     |      \n",
      "     |      The recognition language is determined by ``language``, which is a BCP-47 language tag like ``\"en-US\"`` (US English). A list of supported language tags can be found in the `Google Cloud Speech API documentation <https://cloud.google.com/speech/docs/languages>`__.\n",
      "     |      \n",
      "     |      If ``preferred_phrases`` is an iterable of phrase strings, those given phrases will be more likely to be recognized over similar-sounding alternatives. This is useful for things like keyword/command recognition or adding new phrases that aren't in Google's vocabulary. Note that the API imposes certain `restrictions on the list of phrase strings <https://cloud.google.com/speech/limits#content>`__.\n",
      "     |      \n",
      "     |      Returns the most likely transcription if ``show_all`` is False (the default). Otherwise, returns the raw API response as a JSON dictionary.\n",
      "     |      \n",
      "     |      Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the credentials aren't valid, or if there is no Internet connection.\n",
      "     |  \n",
      "     |  recognize_houndify(self, audio_data, client_id, client_key, show_all=False)\n",
      "     |      Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Houndify API.\n",
      "     |      \n",
      "     |      The Houndify client ID and client key are specified by ``client_id`` and ``client_key``, respectively. Unfortunately, these are not available without `signing up for an account <https://www.houndify.com/signup>`__. Once logged into the `dashboard <https://www.houndify.com/dashboard>`__, you will want to select \"Register a new client\", and fill in the form as necessary. When at the \"Enable Domains\" page, enable the \"Speech To Text Only\" domain, and then select \"Save & Continue\".\n",
      "     |      \n",
      "     |      To get the client ID and client key for a Houndify client, go to the `dashboard <https://www.houndify.com/dashboard>`__ and select the client's \"View Details\" link. On the resulting page, the client ID and client key will be visible. Client IDs and client keys are both Base64-encoded strings.\n",
      "     |      \n",
      "     |      Currently, only English is supported as a recognition language.\n",
      "     |      \n",
      "     |      Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the raw API response as a JSON dictionary.\n",
      "     |      \n",
      "     |      Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.\n",
      "     |  \n",
      "     |  recognize_ibm(self, audio_data, username, password, language='en-US', show_all=False)\n",
      "     |      Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the IBM Speech to Text API.\n",
      "     |      \n",
      "     |      The IBM Speech to Text username and password are specified by ``username`` and ``password``, respectively. Unfortunately, these are not available without `signing up for an account <https://console.ng.bluemix.net/registration/>`__. Once logged into the Bluemix console, follow the instructions for `creating an IBM Watson service instance <https://www.ibm.com/watson/developercloud/doc/getting_started/gs-credentials.shtml>`__, where the Watson service is \"Speech To Text\". IBM Speech to Text usernames are strings of the form XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX, while passwords are mixed-case alphanumeric strings.\n",
      "     |      \n",
      "     |      The recognition language is determined by ``language``, an RFC5646 language tag with a dialect like ``\"en-US\"`` (US English) or ``\"zh-CN\"`` (Mandarin Chinese), defaulting to US English. The supported language values are listed under the ``model`` parameter of the `audio recognition API documentation <https://www.ibm.com/watson/developercloud/speech-to-text/api/v1/#sessionless_methods>`__, in the form ``LANGUAGE_BroadbandModel``, where ``LANGUAGE`` is the language value.\n",
      "     |      \n",
      "     |      Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response <https://www.ibm.com/watson/developercloud/speech-to-text/api/v1/#sessionless_methods>`__ as a JSON dictionary.\n",
      "     |      \n",
      "     |      Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.\n",
      "     |  \n",
      "     |  recognize_sphinx(self, audio_data, language='en-US', keyword_entries=None, grammar=None, show_all=False)\n",
      "     |      Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using CMU Sphinx.\n",
      "     |      \n",
      "     |      The recognition language is determined by ``language``, an RFC5646 language tag like ``\"en-US\"`` or ``\"en-GB\"``, defaulting to US English. Out of the box, only ``en-US`` is supported. See `Notes on using `PocketSphinx <https://github.com/Uberi/speech_recognition/blob/master/reference/pocketsphinx.rst>`__ for information about installing other languages. This document is also included under ``reference/pocketsphinx.rst``. The ``language`` parameter can also be a tuple of filesystem paths, of the form ``(acoustic_parameters_directory, language_model_file, phoneme_dictionary_file)`` - this allows you to load arbitrary Sphinx models.\n",
      "     |      \n",
      "     |      If specified, the keywords to search for are determined by ``keyword_entries``, an iterable of tuples of the form ``(keyword, sensitivity)``, where ``keyword`` is a phrase, and ``sensitivity`` is how sensitive to this phrase the recognizer should be, on a scale of 0 (very insensitive, more false negatives) to 1 (very sensitive, more false positives) inclusive. If not specified or ``None``, no keywords are used and Sphinx will simply transcribe whatever words it recognizes. Specifying ``keyword_entries`` is more accurate than just looking for those same keywords in non-keyword-based transcriptions, because Sphinx knows specifically what sounds to look for.\n",
      "     |      \n",
      "     |      Sphinx can also handle FSG or JSGF grammars. The parameter ``grammar`` expects a path to the grammar file. Note that if a JSGF grammar is passed, an FSG grammar will be created at the same location to speed up execution in the next run. If ``keyword_entries`` are passed, content of ``grammar`` will be ignored.\n",
      "     |      \n",
      "     |      Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the Sphinx ``pocketsphinx.pocketsphinx.Decoder`` object resulting from the recognition.\n",
      "     |      \n",
      "     |      Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if there are any issues with the Sphinx installation.\n",
      "     |  \n",
      "     |  recognize_wit(self, audio_data, key, show_all=False)\n",
      "     |      Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Wit.ai API.\n",
      "     |      \n",
      "     |      The Wit.ai API key is specified by ``key``. Unfortunately, these are not available without `signing up for an account <https://wit.ai/>`__ and creating an app. You will need to add at least one intent to the app before you can see the API key, though the actual intent settings don't matter.\n",
      "     |      \n",
      "     |      To get the API key for a Wit.ai app, go to the app's overview page, go to the section titled \"Make an API request\", and look for something along the lines of ``Authorization: Bearer XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX``; ``XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`` is the API key. Wit.ai API keys are 32-character uppercase alphanumeric strings.\n",
      "     |      \n",
      "     |      The recognition language is configured in the Wit.ai app settings.\n",
      "     |      \n",
      "     |      Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response <https://wit.ai/docs/http/20141022#get-intent-via-text-link>`__ as a JSON dictionary.\n",
      "     |      \n",
      "     |      Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.\n",
      "     |  \n",
      "     |  record(self, source, duration=None, offset=None)\n",
      "     |      Records up to ``duration`` seconds of audio from ``source`` (an ``AudioSource`` instance) starting at ``offset`` (or at the beginning if not specified) into an ``AudioData`` instance, which it returns.\n",
      "     |      \n",
      "     |      If ``duration`` is not specified, then it will record until there is no more audio input.\n",
      "     |  \n",
      "     |  snowboy_wait_for_hot_word(self, snowboy_location, snowboy_hot_word_files, source, timeout=None)\n",
      "     |  \n",
      "     |  ----------------------------------------------------------------------\n",
      "     |  Class methods defined here:\n",
      "     |  \n",
      "     |  recognize_api(audio_data, client_access_token, language='en', session_id=None, show_all=False) from builtins.type\n",
      "     |  \n",
      "     |  ----------------------------------------------------------------------\n",
      "     |  Methods inherited from AudioSource:\n",
      "     |  \n",
      "     |  __enter__(self)\n",
      "     |  \n",
      "     |  __exit__(self, exc_type, exc_value, traceback)\n",
      "     |  \n",
      "     |  ----------------------------------------------------------------------\n",
      "     |  Data descriptors inherited from AudioSource:\n",
      "     |  \n",
      "     |  __dict__\n",
      "     |      dictionary for instance variables (if defined)\n",
      "     |  \n",
      "     |  __weakref__\n",
      "     |      list of weak references to the object (if defined)\n",
      "    \n",
      "    class RequestError(builtins.Exception)\n",
      "     |  Common base class for all non-exit exceptions.\n",
      "     |  \n",
      "     |  Method resolution order:\n",
      "     |      RequestError\n",
      "     |      builtins.Exception\n",
      "     |      builtins.BaseException\n",
      "     |      builtins.object\n",
      "     |  \n",
      "     |  Data descriptors defined here:\n",
      "     |  \n",
      "     |  __weakref__\n",
      "     |      list of weak references to the object (if defined)\n",
      "     |  \n",
      "     |  ----------------------------------------------------------------------\n",
      "     |  Methods inherited from builtins.Exception:\n",
      "     |  \n",
      "     |  __init__(self, /, *args, **kwargs)\n",
      "     |      Initialize self.  See help(type(self)) for accurate signature.\n",
      "     |  \n",
      "     |  ----------------------------------------------------------------------\n",
      "     |  Static methods inherited from builtins.Exception:\n",
      "     |  \n",
      "     |  __new__(*args, **kwargs) from builtins.type\n",
      "     |      Create and return a new object.  See help(type) for accurate signature.\n",
      "     |  \n",
      "     |  ----------------------------------------------------------------------\n",
      "     |  Methods inherited from builtins.BaseException:\n",
      "     |  \n",
      "     |  __delattr__(self, name, /)\n",
      "     |      Implement delattr(self, name).\n",
      "     |  \n",
      "     |  __getattribute__(self, name, /)\n",
      "     |      Return getattr(self, name).\n",
      "     |  \n",
      "     |  __reduce__(...)\n",
      "     |      Helper for pickle.\n",
      "     |  \n",
      "     |  __repr__(self, /)\n",
      "     |      Return repr(self).\n",
      "     |  \n",
      "     |  __setattr__(self, name, value, /)\n",
      "     |      Implement setattr(self, name, value).\n",
      "     |  \n",
      "     |  __setstate__(...)\n",
      "     |  \n",
      "     |  __str__(self, /)\n",
      "     |      Return str(self).\n",
      "     |  \n",
      "     |  with_traceback(...)\n",
      "     |      Exception.with_traceback(tb) --\n",
      "     |      set self.__traceback__ to tb and return self.\n",
      "     |  \n",
      "     |  ----------------------------------------------------------------------\n",
      "     |  Data descriptors inherited from builtins.BaseException:\n",
      "     |  \n",
      "     |  __cause__\n",
      "     |      exception cause\n",
      "     |  \n",
      "     |  __context__\n",
      "     |      exception context\n",
      "     |  \n",
      "     |  __dict__\n",
      "     |  \n",
      "     |  __suppress_context__\n",
      "     |  \n",
      "     |  __traceback__\n",
      "     |  \n",
      "     |  args\n",
      "    \n",
      "    class UnknownValueError(builtins.Exception)\n",
      "     |  Common base class for all non-exit exceptions.\n",
      "     |  \n",
      "     |  Method resolution order:\n",
      "     |      UnknownValueError\n",
      "     |      builtins.Exception\n",
      "     |      builtins.BaseException\n",
      "     |      builtins.object\n",
      "     |  \n",
      "     |  Data descriptors defined here:\n",
      "     |  \n",
      "     |  __weakref__\n",
      "     |      list of weak references to the object (if defined)\n",
      "     |  \n",
      "     |  ----------------------------------------------------------------------\n",
      "     |  Methods inherited from builtins.Exception:\n",
      "     |  \n",
      "     |  __init__(self, /, *args, **kwargs)\n",
      "     |      Initialize self.  See help(type(self)) for accurate signature.\n",
      "     |  \n",
      "     |  ----------------------------------------------------------------------\n",
      "     |  Static methods inherited from builtins.Exception:\n",
      "     |  \n",
      "     |  __new__(*args, **kwargs) from builtins.type\n",
      "     |      Create and return a new object.  See help(type) for accurate signature.\n",
      "     |  \n",
      "     |  ----------------------------------------------------------------------\n",
      "     |  Methods inherited from builtins.BaseException:\n",
      "     |  \n",
      "     |  __delattr__(self, name, /)\n",
      "     |      Implement delattr(self, name).\n",
      "     |  \n",
      "     |  __getattribute__(self, name, /)\n",
      "     |      Return getattr(self, name).\n",
      "     |  \n",
      "     |  __reduce__(...)\n",
      "     |      Helper for pickle.\n",
      "     |  \n",
      "     |  __repr__(self, /)\n",
      "     |      Return repr(self).\n",
      "     |  \n",
      "     |  __setattr__(self, name, value, /)\n",
      "     |      Implement setattr(self, name, value).\n",
      "     |  \n",
      "     |  __setstate__(...)\n",
      "     |  \n",
      "     |  __str__(self, /)\n",
      "     |      Return str(self).\n",
      "     |  \n",
      "     |  with_traceback(...)\n",
      "     |      Exception.with_traceback(tb) --\n",
      "     |      set self.__traceback__ to tb and return self.\n",
      "     |  \n",
      "     |  ----------------------------------------------------------------------\n",
      "     |  Data descriptors inherited from builtins.BaseException:\n",
      "     |  \n",
      "     |  __cause__\n",
      "     |      exception cause\n",
      "     |  \n",
      "     |  __context__\n",
      "     |      exception context\n",
      "     |  \n",
      "     |  __dict__\n",
      "     |  \n",
      "     |  __suppress_context__\n",
      "     |  \n",
      "     |  __traceback__\n",
      "     |  \n",
      "     |  args\n",
      "    \n",
      "    class WaitTimeoutError(builtins.Exception)\n",
      "     |  Common base class for all non-exit exceptions.\n",
      "     |  \n",
      "     |  Method resolution order:\n",
      "     |      WaitTimeoutError\n",
      "     |      builtins.Exception\n",
      "     |      builtins.BaseException\n",
      "     |      builtins.object\n",
      "     |  \n",
      "     |  Data descriptors defined here:\n",
      "     |  \n",
      "     |  __weakref__\n",
      "     |      list of weak references to the object (if defined)\n",
      "     |  \n",
      "     |  ----------------------------------------------------------------------\n",
      "     |  Methods inherited from builtins.Exception:\n",
      "     |  \n",
      "     |  __init__(self, /, *args, **kwargs)\n",
      "     |      Initialize self.  See help(type(self)) for accurate signature.\n",
      "     |  \n",
      "     |  ----------------------------------------------------------------------\n",
      "     |  Static methods inherited from builtins.Exception:\n",
      "     |  \n",
      "     |  __new__(*args, **kwargs) from builtins.type\n",
      "     |      Create and return a new object.  See help(type) for accurate signature.\n",
      "     |  \n",
      "     |  ----------------------------------------------------------------------\n",
      "     |  Methods inherited from builtins.BaseException:\n",
      "     |  \n",
      "     |  __delattr__(self, name, /)\n",
      "     |      Implement delattr(self, name).\n",
      "     |  \n",
      "     |  __getattribute__(self, name, /)\n",
      "     |      Return getattr(self, name).\n",
      "     |  \n",
      "     |  __reduce__(...)\n",
      "     |      Helper for pickle.\n",
      "     |  \n",
      "     |  __repr__(self, /)\n",
      "     |      Return repr(self).\n",
      "     |  \n",
      "     |  __setattr__(self, name, value, /)\n",
      "     |      Implement setattr(self, name, value).\n",
      "     |  \n",
      "     |  __setstate__(...)\n",
      "     |  \n",
      "     |  __str__(self, /)\n",
      "     |      Return str(self).\n",
      "     |  \n",
      "     |  with_traceback(...)\n",
      "     |      Exception.with_traceback(tb) --\n",
      "     |      set self.__traceback__ to tb and return self.\n",
      "     |  \n",
      "     |  ----------------------------------------------------------------------\n",
      "     |  Data descriptors inherited from builtins.BaseException:\n",
      "     |  \n",
      "     |  __cause__\n",
      "     |      exception cause\n",
      "     |  \n",
      "     |  __context__\n",
      "     |      exception context\n",
      "     |  \n",
      "     |  __dict__\n",
      "     |  \n",
      "     |  __suppress_context__\n",
      "     |  \n",
      "     |  __traceback__\n",
      "     |  \n",
      "     |  args\n",
      "    \n",
      "    WavFile = class AudioFile(AudioSource)\n",
      "     |  WavFile(filename_or_fileobject)\n",
      "     |  \n",
      "     |  Creates a new ``AudioFile`` instance given a WAV/AIFF/FLAC audio file ``filename_or_fileobject``. Subclass of ``AudioSource``.\n",
      "     |  \n",
      "     |  If ``filename_or_fileobject`` is a string, then it is interpreted as a path to an audio file on the filesystem. Otherwise, ``filename_or_fileobject`` should be a file-like object such as ``io.BytesIO`` or similar.\n",
      "     |  \n",
      "     |  Note that functions that read from the audio (such as ``recognizer_instance.record`` or ``recognizer_instance.listen``) will move ahead in the stream. For example, if you execute ``recognizer_instance.record(audiofile_instance, duration=10)`` twice, the first time it will return the first 10 seconds of audio, and the second time it will return the 10 seconds of audio right after that. This is always reset to the beginning when entering an ``AudioFile`` context.\n",
      "     |  \n",
      "     |  WAV files must be in PCM/LPCM format; WAVE_FORMAT_EXTENSIBLE and compressed WAV are not supported and may result in undefined behaviour.\n",
      "     |  \n",
      "     |  Both AIFF and AIFF-C (compressed AIFF) formats are supported.\n",
      "     |  \n",
      "     |  FLAC files must be in native FLAC format; OGG-FLAC is not supported and may result in undefined behaviour.\n",
      "     |  \n",
      "     |  Method resolution order:\n",
      "     |      AudioFile\n",
      "     |      AudioSource\n",
      "     |      builtins.object\n",
      "     |  \n",
      "     |  Methods defined here:\n",
      "     |  \n",
      "     |  __enter__(self)\n",
      "     |  \n",
      "     |  __exit__(self, exc_type, exc_value, traceback)\n",
      "     |  \n",
      "     |  __init__(self, filename_or_fileobject)\n",
      "     |      Initialize self.  See help(type(self)) for accurate signature.\n",
      "     |  \n",
      "     |  ----------------------------------------------------------------------\n",
      "     |  Data and other attributes defined here:\n",
      "     |  \n",
      "     |  AudioFileStream = <class 'speech_recognition.AudioFile.AudioFileStream...\n",
      "     |  \n",
      "     |  \n",
      "     |  ----------------------------------------------------------------------\n",
      "     |  Data descriptors inherited from AudioSource:\n",
      "     |  \n",
      "     |  __dict__\n",
      "     |      dictionary for instance variables (if defined)\n",
      "     |  \n",
      "     |  __weakref__\n",
      "     |      list of weak references to the object (if defined)\n",
      "\n",
      "FUNCTIONS\n",
      "    get_flac_converter()\n",
      "        Returns the absolute path of a FLAC converter executable, or raises an OSError if none can be found.\n",
      "    \n",
      "    recognize_api(self, audio_data, client_access_token, language='en', session_id=None, show_all=False)\n",
      "    \n",
      "    shutil_which(pgm)\n",
      "        Python 2 compatibility: backport of ``shutil.which()`` from Python 3\n",
      "\n",
      "DATA\n",
      "    __license__ = 'BSD'\n",
      "\n",
      "VERSION\n",
      "    3.8.1\n",
      "\n",
      "AUTHOR\n",
      "    Anthony Zhang (Uberi)\n",
      "\n",
      "FILE\n",
      "    c:\\programdata\\anaconda3\\lib\\site-packages\\speech_recognition\\__init__.py\n",
      "\n",
      "\n"
     ]
    }
   ],
   "source": [
    "help(speech_recognition)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.8"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  },
  "varInspector": {
   "cols": {
    "lenName": 16,
    "lenType": 16,
    "lenVar": 40
   },
   "kernels_config": {
    "python": {
     "delete_cmd_postfix": "",
     "delete_cmd_prefix": "del ",
     "library": "var_list.py",
     "varRefreshCmd": "print(var_dic_list())"
    },
    "r": {
     "delete_cmd_postfix": ") ",
     "delete_cmd_prefix": "rm(",
     "library": "var_list.r",
     "varRefreshCmd": "cat(var_dic_list()) "
    }
   },
   "types_to_exclude": [
    "module",
    "function",
    "builtin_function_or_method",
    "instance",
    "_Feature"
   ],
   "window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
