#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Date    : 2023-04-01 19:04:55
import os
import json
import wave
import requests
import time
import base64
from pyaudio import PyAudio, paInt16
import torch
import wenetruntime as wenet
import argparse
import webbrowser

def get_args(argv=None):
    """Parse command-line arguments for ASR inference.

    Args:
        argv: Optional list of argument strings; defaults to ``sys.argv[1:]``
            (backward-compatible — existing callers pass nothing).

    Returns:
        argparse.Namespace with config, model_dir, wav, lang, nbest,
        streaming, and timestamp attributes.
    """
    parser = argparse.ArgumentParser(description='asr inference')
    parser.add_argument('--config', default='libtorch_model/train.yaml',
                        help='config file')
    parser.add_argument('--model_dir', default="libtorch_model/",
                        help='inference model')
    parser.add_argument('--wav', default="recordspeech/output.wav",
                        help='record wav file')
    parser.add_argument('--lang', default='en', choices=['chs', 'en'],
                        help='The language you used, chs for Chinese, and en for English.')
    # type=int so "--nbest 2" parses as an int, not the string "2".
    parser.add_argument('--nbest', default=1, type=int, help='top-n best result.')
    # store_true flags: previously "--streaming False" produced the truthy
    # string "False"; now the bare flag enables the mode, absence disables it.
    parser.add_argument('--streaming', action='store_true', help='streaming decoding')
    parser.add_argument('--timestamp', action='store_true', help='display timestamp.')
    return parser.parse_args(argv)

def speech2text():
    """Recognize speech from the wav file given by the --wav CLI argument.

    Returns:
        The best recognized sentence as a string; falls back to the
        decoder's raw answer if its format is unexpected.
    """
    args = get_args()
    # Decoder configuration is identical for both modes — build it once.
    decoder = wenet.Decoder(model_dir=args.model_dir,
                            lang=args.lang,
                            nbest=args.nbest,
                            enable_timestamp=args.timestamp)
    ## Streaming recognition: feed the decoder 0.5-second chunks.
    if args.streaming:
        with wave.open(args.wav, 'rb') as fin:
            assert fin.getnchannels() == 1
            wav = fin.readframes(fin.getnframes())
        # We suppose the wav is 16k, 16bits, and decode every 0.5 seconds
        # (0.5 s * 16000 samples/s * 2 bytes/sample).
        interval = int(0.5 * 16000) * 2
        # Guard: previously `ans` was unbound (UnboundLocalError) when the
        # wav contained no frames.
        ans = ''
        for i in range(0, len(wav), interval):
            last = i + interval >= len(wav)
            chunk_wav = wav[i: i + interval]
            ans = decoder.decode(chunk_wav, last)
    ## Non-streaming recognition: decode the whole file at once.
    else:
        ans = decoder.decode_wav(args.wav)
    # The decoder returns a JSON string, e.g. {"nbest": [{"sentence": "..."}]}.
    # Parse it properly instead of the old fragile split(' ')[12] token hack,
    # which broke whenever the sentence or timestamp layout shifted.
    try:
        return json.loads(ans)['nbest'][0]['sentence'].strip('"\n')
    except (ValueError, KeyError, IndexError, TypeError):
        # Unexpected format — return the raw answer rather than crash.
        return ans

def speech2text_local(audio_file):
    """Recognize speech from a local wav file.

    Args:
        audio_file: Path to a mono wav file (assumed 16 kHz, 16-bit —
            TODO confirm against the model's expected sample rate).

    Returns:
        The decoder's raw answer (a JSON-formatted string); empty string
        when a streaming input contains no audio frames.
    """
    args = get_args()
    # Decoder configuration is identical for both modes — build it once.
    decoder = wenet.Decoder(model_dir=args.model_dir,
                            lang=args.lang,
                            nbest=args.nbest,
                            enable_timestamp=args.timestamp)
    ## Streaming recognition: feed the decoder 0.5-second chunks.
    if args.streaming:
        with wave.open(audio_file, 'rb') as fin:
            assert fin.getnchannels() == 1
            wav = fin.readframes(fin.getnframes())
        # We suppose the wav is 16k, 16bits, and decode every 0.5 seconds
        # (0.5 s * 16000 samples/s * 2 bytes/sample).
        interval = int(0.5 * 16000) * 2
        # Guard: previously `ans` was unbound (UnboundLocalError) when the
        # wav contained no frames.
        ans = ''
        for i in range(0, len(wav), interval):
            last = i + interval >= len(wav)
            chunk_wav = wav[i: i + interval]
            ans = decoder.decode(chunk_wav, last)
    ## Non-streaming recognition: decode the whole file at once.
    else:
        ans = decoder.decode_wav(audio_file)
    return ans

def batch_recognition(path):
    """Recognize every .wav file in *path*, appending results to result.txt.

    Args:
        path: Directory containing .wav files.

    Each output line is "<filename> <recognized text>".
    """
    with open('result.txt', 'a', encoding='utf-8') as f:
        for name in os.listdir(path):
            # endswith is stricter than the old `".wav" in wav_path` substring
            # test, which also matched names like "foo.wav.bak" or any entry
            # inside a directory whose path happened to contain ".wav".
            if name.endswith('.wav'):
                res = speech2text_local(os.path.join(path, name))
                f.write(name + " " + res + '\n')


class ASR_API():
    """Record microphone audio with PyAudio and recognize it via the Baidu
    speech REST API.

    Typical flow: my_record() -> get_audio() -> speech2text(data, getToken()).
    """

    def __init__(self, pid, framerate=16000, num_samples=2000,
                channels=1, sampwidth=2):
        self.framerate = framerate      # sampling rate (Hz)
        self.num_samples = num_samples  # samples per read from the stream
        self.channels = channels        # number of audio channels
        self.sampwidth = sampwidth      # sample width in bytes (2 = 16-bit)
        self.FILEPATH = 'speech.wav'    # where my_record() stores the capture
        # Baidu dev_pid: 1536 Mandarin (simple English), 1537 Mandarin with
        # punctuation, 1737 English, 1637 Cantonese, 1837 Sichuanese.
        self.pid = pid

        self.base_url = "https://openapi.baidu.com/oauth/2.0/token?grant_type=client_credentials&client_id=%s&client_secret=%s"
        # SECURITY: credentials are hard-coded in source; they should be moved
        # to environment variables or a config file and rotated.
        self.APIKey = "fu7lf3QTidelXiogwXnsP7gN"
        self.SecretKey = "BYzb7OeF6ZTqccrsCsZxSpERhEq9UWjD"

        self.HOST = self.base_url % (self.APIKey, self.SecretKey)

    def getToken(self):
        """Fetch an OAuth access token from Baidu; raises KeyError on failure."""
        res = requests.post(self.HOST)
        return res.json()['access_token']

    def save_wave_file(self, filepath, data):
        """Write a list of raw PCM byte chunks to *filepath* as a wav file."""
        # Context manager guarantees the header is finalized and the file
        # closed even if a write fails (the old code leaked on error).
        with wave.open(filepath, 'wb') as wf:
            wf.setnchannels(self.channels)
            wf.setsampwidth(self.sampwidth)
            wf.setframerate(self.framerate)
            wf.writeframes(b''.join(data))

    def my_record(self):
        """Record ~4 seconds from the default microphone into FILEPATH."""
        pa = PyAudio()
        stream = pa.open(format=paInt16, channels=self.channels,
                         rate=self.framerate, input=True,
                         frames_per_buffer=self.num_samples)
        my_buf = []
        t = time.time()
        print('正在录音...')
        try:
            while time.time() < t + 4:  # seconds
                my_buf.append(stream.read(self.num_samples))
        finally:
            # The old code never stopped the stream or terminated PyAudio,
            # leaking the audio device handle on every recording.
            stream.stop_stream()
            stream.close()
            pa.terminate()
        print('录音结束.')
        self.save_wave_file(self.FILEPATH, my_buf)

    def get_audio(self):
        """Return the raw bytes of the last recording (FILEPATH)."""
        with open(self.FILEPATH, 'rb') as f:
            data = f.read()
        return data

    ## Recognize speech audio as text via the Baidu REST endpoint.
    def speech2text(self, speech_data, token):
        """POST *speech_data* (raw wav bytes) to Baidu ASR.

        Args:
            speech_data: raw audio bytes (wav, 16 kHz as configured).
            token: OAuth token obtained from getToken().

        Returns:
            The top recognition result string on success, otherwise the
            full JSON response dict for the caller to inspect.
        """
        FORMAT = 'wav'
        RATE = '16000'
        CHANNEL = 1
        CUID = '*******'  # device id placeholder required by the API
        SPEECH = base64.b64encode(speech_data).decode('utf-8')

        data = {
            'format': FORMAT,
            'rate': RATE,
            'channel': CHANNEL,
            'cuid': CUID,
            'len': len(speech_data),
            'speech': SPEECH,
            'token': token,
            'dev_pid': self.pid
        }
        url = 'https://vop.baidu.com/server_api'
        headers = {'Content-Type': 'application/json'}
        r = requests.post(url, json=data, headers=headers)
        Result = r.json()
        if 'result' in Result:
            return Result['result'][0]
        else:
            return Result


if __name__ == '__main__':
    # Run non-streaming recognition on a sample file and show the result.
    result = speech2text_local('Bnjs1245.wav')
    print(result)
    # batch_recognition()
