#!/usr/bin/env python3
# -*- coding:utf-8 -*-

"""
WebM格式音频转换为文字的工具
使用讯飞开放平台的语音识别API实现
"""

import os
import sys
import _thread as thread
import time
from time import mktime
import websocket
import base64
import datetime
import hashlib
import hmac
import json
import ssl
from datetime import datetime
from urllib.parse import urlencode
from wsgiref.handlers import format_date_time

# Optional dependency: pydub would be used for audio-format pre-processing.
# NOTE(review): PYDUB_AVAILABLE is not referenced anywhere in this file's
# visible code — the flag appears to be unused; confirm before removing.
try:
    from pydub import AudioSegment
    PYDUB_AVAILABLE = True
except ImportError:
    print("警告: pydub库未安装，无法进行音频格式预处理")
    PYDUB_AVAILABLE = False

# Frame-position markers for the streaming recognition protocol.
STATUS_FIRST_FRAME = 0  # first audio frame of a session
STATUS_CONTINUE_FRAME = 1  # intermediate frame
STATUS_LAST_FRAME = 2  # final (terminating) frame

class WsParam(object):
    """Connection parameters for the iFlytek IAT (v1) real-time ASR API.

    Builds the authenticated WebSocket URL using iFlytek's HMAC-SHA256
    signature scheme: the host, an RFC 1123 date and the request line are
    signed with the API secret, and the resulting authorization header is
    passed base64-encoded as a query-string parameter.
    """

    def __init__(self, app_id: str, api_key: str, api_secret: str, audio_file: str):
        self.APPID = app_id
        self.APIKey = api_key
        self.APISecret = api_secret
        self.AudioFile = audio_file
        # Recognition parameters: Mandarin Chinese, "slm" domain, dynamic
        # correction enabled (dwa=wpgs), plain UTF-8 text results.
        self.iat_params = {
            "domain": "slm",
            "language": "zh_cn",
            "accent": "mandarin",
            "dwa": "wpgs",
            "result": {
                "encoding": "utf8",
                "compress": "raw",
                "format": "plain"
            }
        }

    def create_url(self) -> str:
        """Return the signed WebSocket URL (only valid near the current time).

        Returns:
            A ``wss://`` URL carrying ``authorization``, ``date`` and
            ``host`` query parameters.
        """
        host = "iat.xf-yun.com"
        # BUG FIX: the endpoint is TLS-only, and run_forever() is invoked
        # with an sslopt argument that is ignored for plain ws:// URLs —
        # the scheme must be wss://.
        base_url = "wss://" + host + "/v1"

        # RFC 1123 timestamp required by the signature scheme.
        now = datetime.now()
        date = format_date_time(mktime(now.timetuple()))

        # Canonical string to sign: host, date and request line.
        signature_origin = "host: " + host + "\n"
        signature_origin += "date: " + date + "\n"
        signature_origin += "GET " + "/v1 " + "HTTP/1.1"

        # HMAC-SHA256 over the canonical string, then base64.
        signature_sha = hmac.new(
            self.APISecret.encode('utf-8'),
            signature_origin.encode('utf-8'),
            digestmod=hashlib.sha256,
        ).digest()
        signature = base64.b64encode(signature_sha).decode(encoding='utf-8')

        authorization_origin = "api_key=\"%s\", algorithm=\"%s\", headers=\"%s\", signature=\"%s\"" % (
            self.APIKey, "hmac-sha256", "host date request-line", signature)
        # The whole authorization header is itself base64-encoded.
        authorization = base64.b64encode(authorization_origin.encode('utf-8')).decode(encoding='utf-8')

        # Authentication material travels as query-string parameters.
        query = {
            "authorization": authorization,
            "date": date,
            "host": host
        }
        return base_url + '?' + urlencode(query)

class SpeechToTextConverter:
    """Converts one audio file to text through the iFlytek real-time ASR
    WebSocket API, streaming the file in small frames and accumulating the
    recognized text fragments into ``self.result_text``.
    """

    def __init__(self, app_id=None, api_key=None, api_secret=None):
        # Fall back to the sample credentials when none are supplied.
        # SECURITY NOTE(review): hard-coded API credentials should not ship
        # in source control — replace with your own or load from config/env.
        self.app_id = app_id or "bede12ef"
        self.api_key = api_key or "04433a12bb2945c2cd987f27e944b675"
        self.api_secret = api_secret or "Zjc1ZmJkNGMzNmVhZDMxMWVhYTRhMjMx"
        # Accumulates the text recognized so far for the current run.
        self.result_text = ""

    def preprocess_audio(self, input_path: str) -> str:
        """Validate the input file and return the path to stream.

        Args:
            input_path: path to a ``.webm`` audio file.

        Returns:
            The same path, once validated.

        Raises:
            FileNotFoundError: if ``input_path`` does not exist.
            ValueError: if the file does not have a ``.webm`` extension.
        """
        # Fail fast before opening a network session.
        if not os.path.exists(input_path):
            raise FileNotFoundError(f"输入文件不存在: {input_path}")

        # Only .webm inputs are accepted by this tool.
        if not input_path.lower().endswith('.webm'):
            raise ValueError(f"输入文件不是WebM格式: {input_path}")

        # NOTE(review): the file is later streamed as-is with
        # encoding="raw" — confirm the service actually accepts a WebM
        # container here; raw PCM may be what it expects.
        print(f"音频文件准备就绪: {input_path}")
        return input_path

    def convert_to_text(self, audio_path: str, output_text_path=None) -> str:
        """Run speech recognition over ``audio_path``.

        Args:
            audio_path: path to the input ``.webm`` file.
            output_text_path: optional path; when given, the final text is
                also written there as UTF-8.

        Returns:
            The accumulated recognition text (may be empty on failure).
        """
        # Validate first; raises on missing file / wrong extension.
        processed_audio = self.preprocess_audio(audio_path)
        
        # Reset the accumulator so repeated calls do not concatenate runs.
        self.result_text = ""
        
        print(f"开始语音识别: {processed_audio}")
        
        # Signing / session parameters for this connection.
        ws_param = WsParam(
            app_id=self.app_id,
            api_key=self.api_key,
            api_secret=self.api_secret,
            audio_file=processed_audio
        )
        
        # --- WebSocket callbacks ---------------------------------------
        def on_message(ws, message):
            # NOTE(review): this declaration is unnecessary — ``self`` is
            # never rebound here; only its attributes are mutated.
            nonlocal self
            message = json.loads(message)
            code = message["header"]["code"]
            status = message["header"]["status"]
            if code != 0:
                # Non-zero code means the request failed; stop the session.
                print(f"请求错误：{code}")
                ws.close()
            else:
                payload = message.get("payload")
                if payload:
                    # The text field is base64-encoded JSON whose "ws" list
                    # holds candidate words ("cw") per segment; concatenate
                    # the word strings ("w") to form this fragment.
                    text = payload["result"]["text"]
                    text = json.loads(str(base64.b64decode(text), "utf8"))
                    text_ws = text['ws']
                    result = ''
                    for i in text_ws:
                        for j in i["cw"]:
                            w = j["w"]
                            result += w
                    print(result)
                    self.result_text += result
                if status == 2:
                    # status == 2 marks the server's final message.
                    ws.close()
                    
                    # Persist the transcript if an output path was given.
                    if output_text_path:
                        with open(output_text_path, 'w', encoding='utf-8') as f:
                            f.write(self.result_text)
                        print(f"识别结果已保存到: {output_text_path}")
                    print("语音识别完成")
        
        def on_error(ws, error):
            print(f"### 错误: {error}")
        
        def on_close(ws, close_status_code, close_msg):
            print("### 连接已关闭 ###")
        
        def on_open(ws):
            # Stream the audio from a background thread so the WebSocket
            # event loop inside run_forever() stays responsive.
            def run(*args):
                frame_size = 1280  # bytes of audio sent per frame
                interval = 0.04  # pause between frames (seconds)
                status = STATUS_FIRST_FRAME  # frame-position state machine

                try:
                    with open(processed_audio, "rb") as fp:
                        while True:
                            buf = fp.read(frame_size)
                            if not buf:
                                # EOF: exit the loop and send the final frame.
                                status = STATUS_LAST_FRAME
                                break
                            
                            audio = str(base64.b64encode(buf), 'utf-8')

                            # First frame carries the full parameter set.
                            if status == STATUS_FIRST_FRAME:
                                d = {
                                    "header": {
                                        "status": 0,
                                        "app_id": ws_param.APPID
                                    },
                                    "parameter": {
                                        "iat": ws_param.iat_params
                                    },
                                    "payload": {
                                        "audio": {
                                            "audio": audio, 
                                            "sample_rate": 16000, 
                                            "encoding": "raw"
                                        }
                                    }
                                }
                                ws.send(json.dumps(d))
                                status = STATUS_CONTINUE_FRAME
                            # Intermediate frames repeat the parameters with
                            # header.status = 1.
                            elif status == STATUS_CONTINUE_FRAME:
                                d = {
                                    "header": {
                                        "status": 1,
                                        "app_id": ws_param.APPID
                                    },
                                    "parameter": {
                                        "iat": ws_param.iat_params
                                    },
                                    "payload": {
                                        "audio": {
                                            "audio": audio, 
                                            "sample_rate": 16000, 
                                            "encoding": "raw"
                                        }
                                    }
                                }
                                ws.send(json.dumps(d))
                            # Simulate the real-time audio sampling interval.
                            time.sleep(interval)
                    
                    # Final (empty) frame with header.status = 2 tells the
                    # server the audio stream is complete.
                    d = {
                        "header": {
                            "status": 2,
                            "app_id": ws_param.APPID
                        },
                        "parameter": {
                            "iat": ws_param.iat_params
                        },
                        "payload": {
                            "audio": {
                                "audio": "", 
                                "sample_rate": 16000, 
                                "encoding": "raw"
                            }
                        }
                    }
                    ws.send(json.dumps(d))
                except Exception as e:
                    print(f"处理音频时出错: {str(e)}")
                    ws.close()
            
            thread.start_new_thread(run, ())
        
        # --- run the session -------------------------------------------
        websocket.enableTrace(False)
        ws_url = ws_param.create_url()
        ws = websocket.WebSocketApp(
            ws_url, 
            on_message=on_message, 
            on_error=on_error, 
            on_close=on_close
        )
        ws.on_open = on_open
        
        # Blocks until the server (or an error handler) closes the
        # connection. Certificate verification is disabled for TLS.
        ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})
        
        return self.result_text


def main():
    """Command-line entry point: parse arguments and run the conversion."""
    argv = sys.argv

    # Require at least the input file; show usage otherwise.
    if len(argv) < 2:
        print("用法: python webm_to_text.py <input_webm_file> [output_text_file]")
        print("示例: python webm_to_text.py 1.webm output.txt")
        sys.exit(1)

    input_file = argv[1]
    output_file = argv[2] if len(argv) > 2 else None

    try:
        # Run the recognition; any failure is reported and exits non-zero.
        result = SpeechToTextConverter().convert_to_text(input_file, output_file)
    except Exception as e:
        print(f"错误: {str(e)}")
        sys.exit(1)
    else:
        # Without an output file, dump the transcript to stdout.
        if not output_file:
            print(f"\n识别结果:\n{result}")


if __name__ == "__main__":
    main()