import logging
import json
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import soundfile
import os

class AudioTranscriber:
    """Speech-to-text transcriber backed by a Wav2Vec2 CTC model.

    Typical flow: construct, call :meth:`load_model` once, then call
    :meth:`predict` with a JSON string containing ``speech_array`` (a list of
    float samples) and ``sampling_rate``.
    """

    def __init__(self, model_path=None):
        """Initialize the transcriber (does not load the model yet).

        Args:
            model_path: Directory with the pretrained model/processor.
                Falls back to the ``SM_MODEL_DIR`` environment variable,
                then a local modelscope cache path.
        """
        # Use a module-named logger instead of the root logger, and attach a
        # handler only if none exists yet.  The original code added a new
        # StreamHandler to the ROOT logger on every instantiation, which
        # duplicated every log line and reconfigured unrelated libraries.
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.INFO)
        if not self.logger.handlers:
            self.logger.addHandler(logging.StreamHandler())

        # Resolve the model directory (SageMaker-style env override first).
        self.model_path = model_path or os.environ.get(
            "SM_MODEL_DIR", "/root/.cache/modelscope/hub/models/"
        )
        self.device = self.get_device()
        self.model = None
        self.processor = None

        self.logger.info("Libraries are loaded")

    def get_device(self):
        """Return the compute device string: ``'cuda:0'`` if available, else ``'cpu'``."""
        device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
        self.logger.info(f"Using device: {device}")
        return device

    def load_model(self):
        """Load the pretrained model and processor from ``self.model_path``.

        Returns:
            True on success, False if loading failed (error is logged).
        """
        try:
            self.model = Wav2Vec2ForCTC.from_pretrained(self.model_path).to(self.device)
            self.processor = Wav2Vec2Processor.from_pretrained(self.model_path)
            self.logger.info("Model and processor are loaded")
            return True
        except Exception as e:
            self.logger.error(f"Failed to load model: {e}")
            return False

    def process_input(self, json_request_data):
        """Parse the incoming JSON request string.

        Args:
            json_request_data: JSON document as a string.

        Returns:
            The decoded object, or None if the JSON is invalid.
        """
        try:
            input_data = json.loads(json_request_data)
            self.logger.info("Input data is processed")
            return input_data
        except json.JSONDecodeError as e:
            self.logger.error(f"Failed to decode input JSON: {e}")
            return None

    def perform_prediction(self, input_data):
        """Run speech recognition on parsed input.

        Args:
            input_data: Mapping with keys ``speech_array`` (float samples)
                and ``sampling_rate``; may be None on upstream failure.

        Returns:
            The transcript string, or None if input/model is missing or
            inference fails (errors are logged, not raised).
        """
        # Guard: require parsed input AND a loaded model/processor.
        if input_data is None or self.model is None or self.processor is None:
            return None

        try:
            self.logger.info("Starting inference.")

            # Extract the raw audio payload.  Missing keys raise KeyError,
            # which is caught and logged by the broad handler below.
            speech_array = input_data['speech_array']
            sampling_rate = input_data['sampling_rate']

            # Preprocess: tokenize/normalize the waveform into model tensors.
            input_values = self.processor(
                speech_array,
                sampling_rate=sampling_rate,
                return_tensors="pt"
            ).input_values.to(self.device)

            # Inference without gradient tracking.
            with torch.no_grad():
                logits = self.model(input_values).logits

            # Greedy CTC decoding: argmax over the vocabulary per frame.
            pred_ids = torch.argmax(logits, dim=-1)
            transcript = self.processor.batch_decode(pred_ids)[0]

            return transcript
        except Exception as e:
            self.logger.error(f"Prediction failed: {e}")
            return None

    def prepare_output(self, transcript):
        """Serialize the transcript for the caller.

        Args:
            transcript: Transcript string or None.

        Returns:
            A ``(json_body, content_type)`` tuple, or None if ``transcript``
            is None or serialization fails.
        """
        if transcript is None:
            return None

        try:
            return json.dumps(transcript), 'application/json'
        except Exception as e:
            self.logger.error(f"Failed to prepare output JSON: {e}")
            return None

    def predict(self, json_request_data):
        """Full pipeline: parse JSON -> run inference -> serialize result.

        Returns:
            ``(json_body, content_type)`` on success, None on any failure.
        """
        input_data = self.process_input(json_request_data)
        transcript = self.perform_prediction(input_data)
        output = self.prepare_output(transcript)
        return output

# Usage example: load a local wav file, wrap it as a JSON request, and print
# the transcription result.
if __name__ == "__main__":
    # Create a transcriber instance and load the model.
    transcriber = AudioTranscriber()
    if transcriber.load_model():
        # Read audio samples from disk.
        file_name = '../MKH800_19_0001.wav'
        speech_array, sampling_rate = soundfile.read(file_name)

        print(f"len of speech is {len(speech_array)}")
        print(f"type of speech is {type(speech_array)}")
        # soundfile.read returns a 2-D numpy array (frames, channels) for
        # multi-channel audio.  The original `isinstance(speech_array[0], list)`
        # check could never be true (rows are ndarrays, not lists), so the two
        # redundant checks are consolidated into one shape-based test.
        if speech_array.ndim > 1 and speech_array.shape[1] == 2:
            print("its a stereo audio file")
        json_request_data = {
            "speech_array": speech_array.tolist(),
            "sampling_rate": sampling_rate,
        }
        json_request_str = json.dumps(json_request_data)

        # Run the full prediction pipeline; result is (json_body, content_type)
        # on success, None on failure.
        result = transcriber.predict(json_request_str)
        if result:
            print(f"转录结果: {result[0]}")
        else:
            print("预测失败")
    else:
        print("模型加载失败，无法进行预测")