import os
import re
import time
import json
import logging
from pydub import AudioSegment
from database import find_previous_in_time_file
from minio_manager import minio_manager
from config import UPLOAD_FOLDER, PROCESSED_FOLDER, SPEECHEMO_FOLDER
from asr_manager import process_audio_speech_emotion
from datetime import datetime

# Module-level logger shared by every processing helper in this file.
logger = logging.getLogger('audio_processor')

def process_speech_emotion(processed_file_path):
    """Run speech recognition and emotion analysis on a processed audio file.

    Args:
        processed_file_path: path of the processed audio file.

    Returns:
        str: path of the generated text file, or None on failure.
    """
    try:
        # Derive the output .txt path from the audio file's stem.
        stem = os.path.splitext(os.path.basename(processed_file_path))[0]
        output_txt_path = os.path.join(SPEECHEMO_FOLDER, f"{stem}.txt")

        logger.info(f"开始对音频文件进行语音识别: {processed_file_path}")

        # Delegate the actual ASR + emotion work to the asr_manager helper.
        if process_audio_speech_emotion(processed_file_path, output_txt_path):
            logger.info(f"语音识别完成，结果保存到: {output_txt_path}")
            return output_txt_path

        logger.error(f"语音识别失败: {processed_file_path}")
        return None

    except ImportError as e:
        # The underlying implementation imports spk_emotion lazily.
        logger.error(f"无法导入spk_emotion模块: {e}")
        return None
    except Exception as e:
        logger.error(f"语音识别处理过程中发生错误: {str(e)}", exc_info=True)
        return None

def is_first_chunk(chunk_index):
    """Return True if chunk_index identifies the first chunk of a recording.

    Accepts several formats: A001, A1, 001, 1, FIRST, etc.

    Args:
        chunk_index: chunk label parsed from the file name; may be None/empty.

    Returns:
        bool: True for the first chunk, False otherwise.
    """
    if not chunk_index:
        return False

    # Extract the leading digit run; uses the module-level `re` import
    # (the previous redundant function-local `import re` was removed).
    numbers = re.findall(r'\d+', chunk_index)
    if numbers:
        # A first digit run of 0 or 1 marks the first chunk (001, A1, ...).
        return int(numbers[0]) <= 1

    # No digits at all: fall back to well-known first-chunk labels.
    first_chunk_patterns = ('A001', 'A1', '001', '1', 'FIRST')
    return chunk_index.upper() in first_chunk_patterns

def timestamp_to_seconds(timestamp):
    """Convert a YYYYMMDDHHMMSS timestamp string to epoch seconds (local time).

    Returns:
        float: seconds since the epoch, or None when parsing fails.
    """
    try:
        # Fixed-width fields: year(4), month(2), day(2), hour(2), min(2), sec(2).
        widths = (4, 2, 2, 2, 2, 2)
        fields = []
        offset = 0
        for width in widths:
            fields.append(int(timestamp[offset:offset + width]))
            offset += width
        # Pad with the weekday / yearday / isdst slots mktime expects
        # (isdst=0 deliberately kept, matching the original behavior).
        return time.mktime(tuple(fields) + (0, 0, 0))
    except Exception as e:
        logger.error(f"时间戳转换错误: {str(e)}", exc_info=True)
        return None

def normalize_timestamp(value):
    """Normalize a datetime or assorted string formats to a YYYYMMDDHHMMSS string.

    Returns None for None/blank input; unparseable strings are returned as-is.
    """
    if value is None:
        return None
    if isinstance(value, datetime):
        return value.strftime('%Y%m%d%H%M%S')
    if not isinstance(value, str):
        # Coerce anything else (e.g. ints) to str and retry once.
        return normalize_timestamp(str(value))

    text = value.strip()
    if not text:
        return None
    if text.isdigit():
        if len(text) == 14:
            return text
        if len(text) == 12:
            # Two-digit year: assume the 21st century.
            return '20' + text
    # Try the known human-readable formats in order.
    known_formats = ('%Y-%m-%d %H:%M:%S', '%Y/%m/%d %H:%M:%S', '%Y-%m-%dT%H:%M:%S')
    for fmt in known_formats:
        try:
            parsed = datetime.strptime(text, fmt)
        except ValueError:
            continue
        return parsed.strftime('%Y%m%d%H%M%S')

    # Nothing matched: log and hand the stripped string back unchanged.
    logger.debug(f"无法规范化时间字符串: {text}")
    return text

def process_audio(file_path, time_pairs):
    """Cut or merge an audio file according to multiple (inTime, outTime) pairs.

    Args:
        file_path: path of the original audio file.
        time_pairs: list of (inTime, outTime) tuples; either element may be
            None/empty, which selects the matching handling branch below.

    Returns:
        list: one result dict per successfully processed segment, or None
        when nothing was produced or an error occurred.
    """
    try:
        logger.info(f"开始处理音频文件: {file_path}")
        logger.debug(f"时间对数量: {len(time_pairs)}, 内容: {time_pairs}")

        processed_files = []
        file_name = os.path.basename(file_path)
        base_name = os.path.splitext(file_name)[0]

        # Parse the file name ONCE, outside the loop (previously re-split per
        # iteration). Expected format:
        #   Seating0003774F-250926141355250926142355-A035-00000035.aac
        #   parts[1][:12] -> recording start time (YYMMDDHHMMSS, 12 digits)
        #   parts[2]      -> chunk index (e.g. A035)
        parts = base_name.split('-')
        if len(parts) >= 2 and len(parts[1]) >= 12:
            audio_start_time = parts[1][:12]
        else:
            audio_start_time = None
        chunk_index = parts[2] if len(parts) >= 3 else None

        logger.info("正在加载音频文件...")
        audio = AudioSegment.from_file(
            file_path,
            format="aac",
            codec="aac",  # select the AAC decoder explicitly
            parameters=[
                "-loglevel", "error",  # only emit error-level ffmpeg logs
                "-threads", "12",      # decode with 12 threads
                "-hwaccel", "auto",    # enable hardware acceleration if available
            ],
        )
        file_duration_ms = len(audio)  # duration in milliseconds
        logger.info(f"音频文件加载成功，时长: {file_duration_ms}ms ({file_duration_ms/1000:.2f}秒)")
        logger.info(f"音频起始时间: {audio_start_time}")

        # Handle each time pair independently.
        logger.info(f"开始处理 {len(time_pairs)} 个时间对")
        for i, (in_time, out_time) in enumerate(time_pairs):
            logger.info(f"处理时间对 {i+1}/{len(time_pairs)}: inTime={in_time}, outTime={out_time}")

            if in_time and out_time:
                # 1. Both bounds present: cut directly within this file.
                logger.info(f"时间对 {i+1}: 完整时间段，开始直接截取")
                processed_file = process_single_segment(
                    audio, file_path, base_name, i, in_time, out_time, file_duration_ms, audio_start_time
                )
                if processed_file:
                    processed_files.append(processed_file)
                    logger.info(f"时间对 {i+1}: 截取成功")
                else:
                    logger.warning(f"时间对 {i+1}: 截取失败")

            elif out_time:
                # 2. Only outTime: the segment started in an earlier file and
                #    must be merged with it — unless this is the first chunk
                #    (A001-style), which has no predecessor.
                logger.info(f"时间对 {i+1}: 只有outTime，检查是否需要跨文件合并")
                if chunk_index is not None:
                    logger.debug(f"解析到chunk_index: {chunk_index}")
                    if is_first_chunk(chunk_index):
                        logger.warning(f"第一个分片 {chunk_index} 只有outTime，跳过处理")
                        continue

                logger.info(f"时间对 {i+1}: 非第一分片，开始跨文件合并处理")
                processed_file = process_with_previous_file(
                    audio, file_path, base_name, i, out_time
                )
                if processed_file:
                    processed_files.append(processed_file)
                    logger.info(f"时间对 {i+1}: 跨文件合并成功")
                else:
                    logger.warning(f"时间对 {i+1}: 跨文件合并失败")

            elif in_time:
                # 3. Only inTime: the segment ends in a later file; a later
                #    file's outTime-only pair picks it up, so just log it
                #    (previously this case was dropped silently).
                logger.warning(f"时间对 {i+1}: 只有inTime，留待后续文件的outTime处理")

        return processed_files if processed_files else None

    except Exception as e:
        logger.error(f"音频处理错误: {str(e)}", exc_info=True)
        return None

def process_single_segment(audio, file_path, base_name, index, in_time, out_time, file_duration_ms, audio_start_time=None):
    """Cut one [inTime, outTime] segment out of an already-loaded audio file.

    Args:
        audio: loaded AudioSegment of the whole file.
        file_path: path of the original audio file (recorded in the result).
        base_name: file name without extension (used for output naming).
        index: ordinal of the time pair (used for output naming).
        in_time: segment start, YYYYMMDDHHMMSS string.
        out_time: segment end, YYYYMMDDHHMMSS string.
        file_duration_ms: total duration of `audio` in milliseconds.
        audio_start_time: recording start as YYMMDDHHMMSS (12 digits, no
            century); when given, offsets are computed relative to it.

    Returns:
        dict with the output path, source file, times and speech-analysis
        text file path, or None on failure.
    """
    try:
        logger.info(f"处理单一音频段: index={index}, inTime={in_time}, outTime={out_time}")

        # Convert both bounds to epoch seconds.
        logger.debug("开始时间戳转换...")
        in_sec = timestamp_to_seconds(in_time)
        out_sec = timestamp_to_seconds(out_time)
        logger.debug(f"时间戳转换结果: {in_time} -> {in_sec}秒, {out_time} -> {out_sec}秒")

        # Explicit None check: timestamp_to_seconds returns None on failure,
        # and a falsy test would also (wrongly) reject a legitimate epoch 0.
        if in_sec is None or out_sec is None:
            logger.error(f"无法解析时间戳: inTime={in_time}, outTime={out_time}")
            return None

        if audio_start_time:
            # audio_start_time is 12-digit YYMMDDHHMMSS; prepend the century
            # so it parses like the 14-digit timestamps.
            audio_start_sec = timestamp_to_seconds("20" + audio_start_time)
            logger.debug(f"音频起始时间: {audio_start_time} -> {audio_start_sec}秒")

            # Previously an unparseable start time raised a TypeError below
            # and was swallowed by the broad except; fail explicitly instead.
            if audio_start_sec is None:
                logger.error(f"无法解析音频起始时间: {audio_start_time}")
                return None

            # Offsets of the segment relative to the start of this file.
            start_offset_sec = in_sec - audio_start_sec
            end_offset_sec = out_sec - audio_start_sec

            logger.info(f"相对偏移: 开始={start_offset_sec}秒, 结束={end_offset_sec}秒")

            # Convert to milliseconds, clamped to the file's real duration.
            start_ms = max(0, int(start_offset_sec * 1000))
            end_ms = min(int(end_offset_sec * 1000), file_duration_ms)
        else:
            # Legacy fallback without a start time: cut the segment's duration
            # from the beginning of the file (may be inaccurate).
            logger.warning("未提供音频起始时间，使用旧逻辑（可能不准确）")
            duration_sec = out_sec - in_sec
            start_ms = 0
            end_ms = min(int(duration_sec * 1000), file_duration_ms)

        # Reject empty or inverted ranges.
        if start_ms < 0 or end_ms <= start_ms:
            logger.warning(f"无效的时间范围: start_ms={start_ms}, end_ms={end_ms}")
            return None

        # Cut the requested range.
        logger.info(f"开始截取音频: {start_ms}ms - {end_ms}ms (时长: {end_ms - start_ms}ms)")
        processed_audio = audio[start_ms:end_ms]
        logger.info(f"音频截取完成，截取长度: {len(processed_audio)}ms")

        # Build the output file name.
        output_file = os.path.join(PROCESSED_FOLDER, f"{base_name}_segment_{index}_{in_time}_{out_time}.mp3")
        logger.info(f"开始导出MP3文件: {output_file}")

        # Mix stereo down to mono (blend left/right channels) and export.
        processed_audio = processed_audio.set_channels(1)
        processed_audio.export(output_file, format="mp3")
        logger.info(f"音频片段处理完成: {output_file}")

        # Run speech recognition and emotion analysis on the exported segment.
        speech_text_file = process_speech_emotion(output_file)

        return {
            "file_path": output_file,
            "is_cross_file": False,
            "original_files": [os.path.basename(file_path)],
            "in_time": in_time,
            "out_time": out_time,
            "speech_text_file": speech_text_file
        }

    except Exception as e:
        logger.error(f"处理音频片段时出错: {str(e)}", exc_info=True)
        return None

def process_with_previous_file(current_audio, current_file_path, base_name, index, out_time):
    """Cut a segment that spans multiple consecutive files (3+ supported).

    The segment's inTime is recovered from the `extended` metadata of the
    earliest file returned by the database lookup; every earlier chunk is
    loaded (downloading from MinIO when missing locally), concatenated with
    the current audio, and the [inTime, outTime] range is cut from the
    combined stream.

    Parameters:
        current_audio: AudioSegment of the file currently being processed.
        current_file_path: path of the current audio file.
        base_name: current file name without extension (used for output naming).
        index: ordinal of the time pair being processed (used for output naming).
        out_time: segment end timestamp.

    Returns:
        dict describing the exported MP3 (same shape as process_single_segment,
        plus "merged_file_count"), or None on any failure.
    """
    try:
        # Extract device number and current chunk index from the file name.
        file_name = os.path.basename(current_file_path)
        parts = file_name.split('-')
        if len(parts) < 3:
            logger.error(f"无法解析文件名: {file_name}")
            return None
            
        device_no = parts[0]
        chunk_index = parts[2]
        
        # Find every consecutive earlier file that must be merged (the helper
        # queries start_time from the database internally).
        prev_files = find_previous_in_time_file(device_no, chunk_index, file_name)
        if not prev_files:
            logger.warning("找不到需要合并的文件（可能时间不连续或chunk不连续）")
            return None
        
        # Recover inTime from the earliest file: scan its `extended` list
        # backwards for the last element that carries an inTime.
        first_file = prev_files[0]
        try:
            first_extended = json.loads(first_file['extended'])
            if not first_extended or not isinstance(first_extended, list):
                logger.error("起始文件的extended格式错误")
                return None
            
            # inTime and outTime may live in separate elements; take the
            # latest inTime entry.
            in_time = None
            for item in reversed(first_extended):
                if isinstance(item, dict) and 'inTime' in item:
                    in_time = item['inTime']
                    break
            
            if not in_time:
                logger.error("起始文件中未找到inTime元素")
                return None
            
            logger.info(f"从起始文件 {first_file['chunk_index']} 提取inTime: {in_time}")
        except (json.JSONDecodeError, KeyError, TypeError) as e:
            logger.error(f"解析起始文件的extended数据失败: {str(e)}")
            return None
        
        # Load and concatenate every file in database order.
        combined_audio = None
        original_files = []
        
        # Start time of the first file: the reference point for all offsets.
        first_file_start_time = first_file.get('start_time')
        if not first_file_start_time:
            logger.error(f"第一个文件缺少start_time: {first_file['file_name']}")
            return None
        
        for prev_file in prev_files:
            prev_file_path = os.path.join(UPLOAD_FOLDER, prev_file['file_name'])
            if not os.path.exists(prev_file_path):
                # Local copy missing: fall back to fetching it from MinIO.
                logger.warning(f"文件不存在: {prev_file_path}，尝试从 MinIO 下载")
                remote_path = prev_file.get('file_path')
                if remote_path:
                    try:
                        # Only the last path component is the object name.
                        object_name = remote_path.split('/')[-1]
                        minio_manager.client.fget_object(minio_manager.bucket_name, object_name, prev_file_path)
                        logger.info(f"已从 MinIO 下载文件: {prev_file_path}")
                    except Exception as e:
                        logger.error(f"从 MinIO 下载文件失败: {remote_path}, 错误: {e}")
                        return None
                else:
                    logger.error(f"无法找到文件且缺少 MinIO 地址: {prev_file['file_name']}")
                    return None
            
            logger.info(f"加载文件: {prev_file['chunk_index']}")
            audio_segment = AudioSegment.from_file(prev_file_path, format="aac")
            
            if combined_audio is None:
                combined_audio = audio_segment
            else:
                combined_audio += audio_segment
            
            original_files.append(prev_file['file_name'])
        
        # Append the current file last.
        combined_audio += current_audio
        original_files.append(os.path.basename(current_file_path))
        
        total_duration_ms = len(combined_audio)
        logger.info(f"合并了 {len(original_files)} 个文件，总时长: {total_duration_ms}ms")
        
        # Compute the cut range as offsets from the first file's start time.
        in_time_norm = normalize_timestamp(in_time)
        out_time_norm = normalize_timestamp(out_time)
        first_start_norm = normalize_timestamp(first_file_start_time)

        in_sec = timestamp_to_seconds(in_time_norm)
        out_sec = timestamp_to_seconds(out_time_norm)
        first_start_sec = timestamp_to_seconds(first_start_norm)
        
        # NOTE(review): this falsy test also rejects a legitimate epoch value
        # of 0; harmless for real-world timestamps, but `is None` would be
        # more precise — confirm before tightening.
        if not in_sec or not out_sec or not first_start_sec:
            logger.error(f"无法解析时间戳: inTime={in_time_norm}, outTime={out_time_norm}, firstStart={first_start_norm}")
            return None
        
        # Offsets (seconds) of the segment within the combined audio.
        start_offset_sec = in_sec - first_start_sec
        end_offset_sec = out_sec - first_start_sec
        
        logger.info(f"时间偏移: 开始={start_offset_sec:.2f}秒, 结束={end_offset_sec:.2f}秒")
        
        if start_offset_sec < 0 or end_offset_sec <= start_offset_sec:
            logger.warning(f"无效的时间范围: start_offset={start_offset_sec}, end_offset={end_offset_sec}")
            return None
        
        # Convert to milliseconds, clamped to the combined duration.
        start_ms = max(0, int(start_offset_sec * 1000))
        end_ms = min(int(end_offset_sec * 1000), total_duration_ms)
        
        logger.info(f"截取音频: {start_ms}ms - {end_ms}ms (时长: {end_ms - start_ms}ms)")
        
        # Cut the merged audio from start_ms to end_ms.
        processed_audio = combined_audio[start_ms:end_ms]
        
        # Build the output file name.
        output_file = os.path.join(PROCESSED_FOLDER, 
                                  f"{base_name}_merged_{len(original_files)}files_"
                                  f"segment_{index}_{in_time}_{out_time}.mp3")
        
        # Mix stereo down to mono (blend left/right channels).
        processed_audio = processed_audio.set_channels(1)
        
        # Export as MP3.
        processed_audio.export(output_file, format="mp3")

        logger.info(f"多文件合并完成: {output_file}")
        
        # Run speech recognition and emotion analysis on the result.
        speech_text_file = process_speech_emotion(output_file)
        
        return {
            "file_path": output_file,
            "is_cross_file": True,
            "original_files": original_files,
            "in_time": in_time,
            "out_time": out_time,
            "speech_text_file": speech_text_file,
            "merged_file_count": len(original_files)
        }
        
    except Exception as e:
        logger.error(f"处理多文件合并时出错: {str(e)}", exc_info=True)
        return None
