import os
import sys
import logging
import threading
import tempfile
import re
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
import argparse
from datetime import datetime, timedelta
import torch
from funasr import AutoModel
from pydub import AudioSegment
import shutil
import psutil
import readline  # 用于输入历史记录支持

class Config:
    """Runtime configuration: model selection, concurrency limits, cache paths."""

    # Original large model (high-accuracy variant).
    MODEL_NAME = "iic/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
    # The model does not support batched inference; batch size is pinned to 1.
    BATCH_SIZE = 1
    # Dynamic concurrency tuning knobs.
    MAX_WORKERS_RATIO = 0.6
    MIN_MEMORY_PER_WORKER = 2 * 1024**3  # 2GB per worker
    # Audio chunking parameters (in seconds).
    AUDIO_CHUNK_DURATION = 60
    AUDIO_CHUNK_OVERLAP = 0
    PREPROCESS_CACHE_DIR = Path("~/.cache/audio_preprocessed").expanduser()
    # Miscellaneous locations.
    CACHE_DIR = Path("~/.cache/mp3_batch_converter").expanduser()
    LOG_FILE = "/tmp/mp3_batch_converter.log"
    MERGED_FILENAME = "merged_transcript.txt"
    TEMP_DIR = Path(tempfile.gettempdir()) / "mp3_to_txt"

    def __init__(self):
        # Ensure every working directory exists before anything else runs.
        for directory in (self.CACHE_DIR, self.TEMP_DIR, self.PREPROCESS_CACHE_DIR):
            directory.mkdir(exist_ok=True, parents=True)

        # Discard WAV chunks left over from a previous run at startup.
        self.cleanup_wav_cache()

    def cleanup_wav_cache(self):
        """Delete every cached WAV chunk directory created by preprocessing."""
        if not self.PREPROCESS_CACHE_DIR.exists():
            return

        try:
            # Collect the cache subdirectories once, then account and delete.
            subdirs = [entry for entry in self.PREPROCESS_CACHE_DIR.iterdir() if entry.is_dir()]
            dir_count = len(subdirs)
            total_size = sum(
                item.stat().st_size
                for entry in subdirs
                for item in entry.rglob('*')
                if item.is_file()
            )

            for entry in subdirs:
                shutil.rmtree(entry, ignore_errors=True)

            logging.info(
                f"已清除预处理缓存: {dir_count}个目录, "
                f"释放空间: {total_size / (1024**3):.2f}GB"
            )
        except Exception as e:
            logging.warning(f"清除缓存时出错: {e}")

    @property
    def device(self) -> str:
        """Best available torch device, preferring cuda, then mps, then cpu."""
        try:
            if torch.cuda.is_available():
                return "cuda"
            mps_backend = getattr(torch.backends, 'mps', None)
            if mps_backend is not None and mps_backend.is_available():
                return "mps"
        except RuntimeError as e:
            logging.info(f"设备检测提示: {e}")
        return "cpu"

    @property
    def max_workers(self) -> int:
        """Worker count bounded by both CPU cores and available memory, min 1."""
        cores = os.cpu_count() or 1
        by_cpu = int(cores * self.MAX_WORKERS_RATIO)
        by_memory = int(psutil.virtual_memory().available // self.MIN_MEMORY_PER_WORKER)
        return max(1, min(by_cpu, by_memory))

# Module-level singleton, created at import time: this makes the cache/temp
# directories and purges any leftover WAV preprocessing cache immediately.
# NOTE(review): cleanup_wav_cache logs before setup_logging() has run, so its
# messages go through logging's default last-resort handling — confirm intended.
config = Config()

def setup_logging(log_file: str, verbose: bool, debug: bool) -> None:
    """Configure root logging to a file, optionally echoing to the console.

    Args:
        log_file: Log file path; parent directories are created if missing.
        verbose: When True, also stream log records to the console.
        debug: When True, log at DEBUG level instead of INFO.
    """
    log_path = Path(log_file).expanduser()
    log_path.parent.mkdir(exist_ok=True, parents=True)

    # Force UTF-8 so the Chinese log messages are not mangled on platforms
    # whose default locale encoding is not UTF-8.
    handlers: list[logging.Handler] = [logging.FileHandler(log_path, encoding="utf-8")]
    if verbose:
        handlers.append(logging.StreamHandler())

    logging.basicConfig(
        level=logging.DEBUG if debug else logging.INFO,
        format="%(asctime)s - %(levelname)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
        handlers=handlers,
        # force=True replaces any pre-existing root handlers; without it,
        # basicConfig silently does nothing when the root logger is already
        # configured and our file handler would never be installed.
        force=True,
    )

def format_timedelta(td: timedelta) -> str:
    """Render a timedelta as a zero-padded HH:MM:SS string."""
    seconds_total = int(td.total_seconds())
    minutes_total, secs = divmod(seconds_total, 60)
    hrs, mins = divmod(minutes_total, 60)
    return ":".join(f"{part:02d}" for part in (hrs, mins, secs))

class MP3BatchConverter:
    """Batch-converts MP3 files into text transcripts via the funasr ASR model.

    One instance is reused across runs. A single lock (`self._lock`) serializes
    model loading, every `model.generate` call, and all mutations of the shared
    counters/lists, so the instance can be driven by multiple worker threads.
    """

    def __init__(self) -> None:
        self.model = None                # funasr AutoModel, loaded lazily
        self._lock = threading.Lock()    # guards model access and shared state
        self.success_count = 0           # files transcribed successfully this run
        self.failed_files = []           # str paths of files that failed
        self.processed_txt_files = []    # Path objects of produced .txt files
        self._model_loaded = False       # set True only after a successful load
        self._load_model_error = None    # cached load failure (see note below)
        self.total_files = 0             # number of MP3s in the current run
        self.start_time = None           # datetime when the current run started

    def _prepare_model(self) -> None:
        """Load the ASR model exactly once (thread-safe).

        Raises:
            RuntimeError: if the model cannot be loaded (chained to the cause).

        NOTE(review): on failure `_model_loaded` stays False, so the cached
        `_load_model_error` re-raise branch can never trigger and every later
        call retries the full load — confirm whether retry is intended.
        """
        with self._lock:
            if self._model_loaded:
                if self._load_model_error:
                    raise self._load_model_error
                return

            try:
                self.model = AutoModel(
                    model=config.MODEL_NAME,
                    device=config.device,
                    disable_update=True,
                    vad_model="fsmn-vad",   # voice-activity detection model
                    punc_model="ct-punc",   # punctuation restoration model
                    show_progress=False,
                    quantize=True,
                    # Half precision on GPU/MPS, full precision on CPU.
                    # NOTE(review): assumed accepted by this funasr version — verify.
                    inference_type="float16" if config.device in ["cuda", "mps"] else "float32"
                )
                logging.info(f"已加载模型（量化加速），使用设备: {config.device}")
                self._model_loaded = True
            except Exception as e:
                logging.error(f"模型加载失败: {e}")
                self._load_model_error = e
                raise RuntimeError("无法加载语音识别模型") from e

    def _preprocess_audio(self, mp3_path: Path) -> list[Path]:
        """Split one MP3 into 16 kHz mono WAV chunks, with a content-hash cache.

        Returns the ordered chunk paths, or [] on any preprocessing failure.
        """
        import hashlib

        # Cache key = SHA-256 over the file bytes plus the chunking parameters,
        # so identical files share one global cache entry regardless of path.
        # NOTE(review): this reads the whole MP3 into memory just to hash it.
        file_content = mp3_path.read_bytes()
        hasher = hashlib.sha256()
        hasher.update(file_content)
        hasher.update(str(config.AUDIO_CHUNK_DURATION).encode())
        hasher.update(str(config.AUDIO_CHUNK_OVERLAP).encode())
        file_hash = hasher.hexdigest()

        cache_dir = config.PREPROCESS_CACHE_DIR / file_hash

        # A cache entry counts as valid only when the "done" marker exists and
        # at least one chunk file is present.
        cache_valid = False
        if cache_dir.exists() and (cache_dir / "done").exists():
            chunk_files = list(cache_dir.glob("chunk_*.wav"))
            if chunk_files and all(f.exists() for f in chunk_files):
                cache_valid = True

        if cache_valid:
            # Sort numerically by the index embedded in "chunk_<idx>.wav".
            return sorted(chunk_files, key=lambda x: int(x.stem.split("_")[1]))

        # Evict old cache entries first if the cache has grown too large.
        self._cleanup_cache_if_needed()

        try:
            cache_dir.mkdir(exist_ok=True, parents=True)
            audio = AudioSegment.from_mp3(mp3_path)
            # Normalize to the model's expected input: 16 kHz, mono.
            audio = audio.set_frame_rate(16000).set_channels(1)
            chunk_length = config.AUDIO_CHUNK_DURATION * 1000   # pydub uses ms
            overlap_length = config.AUDIO_CHUNK_OVERLAP * 1000
            total_length = len(audio)

            chunks = []
            start = 0
            chunk_idx = 0

            while start < total_length:
                end = start + chunk_length
                chunk = audio[start:end] if end <= total_length else audio[start:]
                chunk_path = cache_dir / f"chunk_{chunk_idx}.wav"
                chunk.export(chunk_path, format="wav")
                chunks.append(chunk_path)

                start += (chunk_length - overlap_length)
                chunk_idx += 1

                # Safety valve against pathological inputs.
                if chunk_idx > 1000:
                    logging.warning(f"音频过长，已截断为1000个片段: {mp3_path.name}")
                    break

            # Marker file signals that the cache entry is complete.
            (cache_dir / "done").touch()
            logging.debug(f"预处理完成，生成 {len(chunks)} 个片段: {mp3_path.name}")
            return chunks
        except Exception as e:
            logging.error(f"音频预处理失败 {mp3_path.name}: {e}")
            # Drop the partial cache entry so it is never mistaken for valid.
            if cache_dir.exists():
                shutil.rmtree(cache_dir, ignore_errors=True)
            return []

    def _cleanup_cache_if_needed(self) -> None:
        """Evict the oldest half of cache entries once the cache exceeds 10 GB."""
        try:
            cache_size = sum(f.stat().st_size for f in config.PREPROCESS_CACHE_DIR.rglob('*') if f.is_file())
            max_cache_size = 10 * 1024**3  # 10GB

            if cache_size > max_cache_size:
                logging.info("缓存空间超过限制，开始清理...")

                # Rank each cache directory by the newest access time of any
                # file inside it; directories with no files sort first (0).
                cache_dirs = []
                for cache_dir in config.PREPROCESS_CACHE_DIR.iterdir():
                    if cache_dir.is_dir():
                        access_time = max((f.stat().st_atime for f in cache_dir.rglob('*') if f.is_file()), default=0)
                        cache_dirs.append((access_time, cache_dir))

                # Ascending order: least-recently-accessed entries come first.
                cache_dirs.sort(key=lambda x: x[0])

                # Delete the oldest half of the entries.
                delete_count = len(cache_dirs) // 2
                for _, cache_dir in cache_dirs[:delete_count]:
                    shutil.rmtree(cache_dir, ignore_errors=True)

                logging.info(f"已清理 {delete_count} 个缓存目录")
        except Exception as e:
            logging.warning(f"缓存清理失败: {e}")

    def recognize_single(self, chunk_path: str) -> str:
        """Run ASR on one WAV chunk; returns '' on any failure.

        The shared instance lock serializes `model.generate`, so concurrent
        chunk workers never invoke the model simultaneously.
        """
        with self._lock:  # reuse the instance lock to guard model calls
            try:
                result = self.model.generate(
                    input=chunk_path,
                    batch_size=1,
                    hotword="",
                    show_progress=False
                )
                return result[0]["text"].strip() if result and "text" in result[0] else ""
            except Exception as e:
                # The model only accepts single-item inference; this message
                # check downgrades that specific error to an informative warning.
                if "batch_size must be set 1" in str(e):
                    logging.warning("模型限制：仅支持单片段识别（已自动适配）")
                else:
                    logging.warning(f"片段识别失败: {str(e)}")
                return ""

    def process_single_file(self, mp3_path: Path, output_dir: Path) -> bool:
        """Transcribe one MP3 to `<stem>.txt` in output_dir.

        Skips files that already have a non-empty transcript; records success
        or failure in the shared counters. Returns True on success.
        """
        if not mp3_path.exists():
            logging.error(f"文件不存在: {mp3_path}")
            with self._lock:
                self.failed_files.append(str(mp3_path))
            return False

        try:
            output_file = output_dir / f"{mp3_path.stem}.txt"

            # A non-empty transcript means this file was already processed;
            # an empty one is treated as a failed previous attempt and redone.
            if output_file.exists():
                if output_file.stat().st_size > 0:
                    logging.debug(f"文件已处理，跳过: {mp3_path.name}")
                    with self._lock:
                        self.processed_txt_files.append(output_file)
                    return True
                else:
                    logging.warning(f"发现空结果文件，重新处理: {mp3_path.name}")

            logging.info(f"开始处理: {mp3_path.name}")

            chunk_paths = self._preprocess_audio(mp3_path)
            if not chunk_paths:
                logging.error(f"无有效音频片段: {mp3_path.name}")
                with self._lock:
                    self.failed_files.append(str(mp3_path))
                return False

            full_text = []
            # Up to 4 chunk workers; note recognize_single's lock still
            # serializes the actual model calls.
            chunk_workers = min(4, len(chunk_paths)) if len(chunk_paths) > 1 else 1

            with ThreadPoolExecutor(max_workers=chunk_workers) as executor:
                futures = {
                    executor.submit(self.recognize_single, str(path)): idx
                    for idx, path in enumerate(chunk_paths)
                }

                # Collect results by original chunk index so the transcript
                # stays in chronological order despite out-of-order completion.
                results = [None] * len(chunk_paths)
                for future in as_completed(futures):
                    idx = futures[future]
                    try:
                        text = future.result()
                        results[idx] = text
                        logging.debug(f"完成片段 {idx+1}/{len(chunk_paths)} 识别")
                    except Exception as e:
                        logging.warning(f"片段 {idx+1} 处理异常: {e}")
                        results[idx] = ""

            # Keep only non-empty chunk texts, each prefixed with its position.
            full_text = [
                f"[{i+1}/{len(results)}] {text}"
                for i, text in enumerate(results)
                if text
            ]

            if full_text:
                # Write to a hidden temp file then rename, so readers never
                # observe a half-written transcript.
                temp_output = output_dir / f".{mp3_path.stem}.tmp.txt"
                with open(temp_output, "w", encoding="utf-8") as f:
                    f.write("\n".join(full_text))
                temp_output.chmod(0o644)
                shutil.move(temp_output, output_file)

                with self._lock:
                    self.success_count += 1
                    self.processed_txt_files.append(output_file)
                logging.info(f"处理完成: {mp3_path.name} -> {output_file.name}")
                return True
            else:
                logging.warning(f"未识别到有效内容: {mp3_path.name}")
                with self._lock:
                    self.failed_files.append(str(mp3_path))
                return False

        except Exception as e:
            logging.error(f"处理 {mp3_path.name} 失败: {str(e)}", exc_info=True)
            with self._lock:
                self.failed_files.append(str(mp3_path))
            return False

    @staticmethod
    def extract_number(filename: str) -> int:
        """Return the first integer embedded in filename, or -1 if none."""
        numbers = re.findall(r'\d+', filename)
        return int(numbers[0]) if numbers else -1

    def merge_text_files(self, output_dir: Path) -> None:
        """Concatenate all produced transcripts into one merged file.

        Files are ordered by the first number in their stem; the merge is
        written to a temp file and renamed into place atomically.
        """
        if not self.processed_txt_files:
            logging.info("没有可合并的文本文件")
            return

        sorted_files = sorted(
            self.processed_txt_files,
            key=lambda x: self.extract_number(x.stem)
        )

        merged_path = output_dir / config.MERGED_FILENAME
        temp_merged = output_dir / f".{config.MERGED_FILENAME}.tmp"

        try:
            with open(temp_merged, "w", encoding="utf-8") as outfile:
                for idx, txt_file in enumerate(sorted_files, 1):
                    # Section header carrying the source file's stem.
                    outfile.write(f"\n===== {txt_file.stem} =====\n\n")

                    try:
                        with open(txt_file, "r", encoding="utf-8") as infile:
                            content = infile.read()
                            outfile.write(content)
                        outfile.write("\n")
                    except Exception as e:
                        # A single unreadable transcript must not abort the merge.
                        logging.warning(f"读取文件 {txt_file} 失败: {e}")
                        outfile.write(f"[警告：该文件内容读取失败]\n\n")

            shutil.move(temp_merged, merged_path)
            logging.info(f"已合并 {len(sorted_files)} 个文件至: {merged_path}")
        except Exception as e:
            logging.error(f"合并失败: {str(e)}")
            if temp_merged.exists():
                try:
                    temp_merged.unlink(missing_ok=True)
                except:  # NOTE(review): bare except — consider narrowing to OSError
                    pass

    def process_directory(self, input_dir: Path, output_dir: Path) -> None:
        """Transcribe every *.mp3 under input_dir (recursive) into output_dir.

        Raises:
            FileNotFoundError: input_dir does not exist.
            NotADirectoryError: input_dir is not a directory.
        """
        # Reset per-run statistics (model load state is intentionally kept).
        self.success_count = 0
        self.failed_files = []
        self.processed_txt_files = []
        self.total_files = 0
        self.start_time = datetime.now()

        if not input_dir.exists():
            raise FileNotFoundError(f"输入目录不存在: {input_dir}")
        if not input_dir.is_dir():
            raise NotADirectoryError(f"输入路径不是目录: {input_dir}")

        output_dir.mkdir(exist_ok=True, parents=True)
        logging.info(f"输出目录: {output_dir}")

        mp3_files = list(input_dir.rglob("*.mp3"))
        self.total_files = len(mp3_files)

        if not mp3_files:
            logging.info("未找到任何MP3文件")
            return

        # Smallest files first, so early progress feedback arrives quickly.
        mp3_files.sort(key=lambda x: x.stat().st_size)

        logging.info(f"找到 {self.total_files} 个MP3文件，最大并发数: {config.max_workers}")

        self._prepare_model()

        with ThreadPoolExecutor(max_workers=config.max_workers) as executor:
            futures = {
                executor.submit(self.process_single_file, file, output_dir): file
                for file in mp3_files
            }

            completed_count = 0
            for future in as_completed(futures):
                completed_count += 1
                file = futures[future]
                try:
                    future.result()
                except Exception as e:
                    logging.error(f"处理 {file.name} 异常: {e}")

                progress = (completed_count / self.total_files) * 100
                logging.info(f"处理进度: {progress:.1f}% ({completed_count}/{self.total_files})")

        self.merge_text_files(output_dir)

        # Compute and format elapsed time.
        end_time = datetime.now()
        total_time = end_time - self.start_time

        # Average time per file (guard against division by zero).
        avg_time = (total_time / self.total_files) if self.total_files > 0 else timedelta(0)

        # total_files > 0 is guaranteed here by the early return above, so the
        # success-rate division cannot raise ZeroDivisionError.
        stats = [
            "\n" + "="*50,
            "处理完成统计",
            "="*50,
            f"总文件数: {self.total_files}",
            f"成功: {self.success_count} ({self.success_count/self.total_files:.1%})",
            f"失败: {len(self.failed_files)}",
            f"总耗时: {format_timedelta(total_time)}",  # formatted as HH:MM:SS
            f"平均速度: {format_timedelta(avg_time)} 秒/文件" if self.total_files else "N/A"  # average, formatted
        ]
        for line in stats:
            logging.info(line)
            print(line)

        if self.failed_files:
            failed_log = output_dir / "failed_files.txt"
            with open(failed_log, "w", encoding="utf-8") as f:
                f.write("\n".join(self.failed_files))
            logging.info(f"失败文件列表: {failed_log}")

def process_directory_interactive(converter, input_dir_str, output_dir=None):
    """Resolve user-supplied paths and run one directory conversion.

    When output_dir is not given, transcripts go to <input_dir>/transcripts.
    Errors are logged and printed instead of propagated, so the interactive
    loop keeps running.
    """
    try:
        source_dir = Path(input_dir_str).expanduser().resolve()
        if output_dir:
            target_dir = Path(output_dir).expanduser().resolve()
        else:
            target_dir = source_dir / "transcripts"

        converter.process_directory(source_dir, target_dir)
    except Exception as e:
        logging.error(f"处理目录出错: {e}", exc_info=True)
        print(f"错误: {e}")

def main():
    """CLI entry point: parse arguments, set up logging, run the input loop."""
    parser = argparse.ArgumentParser(description="批量MP3语音识别（适配单片段模型）")
    parser.add_argument("--input-dir", type=str, help="初始输入目录")
    parser.add_argument("--output-dir", type=str, help="初始输出目录（默认: 输入目录/transcripts）")
    parser.add_argument("--log-file", default=config.LOG_FILE, help=f"日志路径（默认: {config.LOG_FILE}）")
    parser.add_argument("--quiet", action="store_true", help="不显示控制台日志")
    parser.add_argument("--debug", action="store_true", help="调试模式")
    args = parser.parse_args()

    try:
        setup_logging(args.log_file, not args.quiet, args.debug)
        converter = MP3BatchConverter()

        # If an initial directory was supplied on the command line, process it
        # before entering the interactive loop.
        if args.input_dir:
            process_directory_interactive(converter, args.input_dir, args.output_dir)

        # Interactive loop: each iteration either converts one directory,
        # absorbs a Ctrl-C, or exits on a quit command.
        print("\n输入目录路径进行转换，或输入 'q' 退出程序")
        while True:
            try:
                user_path = input("请输入目录路径 (q退出): ").strip()
                if user_path.lower() in ('q', 'quit', 'exit'):
                    print("程序退出中...")
                    break
                if not user_path:
                    continue
                process_directory_interactive(converter, user_path)
                print("\n处理完成，可继续输入新目录或按q退出")
            except KeyboardInterrupt:
                # Ctrl-C anywhere in the iteration returns to the prompt.
                print("\n用户中断，输入q退出或继续输入目录")
            except Exception as e:
                print(f"操作出错: {e}，请重试")

    except Exception as e:
        logging.error(f"程序初始化出错: {e}", exc_info=args.debug)
        sys.exit(1)

# Run the CLI only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
