# main_stable_optimized.py - 稳定优化版本
import sys
import os
import pandas as pd
import torch
import json
import logging
from pathlib import Path
from tqdm import tqdm
import warnings
import gc
warnings.filterwarnings('ignore')

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

class StableOptimizedTranslationSystem:
    """Speech-to-text plus translation pipeline.

    Runs a ModelScope Paraformer ASR model over a CSV of audio paths, then
    batch-translates the transcripts to English / Malay / Thai with an NLLB
    seq2seq model.  All models are loaded lazily on first use, ASR results
    are cached to JSON, and translation is batched per target language.
    """

    # NLLB target-language codes for the supported languages.
    # Hoisted to a single class constant (this mapping was previously
    # duplicated in get_translation_components and translate_batch_fast).
    LANG_CODES = {
        '英语': 'eng_Latn',
        '马来语': 'zsm_Latn',
        '泰语': 'tha_Thai'
    }

    def __init__(self, config_path="config.json"):
        """Load configuration and apply global inference-only torch settings.

        Args:
            config_path: JSON file whose keys override the built-in defaults.
        """
        self.config = self.load_config(config_path)

        # Lazily-loaded model handles, populated on first use.
        self.asr_pipeline = None
        self.translation_models = {}
        self.translation_tokenizers = {}

        # Data locations; audio paths in the CSV are appended to wav_dir
        # as-is (they are expected to start with '/').
        self.csv_dir = '/mnt/workspace/text_data/'
        self.wav_dir = '/mnt/workspace'

        # Inference-only process: disabling autograd globally saves memory
        # and time; cudnn.benchmark speeds up fixed-shape convolutions.
        torch.set_grad_enabled(False)
        if torch.cuda.is_available():
            torch.backends.cudnn.benchmark = True

    def load_config(self, config_path):
        """Return the default config, overridden by config_path if it exists.

        Args:
            config_path: path to an optional JSON config file.

        Returns:
            dict with all configuration keys (defaults merged with overrides).
        """
        default_config = {
            "asr_model": "iic/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
            "asr_model_revision": "v2.0.4",
            "translation_model": "facebook/nllb-200-3.3B",

            # Conservative but effective batch sizes.
            "asr_batch_size": 1,
            "translation_batch_size": 24,

            # Generation parameters tuned for speed over maximum quality.
            "max_length": 200,
            "num_beams": 3,
            "length_penalty": 1.0,
            "early_stopping": True,

            "device": "cuda:0" if torch.cuda.is_available() else "cpu",
            "torch_dtype": "float16",
            "use_cache": True,
        }

        if os.path.exists(config_path):
            with open(config_path, 'r', encoding='utf-8') as f:
                default_config.update(json.load(f))

        return default_config

    def get_asr_pipeline(self):
        """Return the ASR pipeline, loading it on first call.

        Raises:
            Exception: re-raised if the ModelScope pipeline fails to load.
        """
        if self.asr_pipeline is None:
            try:
                # Imported lazily so the module loads without modelscope
                # installed until ASR is actually needed.
                from modelscope.pipelines import pipeline
                from modelscope.utils.constant import Tasks

                logger.info("加载ASR模型...")
                self.asr_pipeline = pipeline(
                    task=Tasks.auto_speech_recognition,
                    model=self.config["asr_model"],
                    model_revision=self.config["asr_model_revision"],
                    device=self.config["device"]
                )
                logger.info("ASR模型加载完成")
            except Exception as e:
                logger.error(f"ASR模型加载失败: {e}")
                raise
        return self.asr_pipeline

    def get_translation_components(self, target_lang):
        """Return (model, tokenizer) for target_lang, loading them on first use.

        Args:
            target_lang: one of the keys of LANG_CODES.

        Raises:
            ValueError: if target_lang is not a supported language.
        """
        if target_lang not in self.translation_models:
            from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

            if target_lang not in self.LANG_CODES:
                raise ValueError(f"不支持的目标语言: {target_lang}")

            logger.info(f"加载{target_lang}翻译组件...")

            # Source language is fixed to Simplified Chinese.
            tokenizer = AutoTokenizer.from_pretrained(
                self.config["translation_model"],
                src_lang="zho_Hans"
            )

            model = AutoModelForSeq2SeqLM.from_pretrained(
                self.config["translation_model"],
                torch_dtype=torch.float16 if self.config["torch_dtype"] == "float16" else torch.float32,
                device_map="auto",
                low_cpu_mem_usage=True
            )

            # eval() disables dropout etc. for deterministic inference.
            model.eval()

            self.translation_models[target_lang] = model
            self.translation_tokenizers[target_lang] = tokenizer

            logger.info(f"{target_lang}翻译组件加载完成")

        return self.translation_models[target_lang], self.translation_tokenizers[target_lang]

    def process_asr_optimized(self, data):
        """Transcribe every audio file referenced by data['音频路径'].

        Args:
            data: DataFrame with an '音频路径' column of paths relative to wav_dir.

        Returns:
            list[str] aligned with data's rows; missing files and per-row
            failures yield "" so downstream indexing stays aligned.
        """
        asr_pipeline = self.get_asr_pipeline()
        results = []
        audio_paths = data['音频路径']  # hoisted: one column lookup, not one per row

        logger.info(f"处理{len(data)}个音频文件")

        for i in tqdm(range(len(data)), desc="ASR处理"):
            # Bound before the try block so the except handler can always
            # reference it (previously audio_path could be unbound, turning
            # the original error into a NameError).
            audio_path = audio_paths[i]
            try:
                wav_path = self.wav_dir + audio_path

                if not os.path.exists(wav_path):
                    results.append("")
                    continue

                result = asr_pipeline(wav_path)

                # The pipeline may return a dict with 'text', a plain str,
                # or some other object; normalize all three.
                if isinstance(result, dict) and 'text' in result:
                    text = result['text'].strip()
                elif isinstance(result, str):
                    text = result.strip()
                else:
                    text = str(result).strip()

                results.append(text)

            except Exception as e:
                logger.error(f"ASR处理失败 {audio_path}: {e}")
                results.append("")

        success_count = len([r for r in results if r])
        logger.info(f"ASR成功率: {success_count}/{len(results)}")
        return results

    def translate_batch_fast(self, texts, target_language):
        """Translate a batch of texts to target_language.

        Args:
            texts: list of source strings (blank entries are skipped).
            target_language: one of the keys of LANG_CODES.

        Returns:
            list[str] the same length as texts; blank inputs and any
            batch-level failure come back as "".

        Raises:
            ValueError: if target_language is not supported.
        """
        if not texts or not any(texts):
            return [""] * len(texts)

        if target_language not in self.LANG_CODES:
            raise ValueError(f"不支持的目标语言: {target_language}")
        tgt_lang_code = self.LANG_CODES[target_language]

        # Keep only non-blank texts, remembering each one's original slot.
        valid_texts = []
        original_positions = []
        for i, text in enumerate(texts):
            if text and text.strip():
                valid_texts.append(text.strip())
                original_positions.append(i)

        if not valid_texts:
            return [""] * len(texts)

        # Load (or fetch cached) model/tokenizer only when there is actual
        # work to do — previously the 3.3B model was loaded even when every
        # input in the batch was blank.
        model, tokenizer = self.get_translation_components(target_language)

        try:
            inputs = tokenizer(
                valid_texts,
                return_tensors="pt",
                padding=True,
                truncation=True,
                max_length=256
            ).to(model.device)

            # NLLB selects the output language via a forced BOS token.
            forced_bos_token_id = tokenizer.convert_tokens_to_ids(tgt_lang_code)

            with torch.no_grad():
                outputs = model.generate(
                    **inputs,
                    forced_bos_token_id=forced_bos_token_id,
                    max_length=self.config["max_length"],
                    num_beams=self.config["num_beams"],
                    length_penalty=self.config["length_penalty"],
                    early_stopping=self.config["early_stopping"],
                    do_sample=False,  # beam search only: faster, deterministic
                    pad_token_id=tokenizer.pad_token_id,
                    use_cache=True
                )

            translations = tokenizer.batch_decode(outputs, skip_special_tokens=True)

            # Scatter translations back into their original positions.
            results = [""] * len(texts)
            for pos, translation in zip(original_positions, translations):
                results[pos] = translation.strip()

            return results

        except Exception as e:
            logger.error(f"批量翻译失败: {e}")
            return [""] * len(texts)

    def process_translation_by_language_optimized(self, data):
        """Translate all ASR transcripts, grouped by target language.

        Grouping lets each language's model run large contiguous batches and
        have the CUDA cache cleared before the next language starts.

        Args:
            data: DataFrame with '语音识别结果' (source text) and '语言'
                (target language) columns.

        Returns:
            list[str] of translations aligned with data's rows.
        """
        logger.info("开始按语言批量翻译...")

        # Bucket texts (and their row indices) by target language.
        lang_groups = {'英语': [], '马来语': [], '泰语': []}
        lang_indices = {'英语': [], '马来语': [], '泰语': []}

        for i, (text, lang) in enumerate(zip(data['语音识别结果'], data['语言'])):
            if lang in lang_groups:
                lang_groups[lang].append(text)
                lang_indices[lang].append(i)

        results = [""] * len(data)
        batch_size = self.config["translation_batch_size"]

        for lang in ['英语', '马来语', '泰语']:
            texts = lang_groups[lang]
            if not texts:
                continue
            indices = lang_indices[lang]
            logger.info(f"翻译{len(texts)}个{lang}文本")

            for start in tqdm(range(0, len(texts), batch_size), desc=f"翻译{lang}"):
                batch_texts = texts[start:start + batch_size]
                batch_indices = indices[start:start + batch_size]

                translations = self.translate_batch_fast(batch_texts, lang)

                # zip stops at the shorter sequence, matching the old
                # explicit bounds check.
                for row_idx, translation in zip(batch_indices, translations):
                    results[row_idx] = translation

            # Release cached GPU memory between languages.
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

        return results

    def process_dataset(self, csv_file, output_file):
        """Run ASR (with JSON caching) and translation over one CSV.

        Args:
            csv_file: file name inside csv_dir.
            output_file: destination CSV path for the augmented data.

        Returns:
            The DataFrame with added '语音识别结果' and 'answer' columns.
        """
        logger.info(f"开始处理数据集: {csv_file}")

        csv_path = self.csv_dir + csv_file
        data = pd.read_csv(csv_path)
        logger.info(f"数据集大小: {len(data)}")

        # ASR results are cached per input file so re-runs skip transcription.
        cache_dir = Path("cache")
        cache_dir.mkdir(exist_ok=True)
        asr_cache_path = cache_dir / f"asr_results_{Path(csv_file).stem}.json"

        if self.config["use_cache"] and asr_cache_path.exists():
            logger.info("加载ASR缓存...")
            with open(asr_cache_path, 'r', encoding='utf-8') as f:
                asr_results = json.load(f)
        else:
            asr_results = self.process_asr_optimized(data)
            with open(asr_cache_path, 'w', encoding='utf-8') as f:
                json.dump(asr_results, f, ensure_ascii=False, indent=2)

        data['语音识别结果'] = asr_results

        translation_results = self.process_translation_by_language_optimized(data)
        data['answer'] = translation_results

        data.to_csv(output_file, index=False)
        logger.info(f"结果已保存到: {output_file}")

        # Success statistics; guarded so an empty dataset does not raise
        # ZeroDivisionError.
        if len(data):
            asr_success = len([r for r in asr_results if r])
            trans_success = len(data[data['answer'].str.strip() != ''])
            logger.info(f"ASR成功率: {asr_success}/{len(data)} ({asr_success/len(data)*100:.1f}%)")
            logger.info(f"翻译成功率: {trans_success}/{len(data)} ({trans_success/len(data)*100:.1f}%)")

        return data

# Stable, optimized configuration.
# Written to config_stable.json by main() and then re-read through
# StableOptimizedTranslationSystem.load_config(); keys mirror the
# in-code defaults.
STABLE_CONFIG = {
    "asr_model": "iic/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
    "asr_model_revision": "v2.0.4", 
    "translation_model": "facebook/nllb-200-3.3B",
    "asr_batch_size": 1,
    "translation_batch_size": 24,  # texts per translation batch
    "max_length": 200,             # generation length cap (speed over length)
    "num_beams": 3,                # small beam count for faster decoding
    "length_penalty": 1.0,
    "early_stopping": True,
    "device": "cuda:0",
    "torch_dtype": "float16",
    "use_cache": True              # reuse cached ASR results between runs
}

def main():
    """Entry point: persist the stable config, then process each dataset.

    Writes STABLE_CONFIG to config_stable.json, builds the translation
    system from it, and runs every (input, output) CSV pair, logging
    per-dataset timing. Failures on one dataset do not stop the others.
    """
    # Persist the baked-in config so this run (and later re-runs) use it.
    with open('config_stable.json', 'w', encoding='utf-8') as f:
        json.dump(STABLE_CONFIG, f, ensure_ascii=False, indent=2)

    system = StableOptimizedTranslationSystem("config_stable.json")

    datasets = [
        ('testa.csv', 'testa_predict_stable.csv'),
        # ('testb.csv', 'testb_predict_stable.csv')
    ]

    for csv_file, output_file in datasets:
        try:
            logger.info(f"开始处理 {csv_file}")
            start_time = pd.Timestamp.now()

            results = system.process_dataset(csv_file, output_file)

            end_time = pd.Timestamp.now()
            duration = (end_time - start_time).total_seconds()

            logger.info(f"{csv_file} 处理完成！")
            logger.info(f"处理时间: {duration/60:.1f}分钟")
            # Guard: an empty dataset would otherwise raise ZeroDivisionError.
            if len(results):
                logger.info(f"平均每条: {duration/len(results):.2f}秒")

        except Exception as e:
            # logger.exception logs the full traceback through the logging
            # system, replacing logger.error + manual traceback.print_exc().
            logger.exception(f"处理 {csv_file} 失败: {e}")

if __name__ == "__main__":
    main()