import torch, os, sys, argparse, re
from pathlib import Path

# Configure huggingface_hub before importing transformers so the endpoint
# override takes effect: route model downloads through the hf-mirror.com mirror.
os.environ['HF_ENDPOINT']='https://hf-mirror.com'

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

class NLLBTranslator:
    """Wrapper around the NLLB-200 seq2seq model for text translation.

    Loads the tokenizer/model (preferring an already-cached local copy to
    avoid any network access), moves the model to GPU when available, and
    exposes single-text and batched translation helpers keyed by FLORES-200
    language codes or plain English language names.
    """

    def __init__(self, model_name="facebook/nllb-200-distilled-600M", cache_dir: str = None, use_proxy=False):
        """Initialize the NLLB translator.

        Args:
            model_name: Hugging Face model identifier.
            cache_dir: Local cache directory passed to ``from_pretrained``.
            use_proxy: Informational flag; only affects a log line (proxy env
                vars are configured elsewhere, see ``setup_environment``).
        """
        print("正在加载模型和分词器...")

        # Decide once whether the model is already cached; reuse the answer as
        # the local_files_only flag so both load calls share one code path.
        local_model_exists = self._check_local_model_exists(model_name, cache_dir)

        if local_model_exists:
            print("检测到本地模型，直接加载...")
        else:
            print("本地模型不存在，从网络下载...")
            if use_proxy:
                print("使用代理下载模型...")

        # local_files_only=True forbids any network connection; False allows
        # a download when the cache is missing.
        self.tokenizer = AutoTokenizer.from_pretrained(
            model_name,
            cache_dir=cache_dir,
            local_files_only=local_model_exists
        )
        self.model = AutoModelForSeq2SeqLM.from_pretrained(
            model_name,
            cache_dir=cache_dir,
            local_files_only=local_model_exists
        )

        # Place the model on GPU when one is available, else CPU.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = self.model.to(self.device)
        print(f"模型已加载到设备: {self.device}")

        # FLORES-200 language code -> human-readable name mapping.
        self.language_codes = {
            'ace_Arab': 'Acehnese (Arabic script)',
            'ace_Latn': 'Acehnese (Latin script)',
            'acm_Arab': 'Mesopotamian Arabic',
            'acq_Arab': 'Ta\'izzi-Adeni Arabic',
            'aeb_Arab': 'Tunisian Arabic',
            'afr_Latn': 'Afrikaans',
            'ajp_Arab': 'South Levantine Arabic',
            'aka_Latn': 'Akan',
            'amh_Ethi': 'Amharic',
            'apc_Arab': 'North Levantine Arabic',
            'arb_Arab': 'Modern Standard Arabic',
            'ars_Arab': 'Najdi Arabic',
            'ary_Arab': 'Moroccan Arabic',
            'arz_Arab': 'Egyptian Arabic',
            'asm_Beng': 'Assamese',
            'ast_Latn': 'Asturian',
            'awa_Deva': 'Awadhi',
            'ayr_Latn': 'Central Aymara',
            'azb_Arab': 'South Azerbaijani',
            'azj_Latn': 'North Azerbaijani',
            'bak_Cyrl': 'Bashkir',
            'bam_Latn': 'Bambara',
            'ban_Latn': 'Balinese',
            'bel_Cyrl': 'Belarusian',
            'bem_Latn': 'Bemba',
            'ben_Beng': 'Bengali',
            'bho_Deva': 'Bhojpuri',
            'bjn_Arab': 'Banjar (Arabic script)',
            'bjn_Latn': 'Banjar (Latin script)',
            'bod_Tibt': 'Tibetan',
            'bos_Latn': 'Bosnian',
            'bug_Latn': 'Buginese',
            'bul_Cyrl': 'Bulgarian',
            'cat_Latn': 'Catalan',
            'ceb_Latn': 'Cebuano',
            'ces_Latn': 'Czech',
            'cjk_Latn': 'Chokwe',
            'ckb_Arab': 'Central Kurdish',
            'crh_Latn': 'Crimean Tatar',
            'cym_Latn': 'Welsh',
            'dan_Latn': 'Danish',
            'deu_Latn': 'German',
            'dik_Latn': 'Southwestern Dinka',
            'dyu_Latn': 'Dyula',
            'dzo_Tibt': 'Dzongkha',
            'ell_Grek': 'Greek',
            'eng_Latn': 'English',
            'epo_Latn': 'Esperanto',
            'est_Latn': 'Estonian',
            'eus_Latn': 'Basque',
            'ewe_Latn': 'Ewe',
            'fao_Latn': 'Faroese',
            'fij_Latn': 'Fijian',
            'fin_Latn': 'Finnish',
            'fon_Latn': 'Fon',
            'fra_Latn': 'French',
            'fur_Latn': 'Friulian',
            'fuv_Latn': 'Nigerian Fulfulde',
            'gla_Latn': 'Scottish Gaelic',
            'gle_Latn': 'Irish',
            'glg_Latn': 'Galician',
            'grn_Latn': 'Guarani',
            'guj_Gujr': 'Gujarati',
            'hat_Latn': 'Haitian Creole',
            'hau_Latn': 'Hausa',
            'heb_Hebr': 'Hebrew',
            'hin_Deva': 'Hindi',
            'hne_Deva': 'Chhattisgarhi',
            'hrv_Latn': 'Croatian',
            'hun_Latn': 'Hungarian',
            'hye_Armn': 'Armenian',
            'ibo_Latn': 'Igbo',
            'ilo_Latn': 'Ilocano',
            'ind_Latn': 'Indonesian',
            'isl_Latn': 'Icelandic',
            'ita_Latn': 'Italian',
            'jav_Latn': 'Javanese',
            'jpn_Jpan': 'Japanese',
            'kab_Latn': 'Kabyle',
            'kac_Latn': 'Jingpho',
            'kam_Latn': 'Kamba',
            'kan_Knda': 'Kannada',
            'kas_Arab': 'Kashmiri (Arabic script)',
            'kas_Deva': 'Kashmiri (Devanagari script)',
            'kat_Geor': 'Georgian',
            'kaz_Cyrl': 'Kazakh',
            'kbp_Latn': 'Kabiyè',
            'kea_Latn': 'Kabuverdianu',
            'khm_Khmr': 'Khmer',
            'kik_Latn': 'Kikuyu',
            'kin_Latn': 'Kinyarwanda',
            'kir_Cyrl': 'Kyrgyz',
            'kmb_Latn': 'Kimbundu',
            'kon_Latn': 'Kongo',
            'kor_Hang': 'Korean',
            'kmr_Latn': 'Northern Kurdish',
            'lao_Laoo': 'Lao',
            'lij_Latn': 'Ligurian',
            'lim_Latn': 'Limburgish',
            'lin_Latn': 'Lingala',
            'lit_Latn': 'Lithuanian',
            'lmo_Latn': 'Lombard',
            'ltg_Latn': 'Latgalian',
            'ltz_Latn': 'Luxembourgish',
            'lua_Latn': 'Luba-Kasai',
            'lug_Latn': 'Ganda',
            'luo_Latn': 'Luo',
            'lus_Latn': 'Mizo',
            'lvs_Latn': 'Standard Latvian',
            'mag_Deva': 'Magahi',
            'mai_Deva': 'Maithili',
            'mal_Mlym': 'Malayalam',
            'mar_Deva': 'Marathi',
            'min_Latn': 'Minangkabau',
            'mkd_Cyrl': 'Macedonian',
            'plt_Latn': 'Plateau Malagasy',
            'mlt_Latn': 'Maltese',
            'mni_Beng': 'Meitei (Bengali script)',
            'khk_Cyrl': 'Halh Mongolian',
            'mos_Latn': 'Mossi',
            'mri_Latn': 'Maori',
            'mya_Mymr': 'Burmese',
            'nld_Latn': 'Dutch',
            'nno_Latn': 'Norwegian Nynorsk',
            'nob_Latn': 'Norwegian Bokmål',
            'npi_Deva': 'Nepali',
            'nso_Latn': 'Northern Sotho',
            'nus_Latn': 'Nuer',
            'nya_Latn': 'Nyanja',
            'oci_Latn': 'Occitan',
            'gaz_Latn': 'West Central Oromo',
            'ory_Orya': 'Odia',
            'pag_Latn': 'Pangasinan',
            'pan_Guru': 'Eastern Panjabi',
            'pap_Latn': 'Papiamento',
            'pes_Arab': 'Western Persian',
            'pol_Latn': 'Polish',
            'por_Latn': 'Portuguese',
            'prs_Arab': 'Dari',
            'pbt_Arab': 'Southern Pashto',
            'quy_Latn': 'Ayacucho Quechua',
            'ron_Latn': 'Romanian',
            'run_Latn': 'Rundi',
            'rus_Cyrl': 'Russian',
            'sag_Latn': 'Sango',
            'san_Deva': 'Sanskrit',
            'sat_Olck': 'Santali',
            'scn_Latn': 'Sicilian',
            'shn_Mymr': 'Shan',
            'sin_Sinh': 'Sinhala',
            'slk_Latn': 'Slovak',
            'slv_Latn': 'Slovenian',
            'smo_Latn': 'Samoan',
            'sna_Latn': 'Shona',
            'snd_Arab': 'Sindhi',
            'som_Latn': 'Somali',
            'sot_Latn': 'Southern Sotho',
            'spa_Latn': 'Spanish',
            'als_Latn': 'Tosk Albanian',
            'srd_Latn': 'Sardinian',
            'srp_Cyrl': 'Serbian',
            'ssw_Latn': 'Swati',
            'sun_Latn': 'Sundanese',
            'swe_Latn': 'Swedish',
            'swh_Latn': 'Swahili',
            'szl_Latn': 'Silesian',
            'tam_Taml': 'Tamil',
            'tat_Cyrl': 'Tatar',
            'tel_Telu': 'Telugu',
            'tgk_Cyrl': 'Tajik',
            'tgl_Latn': 'Tagalog',
            'tha_Thai': 'Thai',
            'tir_Ethi': 'Tigrinya',
            'taq_Latn': 'Tamasheq',
            'taq_Tfng': 'Tamasheq (Tifinagh script)',
            'tpi_Latn': 'Tok Pisin',
            'tsn_Latn': 'Tswana',
            'tso_Latn': 'Tsonga',
            'tuk_Latn': 'Turkmen',
            'tum_Latn': 'Tumbuka',
            'tur_Latn': 'Turkish',
            'twi_Latn': 'Twi',
            'tzm_Tfng': 'Central Atlas Tamazight',
            'uig_Arab': 'Uyghur',
            'ukr_Cyrl': 'Ukrainian',
            'umb_Latn': 'Umbundu',
            'urd_Arab': 'Urdu',
            'uzn_Latn': 'Northern Uzbek',
            'vec_Latn': 'Venetian',
            'vie_Latn': 'Vietnamese',
            'war_Latn': 'Waray',
            'wol_Latn': 'Wolof',
            'xho_Latn': 'Xhosa',
            'ydd_Hebr': 'Eastern Yiddish',
            'yor_Latn': 'Yoruba',
            'yue_Hant': 'Yue Chinese',
            'zho_Hans': 'Chinese (Simplified)',
            'zho_Hant': 'Chinese (Traditional)',
            'zsm_Latn': 'Standard Malay',
            'zul_Latn': 'Zulu'
        }

        # Build a reverse map so users can pass language names instead of codes.
        self.name_to_code = {}
        for code, name in self.language_codes.items():
            # Key by the main language name (text before any parenthesis)...
            main_name = name.split('(')[0].strip().lower()
            self.name_to_code[main_name] = code
            # ...and also by the full display name.
            self.name_to_code[name.lower()] = code

    def _check_local_model_exists(self, model_name, cache_dir):
        """Return True when the model is already in the local cache.

        Probes the cache by loading the tokenizer with ``local_files_only=True``;
        success means no download is needed.
        """
        try:
            _ = AutoTokenizer.from_pretrained(
                model_name,
                cache_dir=cache_dir,
                local_files_only=True
            )
            return True
        except Exception:
            # Any load failure (missing cache, corrupt files) means "not local".
            # Narrowed from a bare except so Ctrl-C still propagates.
            return False

    def get_lang_code(self, language):
        """Resolve a user-supplied language (code or name) to a FLORES-200 code.

        Returns the input unchanged (stripped when it is an exact code) if it
        cannot be resolved; callers validate membership separately.
        """
        lang = language.strip()

        # Exact FLORES-200 code? Check before lowercasing — the codes are
        # mixed-case ('eng_Latn'), so a lowercased input would never match.
        if lang in self.language_codes:
            return lang

        # Otherwise try the (lowercased) language-name lookup.
        lang_lower = lang.lower()
        if lang_lower in self.name_to_code:
            return self.name_to_code[lang_lower]

        # Unknown: hand back the original input for the caller's error message.
        return language

    def get_forced_bos_token_id(self, lang_code):
        """Return the tokenizer id used to force the target language at BOS.

        Returns None when the code does not map to a real language token so
        ``generate`` can fall back to its default behavior.
        """
        try:
            token_id = self.tokenizer.convert_tokens_to_ids(lang_code)
            # Heuristic: very small ids are special/unknown tokens, not
            # language tokens. TODO(review): comparing against
            # tokenizer.unk_token_id would be more precise — confirm.
            if token_id < 100:
                return None
            return token_id
        except Exception:
            return None

    def translate_text(self, text, source_lang, target_lang, max_length=256, num_beams=5):
        """Translate a single text string.

        Thin wrapper over :meth:`batch_translate` with a one-element batch,
        so single and batch paths cannot drift apart.

        Raises:
            Exception: wrapping any underlying error (including unsupported
                language codes) with a translated-context message.
        """
        try:
            return self.batch_translate(
                [text],
                source_lang,
                target_lang,
                batch_size=1,
                max_length=max_length,
                num_beams=num_beams
            )[0]
        except Exception as e:
            # Preserve the cause chain for debugging.
            raise Exception(f"翻译过程中出错: {str(e)}") from e

    def batch_translate(self, texts, source_lang, target_lang, batch_size=8, max_length=256, num_beams=5):
        """Translate a list of texts in fixed-size batches.

        Args:
            texts: List of source-language strings.
            source_lang / target_lang: FLORES-200 codes or language names.
            batch_size: Number of texts encoded/generated per model call.
            max_length: Max generated sequence length.
            num_beams: Beam-search width.

        Returns:
            List of translated strings, in input order.

        Raises:
            ValueError: when either language cannot be resolved.
        """
        src_code = self.get_lang_code(source_lang)
        tgt_code = self.get_lang_code(target_lang)

        # Validate resolved codes up front.
        if src_code not in self.language_codes:
            raise ValueError(f"不支持的源语言: {source_lang}")
        if tgt_code not in self.language_codes:
            raise ValueError(f"不支持的目标语言: {target_lang}")

        forced_bos_token_id = self.get_forced_bos_token_id(tgt_code)

        # Source language is constant for the whole call; set it once,
        # outside the batch loop.
        self.tokenizer.src_lang = src_code

        all_translations = []

        for i in range(0, len(texts), batch_size):
            batch_texts = texts[i:i + batch_size]

            # Inputs are truncated to 128 tokens (NLLB subtitle/sentence use).
            inputs = self.tokenizer(
                batch_texts,
                return_tensors="pt",
                padding=True,
                truncation=True,
                max_length=128
            ).to(self.device)

            generate_kwargs = {
                **inputs,
                'max_length': max_length,
                'num_beams': num_beams,
                'early_stopping': True
            }

            # Only force the BOS token when we resolved a real language token.
            if forced_bos_token_id is not None:
                generate_kwargs['forced_bos_token_id'] = forced_bos_token_id

            with torch.no_grad():
                translated_tokens = self.model.generate(**generate_kwargs)

            translations = self.tokenizer.batch_decode(
                translated_tokens,
                skip_special_tokens=True
            )

            all_translations.extend(translations)

        return all_translations

    def list_languages(self):
        """Return the mapping of supported FLORES-200 codes to language names."""
        return self.language_codes


class SRTProcessor:
    """SRT subtitle file processor: parse, rewrite, and translate .srt files."""

    @staticmethod
    def parse_srt_file(file_path):
        """Parse an SRT file into a list of subtitle blocks.

        Each block is a dict: {'index': int, 'timestamp': str, 'text': str}
        (multi-line cue text is collapsed to a single space-joined line).
        """
        with open(file_path, 'r', encoding='utf-8') as f:
            content = f.read()

        # Normalize the tail: the regex below requires every block to end with
        # a blank line, but the last block of a file often has no trailing
        # blank line and would otherwise be silently dropped.
        content = content.strip() + '\n\n'

        # SRT block layout: index line + timestamp line + text + blank line.
        pattern = r'(\d+)\s*\n(\d{2}:\d{2}:\d{2},\d{3}\s*-->\s*\d{2}:\d{2}:\d{2},\d{3})\s*\n(.*?)\s*\n\s*\n'
        blocks = re.findall(pattern, content, re.DOTALL)

        subtitles = []
        for index, timestamp, text in blocks:
            # Collapse internal newlines/extra whitespace in the cue text.
            text = re.sub(r'\s+', ' ', text.strip())
            subtitles.append({
                'index': int(index),
                'timestamp': timestamp,
                'text': text
            })

        # If the regex matched nothing at all, fall back to a line scanner.
        if not subtitles:
            subtitles = SRTProcessor._parse_srt_alternative(content)

        return subtitles

    @staticmethod
    def _parse_srt_alternative(content):
        """Fallback line-by-line SRT parser for files the regex cannot match.

        NOTE(review): this scanner assumes the line after a numeric index is a
        timestamp without validating its format — confirm acceptable for the
        malformed files it is meant to rescue.
        """
        subtitles = []
        lines = content.split('\n')
        i = 0

        while i < len(lines):
            line = lines[i].strip()
            if not line:
                i += 1
                continue

            # A block starts with a bare integer index.
            try:
                index = int(line)
            except ValueError:
                i += 1
                continue

            # The next line should be the timestamp.
            i += 1
            if i >= len(lines):
                break

            timestamp = lines[i].strip()
            i += 1

            # Collect cue text lines until a blank line.
            text_lines = []
            while i < len(lines) and lines[i].strip():
                text_lines.append(lines[i].strip())
                i += 1

            if text_lines:
                subtitles.append({
                    'index': index,
                    'timestamp': timestamp,
                    'text': ' '.join(text_lines)
                })

            i += 1  # skip the blank separator line

        return subtitles

    @staticmethod
    def write_srt_file(subtitles, output_path):
        """Write subtitle blocks back out in standard SRT format."""
        with open(output_path, 'w', encoding='utf-8') as f:
            for subtitle in subtitles:
                f.write(f"{subtitle['index']}\n")
                f.write(f"{subtitle['timestamp']}\n")
                f.write(f"{subtitle['text']}\n\n")

    @staticmethod
    def translate_srt_file(translator, input_file, output_file, source_lang, target_lang, batch_size=8):
        """Translate an SRT file end-to-end; returns True on success."""
        print(f"正在解析SRT文件: {input_file}")
        subtitles = SRTProcessor.parse_srt_file(input_file)

        if not subtitles:
            print("未找到有效的字幕内容")
            return False

        print(f"找到 {len(subtitles)} 条字幕")

        # Translate all cue texts in batches.
        texts = [subtitle['text'] for subtitle in subtitles]

        print("开始翻译字幕...")
        try:
            translated_texts = translator.batch_translate(
                texts=texts,
                source_lang=source_lang,
                target_lang=target_lang,
                batch_size=batch_size
            )
        except Exception as e:
            print(f"翻译失败: {e}")
            return False

        # Replace cue texts in place, preserving indices and timestamps.
        for subtitle, translated in zip(subtitles, translated_texts):
            subtitle['text'] = translated

        SRTProcessor.write_srt_file(subtitles, output_file)
        print(f"翻译完成！输出文件: {output_file}")
        return True


def setup_environment(proxy=None, token=None):
    """Configure proxy/timeout environment and optionally log into Hugging Face.

    Args:
        proxy: Proxy URL to export via the proxy environment variables.
        token: Hugging Face access token; when given, performs a login.

    Returns:
        True on success, False when the Hugging Face login fails.
    """
    if proxy:
        # Export both lower- and upper-case variants: different HTTP clients
        # honor different spellings of the proxy variables.
        for var in ('http_proxy', 'https_proxy', 'HTTP_PROXY', 'HTTPS_PROXY'):
            os.environ[var] = proxy
        print(f"已设置代理: {proxy}")

    # Raise the hub download timeout (seconds) for slow mirror connections.
    os.environ['HF_HUB_DOWNLOAD_TIMEOUT'] = '60'

    # Log into Hugging Face only when a token was supplied.
    if token:
        try:
            from huggingface_hub import login
            login(token)
            print("✓ Hugging Face 登录成功")
        except Exception as e:
            print(f"✗ Hugging Face 登录失败: {e}")
            return False

    return True


def main():
    """CLI entry point: parse arguments, build the translator, and translate
    either an SRT subtitle file, a plain text file (one text per line), or a
    literal text string. Returns a process exit code (0 success, 1 failure)."""
    parser = argparse.ArgumentParser(description="NLLB-200 翻译器")
    parser.add_argument("-i", "--input", required=True, help="输入文本或文件路径")
    parser.add_argument("-o", "--output", help="输出文件路径（可选）")
    parser.add_argument("-s", "--source_lang", required=True, help="源语言代码或名称")
    parser.add_argument("-t", "--target_lang", required=True, help="目标语言代码或名称")
    parser.add_argument("-m", "--model", help="模型名称")
    parser.add_argument("--model_dir", default="./models", help="模型缓存目录")
    parser.add_argument("--proxy", help="代理地址")
    parser.add_argument("--max_length", type=int, default=256, help="最大生成长度")
    parser.add_argument("--num_beams", type=int, default=5, help="beam search数量")
    parser.add_argument("--batch_size", type=int, default=8, help="批量翻译大小")
    
    args = parser.parse_args()

    # Default model when none was specified on the command line.
    model_name = args.model or "facebook/nllb-200-distilled-600M"
    
    # Configure proxy/timeout env vars (needed if the model must be downloaded).
    setup_environment(args.proxy)
    
    # Build the translator; any load/download failure aborts with exit code 1.
    try:
        print("正在初始化翻译器...")
        translator = NLLBTranslator(
            model_name,
            cache_dir=args.model_dir, 
            use_proxy=bool(args.proxy)
        )
        print("翻译器初始化完成！")
    except Exception as e:
        print(f"翻译器初始化失败: {e}")
        return 1
    
    # Dispatch on input kind: an existing file with a .srt suffix gets the
    # subtitle pipeline; anything else is treated as plain text.
    input_path = Path(args.input)
    is_srt_file = input_path.is_file() and input_path.suffix.lower() == '.srt'
    
    if is_srt_file:
        # SRT translation mode.
        if not args.output:
            # No output path given: derive one as <stem>_<target_lang>.srt
            # next to the input file.
            output_path = input_path.parent / f"{input_path.stem}_{args.target_lang}{input_path.suffix}"
        else:
            output_path = Path(args.output)
        
        success = SRTProcessor.translate_srt_file(
            translator=translator,
            input_file=args.input,
            output_file=str(output_path),
            source_lang=args.source_lang,
            target_lang=args.target_lang,
            batch_size=args.batch_size
        )
        
        return 0 if success else 1
    
    else:
        # Plain-text translation mode.
        input_texts = []
        if os.path.isfile(args.input):
            # Input is a file: read one text per non-empty line.
            try:
                with open(args.input, 'r', encoding='utf-8') as f:
                    input_texts = [line.strip() for line in f if line.strip()]
                print(f"从文件读取 {len(input_texts)} 行文本")
            except Exception as e:
                print(f"读取输入文件失败: {e}")
                return 1
        else:
            # Input is not a file: treat the argument itself as the text.
            input_texts = [args.input]
        
        # Translate (single text vs. batch) and report any failure as exit 1.
        try:
            if len(input_texts) == 1:
                # Single-text translation.
                result = translator.translate_text(
                    text=input_texts[0],
                    source_lang=args.source_lang,
                    target_lang=args.target_lang,
                    max_length=args.max_length,
                    num_beams=args.num_beams
                )
                translations = [result]
            else:
                # Batch translation.
                translations = translator.batch_translate(
                    texts=input_texts,
                    source_lang=args.source_lang,
                    target_lang=args.target_lang,
                    batch_size=args.batch_size,
                    max_length=args.max_length,
                    num_beams=args.num_beams
                )
            
            # Emit results: to the output file when given, else to stdout
            # (numbered only when there is more than one input text).
            if args.output:
                with open(args.output, 'w', encoding='utf-8') as f:
                    for translation in translations:
                        f.write(translation + '\n')
                print(f"翻译结果已保存到: {args.output}")
            else:
                for i, translation in enumerate(translations):
                    if len(input_texts) > 1:
                        print(f"{i+1}. {translation}")
                    else:
                        print(translation)
            
            print("翻译完成！")
            
        except Exception as e:
            print(f"翻译失败: {e}")
            return 1
        
        return 0


if __name__ == "__main__":
    sys.exit(main())