"""Translation module implementation"""

from typing import Dict, Optional
import asyncio
import time
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, MBartForConditionalGeneration
from modules.base.base_model import BaseModel
from modules.base.exceptions import ModelLoadError, InvalidInputError
from utils.validators import validate_text_input
import re
from utils.db_manager import DBManager
from utils.term_processor import TermProcessor

class TranslationModel(BaseModel):
    """Translation model implementation using Hugging Face seq2seq models.

    Wraps model loading (with retries), input validation, preprocessing
    (including technical-term substitution via ``TermProcessor``), beam-search
    generation, and language-aware postprocessing. Falls back to returning the
    original input on most failures rather than raising, so callers always get
    a string back from :meth:`predict`.
    """

    def __init__(self, model_name: str, config: Optional[Dict] = None):
        """Initialize translation settings and helper objects.

        Args:
            model_name: Hugging Face model identifier to load.
            config: Optional configuration dict. Recognized keys:
                ``source_lang`` (default "en"), ``target_lang`` (default "zh"),
                ``max_retries`` (default 3), ``retry_delay`` seconds (default 5),
                plus ``max_length``/``max_input_length``/``num_beams`` read via
                ``self.config`` by other methods.
        """
        super().__init__(model_name, config)
        # Bug fix: ``config`` is declared Optional but the original called
        # ``config.get(...)`` unconditionally, raising AttributeError when no
        # config dict was supplied. Use an empty dict as the fallback.
        cfg = config or {}
        self.source_lang = cfg.get("source_lang", "en")
        self.target_lang = cfg.get("target_lang", "zh")
        self.tokenizer = None
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.max_retries = cfg.get("max_retries", 3)
        self.retry_delay = cfg.get("retry_delay", 5)

        # Terminology processor: substitutes domain-specific terms before the
        # text reaches the model (see _replace_technical_terms).
        self.term_processor = TermProcessor(
            self.source_lang,
            self.target_lang
        )

        # Default model per (source, target) language pair.
        self.default_models = {
            ("en", "zh"): "Helsinki-NLP/opus-mt-en-zh",
            ("zh", "en"): "Helsinki-NLP/opus-mt-zh-en",
            ("en", "fr"): "Helsinki-NLP/opus-mt-en-fr",
            ("fr", "en"): "Helsinki-NLP/opus-mt-fr-en"
        }

        # Per-model language-code spellings. NLLB-style models do not use
        # bare ISO codes (e.g. Chinese is "zho_Hans", not "zh"); predict()
        # consults this table when resolving the forced BOS token.
        self.model_lang_codes = {
            "facebook/nllb-200-distilled-600M": {
                "en": "eng_Latn",
                "zh": "zho_Hans",
                "fr": "fra_Latn",
                "de": "deu_Latn"
            }
        }

    async def load(self) -> None:
        """Load the translation model with retry mechanism.

        Retries up to ``self.max_retries`` times, sleeping ``self.retry_delay``
        seconds between attempts.

        Raises:
            ModelLoadError: if every attempt fails.
        """
        for attempt in range(self.max_retries):
            try:
                self.logger.info(f"Loading translation model {self.model_name} on {self.device} (Attempt {attempt + 1})")

                # Load the tokenizer; from_tf=False forces the PyTorch
                # (non-TensorFlow) weights.
                self.tokenizer = AutoTokenizer.from_pretrained(
                    self.model_name,
                    use_fast=True,
                    from_tf=False
                )

                # Choose the loading class based on the model family: mBART
                # needs its dedicated class, everything else goes through the
                # generic seq2seq auto class.
                if "mbart" in self.model_name.lower():
                    self.model = MBartForConditionalGeneration.from_pretrained(
                        self.model_name,
                        from_tf=False
                    ).to(self.device)
                else:
                    self.model = AutoModelForSeq2SeqLM.from_pretrained(
                        self.model_name,
                        from_tf=False
                    ).to(self.device)

                self.logger.info(f"Successfully loaded translation model: {self.model_name}")
                break

            except Exception as e:
                self.logger.error(f"Attempt {attempt + 1} failed: {str(e)}")
                if attempt < self.max_retries - 1:
                    self.logger.info(f"Retrying in {self.retry_delay} seconds...")
                    await asyncio.sleep(self.retry_delay)
                else:
                    raise ModelLoadError(f"Failed to load model after {self.max_retries} attempts: {str(e)}")

    async def validate_input(self, input_data: str) -> bool:
        """Validate translation input length via the shared validator.

        Raises:
            InvalidInputError: re-raised from the validator after logging.
        """
        try:
            return validate_text_input(
                input_data,
                min_length=1,
                max_length=self.config.get("max_input_length", 512)
            )
        except InvalidInputError as e:
            self.logger.error(f"Input validation failed: {str(e)}")
            raise

    async def preprocess(self, input_data: str) -> Dict:
        """Preprocess input text into model-ready tensors on ``self.device``.

        Returns:
            Dict of tokenizer outputs (input_ids, attention_mask, ...) moved
            to the target device.

        Raises:
            InvalidInputError: if the input is empty or whitespace-only.
        """
        self.logger.debug(f"Preprocessing input: {input_data[:50]}...")

        try:
            # 1. Input validation and special-case handling
            if not input_data or not input_data.strip():
                raise InvalidInputError("Empty input text")

            # 2. Pure numbers / pure punctuation: tokenize as-is, skipping
            #    term substitution.
            if input_data.isnumeric() or all(not c.isalnum() for c in input_data):
                inputs = self.tokenizer(
                    input_data,
                    return_tensors="pt",
                    padding=True,
                    truncation=True,
                    max_length=self.config.get("max_length", 512)
                )
                return {k: v.to(self.device) for k, v in inputs.items()}

            # 3. Substitute technical terms; fall back to the raw input if the
            #    processor returns nothing.
            processed_text = await self._replace_technical_terms(input_data)
            if not processed_text:
                processed_text = input_data
            self.logger.debug(f"After technical terms replacement: {processed_text}")

            # 4. Tokenize and encode
            try:
                inputs = self.tokenizer(
                    processed_text,
                    return_tensors="pt",
                    padding=True,
                    truncation=True,
                    max_length=self.config.get("max_length", 512)
                )
                return {k: v.to(self.device) for k, v in inputs.items()}

            except Exception as e:
                self.logger.error(f"Tokenization failed: {str(e)}")
                # If tokenization of the processed text fails, retry with the
                # original text.
                inputs = self.tokenizer(
                    input_data,
                    return_tensors="pt",
                    padding=True,
                    truncation=True,
                    max_length=self.config.get("max_length", 512)
                )
                return {k: v.to(self.device) for k, v in inputs.items()}

        except Exception as e:
            self.logger.error(f"Preprocessing failed: {str(e)}")
            raise

    def _is_mixed_language(self, text: str) -> bool:
        """Return True if the text contains both CJK and Latin letters."""
        has_chinese = bool(re.search(r'[\u4e00-\u9fff]', text))
        has_english = bool(re.search(r'[a-zA-Z]', text))
        return has_chinese and has_english

    async def _handle_mixed_language(self, text: str) -> str:
        """Translate the foreign-language segments of mixed zh/en text.

        Splits the text into runs of Chinese vs. Latin characters, translates
        only the runs that differ from the target language, and re-joins the
        result. Returns the original text on any failure.
        """
        if not text:
            return ""

        try:
            # 1. Split the text into per-language segments
            segments = []
            current_text = ""
            current_type = None

            for char in text:
                if '\u4e00' <= char <= '\u9fff':  # CJK character
                    if current_type != 'zh' and current_text.strip():
                        segments.append((current_type, current_text.strip()))
                        current_text = ""
                    current_type = 'zh'
                elif char.isalpha():  # Latin/alphabetic character
                    if current_type != 'en' and current_text.strip():
                        segments.append((current_type, current_text.strip()))
                        current_text = ""
                    current_type = 'en'
                else:  # other characters (punctuation etc.) stay attached
                    current_text += char
                    continue
                current_text += char

            if current_text.strip():
                segments.append((current_type, current_text.strip()))

            # 2. Translate each segment whose language differs from the target
            processed_segments = []
            for lang_type, segment in segments:
                if not segment.strip():
                    continue

                # Decide per segment whether translation is needed
                if self.target_lang == 'en' and lang_type == 'zh':
                    translated = await self._translate_segment(segment, 'zh', 'en')
                    processed_segments.append(translated if translated else segment)
                elif self.target_lang == 'zh' and lang_type == 'en':
                    translated = await self._translate_segment(segment, 'en', 'zh')
                    processed_segments.append(translated if translated else segment)
                else:
                    processed_segments.append(segment)

            # 3. Merge and clean up whitespace/punctuation
            result = " ".join(s for s in processed_segments if s)
            result = re.sub(r'\s+', ' ', result)  # collapse extra whitespace
            result = re.sub(r'([。！？，、；：])\s+', r'\1', result)  # fix Chinese punctuation spacing
            result = result.strip()

            return result

        except Exception as e:
            self.logger.error(f"Mixed language processing failed: {str(e)}")
            return text

    async def _translate_segment(self, text: str, source_lang: str, target_lang: str) -> str:
        """Translate a single same-language segment.

        Returns the translated text, or the original text on failure.
        """
        try:
            if not text or not text.strip():
                return ""

            # Apply technical-term substitution first
            processed_text = await self._replace_technical_terms(text)
            if not processed_text:
                return text

            # Bug fix: the original permanently overwrote
            # self.source_lang/self.target_lang here, corrupting the
            # configured direction for every later call — including the
            # remaining segments in _handle_mixed_language, whose branches
            # read self.target_lang. Switch temporarily and always restore.
            orig_source, orig_target = self.source_lang, self.target_lang
            self.source_lang = source_lang
            self.target_lang = target_lang
            try:
                # Run the translation
                inputs = self.tokenizer(
                    processed_text,
                    return_tensors="pt",
                    padding=True,
                    truncation=True,
                    max_length=self.config.get("max_length", 512)
                )

                inputs = {k: v.to(self.device) for k, v in inputs.items()}

                outputs = self.model.generate(
                    **inputs,
                    max_length=self.config.get("max_length", 512),
                    num_beams=self.config.get("num_beams", 4),
                    early_stopping=True
                )

                translation = self.tokenizer.decode(
                    outputs[0],
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=True
                )

                return translation.strip()
            finally:
                self.source_lang, self.target_lang = orig_source, orig_target

        except Exception as e:
            self.logger.error(f"Segment translation failed: {str(e)}")
            return text  # return the original text when translation fails

    async def _replace_technical_terms(self, text: str) -> str:
        """Substitute technical terms via the configured TermProcessor."""
        return self.term_processor.process_text(text)

    async def predict(self, input_data: str) -> str:
        """Translate text, returning the input unchanged on any failure.

        Pipeline: validate -> tokenize -> generate -> decode -> postprocess ->
        sanity-check. Every failing stage logs and falls back to the original
        input rather than raising.
        """
        self.logger.info(f"Starting translation for input: {input_data[:50]}...")

        try:
            # 1. Input validation
            if not input_data or not input_data.strip():
                self.logger.warning("Empty input text, returning original")
                return input_data

            # 2. Pass through numbers and punctuation-only input untouched
            if input_data.isnumeric() or all(not c.isalnum() for c in input_data):
                return input_data

            # 3. Build the prompt (used for logging only, not fed to the model)
            prompt = self._build_prompt(input_data)
            self.logger.debug(f"Built prompt: {prompt[:100]}...")

            # 4. Preprocess the input
            try:
                # Note: only the raw text is tokenized; the prompt is not
                # included.
                inputs = self.tokenizer(
                    input_data,  # use the raw text, not the prompt
                    return_tensors="pt",
                    padding=True,
                    truncation=True,
                    max_length=self.config.get("max_length", 512)
                )
                inputs = {k: v.to(self.device) for k, v in inputs.items()}
            except Exception as e:
                self.logger.warning(f"Preprocessing failed: {str(e)}, using original")
                return input_data

            # 5. Generate the translation
            try:
                # Resolve a forced target-language BOS token for multilingual
                # models that expose lang_code_to_id (mBART/NLLB families).
                # Bug fix: the original always looked up f"{target}_Latn",
                # which can never match Chinese (NLLB spells it "zho_Hans"),
                # and the model_lang_codes table was never consulted. Prefer
                # the per-model table, falling back to the "_Latn" pattern.
                if hasattr(self.tokenizer, 'lang_code_to_id'):
                    lang_code = self.model_lang_codes.get(
                        self.model_name, {}
                    ).get(self.target_lang, f"{self.target_lang}_Latn")
                    forced_bos_token_id = self.tokenizer.lang_code_to_id.get(
                        lang_code,
                        None
                    )
                else:
                    forced_bos_token_id = None

                # NOTE(review): do_sample=True makes output non-deterministic,
                # which is unusual for translation — confirm this is intended.
                outputs = self.model.generate(
                    **inputs,
                    max_length=self.config.get("max_length", 512),
                    num_beams=5,
                    length_penalty=1.0,
                    early_stopping=True,
                    do_sample=True,
                    temperature=0.7,
                    top_k=50,
                    top_p=0.95,
                    repetition_penalty=1.2,
                    no_repeat_ngram_size=3,
                    forced_bos_token_id=forced_bos_token_id
                )
            except Exception as e:
                self.logger.error(f"Generation failed: {str(e)}")
                return input_data

            # 6. Decode the output
            try:
                translation = self.tokenizer.decode(
                    outputs[0],
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=True
                )
            except Exception as e:
                self.logger.error(f"Decoding failed: {str(e)}")
                return input_data

            # 7. Postprocess
            translation = self._postprocess_translation(translation)

            # 8. Sanity-check the result; reject blank, junk, or implausibly
            #    short translations.
            if not translation or translation.isspace():
                return input_data

            if all(c in '~-' for c in translation):
                return input_data

            if len(translation) < len(input_data) * 0.1:
                return input_data

            self.logger.info(f"Translation completed. Result: {translation}")
            return translation

        except Exception as e:
            self.logger.error(f"Translation failed: {str(e)}", exc_info=True)
            return input_data

    def _build_prompt(self, text: str) -> str:
        """Build a direction-tagged prompt string (used for logging only)."""
        if self.source_lang == "en" and self.target_lang == "zh":
            return f"[EN->ZH] {text}"
        elif self.source_lang == "zh" and self.target_lang == "en":
            return f"[ZH->EN] {text}"
        else:
            return f"[{self.source_lang}->{self.target_lang}] {text}"

    def _postprocess_translation(self, translation: str) -> str:
        """Clean up whitespace and normalize punctuation for the target language.

        Returns the input unchanged if it is blank or if cleanup fails.
        """
        if not translation or translation.isspace():
            return translation

        try:
            # 1. Normalize whitespace
            translation = translation.strip()
            translation = re.sub(r'\s+', ' ', translation)

            # 2. Chinese targets: tighten punctuation and convert ASCII
            #    punctuation to full-width equivalents.
            if self.target_lang == "zh":
                translation = re.sub(r'([。！？，、；：])\s+', r'\1', translation)
                translation = re.sub(r'\s+([。！？，、；：])', r'\1', translation)
                # Replace English punctuation with Chinese punctuation
                punctuation_map = {
                    ',': '，',
                    '.': '。',
                    '?': '？',
                    '!': '！',
                    ':': '：',
                    ';': '；'
                }
                for en_punct, zh_punct in punctuation_map.items():
                    translation = translation.replace(en_punct, zh_punct)

            # 3. English targets: fix spacing around punctuation
            elif self.target_lang == "en":
                # Ensure a space after punctuation
                translation = re.sub(r'([.,!?;:])([\w])', r'\1 \2', translation)
                # Fix common English formatting issues
                translation = re.sub(r'\s+([.,!?;:])', r'\1', translation)
                translation = re.sub(r'(\w)\s+\'s', r"\1's", translation)

            return translation.strip()

        except Exception as e:
            self.logger.warning(f"Post-processing failed: {str(e)}")
            return translation

    def _convert_number_to_chinese(self, number_str: str) -> str:
        """Convert an Arabic-digit string to Chinese numerals (digit-by-digit).

        Strings longer than 4 digits are returned unchanged.
        """
        chinese_numbers = {
            '0': '零', '1': '一', '2': '二', '3': '三', '4': '四',
            '5': '五', '6': '六', '7': '七', '8': '八', '9': '九'
        }

        # Short numbers: transliterate each digit
        if len(number_str) <= 4:
            return ''.join(chinese_numbers[d] for d in number_str)

        # Longer numbers are kept as-is
        return number_str

    def __repr__(self) -> str:
        return f"TranslationModel(model_name='{self.model_name}', source_lang='{self.source_lang}', target_lang='{self.target_lang}')"
