import json
import logging
import re

from fastapi import APIRouter, HTTPException, Request
from pydantic import BaseModel, validator

import model

# APIRouter exposing the /translate endpoint defined below.
router = APIRouter()

class T2TTRequest(BaseModel):
    """Validated request payload for the /translate endpoint.

    The ``text`` validator aggressively normalizes input: hyphenated line
    wraps are merged, harmful control characters stripped, and all
    whitespace collapsed to single spaces.
    """

    text: str
    src_lang: str = "cmn"  # Default to Chinese
    tgt_lang: str = "eng"  # Default to English

    @validator('text')
    def validate_text(cls, v):
        """Reject blank input, then clean it via _smart_text_processing."""
        if not v or not v.strip():
            raise ValueError('Text cannot be empty')

        # Enhanced text cleaning with intelligent line merging.
        cleaned_text = cls._smart_text_processing(v)

        if not cleaned_text:
            raise ValueError('Text contains only invalid characters')

        return cleaned_text

    @classmethod
    def _smart_text_processing(cls, text):
        """Smart text clean-up:

        1. Merge lines ending with '-' (hyphenated line wraps) into the
           following line(s). Merging chains across consecutive hyphenated
           lines (the previous version merged only one step, leaving a
           dangling hyphen for input like "foo-\\nbar-\\nbaz").
        2. Strip harmful ASCII control characters.
        3. Collapse all whitespace runs into single spaces.
        """
        # Normalize every newline convention to '\n' first.
        text = text.replace('\r\n', '\n').replace('\r', '\n')

        lines = text.split('\n')
        processed_lines = []

        i = 0
        while i < len(lines):
            current_line = lines[i].strip()

            # Chain-merge hyphenated continuations: keep absorbing the
            # next line while the accumulated line still ends with '-'.
            while current_line.endswith('-') and i + 1 < len(lines):
                current_line = current_line[:-1] + lines[i + 1].strip()
                i += 1  # the next line has been consumed

            processed_lines.append(current_line)
            i += 1

        # Flatten the processed lines into a single line of text.
        cleaned_text = ' '.join(processed_lines)

        # Tabs become spaces.
        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)

        # Drop harmful control characters.
        cleaned_text = re.sub(r'[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]', '', cleaned_text)

        # Collapse whitespace runs and trim the ends.
        cleaned_text = re.sub(r'\s+', ' ', cleaned_text).strip()

        return cleaned_text

    @classmethod
    def _preprocess_json_string(cls, json_str):
        """Best-effort repair of a JSON body whose "text" value contains
        raw (unescaped) newlines or hyphenated line wraps.

        On any failure the original string is returned unchanged, so the
        caller's json.loads() reports the real parse error.
        """
        try:
            # Locate the raw value of the "text" field; the pattern
            # tolerates escape sequences and (invalid) literal newlines.
            text_pattern = r'"text"\s*:\s*"([^"]*(?:\\.[^"]*)*)"'
            match = re.search(text_pattern, json_str, re.DOTALL)
            if not match:
                return json_str

            raw_value = match.group(1)

            # Decode JSON escape sequences; strict=False additionally
            # tolerates the literal control characters (newlines, tabs)
            # that made the body invalid in the first place.
            decoded_text = json.loads('"%s"' % raw_value, strict=False)

            # Merge hyphenated line wraps and flatten to one line.
            processed_text = cls._process_hyphenated_lines(decoded_text)

            # Re-encode as a JSON string literal and splice it back at the
            # exact matched span. (The previous version hand-escaped the
            # text — double-escaping sequences such as \t — and used
            # str.replace(), which rewrote *every* occurrence of the text
            # anywhere in the body, not just the "text" field.)
            escaped_text = json.dumps(processed_text, ensure_ascii=False)[1:-1]
            start, end = match.span(1)
            return json_str[:start] + escaped_text + json_str[end:]

        except Exception:
            # Preprocessing is best-effort only; fall back to the raw body.
            return json_str

    @classmethod
    def _process_hyphenated_lines(cls, text):
        """Merge lines ending with '-' into the following line(s), then
        join everything into one space-separated line."""
        # Also accept text that still carries JSON-escaped newlines.
        if '\\n' in text:
            text = text.replace('\\r\\n', '\n').replace('\\n', '\n').replace('\\r', '\n')

        lines = text.split('\n')
        processed_lines = []

        i = 0
        while i < len(lines):
            current_line = lines[i].rstrip()

            # Chain-merge hyphenated continuations (see
            # _smart_text_processing for the rationale).
            while current_line.endswith('-') and i + 1 < len(lines):
                current_line = current_line[:-1] + lines[i + 1].lstrip()
                i += 1

            processed_lines.append(current_line)
            i += 1

        # Join the non-empty lines with single spaces.
        return ' '.join(line.strip() for line in processed_lines if line.strip())

    @validator('src_lang', 'tgt_lang')
    def validate_lang_codes(cls, v):
        """Lowercase and trim language codes; reject empty/non-string values."""
        if not v or not isinstance(v, str):
            raise ValueError('Language code must be a non-empty string')
        return v.lower().strip()

@router.post("/translate")
async def text_to_text_translation(raw_request: Request):
    """
    Enhanced Text-to-Text Translation endpoint

    Translates text from one language to another using SeamlessM4T v2.
    Now supports long text with special characters (line breaks, tabs, etc.)

    Raises:
        HTTPException 400: empty body, non-object payload, missing/invalid fields.
        HTTPException 422: malformed JSON or non-UTF-8 payload.
        HTTPException 500: unexpected failure during translation.
    """
    try:
        # Read the raw body so we can repair malformed JSON ourselves.
        body = await raw_request.body()

        if not body:
            raise HTTPException(status_code=400, detail="Request body is empty")

        try:
            body_str = body.decode('utf-8')

            # Pre-process the JSON string to handle common issues
            # (e.g. raw newlines inside the "text" value).
            processed_body = T2TTRequest._preprocess_json_string(body_str)
            data = json.loads(processed_body)

        except json.JSONDecodeError as e:
            raise HTTPException(
                status_code=422,
                detail=f"Invalid JSON format. Please ensure your JSON is properly formatted. "
                       f"For multiline text, make sure to escape line breaks or use a single line. "
                       f"Error: {str(e)}"
            )
        except UnicodeDecodeError as e:
            raise HTTPException(
                status_code=422,
                detail=f"Invalid character encoding. Please use UTF-8 encoding. Error: {str(e)}"
            )

        # A non-object payload (list, string, number) previously crashed
        # data.get() below and surfaced as an opaque 500; reject it cleanly.
        if not isinstance(data, dict):
            raise HTTPException(status_code=400, detail="Request body must be a JSON object")

        if 'text' not in data:
            raise HTTPException(status_code=400, detail="Missing required field: 'text'")

        text = data.get('text', '')
        src_lang = data.get('src_lang', 'cmn')
        tgt_lang = data.get('tgt_lang', 'eng')

        # The T2TTRequest validator cleans/normalizes the text automatically.
        try:
            request = T2TTRequest(
                text=text,
                src_lang=src_lang,
                tgt_lang=tgt_lang
            )
        except ValueError as e:
            raise HTTPException(status_code=400, detail=str(e))

        # Continue with the actual translation logic.
        return await _perform_translation(request)

    except HTTPException:
        # Re-raise HTTP exceptions as-is.
        raise
    except Exception as e:
        # Log the full traceback via logging (previously print()-ed).
        logging.getLogger(__name__).exception("Error in T2TT translation")
        raise HTTPException(status_code=500, detail=f"Translation failed: {str(e)}")

async def _perform_translation(request: T2TTRequest):
    """
    Run the actual SeamlessM4T text-to-text translation for a validated request.
    """
    # Defensive re-checks (T2TTRequest normally guarantees all three).
    for value, message in (
        (request.text, "Text input is required"),
        (request.src_lang, "Source language is required"),
        (request.tgt_lang, "Target language is required"),
    ):
        if not value:
            raise HTTPException(status_code=400, detail=message)

    # Grab the current model/processor instances and verify they are loaded.
    mdl = model.model
    proc = model.processor
    if mdl is None or proc is None:
        raise HTTPException(status_code=503, detail="Model not initialized")

    # Tokenize the input text and move every tensor onto the model's device.
    inputs = proc(text=request.text, src_lang=request.src_lang, return_tensors="pt")
    device = next(mdl.parameters()).device
    inputs = {key: (val.to(device) if hasattr(val, 'to') else val)
              for key, val in inputs.items()}

    # Text-only generation (no speech output).
    tokens = mdl.generate(**inputs, tgt_lang=request.tgt_lang, generate_speech=False)

    # Per the official docs, decode via .tolist()[0] on the first output.
    translated_text = proc.decode(tokens[0].tolist()[0], skip_special_tokens=True)

    return {
        "original_text": request.text,
        "translated_text": translated_text,
        "src_lang": request.src_lang,
        "tgt_lang": request.tgt_lang,
    }

