import os
import logging
import concurrent.futures
from typing import List, Dict, Any, Optional
from pathlib import Path
from external.llm_service import LlmService
import json

def translate_sentence_batch(sentences_and_words: List[Dict[str, str]],
                           max_workers: int = 5,
                           timeout: int = 30) -> List[Dict[str, Any]]:
    """
    Concurrently translate multiple (sentence, word) pairs using the LLM service.

    Args:
        sentences_and_words: list of dicts, each containing at least
            {"sentence": "<English sentence>", "word": "<word>"}. Successful
            items are mutated in place, gaining "word_meaning" and "sentence_zh".
        max_workers: maximum number of worker threads.
        timeout: overall deadline (seconds) for collecting all results;
            items still pending when it expires are cancelled and omitted.

    Returns:
        List[Dict]: the input dicts that were successfully translated,
        augmented with the translation results. Failed items are omitted.
        Returns [] when the LLM service is unavailable.
    """
    # Initialize the LLM service; bail out early if it is not reachable.
    llm_service = LlmService(model_name="qwen3-30b-a3b-instruct-2507")
    if not llm_service.available:
        logging.warning("LLM服务不可用，无法进行翻译")
        return []

    # Load the system prompt from the sibling 'prompt' directory, if present.
    system_prompt = None
    system_prompt_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'prompt', 'sentence_translate.txt')
    if os.path.exists(system_prompt_path):
        with open(system_prompt_path, 'r', encoding='utf-8') as f:
            system_prompt = f.read().strip()

    def translate_task(item: Dict[str, str]) -> Optional[Dict[str, Any]]:
        """Translate one item; return the augmented item, or None on failure."""
        sentence = item["sentence"]
        word = item["word"]

        try:
            user_prompt = f"word: \"{word}\"\nsentence: \"{sentence}\""

            messages = [
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt}
            ]
            # Up to 3 attempts; on a mismatched answer, feed the failure back
            # to the model so it can self-correct on the next round.
            for attempt in range(3):
                response = llm_service.llm_chat_json(messages)
                if response and "word_meaning" in response and "sentence_zh" in response:
                    # Accept only when the word's translation literally appears
                    # inside the translated sentence.
                    if response['word_meaning'].strip() in response['sentence_zh']:
                        item['word_meaning'] = response['word_meaning']
                        item['sentence_zh'] = response['sentence_zh']
                        return item
                    # attempt is 0-based; log 1-based attempt numbers.
                    logging.error(f"第{attempt + 1}次翻译失败: {sentence}:{response['sentence_zh']}\n{word}:{response['word_meaning']}")
                    messages.append({
                        "role": "assistant",
                        "content": json.dumps(response)
                        })
                    messages.append({
                        "role": "user",
                        "content": f"'{response['word_meaning']}' 不在句子 '{response['sentence_zh']}'中，在句子中找到关键词语"
                        })
                else:
                    logging.error(f"第{attempt + 1}次翻译失败: {sentence}, {word}")
            return None
        except Exception as e:
            logging.error(f"翻译过程中发生错误: {str(e)}")
            return None

    results: List[Dict[str, Any]] = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        # Submit all translation tasks up front.
        future_to_item = {executor.submit(translate_task, item): item for item in sentences_and_words}

        # Collect results as they finish. NOTE: as_completed's timeout is an
        # overall deadline; it raises TimeoutError from the iterator itself,
        # so it must be caught here — otherwise every result collected so far
        # would be lost when the deadline expires.
        try:
            for future in concurrent.futures.as_completed(future_to_item, timeout=timeout):
                try:
                    result = future.result()
                    if result:
                        results.append(result)
                except Exception as e:
                    logging.error(f"处理翻译结果时发生错误: {str(e)}")
        except concurrent.futures.TimeoutError:
            # Deadline hit: keep the finished results and cancel pending work.
            logging.error("翻译任务超时，返回已完成的结果")
            for future in future_to_item:
                future.cancel()

    return results

# 兼容原有的单句翻译函数
def translate_with_llm(sentence: str, word: str) -> Optional[Dict[str, str]]:
    """
    Translate a sentence and explain a word's in-context meaning via the LLM
    (single-sentence version, kept for compatibility with existing callers).

    Args:
        sentence: the English sentence.
        word: the word to explain in context.

    Returns:
        dict: {"word_meaning": "<meaning>", "sentence_zh": "<translation>"} on success.
        A dict with empty-string values when the LLM service is unavailable
        (kept for backward compatibility — note this differs from the failure case).
        None when all translation attempts fail or an unexpected error occurs.
    """
    # Placeholder returned when the service itself is unavailable.
    default_result = {"word_meaning": "", "sentence_zh": ""}

    try:
        llm_service = LlmService()
        if not llm_service.available:
            logging.warning("LLM服务不可用，无法进行翻译")
            return default_result

        # Load the system prompt from the sibling 'prompt' directory, if present.
        system_prompt = None
        system_prompt_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'prompt', 'sentence_translate.txt')
        if os.path.exists(system_prompt_path):
            with open(system_prompt_path, 'r', encoding='utf-8') as f:
                system_prompt = f.read().strip()

        user_prompt = f"word: \"{word}\"\nsentence: \"{sentence}\""

        messages = [
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt}
            ]
        # Up to 3 attempts; on a mismatched answer, feed the failure back to
        # the model so it can self-correct on the next round.
        for attempt in range(3):
            response = llm_service.llm_chat_json(messages)

            if response and "word_meaning" in response and "sentence_zh" in response:
                # Accept only when the word's translation literally appears
                # inside the translated sentence.
                if response['word_meaning'].strip() in response['sentence_zh']:
                    return response
                # attempt is 0-based; log 1-based attempt numbers.
                logging.error(f"第{attempt + 1}次翻译失败: {sentence}:{response['sentence_zh']}\n{word}:{response['word_meaning']}")
                messages.append({
                    "role": "assistant",
                    "content": json.dumps(response)
                    })
                messages.append({
                    "role": "user",
                    "content": f"{response['word_meaning']} 不在句子 {response['sentence_zh']}中"
                    })
            else:
                logging.error(f"第{attempt + 1}次翻译失败: {sentence}, {word}")
        return None
    except Exception as e:
        logging.error(f"翻译过程中发生错误: {str(e)}")
        return None