import requests
import logging
import json

# Configure root logging for the module: timestamped INFO-level messages.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

def filter_articles_with_llm(articles: list, user_interests: str, priority_keywords: list, api_key: str, api_url: str, model_name: str, relevance_threshold: float = 0.7) -> list:
    """Filter articles against the user's interests using an LLM.

    Supports both the Gemini API and OpenAI-compatible endpoints
    (including local Ollama, for which the API key may be empty).

    Args:
        articles: Articles to screen. Each dict must have a 'title' key and
            may carry 'summary' and 'source_type'.
        user_interests: Free-text description of the user's interests.
        priority_keywords: Keywords to prioritise during screening.
        api_key: API key; may be empty for local endpoints.
        api_url: LLM API endpoint URL.
        model_name: Model name, required by most OpenAI-compatible APIs.
        relevance_threshold: Minimum relevance_score (inclusive) an article
            needs to be kept. Defaults to 0.7.

    Returns:
        The selected articles (same dict objects, annotated in place with a
        'relevance_score' key), sorted by relevance_score descending.

    Raises:
        ValueError: If api_url is not set, or api_key is missing for a
            non-local endpoint.
    """
    if not api_url:
        raise ValueError("LLM API Endpoint未在配置中设置。")

    # Local endpoints (e.g. Ollama) may legitimately run without an API key.
    is_local = any(host in api_url for host in ("localhost", "127.0.0.1", "192.168."))
    if not api_key and not is_local:
        raise ValueError("API Key未设置。")

    # Crude endpoint sniffing: anything that is not Gemini is treated as
    # OpenAI-compatible.
    api_type = "gemini" if "gemini" in api_url.lower() else "openai"
    logging.info(f"检测到API类型: {api_type}")

    selected_articles = []
    chunk_size = 10  # Articles per request, to stay within the context window.

    logging.info(f"开始使用LLM筛选文章，共 {len(articles)} 篇，每批处理 {chunk_size} 篇。")

    for i in range(0, len(articles), chunk_size):
        chunk = articles[i:i + chunk_size]

        priority_keywords_str = ", ".join([f'"{kw}"' for kw in priority_keywords])
        base_prompt = f"""你是一个智能信息分析助手，任务是从文章列表中为我筛选出我应该阅读的内容。请仔细评估每一篇文章，确保不会错过任何重要或我感兴趣的内容。

筛选标准：
请基于我的兴趣描述“{user_interests}”和优先关键词[{priority_keywords_str}]，评估每篇文章与我的相关性。

**输出要求**:
你必须返回一个JSON对象，其唯一的键是 "selected_articles"，值是一个对象数组。
- 每个对象代表一篇被选中的文章，并必须包含 `index` (文章在列表中的原始索引) 和 `relevance_score`。
- `relevance_score` 是一个0-1之间的浮点数，表示文章与用户兴趣的相关性程度。
- 相关性分数应该综合考虑文章内容与我的兴趣和关键词的匹配程度。
- 如果没有任何文章符合标准，则返回 {{"selected_articles": []}}。

**输出格式示例**:
```json
{{
  "selected_articles": [
    {{ "index": 1, "relevance_score": 0.95 }},
    {{ "index": 4, "relevance_score": 0.75 }}
  ]
}}
```

**待分析的文章列表**:
        """
        for idx, article in enumerate(chunk):
            title = article['title']
            source_type = article.get('source_type', 'unknown')

            if source_type == 'sciencedirect_journal':
                # ScienceDirect journals: title only (no usable abstract).
                base_prompt += f"\n{idx}. 标题: {title}\n"
            else:
                # All other source types: title plus full abstract.
                # NOTE(review): the original had a separate branch for
                # nature/wiley/preprint journals that was byte-identical to
                # this one; the duplicate is collapsed here.
                summary_text = article.get('summary', '')
                base_prompt += f"\n{idx}. 标题: {title}\n   摘要: {summary_text}\n"

        prompt = base_prompt
        # Build the request payload per API flavour.
        if api_type == "gemini":
            full_api_url = f"{api_url}?key={api_key}"
            headers = {"Content-Type": "application/json"}
            payload = {"contents": [{"parts": [{"text": prompt}]}]}
        else:  # OpenAI-compatible
            full_api_url = api_url
            headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {api_key}"
            }
            payload = {
                "model": model_name,  # Required by most compatible servers / local models.
                "messages": [{"role": "user", "content": prompt}],
            }

        try:
            logging.info(f"正在处理第 {i//chunk_size + 1} 批文章 (API: {api_type})...")

            response = requests.post(full_api_url, headers=headers, json=payload, timeout=60)
            response.raise_for_status()  # Raise on HTTP 4xx/5xx.

            response_data = response.json()
            response_text = ""

            # Extract the model's text per API flavour; skip this batch on a
            # malformed response rather than aborting the whole run.
            if api_type == "gemini":
                if response_data.get('candidates') and response_data['candidates'][0].get('content', {}).get('parts'):
                    response_text = response_data['candidates'][0]['content']['parts'][0]['text']
                else:
                    logging.error(f"Gemini响应格式不完整或为空: {response_data}")
                    continue
            else:  # openai
                if response_data.get('choices') and response_data['choices'][0].get('message', {}).get('content'):
                    response_text = response_data['choices'][0]['message']['content']
                else:
                    logging.error(f"OpenAI响应格式不完整或为空: {response_data}")
                    continue

            # Strip Markdown code fences the model may wrap the JSON in.
            cleaned_text = response_text.strip()
            if cleaned_text.startswith("```json"):
                cleaned_text = cleaned_text[7:].strip()
            elif cleaned_text.startswith("```"):
                cleaned_text = cleaned_text[3:].strip()
            if cleaned_text.endswith("```"):
                cleaned_text = cleaned_text[:-3].strip()
            if cleaned_text.startswith("json"):
                cleaned_text = cleaned_text[4:].strip()

            json_data = json.loads(cleaned_text)
            selections = json_data.get("selected_articles", [])

            logging.info(f"LLM返回的筛选结果: {selections}")

            seen_indices = set()  # Guard against the LLM repeating an index,
                                  # which would append the same article twice.
            for selection in selections:
                index = selection.get('index')
                relevance_score = selection.get('relevance_score')
                if (isinstance(index, int) and 0 <= index < len(chunk)
                        and index not in seen_indices
                        and isinstance(relevance_score, (int, float))):
                    seen_indices.add(index)
                    selected_article = chunk[index]
                    selected_article['relevance_score'] = float(relevance_score)
                    selected_articles.append(selected_article)
        except json.JSONDecodeError:
            logging.error(f"无法解析LLM的响应为JSON: {response_text}")
        except requests.exceptions.RequestException as e:
            logging.error(f"请求LLM API时出错: {e}")
        except Exception as e:
            logging.error(f"处理第 {i//chunk_size + 1} 批文章时发生未知错误: {e}")

    # Apply the (inclusive) relevance threshold, then rank best-first.
    filtered_articles = [article for article in selected_articles if article.get('relevance_score', 0) >= relevance_threshold]
    filtered_articles.sort(key=lambda x: x.get('relevance_score', 0), reverse=True)

    logging.info(f"初步筛选完成，共选出 {len(selected_articles)} 篇文章，经过相关性阈值({relevance_threshold})过滤后，最终保留 {len(filtered_articles)} 篇，并按相关性降序排列。")
    return filtered_articles


def translate_articles_with_llm(articles: list, api_key: str, api_url: str, model_name: str) -> list:
    """Translate article titles and summaries into Chinese using an LLM.

    Supports both the Gemini API and OpenAI-compatible endpoints
    (including local Ollama, for which the API key may be empty).
    Articles whose translation fails keep their original text as the
    fallback, so every input article appears in the result.

    Args:
        articles: Articles to translate; 'title' and 'summary' keys are read.
        api_key: API key; may be empty for local endpoints.
        api_url: LLM API endpoint URL.
        model_name: Model name, required by most OpenAI-compatible APIs.

    Returns:
        The same article dicts, each annotated in place with 'title_zh'
        and 'summary_zh' keys (translated text, or the original text as a
        fallback when translation failed).

    Raises:
        ValueError: If api_url is not set, or api_key is missing for a
            non-local endpoint.
    """
    if not api_url:
        raise ValueError("LLM API Endpoint未在配置中设置。")

    # Local endpoints (e.g. Ollama) may legitimately run without an API key.
    is_local = any(host in api_url for host in ("localhost", "127.0.0.1", "192.168."))
    if not api_key and not is_local:
        raise ValueError("API Key未设置。")

    api_type = "gemini" if "gemini" in api_url.lower() else "openai"
    logging.info(f"开始翻译文章，共 {len(articles)} 篇，API类型: {api_type}")

    translated_articles = []
    chunk_size = 2  # Small batches: translation needs more output tokens per article.

    for i in range(0, len(articles), chunk_size):
        chunk = articles[i:i + chunk_size]

        base_prompt = """请将以下英文文章标题和摘要准确翻译为中文。翻译要求：
1. 保持专业术语的准确性
2. 保持原文的科学性和专业性
3. 语言流畅自然
4. 标题翻译要简洁明了
5. 摘要翻译要完整准确

请返回一个JSON对象，其唯一的键是 "translations"，值是一个对象数组。
每个对象包含：
- "index": 文章在列表中的原始索引
- "title_zh": 中文标题
- "summary_zh": 中文摘要

**待翻译的文章列表**:
"""

        for idx, article in enumerate(chunk):
            title = article.get('title', '')
            summary = article.get('summary', '')
            base_prompt += f"\n{idx}. 标题: {title}\n   摘要: {summary}\n"

        base_prompt += "\n**输出格式示例**:\n```json\n{\n  \"translations\": [\n    {\n      \"index\": 0,\n      \"title_zh\": \"中文标题1\",\n      \"summary_zh\": \"中文摘要1...\"\n    },\n    {\n      \"index\": 1,\n      \"title_zh\": \"中文标题2\",\n      \"summary_zh\": \"中文摘要2...\"\n    }\n  ]\n}\n```"

        prompt = base_prompt

        # Build the request payload per API flavour.
        if api_type == "gemini":
            full_api_url = f"{api_url}?key={api_key}"
            headers = {"Content-Type": "application/json"}
            payload = {"contents": [{"parts": [{"text": prompt}]}]}
        else:  # OpenAI-compatible
            full_api_url = api_url
            headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {api_key}"
            }
            payload = {
                "model": model_name,
                "messages": [{"role": "user", "content": prompt}],
            }

        try:
            logging.info(f"正在翻译第 {i//chunk_size + 1} 批文章 (共 {len(chunk)} 篇)...")

            response = requests.post(full_api_url, headers=headers, json=payload, timeout=60)
            response.raise_for_status()

            response_data = response.json()
            response_text = ""

            # Extract the model's text per API flavour; skip this batch on a
            # malformed response (the fallback below keeps the originals).
            if api_type == "gemini":
                if response_data.get('candidates') and response_data['candidates'][0].get('content', {}).get('parts'):
                    response_text = response_data['candidates'][0]['content']['parts'][0]['text']
                else:
                    logging.error(f"Gemini响应格式不完整或为空: {response_data}")
                    continue
            else:  # openai
                if response_data.get('choices') and response_data['choices'][0].get('message', {}).get('content'):
                    response_text = response_data['choices'][0]['message']['content']
                else:
                    logging.error(f"OpenAI响应格式不完整或为空: {response_data}")
                    continue

            # Strip Markdown code fences the model may wrap the JSON in.
            cleaned_text = response_text.strip()
            if cleaned_text.startswith("```json"):
                cleaned_text = cleaned_text[7:].strip()
            elif cleaned_text.startswith("```"):
                cleaned_text = cleaned_text[3:].strip()
            if cleaned_text.endswith("```"):
                cleaned_text = cleaned_text[:-3].strip()
            if cleaned_text.startswith("json"):
                cleaned_text = cleaned_text[4:].strip()

            json_data = json.loads(cleaned_text)
            translations = json_data.get("translations", [])

            logging.info(f"LLM返回的翻译结果: 共 {len(translations)} 篇")

            seen_indices = set()  # Guard against the LLM repeating an index.
            for translation in translations:
                index = translation.get('index')
                title_zh = translation.get('title_zh')
                summary_zh = translation.get('summary_zh')

                if (isinstance(index, int) and 0 <= index < len(chunk)
                        and index not in seen_indices and title_zh and summary_zh):
                    seen_indices.add(index)
                    article = chunk[index]
                    article['title_zh'] = title_zh
                    article['summary_zh'] = summary_zh
                    translated_articles.append(article)
        except json.JSONDecodeError:
            logging.error(f"无法解析LLM的翻译响应为JSON: {response_text}")
        except requests.exceptions.RequestException as e:
            logging.error(f"请求LLM翻译API时出错: {e}")
        except Exception as e:
            logging.error(f"翻译第 {i//chunk_size + 1} 批文章时发生未知错误: {e}")

    # Fallback: append untranslated articles with their original text.
    # Membership is checked by object identity, not value equality: the
    # previous `article not in translated_articles` was O(n^2) and silently
    # dropped any article whose dict compared equal to an already-translated
    # one (e.g. duplicate feed entries).
    translated_ids = {id(a) for a in translated_articles}
    for article in articles:
        if id(article) not in translated_ids:
            article['title_zh'] = article.get('title', '')
            article['summary_zh'] = article.get('summary', '')
            translated_articles.append(article)

    logging.info(f"翻译完成，共成功翻译 {len(translated_articles)} 篇文章。")
    return translated_articles