#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
信息来源提取器
Source Information Extractor

主动从AI生成内容中提取和验证信息来源
"""

import re
import logging
from typing import List, Dict, Optional
from urllib.parse import urlparse
from url_content_analyzer import URLContentAnalyzer

logger = logging.getLogger(__name__)

class SourceExtractor:
    """Extract and score information sources found in generated text.

    Collects four kinds of evidence from a piece of content:

    * raw URLs, each enriched by a ``URLContentAnalyzer`` content analysis;
    * textual citations (Chinese "according to X's study/report" phrases);
    * academic references (papers, journals, conference proceedings);
    * suggested authoritative sources derived from the detected topics.

    An overall confidence score in ``[0.0, 1.0]`` is then computed from the
    reliability scores of whatever sources were actually found.
    """

    def __init__(self):
        # Regex patterns per source category.  The citation/academic patterns
        # target Chinese prose; each captures (source text, source kind).
        self.source_patterns = {
            'url': [
                r'https?://[^\s<>"{}|\\^`\[\]]+',
                r'www\.[^\s<>"{}|\\^`\[\]]+',
            ],
            'citation': [
                r'根据\s*([^，。！？\n]+)\s*[的]?\s*(研究|报告|数据|统计|调查|分析)',
                r'参考\s*([^，。！？\n]+)\s*[的]?\s*(研究|报告|数据|统计|调查|分析)',
                r'来自\s*([^，。！？\n]+)\s*[的]?\s*(研究|报告|数据|统计|调查|分析)',
            ],
            'academic': [
                r'([^，。！？\n]+)\s*等\s*[的]?\s*(研究|报告|数据|统计|调查|分析)',
                r'([^，。！？\n]+)\s*[的]?\s*(论文|期刊|会议|学报)',
            ],
        }

        # Domains considered authoritative, grouped by source type.
        self.authoritative_domains = {
            'academic': ['arxiv.org', 'sciencedirect.com', 'ieeexplore.ieee.org'],
            'government': ['gov.cn', 'gov.uk', 'gov.us'],
            'news': ['reuters.com', 'bloomberg.com', 'bbc.com'],
        }

        # Fetches and analyzes the page behind each extracted URL.
        self.url_analyzer = URLContentAnalyzer()

    def extract_sources(self, content: str) -> Dict:
        """Extract every kind of source from *content* (best effort).

        Returns a dict with keys ``urls``, ``citations``,
        ``academic_sources``, ``suggested_sources`` and ``confidence_score``.
        On an unexpected error the partial result gathered so far is
        returned and the error is logged.
        """
        sources: Dict = {
            'urls': [],
            'citations': [],
            'academic_sources': [],
            'suggested_sources': [],
            'confidence_score': 0.0,
        }

        try:
            sources['urls'] = self._extract_urls(content)
            sources['citations'] = self._extract_citations(content)
            sources['academic_sources'] = self._extract_academic_sources(content)
            sources['suggested_sources'] = self._generate_suggested_sources(content)
            sources['confidence_score'] = self._calculate_confidence_score(sources)
        except Exception as e:
            # Best effort: keep whatever was collected before the failure.
            logger.error("提取信息来源时出错: %s", e)

        return sources

    def _extract_urls(self, content: str) -> List[Dict]:
        """Extract URLs and enrich each with a content analysis.

        Fixes over a naive ``findall`` approach:

        * a bare ``www.`` match nested inside an already-matched ``http(s)``
          URL is skipped, so the same link is not reported twice;
        * scheme detection is case-insensitive, so ``HTTP://`` links are not
          double-prefixed with ``http://``;
        * identical URLs are de-duplicated and analyzed only once.
        """
        urls: List[Dict] = []
        matched_spans: List[tuple] = []   # (start, end) of accepted matches
        seen_urls = set()                 # normalized URLs already handled

        for pattern in self.source_patterns['url']:
            for match in re.finditer(pattern, content, re.IGNORECASE):
                # Skip a www.-only match fully contained in a prior match
                # (the http(s) pattern runs first and records its spans).
                if any(match.start() >= s and match.end() <= e
                       for s, e in matched_spans):
                    continue
                matched_spans.append((match.start(), match.end()))

                raw = match.group(0)
                # re.IGNORECASE may yield "HTTP://...": compare lowercased.
                url = raw if raw.lower().startswith('http') else f'http://{raw}'
                if url in seen_urls:
                    continue
                seen_urls.add(url)

                try:
                    parsed = urlparse(url)
                    domain = parsed.netloc.lower()

                    source_type = self._classify_domain(domain)

                    # Base information derived from the domain alone.
                    url_info: Dict = {
                        'url': url,
                        'domain': domain,
                        'source_type': source_type,
                        'reliability_score': self._calculate_domain_reliability(domain),
                    }

                    # Enrich with an analysis of the page content itself.
                    content_analysis = self.url_analyzer.analyze_url_content(url)
                    if content_analysis['success']:
                        url_info.update({
                            'content_analysis': content_analysis,
                            'title': content_analysis.get('title', ''),
                            'content_type': content_analysis.get('content_type', 'unknown'),
                            'content_reliability': content_analysis.get('reliability_score', 0.0),
                            'ai_probability': content_analysis.get('ai_indicators', {}).get('ai_probability', 0.0),
                            'content_summary': content_analysis.get('content_summary', ''),
                            'key_information': content_analysis.get('key_information', {}),
                            'metadata': content_analysis.get('metadata', {}),
                        })

                        # Blend domain authority with content quality.
                        url_info['reliability_score'] = self._calculate_combined_reliability(
                            url_info['reliability_score'],
                            content_analysis.get('reliability_score', 0.0),
                        )
                    else:
                        # Content analysis failed: keep the domain-based score
                        # and record why the analysis did not succeed.
                        url_info['content_analysis_error'] = content_analysis.get('error', '未知错误')

                    urls.append(url_info)

                except Exception as e:
                    # Include the actual error so failures are diagnosable.
                    logger.warning("解析URL失败: %s (%s)", url, e)

        return urls

    def _extract_citations(self, content: str) -> List[Dict]:
        """Extract "according to X's study/report" style citations."""
        citations: List[Dict] = []

        for pattern in self.source_patterns['citation']:
            for match in re.findall(pattern, content, re.IGNORECASE):
                # Each pattern captures (source, kind); tolerate a plain
                # string in case a single-group pattern is ever added.
                if isinstance(match, tuple):
                    source, citation_type = match[0].strip(), match[1].strip()
                else:
                    source, citation_type = match.strip(), "引用"

                citations.append({
                    'source': source,
                    'type': citation_type,
                    'reliability_score': self._calculate_citation_reliability(source, citation_type),
                })

        return citations

    def _extract_academic_sources(self, content: str) -> List[Dict]:
        """Extract academic references (papers, journals, proceedings)."""
        academic_sources: List[Dict] = []

        for pattern in self.source_patterns['academic']:
            for match in re.findall(pattern, content, re.IGNORECASE):
                if isinstance(match, tuple):
                    author, source_type = match[0].strip(), match[1].strip()
                else:
                    author, source_type = match.strip(), "学术来源"

                academic_sources.append({
                    'author': author,
                    'type': source_type,
                    'reliability_score': self._calculate_academic_reliability(author, source_type),
                })

        return academic_sources

    def _generate_suggested_sources(self, content: str) -> List[Dict]:
        """Suggest authoritative sources based on the content's topics."""
        suggested: List[Dict] = []

        for topic in self._extract_topics(content):
            suggested.extend(self._get_topic_sources(topic))

        # Cap the number of suggestions to keep the result digestible.
        return suggested[:5]

    def _extract_topics(self, content: str) -> List[str]:
        """Return the known topic keywords mentioned in *content*.

        NOTE: matching is plain case-insensitive substring search, so a
        short keyword like "AI" may also hit inside unrelated English words.
        """
        topic_keywords = [
            'AI', '人工智能', '机器学习', '深度学习', '自然语言处理',
            '计算机视觉', '机器人', '自动化', '算法', '数据科学'
        ]

        lowered = content.lower()
        return [keyword for keyword in topic_keywords if keyword.lower() in lowered]

    def _get_topic_sources(self, topic: str) -> List[Dict]:
        """Return curated authoritative sources relevant to *topic*."""
        sources: List[Dict] = []

        if 'AI' in topic or '人工智能' in topic:
            sources.extend([
                {'url': 'https://arxiv.org/', 'name': 'arXiv', 'type': 'academic', 'reliability': 0.9},
                {'url': 'https://www.nature.com/', 'name': 'Nature', 'type': 'academic', 'reliability': 0.95},
                {'url': 'https://science.sciencemag.org/', 'name': 'Science', 'type': 'academic', 'reliability': 0.95}
            ])

        return sources

    def _classify_domain(self, domain: str) -> str:
        """Classify *domain* against the authoritative-domain lists.

        Uses suffix matching (``x.gov.cn`` or ``gov.cn`` itself) instead of
        plain substring search, so look-alike hosts such as
        ``gov.cn.evil.com`` are not misclassified as authoritative.
        """
        # Drop an explicit :port so netloc values still classify correctly.
        host = domain.split(':', 1)[0]

        for source_type, domains in self.authoritative_domains.items():
            for auth_domain in domains:
                if host == auth_domain or host.endswith('.' + auth_domain):
                    return source_type

        return 'unknown'

    def _calculate_domain_reliability(self, domain: str) -> float:
        """Map a domain's source type to a baseline reliability score."""
        reliability_scores = {
            'academic': 0.9,
            'government': 0.95,
            'news': 0.7,
            'unknown': 0.3,
        }

        return reliability_scores.get(self._classify_domain(domain), 0.3)

    def _calculate_citation_reliability(self, source: str, citation_type: str) -> float:
        """Score a textual citation, capped at 1.0.

        Starts from 0.5 and adds bonuses for evidence-like citation kinds
        (study/report, data/statistics) and for expert attribution.
        """
        base_score = 0.5

        if '研究' in citation_type or '报告' in citation_type:
            base_score += 0.2
        if '数据' in citation_type or '统计' in citation_type:
            base_score += 0.1
        if '专家' in source or '教授' in source:
            base_score += 0.2

        return min(base_score, 1.0)

    def _calculate_academic_reliability(self, author: str, source_type: str) -> float:
        """Score an academic reference, capped at 1.0."""
        base_score = 0.7

        if '论文' in source_type or '期刊' in source_type:
            base_score += 0.2
        if '实验' in source_type or '试验' in source_type:
            base_score += 0.1

        return min(base_score, 1.0)

    def _calculate_combined_reliability(self, domain_score: float, content_score: float) -> float:
        """Blend domain authority (40%) with content quality (60%)."""
        return min(domain_score * 0.4 + content_score * 0.6, 1.0)

    def _calculate_confidence_score(self, sources: Dict) -> float:
        """Weighted average reliability over the source kinds present.

        URLs weigh 0.4, citations 0.3, academic sources 0.2; kinds with no
        hits contribute nothing.  Suggested sources are recommendations
        rather than evidence and are deliberately excluded.
        """
        total_score = 0.0
        total_weight = 0.0

        weighted_groups = (
            ('urls', 0.4),
            ('citations', 0.3),
            ('academic_sources', 0.2),
        )
        for key, weight in weighted_groups:
            items = sources[key]
            if items:
                avg = sum(item['reliability_score'] for item in items) / len(items)
                total_score += avg * weight
                total_weight += weight

        return total_score / total_weight if total_weight > 0 else 0.0