#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
URL内容分析器
URL Content Analyzer

分析URL所代表网页的具体内容，提取关键信息并验证其可靠性
"""

import requests
import logging
from typing import Dict, List, Optional, Tuple
from urllib.parse import urlparse
import re
import time
from bs4 import BeautifulSoup
from urllib.robotparser import RobotFileParser
import hashlib
import json
from zerogpt_detector import ZerogptDetector

logger = logging.getLogger(__name__)

class URLContentAnalyzer:
    """URL内容分析器"""
    
    def __init__(self) -> None:
        """Initialize the HTTP session, AI detector, and scoring configuration."""
        self.session = requests.Session()
        # Browser-like User-Agent so sites serve their normal desktop pages.
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        })

        # AI-generated-content detector (Zerogpt-backed), used by _detect_ai_content.
        self.zerogpt_detector = ZerogptDetector()

        # Regex patterns (Chinese first, then English) used by
        # _classify_content_type. NOTE: dict insertion order matters —
        # score ties resolve to the earliest key via max().
        self.content_patterns = {
            'news': [
                r'新闻|报道|消息|发布|公告|声明',
                r'news|report|announcement|press|release'
            ],
            'academic': [
                r'论文|研究|学术|期刊|会议|学报',
                r'paper|research|academic|journal|conference|proceedings'
            ],
            'government': [
                r'政府|官方|政策|法规|条例|通知',
                r'government|official|policy|regulation|law|notice'
            ],
            'blog': [
                r'博客|个人|观点|评论|分享',
                r'blog|personal|opinion|comment|share'
            ],
            'commercial': [
                r'产品|服务|价格|购买|销售|广告',
                r'product|service|price|buy|sell|advertisement'
            ]
        }

        # Weights for the reliability sub-scores (they sum to 1.0),
        # consumed by _calculate_reliability_score.
        self.reliability_weights = {
            'domain_authority': 0.3,
            'content_quality': 0.25,
            'freshness': 0.2,
            'source_diversity': 0.15,
            'technical_indicators': 0.1
        }
    
    def analyze_url_content(self, url: str) -> Dict:
        """Fetch a URL and produce a structured analysis of its page.

        The result dict carries a success flag, title, classified content
        type, reliability score, extracted key information, AI-content
        indicators, a short summary, and basic metadata. Any failure along
        the way yields the standard error result instead.
        """
        try:
            if not self._is_valid_url(url):
                return self._create_error_result("无效的URL格式")

            # robots.txt is treated as advisory: log and continue.
            if not self._check_robots_txt(url):
                logger.warning(f"robots.txt不允许访问: {url}")

            fetched = self._fetch_url_content(url)
            if not fetched['success']:
                return self._create_error_result(fetched['error'])

            parsed = self._parse_content(fetched['html'], fetched['text'])
            page_type = self._classify_content_type(parsed)
            score = self._calculate_reliability_score(url, parsed, page_type)

            return {
                'success': True,
                'url': url,
                'title': parsed.get('title', ''),
                'content_type': page_type,
                'reliability_score': score,
                'key_information': self._extract_key_information(parsed),
                'ai_indicators': self._detect_ai_content(parsed),
                'content_summary': self._generate_content_summary(parsed),
                'metadata': {
                    'domain': urlparse(url).netloc,
                    'last_modified': fetched.get('last_modified'),
                    'content_length': len(fetched['text']),
                    'word_count': len(fetched['text'].split())
                }
            }

        except Exception as e:
            logger.error(f"分析URL内容时出错: {str(e)}")
            return self._create_error_result(f"分析失败: {str(e)}")
    
    def _is_valid_url(self, url: str) -> bool:
        """Return True when *url* parses with both a scheme and a netloc.

        Replaces the old bare ``except:`` (which also swallowed
        KeyboardInterrupt/SystemExit and hid real bugs) with the exception
        types ``urlparse`` can actually raise — e.g. ValueError for a
        malformed IPv6 host, TypeError/AttributeError for non-str input.
        """
        try:
            parts = urlparse(url)
        except (ValueError, TypeError, AttributeError):
            return False
        return bool(parts.scheme and parts.netloc)
    
    def _check_robots_txt(self, url: str) -> bool:
        """Return True if robots.txt permits fetching *url* for any agent.

        Failures (no robots.txt, network error, parse error) are treated
        as permission granted, preserving the original best-effort policy.
        """
        try:
            parsed = urlparse(url)
            robots_url = f"{parsed.scheme}://{parsed.netloc}/robots.txt"

            rp = RobotFileParser()
            rp.set_url(robots_url)
            rp.read()  # fetches robots.txt over the network

            return rp.can_fetch("*", url)
        except Exception:
            # Narrowed from a bare except: still best-effort, but no longer
            # swallows KeyboardInterrupt/SystemExit.
            return True  # default to allowed when the check itself fails
    
    def _detect_and_fix_encoding(self, content: bytes) -> str:
        """Decode raw response bytes to text, working around bad charsets.

        Strategy: chardet (if installed) with a confidence threshold,
        then strict UTF-8, then a fixed list of common CJK/Western
        encodings, and finally a lossy UTF-8 decode as the last resort.
        """
        def _looks_garbled(decoded: str) -> bool:
            # 'ç' / 'å' are typical artifacts of UTF-8 bytes mis-decoded as
            # Latin-1. Heuristic only — may misfire on genuine French or
            # Scandinavian text (kept from the original implementation).
            return 'ç' in decoded or 'å' in decoded

        try:
            import chardet
            detected = chardet.detect(content)
            detected_encoding = detected['encoding']
            confidence = detected['confidence']

            logger.info(f"检测到编码: {detected_encoding}, 置信度: {confidence}")

            if detected_encoding and confidence > 0.7:
                try:
                    decoded = content.decode(detected_encoding)
                    if not _looks_garbled(decoded):
                        return decoded
                except UnicodeDecodeError:
                    pass
        except ImportError:
            logger.warning("chardet库未安装，使用备用编码检测方法")

        # Fallback 1: strict UTF-8.
        try:
            decoded = content.decode('utf-8')
            if not _looks_garbled(decoded):
                return decoded
        except UnicodeDecodeError:
            pass

        # Fallback 2: common encodings; accept only when the result contains
        # CJK characters and shows no mojibake markers.
        for encoding in ('gbk', 'gb2312', 'big5', 'latin-1', 'iso-8859-1'):
            try:
                decoded = content.decode(encoding)
            except UnicodeDecodeError:
                continue
            if any('\u4e00' <= ch <= '\u9fff' for ch in decoded) and not _looks_garbled(decoded):
                logger.info(f"使用编码: {encoding}")
                return decoded

        # Last resort: lossy UTF-8 never raises, so the old secondary
        # latin-1 fallback here was unreachable and has been removed.
        return content.decode('utf-8', errors='ignore')
    
    def _fetch_url_content(self, url: str) -> Dict:
        """Download *url* and return decoded HTML plus extracted plain text.

        Returns ``{'success': False, 'error': ...}`` for non-HTML content
        or any network failure.
        """
        try:
            resp = self.session.get(url, timeout=10, allow_redirects=True)
            resp.raise_for_status()

            # Only HTML pages can be analyzed; reject PDFs, images, etc.
            mime = resp.headers.get('content-type', '').lower()
            if 'text/html' not in mime:
                return {
                    'success': False,
                    'error': f'不支持的内容类型: {mime}'
                }

            html = self._detect_and_fix_encoding(resp.content)
            return {
                'success': True,
                'html': html,
                'text': self._extract_text_from_html(html),
                'last_modified': resp.headers.get('last-modified'),
                'status_code': resp.status_code
            }

        except requests.exceptions.RequestException as e:
            return {
                'success': False,
                'error': f'网络请求失败: {str(e)}'
            }
    
    def _extract_text_from_html(self, html: str) -> str:
        """Strip markup, scripts, and styles from *html*, returning plain text.

        Whitespace is normalized: lines are split on double spaces and the
        surviving fragments are re-joined with single spaces. Returns ""
        when parsing fails.
        """
        try:
            dom = BeautifulSoup(html, 'html.parser')

            # Drop non-content tags before text extraction.
            for tag in dom(["script", "style"]):
                tag.decompose()

            raw = dom.get_text()

            # Normalize: strip each line, break on double spaces, keep
            # only non-empty fragments.
            fragments = []
            for line in raw.splitlines():
                for phrase in line.strip().split("  "):
                    cleaned = phrase.strip()
                    if cleaned:
                        fragments.append(cleaned)
            return ' '.join(fragments)
        except Exception as e:
            logger.warning(f"提取文本失败: {str(e)}")
            return ""
    
    def _parse_content(self, html: str, text: str) -> Dict:
        """Parse HTML into title, meta tags, links, and images.

        On any parsing failure a minimal result is returned. Fix: the
        failure result now includes the 'html' key so it has the same
        shape as the success result (_calculate_technical_indicators
        reads parsed_content['html']).
        """
        try:
            soup = BeautifulSoup(html, 'html.parser')

            # Page title.
            title = ""
            title_tag = soup.find('title')
            if title_tag:
                title = title_tag.get_text().strip()

            # <meta name=...> / <meta property=...> pairs.
            meta_info = {}
            for meta in soup.find_all('meta'):
                name = meta.get('name', meta.get('property', ''))
                content = meta.get('content', '')
                if name and content:
                    meta_info[name] = content

            # Anchors that carry both an href and visible text.
            links = []
            for link in soup.find_all('a', href=True):
                href = link.get('href')
                text_content = link.get_text().strip()
                if href and text_content:
                    links.append({
                        'url': href,
                        'text': text_content
                    })

            # Images with a src attribute.
            images = []
            for img in soup.find_all('img'):
                src = img.get('src', '')
                alt = img.get('alt', '')
                if src:
                    images.append({
                        'src': src,
                        'alt': alt
                    })

            return {
                'title': title,
                'text': text,
                'meta_info': meta_info,
                'links': links,
                'images': images,
                'html': html
            }

        except Exception as e:
            logger.error(f"解析内容失败: {str(e)}")
            return {'text': text, 'title': '', 'meta_info': {}, 'links': [],
                    'images': [], 'html': html}
    
    def _classify_content_type(self, parsed_content: Dict) -> str:
        """Classify the page into one of the configured content types.

        Each pattern match in the title scores 2 points, in the body 1
        point; the highest-scoring type wins. Fix: when nothing matches
        at all, return 'unknown' — the old code returned the first
        configured type ('news') because max() over all-zero scores
        picks the first key.
        """
        text = parsed_content.get('text', '').lower()
        title = parsed_content.get('title', '').lower()

        scores = {}
        for content_type, patterns in self.content_patterns.items():
            score = 0
            for pattern in patterns:
                # Title hits weigh double over body hits.
                if re.search(pattern, title, re.IGNORECASE):
                    score += 2
                if re.search(pattern, text, re.IGNORECASE):
                    score += 1
            scores[content_type] = score

        if scores:
            best = max(scores, key=scores.get)
            if scores[best] > 0:
                return best

        return 'unknown'
    
    def _calculate_reliability_score(self, url: str, parsed_content: Dict, content_type: str) -> float:
        """Combine the weighted sub-scores into one reliability value in [0, 1].

        Each component is computed by its dedicated helper and multiplied
        by the weight configured in self.reliability_weights.
        """
        components = {
            'domain_authority': self._calculate_domain_authority(url),
            'content_quality': self._calculate_content_quality(parsed_content),
            'freshness': self._calculate_content_freshness(parsed_content),
            'source_diversity': self._calculate_source_diversity(parsed_content),
            'technical_indicators': self._calculate_technical_indicators(parsed_content),
        }

        total = 0.0
        for key, value in components.items():
            total += value * self.reliability_weights[key]

        # Clamp in case weights or components ever push the sum past 1.
        return min(total, 1.0)
    
    def _calculate_domain_authority(self, url: str) -> float:
        """Score domain authority: 0.9 for known trusted hosts, else 0.5.

        Fix: the old substring test (``auth_domain in domain``) also
        matched lookalike hosts such as 'fakearxiv.org.evil.com'. Now a
        host only qualifies when it equals a trusted domain or is a
        proper subdomain of one; userinfo and port are stripped first.
        """
        netloc = urlparse(url).netloc.lower()
        # Strip userinfo ("user@host") and port ("host:443").
        host = netloc.split('@')[-1].split(':')[0]

        # Known authoritative domains by category.
        authoritative_domains = {
            'academic': ['arxiv.org', 'sciencedirect.com', 'ieeexplore.ieee.org', 'springer.com'],
            'government': ['gov.cn', 'gov.uk', 'gov.us', 'whitehouse.gov'],
            'news': ['reuters.com', 'bloomberg.com', 'bbc.com', 'cnn.com'],
            'research': ['nature.com', 'science.org', 'researchgate.net']
        }

        for domains in authoritative_domains.values():
            for auth_domain in domains:
                if host == auth_domain or host.endswith('.' + auth_domain):
                    return 0.9

        # TODO: could factor in domain age / SSL certificate here.
        return 0.5  # default: medium authority
    
    def _calculate_content_quality(self, parsed_content: Dict) -> float:
        """Heuristic quality score from length, title, links, and images."""
        body = parsed_content.get('text', '')
        page_title = parsed_content.get('title', '')
        link_count = len(parsed_content.get('links', []))
        has_images = bool(parsed_content.get('images', []))

        quality = 0.5  # neutral baseline

        # Longer articles score higher, in two tiers.
        words = len(body.split())
        if words > 500:
            quality += 0.2
        elif words > 100:
            quality += 0.1

        # A descriptive (non-trivial) title.
        if page_title and len(page_title) > 10:
            quality += 0.1

        # A moderate number of links suggests curated references.
        if 3 <= link_count <= 20:
            quality += 0.1

        # At least one image present.
        if has_images:
            quality += 0.1

        return min(quality, 1.0)
    
    def _calculate_content_freshness(self, parsed_content: Dict) -> float:
        """Placeholder freshness score.

        A real implementation could use the Last-Modified header or dates
        found in the page; for now every page gets the same middling value.
        """
        return 0.7
    
    def _calculate_source_diversity(self, parsed_content: Dict) -> float:
        """Score how many distinct domains the page links out to.

        Fixes: ``link['url']`` could raise KeyError on a malformed entry
        (only hidden by a bare except that also swallowed unrelated
        bugs); now missing URLs are skipped via .get and only the
        ValueError that urlparse can raise is caught.
        """
        links = parsed_content.get('links', [])
        if not links:
            return 0.3

        # Collect the distinct target domains.
        domains = set()
        for link in links:
            try:
                domain = urlparse(link.get('url', '')).netloc
            except ValueError:
                continue  # malformed URL (e.g. bad IPv6 host)
            if domain:
                domains.add(domain)

        # More distinct domains → more diverse sourcing.
        if len(domains) >= 5:
            return 0.9
        if len(domains) >= 3:
            return 0.7
        if len(domains) >= 1:
            return 0.5
        return 0.3
    
    def _calculate_technical_indicators(self, parsed_content: Dict) -> float:
        """Score technical hygiene: standard meta tags and structured data."""
        meta_info = parsed_content.get('meta_info', {})
        markup = parsed_content.get('html', '')

        indicators = 0.5

        # Each standard meta tag present adds a small bonus.
        for tag in ('description', 'keywords', 'author'):
            if tag in meta_info:
                indicators += 0.1

        # Structured data markers are a strong quality signal.
        if 'schema.org' in markup or 'json-ld' in markup:
            indicators += 0.2

        return min(indicators, 1.0)
    
    def _extract_key_information(self, parsed_content: Dict) -> Dict:
        """Pull dates, figures, and candidate names out of the page text.

        Result lists are truncated (5 dates, 10 numbers, 10 names) and the
        dict also carries word and sentence counts.
        """
        text = parsed_content.get('text', '')

        def _collect(patterns):
            # Gather matches for each pattern, preserving pattern order.
            found = []
            for pattern in patterns:
                found.extend(re.findall(pattern, text))
            return found

        # Chinese and western date formats.
        dates = _collect([
            r'\d{4}年\d{1,2}月\d{1,2}日',
            r'\d{4}-\d{1,2}-\d{1,2}',
            r'\d{1,2}/\d{1,2}/\d{4}'
        ])

        # Percentages and approximate quantities.
        numbers = _collect([
            r'\d+%',
            r'\d+\.\d+%',
            r'约\s*\d+',
            r'超过\s*\d+'
        ])

        # Western "First Last" names plus short CJK runs (noisy heuristic).
        names = _collect([
            r'[A-Z][a-z]+\s+[A-Z][a-z]+',
            r'[一-龯]{2,4}'
        ])

        return {
            'dates': dates[:5],
            'numbers': numbers[:10],
            'names': names[:10],
            'word_count': len(text.split()),
            'sentence_count': len(re.split(r'[。！？]', text))
        }
    
    def _detect_ai_content(self, parsed_content: Dict) -> Dict:
        """Run AI-generated-content detection over the extracted text.

        Delegates to the Zerogpt detector; empty pages and detector
        failures both produce a neutral result dict.
        """
        text = parsed_content.get('text', '')

        # Nothing to analyze — report a neutral "no content" result.
        if not text:
            return {
                'ai_probability': 0.0,
                'content_type': 'unknown',
                'detection_method': 'no_content',
                'confidence': 0.0
            }

        try:
            result = self.zerogpt_detector.detect_ai_content(text)

            indicators = {
                'ai_probability': result.get('ai_probability', 0.0),
                'content_type': result.get('content_type', 'unknown'),
                'detection_method': result.get('detection_method', 'unknown'),
                'confidence': result.get('confidence', 0.0),
                'model_used': result.get('model_used', 'unknown'),
                'original_content_length': result.get('original_content_length', 0)
            }

            # The local heuristic fallback reports extra counters.
            if result.get('detection_method') == 'local_heuristic':
                indicators['ai_indicators_found'] = result.get('ai_indicators_found', 0)
                indicators['total_indicators'] = result.get('total_indicators', 0)

            return indicators

        except Exception as e:
            logger.error(f"AI内容检测失败: {str(e)}")
            return {
                'ai_probability': 0.0,
                'content_type': 'unknown',
                'detection_method': 'error',
                'confidence': 0.0,
                'error': str(e)
            }
    
    def _generate_content_summary(self, parsed_content: Dict) -> str:
        """Return a naive summary: the first 200 characters of the page text.

        An ellipsis is appended when the text was truncated. Fix: dropped
        the unused 'title' local the old version looked up and never read.
        """
        text = parsed_content.get('text', '')

        if not text:
            return "无法提取内容"

        summary = text[:200].strip()
        if len(text) > 200:
            summary += "..."

        return summary
    
    def _create_error_result(self, error_message: str) -> Dict:
        """Build the standard failure payload returned by analyze_url_content.

        Carries every key of the success result with neutral defaults so
        callers can read the dict without branching on success first.
        """
        result = {'success': False, 'error': error_message}
        result.update({
            'url': '',
            'title': '',
            'content_type': 'unknown',
            'reliability_score': 0.0,
            'key_information': {},
            'ai_indicators': {'ai_probability': 0.0},
            'content_summary': '',
            'metadata': {}
        })
        return result