"""
SEO优化顾问
提供搜索引擎优化建议，包括元标签、URL、内外链等
"""

import re
from typing import Dict, List, Tuple, Optional, Any
from dataclasses import dataclass
from enum import Enum
import logging
from urllib.parse import quote, unquote

logger = logging.getLogger(__name__)


class SEOIssueType(Enum):
    """Categories of SEO problems the advisor can report in issue dicts."""
    META_TITLE = "meta_title"
    META_DESCRIPTION = "meta_description"
    META_KEYWORDS = "meta_keywords"
    URL_STRUCTURE = "url_structure"
    HEADING_HIERARCHY = "heading_hierarchy"
    INTERNAL_LINKS = "internal_links"
    EXTERNAL_LINKS = "external_links"
    CONTENT_LENGTH = "content_length"
    KEYWORD_DENSITY = "keyword_density"
    READABILITY = "readability"
    # NOTE(review): the two types below are declared but not emitted by the
    # detection code visible in this file.
    MOBILE_FRIENDLY = "mobile_friendly"
    STRUCTURED_DATA = "structured_data"


@dataclass
class MetaTags:
    """Meta-tag information for a page (HTML head, Open Graph, Twitter)."""
    title: str            # <title> text
    description: str      # meta description text
    keywords: List[str]   # meta keywords
    og_title: str         # Open Graph title (mirrors title)
    og_description: str   # Open Graph description (truncated to 200 chars)
    og_image: str         # Open Graph image URL; may be empty
    twitter_card: str     # Twitter card type, e.g. "summary_large_image"


@dataclass
class URLAnalysis:
    """Result of checking one URL for SEO friendliness."""
    url: str                 # the URL that was analyzed
    is_friendly: bool        # True when no issues were detected
    issues: List[str]        # human-readable problem descriptions
    suggestions: List[str]   # one suggestion per detected issue
    score: float             # 0.0-1.0; 1.0 means no deductions applied


@dataclass
class HeadingStructure:
    """Counts and validity of the H1/H2/H3 heading hierarchy."""
    h1_count: int          # number of H1 headings found
    h2_count: int          # number of H2 headings found
    h3_count: int          # number of H3 headings found
    hierarchy_valid: bool  # False when a level is skipped (e.g. H3 without H2)
    issues: List[str]      # detected heading problems


@dataclass
class LinkAnalysis:
    """Inventory of links extracted from the content."""
    internal_links: List[Dict[str, str]]  # each entry: {'url': ..., 'text': ...}
    external_links: List[Dict[str, str]]  # links starting with http:// or https://
    internal_count: int                   # len(internal_links)
    external_count: int                   # len(external_links)
    nofollow_count: int                   # always 0 here; needs full HTML parsing
    broken_links: List[str]               # always empty here; needs live checking


@dataclass
class ReadabilityMetrics:
    """Readability indicators for the content."""
    flesch_score: float         # simplified Flesch-style score, 0-100
    sentence_avg_length: float  # average sentence length in characters
    paragraph_avg_length: float # average paragraph length in characters
    complex_words_ratio: float  # share of Chinese word runs longer than 4 chars
    grade_level: int            # estimated reading grade level (5-10)


@dataclass
class SEOAnalysisResult:
    """Aggregated output of a full SEO analysis."""
    overall_score: float                   # weighted score in [0, 1]
    meta_tags: MetaTags                    # analyzed/auto-generated meta tags
    url_analysis: Optional[URLAnalysis]    # None when no URL was supplied
    heading_structure: HeadingStructure    # H1/H2/H3 hierarchy summary
    link_analysis: LinkAnalysis            # internal/external link inventory
    readability: ReadabilityMetrics        # readability metrics
    content_length: int                    # CJK chars + Latin words counted
    keyword_density: Dict[str, float]      # keyword -> occurrences / total words
    issues: List[Dict[str, Any]]           # problems with type/severity/impact
    recommendations: List[Dict[str, Any]]  # actionable advice items
    structured_data_suggestions: List[Dict[str, Any]]  # schema.org suggestions


class SEOAdvisor:
    """SEO optimization advisor.

    Analyzes article content plus optional title/URL/meta-description and
    produces an overall score, detected issues, actionable recommendations
    and structured-data (schema.org) suggestions.
    """
    
    def __init__(self):
        """Initialize scoring thresholds and detection patterns."""
        # Meta-tag standards (character counts).
        self.meta_title_length = (30, 60)
        self.meta_description_length = (120, 160)
        self.ideal_content_length = 1000  # minimum word count
        self.ideal_keyword_density = (0.01, 0.03)  # 1%-3%
        self.max_url_length = 100
        
        # Readability standards (counted in Chinese characters).
        self.ideal_sentence_length = (15, 25)
        self.ideal_paragraph_length = (100, 200)
        
        # Regex patterns for common URL problems.
        # (multiple_slash / trailing_slash are currently not checked.)
        self.url_issues = {
            'chinese_chars': r'[\u4e00-\u9fff]',
            'special_chars': r'[^\w\-/.]',
            'multiple_slash': r'//',
            'trailing_slash': r'/$',
            'uppercase': r'[A-Z]',
            'underscores': r'_',
            'parameters': r'\?.*&.*&.*&'  # too many query parameters
        }
        
        # schema.org structured-data types this advisor knows about.
        self.structured_data_types = [
            'Article', 'NewsArticle', 'BlogPosting',
            'Product', 'Recipe', 'Event', 'FAQ',
            'HowTo', 'Review', 'Video'
        ]
    
    def analyze(self, content: str, title: str = "", url: str = "",
               meta_description: str = "",
               keywords: Optional[List[str]] = None) -> SEOAnalysisResult:
        """
        Run the full SEO analysis.
        
        Args:
            content: Article body text.
            title: Page title (auto-generated from content when empty).
            url: Page URL (URL checks are skipped when empty).
            meta_description: Meta description (auto-generated when empty).
            keywords: Target keywords (auto-extracted when omitted).
            
        Returns:
            SEOAnalysisResult with scores, issues and recommendations.
        """
        if not content:
            return self._empty_result("内容为空")
        
        # Meta tags, auto-filling any missing pieces from the content.
        meta_tags = self._analyze_meta_tags(title, meta_description, keywords, content)
        
        # URL structure (only when a URL was supplied).
        url_analysis = self._analyze_url(url) if url else None
        
        # Heading hierarchy.
        heading_structure = self._analyze_headings(content)
        
        # Internal/external links.
        link_analysis = self._analyze_links(content)
        
        # Readability metrics.
        readability = self._analyze_readability(content)
        
        # Keyword density for the target keywords.
        keyword_density = self._calculate_keyword_density(content, keywords or [])
        
        # Content length: CJK characters count individually, Latin words as one.
        content_length = len(re.findall(r'[\u4e00-\u9fff]', content)) + \
                        len(re.findall(r'\b[a-zA-Z]+\b', content))
        
        # Detect concrete SEO issues.
        issues = self._detect_issues(
            meta_tags, url_analysis, heading_structure,
            link_analysis, readability, content_length, keyword_density
        )
        
        # Actionable recommendations derived from the issues.
        recommendations = self._generate_recommendations(
            issues, meta_tags, heading_structure, content_length
        )
        
        # Structured-data suggestions.
        structured_data_suggestions = self._suggest_structured_data(content, title)
        
        # Weighted overall score in [0, 1].
        overall_score = self._calculate_overall_score(
            meta_tags, url_analysis, heading_structure,
            link_analysis, readability, content_length
        )
        
        return SEOAnalysisResult(
            overall_score=overall_score,
            meta_tags=meta_tags,
            url_analysis=url_analysis,
            heading_structure=heading_structure,
            link_analysis=link_analysis,
            readability=readability,
            content_length=content_length,
            keyword_density=keyword_density,
            issues=issues,
            recommendations=recommendations,
            structured_data_suggestions=structured_data_suggestions
        )
    
    def _analyze_meta_tags(self, title: str, description: str,
                          keywords: Optional[List[str]], content: str) -> MetaTags:
        """Build MetaTags, auto-generating any piece that was not provided."""
        if not title and content:
            title = self._generate_title_from_content(content)
        
        if not description and content:
            description = self._generate_description_from_content(content)
        
        if not keywords and content:
            keywords = self._extract_keywords_from_content(content)
        
        return MetaTags(
            title=title,
            description=description,
            keywords=keywords or [],
            og_title=title,
            og_description=description[:200],
            og_image="",  # to be filled from the image suggestions elsewhere
            twitter_card="summary_large_image"
        )
    
    def _analyze_url(self, url: str) -> URLAnalysis:
        """Check a URL for SEO-unfriendly traits and score it in [0, 1]."""
        issues = []
        suggestions = []
        score = 1.0
        
        # Strip the scheme before the special-character check, otherwise the
        # ':' in 'http(s)://' falsely flags every absolute URL.
        url_body = re.sub(r'^[a-zA-Z][a-zA-Z0-9+.\-]*://', '', url)
        
        # Overall length.
        if len(url) > self.max_url_length:
            issues.append(f"URL过长（{len(url)}字符）")
            suggestions.append(f"缩短URL到{self.max_url_length}字符以内")
            score -= 0.1
        
        # Chinese characters.
        if re.search(self.url_issues['chinese_chars'], url):
            issues.append("URL包含中文字符")
            suggestions.append("使用拼音或英文替代中文")
            score -= 0.2
        
        # Special characters (checked on the scheme-less part).
        if re.search(self.url_issues['special_chars'], url_body):
            issues.append("URL包含特殊字符")
            suggestions.append("只使用字母、数字、连字符和斜杠")
            score -= 0.1
        
        # Uppercase letters.
        if re.search(self.url_issues['uppercase'], url):
            issues.append("URL包含大写字母")
            suggestions.append("全部使用小写字母")
            score -= 0.05
        
        # Underscores.
        if re.search(self.url_issues['underscores'], url):
            issues.append("URL使用下划线")
            suggestions.append("使用连字符(-)代替下划线")
            score -= 0.05
        
        # Too many query parameters.
        if re.search(self.url_issues['parameters'], url):
            issues.append("URL参数过多")
            suggestions.append("使用更简洁的URL结构")
            score -= 0.1
        
        is_friendly = len(issues) == 0
        
        return URLAnalysis(
            url=url,
            is_friendly=is_friendly,
            issues=issues,
            suggestions=suggestions,
            score=max(0, score)
        )
    
    def _analyze_headings(self, content: str) -> HeadingStructure:
        """Count H1-H3 headings (markdown or plain HTML tags) and validate hierarchy."""
        h1_pattern = r'^#\s+.+|<h1>.+</h1>'
        h2_pattern = r'^##\s+.+|<h2>.+</h2>'
        h3_pattern = r'^###\s+.+|<h3>.+</h3>'
        
        h1_count = len(re.findall(h1_pattern, content, re.MULTILINE))
        h2_count = len(re.findall(h2_pattern, content, re.MULTILINE))
        h3_count = len(re.findall(h3_pattern, content, re.MULTILINE))
        
        issues = []
        
        # Exactly one H1 is expected.
        if h1_count == 0:
            issues.append("缺少H1标题")
        elif h1_count > 1:
            issues.append(f"H1标题过多（{h1_count}个）")
        
        # H3 without any H2 means a skipped level.
        hierarchy_valid = True
        if h3_count > 0 and h2_count == 0:
            issues.append("H3存在但缺少H2，层级不完整")
            hierarchy_valid = False
        
        return HeadingStructure(
            h1_count=h1_count,
            h2_count=h2_count,
            h3_count=h3_count,
            hierarchy_valid=hierarchy_valid,
            issues=issues
        )
    
    def _analyze_links(self, content: str) -> LinkAnalysis:
        """Extract markdown and simple HTML links; split internal vs external."""
        # Groups 1/2: markdown [text](url); groups 3/4: HTML <a href=...>text</a>.
        link_pattern = r'\[([^\]]+)\]\(([^\)]+)\)|<a[^>]*href=["\']([^"\']+)["\'][^>]*>([^<]+)</a>'
        
        internal_links = []
        external_links = []
        
        for match in re.finditer(link_pattern, content):
            if match.group(2):  # markdown link
                url = match.group(2)
                text = match.group(1)
            else:  # HTML link
                url = match.group(3)
                text = match.group(4)
            
            link_info = {'url': url, 'text': text}
            
            # Absolute http(s) URLs are treated as external; everything else internal.
            if url.startswith('http://') or url.startswith('https://'):
                external_links.append(link_info)
            else:
                internal_links.append(link_info)
        
        return LinkAnalysis(
            internal_links=internal_links,
            external_links=external_links,
            internal_count=len(internal_links),
            external_count=len(external_links),
            nofollow_count=0,  # would need real HTML parsing
            broken_links=[]  # would need live link checking
        )
    
    def _analyze_readability(self, content: str) -> ReadabilityMetrics:
        """Compute simplified readability metrics for (mostly Chinese) text."""
        # Split into sentences on Chinese terminal punctuation.
        sentences = re.split(r'[。！？]', content)
        sentences = [s for s in sentences if s.strip()]
        
        # Split into paragraphs on blank lines.
        paragraphs = content.split('\n\n')
        paragraphs = [p for p in paragraphs if p.strip()]
        
        # Average sentence length in characters.
        if sentences:
            total_chars = sum(len(s) for s in sentences)
            sentence_avg_length = total_chars / len(sentences)
        else:
            sentence_avg_length = 0
        
        # Average paragraph length in characters.
        if paragraphs:
            total_para_chars = sum(len(p) for p in paragraphs)
            paragraph_avg_length = total_para_chars / len(paragraphs)
        else:
            paragraph_avg_length = 0
        
        # Complex-word ratio: Chinese runs longer than 4 characters.
        all_words = re.findall(r'[\u4e00-\u9fff]+', content)
        complex_words = [w for w in all_words if len(w) > 4]
        complex_words_ratio = len(complex_words) / len(all_words) if all_words else 0
        
        # Simplified Flesch-style score from sentence length and word complexity.
        flesch_score = max(0, min(100, 100 - sentence_avg_length * 0.5 - complex_words_ratio * 50))
        
        # Map the score to an approximate reading grade level.
        if flesch_score >= 90:
            grade_level = 5
        elif flesch_score >= 80:
            grade_level = 6
        elif flesch_score >= 70:
            grade_level = 7
        elif flesch_score >= 60:
            grade_level = 8
        elif flesch_score >= 50:
            grade_level = 9
        else:
            grade_level = 10
        
        return ReadabilityMetrics(
            flesch_score=flesch_score,
            sentence_avg_length=sentence_avg_length,
            paragraph_avg_length=paragraph_avg_length,
            complex_words_ratio=complex_words_ratio,
            grade_level=grade_level
        )
    
    def _calculate_keyword_density(self, content: str, keywords: List[str]) -> Dict[str, float]:
        """Return keyword -> (occurrence count / total word count)."""
        density = {}
        
        # Total words: Chinese runs count as one word each, plus Latin words.
        total_words = len(re.findall(r'[\u4e00-\u9fff]+|\b[a-zA-Z]+\b', content))
        
        if total_words == 0:
            return density
        
        for keyword in keywords:
            count = content.count(keyword)
            density[keyword] = count / total_words
        
        return density
    
    def _detect_issues(self, meta_tags: MetaTags, url_analysis: Optional[URLAnalysis],
                      heading_structure: HeadingStructure, link_analysis: LinkAnalysis,
                      readability: ReadabilityMetrics, content_length: int,
                      keyword_density: Dict[str, float]) -> List[Dict[str, Any]]:
        """Collect SEO issues as dicts with type/severity/description/impact."""
        issues = []
        
        # Meta title length.
        if len(meta_tags.title) < self.meta_title_length[0]:
            issues.append({
                'type': SEOIssueType.META_TITLE.value,
                'severity': 'high',
                'description': f'标题过短（{len(meta_tags.title)}字符）',
                'impact': '影响点击率'
            })
        elif len(meta_tags.title) > self.meta_title_length[1]:
            issues.append({
                'type': SEOIssueType.META_TITLE.value,
                'severity': 'medium',
                'description': f'标题过长（{len(meta_tags.title)}字符）',
                'impact': '搜索结果中会被截断'
            })
        
        # Meta description length.
        if len(meta_tags.description) < self.meta_description_length[0]:
            issues.append({
                'type': SEOIssueType.META_DESCRIPTION.value,
                'severity': 'medium',
                'description': f'描述过短（{len(meta_tags.description)}字符）',
                'impact': '未充分利用描述空间'
            })
        elif len(meta_tags.description) > self.meta_description_length[1]:
            issues.append({
                'type': SEOIssueType.META_DESCRIPTION.value,
                'severity': 'low',
                'description': f'描述过长（{len(meta_tags.description)}字符）',
                'impact': '搜索结果中会被截断'
            })
        
        # URL problems (one issue entry per URL finding).
        if url_analysis and not url_analysis.is_friendly:
            for issue in url_analysis.issues:
                issues.append({
                    'type': SEOIssueType.URL_STRUCTURE.value,
                    'severity': 'medium',
                    'description': issue,
                    'impact': '影响URL可读性和SEO'
                })
        
        # Heading hierarchy problems.
        for issue in heading_structure.issues:
            issues.append({
                'type': SEOIssueType.HEADING_HIERARCHY.value,
                'severity': 'medium',
                'description': issue,
                'impact': '影响内容结构理解'
            })
        
        # Content too short.
        if content_length < self.ideal_content_length:
            issues.append({
                'type': SEOIssueType.CONTENT_LENGTH.value,
                'severity': 'high',
                'description': f'内容过短（{content_length}字）',
                'impact': '内容深度不足，影响排名'
            })
        
        # Link problems.
        if link_analysis.internal_count == 0:
            issues.append({
                'type': SEOIssueType.INTERNAL_LINKS.value,
                'severity': 'medium',
                'description': '缺少内部链接',
                'impact': '影响站内权重传递'
            })
        
        if link_analysis.external_count > 10:
            issues.append({
                'type': SEOIssueType.EXTERNAL_LINKS.value,
                'severity': 'low',
                'description': f'外部链接过多（{link_analysis.external_count}个）',
                'impact': '可能分散权重'
            })
        
        # Readability.
        if readability.flesch_score < 30:
            issues.append({
                'type': SEOIssueType.READABILITY.value,
                'severity': 'medium',
                'description': f'可读性较差（得分{readability.flesch_score:.1f}）',
                'impact': '影响用户体验和停留时间'
            })
        
        # Keyword stuffing (only the upper density bound is enforced).
        for keyword, density in keyword_density.items():
            if density > self.ideal_keyword_density[1]:
                issues.append({
                    'type': SEOIssueType.KEYWORD_DENSITY.value,
                    'severity': 'high',
                    'description': f'关键词"{keyword}"密度过高（{density:.2%}）',
                    'impact': '可能被判定为关键词堆砌'
                })
        
        return issues
    
    def _generate_recommendations(self, issues: List[Dict[str, Any]],
                                 meta_tags: MetaTags, heading_structure: HeadingStructure,
                                 content_length: int) -> List[Dict[str, Any]]:
        """Turn detected issues into prioritized, actionable recommendations."""
        recommendations = []
        
        # Which issue types were detected.
        issue_types = {issue['type'] for issue in issues}
        
        if SEOIssueType.META_TITLE.value in issue_types:
            recommendations.append({
                'type': 'meta',
                'priority': 'high',
                'title': '优化页面标题',
                'description': f'调整标题长度到{self.meta_title_length[0]}-{self.meta_title_length[1]}字符',
                'tips': [
                    '包含主要关键词',
                    '放在标题前部',
                    '吸引点击的表达'
                ],
                'example': f'{meta_tags.title[:50]}...' if len(meta_tags.title) > 50 else f'{meta_tags.title} - 扩展内容'
            })
        
        if SEOIssueType.META_DESCRIPTION.value in issue_types:
            recommendations.append({
                'type': 'meta',
                'priority': 'medium',
                'title': '完善元描述',
                'description': f'优化描述长度到{self.meta_description_length[0]}-{self.meta_description_length[1]}字符',
                'tips': [
                    '简述文章要点',
                    '包含关键词',
                    '添加行动号召'
                ]
            })
        
        if SEOIssueType.HEADING_HIERARCHY.value in issue_types:
            recommendations.append({
                'type': 'structure',
                'priority': 'medium',
                'title': '优化标题层级',
                'description': '建立清晰的H1-H2-H3层级结构',
                'tips': [
                    '一个页面只用一个H1',
                    'H2作为主要章节标题',
                    'H3作为子章节标题'
                ]
            })
        
        if content_length < self.ideal_content_length:
            recommendations.append({
                'type': 'content',
                'priority': 'high',
                'title': '扩充内容',
                'description': f'增加内容到{self.ideal_content_length}字以上',
                'tips': [
                    '添加更多细节',
                    '增加案例说明',
                    '扩展相关话题'
                ]
            })
        
        # Generic technical-SEO advice, always included.
        recommendations.append({
            'type': 'technical',
            'priority': 'low',
            'title': '技术SEO优化',
            'description': '提升技术SEO指标',
            'tips': [
                '优化页面加载速度',
                '确保移动端友好',
                '添加结构化数据',
                '生成XML站点地图'
            ]
        })
        
        return recommendations
    
    def _suggest_structured_data(self, content: str, title: str) -> List[Dict[str, Any]]:
        """Suggest schema.org structured data based on simple content heuristics."""
        suggestions = []
        
        # Baseline Article schema for any page.
        suggestions.append({
            'type': 'Article',
            'priority': 'high',
            'description': '添加Article结构化数据',
            'schema': {
                '@context': 'https://schema.org',
                '@type': 'Article',
                'headline': title,
                'description': self._generate_description_from_content(content),
                'author': {
                    '@type': 'Person',
                    'name': 'Author Name'
                },
                'datePublished': '2024-01-01',
                'dateModified': '2024-01-01'
            }
        })
        
        # Q&A style content suggests an FAQ schema.
        if '？' in content or '如何' in content or '为什么' in content:
            suggestions.append({
                'type': 'FAQPage',
                'priority': 'medium',
                'description': '添加FAQ结构化数据',
                'reason': '内容包含问答形式'
            })
        
        # Step-by-step content suggests a HowTo schema.
        if '步骤' in content or '第一步' in content:
            suggestions.append({
                'type': 'HowTo',
                'priority': 'medium',
                'description': '添加HowTo结构化数据',
                'reason': '内容包含操作步骤'
            })
        
        return suggestions
    
    def _calculate_overall_score(self, meta_tags: MetaTags,
                                url_analysis: Optional[URLAnalysis],
                                heading_structure: HeadingStructure,
                                link_analysis: LinkAnalysis,
                                readability: ReadabilityMetrics,
                                content_length: int) -> float:
        """Compute a weighted overall SEO score, normalized to [0, 1]."""
        score = 0.0
        weights = 0.0
        
        # Meta tags (25%): title 0.5, description 0.3, keywords 0.2.
        meta_score = 0.0
        if self.meta_title_length[0] <= len(meta_tags.title) <= self.meta_title_length[1]:
            meta_score += 0.5
        if self.meta_description_length[0] <= len(meta_tags.description) <= self.meta_description_length[1]:
            meta_score += 0.3
        if meta_tags.keywords:
            meta_score += 0.2
        score += meta_score * 0.25
        weights += 0.25
        
        # URL (15%) — only counted when a URL was analyzed.
        if url_analysis:
            score += url_analysis.score * 0.15
            weights += 0.15
        
        # Heading structure (15%).
        heading_score = 1.0
        if heading_structure.h1_count != 1:
            heading_score -= 0.3
        if not heading_structure.hierarchy_valid:
            heading_score -= 0.2
        score += max(0, heading_score) * 0.15
        weights += 0.15
        
        # Content length (20%), capped at the ideal length.
        length_score = min(1.0, content_length / self.ideal_content_length)
        score += length_score * 0.20
        weights += 0.20
        
        # Readability (15%).
        readability_score = readability.flesch_score / 100
        score += readability_score * 0.15
        weights += 0.15
        
        # Links (10%): internal 0.5, 1-5 external 0.3, no broken links 0.2.
        link_score = 0.0
        if link_analysis.internal_count > 0:
            link_score += 0.5
        if 1 <= link_analysis.external_count <= 5:
            link_score += 0.3
        if link_analysis.broken_links == []:
            link_score += 0.2
        score += link_score * 0.10
        weights += 0.10
        
        # Normalize by the weights actually applied.
        if weights > 0:
            final_score = score / weights
        else:
            final_score = 0.0
        
        return min(1.0, max(0.0, final_score))
    
    def _generate_title_from_content(self, content: str) -> str:
        """Derive a title from the first line (or first 50 chars) of content."""
        first_line = content.split('\n')[0] if '\n' in content else content[:50]
        # Drop markdown heading markers so the title is plain text.
        first_line = first_line.lstrip('#').strip()
        return first_line[:60]
    
    def _generate_description_from_content(self, content: str) -> str:
        """Derive a description from the first 150 chars of whitespace-collapsed content."""
        clean_content = re.sub(r'\s+', ' ', content).strip()
        # Only add an ellipsis when the text was actually truncated.
        if len(clean_content) <= 150:
            return clean_content
        return clean_content[:150] + '...'
    
    def _extract_keywords_from_content(self, content: str) -> List[str]:
        """Extract up to 5 frequent 2-4 character Chinese words as keywords."""
        words = re.findall(r'[\u4e00-\u9fff]{2,4}', content)
        
        from collections import Counter
        word_freq = Counter(words)
        
        # Keep frequent words (>=2 occurrences) that are not stopwords.
        stopwords = {'的', '是', '在', '和', '了', '有', '我', '你', '他'}
        keywords = [w for w, count in word_freq.most_common(10) 
                   if w not in stopwords and count >= 2]
        
        return keywords[:5]
    
    def _empty_result(self, reason: str) -> SEOAnalysisResult:
        """Build a zero-score result carrying the failure reason as an issue."""
        return SEOAnalysisResult(
            overall_score=0.0,
            meta_tags=MetaTags("", "", [], "", "", "", ""),
            url_analysis=None,
            heading_structure=HeadingStructure(0, 0, 0, False, [reason]),
            link_analysis=LinkAnalysis([], [], 0, 0, 0, []),
            readability=ReadabilityMetrics(0, 0, 0, 0, 0),
            content_length=0,
            keyword_density={},
            issues=[{
                'type': 'error',
                'severity': 'high',
                'description': reason,
                'impact': ''
            }],
            recommendations=[],
            structured_data_suggestions=[]
        )