"""
Markdown写入工具模块

将爬取的数据输出为Markdown格式文档。
"""

import os
from datetime import datetime
from typing import List, Dict, Any, Optional
from pathlib import Path
from collections import defaultdict

from ..models.data_model import ScrapedData, ScrapingResult
from ..utils.logger import get_logger
from config.settings import OUTPUT_DIR, MARKDOWN_CONFIG, Constants


class MarkdownWriter:
    """Renders scraped data into Markdown-formatted report documents."""

    def __init__(self, output_dir: Optional[Path] = None):
        """
        Initialize the Markdown writer.

        Args:
            output_dir: Target directory for generated files; falls back to
                the configured OUTPUT_DIR when omitted.
        """
        self.output_dir = output_dir or OUTPUT_DIR
        self.logger = get_logger('markdown_writer')

        # Create the output directory up front so later writes cannot fail
        # on a missing path.
        self.output_dir.mkdir(parents=True, exist_ok=True)

        self.logger.info(f"Markdown写入器初始化完成，输出目录: {self.output_dir}")

    def write_data(self, data_list: List[ScrapedData],
                   filename: Optional[str] = None) -> str:
        """
        Write a list of scraped records to a Markdown file.

        Args:
            data_list: Records to render.
            filename: Optional file name; auto-generated from a timestamp
                when omitted.

        Returns:
            Path of the written file as a string, or "" when there was
            nothing to write.

        Raises:
            OSError: Propagated (after logging) when the file cannot be
                written.
        """
        if not data_list:
            self.logger.warning("没有数据需要写入")
            return ""

        if not filename:
            filename = self._generate_filename()

        file_path = self.output_dir / filename

        try:
            # Build the full document in memory, then write it in one pass.
            content = self._generate_markdown_content(data_list)

            with open(file_path, 'w', encoding='utf-8') as f:
                f.write(content)

            self.logger.info(f"成功写入Markdown文件: {file_path}")
            return str(file_path)

        except Exception as e:
            self.logger.error(f"写入Markdown文件失败: {e}")
            raise

    def write_results(self, results: List[ScrapingResult],
                     filename: Optional[str] = None) -> str:
        """
        Flatten successful scraping results into one Markdown file.

        Args:
            results: Scraping results; failed or empty results are skipped.
            filename: Optional file name (see write_data).

        Returns:
            Path of the written file as a string, or "" when no data.
        """
        # Merge data from all successful, non-empty results.
        all_data = []
        for result in results:
            if result.success and result.data:
                all_data.extend(result.data)

        return self.write_data(all_data, filename)

    def _generate_filename(self) -> str:
        """Return a timestamped default file name."""
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        return f"scraped_data_{timestamp}.md"

    def _generate_markdown_content(self, data_list: List[ScrapedData]) -> str:
        """
        Assemble the full Markdown document for the given records.

        Args:
            data_list: Records to render.

        Returns:
            The document as a single Markdown string.
        """
        content_parts = []

        content_parts.append(self._generate_header())
        content_parts.append(self._generate_statistics(data_list))

        # Group records by source domain when enabled in config;
        # otherwise emit one flat table.
        if MARKDOWN_CONFIG.get('GROUP_BY_SOURCE', True):
            grouped_data = self._group_data_by_source(data_list)
            for source, source_data in grouped_data.items():
                content_parts.append(self._generate_source_section(source, source_data))
        else:
            content_parts.append(self._generate_data_table(data_list))

        content_parts.append(self._generate_footer())

        return '\n\n'.join(content_parts)

    def _generate_header(self) -> str:
        """Return the document title block."""
        header_parts = [
            "# 爬取数据报告",
            "",
            f"**生成时间**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
            f"**数据来源**: 网络爬虫自动采集",
            "",
            "---",
            ""
        ]
        return '\n'.join(header_parts)

    def _generate_statistics(self, data_list: List[ScrapedData]) -> str:
        """Return a summary section: total count and per-source counts."""
        if not data_list:
            return "## 统计信息\n\n无数据\n"

        # Count records per source URL.
        source_stats = defaultdict(int)
        for data in data_list:
            source_stats[data.source_url] += 1

        stats_parts = [
            "## 统计信息",
            "",
            f"- **总数据条数**: {len(data_list)}",
            f"- **数据来源数**: {len(source_stats)}",
            ""
        ]

        # Per-source breakdown only makes sense with multiple sources.
        if len(source_stats) > 1:
            stats_parts.append("### 各来源数据统计")
            stats_parts.append("")
            for source, count in sorted(source_stats.items(), key=lambda x: x[1], reverse=True):
                stats_parts.append(f"- {source}: {count} 条")
            stats_parts.append("")

        return '\n'.join(stats_parts)

    def _group_data_by_source(self, data_list: List[ScrapedData]) -> Dict[str, List[ScrapedData]]:
        """Group records by the domain of their source URL."""
        grouped = defaultdict(list)
        for data in data_list:
            # Use the domain as the grouping key.
            source_key = self._extract_domain(data.source_url)
            grouped[source_key].append(data)
        return dict(grouped)

    def _extract_domain(self, url: str) -> str:
        """Return the domain of *url*, or the unknown-source placeholder."""
        try:
            from urllib.parse import urlparse
            parsed = urlparse(url)
            return parsed.netloc or "未知来源"
        except Exception:
            return "未知来源"

    def _generate_source_section(self, source: str, source_data: List[ScrapedData]) -> str:
        """Return one per-source section: heading, count, data table."""
        section_parts = [
            f"## {source}",
            "",
            f"共 {len(source_data)} 条数据",
            ""
        ]

        section_parts.append(self._generate_data_table(source_data))

        return '\n'.join(section_parts)

    def _escape_table_cell(self, text: str) -> str:
        """
        Escape characters that would break a Markdown table cell.

        Pipes delimit columns and newlines terminate the row, so a raw
        '|' or line break in scraped text would corrupt the table layout.
        """
        if not text:
            return ""
        return text.replace('|', '\\|').replace('\r', ' ').replace('\n', ' ')

    def _generate_data_table(self, data_list: List[ScrapedData]) -> str:
        """Return a Markdown table for the given records."""
        if not data_list:
            return "无数据"

        table_parts = [
            "| 标题 | 登记时间 | 答复单位 | 答复时间 | 来源链接 |",
            "|------|----------|----------|----------|----------|"
        ]

        for data in data_list:
            # Truncate long fields, then escape table-breaking characters.
            title = self._escape_table_cell(self._truncate_text(data.title, 50))

            reg_time = data.registration_time or "未知"
            reply_time = data.reply_time or "未知"

            reply_unit = self._escape_table_cell(
                self._truncate_text(data.reply_unit, 30)) or "未知"

            # Render the source URL as a short link.
            link_text = "查看详情"
            source_link = f"[{link_text}]({data.source_url})"

            row = f"| {title} | {reg_time} | {reply_unit} | {reply_time} | {source_link} |"
            table_parts.append(row)

        return '\n'.join(table_parts)

    def _truncate_text(self, text: str, max_length: int) -> str:
        """Truncate *text* to *max_length* characters, ellipsis included."""
        if not text:
            return ""

        if len(text) <= max_length:
            return text

        if max_length <= 3:
            # No room for the "..." suffix (the original sliced with a
            # negative index here); hard-cut instead.
            return text[:max_length]

        return text[:max_length - 3] + "..."

    def _generate_footer(self) -> str:
        """Return the document footer block."""
        footer_parts = [
            "---",
            "",
            "> 本报告由网络爬虫自动生成，数据仅供参考。",
            f"> 生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
            ""
        ]
        return '\n'.join(footer_parts)

    def write_detailed_report(self, results: List[ScrapingResult],
                            filename: Optional[str] = None) -> str:
        """
        Write a detailed per-result report to a Markdown file.

        Args:
            results: Scraping results (successes and failures).
            filename: Optional file name; auto-generated when omitted.

        Returns:
            Path of the written file as a string.

        Raises:
            OSError: Propagated (after logging) when the file cannot be
                written.
        """
        if not filename:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"detailed_report_{timestamp}.md"

        file_path = self.output_dir / filename

        try:
            content = self._generate_detailed_content(results)

            with open(file_path, 'w', encoding='utf-8') as f:
                f.write(content)

            self.logger.info(f"成功写入详细报告: {file_path}")
            return str(file_path)

        except Exception as e:
            self.logger.error(f"写入详细报告失败: {e}")
            raise

    def _generate_detailed_content(self, results: List[ScrapingResult]) -> str:
        """Assemble the detailed report: overall stats plus one section per result."""
        content_parts = [
            "# 爬取详细报告",
            "",
            f"**生成时间**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
            "",
            "---",
            ""
        ]

        total_requests = len(results)
        successful_requests = sum(1 for r in results if r.success)
        # Guard r.data as well: a successful result may carry no data
        # (mirrors the check in write_results).
        total_data = sum(len(r.data) for r in results if r.success and r.data)

        # BUG FIX: the original wrapped the conditional around the whole
        # list element, so an empty run emitted a bare "0%" with no
        # "成功率" label. Compute the rate first, label it unconditionally.
        if total_requests > 0:
            success_rate = f"{successful_requests / total_requests * 100:.1f}%"
        else:
            success_rate = "0%"

        content_parts.extend([
            "## 总体统计",
            "",
            f"- **总请求数**: {total_requests}",
            f"- **成功请求数**: {successful_requests}",
            f"- **失败请求数**: {total_requests - successful_requests}",
            f"- **成功率**: {success_rate}",
            f"- **总数据条数**: {total_data}",
            ""
        ])

        content_parts.append("## 详细结果")
        content_parts.append("")

        for i, result in enumerate(results, 1):
            content_parts.append(f"### {i}. {result.url}")
            content_parts.append("")

            if result.success:
                content_parts.extend([
                    f"- **状态**: ✅ 成功",
                    f"- **数据条数**: {result.get_data_count()}",
                    f"- **处理时间**: {result.processing_time:.2f}秒",
                    f"- **爬取时间**: {result.scraped_at.strftime('%Y-%m-%d %H:%M:%S')}",
                    ""
                ])

                # Include the per-result data table when data is present.
                if result.data:
                    content_parts.append(self._generate_data_table(result.data))
                    content_parts.append("")
            else:
                content_parts.extend([
                    f"- **状态**: ❌ 失败",
                    f"- **错误信息**: {result.error_message}",
                    f"- **处理时间**: {result.processing_time:.2f}秒",
                    ""
                ])

        return '\n'.join(content_parts)


# Module-level singleton writer instance.
# NOTE(review): instantiating at import time runs __init__, which creates
# OUTPUT_DIR as a side effect of importing this module — consider lazy
# initialization if import-time I/O becomes a problem.
markdown_writer = MarkdownWriter()
