import aiohttp
import asyncio
import aiofiles
from typing import List, Dict, Any
from datetime import datetime
from pathlib import Path
import json
from bs4 import BeautifulSoup
import re

from src.core.base_downloader import BaseDownloader, ContentMetadata
from src.core.download_utils import DownloadUtils
from src.infrastructure.config import ConfigurationManager
from src.infrastructure.logger import log_manager

class XiaohongshuDownloader(BaseDownloader):
    """Downloader for Xiaohongshu (RED) notes.

    Fetches a note page over HTTP, extracts title/text/images/videos with
    BeautifulSoup, saves the textual content as JSON and downloads the media
    files into a configurable directory layout.
    """

    def __init__(self, config: Dict[str, Any]):
        super().__init__(config)
        # Platform-scoped logger for this module.
        self.logger = log_manager.get_logger("xiaohongshu")
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
            # Additional required headers can be added here.
        }
        # Created lazily on first use in download_by_url.
        self.download_utils = None

    async def _get_note_content(self, url: str) -> Dict[str, Any]:
        """Fetch a note page and parse it into a content dict.

        Returns a dict with keys 'title', 'text', 'images', 'videos' and
        'metadata' (a ContentMetadata instance).

        Raises:
            ValueError: if the HTML cannot be parsed.
            Exception: if the HTTP status is not 200.
        """
        async with aiohttp.ClientSession(headers=self.headers) as session:
            async with session.get(url) as response:
                # Guard clause: anything but 200 is a hard failure.
                if response.status != 200:
                    raise Exception(f"Failed to download: {response.status}")

                html = await response.text()
                self.logger.debug("开始解析HTML内容")

                try:
                    soup = BeautifulSoup(html, 'html.parser')

                    # Best-effort extraction: Xiaohongshu markup changes
                    # frequently, so match on class-name substrings, the same
                    # heuristics used by analyze_html_structure below.
                    title_elem = soup.find(
                        ['h1', 'div'],
                        class_=lambda c: c and 'title' in c.lower())
                    title = title_elem.text.strip() if title_elem else 'untitled'

                    desc_elem = soup.find(
                        ['div', 'span'],
                        class_=lambda c: c and ('desc' in c.lower() or 'content' in c.lower()))
                    text = desc_elem.text.strip() if desc_elem else ''

                    author_elem = soup.find(
                        ['div', 'span'],
                        class_=lambda c: c and ('author' in c.lower() or 'user' in c.lower()))
                    author = author_elem.text.strip() if author_elem else None

                    images = [img['src'] for img in soup.find_all('img', src=True)]
                    videos = [video['src'] for video in soup.find_all('video', src=True)]

                    return {
                        'title': title,
                        'text': text,
                        'images': images,
                        'videos': videos,
                        'metadata': ContentMetadata(
                            platform='xiaohongshu',
                            content_type='note',
                            # TODO(review): parse the real publish time from
                            # the page; fall back to fetch time for now so
                            # _save_text_content can serialize it.
                            publish_time=datetime.now(),
                            author=author,
                            # TODO(review): interaction counters are not yet
                            # parsed from the page; default to 0.
                            likes=0,
                            comments=0,
                            collections=0
                        )
                    }

                except Exception as e:
                    self.logger.error(f"解析HTML内容失败: {e}", exc_info=True)
                    # Log a slice of the HTML to aid debugging of markup changes.
                    self.logger.debug(f"HTML内容: {html[:1000]}...")
                    raise ValueError(f"Failed to parse note content: {e}")

    async def download_by_url(self, url: str) -> Dict[str, Any]:
        """Download a single note: text content plus all images/videos.

        Returns the parsed note dict augmented with 'local_text_path' and
        'media_files' entries. Re-raises any failure after logging it.
        """
        self.logger.info(f"开始下载笔记: {url}")
        try:
            # Lazily build the shared download helper (needs a progress manager).
            if not self.download_utils:
                from src.core.progress_manager import ProgressManager
                progress_manager = ProgressManager()
                self.download_utils = DownloadUtils(self.config, progress_manager)

            # Fetch and parse the note page.
            self.logger.debug("正在获取笔记内容...")
            note_content = await self._get_note_content(url)

            # Build and create the storage directory.
            storage_path = self._build_storage_path(note_content['metadata'])
            storage_path.mkdir(parents=True, exist_ok=True)
            self.logger.debug(f"存储路径: {storage_path}")

            # Persist the textual content as JSON.
            text_path = storage_path / self._get_text_filename(note_content)
            await self._save_text_content(text_path, note_content)

            # Download media files, recording original URL -> local path pairs.
            media_files = []

            if note_content['images']:
                image_paths = await self._download_media_files(
                    urls=note_content['images'],
                    save_path=storage_path,
                    media_type='images'
                )
                media_files.extend([
                    {
                        'type': 'image',
                        'original_url': img_url,
                        'local_path': path
                    }
                    for img_url, path in zip(note_content['images'], image_paths)
                ])

            if note_content['videos']:
                video_paths = await self._download_media_files(
                    urls=note_content['videos'],
                    save_path=storage_path,
                    media_type='videos'
                )
                media_files.extend([
                    {
                        'type': 'video',
                        'original_url': vid_url,
                        'local_path': path
                    }
                    for vid_url, path in zip(note_content['videos'], video_paths)
                ])

            # Augment the result with local storage information.
            note_content['local_text_path'] = str(text_path)
            note_content['media_files'] = media_files

            self.logger.info(f"笔记下载完成: {url}")
            return note_content

        except Exception as e:
            self.logger.error(f"下载笔记失败: {url}", exc_info=True)
            raise

    def _build_storage_path(self, metadata: ContentMetadata) -> Path:
        """Build the storage directory from the configured path template.

        Falls back to a fixed default path (with a logged warning) if the
        template is malformed or configuration lookup fails.
        """
        try:
            template = ConfigurationManager.get_value('storage.content_path', '{platform}/{author}/{date}')
            path = template.format(
                platform='xiaohongshu',
                author=metadata.author or 'unknown',
                date=datetime.now().strftime('%Y%m%d')
            )
            base_path = ConfigurationManager.get_value('storage.base_path', './downloads')
            return Path(base_path) / path
        except Exception as e:
            # Don't fail the whole download over a path-template problem;
            # log it and use a safe default location instead.
            self.logger.warning(f"Failed to build storage path, using default: {e}")
            return Path('./downloads/xiaohongshu/unknown')

    def _get_text_filename(self, content: Dict) -> str:
        """Build the text filename from the configured template.

        The title is sanitized: every character that is invalid in Windows
        or POSIX filenames is replaced with '_' (the original code only
        handled '/').
        """
        template = ConfigurationManager.get_value('storage.text_filename', '{title}.txt')
        safe_title = re.sub(r'[\\/:*?"<>|]', '_', content.get('title', 'untitled'))
        return template.format(title=safe_title)

    async def _save_text_content(self, file_path: Path, content: Dict):
        """Serialize the note's text and metadata to a UTF-8 JSON file."""
        publish_time = content['metadata'].publish_time
        text_content = {
            'title': content.get('title', ''),
            'text': content.get('text', ''),
            'metadata': {
                'author': content['metadata'].author,
                # Guard: publish_time may legitimately be missing.
                'publish_time': publish_time.isoformat() if publish_time else None,
                'likes': content['metadata'].likes,
                'comments': content['metadata'].comments,
                'collections': content['metadata'].collections
            }
        }

        async with aiofiles.open(file_path, 'w', encoding='utf-8') as f:
            await f.write(json.dumps(text_content, ensure_ascii=False, indent=2))

    async def _download_media_files(self, urls: List[str], save_path: Path,
                                  media_type: str) -> List[str]:
        """Download media URLs into save_path/<media_type>/ and return local paths.

        Filenames follow the configured 'storage.media_filename' template,
        keyed by a shared timestamp plus the item index.
        """
        timestamp = datetime.now().strftime('%Y%m%d%H%M%S')

        paths = []
        for index, url in enumerate(urls):
            # Default to .jpg when the URL carries no usable extension.
            ext = Path(url).suffix or '.jpg'
            filename = ConfigurationManager.get_value('storage.media_filename', '{timestamp}_{index}{ext}').format(
                timestamp=timestamp,
                index=index,
                ext=ext
            )
            file_path = save_path / media_type / filename
            # parents=True: be robust even if save_path was not pre-created.
            file_path.parent.mkdir(parents=True, exist_ok=True)

            await self.download_utils.download_file(url, str(file_path))
            paths.append(str(file_path))

        return paths

    async def download_by_author(self, author_id: str, **filters) -> List[Dict[str, Any]]:
        """Download all content of an author."""
        # TODO: implement author content download
        return []

    async def search_and_download(self, keyword: str, **filters) -> List[Dict[str, Any]]:
        """Search by keyword and download matching content."""
        # TODO: implement search-and-download
        return []

    async def analyze_html_structure(self, html_file: Path):
        """Log a structural analysis of a saved HTML file (debugging aid).

        Dumps container classes, candidate title/author/time elements,
        image/video sources and interaction-count elements.
        """
        async with aiofiles.open(html_file, 'r', encoding='utf-8') as f:
            html = await f.read()

        soup = BeautifulSoup(html, 'html.parser')

        # Top-level structure overview.
        self.logger.info("=== HTML结构分析 ===")

        # Every div/section that carries a class attribute.
        main_containers = soup.find_all(['div', 'section'], class_=True)
        for container in main_containers:
            if container.get('class'):
                self.logger.info(f"\n容器: {' '.join(container.get('class'))}")
                # List the container's direct children only.
                for child in container.find_all(recursive=False):
                    if child.name:
                        self.logger.info(f"  ├─ {child.name}: {' '.join(child.get('class', []))}")
                        # Only short texts are worth printing inline.
                        if child.text and len(child.text.strip()) < 50:
                            self.logger.info(f"  │  └─ 内容: {child.text.strip()}")

        # Candidate data points, matched by class-name substring.
        self.logger.info("\n=== 可能的数据点 ===")
        # Title
        title = soup.find(['h1', 'div'], class_=lambda x: x and 'title' in x.lower())
        if title:
            self.logger.info(f"标题: {title.text.strip()}")

        # Author info
        author = soup.find(['div', 'span'], class_=lambda x: x and ('author' in x.lower() or 'user' in x.lower()))
        if author:
            self.logger.info(f"作者信息: {author.text.strip()}")

        # Timestamp
        time_elem = soup.find(['span', 'div'], class_=lambda x: x and 'time' in x.lower())
        if time_elem:
            self.logger.info(f"时间: {time_elem.text.strip()}")

        # Images
        images = soup.find_all('img', src=True)
        if images:
            self.logger.info(f"\n找到 {len(images)} 个图片:")
            for img in images[:3]:  # show the first 3 only
                self.logger.info(f"  └─ {img.get('src')}")

        # Videos
        videos = soup.find_all('video', src=True)
        if videos:
            self.logger.info(f"\n找到 {len(videos)} 个视频:")
            for video in videos[:3]:
                self.logger.info(f"  └─ {video.get('src')}")

        # Interaction counters (likes/comments/collections)
        interactions = soup.find_all(['span', 'div'], class_=lambda x: x and any(word in x.lower() for word in ['like', 'comment', 'collect', 'count']))
        if interactions:
            self.logger.info("\n互动数据:")
            for inter in interactions:
                self.logger.info(f"  └─ {inter.get('class')}: {inter.text.strip()}")