import hashlib
import json
import logging
import os
import random
import re
import time
from datetime import datetime
from typing import Any, Dict, List, Optional
from urllib.parse import urlencode

import requests
from bs4 import BeautifulSoup

from src.core.base_downloader import BaseDownloader, ContentMetadata
from src.core.download_utils import get_save_directory
from src.core.file_system_service import read_file_content
from src.core.retry_decorator import retry_with_backoff
from src.infrastructure.config import config

logger = logging.getLogger(__name__)

class WechatDownloader(BaseDownloader):
    """Downloader for WeChat Official Account (mp.weixin.qq.com) articles.

    Fetches an article page, extracts the title, author, publish time and
    body text, downloads embedded images, and writes everything into a
    per-article directory resolved via ``get_save_directory``.
    """

    def __init__(self, config_dict: Dict[str, Any]):
        super().__init__(config_dict)
        self.session = requests.Session()

        # Platform-specific settings come from the global config file,
        # not from the per-instance config_dict.
        wechat_config = config.get_value('wechat', {})

        self.headers = {
            'User-Agent': wechat_config.get('user_agent',
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'),
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        }

        # Throttling configuration: wait a random interval between
        # min_interval and max_interval seconds before each request.
        self.min_interval = wechat_config.get('min_interval', 3)
        self.max_interval = wechat_config.get('max_interval', 5)
        self.download_config = wechat_config.get('download', {})
        self._last_request_time = 0

        # Optional proxy, applied to both HTTP and HTTPS traffic.
        self.proxy = wechat_config.get('proxy')
        if self.proxy:
            self.session.proxies = {
                'http': self.proxy,
                'https': self.proxy
            }

        # Base directory under which per-article folders are created.
        self.download_path = config.get_value('download_path', './downloads')

        logger.info("WechatDownloader initialized with config: "
                    f"intervals: {self.min_interval}-{self.max_interval}s, "
                    f"proxy: {self.proxy}, "
                    f"download_path: {self.download_path}")

    def _wait_for_next_request(self):
        """Sleep so consecutive requests respect the configured minimum
        interval, with random jitter up to ``max_interval`` seconds."""
        now = time.time()
        elapsed = now - self._last_request_time
        if elapsed < self.min_interval:
            wait_time = random.uniform(self.min_interval, self.max_interval)
            time.sleep(wait_time - elapsed)
        self._last_request_time = time.time()

    # NOTE(review): retries=0 disables retrying entirely — confirm this is
    # intentional and not a leftover from debugging.
    @retry_with_backoff(retries=0, backoff_in_seconds=5)
    async def download_by_url(self, url: str) -> Dict:
        """Download a single article.

        Args:
            url: An ``mp.weixin.qq.com`` article URL.

        Returns:
            Dict with keys ``title``, ``text``, ``metadata``
            (ContentMetadata), ``local_text_path``, ``media_files`` and
            ``save_directory``.

        Raises:
            ValueError: If the URL is not a WeChat article link or the page
                cannot be parsed.
            Exception: On WeChat rate-limiting; network errors are logged
                and re-raised.
        """
        self._wait_for_next_request()

        try:
            url = self._normalize_url(url)

            response = self.session.get(url, headers=self.headers, timeout=30)
            response.raise_for_status()

            # WeChat returns an HTTP 200 page containing this notice when
            # the client is rate-limited, so it must be detected manually.
            if '访问过于频繁' in response.text:
                raise Exception('访问过于频繁，请稍后再试')

            soup = BeautifulSoup(response.text, 'html.parser')

            title = self._extract_title(soup)
            author = self._extract_author(soup)
            publish_time = self._extract_publish_time(soup)
            content = self._extract_content(soup)

            # Resolve the per-article output directory.
            save_dir = get_save_directory(
                base_path=self.download_path,
                platform='wechat',
                author=author,
                title=title
            )

            # Download embedded images next to the article text.
            images = await self._extract_and_download_images(soup, save_dir)

            # Save the article text with a small metadata header.
            text_path = os.path.join(save_dir, 'content.txt')
            with open(text_path, 'w', encoding='utf-8') as f:
                f.write(f"标题：{title}\n")
                f.write(f"作者：{author}\n")
                f.write(f"发布时间：{publish_time}\n")
                f.write(f"原文链接：{url}\n")
                f.write(f"保存时间：{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n")
                f.write(content)

            # Engagement counters are not available on the public article
            # page, so they default to zero.
            metadata = ContentMetadata(
                platform='wechat',
                content_type='article',
                author=author,
                publish_time=publish_time,
                url=url,
                likes=0,
                comments=0,
                collections=0
            )

            return {
                'title': title,
                'text': content,
                'metadata': metadata,
                'local_text_path': text_path,
                'media_files': images,
                'save_directory': save_dir
            }

        except Exception as e:
            logger.error(f"Failed to download WeChat article: {str(e)}")
            raise

    def _normalize_url(self, url: str) -> str:
        """Validate that *url* points at a WeChat article and return it."""
        if 'mp.weixin.qq.com' not in url:
            raise ValueError('不是有效的微信文章链接')
        return url

    def _extract_title(self, soup: BeautifulSoup) -> str:
        """Extract the article title; raise ValueError when missing."""
        title_elem = soup.find('h1', class_='rich_media_title')
        if not title_elem:
            raise ValueError('无法提取文章标题')
        return title_elem.text.strip()

    def _extract_author(self, soup: BeautifulSoup) -> str:
        """Extract the account/author name, falling back to '未知作者'."""
        author_elem = soup.find('span', class_='rich_media_meta rich_media_meta_nickname')
        if not author_elem:
            return '未知作者'
        author_elem_a = author_elem.find('a')
        return author_elem_a.text.strip() if author_elem_a else '未知作者'

    def _extract_publish_time(self, soup: BeautifulSoup) -> str:
        """Extract the publish time, defaulting to today's date string."""
        time_elem = soup.find('em', class_='rich_media_meta rich_media_meta_text')
        return time_elem.text.strip() if time_elem else datetime.now().strftime('%Y-%m-%d')

    def _extract_content(self, soup: BeautifulSoup) -> str:
        """Convert the article body into plain text with lightweight
        Markdown-style formatting for headings, code, lists and quotes.

        Raises:
            ValueError: If the article body container is missing.
        """
        content_elem = soup.find('div', class_='rich_media_content')
        if not content_elem:
            raise ValueError('无法提取文章内容')

        content_parts = []

        for element in content_elem.children:
            # Skip bare text nodes between tags.
            # NOTE(review): some bs4 versions expose ``.name`` (as None) on
            # NavigableString, in which case text nodes fall through to the
            # final else branch — confirm against the pinned bs4 version.
            if not hasattr(element, 'name'):
                continue

            if element.name == 'p':
                # Paragraphs.
                text = element.get_text(strip=True)
                if text:
                    content_parts.append(text + '\n')

            elif element.name in ('h1', 'h2', 'h3'):
                # Headings.
                text = element.get_text(strip=True)
                if text:
                    content_parts.append(f"\n{text}\n")

            elif element.name == 'pre':
                # Code blocks, fenced in Markdown style; prefer the inner
                # <code> tag when present.
                code = element.find('code')
                text = (code or element).get_text(strip=True)
                content_parts.append(f"\n```\n{text}\n```\n")

            elif element.name in ('ul', 'ol'):
                # Lists (ordered lists are also rendered as bullets).
                for li in element.find_all('li'):
                    text = li.get_text(strip=True)
                    if text:
                        content_parts.append(f"• {text}\n")

            elif element.name == 'blockquote':
                # Quotes.
                text = element.get_text(strip=True)
                if text:
                    content_parts.append(f"\n> {text}\n")

            else:
                # Any other tag: keep its plain text.
                text = element.get_text(strip=True)
                if text:
                    content_parts.append(text + '\n')

        return ''.join(content_parts)

    async def _extract_and_download_images(self, soup: BeautifulSoup,
                                           save_dir: str) -> List[Dict]:
        """Download every lazily-loaded image (``data-src``) in *soup* into
        *save_dir* and return a list of media records.

        Individual failures are logged and skipped so one broken image does
        not abort the whole article download.
        """
        images = []
        for img in soup.find_all('img'):
            try:
                if 'data-src' in img.attrs:
                    image_url = img['data-src']
                    # Bug fix: save_dir was previously not forwarded, which
                    # made this helper raise TypeError on every call.
                    image_path = await self._download_media(image_url, 'images', save_dir)
                    images.append({
                        'type': 'image',
                        'original_url': image_url,
                        'local_path': image_path
                    })
            except Exception as e:
                logger.warning(f"Failed to download image {img.get('data-src')}: {e}")
                continue
        return images

    @retry_with_backoff(retries=3, backoff_in_seconds=2)
    async def _download_media(self, url: str, media_type: str, save_dir: str) -> str:
        """Download one media file into *save_dir* and return its path.

        The file name combines the current timestamp with a stable digest
        of the URL. Network errors are logged, then re-raised so the retry
        decorator can back off and retry.
        """
        self._wait_for_next_request()

        try:
            response = self.session.get(
                url,
                headers=self.headers,
                timeout=self.download_config.get('timeout', 30)
            )
            response.raise_for_status()

            # Build a collision-resistant file name. hashlib is used instead
            # of the builtin hash(), whose value changes between interpreter
            # runs (PYTHONHASHSEED) and can be negative.
            ext = self._get_file_extension(response.headers.get('content-type', ''))
            digest = hashlib.md5(url.encode('utf-8')).hexdigest()[:12]
            filename = f"{int(time.time())}_{digest}{ext}"

            save_path = os.path.join(save_dir, filename)

            # Write the payload to disk in chunks.
            chunk_size = self.download_config.get('chunk_size', 8192)
            with open(save_path, 'wb') as f:
                for chunk in response.iter_content(chunk_size=chunk_size):
                    if chunk:
                        f.write(chunk)

            return save_path

        except Exception as e:
            logger.error(f"Failed to download media: {str(e)}")
            raise

    def _get_file_extension(self, content_type: str) -> str:
        """Map a Content-Type header value to a file extension (.jpg default)."""
        type_map = {
            'image/jpeg': '.jpg',
            'image/png': '.png',
            'image/gif': '.gif',
            'image/webp': '.webp'
        }
        # Drop parameters such as "; charset=..." before the lookup so
        # "image/png; charset=binary" still maps to ".png".
        mime = content_type.split(';')[0].strip().lower()
        return type_map.get(mime, '.jpg')

    async def download_by_author(self, author_id: str, **filters) -> List[Dict[str, Any]]:
        """Bulk download by author is not implemented for WeChat."""
        raise NotImplementedError("下载作者的所有内容")

    async def search_and_download(self, keyword: str, **filters) -> List[Dict[str, Any]]:
        """Keyword search is not supported for WeChat Official Accounts."""
        raise NotImplementedError("微信公众号暂不支持搜索功能")