import requests
from bs4 import BeautifulSoup
import re
import logging
from typing import Dict, Optional, List
from urllib.parse import urljoin

# NOTE(review): calling basicConfig at import time configures the root logger as
# a module side effect; if this file is used as a library, consider leaving log
# configuration to the application entry point instead.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class MetadataFetcher:
    """
    Fetch book metadata from wenku8.net.

    The site serves GBK-encoded pages, so every response is decoded as GBK
    and search keywords are sent as GBK-encoded bytes.
    """

    def __init__(self, base_url: str = "https://www.wenku8.net",
                 timeout: float = 10.0):
        """
        Initialize the metadata fetcher.

        Args:
            base_url: Base URL of the wenku8 site.
            timeout: Per-request timeout in seconds; without it a stalled
                connection would hang indefinitely.
        """
        self.base_url = base_url
        self.timeout = timeout
        self.session = requests.Session()
        # Browser-like User-Agent to avoid trivial bot blocking.
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        })

    def search_book_by_title(self, title: str) -> List[Dict]:
        """
        Search for books by title.

        Args:
            title: Book title to search for.

        Returns:
            List[Dict]: One dict per result containing 'title', 'author',
            'status', 'wenku8_url' and 'book_id'; empty list on any failure.
        """
        try:
            search_url = f"{self.base_url}/modules/article/search.php"
            params = {
                'searchtype': 'articlename',
                # The site expects the keyword percent-encoded as GBK bytes.
                'searchkey': title.encode('gbk'),
                'page': 1
            }

            response = self.session.get(search_url, params=params,
                                        timeout=self.timeout)
            response.encoding = 'gbk'

            if response.status_code != 200:
                logger.error("搜索请求失败: %s", response.status_code)
                return []

            soup = BeautifulSoup(response.text, 'html.parser')

            # Result rows are the white-background <tr> elements of the
            # search-result table.
            books = []
            for row in soup.select('tr[bgcolor="#FFFFFF"]'):
                cells = row.find_all('td')
                if len(cells) < 4:
                    continue
                link_cell = cells[0].find('a')
                if not link_cell:
                    continue
                book_url = urljoin(self.base_url, link_cell['href'])
                book_info = self._extract_search_result(cells)
                if book_info:
                    book_info['wenku8_url'] = book_url
                    book_info['book_id'] = self._extract_book_id_from_url(book_url)
                    books.append(book_info)

            return books

        except Exception as e:
            logger.error("搜索书籍失败: %s", e)
            return []

    def _extract_search_result(self, cells) -> Optional[Dict]:
        """Extract book info from one search-result row (list of <td> cells).

        Caller guarantees at least 4 cells; layout is assumed to be
        title / ? / author / status — TODO confirm against live markup.
        """
        try:
            title_cell = cells[0].find('a')
            author_cell = cells[2]
            status_cell = cells[3]

            if not title_cell:
                return None

            return {
                'title': title_cell.get_text(strip=True),
                'author': author_cell.get_text(strip=True),
                'status': status_cell.get_text(strip=True),
                'wenku8_url': urljoin(self.base_url, title_cell['href'])
            }
        except Exception as e:
            logger.error("解析搜索结果失败: %s", e)
            return None

    def get_book_metadata(self, book_url: str) -> Optional[Dict]:
        """
        Fetch the detail page of a book and extract its full metadata.

        Args:
            book_url: URL of the book's detail page.

        Returns:
            Optional[Dict]: Metadata dict (title, author, cover_url,
            description, tags, book_id, wenku8_url, ...) or None on failure.
        """
        try:
            response = self.session.get(book_url, timeout=self.timeout)
            response.encoding = 'gbk'

            if response.status_code != 200:
                logger.error("获取书籍详情失败: %s", response.status_code)
                return None

            soup = BeautifulSoup(response.text, 'html.parser')

            # Basic info (title/author/...) is mandatory; everything else
            # is best-effort and may be None/empty.
            metadata = self._extract_basic_info(soup)
            if not metadata:
                return None

            metadata['cover_url'] = self._extract_cover_url(soup)
            metadata['description'] = self._extract_description(soup)
            metadata['tags'] = self._extract_tags(soup)
            metadata['book_id'] = self._extract_book_id_from_url(book_url)
            metadata['wenku8_url'] = book_url

            return metadata

        except Exception as e:
            logger.error("获取书籍元数据失败: %s", e)
            return None

    # Maps substrings of Chinese table labels on the detail page to the
    # metadata keys used in the returned dict.
    _LABEL_KEYS = (
        ('作者', 'author'),
        ('出版社', 'publisher'),
        ('文章状态', 'status'),
        ('文章积分', 'score'),
        ('文章字数', 'word_count'),
        ('写作进程', 'progress'),
    )

    def _extract_basic_info(self, soup: BeautifulSoup) -> Optional[Dict]:
        """Extract title plus the label/value pairs from the info table.

        Returns None when the table is missing or no title was found.
        """
        try:
            info_table = soup.find('table', class_='grid')
            if not info_table:
                return None

            metadata = {}

            title_elem = soup.find('span', class_='title')
            if title_elem:
                metadata['title'] = title_elem.get_text(strip=True)

            # Each table row is "<label> | <value>"; match labels by
            # substring, first match wins.
            for row in info_table.find_all('tr'):
                cells = row.find_all('td')
                if len(cells) < 2:
                    continue
                label = cells[0].get_text(strip=True)
                value = cells[1].get_text(strip=True)
                for needle, key in self._LABEL_KEYS:
                    if needle in label:
                        metadata[key] = value
                        break

            # A result without a title is useless to callers.
            return metadata if metadata.get('title') else None

        except Exception as e:
            logger.error("提取基本信息失败: %s", e)
            return None

    def _extract_cover_url(self, soup: BeautifulSoup) -> Optional[str]:
        """Extract the cover image URL (first <img> whose src contains 'pic')."""
        try:
            cover_img = soup.find('img', src=re.compile(r'pic'))
            if cover_img and 'src' in cover_img.attrs:
                return urljoin(self.base_url, cover_img['src'])
            return None
        except Exception as e:
            logger.error("提取封面URL失败: %s", e)
            return None

    def _extract_description(self, soup: BeautifulSoup) -> Optional[str]:
        """Extract the book synopsis.

        Tries a div.intro first, then falls back to the table row following
        the one labelled '内容简介'.
        """
        try:
            intro_div = soup.find('div', class_='intro')
            if intro_div:
                return intro_div.get_text(strip=True)

            for row in soup.find_all('tr'):
                if '内容简介' in row.get_text():
                    next_row = row.find_next_sibling('tr')
                    if next_row:
                        return next_row.get_text(strip=True)

            return None
        except Exception as e:
            logger.error("提取简介失败: %s", e)
            return None

    def _extract_tags(self, soup: BeautifulSoup) -> List[str]:
        """Extract tag strings from the first element whose text mentions '标签'."""
        try:
            tags = []

            for elem in soup.find_all(['span', 'div']):
                text = elem.get_text(strip=True)
                if '标签' in text:
                    # Strip the label prefix, then split on commas.
                    # NOTE(review): wenku8 may separate tags with spaces
                    # rather than commas — verify against live markup.
                    tag_text = text.replace('标签：', '').replace('标签:', '')
                    tags = [tag.strip() for tag in tag_text.split(',') if tag.strip()]
                    break

            return tags
        except Exception as e:
            logger.error("提取标签失败: %s", e)
            return []

    def _extract_book_id_from_url(self, url: str) -> Optional[str]:
        """Extract the numeric book ID from a wenku8 URL, or None.

        Patterns are tried from most to least specific; the loose
        '/book/<id>' fallback also covers '/book/<id>.htm' links that the
        original two patterns missed.
        """
        try:
            for pattern in (r'/book/(\d+)/index\.htm', r'id=(\d+)', r'/book/(\d+)'):
                match = re.search(pattern, url)
                if match:
                    return match.group(1)
            return None
        except Exception as e:
            logger.error("提取书籍ID失败: %s", e)
            return None

    def refresh_book_metadata(self, title: str,
                              author: Optional[str] = None) -> Optional[Dict]:
        """
        Re-fetch the latest metadata for a book.

        Args:
            title: Book title to search for.
            author: Optional author name used for exact matching among the
                search results; without a match the first result is used.

        Returns:
            Optional[Dict]: Fresh metadata, or None if nothing was found.
        """
        try:
            search_results = self.search_book_by_title(title)

            if not search_results:
                logger.warning("未找到书籍: %s", title)
                return None

            # Prefer an exact author match, fall back to the first hit.
            target_book = None
            if author:
                for book in search_results:
                    if book.get('author') == author:
                        target_book = book
                        break
            if not target_book:
                target_book = search_results[0]

            return self.get_book_metadata(target_book['wenku8_url'])

        except Exception as e:
            logger.error("刷新书籍元数据失败: %s", e)
            return None

    def download_cover_image(self, cover_url: str) -> Optional[bytes]:
        """
        Download a cover image.

        Args:
            cover_url: Cover image URL (may be None/empty).

        Returns:
            Optional[bytes]: Raw image bytes, or None on any failure.
        """
        try:
            if not cover_url:
                return None

            response = self.session.get(cover_url, timeout=self.timeout)
            if response.status_code == 200:
                return response.content
            return None

        except Exception as e:
            logger.error("下载封面图片失败: %s", e)
            return None