import re
import time
import requests
from bs4 import BeautifulSoup
from .base_adapter import BaseAdapter
from typing import Dict, Optional

class BaiduAdapter(BaseAdapter):
    """Adapter for documents hosted on wenku.baidu.com.

    Detects Baidu Wenku URLs, parses document metadata out of a page's
    HTML, and downloads document bytes through a third-party proxy API.
    """

    # Matches the wenku.baidu.com host anywhere in the URL string.
    _DOMAIN_PATTERN = re.compile(r"wenku\.baidu\.com")
    # Proxy endpoint; {url} is filled with the percent-encoded document URL.
    _API_TEMPLATE = "https://wk-c.nowapi.com/wenku/doc?url={url}"
    # Network timeout in seconds for the download request. Without one,
    # requests.get() can block indefinitely on a stalled connection.
    _TIMEOUT = 30

    def detect(self, url: str) -> bool:
        """Return True if *url* points at a Baidu Wenku page."""
        return bool(self._DOMAIN_PATTERN.search(url))

    def parse(self, html: str) -> Dict:
        """Parse document metadata from a Wenku page's HTML.

        Returns a dict with keys 'title', 'pages', 'watermark', and
        'preview_content'.  The watermark/preview extraction is delegated
        to helpers not defined in this class — presumably provided by
        BaseAdapter (verify against the base class).
        """
        soup = BeautifulSoup(html, 'lxml')
        return {
            'title': self._parse_title(soup),
            'pages': self._parse_pages(soup),
            'watermark': self._detect_watermark(soup),
            'preview_content': self._extract_preview(soup)
        }

    def download(self, url: str) -> bytes:
        """Download the document at *url* via the proxy API.

        Raises requests.HTTPError on a non-2xx response and
        requests.Timeout if the server does not answer within _TIMEOUT.
        """
        api_url = self._API_TEMPLATE.format(url=requests.utils.quote(url))
        # FIX: original request had no timeout and could hang forever.
        response = requests.get(
            api_url, headers=self._gen_headers(), timeout=self._TIMEOUT
        )
        response.raise_for_status()
        return response.content

    def _parse_title(self, soup) -> str:
        """Extract the document title; '' when the element is absent."""
        # FIX: original dereferenced .text on a possibly-None find()
        # result, raising AttributeError on pages without the expected
        # <h1 class="doc-title"> element; fall back to '' like
        # _parse_pages falls back to 0.
        node = soup.find('h1', {'class': 'doc-title'})
        return node.text.strip() if node else ''

    def _parse_pages(self, soup) -> int:
        """Extract the page count; 0 when missing or malformed."""
        page_info = soup.find('span', {'class': 'page-count'})
        if page_info is None:
            return 0
        # Text is expected to look like "12页…"; take the digits before
        # the marker.  FIX: guard int() so non-numeric text yields the
        # same 0 fallback instead of raising ValueError.
        count = page_info.text.split('页')[0].strip()
        return int(count) if count.isdigit() else 0

    def _gen_headers(self) -> Dict:
        """Request headers that mimic an ordinary browser visit."""
        return {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
            'Referer': 'https://wenku.baidu.com/'
        }