from book import Book
from crawler import Crawler
import requests, re


class DouBan(Crawler):
    """Search for books on douban.com and parse the results with regexes."""

    def __init__(self, keyword: str) -> None:
        super().__init__(keyword)

    def search_book(self) -> str:
        """Fetch the Douban search-result page for ``self.book_name``.

        Returns:
            The response body decoded as UTF-8 text.

        Raises:
            Exception: with message "网络错误" on any network/HTTP failure,
                chained to the underlying ``requests`` exception.
        """
        # Pretend to be a regular browser so the request is not rejected.
        headers = {"User-Agent": "Mozilla/5.0"}

        # cat=1001 restricts the search to the "books" category.
        search_key = {"cat": "1001", "q": self.book_name}

        try:
            resp = requests.get(url="https://www.douban.com/search",
                                params=search_key,
                                headers=headers,
                                timeout=10)
            # Fail loudly on 4xx/5xx instead of regex-parsing an error page.
            resp.raise_for_status()

            # Force UTF-8 decoding of the body.
            resp.encoding = 'utf-8'
            return resp.text

        except requests.RequestException as err:
            # Keep the message callers rely on, but preserve the original
            # cause for debugging via exception chaining.
            raise Exception("网络错误") from err

    def parse_data(self) -> None:
        """Extract book entries from the search-result HTML using regexes.

        Each successfully parsed entry is appended to ``self.book_data``
        as a ``Book``; malformed or incomplete entries are skipped.
        """
        # Match every result <div class="content"> node; the title and
        # rating sub-divs anchor the span of a single search hit.
        pattern_str = (r'<div class="content">\s*'
                       r'<div class="title">.*?'
                       r'<div class="rating-info">.*?</div>\s*</div>.*?</div>')

        # re.DOTALL lets '.' also match newlines inside the HTML.
        content_re = re.compile(pattern_str, re.DOTALL)
        # Compile the inner patterns once, outside the loop.
        title_re = re.compile(r'<a.*?>(.*?)</a>')     # non-greedy: first <a> text
        cast_re = re.compile(r'<span class="subject-cast">(.+?)</span>')

        for item in content_re.findall(self.search_book()):
            # The book title is the text of the first <a> element;
            # skip malformed entries that have no title link at all.
            titles = title_re.findall(item)
            if not titles:
                continue

            # Author/translator and press share one span, '/'-separated.
            # Discard entries missing that span or lacking any separator.
            cast = cast_re.findall(item)
            if not cast or "/" not in cast[0]:
                continue
            book_info = cast[0].split('/')

            book = Book()
            book.title = titles[0].strip()

            # Translated books carry both an author and a translator.
            if len(book_info) > 3:
                book.author = f'{book_info[0].strip()} 著, {book_info[1].strip()} 译'
                book.press = book_info[2].strip()
            else:
                book.author = book_info[0].strip()
                book.press = book_info[1].strip()

            # Collect the parsed entry on the instance.
            self.book_data.append(book)


if __name__ == "__main__":
    # Crawl Douban for the keyword, parse the hits, and persist them.
    spider = DouBan('Python编程基础')
    spider.parse_data()
    spider.save_book('query_book/data/book_db.txt')
