import requests
from lxml import etree
import re
import resultToCsv

# Accumulator for scraped `book` objects; filled by Main(), consumed elsewhere
# (presumably by resultToCsv — TODO confirm).
books = []

# Session cookies captured from a real browser visit to book.douban.com.
# NOTE(review): these are account/session specific and will expire; scraping
# may start failing silently once they do.
cookies = {
    'douban-fav-remind': '1',
    'gr_user_id': '7a83f882-bcd1-485f-b47f-85b6a751eeb8',
    'bid': 'bJ0mm72YeFs',
    'ap_v': '0,6.0',
    'gr_session_id_22c937bbd8ebd703f2d8e9445f7dfd03': 'e6185041-d425-433b-8cc6-8e8f1591db45',
    'gr_session_id_22c937bbd8ebd703f2d8e9445f7dfd03_e6185041-d425-433b-8cc6-8e8f1591db45': 'false',
    'gr_cs1_e6185041-d425-433b-8cc6-8e8f1591db45': 'user_id^%^3A0',
    'viewed': '^\\^1102715_1803022_1883245_1029791_5414391_4913064_1007305^\\^',
}

# Browser-like request headers (copied from Chrome DevTools) so the scraper is
# served the normal desktop HTML instead of being blocked as a bot.
headers = {
    'Connection': 'keep-alive',
    'sec-ch-ua': '^\\^',
    'sec-ch-ua-mobile': '?0',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'Sec-Fetch-Site': 'same-origin',
    'Sec-Fetch-Mode': 'navigate',
    'Sec-Fetch-Dest': 'document',
    'Referer': 'https://book.douban.com/top250?start=0',
    'Accept-Language': 'zh-CN,zh;q=0.9',
}


class book:
    """One entry of the Douban Top-250 book chart.

    NOTE(review): the lowercase class name is kept deliberately — Main()
    constructs instances as ``book(...)``, so renaming to PascalCase would
    break callers.
    """

    def __str__(self) -> str:
        """Render every attribute as ``key:value`` lines followed by a separator.

        Uses str.join instead of repeated concatenation (the original loop
        built the string with ``+`` in O(n^2)); output is byte-identical.
        """
        lines = [f"{key}:{value}\n" for key, value in vars(self).items()]
        return "".join(lines) + "============================"

    def __init__(self, rank, name, url):
        """Create a chart entry; detail fields start empty and are set later.

        Args:
            rank: 1-based position in the chart.
            name: book title.
            url: detail-page URL on book.douban.com.
        """
        self.rank = rank
        self.name = name
        self.url = url
        self.author = []    # author names, appended via add_author()
        self.price = None   # list price string, e.g. "29.00元" — TODO confirm format
        self.press = None   # publisher name
        self.isbn = None    # ISBN string

    def add_author(self, author_name):
        """Append one author name to the author list."""
        self.author.append(author_name)

    def set_isbn(self, isbn):
        """Record the book's ISBN."""
        self.isbn = isbn

    def set_price(self, price):
        """Record the book's list price."""
        self.price = price

    def set_press(self, press):
        """Record the book's publisher."""
        self.press = press


def Main(pages=1):
    """Scrape the Douban Top-250 book chart and append `book` objects to `books`.

    Fixes over the original:
    - raw string ``r'\s+'`` for the regex (the bare ``'\s+'`` is an invalid
      escape sequence and a SyntaxWarning on modern Python);
    - iterates over the entries actually returned instead of a hard-coded
      ``range(0, 25)``, so a short/partial page no longer raises IndexError;
    - skips author elements whose ``.text`` is None (would crash ``.strip()``).

    Args:
        pages: number of 25-entry result pages to fetch (the chart has 10).
            Defaults to 1, preserving the original single-page behavior.
    """
    for num in range(pages):
        # `start` is the 0-based offset of the first entry on the page.
        params = (
            ('start', str(25 * num)),
        )
        response = requests.get('https://book.douban.com/top250',
                                headers=headers, params=params, cookies=cookies)
        html = etree.HTML(response.text)
        result_name_raw = html.xpath('//div[@class="pl2"]/a/text()')
        result_url = html.xpath('//*[@class="nbg"]/@href')
        # The title anchors contain whitespace-only text nodes; keep only
        # non-empty titles after stripping.
        result_name = [t for t in (raw.strip() for raw in result_name_raw) if t]
        for i in range(min(len(result_name), len(result_url))):
            b = book(25 * num + i + 1, result_name[i], result_url[i])
            book_response = requests.get(result_url[i], headers=headers, cookies=cookies)
            book_html = etree.HTML(book_response.text)
            # Authors are the sibling elements after the "作者" label,
            # terminated by the first <br>.
            book_result_author = book_html.xpath(
                '//div[@id="info"]//span[contains(./text(),"作者")]/following-sibling::*')
            for author in book_result_author:
                if author.tag == 'br':
                    break
                if author.text is None:
                    continue  # element without direct text — nothing to record
                # Collapse internal runs of whitespace into single spaces.
                b.add_author(" ".join(re.split(r'\s+', author.text.strip())))
            book_result_price = book_html.xpath('//span[contains(./text(),"定价:")]/following::text()[1]')
            if book_result_price:
                b.set_price(book_result_price[0].strip())
            book_result_press = book_html.xpath('//span[contains(./text(), "出版社:")]/following::text()[1]')
            if book_result_press:
                b.set_press(book_result_press[0].strip())
            book_result_isbn = book_html.xpath('//span[contains(./text(),"ISBN:")]/following::text()[1]')
            if book_result_isbn:
                b.set_isbn(book_result_isbn[0].strip())
            books.append(b)
            print(b)


if __name__ == "__main__":
    # Scraping is currently disabled; only the CSV export of previously
    # collected results is run. Uncomment Main() to re-scrape.
    # Main()
    resultToCsv.resultToCsv()
