"""
获取cookies
"""
import os
import time
import urllib
import urllib.parse

import bs4
import requests
from openpyxl import Workbook


def get_cookies():
    """Parse cookies.txt (browser "name=value; name=value" format).

    Returns:
        dict: cookie name -> value.

    Raises:
        FileNotFoundError: if cookies.txt does not exist.
    """
    cookies = {}
    with open("cookies.txt", "r", encoding="utf-8") as file:
        for line in file.read().split(";"):
            line = line.strip()
            if not line:
                # Skip empty segments (e.g. a trailing ";"), which would
                # otherwise crash the "name, value" unpacking below.
                continue
            # Split on the first "=" only: cookie values may contain "=".
            name, value = line.split("=", 1)
            cookies[name] = value
    return cookies


"""
类别选择
"""


def choice_category(cookies):
    """Scrape https://book.douban.com/tag/ and build the category tree.

    Args:
        cookies: dict of session cookies for book.douban.com.

    Returns:
        list: ``[[major_category_name, [sub_tag, ...]], ...]`` — one entry
        per major heading, paired with the sub-tags listed under it.
    """
    userAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.35"
    headers = {
        "User-Agent": userAgent
    }
    res = requests.get("https://book.douban.com/tag/", cookies=cookies, headers=headers)
    soup = bs4.BeautifulSoup(res.text, "html.parser")
    # All category markup lives inside <div class="article">.
    article = soup.find("div", attrs={"class": "article"})
    # Major headings carry the group name in their "name" attribute;
    # each <table class="tagCol"> lists the sub-tags of one heading.
    major_names = [a.attrs["name"] for a in article.findAll("a", attrs={"class": "tag-title-wrapper"})]
    tag_tables = article.findAll("table", class_="tagCol")
    categoryList = []
    # zip pairs headings with their tables, replacing the original manual
    # counter (which raised IndexError whenever the counts diverged).
    for major, table in zip(major_names, tag_tables):
        sub_tags = [a.string.strip() for a in table.findAll("a")]
        categoryList.append([major, sub_tags])
    return categoryList


# 书明细爬虫
def book_spider(choice_category, cookies):
    """Crawl every listing page of one Douban book tag.

    Args:
        choice_category: tag name chosen by the user (e.g. "小说").
        cookies: dict of session cookies for book.douban.com.

    Returns:
        list: one entry per book:
        ``[title, author_info, pub_info, allstar, rating_nums,
        people_num, description, book_url]``.
    """
    userAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.35"
    headers = {
        "User-Agent": userAgent
    }
    base_url = 'https://book.douban.com/tag/' + urllib.parse.quote(choice_category)
    # Fetch the first page once just to read the total page count.
    res = requests.get(base_url + '?start=0&type=T', cookies=cookies, headers=headers)
    soup = bs4.BeautifulSoup(res.text, "html.parser")
    paginator = soup.find('div', class_="paginator")
    if paginator is None:
        # A tag with a single page of results shows no paginator bar;
        # the original code crashed here with AttributeError.
        page_max = 1
    else:
        # The last <a> is the "next page" link, so the second-to-last
        # anchor holds the highest page number.
        page_max = int(paginator.findAll("a")[-2].string.strip())
    books_list = []
    page_num = 0

    while True:
        url = base_url + '?start=' + str(page_num * 20) + '&type=T'
        res = requests.get(url, cookies=cookies, headers=headers)
        soup = bs4.BeautifulSoup(res.text, 'html.parser')
        # Each book on the page sits in an <li class="subject-item">.
        for book_info in soup.findAll('li', attrs={'class': 'subject-item'}):
            # Title and link to the book's detail page.
            title_tag = book_info.find("h2").find("a", attrs={"title": True})
            book_url = title_tag.attrs["href"]
            title = title_tag.attrs["title"]
            # "author / translator / publisher / date / price" line.
            basic_info = book_info.find("div", attrs={"class": "pub"}).string.strip()
            basic_info_list = basic_info.split("/")
            # The last three segments are publisher / date / price;
            # everything before them is author (and translator) info.
            author_info = "/".join(basic_info_list[0:-3]) or "暂无"
            # Bug fix: the original's except branch assigned author_info
            # here, leaving pub_info potentially unbound.
            pub_info = "/".join(basic_info_list[-3:]) or "暂无"

            evaluate_info = book_info.find('div', attrs={'class': 'star clearfix'})
            # Star level is encoded in the span's class name, with the
            # rating x10 as the trailing digits (e.g. "allstar45" -> 4.5).
            try:
                star_class = evaluate_info.find('span', attrs={'class': True}).attrs['class'][0]
                if star_class[-1] == '1':
                    allstar = star_class[-1]
                else:
                    allstar = star_class[-2] + '.' + star_class[-1]
            except (AttributeError, IndexError):
                allstar = '0.0'
            # Numeric score; missing for unrated books.
            try:
                rating_nums = evaluate_info.find('span', attrs={'class': 'rating_nums'}).string.strip()
            except AttributeError:
                rating_nums = '0.0'
            # Number of ratings, with the surrounding "(...人评价)" trimmed.
            try:
                people_num = evaluate_info.find('span', attrs={'class': 'pl'}).string.strip()[1:-4]
            except AttributeError:
                people_num = '0'
            # Short description paragraph, absent for some books.
            try:
                description = book_info.find("p").string.strip()
            except AttributeError:
                description = "暂无"
            books_list.append([title, author_info, pub_info, allstar, rating_nums, people_num, description, book_url])
        print('第%d页信息采集完毕，共%d页' % (page_num + 1, page_max))
        # Throttle requests so we don't hammer the server.
        time.sleep(0.5)
        page_num += 1
        if page_num == page_max:
            break
    return books_list


def save_to_excel(book_list, excelName):
    """Write crawl results to ./results/<excelName>.xlsx.

    Args:
        book_list: list of rows as produced by book_spider().
        excelName: workbook file name without the .xlsx extension.
    """
    wb = Workbook()
    ws = wb.active
    ws.append(['序号', '书名', '作者/译者', '出版信息', '星级', '评分', '评价人数', '简介', '豆瓣链接'])
    for count, row in enumerate(book_list, start=1):
        # Prepend the row number without mutating the caller's list
        # (the original did row.insert(0, ...) on the shared rows).
        ws.append([count] + row)
    path = "./results/"
    os.makedirs(path, exist_ok=True)
    # Bug fix: the original called wb.find(), which is not a Workbook
    # method — wb.save() actually writes the file to disk.
    wb.save(path + excelName + ".xlsx")


if __name__ == "__main__":
    cookies = get_cookies()
    categoryList = choice_category(cookies)
    print("豆瓣书籍的初略分类有：\n")
    print("*" * 65)
    for cat in categoryList:
        print(cat[0], end=" ")
    print("")
    print("*" * 65)
    first_chose = input("请输入想要了解的粗略分类：\n")
    for cat in categoryList:
        if cat[0] == first_chose:
            print("")
            print("*" * 65)
            print("该粗略分类下的二级分类有：\n")
            for se in cat[1]:
                print(se)
            print("*" * 65)
            break
    choice_category = input("请输入您想要了解的详细分类：\n")
    book_list = book_spider(choice_category, cookies)
    print(book_list)
    save_to_excel(book_list, choice_category)
