import requests
from lxml import etree
import re
import json

# First listing page; later pages advance ?start= in steps of 25 (see examples).
url = "https://book.douban.com/top250?start=0"
# https://book.douban.com/top250?start=25
# https://book.douban.com/top250?start=50
# https://book.douban.com/top250?start=225
# Browser-like User-Agent so the site serves the normal HTML pages.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36"
}

# Matches a meta-line segment that ends in a publisher-style suffix
# (出版社/公司/书局/书店/书馆/集团); used to pick the publisher field.
r = ".*(出版社|公司|书局|书店|书馆|集团)$"
books_descriptions = []  # NOTE(review): appears unused in this file — confirm before removing
books_des = []  # NOTE(review): appears unused in this file — confirm before removing
all_titles = []  # book titles, filled by get_all_info()
all_authors = []  # author names, filled by get_all_info()
all_prices = []  # price strings, filled by get_all_info()
all_derives = []  # publisher names, filled by get_all_info()
all_descriptions = []  # per-book description paragraphs, filled by get_all_info()
all_imgs = []  # cover image URLs (the code that fills this is commented out)
all_jpgs = []  # local cover file paths, filled by get_jpgs()


def get_all_info():
    """Scrape all 10 pages of Douban's book Top-250 list.

    Fills the module-level lists ``all_titles``, ``all_authors``,
    ``all_prices``, ``all_derives`` (publishers) and ``all_descriptions``,
    printing each item as it is extracted.  Performs one request per listing
    page plus one request per book detail page (250 in total).

    Returns:
        True when every page has been processed.
    """
    publisher_re = re.compile(r)  # hoisted: compile once instead of per segment
    t1 = t3 = t4 = t6 = 0  # fix: dropped t2/t5/t7/t8, which were never used
    for page in range(10):
        page_url = "https://book.douban.com/top250?start=" + str(page * 25)
        response = requests.get(url=page_url, headers=headers)
        tree = etree.HTML(response.text)

        # ---------------------------------------------------- book titles
        book_titles = tree.xpath('//body//div[@class="pl2"]/a/@title')
        for book_title in book_titles:
            print(book_title, t1)
            t1 += 1
        all_titles.extend(book_titles)

        # One meta line per book: "author / publisher / year / price".
        books = tree.xpath('//body//p[@class="pl"]/text()')

        # ---------------------------------------------------- authors
        for book in books:
            tokens = book.split('/')[0].split()
            if len(tokens) >= 2:
                # Translated authors look like "[国] Name" — keep the name part.
                author = tokens[1].strip("()")
            else:
                # fix: strip surrounding whitespace (the original appended the
                # segment raw, with trailing spaces).
                author = book.split('/')[0].strip()
            print(author)
            all_authors.append(author)

        # ---------------------------------------------------- prices
        for book in books:
            # fix: final .strip() removes the space left behind when the price
            # is written as "CNY 38.00" instead of "38.00元".
            price = book.split('/')[-1].strip().strip('元').strip('CNY').strip()
            print(price, t3)
            all_prices.append(price)
            t3 += 1

        # ---------------------------------------------------- publishers
        # NOTE(review): a meta line can match in more than one segment (or in
        # none), so all_derives may drift out of alignment with all_titles —
        # confirm whether downstream code needs exactly one entry per book.
        for book in books:
            for segment in book.split('/'):
                match = publisher_re.search(segment.strip())
                if match:
                    print(match.group(), t4)
                    all_derives.append(match.group())
            t4 += 1

        # ---------------------------------------------------- descriptions
        book_urls = tree.xpath('//body//div[@class="pl2"]/a/@href')
        for book_url in book_urls:
            detail = requests.get(url=book_url, headers=headers)
            print(detail.status_code, t6)
            # fix: parse into a separate variable instead of clobbering the
            # listing-page `tree` (the original overwrote it inside this loop).
            detail_tree = etree.HTML(detail.text)
            way1 = detail_tree.xpath('//div[@class="related_info"]//span[@class="all hidden"]//p/text()')
            way2 = detail_tree.xpath('//div[@class="related_info"]//div[@class="indent"]//div[@class="intro"]/p/text()')
            # Prefer the full (hidden) description; fall back to the short one.
            all_descriptions.append(way1 if way1 else way2)
            t6 += 1

    return True


def get_jpgs():
    """Download every cover image from the 10 Top-250 listing pages.

    Saves covers as app/static/bookimgs/1.jpg, 2.jpg, ... and records each
    local path in the module-level ``all_jpgs`` list.

    Returns:
        True when all pages have been processed.
    """
    saved = 0  # running count; also the basename of the last saved cover
    for page in range(10):
        listing = requests.get(
            url="https://book.douban.com/top250?start=" + str(page * 25),
            headers=headers,
        )
        cover_urls = etree.HTML(listing.text).xpath('//body//a[@class="nbg"]/img/@src')
        for cover_url in cover_urls:
            image = requests.get(url=cover_url, headers=headers)
            saved += 1
            local_path = f"app/static/bookimgs/{saved}.jpg"
            with open(local_path, "wb") as f:
                f.write(image.content)
            all_jpgs.append(local_path)
    return True


#   书名写入json
def write_in():
    """Persist every scraped list as a UTF-8 JSON file.

    Returns:
        True once all files have been written.
    """
    # NOTE(review): imgs.json is written under './app/informations/' while the
    # rest go to '../informations/' — looks inconsistent; confirm against the
    # app layout before unifying the paths.
    outputs = [
        ('../informations/titles.json', all_titles),
        ('../informations/authors.json', all_authors),
        ('../informations/prices.json', all_prices),
        ('../informations/derives.json', all_derives),
        ('../informations/descriptions.json', all_descriptions),
        ('./app/informations/imgs.json', all_imgs),
    ]
    for path, data in outputs:
        with open(path, 'w', encoding='utf-8') as f:
            f.write(json.dumps(data, ensure_ascii=False))
    return True

if __name__ == '__main__':
    # Scrape all book metadata first, then dump it to the JSON files.
    get_all_info()
    write_in()
