# @author: daituodi
# @date: 2021/6/30 4:17 下午
import json

import requests
# from lxml import etree
import re
from bs4 import BeautifulSoup


# Entity → replacement pairs, applied strictly in this order. Because
# '&amp;' is replaced BEFORE '&lt;'/'&gt;'/'&quot;', double-escaped input
# such as '&amp;lt;' is unescaped twice (→ '<') — preserved deliberately,
# since downstream regexes rely on this behaviour.
_ENTITY_REPLACEMENTS = (
    ('&nbsp;', ' '),
    ('&ensp;', ' '),
    ('&emsp;', ' '),
    ('&amp;', '&'),
    ('&lt;', '<'),
    ('&gt;', '>'),
    ('&quot;', '"'),
)


def clean_html(html):
    """Unescape common HTML entities and collapse whitespace.

    Replaces a fixed set of HTML entities with their literal characters,
    strips all newlines, and collapses any remaining whitespace runs to a
    single space.

    :param html: raw HTML/text string to clean
    :return: cleaned single-line string
    """
    for entity, char in _ENTITY_REPLACEMENTS:
        # Literal substitutions — str.replace is clearer (and faster)
        # than re.sub for fixed strings.
        html = html.replace(entity, char)
    # Raw strings: the original '\s+' was an invalid string escape
    # (SyntaxWarning on modern CPython) — behaviour is unchanged.
    html = re.sub(r'\n+', '', html)
    html = re.sub(r'\s+', ' ', html)
    return html


# Desktop browser User-Agent so Douban serves the normal page instead of
# blocking the request as a bot.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'
}


def _info_field(pattern, text, default):
    """Return the first capture group of *pattern* in *text* (stripped),
    or *default* when the pattern does not match."""
    match = re.search(pattern, text)
    return match.group(1).strip() if match else default


# One book-detail URL per line (e.g. https://book.douban.com/subject/.../).
with open('book_urls.txt', mode='r') as f:
    urls = f.read().splitlines()

res = []
for i, url in enumerate(urls):
    print(i)
    print(url)
    # timeout so a single dead host cannot hang the whole crawl
    response = requests.get(url, headers=headers, timeout=30)
    html = clean_html(html=response.text)

    # BUGFIX: the original passed parser='lxml'; BeautifulSoup's keyword is
    # `features` — `parser=` was silently ignored, falling back to the
    # default parser.
    soup = BeautifulSoup(html, features='lxml')

    # Book title lives in the first <h1><span> of the page; let a missing
    # title raise loudly rather than silently producing broken records.
    title = soup.h1.span.get_text()
    print(title)

    # Optional fields: fall back to empty strings instead of a bare except.
    intro = soup.find(name='div', attrs={"class": "intro"})
    summary = intro.text.strip() if intro is not None else ""
    cover_link = soup.find(name='a', attrs={"class": "nbg"})
    image = cover_link.attrs.get('href') if cover_link is not None else ''

    # The "#info" block is scraped as raw markup because the fields inside
    # it are plain text separated by <br>, not structured elements.
    book_info = str(soup.find('div', {'id': 'info'}))
    print(book_info)

    # Author anchor usually carries class=""; fall back to any anchor.
    # An IndexError here (no anchors at all) is allowed to propagate, as in
    # the original code.
    authors = re.findall(r'<a class="" href=".*?">(.*?)</a>', book_info)
    if not authors:
        authors = re.findall(r'<a href=".*?">(.*?)</a>', book_info)
    author = authors[0].strip()
    print(author)

    # Each metadata field: regex capture with a hard-coded default when the
    # page omits it (defaults preserved byte-for-byte from the original).
    binding = _info_field(r'装帧:</span>(.*?)<br', book_info, "平装")
    publisher = _info_field(r'出版社:</span>(.*?)<br', book_info, '新华出版社')
    print(publisher)
    isbn = _info_field(r'ISBN:</span>(.*?)<br', book_info, '0123456789876')
    print(isbn)
    pubdate = _info_field(r'出版年:</span>(.*?)<br', book_info, "2000-01")
    pages = _info_field(r'页数:</span>(.*?)<br', book_info, "100")
    price = _info_field(r'定价:</span>(.*?)<br', book_info, "50")
    subtitle = _info_field(r'副标题:</span>(.*?)<br', book_info, "")

    d = {
        "url": url,
        "title": title,
        "author": [author],
        "publisher": publisher,
        "binding": binding,
        "isbn": isbn,
        "pubdate": pubdate,
        "pages": pages,
        "price": price,
        "image": image,
        "summary": summary,
        "subtitle": subtitle,
    }

    print(d)
    res.append(d)

# BUGFIX: the original never closed the output file handle, risking a
# truncated/unflushed result.json on interpreter exit.
with open('../result.json', mode='w') as out:
    json.dump(res, out, ensure_ascii=False)
