import json
from multiprocessing import Pool
import re
import requests

def get_one_page(url):
    """Fetch *url* and return the response body as text, or None on failure.

    Returns None both for non-200 status codes and for network-level
    errors (timeouts, connection failures), so callers only need one
    falsy check.
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299'}
    try:
        # A timeout is mandatory: requests.get without one can block forever.
        response = requests.get(url, headers=headers, timeout=10)
    except requests.RequestException:
        return None
    if response.status_code == 200:
        return response.text
    return None

def parse_one_page(html):
    """Parse one Douban Top-250 list page and yield one dict per book.

    Each yielded dict has keys: name, url, author, publishtime, price,
    score, nums (rating count) and more (intro text fetched from the
    book's own detail page).

    A malformed entry is skipped instead of aborting the whole page
    (the original bare ``except: pass`` silently dropped every
    remaining book after the first failure).
    """
    if not html:
        # get_one_page may return None; nothing to parse then.
        return

    # Compile once, outside the per-book loop.
    list_pattern = re.compile('<div.*?pl2.*?href="(.*?)".*?>(.*?)</a>.*?pl">(.*?)</p>.*?rating_nums">(.*?)</span>.*?pl">(.*?)</span>', re.S)
    intro_pattern = re.compile('<div.*?intro">(.*?)</div>', re.S)

    for item in re.findall(list_pattern, html):
        try:
            url = item[0]
            name = re.sub(r'\s', '', item[1])

            # The "pl" paragraph looks like: author / [translator /] publisher / date / price
            info = item[2].strip()
            first_slash = info.find('/')
            author = info[0:first_slash]
            last_slash = info.rfind('/')
            price = info[last_slash + 1:]
            # Drop the price segment (and the space before its '/') and
            # take the last remaining field as the publish date.
            head = info[0:last_slash - 1]
            publish = head[head.rfind('/') + 1:]

            score = item[3]
            # e.g. "(12345人评价)" -> "12345"
            nums = re.sub(r'\s', '', item[4]).lstrip('(').rstrip('人评价)')

            # Fetch the book's detail page for its intro text; tolerate
            # fetch failures or a missing intro block.
            content = get_one_page(url)
            intros = re.findall(intro_pattern, content) if content else []
            more = ''
            if intros:
                more = intros[0].replace('<p>', '').replace('</p>', '')
                more = re.sub(r'\s', '', more)

            # The lazy regexes can capture trailing HTML; cut it off.
            cut = name.find('<')
            if cut > 0:
                name = name[0:cut]
            cut = more.find('<')
            if cut > 0:
                more = more[0:cut]

            yield {
                'name': name,
                'url': url,
                'author': author,
                'publishtime': publish,
                'price': price,
                'score': score,
                'nums': nums,
                'more': more
            }
        except (IndexError, ValueError, AttributeError):
            # Skip just this entry; keep yielding the rest of the page.
            continue

def write_in_file(content):
    """Append *content* as one JSON object (plus ',\\n') to douban_book.json.

    Opens in append mode so results from all worker processes
    accumulate; ensure_ascii=False keeps Chinese text readable.
    """
    # The with-statement closes the file; the original's explicit
    # f.close() inside the block was redundant.
    with open('douban_book.json', 'a', encoding='utf8') as f:
        f.write(json.dumps(content, ensure_ascii=False) + ',\n')

def main(offset):
    """Scrape one Top-250 list page (starting at *offset*) and persist every book found."""
    page_url = 'https://book.douban.com/top250?start=' + str(offset)
    page_html = get_one_page(page_url)
    # parse_one_page is a generator: each book is printed and written as it is parsed.
    for book in parse_one_page(page_html):
        print(book)
        write_in_file(book)

if __name__ == '__main__':
    # The Top 250 spans 10 pages at offsets 0, 25, ..., 225; fetch them
    # in parallel worker processes. The context manager guarantees the
    # pool is terminated even if a worker raises (the original never
    # closed/joined the pool).
    with Pool() as pool:
        pool.map(main, [i * 25 for i in range(10)])
