import time

from db import contents_col
from downloader import download
from txt_downloader import get_site


def extract_book_content(url):
    """Scrape the chapter text at *url*, following "下一页" (next page) links.

    Returns a list of paragraph strings with the full-width ideographic
    space (U+3000, used for paragraph indentation) stripped, and with a
    trailing boilerplate line (ads / navigation text) removed if present.
    """
    resp = download(url)
    lines = resp.xpath('//div[@id="content"]/p/text()').extract()
    contents = [line.replace('\u3000', '') for line in lines]
    # A chapter may span several pages linked by a "下一页" anchor;
    # follow it recursively and concatenate the paragraphs.
    next_links = resp.xpath('//a[contains(text(),"下一页")]/@href').extract()
    if next_links:
        next_page_url = get_site(resp.url) + next_links[0]
        try:
            contents += extract_book_content(next_page_url)
        except Exception as exc:
            # Best-effort: keep the paragraphs already collected if a
            # continuation page fails to download or parse.
            print(f'下一页抓取失败: {exc}')
    # Drop a trailing boilerplate line ("please bookmark" / navigation /
    # update notices). Guard against an empty result to avoid IndexError.
    if contents and ('请大家收藏' in contents[-1]
                     or '下一页' in contents[-1]
                     or '更新' in contents[-1]):
        contents = contents[:-1]
    return contents

def run_content_spider():
    """Crawl the text of every un-crawled document in ``contents_col``.

    Atomically claims one document at a time (``crawled`` 0 -> -1) so that
    concurrent spiders do not pick the same one, scrapes its chapter text,
    then marks it ``crawled = 1`` on success. Exits when no un-crawled
    document remains.
    """
    while True:
        claimed = contents_col.find_one_and_update(
            {'crawled': 0}, {'$set': {'crawled': -1}})
        if not claimed:
            print('没有需要爬取的内容')
            return
        url = claimed.get('url')
        contents = extract_book_content(url)
        result = contents_col.update_one(
            {'_id': claimed.get('_id')},
            {'$set': {'texts': contents, 'crawled': 1}},
            upsert=False,
        )
        # Double quotes inside the f-string: nesting the same quote type
        # is a SyntaxError before Python 3.12 (PEP 701).
        label = f"{claimed.get('title')}({claimed.get('url')})"
        if result.modified_count == 1:
            print(f'{label} 爬取成功')
        else:
            # Original re-checked modified_count == 1 here, making the
            # failure message unreachable; report failure unconditionally.
            print(f'{label} 爬取失败')
        # Throttle requests to the target site.
        time.sleep(1)

if __name__ == '__main__':
    run_content_spider()