import re
from datetime import datetime

from db import book_col, contents_col
from downloader import download
from utils import get_site, md5, formate_yyyy_mm_dd


def extract_book_page(book_url):
    """Download a book's index page and parse its metadata and chapter list.

    Args:
        book_url: absolute URL of the book's index page.

    Returns:
        Tuple ``(book_info, chapters)`` where ``book_info`` is a dict of book
        metadata (title, author, newest chapter, last update time, intro, md5
        of "title_author", source url) and ``chapters`` is a list of dicts
        (title, absolute url, order parsed from the ``<n>.html`` file name,
        crawled=0, md5 of the chapter url for dedup).

    Raises:
        IndexError: if the "最后更新" timestamp or a chapter number is absent.
        AttributeError: if the intro paragraph is missing (extract_first
        returns None) — assumes the page always carries an #intro div.
    """
    resp = download(book_url)
    # <dd> elements following the "正文" (main text) heading hold chapter links.
    chapter_nodes = resp.xpath('//b[contains(text(),"正文")]/../following::dd')
    title = resp.xpath('//h1/text()').extract_first()
    author = resp.xpath('//h1/following::p/a/text()').extract_first()
    newest_chapter_name = resp.xpath('//p[contains(text(),"更新")]/a/text()').extract_first()
    # Drop the "书友群" (readers' group) blurb appended to the intro text.
    intro = resp.xpath('//div[@id="intro"]/p/text()').extract_first().split('书友群')[0]
    last_update_time = re.findall(r'最后更新：(.*?)<', resp.text)[0]
    book_info = {
        'title': title,
        'author': author,
        'newest_chapter_name': newest_chapter_name,
        'last_update_time': datetime.strptime(last_update_time, '%Y-%m-%d %H:%M:%S'),
        'updatedAt': datetime.now(),
        'intro': intro,
        'md5': md5(f'{title}_{author}'),
        'url': book_url,
    }
    chapters = []
    for node in chapter_nodes:
        # Use a distinct name so the book title above is not shadowed.
        chapter_title = node.xpath('a/text()').extract_first()
        href = node.xpath('a/@href').extract_first()
        # BUG FIX: escape the dot — r'(\d+).html' let any character match
        # between the number and "html".
        order = re.findall(r'(\d+)\.html', href)[0]
        full_url = get_site(resp.url) + href
        chapters.append({
            'title': chapter_title,
            'url': full_url,
            'order': order,
            'crawled': 0,
            'md5': md5(full_url),
        })

    return book_info, chapters


def run_book_spider(book):
    """Crawl one book's index page and sync its info and chapters into MongoDB.

    Upserts the book document (keyed by md5), upserts each chapter (keyed by
    md5), then attaches all chapter ids to the book and clears its 'updated'
    flag so the main loop moves on to the next book.

    Args:
        book: book document from ``book_col``; must contain '_id' and 'url'.
    """
    book_id = book['_id']
    book_info, chapters = extract_book_page(book['url'])
    result = book_col.update_one({'md5': book_info['md5']}, {'$set': book_info}, upsert=True)
    if result.modified_count == 1:
        print(book_info['title'], book_info['md5'], '保存成功')
    chapter_ids = []
    for i, c in enumerate(chapters):
        c['book'] = book_id
        c['order'] = i  # renumber sequentially; overrides the order parsed from the URL
        existing = contents_col.find_one({'md5': c['md5']})
        if existing:
            chapter_id = existing['_id']
            # BUG FIX: filter by the existing chapter's _id — the original
            # used {'_id': c['title']}, which never matches — and check THIS
            # update's result instead of the stale book-update result.
            result = contents_col.update_one({'_id': chapter_id}, {'$set': c})
            if result.modified_count == 1:
                # BUG FIX: the original nested single quotes inside a
                # single-quoted f-string (SyntaxError before Python 3.12).
                print(f'{i}>>>>>{c["title"]}')
        else:
            chapter_id = contents_col.insert_one(c).inserted_id
        chapter_ids.append(chapter_id)

    # BUG FIX: add ALL chapter ids via $each — the original $addToSet used
    # only the last loop's chapter_id, and raised NameError when the chapter
    # list was empty.
    result = book_col.update_one(
        {'_id': book_id},
        {'$set': {'updated': 0}, '$addToSet': {'chapters': {'$each': chapter_ids}}},
    )
    if result.modified_count == 1:
        # BUG FIX: the original indexed the builtin `bool` ("bool['title']");
        # the book document was intended.
        print(f'{book["title"]} 更新章节成功 新增章节:{len(chapter_ids)}')



if __name__ == '__main__':
    # Drain the queue of books flagged for update; run_book_spider resets
    # the 'updated' flag, so each book is picked up at most once per pass.
    while True:
        pending = book_col.find_one({'updated': 1})
        if pending is None:
            break
        url = pending['url']
        title = pending['title']
        print(f'正在爬取: {title}({url})')
        run_book_spider(pending)
