from bson import ObjectId

import downloader
from db import category_col, book_col
from utils import get_site, md5, formate_mm_dd_time, formate_yyyy_mm_dd


def extract_books(resp, category, page):
    """Parse one category listing page into a list of book dicts.

    The first page of a listing uses different markup than subsequent
    pages, so every field is pulled with a page-specific XPath. Each
    returned dict is shaped for upsert into ``book_col``.
    """
    site = get_site(resp.url)
    on_first_page = page == 1
    if on_first_page:
        items = resp.xpath('//div[@id="newscontent"]/div[@class="l"]/ul/li')[1:]
    else:
        items = resp.xpath('//div[@class="novelslist2"]//ul/li')[1:]

    books = []
    for item in items:
        if on_first_page:
            title = item.xpath('span[@class="s2"]/a/text()').extract_first().strip()
            url = item.xpath('span[@class="s2"]/a/@href').extract_first().strip()
            raw_time = item.xpath('span[@class="s5"]/text()').extract_first().strip()
            last_update_time = formate_mm_dd_time(raw_time)
            newest_chapter_name = item.xpath('span[@class="s3"]/a/text()').extract_first()
            author = item.xpath('span[@class="s4"]/text()').extract_first().strip()
            # first-page markup carries no state column; everything listed
            # there is treated as ongoing
            state = '连载'
        else:
            title = item.xpath('span[@class="s2 boys"]/a/text()').extract_first().strip()
            url = item.xpath('span[@class="s2 boys"]/a/@href').extract_first().strip()
            raw_time = item.xpath('span[@class="s7"]/text()').extract_first().strip()
            last_update_time = formate_yyyy_mm_dd(raw_time)
            newest_chapter_name = item.xpath('span[@class="s3"]/a/text()').extract_first()
            author = item.xpath('span[@class="s4"]/a/text()').extract_first().strip()
            state = item.xpath('span[@class="s6"]/text()').extract_first().strip()

        url = site + url
        book_md5 = md5(f'{title}_{author}')
        existing = book_col.find_one({'md5': book_md5})
        # NOTE(review): both a brand-new book and one whose newest chapter
        # is unchanged get updated=1; only a changed chapter yields 0 —
        # confirm this flag's intended meaning with the chapter spider.
        if not existing or existing['newest_chapter_name'] == newest_chapter_name:
            updated = 1
        else:
            updated = 0

        books.append({
            'md5': book_md5,
            'site': site,
            'title': title,
            'category': category,
            'url': url,
            'newest_chapter_name': newest_chapter_name,
            'author': author,
            'last_update_time': last_update_time,
            'state': state,
            'updated': updated,
        })
    return books



def extract_next_page_url(resp):
    """Return the href of the "下一页" (next page) link, or None if absent.

    The original wrapped this in a bare try/except with a "尾页" (last
    page) fallback, but ``extract_first()`` returns None rather than
    raising when the node is missing, so the fallback was unreachable
    dead code — and had it ever fired it would have returned the
    last-page link and re-crawled the final page forever. Returning
    None lets the caller stop cleanly.
    """
    return resp.xpath('//div[@id="pages"]//a[contains(text(),"下一页")]/@href').extract_first()


def run_list_spider(c):
    """Crawl all listing pages for one category document ``c``."""
    url, name = c['url'], c['name']
    run_page_parser(url, name)


def save_book(book):
    """Upsert one book document into ``book_col``, keyed by its md5, and log it."""
    book_col.update_one({'md5': book['md5']}, {"$set": book}, upsert=True)
    # BUG FIX: f'{book['title']}' nests the same quote character inside an
    # f-string, which is a SyntaxError before Python 3.12 (PEP 701);
    # use double quotes for the subscript instead.
    print(f'{book["title"]} 保存成功')


def run_page_parser(url, category, page=1):
    """Download one listing page, save its books, then follow the next page.

    Recurses with ``page + 1`` until no next-page link is found.

    BUG FIX: the original concatenated ``get_site(...) + next_page_url``
    BEFORE checking for None, so the end of a listing raised TypeError,
    and the surrounding bare ``except: pass`` silently swallowed it —
    along with every other error from the whole recursive crawl. The
    stop condition is now explicit, and failures while crawling deeper
    pages are logged instead of hidden (still best-effort: one broken
    page does not abort the category).
    """
    print(f'爬取{category}({page}) {url}')
    resp = downloader.download(url)
    for book in extract_books(resp, category, page):
        save_book(book)

    next_page_url = extract_next_page_url(resp)
    if not next_page_url:
        return  # no next-page link: this category is done
    next_page_url = get_site(resp.url) + next_page_url
    try:
        run_page_parser(next_page_url, category, page + 1)
    except Exception as e:
        print(f'爬取失败 {next_page_url}: {e}')


def run_lists_spider():
    """Crawl the listing pages of every stored category (capped at 1000)."""
    for category in category_col.find({}).limit(1000):
        run_list_spider(category)


# Sample category document in the shape stored in ``category_col`` —
# kept for reference/manual testing; the entry point below does not use it.
demo_c = {
    "_id": ObjectId("6720942e4f43e7f751024cb0"),
    "md5": "11ede5cbaa7b34bff9b496dd5b7e6f6c",
    "name": "玄幻",
    "site": "https://www.3yt.org/",
    "url": "https://www.3yt.org/XuanHuan/"
}

# Script entry point: crawl the book lists for all categories in the DB.
if __name__ == '__main__':
    run_lists_spider()
