# -*- coding: utf-8 -*-
# @Time : 2021/3/3 12:06 
# @Author : for 
# @File : runoob.py
# @Motto: good good study,day day up


import requests
from fake_useragent import UserAgent
from parsel import Selector
import pymongo
from loguru import logger
import re

# Shared scratch buffer reused across scrape/save cycles (cleared after each save).
item_list = []

# Request headers with a randomized User-Agent to reduce the chance of blocking.
headers = {
    'User-Agent': UserAgent().random,
    # 'Cookie': 'PHPSESSID=tssp9d8vci6v0fegpk6uevsak4; Hm_lvt_424e222704e2274e37a8169110e6e00a=1614744189; Hm_lpvt_424e222704e2274e37a8169110e6e00a=1614744356',
}


def get_response(url, timeout=10):
    """
    Fetch a URL and return the raw response object.

    :param url: absolute URL to request.
    :param timeout: seconds to wait for connect/read before giving up.
        Added because a ``requests.get`` call without a timeout can block
        forever on a dead connection; the default keeps old call sites working.
    :return: the ``requests.Response`` (caller reads ``.text``).
    """
    response = requests.get(url=url, headers=headers, timeout=timeout)
    return response


def save(items, dbname):
    """
    Insert documents into the local MongoDB ``spiders`` database.

    :param items: list of dicts to insert; an empty list is a no-op
        (``insert_many`` raises ``InvalidOperation`` on an empty batch).
    :param dbname: name of the target collection.
    """
    if not items:
        return
    # Context manager closes the connection; the original leaked a new
    # MongoClient on every call and never closed it.
    with pymongo.MongoClient("mongodb://localhost:27017/") as myclient:
        mydb = myclient["spiders"]
        mycol = mydb[dbname]
        mycol.insert_many(items)


def get_subject_content(url):
    """
    Scrape the chapter list of one subject page and persist it.

    :param url: the subject's index page on runoob.com.
    """
    response = get_response(url)
    selector = Selector(response.text)
    anchors = selector.xpath('//div[@class="design"]/a')
    # Default of '' avoids AttributeError when the tab div is missing.
    subject = selector.xpath('//div[@class="tab"]/text()').get('').strip()
    chapters = []
    for anchor in anchors:
        chapter = (anchor.xpath('./text()').get() or '').strip()
        href = anchor.xpath('./@href').get()
        if not href:
            # Skip anchors without a target instead of raising TypeError.
            continue
        chapter_link = 'https://www.runoob.com' + href
        logger.info(f'正在保存{subject}中的{chapter}章节信息。。。')
        chapters.append({
            'chapter': chapter,
            'chapter_link': chapter_link,
            # 'text': get_chapter_text(chapter_link)
        })
    # Batch one insert_many instead of append/save/clear through the shared
    # global item_list for every single chapter (one DB round-trip each).
    save(chapters, subject)


def get_chapter_text(url):
    """Download a chapter page and return its article body as one plain-text string."""
    html = get_response(url).text
    fragments = Selector(html).xpath('//div[@class="article-intro"]//text()').getall()
    return ''.join(fragments)


def main(url):
    """
    Crawl the runoob.com homepage: extract every subject card, scrape each
    subject's chapter list, and save the card metadata to MongoDB.

    :param url: the homepage URL, e.g. ``https://www.runoob.com/``.
    """
    # Fetch the page once and reuse the selector; the original re-downloaded
    # the very same URL on every one of the 10 category iterations.
    response = get_response(url)
    selector = Selector(response.text)
    for i in range(1, 11):
        cards = selector.xpath(
            f'//div[@class="codelist codelist-desktop cate{i}"]/a[@class="item-top item-1"]'
        )
        for card in cards:
            raw_title = card.xpath('./h4/text()').get('')
            # Titles look like '【学习 Python】'; keep the part inside the brackets.
            # Regex replaces split('【')[1], which raised IndexError when absent.
            bracket = re.search(r'【(.*?)】', raw_title)
            if not bracket:
                continue
            title = bracket.group(1)
            if title.startswith('学习'):
                # Raw string avoids the invalid-escape DeprecationWarning of '学习\s'.
                matched = re.search(r'学习\s(.*)', title)
                subject = matched.group(1) if matched else title
            else:
                subject = title
            desc = card.xpath('./strong/text()').get()
            href = card.xpath('./@href').get()
            if not href:
                # Original passed 0 as the .get() default, so a missing href
                # crashed with TypeError on 'https:' + 0. Skip instead.
                continue
            link = 'https:' + href
            get_subject_content(link)
            # Save directly; no need for the shared global item_list buffer.
            save([{
                'title': title,
                'desc': desc,
                'link': link,
                'subject': subject,
            }], 'runoob')


if __name__ == '__main__':
    # Script entry point: crawl the homepage, then every subject page it links.
    main('https://www.runoob.com/')
    logger.info('数据抓取完成')
