# -*- coding: utf-8 -*-
# @Time : 2021/3/8 9:51 
# @Author : for 
# @File : jb51.py 
# @Motto: good good study,day day up
import pymongo
import requests
from fake_useragent import UserAgent
from parsel import Selector
from loguru import logger
import re

import sys
import io

# Re-wrap stdout with the GB18030 codec so Chinese text prints without
# UnicodeEncodeError on consoles using a Chinese codepage (e.g. Windows cmd).
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')

# Module-level buffer of scraped articles; main() appends to it, flushes
# it to MongoDB via save(), then clears it.
item_list = []

# Request headers shared by every fetch. NOTE: UserAgent().random is
# evaluated once at import time, so the same UA string is reused for the
# whole run.
headers = {
    'User-Agent': UserAgent().random,
}


def get_response(url, timeout=10):
    """
    Fetch a page and return the response decoded as GBK.

    :param url: page URL to fetch
    :param timeout: seconds to wait before aborting the request
                    (new parameter, default 10 — the original call had no
                    timeout and could hang forever)
    :return: requests.Response with its encoding forced to 'gbk'
    """
    response = requests.get(url=url, headers=headers, timeout=timeout)
    # jb51.net serves GBK-encoded pages; force the encoding so
    # response.text decodes correctly regardless of the response headers.
    response.encoding = 'gbk'
    return response


def save(items, dbname):
    """
    Insert scraped items into MongoDB (database "spiders").

    :param items: list of document dicts to insert
    :param dbname: collection name (the scraped category heading)
    """
    # insert_many raises InvalidOperation on an empty list; skip early.
    if not items:
        return
    # Use the client as a context manager so the connection is closed;
    # the original leaked one MongoClient per call.
    with pymongo.MongoClient("mongodb://localhost:27017/") as myclient:
        mydb = myclient["spiders"]
        mydb[dbname].insert_many(items)


def get_article_text(url):
    """
    Download an article page and return its plain-text body.

    :param url: article URL
    :return: concatenated text of div#content, with the trailing
             "related articles" blurb, embedded CRLFs, and surrounding
             whitespace removed
    """
    response = get_response(url)
    selector = Selector(response.text)
    body = ''.join(selector.xpath('//div[@id="content"]//text()').getall())
    # Drop the trailing "您可能感兴趣的文章" (articles you may be
    # interested in) section. Raw string: '\s' in a plain literal is an
    # invalid escape sequence (SyntaxWarning on Python 3.12+); the raw
    # form produces the identical pattern bytes.
    body = re.sub(r'\s(.*)您可能感兴趣的文章:(.*)', '', body)
    # Remove embedded CRLF pairs and leading/trailing whitespace.
    body = body.replace('\r\n', '').strip()
    return body


def main(url):
    """
    Scrape one listing page: collect every article on it and persist the
    whole batch to MongoDB under the page's category heading.

    :param url: listing-page URL
    """
    response = get_response(url)
    selector = Selector(response.text)
    entries = selector.xpath('//div[@class="artlist clearfix"]/dl/dt')
    # The <h1> heading doubles as the MongoDB collection name.
    category = selector.xpath('//h1/text()').get()
    page_items = []
    for entry in entries:
        title = entry.xpath('./a/@title').get()
        link = 'https://www.jb51.net' + entry.xpath('./a/@href').get()
        # The span text is "label:date"; keep the part after the colon.
        date = entry.xpath('./span/text()').get().split(':')[1]
        page_items.append({
            'title': title,
            'link': link,
            'date': date,
            'body': get_article_text(link),
        })
    # The original appended to the module-global item_list and called
    # save() (a fresh MongoDB connection) once per article; batch the
    # whole page into a single insert instead.
    save(page_items, category)


if __name__ == '__main__':
    # Crawl listing pages 1..5 of category list_97 on jb51.net.
    for page in range(1, 6):
        logger.info(f'正在抓取第{page}页数据，请稍候.....')
        main(f'https://www.jb51.net/list/list_97_{page}.htm#')
    # main('https://www.jb51.net/article/205128.htm')
