# -*- coding: utf-8 -*-
'''
@author: Hugo
@file: 脚本之家.py
@time: 2021/8/22 15:55
'''
import time

from gne import GeneralNewsExtractor
import json
import urllib
from pprint import pprint
import urllib3
from concurrent.futures import ThreadPoolExecutor
import requests,os
import pymongo
from icecream import ic
from lxml import etree
from bs4 import BeautifulSoup as bsp4
from icecream import ic
import asyncio

import requests


class jb():
    def __init__(self):
        import requests
        self.cookies = {
            '__guid': '109265795.3368359567397379600.1625365973719.991',
    '__gads': 'ID=4ce86d5fe3d2da0a-225b1eb328ca00b8:T=1625365973:RT=1625365973:S=ALNI_MZfJM-oU1keELnTLJY0fAKwLlCkKg',
    'UM_distinctid': '17a6f5c8e87a72-0b892fa033236c-3e604809-1fa400-17a6f5c8e88894',
    'CNZZDATA5488916': 'cnzz_eid%3D1277601068-1627981935-https%253A%252F%252Fwww.baidu.com%252F%26ntime%3D1627981935',
    'CNZZDATA1261400616': '948295784-1628508027-https%253A%252F%252Fwww.baidu.com%252F%7C1628508027',
    'CNZZDATA4716706': 'cnzz_eid%3D487611356-1629612967-https%253A%252F%252Fwww.jb51.net%252F%26ntime%3D1629612967',
    'CNZZDATA5488921': 'cnzz_eid%3D1046940385-1629610420-https%253A%252F%252Fwww.jb51.net%252F%26ntime%3D1629610420',
    'CNZZDATA1261487465': '1660895939-1629610563-https%253A%252F%252Fwww.jb51.net%252F%7C1629610563',
    'CNZZDATA5488951': 'cnzz_eid%3D434986357-1629611231-https%253A%252F%252Fwww.so.com%252F%26ntime%3D1629616634',
    'CNZZDATA1260728942': '1240387669-1629611464-https%253A%252F%252Fwww.jb51.net%252F%7C1629638523',
    'CNZZDATA1255898164': '820963134-1625364562-https%253A%252F%252Fwww.so.com%252F%7C1629637063',
    'CNZZDATA1267982899': '2071438471-1629616616-%7C1629638565',
    'Hm_lvt_b88cfc1ccab788f0903cac38c894caa3': '1628512748,1629615172,1629615506,1629685896',
    'Hm_lvt_5cf7ffaf53f2ae2c09200905ee32a7d5': '1629615175,1629685907',
    'Hm_lpvt_5cf7ffaf53f2ae2c09200905ee32a7d5': '1629685912',
    'monitor_count': '48',
    'CNZZDATA1585378': 'cnzz_eid%3D796045700-1625362000-https%253A%252F%252Fwww.so.com%252F%26ntime%3D1629685631',
    'Hm_lpvt_b88cfc1ccab788f0903cac38c894caa3': '1629686091',
        }


        self.headers = {
            'Connection': 'keep-alive',
    'Cache-Control': 'max-age=0',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'Sec-Fetch-Site': 'same-origin',
    'Sec-Fetch-Mode': 'navigate',
    'Sec-Fetch-User': '?1',
    'Sec-Fetch-Dest': 'document',
    'Referer': 'https://www.jb51.net/list/index_96.htm',
    'Accept-Language': 'zh-CN,zh;q=0.9',
        }
        self.base_url = 'https://www.jb51.net'
        self.count = 0
    def f_url(self):

        with ThreadPoolExecutor(50) as thread:
            for i in range(100):

                response = requests.get(f'https://www.jb51.net/list/list_97_{i}.htm', headers = self.headers,
                                        cookies = self.cookies)
                ic(response)
                html = response.content.decode('gbk')
                data = etree.HTML(html)
                detel_url = data.xpath("//div[@class='artlist clearfix']//dt//a/@href")
                # ic(detel_url)
                for url in detel_url:
                    thread.submit(self.crawl,url)
                    ic(str(self.count)+'>>>>>>>>>>done')
                    self.count +=1
                ic(f'第{i}页')

    def save(self,data):
        ic('----------------------------------------')
        print(type(data))
        if isinstance(data,dict):
            res1 = {
            '标题': data.get('title'),
            '正文': data.get('content'),
            '图片': data.get('images'),
            '作者': data.get('author'),
            '上传时间': data.get('publish_time')
            }
            ic(res1)
            client = pymongo.MongoClient()
            db = client['python']['jaoben']
            db.insert_one(res1)
            ic(res1['标题'] + '>>>>{}>>>done'.format(self.count))

    def crawl(self,url):
        res = requests.get(self.base_url + url, headers = self.headers, cookies = self.cookies)
        ic(res)
        html1 = res.content.decode('gbk')
        ic('html1:>')
        # gne库自动提取详情页

        extractor = GeneralNewsExtractor()
        result = extractor.extract(html1)
        print('result======================================================{}'.format(self.count))
        # ic(result)
        self.save(result)


    def run(self):
        self.f_url()

if __name__ == '__main__':
    # Time the whole crawl and report the elapsed seconds at the end.
    started_at = time.time()
    spider = jb()
    spider.run()
    ic(time.time() - started_at)








