# encoding: utf-8
"""
@author: 夏洛
@QQ: 1972386194
@file: 02-中级.py
"""


from base_request import Spiders

# print(Spiders().fetch('http://www.httpbin.org/get').text)
from lxml import etree
from queue import Queue
from urllib.parse import urljoin
from logger import logger

class Crawl(Spiders):
    """36kr latest-news crawler.

    Responsibilities (one method each):
      - task production  : ``spider``       — collect detail-page URLs
      - request handling : ``fetch``        — inherited from ``Spiders``
      - task consumption : ``list_loop``    — drain the URL queue
      - business logic   : ``spider_detail``— scrape one article
      - parsing          : ``parse``        — XPath over HTML text
      - persistence      : ``save``         — append titles to 36kr.txt
      - entry point      : ``run``
    """
    def __init__(self):
        # Seed URL: the latest-news listing page.
        self.base_url = 'https://36kr.com/information/web_news/latest/'
        # Site root, used to resolve relative detail-page links.
        self.base = "https://36kr.com"
        # XPath extraction rules.
        # NOTE(review): 'list_url' is currently unused by any method.
        self.rule = {
            'list_url': '//div[@class="information-flow-list"]/div',
            'detail_url': '//a[@class="article-item-description ellipsis-2"]/@href',
            'title': '//h1[@class="article-title margin-bottom-20 common-width"]/text()',
        }
        # FIFO queue of detail-page URLs awaiting consumption.
        self.list_queue = Queue()
        # Unwrap a single-element xpath() result list; falsy input (e.g. an
        # empty list on an XPath miss) passes through unchanged.
        self.maps = lambda x: x[0] if x else x

    def spider(self, url):
        """Producer: fetch the listing page and enqueue absolute detail URLs.

        :param url: listing-page URL to scrape for article links.
        """
        res = self.fetch(url)
        # Reuse the shared parse() helper (consistent with spider_detail).
        detail_urls = self.parse(obj=res.text, tag=self.rule['detail_url'])
        for href in detail_urls:
            # hrefs on the listing page are relative; resolve against the root.
            self.list_queue.put(urljoin(self.base, href))

    def list_loop(self):
        """Consumer: drain the URL queue, scraping each detail page.

        Checks for emptiness *before* get(): Queue.get() without a timeout
        blocks forever, so an initially-empty queue must not reach it.
        """
        while True:
            if self.list_queue.empty():
                logger.info('爬虫采集完毕')
                # In production, notify via WeChat or email here.
                break
            url = self.list_queue.get()
            self.spider_detail(url)

    def spider_detail(self, url):
        """Fetch one article page, extract its title, and persist it.

        :param url: absolute URL of the article detail page.
        """
        res = self.fetch(url)
        title = self.maps(self.parse(obj=res.text, tag=self.rule['title']))
        self.save(title)

    def parse(self, tag, obj, flg=True):
        """Run an XPath expression and return the list of matches.

        :param tag: XPath expression.
        :param obj: HTML text when ``flg`` is True, otherwise a pre-built
                    lxml element exposing ``.xpath()``.
        :param flg: parse ``obj`` with ``etree.HTML`` first when True.
        :return: list produced by ``xpath()``.
        """
        if flg:
            html = etree.HTML(obj)
            return html.xpath(tag)
        return obj.xpath(tag)

    def save(self, data):
        """Append one title to ``36kr.txt``, one record per line.

        Fix: skip empty results — ``maps`` returns ``[]`` when the title
        XPath matched nothing, which would crash ``f.write`` — and write a
        trailing newline so titles are not concatenated into one line.

        :param data: title string, or a falsy value to ignore.
        """
        if not data:
            return
        with open('36kr.txt', 'a', encoding='utf-8') as f:
            f.write(data + '\n')

    def run(self):
        """Entry point: produce the URL tasks, then consume them."""
        self.spider(self.base_url)
        self.list_loop()

if __name__ == '__main__':
    # Script entry point: build the crawler and kick off the full run.
    crawler = Crawl()
    crawler.run()


