from queue import Queue
from urllib.parse import urljoin
from pymongo import MongoClient
from lxml import etree
from utils.base import Spider

# url = 'http://www.xiladaili.com/gaoni'
# res = Spider().fetch(url)
# print(res.text)


def flt(x):
    """Return the first element of *x*, or '' when *x* is empty/falsy.

    Used to flatten single-item XPath result lists into a plain string.
    (Was a lambda assigned to a name — PEP 8 E731.)
    """
    return x[0] if x else ''
class Crawl(Spider):
    """Crawl 36kr technology news: collect list-page article links,
    fetch each article's title/content, and store them in MongoDB.
    """

    # XPath extraction rules for the list page and article pages.
    # NOTE(review): fixed typo 'titile' -> 'title'; the stored Mongo field
    # name changes accordingly.
    rules = {
        'list_urls': '//a[@class="article-item-title weight-bold"]/@href',
        'detail_content': '//div[@class="common-width content articleDetailContent kr-rich-text-wrapper"]//text()',
        'title': '//h1[@class="article-title margin-bottom-20 common-width"]//text()'
    }

    # Class-level FIFO queue of detail-page URLs awaiting crawling.
    list_queue = Queue()

    def __init__(self):
        # Let the Spider base class initialize itself (it is constructible
        # with no args — see the scratch code at the top of the file).
        super().__init__()
        self.base = 'https://36kr.com/'
        self.url = 'https://36kr.com/information/technology'
        # Lazily-created MongoDB client, shared across saves instead of
        # opening a new (never-closed) connection per document.
        self._client = None

    def crawl(self):
        """Fetch the list page and enqueue absolute URLs of every article."""
        res = self.fetch(self.url)
        list_urls = etree.HTML(res.text).xpath(self.rules['list_urls'])
        for url in list_urls:
            # e.g. https://36kr.com/p/1333116082389384
            self.list_queue.put(urljoin(self.base, url))

    def list_loop(self):
        """Drain the queue, crawling each detail page in turn.

        Checks empty() *before* get(): the previous get-first order blocked
        forever when the queue started out empty.
        """
        while not self.list_queue.empty():
            url = self.list_queue.get()
            self.craw_detail(url)
            print(self.list_queue.qsize())

    def craw_detail(self, url):
        """Fetch one article page, extract title and content, and save it."""
        res = self.fetch(url)
        html = etree.HTML(res.text)
        content = html.xpath(self.rules['detail_content'])
        title = flt(html.xpath(self.rules['title']))
        data = {
            'title': title,
            'content': content
        }
        self.save_mongo(data)

    def save_mongo(self, data):
        """Insert *data* into the python/feier collection.

        Returns the pymongo InsertOneResult on success, or an error string
        when *data* is not a dict (kept for backward compatibility).
        """
        # Reuse a single client: the old code leaked one unclosed
        # MongoClient per call.
        if self._client is None:
            self._client = MongoClient()
        col = self._client['python']['feier']
        if isinstance(data, dict):
            res = col.insert_one(data)
            return res
        else:
            return '数据格式有问题'

    def run(self):
        """Entry point: collect list URLs, then crawl every detail page."""
        self.crawl()
        self.list_loop()




#//a[@class='article-item-title weight-bold']/text()



if __name__ == '__main__':
    # Script entry point: build the crawler and kick off the full run.
    Crawl().run()
