# _*_ coding: utf-8 _*_


__author__ = 'johncapper'
__date__ = '2017/10/11 20:16'

from threading import Thread,Event
import requests
from scrapy.selector import Selector
import time
import queue
from fake_useragent import UserAgent

# One shared UserAgent instance (fake_useragent third-party library).
ua = UserAgent()

# Request headers used by every crawler thread.
# NOTE(review): ``ua.random`` is evaluated once at import time, so all
# requests in a run send the same User-Agent string -- confirm intended.
headers = {
    "User-Agent": ua.random}


class MyThread(Thread):
    """Crawler thread: fetches one CSDN blog index page and puts each
    (title, url) pair onto the shared queue."""
    # NOTE(review): __slots__ is ineffective here -- threading.Thread does
    # not define __slots__, so instances still carry a __dict__.
    __slots__ = ('sid', 'queue')

    def __init__(self, sid, queue):
        """sid: index-page number to fetch; queue: shared result queue."""
        Thread.__init__(self)
        self.queue = queue
        self.sid = sid

    def run(self):
        self.showNum(self.sid)

    def showNum(self, sid):
        """Build the index-page URL for page *sid* and scrape it."""
        url = "http://blog.csdn.net/?&page={}".format(sid)
        self.request_url(url=url)

    def request_url(self, url):
        """Fetch *url* and push the page's blog-overview links onto the queue.

        Fixes: removed an unused ``count`` local; added a request timeout so
        a dead server cannot hang this thread (and the whole join) forever.
        """
        text = requests.get(url=url, headers=headers, timeout=10).text
        response = Selector(text=text)
        titles = response.css('.csdn-tracking-statistics a::text').extract()
        url_list = response.css('.csdn-tracking-statistics a::attr(href)').extract()
        # dict(zip(...)) de-duplicates repeated titles, keeping the last href.
        for item in dict(zip(titles, url_list)).items():
            self.queue.put(item)

class CoverThread(Thread):
    """Accounting thread: counts iterations and, every 5 counts, hands
    control to the consumer thread through the paired Events."""
    __slots__ = ('cEvent', 'tEvent', 'queue')

    def __init__(self, queue, cEvent, tEvent):
        Thread.__init__(self)
        self.count = 0       # iterations since the last hand-off
        self.all_count = 0   # total iterations
        self.queue = queue
        self.cEvent = cEvent
        self.tEvent = tEvent
        # Fix: this thread loops forever; as a non-daemon thread it kept the
        # whole process alive after the main thread finished, so the program
        # never exited.  Daemon threads die with the interpreter.
        self.daemon = True

    def run(self):
        # Count forever; every 5 iterations signal the consumer thread
        # (cEvent) and wait until it signals completion back (tEvent).
        # NOTE(review): this loop never blocks on the queue, so it spins at
        # full speed and the counts do not track actual queue items --
        # confirm whether a ``queue.get()`` was intended here.
        while True:
            self.count += 1
            self.all_count += 1
            self.print_get_thr()
            if self.count == 5:
                self.cEvent.set()    # wake the consumer
                self.tEvent.wait()   # wait for it to finish its batch
                self.tEvent.clear()
                self.count = 0

    def print_get_thr(self):
        """Report progress; replace with DB storage or other processing."""
        print(self.all_count)



class TakeTheQueue(Thread):
    """Consumer thread: on each cEvent signal, pops one scraped
    (title, url) pair off the queue and appends it to the output file."""
    __slots__ = ('cEvent', 'tEvent', 'queue')

    def __init__(self, cEvent, tEvent, queue):
        Thread.__init__(self)
        self.cEvent = cEvent
        self.tEvent = tEvent
        self.queue = queue
        # ``setDaemon`` is deprecated; assign the attribute directly.
        self.daemon = True

    def run(self):
        while True:
            self.cEvent.wait()     # wait for the counter thread's signal
            self.printfivequeue()
            self.cEvent.clear()
            self.tEvent.set()      # hand control back to the counter

    def printfivequeue(self):
        """Append one queued (title, url) pair to the output file.

        Bug fix: the original called ``self.queue.get()`` twice, consuming
        TWO different items and writing the title of one with the url of the
        other -- and blocking forever when only one item remained.  A deeper
        crawl of each link could be done here instead of a plain write.
        """
        title, url = self.queue.get()
        with open('johncapper.docx', 'a') as f:
            f.write(str(title) + ':' + str(url) + '\n')


if __name__ == '__main__':
    start = time.time()
    q = queue.LifoQueue()
    # One crawler thread per index page 1..9.  (asyncio would likely beat
    # threads for this I/O-bound work, but threads keep the code simple.)
    my_thr = [MyThread(i, q) for i in range(1, 10)]

    cEvent = Event()  # counter -> consumer: "batch of 5 ready"
    tEvent = Event()  # consumer -> counter: "batch handled"

    co_thr = CoverThread(q, cEvent, tEvent)
    takeTheQueue = TakeTheQueue(cEvent, tEvent, q)
    # Fix: co_thr runs an endless loop; as a non-daemon thread it kept the
    # process alive forever after the crawlers joined.  Marking it daemon
    # (idempotent if CoverThread already sets it) lets the program exit.
    co_thr.daemon = True
    takeTheQueue.start()

    for t in my_thr:
        t.start()

    co_thr.start()

    for t in my_thr:
        t.join()

    # Sentinel item so a consumer blocked in queue.get() can still return.
    q.put((1, None))
    end = time.time()
    print("---------------------------------------main thread", "Complete in {} seconds".format(end - start))