import csv
from queue import Empty, Queue
from threading import Lock, Thread

import requests
from lxml import etree

class csdnspider(Thread):
    """Worker thread that scrapes CSDN blog posts.

    Pulls search-result page URLs from ``page_queue``, extracts the links
    to individual blog posts on each page, fetches every post, and pushes
    ``(title, content)`` tuples onto ``blog_queue`` for the writer threads.
    """

    # Shared across all spider threads; presents the crawler as a browser.
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'
    }

    def __init__(self, pageQueue, blogQueue, *args, **kwargs):
        """Store the input (page URLs) and output (scraped posts) queues."""
        super(csdnspider, self).__init__(*args, **kwargs)
        self.base_domain = 'http://blog.csdn.net/'
        self.page_queue = pageQueue   # URLs of search-result pages to crawl
        self.blog_queue = blogQueue   # output: (title, content) tuples

    def run(self):
        """Drain the page queue; scrape each linked blog post's title/body."""
        while True:
            if self.page_queue.empty():
                break
            page_url = self.page_queue.get()
            response = requests.get(page_url, headers=self.header)
            html = etree.HTML(response.text)
            # lxml's xpath() already returns a plain list; the original code
            # called Scrapy's .extract(), which does not exist on lxml results.
            post_urls = html.xpath("//span[@class='down fr']/../span[@class='link']/a/@href")
            for post_url in post_urls:
                # BUG FIX: requests.get takes ``headers=``, not ``header=``
                # (the old keyword raised TypeError on every request).
                res = requests.get(post_url, headers=self.header)
                html_detail = etree.HTML(res.text)
                titles = html_detail.xpath('//h1[@class="title-article"]/text()')
                title = titles[0] if titles else ''
                contents = html_detail.xpath("//div[@class='article_content clearfix']")
                # Serialize the article node to an HTML string so the CSV
                # writer downstream receives text, not an lxml Element.
                content = etree.tostring(contents[0], encoding='unicode') if contents else ''
                self.blog_queue.put((title, content))

class csdnwriter(Thread):
    """Worker thread that persists scraped posts.

    Pops ``(title, content)`` tuples off ``blog_queue`` and appends them as
    rows via a shared ``csv.writer``; writes are serialized by ``gLock``
    because the writer object is not thread-safe on its own.
    """

    def __init__(self, blog_queue, writer, gLock, *args, **kwargs):
        """Store the input queue, the shared CSV writer, and the write lock."""
        super(csdnwriter, self).__init__(*args, **kwargs)
        self.blog_queue = blog_queue  # input: (title, content) tuples
        self.writer = writer          # shared csv.writer instance
        self.lock = gLock             # guards concurrent writerow() calls

    def run(self):
        """Consume rows until the queue stays empty for 40s, then exit."""
        while True:
            try:
                # Block up to 40s waiting for work; a timeout means the
                # spiders have (almost certainly) finished, so shut down.
                # BUG FIX: the original bare ``except:`` also swallowed
                # write errors and KeyboardInterrupt — catch only Empty.
                title, content = self.blog_queue.get(timeout=40)
            except Empty:
                break
            # Context manager releases the lock even if writerow raises
            # (the manual acquire/release pair could leave it held).
            with self.lock:
                self.writer.writerow((title, content))
            print('保存一条')

def main():
    """Crawl 10 pages of CSDN search results for 'python'.

    Starts 5 spider threads feeding a blog queue and 5 writer threads
    draining it into ``csdn.csv``, then waits for all of them to finish
    before closing the output file.
    """
    page_queue = Queue(10)
    blog_queue = Queue(500)
    bLock = Lock()
    # BUG FIX: filename was misspelled 'scdn.scv'.
    fp = open('csdn.csv', 'a', newline='', encoding='utf-8-sig')
    writer = csv.writer(fp)
    # BUG FIX: writerow(('title content')) passed a bare string, which csv
    # iterates character by character — one column per letter. The header
    # must be a sequence of two field names.
    writer.writerow(('title', 'content'))

    for page in range(1, 11):
        url = 'https://so.csdn.net/so/search/s.do?p=%s&q=%s&t=blog&domain=&o=&s=&u=&l=&f=&rbg=0' % (page, 'python')
        page_queue.put(url)

    threads = []
    for _ in range(5):
        t = csdnspider(page_queue, blog_queue)
        t.start()
        threads.append(t)
    for _ in range(5):
        t = csdnwriter(blog_queue, writer, bLock)
        t.start()
        threads.append(t)

    # BUG FIX: join the workers and close the file so buffered CSV rows
    # are flushed to disk (the original leaked the file handle).
    for t in threads:
        t.join()
    fp.close()


if __name__ == '__main__':
    main()


