# coding:utf-8

import json
import queue
import threading

class ThreadParser(threading.Thread):
    """Worker thread that consumes raw HTML pages from ``data_queue``,
    extracts article fields with lxml, and appends each result as one
    JSON line to a shared output file.

    Termination: the worker exits once ``data_queue`` is empty AND the
    crawl-complete flag is set — an empty queue alone does not mean the
    crawler has finished producing pages.
    """

    def __init__(self, name, filename, data_queue, lock, isCrawlComplete):
        super(ThreadParser, self).__init__()
        self.name = name                        # thread label used in log output
        self.filename = filename                # shared, already-open output file object
        self.data_queue = data_queue            # queue.Queue of HTML strings to parse
        self.lock = lock                        # serializes writes to the shared file
        self.isCrawlComplete = isCrawlComplete  # crawl-finished flag (Event or bool)

        # When should the parser thread stop?
        # 1. When the parse queue is empty: not sufficient on its own, because
        #    an empty queue does not mean crawling has finished producing data.
        # 2. A flag: when the crawl queue is drained (crawling complete), the
        #    crawler sets isCrawlComplete; empty queue + flag => safe to stop.

    def run(self):
        # BUG FIX: the original printed ``self.data_queue.get()`` here, which
        # consumed (and silently discarded) one queued HTML document, blocking
        # until one was available. Print the thread's name instead.
        print('线程%s开始处理数据' % self.name)
        while True:
            try:
                html = self.data_queue.get(False)
            except queue.Empty:
                # Queue drained: stop only if the crawler has finished,
                # otherwise keep polling for more work. This implements the
                # termination scheme described in __init__ (the original
                # never consulted the flag and broke on an empty queue).
                # NOTE(review): isCrawlComplete's concrete type is not
                # visible here — accept a threading.Event or a plain flag.
                done = (self.isCrawlComplete.is_set()
                        if hasattr(self.isCrawlComplete, 'is_set')
                        else bool(self.isCrawlComplete))
                if done:
                    break
                continue

            try:
                # Debug dump of the page being parsed (overwritten each time).
                with open('text.html', 'w', encoding='utf-8') as f:
                    f.write(html)
                self.parser(html)
            except Exception as e:
                # Keep the worker alive on a single bad page, but report the
                # error instead of silently swallowing it (original bare
                # ``except: pass`` hid all parse failures).
                print('Thread %s parse error: %s' % (self.name, e))
            finally:
                # Balance the get() above even when parsing fails, so that
                # Queue.join() callers are not blocked forever.
                self.data_queue.task_done()
        print('线程%s数据处理完成' % self.name)

    def parser(self, html):
        """Extract article fields from *html* and append them as a single
        JSON object (one line) to the shared output file.

        Raises IndexError if an expected element is missing from the page;
        run() catches and reports such failures per page.
        """
        from lxml import etree  # local import: only worker threads need lxml
        et = etree.HTML(html)
        title = et.xpath('//div[@class="w645 fl"]//h1')[0].text
        content = et.xpath('//div[contains(@class,"content-txt pt10")]')[0].text
        # number of up-votes
        praise_num = et.xpath('//span[@class="fl ding"]//em')[0].text
        # number of down-votes
        oppose_num = et.xpath('//span[@class="fl cai"]//em')[0].text
        # number of comments
        comments_num = et.xpath('//span[@class="fl det-commentClick"]//em')[0].text

        items = {
            'title': title,
            'content': content,
            'praise_num': praise_num,
            'oppose_num': oppose_num,
            'comments_num': comments_num,
        }

        # BUG FIX: the original did ``json.dumps(...).encode('utf-8') + '\n'``,
        # which is bytes + str and raises TypeError on Python 3.
        # Assumes self.filename was opened in text mode with utf-8 encoding
        # — TODO confirm against the caller that opens the file.
        line = json.dumps(items, ensure_ascii=False) + '\n'
        with self.lock:
            self.filename.write(line)