# coding:utf-8
import os

import requests
import sys
from lxml import etree
import json
import threading
from queue import Queue


class QiushibakeThreading(object):
    """Multi-threaded qiushibaike.com spider.

    Producer/consumer pipeline built on three queues:

        url_queue -> (get_page) -> response_queue -> (parse_data)
                  -> data_queue -> (save_data) -> JSON-lines file

    ``run()`` starts daemon worker threads for every stage and blocks
    until all three queues report fully processed via ``Queue.join()``.
    """

    def __init__(self, page):
        # Listing-page URL template; {} is the 1-based page number.
        self.base_url = 'https://www.qiushibaike.com/8hr/page/{}/'
        # Number of listing pages to crawl.
        self.page = page
        # Output file, one JSON object per line (written by save_data).
        # NOTE(review): assumes the 'ext/' directory already exists — confirm.
        self.file = open('ext/E5_qiushibaike_threading.json', mode='w', encoding='utf-8')
        self.url_queue = Queue()       # URLs waiting to be fetched
        self.response_queue = Queue()  # raw page bytes waiting to be parsed
        self.data_queue = Queue()      # parsed record lists waiting to be saved

    def generate_url(self):
        """Fill url_queue with one listing URL per page, 1..page inclusive."""
        for page_no in range(1, self.page + 1):
            self.url_queue.put(self.base_url.format(page_no))

    def get_page(self):
        """Worker: fetch URLs from url_queue, push page bodies to response_queue.

        Runs forever (daemon thread). Non-200 responses are re-queued for
        retry before task_done() is called, so Queue.join() stays balanced.
        """
        # Hoisted out of the loop: the original appended to sys.path and
        # re-ran __import__ on every single URL, growing sys.path unboundedly.
        _path = os.path.join(os.path.abspath('.'), '../')
        if _path not in sys.path:
            sys.path.append(_path)
        _ei = __import__('Utils.C002_extract_info', fromlist=['C002_extract_info'])
        _ua_file = {'pc': '{0}Utils/{1}'.format(_path, _ei.ExtractInfo.ua_file['pc'])}
        while True:
            _url = self.url_queue.get()
            print('正在获取%s的响应' % _url)
            # Fresh (presumably random) User-Agent per request.
            _user_agent = _ei.ExtractInfo.get_useragent(file=_ua_file)
            _response = requests.get(_url, headers=_user_agent)
            if _response.status_code == 200:
                self.response_queue.put(_response.content)
            else:
                # Retry later by putting the URL back on the queue.
                self.url_queue.put(_url)
            self.url_queue.task_done()

    def parse_data(self):
        """Worker: parse raw HTML from response_queue into lists of record dicts.

        Each record carries user / link / age / gender / content keys.
        Runs forever (daemon thread).
        """
        while True:
            print('开始解析数据')
            _response = self.response_queue.get()
            # Build an element tree from the page source.
            _html = etree.HTML(_response.decode())
            # All post nodes on the listing page.
            _node_list = _html.xpath('//div[@id="content-left"]/div')
            _data_list = []
            for _node in _node_list:
                _temp = {}
                try:
                    _temp['user'] = _node.xpath('./div[1]/a[2]/h2/text()')[0].strip()
                    _temp['link'] = 'https://www.qiushibaike.com' + _node.xpath('./div[1]/a[2]/@href')[0]
                    _temp['age'] = _node.xpath('./div[1]/div/text()')[0]
                    _temp['gender'] = _node.xpath('./div[1]/div/@class')[0].split(' ')[-1].replace('Icon', '')
                except IndexError:
                    # Anonymous posts lack the author sub-tree, so the [0]
                    # lookups fail with IndexError (was a bare except, which
                    # also swallowed KeyboardInterrupt/SystemExit).
                    _temp['user'] = '匿名用户'
                    _temp['link'] = None
                    _temp['age'] = None
                    _temp['gender'] = None
                # Guard the content lookup too: the original indexed [0]
                # unconditionally, and an empty match killed the thread.
                _content = _node.xpath('./a[1]/div/span/text()')
                _temp['content'] = _content[0].strip() if _content else None
                _data_list.append(_temp)
            self.data_queue.put(_data_list)
            self.response_queue.task_done()

    def save_data(self):
        """Worker: drain data_queue and append each record as a JSON line."""
        while True:
            print('开始保存数据')
            _data_list = self.data_queue.get()
            for _data in _data_list:
                _str_data = json.dumps(_data, ensure_ascii=False) + ',\n'
                self.file.write(_str_data)
            self.data_queue.task_done()

    def __del__(self):
        # Best-effort close; guard so a half-constructed or already-closed
        # instance does not raise during interpreter shutdown.
        try:
            if not self.file.closed:
                self.file.close()
        except AttributeError:
            pass

    def run(self):
        """Start all worker threads and wait for every queue to drain."""
        _thread_list = []
        # URL-producer thread.
        _thread_list.append(threading.Thread(target=self.generate_url))
        # Three fetcher threads.
        for _ in range(3):
            _thread_list.append(threading.Thread(target=self.get_page))
        # Three parser threads.
        for _ in range(3):
            _thread_list.append(threading.Thread(target=self.parse_data))
        # Single writer thread (keeps file writes serialized).
        _thread_list.append(threading.Thread(target=self.save_data))
        for _t in _thread_list:
            # Daemon threads die with the main thread; setDaemon() is
            # deprecated since Python 3.10, so assign the attribute.
            _t.daemon = True
            _t.start()
        # Block until every queued item has been fetched, parsed and saved.
        for _q in [self.url_queue, self.response_queue, self.data_queue]:
            _q.join()


def main():
    """Entry point: read the maximum page count from stdin and run the spider."""
    max_page = int(input("请输入最大页数: "))
    spider = QiushibakeThreading(max_page)
    spider.run()


# Run the spider only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
