# coding:utf-8
import os

import requests
import sys
from lxml import etree
import json


class Qiushibake(object):
    """Scraper for qiushibaike.com hot-post listing pages.

    Crawls pages 1..page, extracts author/link/age/gender/content per post,
    and appends each record as one JSON object per line to
    ``ext/E4_qiushibaike.json``.
    """

    def __init__(self, page):
        """
        :param page: number of listing pages to crawl (1..page inclusive).
        """
        self.base_url = 'https://www.qiushibaike.com/8hr/page/{}/'
        self.page = page
        self.url_list = []
        # Ensure the output directory exists before opening the file,
        # otherwise open() raises FileNotFoundError on a fresh checkout.
        os.makedirs('ext', exist_ok=True)
        self.file = open('ext/E4_qiushibaike.json', mode='w', encoding='utf-8')

    def generate_url(self):
        # Build the listing-page URLs for pages 1..self.page.
        self.url_list = [self.base_url.format(i) for i in range(1, self.page + 1)]

    def get_page(self, url):
        """Fetch *url* using a User-Agent header supplied by the project's
        ``Utils.C002_extract_info`` helper.

        :param url: listing-page URL to request.
        :return: raw response body as bytes.
        """
        _path = os.path.join(os.path.abspath('.'), '../')
        # Only extend sys.path once; the original appended on every call,
        # growing sys.path unboundedly during a crawl.
        if _path not in sys.path:
            sys.path.append(_path)
        # Dynamic import of the project-local User-Agent helper.
        _ei = __import__('Utils.C002_extract_info', fromlist=['C002_extract_info'])
        _ua_file = {'pc': '{0}Utils/{1}'.format(_path, _ei.ExtractInfo.ua_file['pc'])}
        _user_agent = _ei.ExtractInfo.get_useragent(file=_ua_file)
        _response = requests.get(url, headers=_user_agent)

        return _response.content

    def parse_data(self, response):
        """Parse one listing page.

        :param response: raw page bytes as returned by :meth:`get_page`.
        :return: list of dicts with keys user/link/age/gender/content.
        """
        # Convert the page source into an element tree.
        _html = etree.HTML(response.decode())
        # All post nodes on the page.
        _node_list = _html.xpath('//div[@id="content-left"]/div')
        _data_list = []
        # Walk the node list and extract the fields of each post.
        for _node in _node_list:
            _temp = {}
            try:
                _temp['user'] = _node.xpath('./div[1]/a[2]/h2/text()')[0].strip()
                _temp['link'] = 'https://www.qiushibaike.com' + _node.xpath('./div[1]/a[2]/@href')[0]
                _temp['age'] = _node.xpath('./div[1]/div/text()')[0]
                _temp['gender'] = _node.xpath('./div[1]/div/@class')[0].split(' ')[-1].replace('Icon', '')
            except IndexError:
                # Anonymous posts have no author block, so the xpath results
                # above are empty; fall back to placeholder values.
                # (Was a bare `except:` — narrowed to the expected error.)
                _temp['user'] = '匿名用户'
                _temp['link'] = None
                _temp['age'] = None
                _temp['gender'] = None
            # The content span can also be missing (e.g. pure-image posts);
            # the original indexed [0] unguarded here and crashed the crawl.
            _content = _node.xpath('./a[1]/div/span/text()')
            _temp['content'] = _content[0].strip() if _content else None
            _data_list.append(_temp)

        return _data_list

    def save_data(self, data_list):
        """Append each record as one JSON line (trailing comma kept for
        compatibility with the original output format)."""
        for _data in data_list:
            _str_data = json.dumps(_data, ensure_ascii=False) + ',\n'
            self.file.write(_str_data)

    def __del__(self):
        # `file` may not exist if __init__ failed before opening it;
        # guard so interpreter shutdown doesn't print a spurious error.
        _f = getattr(self, 'file', None)
        if _f is not None and not _f.closed:
            _f.close()

    def run(self):
        """Crawl all pages: build URLs, then fetch, parse and persist each."""
        self.generate_url()
        for url in self.url_list:
            _response = self.get_page(url)
            _data_list = self.parse_data(_response)
            self.save_data(_data_list)


def main():
    """Entry point: ask the user for the page count and launch the crawler."""
    max_pages = int(input("请输入最大页数: "))
    Qiushibake(max_pages).run()


# Run the crawler only when executed as a script, not on import.
if __name__ == '__main__':
    main()
