# -*- coding: utf-8 -*-
# @Author : Huangcc https://github.com/huangchuchuan/Spider/tree/master/BaiduZhidaoCommentSpider
# modified by Benchover 2019-1-24
# Originally written for Python 2; ported to Python 3 (tested on 3.5).
# Added multiprocessing support: one worker process per search keyword.

import requests
import urllib.parse
from lxml import etree
import re
import codecs
import time
from multiprocessing import Pool  # 单线程速度太慢，启动多进程 进程池Pool


class BaiduZhidao():
    """Scrape Baidu Zhidao (Q&A) answers for one search keyword.

    Usage: construct with a keyword and a task number, call search() to
    collect question ids from the result pages, then find_comments() to
    download every answer of every question into "<task_number>.txt".
    """

    # {keyword} is filled once in __init__; the bare PAGE token has no braces,
    # so str.format leaves it alone and it is swapped per request via replace().
    search_url = 'https://zhidao.baidu.com/search?word={keyword}&ie=gbk&site=-1&sites=0&date=0&pn=PAGE'
    my_headers = {
        'Host': 'zhidao.baidu.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Accept-Encoding': 'gzip, deflate, br',
    }
    comment_url = 'https://zhidao.baidu.com/question/{question_id}.html?sort=9&rn=5&pn=PAGE#wgt-answers'

    def __init__(self, keyword, task_number):
        self.session = requests.Session()
        self.keyword = keyword
        # Baidu Zhidao's search endpoint expects GBK percent-encoding
        # (ie=gbk); quoting the UTF-8 bytes instead would search for a
        # different string entirely.
        self.search_url = self.search_url.format(keyword=urllib.parse.quote(keyword.encode('gbk')))
        print(self.search_url)
        self.task_number = task_number
        self.question_ids = []  # filled by search(), consumed by find_comments()
        self.filename = str(task_number) + ".txt"  # output file for this task

    def set_keyword(self, keyword):
        """Replace the keyword recorded in the output file header."""
        self.keyword = keyword

    def reset_filename(self):
        """Reset the output filename back to '<task_number>.txt'."""
        self.filename = str(self.task_number) + ".txt"

    @staticmethod
    def extract_question_id(url):
        """Return the numeric question id from a /question/<id>.html URL,
        or None when the URL does not match."""
        pattern = r'/question/(\d+?)\.'
        result = re.findall(pattern, url)
        if result:
            return result[0]
        else:
            return None

    @staticmethod
    def html_filter(html_text):
        """Strip HTML tags, drop newlines and turn tabs into spaces."""
        html_text = html_text.replace('\n', '').replace('\t', ' ')
        pattern = re.compile(r'<[^>]+>', re.S)
        no_html_text = pattern.sub('', html_text)
        return no_html_text

    def search(self, page=0):
        """Walk the search result pages starting at offset `page` (10 results
        per page), accumulating question ids in self.question_ids.

        Pagination is iterative (the original recursive form could exhaust
        the recursion limit on long result sets) and stops when the
        "next page" link disappears or its page number cannot be parsed.
        """
        while True:
            print("进程：" + str(self.task_number) + '-*- start search with page %d -*-' % (page // 10 + 1))
            time.sleep(1)  # be polite to the server

            try:
                resp = self.session.get(url=self.search_url.replace('PAGE', str(page)),
                                        headers=self.my_headers, timeout=15)
            except requests.RequestException as exc:
                # Don't let one flaky request kill the whole worker process.
                print("进程：" + str(self.task_number) + ' network error in search: %r' % exc)
                return
            if resp.status_code != 200:
                print("进程：" + str(self.task_number) + 'Error status code %d in getting search result with page %d' % (
                resp.status_code, (page // 10 + 1)))
                print(resp.content)
                return

            response = etree.HTML(resp.text)
            urls = response.xpath('//a[@class="ti"]/@href')
            # filter(None, ...) drops urls whose id failed to parse.
            self.question_ids.extend(filter(None, map(self.extract_question_id, urls)))

            next_page = response.xpath('//a[@class="pager-next"]/@href')
            if not next_page:
                print("进程：" + str(self.task_number) + '=*= end search with page %d =*=' % (page // 10 + 1))
                return
            next_page_number = re.findall(r'&pn=(\d+)$', next_page[0])
            if not next_page_number:
                # A pager link we cannot parse would otherwise restart at
                # page 0 and loop forever; stop instead.
                print("进程：" + str(self.task_number) + '=*= end search with page %d =*=' % (page // 10 + 1))
                return
            page = int(next_page_number[0])

    def print_question_ids(self):
        """Debug helper: dump the collected question ids."""
        print(self.question_ids)

    def find_comments(self):
        """Download the answers of every collected question id."""
        total = len(self.question_ids)
        for i, question_id in enumerate(self.question_ids):
            print("进程：" + str(self.task_number) + '|*| start get content from question id %s - %d/%d |*|' % (
            question_id, i + 1, total))
            url = self.comment_url.format(question_id=question_id)
            self.comment(url)
            print("进程：" + str(self.task_number) + '_*_ end get content from question id %s - %d/%d _*_' % (
            question_id, i + 1, total))

    def comment(self, url, page=0):
        """Scrape every answer page of one question (5 answers per page) and
        append '<title> <question body> <answer>' lines to self.filename.

        Iterative pagination, same rationale as search().
        """
        while True:
            print("进程：" + str(self.task_number) + ' * start get comments with page %d *' % (page // 5 + 1))
            time.sleep(1)
            try:
                resp = self.session.get(url.replace('PAGE', str(page)),
                                        headers=self.my_headers, allow_redirects=False, timeout=15)
            except requests.RequestException as exc:
                # The request can hang even with a timeout (reportedly in the
                # DNS phase; switching to AliDNS/DNSPod avoided it) or fail —
                # log and skip this question instead of crashing the worker.
                print("进程：" + str(self.task_number) + ' network error in comment: %r' % exc)
                return
            if resp.status_code != 200:
                print("进程：" + str(self.task_number) + 'Error status code %d in getting comment result with page %d' % (
                resp.status_code, (page // 5 + 1)))
                print(resp.content)
                return

            response = etree.HTML(resp.content)
            # The answer div also contains a literal "展开全部" ("expand all")
            # label that cannot be excluded via xpath; stripped below.
            comment_nodes = response.xpath('//*[@class="line content"]/div[1]')
            comments = [node.xpath('string(.)').strip() for node in comment_nodes]
            print("进程：" + str(self.task_number) + ' | get %d comments | ' % len(comments))

            # Question title and body; .text can be None when the node has
            # child elements, which would break the concatenation below.
            ask_title = response.xpath('//span[@class="ask-title"]')
            ask_title = (ask_title[0].text or '') if ask_title else ''
            ask_info = response.xpath('//span[@class="con-all"]')
            ask_info = (ask_info[0].text or '') if ask_info else ''

            if comments:
                with codecs.open(self.filename, 'a', encoding='utf-8') as f1:
                    f1.write(self.keyword + '\n')  # record the search keyword too
                    for data in comments:
                        f1.write(ask_title + ' ' + ask_info + ' '
                                 + self.html_filter(data).replace('展开全部', '') + '\n')

            next_page = response.xpath('//a[@class="pager-next"]/@href')
            if not next_page:
                print("进程：" + str(self.task_number) + ' - end get comments with page %d -' % (page // 5 + 1))
                return
            next_page_number = re.findall(r'&pn=(\d+)#', next_page[0])
            if not next_page_number:
                # Unparseable pager link: stop instead of restarting at page 0.
                print("进程：" + str(self.task_number) + ' - end get comments with page %d -' % (page // 5 + 1))
                return
            page = int(next_page_number[0])


def fun(word, no):
    """Worker entry point for one keyword: search, then scrape all answers.

    word -- the keyword to search for
    no   -- this worker's task number (also names the output file)

    Pool.apply_async silently discards a worker's exception unless .get()
    is called on the AsyncResult, so any error must be caught and logged
    here or it vanishes without a trace.
    """
    print("进程：" + str(no) + "开始")
    try:
        baidu_zhidao = BaiduZhidao(word, no)
        baidu_zhidao.search()
        baidu_zhidao.find_comments()
    except Exception as exc:
        print("进程：" + str(no) + " failed: %r" % exc)
    print("进程：" + str(no) + "结束")


if __name__ == '__main__':
    # One worker process per keyword; each worker writes its own <task_no>.txt.
    # strip() removes the trailing newline readlines() would keep — it would
    # otherwise be GBK+URL-encoded into the search query — and blank lines
    # are skipped so they don't spawn useless workers.
    with open("search.txt", 'r', encoding="UTF-8") as f:
        search_keywords = [line.strip() for line in f if line.strip()]
    p = Pool(8)  # maximum number of worker processes
    # task numbers start at 1; they name the per-worker output files
    for task_no, w in enumerate(search_keywords, start=1):
        p.apply_async(fun, args=(w, task_no))
    print("等待所有进程结束")
    p.close()
    p.join()
    print("所有进程已经结束")