from multiprocessing import Process
from pymongo import MongoClient
from bs4 import BeautifulSoup

import requests
import sys
import re
import time
import random
import pprint

# parse_questions() and parse_answer() recurse into each other for every page
# crawled, so the default recursion limit would be exhausted quickly on a long
# crawl chain.
sys.setrecursionlimit(1000000)

# connect=False defers the actual connection until first use — needed because
# the client object is created before worker processes are forked in __main__.
client = MongoClient('localhost', 27017, connect = False)

# Database/collection where scraped question/answer documents are stored.
baiduzhidao_qa_db = client['baiduzhidao_qa_db']
baiduzhidao_qa_coll = baiduzhidao_qa_db['baiduzhidao_qa_coll']

# Browser-like request headers (desktop Chrome UA, zhidao.baidu.com Host and
# Referer) sent with every question-page request.
h = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Host': 'zhidao.baidu.com',
    'Pragma': 'no-cache',
    'Referer': 'https://zhidao.baidu.com/question/578425885.html?fr=qrl&index=0&qbl=topic_question_0',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36'
}


def parse_questions(soup = None, url = ''):
    """Collect question links from a page and crawl each one via parse_answer().

    Exactly one of *soup* / *url* is expected: when *url* is non-empty the page
    is fetched and parsed (overwriting *soup*); otherwise the caller-supplied
    *soup* is scanned directly. Returns None in all cases.
    """
    questions = []

    if url:
        try:
            # Send the same browser-like headers as parse_answer() — the
            # original omitted them here, inconsistently with the rest of
            # the crawler.
            r = requests.get(url, headers = h)
            r.encoding = 'gbk'  # zhidao.baidu.com serves GBK-encoded pages
            soup = BeautifulSoup(r.text, 'html.parser')
        except Exception:
            # Best-effort crawler: skip pages that fail to download or parse.
            # (Narrowed from a bare except, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            return

    if soup:
        for link in soup.findAll('a'):
            href = link.get('href')
            if not href:
                continue

            # Relative question links look like /question/<qid>.html
            if re.match(r'^/question', href):
                questions.append('https://zhidao.baidu.com' + href)

        # Randomize crawl order so parallel workers diverge from each other.
        random.shuffle(questions)

        for q in questions:
            parse_answer(q)


def parse_answer(url):
    """Scrape one question page: store its question text, best answer and
    other answers in MongoDB, then recurse into the question links found on
    the page (mutual recursion with parse_questions()).

    Pages whose question id is already stored are skipped.
    """
    print(url)

    # The question id is the first run of 4+ digits in the URL.
    # Guard against digit-free URLs — the original indexed [0] unconditionally
    # and raised IndexError. Raw string avoids the deprecated '\d' escape.
    ids = re.findall(r'\d{4,}', url)
    if not ids:
        return
    qid = ids[0]

    # Already crawled — nothing to do.
    if baiduzhidao_qa_coll.find_one({'qid': qid}):
        return

    try:
        r = requests.get(url, headers = h)
        r.encoding = 'gbk'  # zhidao.baidu.com serves GBK-encoded pages
        soup = BeautifulSoup(r.text, 'html.parser')
    except Exception:
        # Best-effort crawler: skip pages that fail to download or parse.
        # (Narrowed from a bare except, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        return

    # Question title; empty string when the expected <h1> is missing.
    try:
        question = soup.find('h1').get_text().strip()
    except AttributeError:
        question = ''

    # Optional longer question description.
    try:
        question_desc = soup.find(class_ = 'q-content').get_text().strip()
    except AttributeError:
        question_desc = ''

    # Accepted ("best") answer with its like/unlike counters. Any missing
    # element or non-numeric counter leaves best_answer empty. The
    # 'wgt-best' container is looked up once instead of three times.
    try:
        best = soup.find(class_ = 'wgt-best')
        best_answer = {
            'text': best.find(class_ = 'answer').find(class_ = 'best-text').get_text().strip(),
            'like': int(best.find(attrs = {'alog-action': 'qb-zan-btnbestbox'}).get('data-evaluate')),
            'unlike': int(best.find(attrs = {'alog-action': 'qb-evaluate-outer'}).get('data-evaluate'))
        }
    except (AttributeError, TypeError, ValueError):
        best_answer = {}

    # Remaining (non-best) answers; any answer lacking the expected markup is
    # skipped rather than aborting the whole page.
    other_answers = []

    for ans in soup.select('.wgt-answers .answer'):
        try:
            entry = {
                # Strip the trailing "展开" ("expand") widget text.
                'answer_text': re.sub(r'展开$', '', ans.find(class_ = 'answer-text').get_text()).strip(),
                'like': int(ans.find(attrs = {'alog-action': "qb-zan-btn"}).get('data-evaluate')),
                'unlike': int(ans.find(attrs = {'alog-action': "qb-evaluate-outer"}).get('data-evaluate'))
            }
        except (AttributeError, TypeError, ValueError):
            continue

        other_answers.append(entry)

    item = {
        'url': url,
        'qid': qid,
        'question': question,
        'question_desc': question_desc,
        'best_answer': best_answer,
        'other_answers': other_answers
    }

    # Re-check qid right before inserting: several worker processes share the
    # collection, so another process may have stored this question meanwhile.
    # Only pages that yielded a question and at least one answer are kept.
    if not baiduzhidao_qa_coll.find_one({'qid': qid}) and (item['best_answer'] or item['other_answers']) and item['question']:
        baiduzhidao_qa_coll.insert_one(item)

        pprint.pprint(item)

    # Continue the crawl from the links on this page.
    parse_questions(soup = soup, url = '')


def random_qid():
    """Return a random 8-character digit string used as a candidate question id.

    Idiom fix: random.choices(..., k=8) replaces the original per-character
    random.sample(...)[0] list comprehension — same contract, one call.
    """
    return ''.join(random.choices('0123456789', k = 8))


if __name__ == '__main__':
    # Seed the crawl with ten worker processes, each starting from a random
    # candidate question URL.
    pool = [
        Process(target = parse_answer,
                args = ('https://zhidao.baidu.com/question/' + random_qid(),))
        for _ in range(10)
    ]

    for worker in pool:
        worker.start()

    for worker in pool:
        worker.join()
