import json
import os
from queue import Queue
import queue
from threading import Thread
import time

import requests
from bs4 import BeautifulSoup

# CSS selectors for the page fragments this crawler extracts.
TOPIC_SELECTOR = ".zm-item-tag"  # anchors linking to related topics
QUESTION_SELECTOR = ".question_link"  # anchors linking to questions
DETAIL_SELECTOR = "div[id=zh-question-detail]"  # question description block
TITLE_SELECTOR = "#zh-question-title"  # question title element
EXPAND_SELECTOR = ".toggle-expand"  # "show full answer" expand links
ANSWERS_SELECTOR = ".zm-editable-content.clearfix"  # answer body elements
URL_HEAD = "http://www.zhihu.com"  # site root; hrefs in the markup are site-relative

# Delay between consecutive requests, to avoid being throttled/banned
# for hitting the site too frequently.
SLEEP_TIME = 0.3


def _put_by_selector(q, s, selector, soup):
    """

    :param q: queue
    :param s: set
    :param selector: css selector
    :param soup: soup
    """
    for url in map(lambda x: x['href'], soup.select(selector)):
        if url not in s:
            s.add(url)
            q.put(url)


class Crawler(Thread):
    """Worker thread: fetches one topic page and feeds the topic and
    question links found on it into the shared queues (deduplicated
    through the shared sets).
    """

    def __init__(self, url, top_queue, ques_queue, top_set, ques_set):
        """
        :param url: site-relative topic URL (e.g. "/topic/19554859")
        :param top_queue: queue of topic URLs still to crawl
        :param ques_queue: queue of question URLs still to parse
        :param top_set: set of topic URLs already discovered
        :param ques_set: set of question URLs already discovered
        """
        super().__init__()
        self.que_queue = ques_queue
        self.topic_set = top_set
        self.que_set = ques_set
        self.topic_queue = top_queue
        self.link = url

    def run(self):
        print("Crawl " + self.link)
        # timeout keeps a stalled request from hanging this thread forever;
        # the explicit parser avoids bs4's "no parser specified" warning
        # and platform-dependent parser selection.
        response = requests.get(URL_HEAD + self.link, timeout=30)
        soup = BeautifulSoup(response.text, "html.parser")
        _put_by_selector(self.topic_queue, self.topic_set, TOPIC_SELECTOR, soup)
        _put_by_selector(self.que_queue, self.que_set, QUESTION_SELECTOR, soup)


class Parser(Thread):
    """Worker thread: downloads one question page, extracts its title,
    detail and answers (expanding truncated answers), saves everything
    as one JSON file, and feeds further question links found on the
    page back into the shared queue.
    """

    def __init__(self, url, ques_queue, ques_set, folder):
        """
        :param url: site-relative question URL (e.g. "/question/123")
        :param ques_queue: queue of question URLs still to parse
        :param ques_set: set of question URLs already discovered
        :param folder: directory the resulting JSON file is written into
        """
        super().__init__()
        self.ques_queue = ques_queue
        self.ques_set = ques_set
        self.folder = folder
        self.url = url

    def run(self):
        link = URL_HEAD + self.url
        print("Parse " + link)
        # timeout keeps a stalled request from hanging this thread forever;
        # the explicit parser avoids bs4's "no parser specified" warning.
        soup = BeautifulSoup(requests.get(link, timeout=30).text, "html.parser")
        tmp = soup.select(TITLE_SELECTOR)
        if len(tmp) == 0:
            # Fetch/parse failed; the original author's note: the link could
            # be re-queued, or a dict could track each URL's life cycle.
            print(URL_HEAD + self.url + " 连接失败，可以重新加回队列，另一种实现是使用dic进行生命周期的追踪")
            return
        title = tmp[0].text.strip("\n")
        # The detail block can be absent; fall back to an empty string
        # rather than crashing the thread with an IndexError.
        details = soup.select(DETAIL_SELECTOR)
        detail = details[0].text if details else ""
        answers = [tag.text for tag in soup.select(ANSWERS_SELECTOR)]
        expands = soup.select(EXPAND_SELECTOR)
        if len(expands) != 0:
            Parser.__replace_by_completed_answer(answers, expands)
        result = {"title": title, "url": link, "detail": detail, "answers": answers}
        # Strip path separators so the title is a safe file name.
        file_name = title.replace("/", " ").replace("\\", "")
        # utf-8 explicitly: the JSON is dumped with ensure_ascii=False, so
        # the file contains raw CJK text; a non-UTF-8 platform default
        # encoding would make the dump fail.
        with open(os.path.join(self.folder, file_name), "w", encoding="utf-8") as f:
            json.dump(result, f, ensure_ascii=False)
        _put_by_selector(self.ques_queue, self.ques_set, QUESTION_SELECTOR, soup)

    @staticmethod
    def __replace_by_completed_answer(answers, expands):
        """Replace truncated answers (those ending with the "显示全部"
        marker) with the full text fetched from each answer's expand
        link. Mutates *answers* in place.
        """
        replacements = {}
        expand = [URL_HEAD + tag['href'] for tag in expands]
        # The question description's own expand link is javascript-only
        # and carries no fetchable URL, so drop it.
        if "javascript:;" in expand[0]:
            del expand[0]
            if len(expand) == 0:
                return
        count = 0
        for index, answer in enumerate(answers):
            if "显示全部" in answer[-20:]:  # marker appears only near the end
                if count >= len(expand):
                    # Fewer expand links than truncated answers: keep the
                    # remaining answers truncated instead of crashing.
                    break
                replacements[index] = Parser.__find_completed_answer(expand[count])
                count += 1
        for index, answer in replacements.items():
            answers[index] = answer

    @staticmethod
    def __find_completed_answer(link):
        """Fetch an answer's standalone page and return its full text."""
        time.sleep(SLEEP_TIME)  # throttle before every extra fetch
        soup = BeautifulSoup(requests.get(link, timeout=30).text, "html.parser")
        return soup.select(ANSWERS_SELECTOR)[0].text


# Cross-thread shutdown signal: parser_listen sets this to False when it
# finishes, which makes crawler_listen stop spawning new Crawler threads.
flag = True


def crawler_listen(num, top_queue, ques_queue, top_set, ques_set):
    """Dispatch up to *num* Crawler threads, one per topic link taken
    from *top_queue*.

    Stops early when the queue stays empty for 10 seconds or when the
    global ``flag`` has been cleared (by parser_listen finishing).
    """
    global flag

    for _ in range(num):
        if not flag:
            break
        try:
            topic_link = top_queue.get(timeout=10)
        except queue.Empty:
            break
        time.sleep(SLEEP_TIME)
        worker = Crawler(topic_link, top_queue, ques_queue, top_set, ques_set)
        worker.start()


def parser_listen(num, ques_queue, ques_set, save_folder):
    """Dispatch up to *num* Parser threads, one per question link taken
    from *ques_queue*, then clear the global ``flag`` so crawler_listen
    stops as well.

    Stops early when the queue stays empty for 10 seconds.
    """
    global flag

    for _ in range(num):
        try:
            question_link = ques_queue.get(timeout=10)
        except queue.Empty:
            break
        time.sleep(SLEEP_TIME)
        Parser(question_link, ques_queue, ques_set, save_folder).start()
    flag = False


if __name__ == "__main__":
    # Destination directory for the saved question JSON files.
    save_dir = "/home/hason/zhihu"
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)

    start_link = "/topic/19554859"  # seed topic the crawl starts from
    number_of_topic = 100
    number_of_question = 1000

    # Shared work queues and dedup sets for all worker threads.
    topic_queue, que_queue = Queue(), Queue()
    topic_set, que_set = set(), set()
    topic_queue.put(start_link)
    topic_set.add(start_link)

    crawl_thread = Thread(
        target=crawler_listen,
        args=(number_of_topic, topic_queue, que_queue, topic_set, que_set),
    )
    parse_thread = Thread(
        target=parser_listen,
        args=(number_of_question, que_queue, que_set, save_dir),
    )

    for t in (crawl_thread, parse_thread):
        t.start()
    for t in (crawl_thread, parse_thread):
        t.join()
