from const import SORT_BY_DEF, HTML_HEADERS, JSON_HEADERS, LIMIT_SIZE
from crawl_utils import Controller
from REST_API import all_answers_api, answer_comment_api
import os, requests, json
from bs4 import BeautifulSoup
import time, random

class Utils(Controller):
    """HTTP helpers layered on the crawl ``Controller``: throttled GET
    requests that return HTML or JSON responses."""

    def __init__(self, crawl_times=-1, max_api_times=10, limit=20):
        super(Utils, self).__init__(crawl_times, max_api_times, limit)

    def check(self):
        # Count one more API call; once the per-batch cap is reached,
        # back off (self.sleep) and reset the counter.
        # NOTE(review): the attribute is spelled `craw_times` — presumably
        # defined by Controller; confirm it corresponds to the `crawl_times`
        # constructor argument.
        self.craw_times += 1
        if self.craw_times == self.max_api_times:
            self.sleep()
            self.craw_times = -1

    def get(self, url, headers):
        """GET `url` with the given `headers` after a random 3.5–8.5 s
        politeness delay.

        :param url: absolute URL to fetch
        :param headers: dict of HTTP request headers
        :return: the successful ``requests.Response``
        :raises requests.exceptions.ReadTimeout: on read timeout
        :raises requests.exceptions.ConnectionError: on connection failure
        :raises requests.HTTPError: on a non-2xx status code
        """
        time.sleep(random.random() * 5 + 3.5)
        try:
            response = requests.get(url, headers=headers, timeout=10)
            response.raise_for_status()
            return response
        except requests.exceptions.ReadTimeout as e:
            print('连接超时：', e)
            # BUG FIX: bare `raise` re-raises the ORIGINAL exception; the old
            # `raise requests.exceptions.ReadTimeout` raised a brand-new,
            # message-less instance and lost the traceback.
            raise
        except requests.exceptions.ConnectionError as e:
            print('无法连接：', e)
            raise
        except requests.HTTPError as e:
            print('连接错误：', e)
            raise

    def get_html(self, url):
        """GET `url` with browser-style HTML headers."""
        return self.get(url, HTML_HEADERS)

    def get_json(self, url):
        """GET `url` with JSON-API headers."""
        return self.get(url, JSON_HEADERS)

class ZhihuCrawler(Utils):
    """Crawl a Zhihu question thread: the question body, every answer,
    and each answer's comments, persisting every item as one JSON file."""

    def __init__(self, crawl_times=-1, max_api_times=10, limit=20):
        super(ZhihuCrawler, self).__init__(crawl_times, max_api_times, limit)

    def save_comment(self, dic, cmt_id, ans_dir):
        """Persist comment `dic` (and any child comments it carries) as
        individual JSON files under `ans_dir`, linking each child to its
        parent via ``in_reply_to_status_id``.

        :param dic: comment payload from the Zhihu API (mutated in place)
        :param cmt_id: expected id of `dic`
        :param ans_dir: directory the JSON files are written into
        :raises ValueError: if `dic['id']` does not match `cmt_id`
        """
        # Explicit raise instead of `assert`: asserts vanish under `python -O`.
        if dic['id'] != cmt_id:
            raise ValueError(f"comment id mismatch: {dic['id']} != {cmt_id}")

        # First extract and persist the child comments so that each file
        # holds exactly one comment.
        if len(dic.get('child_comments', [])) > 0:
            child_comments = dic.pop("child_comments")
            for item in child_comments:
                if os.path.exists(f"{ans_dir}/{item['id']}.json"):
                    # Only root comments can be saved first; an existing file
                    # for a child means we hit a duplicate.
                    print("****error****")
                    print(item)
                else:
                    item['in_reply_to_status_id'] = cmt_id
                    with open(f"{ans_dir}/{item['id']}.json", 'w', encoding='utf-8') as fw:
                        # BUG FIX: dump the child comment `item`; the old code
                        # wrote the parent `dic` into every child's file.
                        json.dump(item, fw, ensure_ascii=False)

        if os.path.exists(f"{ans_dir}/{cmt_id}.json"):
            # Already saved earlier as somebody else's child comment.
            return
        with open(f"{ans_dir}/{cmt_id}.json", 'w', encoding='utf-8') as fw:
            json.dump(dic, fw, ensure_ascii=False)

    def answerComments(self, ans_id, ans_dir):
        """Page through the comments of answer `ans_id` and save each root
        comment (with its children) under `ans_dir`.

        :param ans_id: id of the answer whose comments are fetched
        :param ans_dir: directory the comment JSON files go into
        """
        warehouse = f"./{ans_dir}/response/"
        # Portable and race-free; the old `os.system("mkdir …")` was
        # shell-dependent and built a command from interpolated data.
        os.makedirs(warehouse, exist_ok=True)
        while not self.is_end():
            url = answer_comment_api(ans_id, LIMIT_SIZE, self.next_offset(), "normal")
            try:
                response = self.get_json(url)
            except requests.exceptions.RequestException:
                # get() already logged the failure; was a bare `except:` that
                # also swallowed KeyboardInterrupt.
                return
            thread = response.json()
            items = thread['data']
            for item in items:
                # BUG FIX: the old check was inverted
                # (`if os.path.exists(...): mkdir`), so the directory was
                # never actually created when missing.
                os.makedirs(ans_dir, exist_ok=True)
                if item['reply_to_author'] is None:
                    # Root comment: it replies directly to the answer.
                    item['in_reply_to_status_id'] = ans_id
                    # BUG FIX: save_comment(dic, cmt_id, ans_dir) — the old
                    # call passed a file path as `cmt_id` and omitted
                    # `ans_dir`, which would raise TypeError.
                    self.save_comment(item, item['id'], ans_dir)
            self.increase(num=len(items))

    def thread(self, question_id):
        """Crawl a full question thread: the question body (source.json),
        then all answers, then each answer's comments.

        :param question_id: a thread is a multi-agent conversation around a
            specific question
        :type question_id: str
        """
        warehouse = f"./{question_id}"
        os.makedirs(warehouse, exist_ok=True)
        raw_url = f"https://www.zhihu.com/question/{question_id}"
        try:
            resp = self.get_html(raw_url)
        except requests.exceptions.RequestException:
            return
        soup = BeautifulSoup(resp.text, 'html.parser', from_encoding='utf-8')
        initData = soup.find("script", id='js-initialData')
        if initData is None:
            return
        dic = json.loads(
            initData.text
        )['initialState']['entities']['questions'][f'{question_id}']
        # Normalize the question payload to the same schema as answers/comments.
        dic['content'] = dic.pop("detail")
        dic['in_reply_to_status_id'] = None
        with open(f"{warehouse}/source.json", 'w', encoding='utf-8') as fw:
            json.dump(dic, fw, ensure_ascii=False)

        ans_id_list = []
        while not self.is_end():
            url = all_answers_api(question_id, LIMIT_SIZE, self.next_offset(), SORT_BY_DEF)
            try:
                response = self.get_json(url)
            except requests.exceptions.RequestException:
                return
            thread = response.json()
            items = thread['data']
            for item in items:
                ans_dir = f"{warehouse}/{item['id']}"
                # BUG FIX: inverted existence check replaced by makedirs.
                os.makedirs(ans_dir, exist_ok=True)
                item['in_reply_to_status_id'] = f'{question_id}'
                with open(f"{ans_dir}/{item['id']}.json", 'w', encoding='utf-8') as fw:
                    json.dump(item, fw, ensure_ascii=False)
                ans_id_list.append(item['id'])
            self.increase(num=len(items))

        # NOTE(review): this relies on the Controller paging state (is_end /
        # next_offset) resetting between the answer loop above and each
        # answerComments call — confirm Controller handles that.
        for ans_id in ans_id_list:
            self.answerComments(ans_id, f"{warehouse}/{ans_id}")

