# -*- coding:utf-8 -*-

import requests
from lxml import etree
from fake_useragent import UserAgent
from http import cookiejar
import json
from datetime import datetime
import time
import re
import csv
from pymongo import MongoClient


requests.packages.urllib3.disable_warnings()  # 忽略HTTPS安全警告


"""
知乎-话题-精华
1.精华列表 2.获取列表中的详细内容 3.获取所有回答 4.获取评论内容
https://www.zhihu.com/topic/21239753/top-answers
账号:18241255868
密码:gkx55868
"""


class Search():
    """Scraper for Zhihu topic 21239753 (the "essence"/top-answers tab).

    Workflow (see module header): 1) fetch the essence feed, 2) scrape each
    question's detail page, 3) fetch all answers of a question, 4) fetch the
    root comments of a question or answer.  Results are stored in MongoDB via
    the module-level ``db`` handle or appended to a local CSV file.

    NOTE(review): every HTTP call goes through ``HttpUtils.do_request``,
    which is never imported in this file -- presumably a project-local
    helper.  Confirm the import, otherwise every method raises NameError.
    """

    def __init__(self):
        # CookieJar instance to hold cookies.
        # NOTE(review): never passed to HttpUtils below -- presumably kept for
        # a login flow; verify it is actually needed.
        # requests.utils.dict_from_cookiejar(html.cookies)  # cookies -> dict
        self.cookie = cookiejar.CookieJar()
        # ua = UserAgent(use_cache_server=False)  # disable the UA server cache
        self.headers = {
            # 'User-Agent': ua.random,
            'authority': 'www.zhihu.com',
            'method': 'GET',
            'path': '/api/v4/topics/21239753/feeds/essence?include=data%5B%3F%28target.type%3Dtopic_sticky_module%29%5D.target.data%5B%3F%28target.type%3Danswer%29%5D.target.content%2Crelationship.is_authorized%2Cis_author%2Cvoting%2Cis_thanked%2Cis_nothelp%3Bdata%5B%3F%28target.type%3Dtopic_sticky_module%29%5D.target.data%5B%3F%28target.type%3Danswer%29%5D.target.is_normal%2Ccomment_count%2Cvoteup_count%2Ccontent%2Crelevant_info%2Cexcerpt.author.badge%5B%3F%28type%3Dbest_answerer%29%5D.topics%3Bdata%5B%3F%28target.type%3Dtopic_sticky_module%29%5D.target.data%5B%3F%28target.type%3Darticle%29%5D.target.content%2Cvoteup_count%2Ccomment_count%2Cvoting%2Cauthor.badge%5B%3F%28type%3Dbest_answerer%29%5D.topics%3Bdata%5B%3F%28target.type%3Dtopic_sticky_module%29%5D.target.data%5B%3F%28target.type%3Dpeople%29%5D.target.answer_count%2Carticles_count%2Cgender%2Cfollower_count%2Cis_followed%2Cis_following%2Cbadge%5B%3F%28type%3Dbest_answerer%29%5D.topics%3Bdata%5B%3F%28target.type%3Danswer%29%5D.target.annotation_detail%2Ccontent%2Chermes_label%2Cis_labeled%2Crelationship.is_authorized%2Cis_author%2Cvoting%2Cis_thanked%2Cis_nothelp%3Bdata%5B%3F%28target.type%3Danswer%29%5D.target.author.badge%5B%3F%28type%3Dbest_answerer%29%5D.topics%3Bdata%5B%3F%28target.type%3Darticle%29%5D.target.annotation_detail%2Ccontent%2Chermes_label%2Cis_labeled%2Cauthor.badge%5B%3F%28type%3Dbest_answerer%29%5D.topics%3Bdata%5B%3F%28target.type%3Dquestion%29%5D.target.annotation_detail%2Ccomment_count%3B&limit=10&offset=15',
            'scheme': 'https',
            'accept': '*/*',
            'accept-encoding': 'gzip, deflate, br',
            'accept-language': 'zh-CN,zh;q=0.9',
            'cookie': '_zap=0cd063db-2a9b-467d-8d1d-44f6c8b867b4; _xsrf=7e96855f-cbb8-4d3e-a239-412611c29e41; d_c0="AMAZdsGHUBGPTqntPFzfMtHorYVKrW_Q9Gk=|1590242072"; _ga=GA1.2.1958429441.1590242076; _gid=GA1.2.488783735.1590242076; Hm_lvt_98beee57fd2ef70ccdd5ca52b9740c49=1590242075,1590243670,1590243753; capsion_ticket="2|1:0|10:1590243752|14:capsion_ticket|44:YjgxNGRkYzUwZThmNDNlYjgxZjIxN2E5OTkyNzUwY2I=|8157190ef4822ad9d3f6a407ca078e5963ecab9195629692267a6f1875f6dd6b"; SESSIONID=exj5Eh52iEsEzeQFIT2eHTQ7hG742RjxsMeWf9kdOIk; JOID=VFwWCknwE91aoN4JRv-VxYWLX19Sl2y9ZOG0O36dVKkV0b9aB1wvFQum2wRHZVPAHXEkJah_PwdhxtZD9OScEjk=; osd=VFkSAknwFtlSoN4MQveVxYCPV19Skmi1ZOGxP3adVKwR2b9aAlgnFQuj3wxHZVbEFXEkIKx3Pwdkwt5D9OGYGjk=; Hm_lpvt_98beee57fd2ef70ccdd5ca52b9740c49=1590244003; _gat_gtag_UA_149949619_1=1; KLBRSID=fe0fceb358d671fa6cc33898c8c48b48|1590244002|1590242072',
            'dnt': '1',
            'referer': 'https://www.zhihu.com/topic/21239753/top-answers',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36',
            # BUG FIX: this value was broken across a physical newline in the
            # original source (around "qap_question_visitor=") -- a SyntaxError
            # for a single-quoted string, presumably a paste artifact.
            # Rejoined into one literal.
            'x-ab-param': 'zr_rel_search=base;se_club_boost=1;se_topicfeed=1;se_video_dnn=1;li_video_section=0;zr_km_answer=open_cvr;se_specialbutton=0;tp_club_entrance=1;li_salt_hot=1;soc_adweeklynew=2;ug_newtag=1;se_new_bert=0;se_page_quality=1;se_cbert_index=1;pf_adjust=0;zr_answer_rec_cp=open;se_content0=1;se_backsearch=0;se_searchvideo=0;se_v040=0;tp_discover=0;pf_creator_card=1;li_vip_verti_search=0;zr_rec_answer_cp=close;se_colorfultab=1;se_dnn_mt_v2=0;li_ebook_gen_search=0;se_clarify=0;li_answer_test_2=0;li_svip_cardshow=1;se_mobilecard=0;li_answer_card=0;zr_intervene=0;se_hotsearch_2=1;se_multianswer=2;tp_topic_entry=0;tp_topic_tab_new=0-0-0;tsp_hotlist_ui=1;li_viptab_name=0;zr_slot_training=1;se_adsrank=4;tp_topic_tab=0;tp_m_intro_re_topic=1;ls_fmp4=0;se_hotsearch=1;zr_expslotpaid=1;se_cardrank_2=1;se_aa_base=0;top_ydyq=X;li_panswer_topic=0;zr_ans_rec=gbrank;se_college=default;se_whitelist=0;tp_club_qa_entrance=0;soc_iosweeklynew=2;top_quality=0;top_root=0;pf_noti_entry_num=0;li_answer_test=3;li__edu_cold_start=0;zr_search_sim2=0;top_hotcommerce=1;li_yxzl_new_style_a=1;li_se_section=1;se_expired_ob=0;ug_goodcomment_0=1;li_topics_search=0;li_training_chapter=0;soc_notification=1;zr_search_sims=0;se_searchwiki=0;se_relation_1=2;tp_club__entrance2=0;top_ebook=0;zr_article_new=close;zr_slotpaidexp=1;zr_training_first=false;se_relationship=0;tp_meta_card=0;tp_club_reactionv2=0;pf_profile2_tab=0;se_video_dnn_2=0;se_cardrank_3=0;tp_move_scorecard=0;se_multi_images=0;se_v039=0;top_test_4_liguangyi=1;pf_foltopic_usernum=50;se_ffzx_jushen1=0;tp_header_style=1;tp_club_top=0;zr_art_rec=base;zr_training_boost=false;se_v2_highlight=0;tp_club_flow_ai=0;zr_zr_search_sims=0;se_new_cbert=0;tp_sft=a;ls_recommend_test=0;ls_videoad=2;li_svip_tab_search=1;se_v040_2=0;top_universalebook=1;ug_follow_topic_1=2;qap_question_visitor=0;zr_search_topic=0;li_assessment_show=1;li_paid_answer_exp=0;se_zvideo_bert=1;se_oneboxtopic=0;tp_topic_style=0;pf_newguide_vertical=0;ls_video_commercial=0;se_entity22=0;zw_sameq_sorce=999;zr_search_paid=1;se_col_boost=0;se_clubrank=1;se_v038=0;qap_question_author=0;qap_thanks=1;se_merger_v2=0;se_billboardsearch=0;se_cardrank_4=1;tp_score_1=a;tp_movie_ux=0;li_literature=0;qap_labeltype=1;se_hotmore=2;se_sug_term=0;tp_club_feedv2=1;top_v_album=1;pf_fuceng=1;li_catalog_card=1;zr_test_aa1=0',
            'x-requested-with': 'fetch',
            'x-zse-83': '3_2.0',
            'x-zse-86': '1.0_aRt0k4e8o_2fF9OyyCO8bAUB60FXeXN0yhtqQ79y6R2Y',
            'x-zst-81': '3_2.0ae3TnRUTEvOOUCNMTQnTSHUZo02p-HNMZBO8YDRy2Xtue_t0K6P0EAuy-LS9-hp1DufI-we8gGHPgJO1xuPZ0GxCTJHR7820XM20cLRGDJXfgGCBxupMuD_Ie8FL7AtqM6O1VDQyQ6nxrRPCHukMoCXBEgOsiRP0XL2ZUBXmDDV9qhnyTXFMnXcTF_ntRueThXC9SvULZhYV-GOqG7p1xgVYy9SM6cH8YUS1rLeBCCF8k6Hm_qgCVqgLWqCPvXCfBMS8Ec9yhBNY8UHGmRVGbrpOhBLpcwSGmCYCkrOOyhoMicef-GcCXgXL0vNBPqpKxvC_2RgfJHV1QwCGXDpmSM_Y_qCBJLx1593GrUN1WCe18wHC5qfzFbp1kX2mWrHBMiOf9bp0HGO_QGLZiCNLcGXCurNLwCVCJbXKzDU_hu3MVcfy2UpCSTxYkic9uhO1MbxBOv3mOCcm-UF8j9CCqhwYoLXYahtL1THLwBHC'

        }

    # Fetch the topic's essence (hot) list, page by page.
    def get_contents(self, limit, cursor):
        """Fetch one page of the essence feed, store answers, then recurse.

        :param limit: page size (5 on the first call, 10 afterwards)
        :param cursor: paging offset (0 on the first call, 15 afterwards)
        """
        postData = {
            'include': 'data[?(target.type=topic_sticky_module)].target.data[?(target.type=answer)].target.content,relationship.is_authorized,is_author,voting,is_thanked,is_nothelp;data[?(target.type=topic_sticky_module)].target.data[?(target.type=answer)].target.is_normal,comment_count,voteup_count,content,relevant_info,excerpt.author.badge[?(type=best_answerer)].topics;data[?(target.type=topic_sticky_module)].target.data[?(target.type=article)].target.content,voteup_count,comment_count,voting,author.badge[?(type=best_answerer)].topics;data[?(target.type=topic_sticky_module)].target.data[?(target.type=people)].target.answer_count,articles_count,gender,follower_count,is_followed,is_following,badge[?(type=best_answerer)].topics;data[?(target.type=answer)].target.annotation_detail,content,hermes_label,is_labeled,relationship.is_authorized,is_author,voting,is_thanked,is_nothelp;data[?(target.type=answer)].target.author.badge[?(type=best_answerer)].topics;data[?(target.type=article)].target.annotation_detail,content,hermes_label,is_labeled,author.badge[?(type=best_answerer)].topics;data[?(target.type=question)].target.annotation_detail,comment_count;',
            'limit': limit,   # 5 on the first request, 10 afterwards
            'offset': cursor  # 0 on the first request, 15 afterwards
        }
        count = 0  # per-page counter of answers handed to get_detail
        url = f"https://www.zhihu.com/api/v4/topics/21239753/feeds/essence"
        html = HttpUtils.do_request("GET", url, self.headers, postData)
        data_json = json.loads(html.text)
        # BUG FIX: 'next' shadowed the builtin, and findall(...)[0] raised
        # IndexError when the next link lacked limit/offset; guard instead.
        next_url = data_json['paging']['next']
        limit_match = re.findall(r"limit=(.*?)&", next_url)
        offset_match = re.findall(r"offset=(.*?)$", next_url)
        for d in data_json['data']:
            dict_data = dict()
            dict_data['anwser_id'] = d['target']['id']  # answer/article id

            if d['target']['type'] == 'article':
                dict_data['title'] = d['target']['title']  # title
                dict_data['type'] = d['target']['type']  # type
                created = d['target']['created']  # creation timestamp
                url_detail = f"https://zhuanlan.zhihu.com/p/{dict_data['anwser_id']}"
            elif d['target']['type'] == 'answer':
                dict_data['title'] = d['target']['question']['title']  # title
                dict_data['type'] = d['target']['question']['type']  # type
                dict_data['question_id'] = d['target']['question']['id']  # question id
                created = d['target']['question']['created']  # creation timestamp
                url_detail = f"https://www.zhihu.com/question/{dict_data['question_id']}/answer/{dict_data['anwser_id']}"  # link
            else:
                # BUG FIX: any other target type left `created`/`url_detail`
                # unbound and crashed below with UnboundLocalError -- skip it.
                continue
            dict_data['created'] = datetime.fromtimestamp(int(created)).strftime("%Y-%m-%d %H:%M:%S")  # time
            dict_data['content'] = d['target']['content']  # body HTML
            dict_data['comment_count'] = d['target']['comment_count']  # comment count
            dict_data['voteup_count'] = d['target']['voteup_count']  # upvotes
            dict_data['user_id'] = d['target']['author']['id']  # author id
            dict_data['url_token'] = d['target']['author']['url_token']  # author url token
            dict_data['name'] = d['target']['author']['name']  # author name
            dict_data['headline'] = d['target']['author']['headline']  # author headline
            dict_data['gender'] = d['target']['author']['gender']  # author gender
            dict_data['url'] = url_detail
            print(str(dict_data))
            # Answers additionally get their question detail page scraped.
            if d['target']['type'] == 'answer':
                count += 1
                print(f"========{count}=============")
                # throttle before hitting the detail page
                time.sleep(1)
                self.get_detail(url_detail, dict_data)

        if limit_match and offset_match:
            limit, offset = limit_match[0], offset_match[0]
            print(f"=====limit:{limit}===offset:{offset}=====")
            # NOTE(review): recursion depth grows with the page count; fine
            # for a few hundred pages, convert to a loop for deep crawls.
            self.get_contents(limit, offset)

    # Scrape a question's detail page and store summary stats in MongoDB.
    def get_detail(self, url, item):
        """Scrape the question page at *url* and insert its stats into Mongo.

        :param url: full question/answer (or column article) URL
        :param item: feed item dict from the caller (currently unused)
        """
        headers = {
            'authority': 'www.zhihu.com',
            'method': 'GET',
            'path': '/question/31152483/answer/52736978',
            'scheme': 'https',
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'accept-encoding': 'gzip, deflate, br',
            'accept-language': 'zh-CN,zh;q=0.9',
            'cache-control': 'max-age=0',
            # 'cookie': '_zap=f6f74f04-f98b-40db-a672-dfcb506ecfa9; _xsrf=83f09dcb-8246-4c9a-ba31-6a594354fc29; d_c0="AFCbzJkVURGPTsbuWc52bfJdeGVFNfQyi24=|1590279256"; Hm_lvt_98beee57fd2ef70ccdd5ca52b9740c49=1590279259; _ga=GA1.2.1054371391.1590279260; _gid=GA1.2.189812088.1590279260; capsion_ticket="2|1:0|10:1590282120|14:capsion_ticket|44:OTM2MmE1YTZiZTRlNDNiYzkxMzU3ODMwZTE0MzhiOTU=|08b014b6d8bb8b94fb656fe72f278f523216a34638d0dd9f70e60da53d40abdf"; SESSIONID=wsJh1osUm2CobDPOpEWyNe9jRMpg6ijsaohWeGGNwiR; JOID=W10SBEw4364oQqUBKTbdvSzFi1I2Q67kQH3GcHxb4e9HIO9dZL4aZ3hAqwMpzg0jspdSJoT-XAkGpi81wa5xFj4=; osd=U1oUBU4w2KgpQK0GLzfftSvDilA-RKjlQnXBdn1Z6ehBIe1VY7gbZXBHrQIrxgols5VaIYL_XgEBoC43yal3Fzw=; z_c0="2|1:0|10:1590282211|4:z_c0|92:Mi4xS0pSN0d3QUFBQUFBVUp2TW1SVlJFU1lBQUFCZ0FsVk40eFczWHdBMXdaRXRub3dId1NDQ2tYaUdlc1RFbV9kaG9B|9125305348238a2de4f58bd3cc9eaad25f468bbd8d3b9f4c158e451cc633d707"; _gat_gtag_UA_149949619_1=1; Hm_lpvt_98beee57fd2ef70ccdd5ca52b9740c49=1590282389; KLBRSID=4843ceb2c0de43091e0ff7c22eadca8c|1590282433|1590279256',
            'dnt': '1',
            'sec-fetch-dest': 'document',
            'sec-fetch-mode': 'navigate',
            'sec-fetch-site': 'none',
            'sec-fetch-user': '?1',
            'upgrade-insecure-requests': '1',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'
        }
        try:
            html_detail = HttpUtils.do_request("GET", url, headers, "")
            root_detail = etree.HTML(html_detail.text)
            dict_data = dict()
            tag_count = 0
            dict_data['title'] = root_detail.xpath('//h1[@class="QuestionHeader-title"]/text()')[0]  # title
            tag = "|".join(root_detail.xpath('//div[@class="QuestionHeader-topics"]/div//text()'))  # topic tags
            for t in tag.split("|"):
                tag_count += 1
                dict_data['tag_' + str(tag_count)] = t

            dict_data['gz'] = "".join(root_detail.xpath('//div[@class="NumberBoard-item"][1]//text()'))\
                .replace("关注者", "").strip()  # follower count
            dict_data['ll'] = "".join(root_detail.xpath('//div[@class="NumberBoard-item"][2]//text()'))\
                .replace("被浏览", "").strip()  # view count
            dict_data['zt'] = "".join(root_detail.xpath('//button[@class="Button Button--plain"]//text()'))\
                .replace("人赞同了该回答", "").strip()  # upvotes on the answer
            # total number of answers (header text, or the "view all" link)
            if len(root_detail.xpath('//h4[@class="List-headerText"]//text()')) > 0:
                hd = root_detail.xpath('//h4[@class="List-headerText"]//text()')[0]
            else:
                hd = root_detail.xpath('//a[@class="QuestionMainAction ViewAll-QuestionMainAction"]//text()')[0]
            dict_data['hd'] = hd.replace("查看全部", "").replace("个回答", "").strip()
            dict_data['ctime'] = root_detail.xpath('//div[@class="ContentItem-time"]/a/span/text()')[0]\
                .replace("编辑于", "").replace("发布于", "").strip()
            dict_data['url'] = url
            print(f"============详情:{dict_data['title']}=============")
            self.insertItem("知乎", dict_data)
        except Exception as e:
            # BUG FIX: was a bare ``except:`` that swallowed everything (even
            # KeyboardInterrupt) without any hint of what failed.
            print(f"{url}")
            print(f"get_detail failed: {e}")

    # Fetch the root comments under a question, page by page.
    def get_question_comments(self, question, offset):
        """
        Fetch the root comments under a question.
        :param question: question id
        :param offset: paging cursor, 0 to start
        :return: None
        """
        headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36',
        }

        postData = {
            'order': 'normal',
            'limit': '10',
            'offset': offset,  # 0 initially
            'status': 'open'
        }
        url = f"https://www.zhihu.com/api/v4/questions/{question}/root_comments"
        html = HttpUtils.do_request("GET", url, headers, postData)
        data_json = json.loads(html.text)
        # BUG FIX: 'next' shadowed the builtin; findall(...)[0] guarded below.
        next_url = data_json['paging']['next']
        limit_match = re.findall(r"limit=(.*?)&", next_url)
        offset_match = re.findall(r"offset=(.*?)&", next_url)
        if len(data_json['data']) > 0:
            for d in data_json['data']:
                dict_data = dict()
                dict_data['name'] = d['author']['member']['name']  # commenter name
                dict_data['url_token'] = d['author']['member']['url_token']  # commenter id
                dict_data['content'] = d['content']  # comment body
                dict_data['vote_count'] = d['vote_count']  # upvote count
                dict_data['created_time'] = datetime.fromtimestamp(int(d['created_time'])).strftime("%Y-%m-%d %H:%M:%S")  # time
                # BUG FIX: the parsed comment was silently discarded; echo it
                # like the sibling methods do.
                print(str(dict_data))

            # next page
            if limit_match and offset_match:
                limit, offset = limit_match[0], offset_match[0]
                print(f"=====limit:{limit}===offset:{offset}=====")
                self.get_question_comments(question, offset)

    # Fetch the root comments under an answer, page by page.
    def get_answer_comments(self, answerid, offset):
        """
        Fetch the root comments under an answer and append them to a CSV.
        :param answerid: answer id
        :param offset: paging cursor, 0 to start
        :return: None
        """
        headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'
        }

        postData = {
            'order': 'normal',
            'limit': '20',
            'offset': offset,  # 0 initially
            'status': 'open'
        }
        url = f"https://www.zhihu.com/api/v4/answers/{answerid}/root_comments"
        html = HttpUtils.do_request("GET", url, headers, postData)
        html.encoding = "utf-8"
        data_json = json.loads(html.text)
        # BUG FIX: 'next' shadowed the builtin; findall(...)[0] guarded below.
        next_url = data_json['paging']['next']
        limit_match = re.findall(r"limit=(.*?)&", next_url)
        offset_match = re.findall(r"offset=(.*?)&", next_url)
        if len(data_json['data']) > 0:
            # BUG FIX: the CSV was reopened for every single row and never got
            # a header; open once per page and write the header if the file is
            # still empty (append mode positions at end-of-file).
            with open("知乎_1.csv", "a", encoding="utf-8-sig", newline="") as csvfile:
                fileheader = ["name", "url_token", "content", "vote_count", "created_time"]
                writer = csv.DictWriter(csvfile, fieldnames=fileheader)
                if csvfile.tell() == 0:
                    writer.writeheader()
                for d in data_json['data']:
                    dict_data = dict()
                    dict_data['name'] = d['author']['member']['name']  # commenter name
                    dict_data['url_token'] = d['author']['member']['url_token']  # commenter id
                    dict_data['content'] = d['content']  # comment body
                    dict_data['vote_count'] = d['vote_count']  # upvote count
                    dict_data['created_time'] = datetime.fromtimestamp(int(d['created_time'])).strftime("%Y-%m-%d %H:%M:%S")  # time
                    print(str(dict_data))
                    writer.writerow(dict_data)

            # next page
            if limit_match and offset_match:
                limit, offset = limit_match[0], offset_match[0]
                print(f"=====limit:{limit}===offset:{offset}=====")
                self.get_answer_comments(answerid, offset)

    # Fetch all answers of a question, page by page.
    def get_question(self, questionid, offset):
        """Fetch every answer of a question and print it.

        :param questionid: question id
        :param offset: paging cursor, 0 to start
        """
        headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'
        }
        postData = {
            'include': 'data[*].is_normal,admin_closed_comment,reward_info,is_collapsed,annotation_action,annotation_detail,collapse_reason,is_sticky,collapsed_by,suggest_edit,comment_count,can_comment,content,editable_content,voteup_count,reshipment_settings,comment_permission,created_time,updated_time,review_info,relevant_info,question,excerpt,relationship.is_authorized,is_author,voting,is_thanked,is_nothelp,is_labeled,is_recognized,paid_info,paid_info_content;data[*].mark_infos[*].url;data[*].author.follower_count,badge[*].topics',
            'offset': offset,
            'limit': 10,
            'sort_by': 'default',
            'platform': 'desktop'
        }
        url = f"https://www.zhihu.com/api/v4/questions/{questionid}/answers"
        html_question = HttpUtils.do_request("GET", url, headers, postData)
        data_json = json.loads(html_question.text)
        # BUG FIX: 'next' shadowed the builtin; findall(...)[0] guarded below.
        next_url = data_json['paging']['next']
        limit_match = re.findall(r"limit=(.*?)&", next_url)
        offset_match = re.findall(r"offset=(.*?)&", next_url)
        if len(data_json['data']) > 0:
            for d in data_json['data']:
                dict_data = dict()
                dict_data['id'] = d['id']  # answer id
                dict_data['name'] = d['author']['name']  # author name
                dict_data['voteup_count'] = d['voteup_count']  # upvote count
                dict_data['comment_count'] = d['comment_count']  # comment count
                dict_data['created_time'] = datetime.fromtimestamp(int(d['created_time']))\
                    .strftime("%Y-%m-%d %H:%M:%S")  # time
                dict_data['content'] = d['content']  # answer body
                print(str(dict_data))

            # next page
            if limit_match and offset_match:
                limit, offset = limit_match[0], offset_match[0]
                print(f"=====limit:{limit}===offset:{offset}=====")
                self.get_question(questionid, offset)


    def get_user(self, url):
        """Scrape a user's profile page.

        NOTE(review): the *url* parameter is ignored (overwritten by a
        hard-coded profile) and the scraped fields are discarded -- this
        looks like an unfinished experiment; confirm before relying on it.
        """
        headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'
        }
        url = "https://www.zhihu.com/people/shi-zi-nan-hai"
        html = HttpUtils.do_request("GET", url, headers, "")
        root = etree.HTML(html.text)
        username = root.xpath('//span[@class="ProfileHeader-name"]/text()')[0]  # display name
        desc = root.xpath('//span[@class="ztext ProfileHeader-headline"]/text()')[0]  # headline
        hy = root.xpath('//span[@class="ProfileHeader-detailValue"]/text()')[0]  # industry

    def get_c(self, url):
        """Walk the links of a hard-coded column article and scrape each one.

        NOTE(review): the *url* parameter is ignored; the 100/150 window of
        links processed is an ad-hoc resume point from a previous run.
        """
        headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'
        }
        url = "https://zhuanlan.zhihu.com/p/20111054"
        html_c = HttpUtils.do_request("GET", url, headers, "")
        root = etree.HTML(html_c.text)
        count = 0  # links seen so far
        line = 0   # links actually processed
        for r in root.xpath('//p/a'):
            count += 1
            if count > 100:
                line += 1
                url_c = r.xpath('./@href')[0]
                txt = r.xpath('./text()')[0]
                print(f"========{line}=============")
                if line < 150:
                    # time.sleep(1)
                    self.get_detail(url_c, dict())

    def insertItem(self, tableName, data):
        """Insert *data* into the MongoDB collection *tableName*.

        Relies on the module-level global ``db`` created in ``__main__``.
        """
        my_set = db[tableName]
        my_set.insert_one(data)


if __name__ == '__main__':
    # MongoDB connection; ``db`` is read as a module-level global by
    # Search.insertItem, so it must stay at this scope under this name.
    conn = MongoClient('127.0.0.1', 27017)
    db = conn["Test"]
    search = Search()

    # # Fetch a user's profile info
    # search.get_user("")

    # # Fetch the topic's essence (hot) list
    # search.get_contents(10, 0)

    # search.get_c("")

    # # Fetch the comments of a question by question id
    # search.get_question_comments("407635860", 0)

    # # Fetch the comments of an answer by answer id
    search.get_answer_comments("1386632814", 0)
    #
    # Fetch all answers of a question by question id
    # search.get_question("407635860", 0)