import requests
from hyper.contrib import HTTP20Adapter
from fake_useragent import UserAgent
import csv
import pandas as pd

class ZhihuBot():
    '''
        Crawl the complete follower list of a Zhihu user and flag
        follower accounts that look like bots (empty profile, zero
        activity, feed containing only "followed a topic" events).
    '''

    def __init__(self):
        # A Session keeps cookies across requests automatically.
        self.session = requests.Session()
        # Mount an HTTP/2 adapter for zhihu.com: the ':'-prefixed
        # pseudo-headers below only work over HTTP/2.
        self.session.mount('https://www.zhihu.com', HTTP20Adapter())

        # Fixed request headers. Keys starting with ':' are HTTP/2
        # pseudo-headers and require the HTTP20Adapter above.
        # NOTE(review): x-zse-86 / x-zst-81 look like captured anti-crawler
        # tokens and presumably expire — confirm before relying on them.
        self.headers = {
            ':authority': 'www.zhihu.com',
            ':method': 'GET',
            ':scheme': 'https',
            'accept': '*/*',
            'accept-encoding': 'gzip, deflate, br',
            'accept-language': 'zh-CN,zh;q=0.9',
            'cache-control': 'no-cache',
            'dnt': '1',
            'pragma': 'no-cache',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': UserAgent().random,
            'x-ab-param': 'zr_rec_answer_cp=close;tp_club_tab=0;qap_article_like=1;zr_video_rank=new_rank;soc_feed_intimacy=2;li_svip_tab_search=0;zr_rel_search=base;se_backsearch=0;tp_club_qa=1;soc_iosreadfilter=0;tp_topic_tab=0;zr_video_recall=current_recall;zr_search_sim=0;tp_club_pk=1;se_hotsearch_2=1;tp_meta_card=0;li_purchase_test=0;se_college=default;soc_ri_merge=0;li_answers_link=0;se_hotmore=2;soc_iosreadline=0;se_billboardsearch=0;li_sku_bottom_bar_re=0;pf_noti_entry_num=0;top_ydyq=X;li_se_section=0;top_test_4_liguangyi=1;pf_newguide_vertical=0;tp_qa_metacard_top=top;se_pek_test=1;soc_yxzl_zcfw=0;soc_zuichangfangwen=0;ls_fmp4=0;se_ffzx_jushen1=0;se_col_boost=0;soc_iosintimacy=2;li_paid_answer_exp=0;tp_club_android_feed=old;tp_club_feed=1;zr_slot_cold_start=aver;se_aa_base=0;se_colorfultab=1;se_clubrank=0;tp_header_style=1;ug_follow_answerer=0;tp_score_1=a;tp_discover=0;soc_cardheight=2;se_prf=0;soc_adreadfilter=0;soc_zcfw_badcase=0;tp_topic_style=0;ug_goodcomment_0=1;se_cardrank_2=1;tp_qa_metacard=1;zr_video_rank_nn=new_rank;ug_zero_follow=0;li_assessment_show=1;zr_km_answer=open_cvr;se_new_merger=1;se_multianswer=2;tp_sft=a;top_hotcommerce=1;li_hot_voted=0;ug_fw_answ_aut_1=0;qap_thanks=1;zr_intervene=0;tp_club_discover=0;top_v_album=1;ug_follow_answerer_0=0;li_ebook_read=0;se_cate_l3=0;zr_km_feed_nlp=old;se_sug_term=0;tsp_hotlist_ui=1;ls_recommend_test=0;li_vip_verti_search=0;qap_labeltype=1;zr_search_paid=0;se_entity_model_14=0;tp_club_header=1;tp_club_tab_feed=0;soc_wonderuser_recom=2;soc_ioshotrenew=0;se_cardrank_4=1;zw_payc_qaedit=0;zr_ans_rec=gbrank;se_hotsearch_num=1;soc_zcfw_shipinshiti=1;li_salt_hot=1;se_highlight_new=0;se_pek_test2=1;se_rf_w=0;se_aa=0;zr_update_merge_size=1;se_dnn_mt_v2=0;tp_sft_v2=d;soc_userrec=2;soc_stickypush=1;zr_article_new=close;pf_creator_card=1;se_pek_test3=1;tp_club_qa_pic=1;tp_club_pic_swiper=1;qap_question_author=0;se_whitelist=0;li_catalog_card=1;soc_adpinweight=0;se_entity_model=0;tp_club_android_join=1;ug_zero_follow_0=0;top_ebook=0;top_root=0;pf_foltopic_usernum=50;tp_topic_head=0;soc_zcfw_broadcast2=1;tsp_videobillboard=1;li_ebok_chap=0;se_specialbutton=0;se_relation_1=2;se_hotsearch=1;tp_topic_tab_new=0-0-0;soc_newfeed=2;soc_brdcst4=3;top_new_feed=5;li_yxzl_new_style_a=1;se_club_post=5;tp_topic_entry=0;zr_training_first=false;soc_notification=1;soc_adhotrenew=0;se_hot_timebox=0;se_cardrank_3=0;se_article_icon=0;soc_leave_recommend=2;soc_authormore2=2;zw_sameq_sorce=999;zr_slot_training=1;li_ebook_gen_search=0;li_video_section=0;soc_iospinweight=0;qap_payc_invite=0;soc_authormore=2;zr_slot_up2=0;se_topicfeed=0;li_education_box=0;qap_question_visitor= 0;top_quality=0;tp_m_intro_re_topic=1;soc_adreadline=0;tp_qa_toast=1;se_page_quality=0;zr_test_aa1=0;tp_discovery_ab_1=0;li_answer_test_2=0;li_svip_cardshow=1;ug_newtag=0;top_universalebook=1;zr_answer_rec_cp=open;zr_training_boost=false;se_content0=0;se_cbert_index=0;tp_discover_copy=0;ls_videoad=2;zr_art_rec=base;tp_club_join=0;tp_topic_rec=1;qap_ques_invite=0;se_searchwiki=0;pf_adjust=0;se_rel_bi=0;zr_expslotpaid=1;zr_slotpaidexp=1;soc_zcfw_broadcast=0;pf_profile2_tab=0;se_expired_ob=0;se_new_p=0;tp_club_pic=0.6;tp_sticky_android=2;pf_fuceng=1;ug_follow_topic_1=2;li_answer_card=0',
            'x-requested-with': 'fetch',
            'x-zse-83': '3_2.0',
            'x-zse-86': '1.0_a0NBbQHBrTSYcMY8mRt0Fbr8oXYpHCFqyUYyci9BFB2f',
            'x-zst-81': '3_2.0ae3TnRUTEvOOUCNMTQnTSHUZo02p-HNMZBO8YDQ0SXtuo7YyB6P0Eiuy-LS9-hp1DufI-we8gGHPgJO1xuPZ0GxCTJHR7820XM20cLRGDJXfgGCBxupMuD_Ie8FL7AtqM6O1VDQyQ6nxrRPCHukMoCXBEgOsiRP0XL2ZUBXmDDV9qhnyTXFMnXcTF_ntRueTh02m8qr_SMOmPgx95GNMkqSY3vx_jBH0m9t1bgrqoUoMQuCK7rOG20cBoUOPvLcV8DH1ycHG8qVYeCN_vBVZJ6NOqBoKLDHLyqOCnuSCqqLBzDwfj9OBoDpBQ9FxwhN1XCLLXhV_tgFL_qoqDUo1BrcLeLN8swSLAhtGUhumSJO1Cq395qfzCremZBoX3B3GhGOGYUSCigVBXwOYj9CLxhXBFqpOIBC_DvL_OguLgCSKqCxmFuY9ab3mTqNm8veYHUVY2UH_bvO9SqC_sUXyVg39e7SO6eNG-hXO3J9C',
        }

        # Paging state, mutated while walking the follower list.
        self.offset = 0
        self.limit = 20
        self.params = {
            'include': r'data[*].answer_count,articles_count,gender,follower_count,is_followed,is_following,badge[?(type=best_answerer)].topics',
            'offset': self.offset,
            'limit': self.limit,
        }

        # Used to build the per-page 'referer' header.
        self.page_index = 1

        # CSV columns for each stored follower record.
        # NOTE: 'filednames' is a historical typo for 'fieldnames';
        # kept as-is so existing callers/attribute access keep working.
        self.filednames = ['id', 'url_token', 'name', 'headline', 'is_vip', 'follower_count', 'answer_count', 'articles_count']

        # Output path for the follower data.
        self.file_path = 'fans.csv'

    def query(self, url_token):
        '''
            Fetch every follower of the user identified by `url_token`
            and store them as CSV rows in `self.file_path`.
        '''
        self.url_token = url_token
        # Official Zhihu API endpoint for a member's followers.
        self.url = f"https://www.zhihu.com/api/v4/members/{url_token}/followers"

        # Reset paging state so query() is safe to call more than once
        # on the same bot instance.
        self.offset = 0
        self.page_index = 1

        # Write the CSV header. newline='' is required by the csv module
        # to avoid blank rows on Windows; encoding matches the appends below.
        with open(self.file_path, 'w', newline='', encoding='utf-8') as f:
            writer = csv.DictWriter(f, self.filednames)
            writer.writeheader()

        # Walk every page until the API reports the end of the list.
        print(f'开始抓取url_token为{url_token}的用户的所有关注者列表！')
        while self._query_single():
            # Advance paging state after each successfully read page.
            self.offset += self.limit
            self.page_index += 1
        print("关注者列表信息已抓取完毕！")

    def _query_single(self):
        '''
            Fetch one page of followers, append the records to the CSV
            file, and return True while more pages remain.
        '''
        print(f"fetching page {self.page_index}...")

        # The HTTP/2 ':path' pseudo-header must mirror the real request
        # target, including the percent-encoded 'include' query string,
        # or Zhihu's anti-crawler check rejects the request.
        self.headers.update({
            ':path': '/api/v4/members/' + self.url_token +
                r'/followers?include=data%5B*%5D.answer_count%2Carticles_count%2Cgender%2Cfollower_count%2Cis_followed%2Cis_following%2Cbadge%5B%3F(type%3Dbest_answerer)%5D.topics&offset=' +
                str(self.offset) + '&limit=' + str(self.limit),
            'referer': f'https://www.zhihu.com/people/{self.url_token}/followers?page={self.page_index}',
        })

        # Keep the query parameters in sync with the paging state.
        self.params.update({
            'offset': self.offset,
            'limit': self.limit,
        })

        resp = self.session.get(url=self.url, headers=self.headers, params=self.params).json()

        # Persist vip status, headline, answer/article/follower counts, etc.
        with open(self.file_path, 'a', newline='', encoding="utf-8") as f:
            writer = csv.DictWriter(f, fieldnames=self.filednames)
            result = []
            for item in resp['data']:
                tmp = {field: item[field] for field in self.filednames if field != 'is_vip'}
                # 'is_vip' lives nested under 'vip_info' in the payload.
                tmp['is_vip'] = item['vip_info']['is_vip']
                result.append(tmp)
            writer.writerows(result)

        return not resp['paging']['is_end']  # True while pages remain

    def check_bot(self):
        '''
            Read the follower data back from `self.file_path` and mark
            each account as human, suspected bot, cleared, or confirmed bot.
        '''
        # First pass on the saved data: an account with no headline, no
        # vip status and zero followers/answers/articles is suspicious.
        df = pd.read_csv(self.file_path)
        df['status'] = '非机器人'
        flag = (pd.isnull(df.headline)) & (df.is_vip == False) & (df.follower_count == 0) & (df.answer_count == 0) & (df.articles_count == 0)
        df.loc[flag, 'status'] = '待核验'

        # url_tokens that still need their activity feed checked.
        url_lst = df.loc[flag, 'url_token']
        print(f"您的好友列表中有{len(url_lst)}/{len(df)}个账号疑似机器人！")

        index = 1
        count = 0

        # The ':path' pseudo-header points at the followers endpoint and
        # would break the activity-feed requests below. It only exists
        # after query() has run, so use a default to avoid a KeyError.
        self.headers.pop(':path', None)

        # Second pass: inspect the activity feed of every suspect.
        try:
            for item in url_lst:
                print(f'checking {index}/{len(url_lst)} ', end=" ")
                if self._check_index(item):
                    count += 1
                    print(f'您的粉丝{item}是个机器人！ 已经检出机器人{count}个！')
                    df.loc[df.url_token == item, 'status'] = '机器人确诊'
                else:
                    print(f'您的粉丝{item}存在关注话题以外的动态，解除疑似！')
                    df.loc[df.url_token == item, 'status'] = '解除疑似'
                index += 1
        except Exception as e:
            # Best-effort: report the failure but still save progress below.
            print(e)
        finally:
            df.to_csv(self.file_path, index=False)  # save even on failure
            print(f"您当前拥有{count}/{len(df)}个机器人粉丝！nice job！")

    def _check_index(self, url_token, referer=None) -> bool:
        '''
            Return True when the user's entire activity feed consists only
            of "followed a topic" events (i.e. the account looks like a bot),
            False as soon as any other activity is found.

            On the first call `url_token` is a member token; `referer` is
            kept for backward compatibility with the old recursive calling
            convention, where `url_token` is then a full activities URL.
        '''
        if referer:
            # Legacy paging call: url_token is already a full URL.
            act_url = url_token
            self.headers.update({
                'Referer': referer,
            })
        else:
            act_url = f'https://www.zhihu.com/api/v3/feed/members/{url_token}/activities'
            self.headers.update({
                'Referer': f'https://www.zhihu.com/people/{url_token}',
            })

        # Walk the feed page by page. Iterative instead of recursive so a
        # very long feed cannot exhaust the interpreter recursion limit.
        while True:
            resp = self.session.get(url=act_url, headers=self.headers).json()

            for item in resp['data']:
                if '关注了话题' not in item['action_text']:
                    return False  # any non-topic activity means a real person

            paging = resp['paging']
            if paging['is_end']:
                return True  # end of feed reached: only topic follows found
            # Next page: same header/URL handoff the recursion performed.
            act_url = paging['next']
            self.headers.update({
                'Referer': paging['previous'],
            })


if __name__ == "__main__":
    # Crawl the follower list first, then classify each account.
    bot = ZhihuBot()
    bot.query("im401")
    bot.check_bot()
    print("工作完成，脚本退出中...")