import os
import re
import time

import requests


class ZhihuSpider:
    """Scrape answer images for a set of Zhihu questions and download them.

    ``question_id_dict`` maps a question id (str) to the question title
    (str); the title is used as the folder name when saving images.
    """

    def __init__(self, question_id_dict, base_dir=None):
        """
        :param question_id_dict: mapping of question id -> question title.
        :param base_dir: root directory for downloaded images; defaults to
            the original hard-coded path when omitted (backward compatible).
        """
        self.question_id_dict = question_id_dict
        self.user_agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36'
        self.cookies = '_xsrf=yaOOGa2vKmMDWcZ1iRzpUNkjggQjUOSz; KLBRSID=b5ffb4aa1a842930a6f64d0a8f93e9bf|1619598487|1619598487'
        # Capture the value of data-original="..." inside an <img> tag.
        # Fixed: the original class ['|"] also matched a literal '|',
        # which was unintended; only quote characters are optional here.
        self.img_tag = re.compile(r"""<img\s.*?\s?data-original\s*=\s*['"]?([^\s'"]+).*?>""", re.I)
        # Collected as (image_url, question_id, answer_id) tuples.
        self.pic_urls = []
        self.base_dir = base_dir if base_dir is not None else r'C:\Users\mooejun\Downloads\spider\SpiderZhihu'

    def _make_headers(self, question_id):
        """Build request headers for one question (referer must match it)."""
        return {
            'referer': 'https://www.zhihu.com/question/' + question_id,
            'user-agent': self.user_agent,
            'cookie': self.cookies
        }

    def get_pic_urls(self):
        """Page through each question's answer API and collect image URLs.

        Appends (image_url, question_id, answer_id) tuples to
        ``self.pic_urls``. Paging of a question stops as soon as the API
        returns an empty ``data`` list.
        """
        # Bug fix: the original iterated the *global* question_id_dict,
        # which raises NameError when the class is used as a library.
        for question_id in self.question_id_dict:
            headers = self._make_headers(question_id)
            # The API serves 5 answers per page; scan at most 500 answers.
            for offset in range(0, 500, 5):
                try:
                    url = 'https://www.zhihu.com/api/v4/questions/' + question_id + '/answers?include=data%5B%2A%5D.is_normal%2Cadmin_closed_comment%2Creward_info%2Cis_collapsed%2Cannotation_action%2Cannotation_detail%2Ccollapse_reason%2Cis_sticky%2Ccollapsed_by%2Csuggest_edit%2Ccomment_count%2Ccan_comment%2Ccontent%2Ceditable_content%2Cvoteup_count%2Creshipment_settings%2Ccomment_permission%2Ccreated_time%2Cupdated_time%2Creview_info%2Crelevant_info%2Cquestion%2Cexcerpt%2Crelationship.is_authorized%2Cis_author%2Cvoting%2Cis_thanked%2Cis_nothelp%2Cis_labeled%2Cis_recognized%2Cpaid_info%2Cpaid_info_content%3Bdata%5B%2A%5D.mark_infos%5B%2A%5D.url%3Bdata%5B%2A%5D.author.follower_count%2Cbadge%5B%2A%5D.topics&limit=5&offset=' + str(
                        offset) + '&platform=desktop&sort_by=default'
                    # timeout added so a stalled connection cannot hang the
                    # crawl (download_pic already used one).
                    res = requests.get(url, headers=headers, timeout=10)
                    if res.status_code == 200:
                        data = res.json()
                        if not data['data']:
                            print('no data ({})'.format(url))
                            break
                        for answer in data['data']:
                            answer_id = answer.get('id', '')
                            content = answer.get('content', '')
                            if content:
                                for image_url in self.img_tag.findall(content):
                                    print('图片url:{}, 问题id:{}, 回答id:{}'.format(image_url, question_id, answer_id))
                                    self.pic_urls.append((image_url, question_id, answer_id))
                    else:
                        print('返回值:{}, url:{}'.format(res.status_code, url))

                    # Throttle to avoid hammering the API.
                    time.sleep(1.1)
                except Exception as e:
                    print('请求出错({})'.format(e))
                    time.sleep(1.1)
                    continue
            print('爬取结束-{}'.format(question_id))

    def download_pic(self):
        """Download every collected image to base_dir/<title>/<answer_id>/.

        Each image is fetched with up to 3 attempts; failures are logged
        and the image is skipped.
        """
        for pic_url, question_id, answer_id in self.pic_urls:
            headers = self._make_headers(question_id)
            data = None
            for _ in range(3):  # retry a flaky download up to 3 times
                try:
                    res = requests.get(pic_url, headers=headers, timeout=3)
                    print('返回值:{}, url:{}'.format(res.status_code, pic_url))
                    if res.status_code == 200:
                        data = res.content
                        break
                except Exception as e:
                    print('请求异常({})'.format(e))

            if data:
                # Folder is named after the question title.
                question_name = self.question_id_dict.get(question_id)
                pic_dir = os.path.join(self.base_dir, question_name, str(answer_id))
                os.makedirs(pic_dir, exist_ok=True)
                # File name: last URL path segment, query string stripped.
                pic_name = pic_url.split('/')[-1].split('?')[0]
                pic_path = os.path.join(pic_dir, pic_name)
                with open(pic_path, 'wb') as f:
                    f.write(data)
                    print('下载成功: {}'.format(pic_name))

    def run(self):
        """Collect image URLs, then download them."""
        self.get_pic_urls()
        self.download_pic()


if __name__ == '__main__':
    # Map of Zhihu question id -> question title (the title becomes the
    # download folder name). Commented entries are disabled targets.
    question_id_dict = {
        # '368001626': '你见过的最性感的身材是怎样的',
        # '338323696': '女人穿紧身牛仔裤可以性感到什么程度',
        # '34243513': '你见过最漂亮的女生长什么样',
        '379350200': '女生身材好是一种怎样的感觉',
        '424555505': '你相册中最性感的照片是哪一张',
        '379425092': '女生长得漂亮，身材又好是怎样的一种体验',
    }
    ZhihuSpider(question_id_dict).run()
