# encoding:utf-8
'''
@author : xaing
@time : 2019/4/7 14:52
@地址 : 广州
@作用 :
@结果 :
'''


from version1 import webUtils as web
from version1 import common
from version1 import logUtils as log
from version1 import imgUtils as img
from version1 import jsUtils as js
import re
import execjs
import json






def zhihu(depth=5, save_dir='e://下载/1'):
    """Crawl image URLs from the answers of one Zhihu question and download them.

    Pages the Zhihu v4 answers API (20 answers per request), extracts every
    ``<img src="...">`` URL ending in gif/jpg/jpeg/png from the raw response,
    and batch-downloads the collected URLs.

    :param depth: number of API pages to fetch (20 answers each).
                  Defaults to 5 to match the original behavior.
    :param save_dir: target directory passed to ``img.downImgBatch``.
    """
    # Compile once instead of on every loop iteration; explicit alternation
    # avoids the old character-class trick ([gjp][ipn][fpg]) which also
    # matched junk like ".gpf" and missed ".jpeg".
    img_pattern = re.compile(r'<img src="([^"]+\.(?:gif|jpe?g|png))"')

    # Accumulated image URLs across all pages.
    imgLists = []

    # TODO(review): add a status check — stop paging early on a 404/empty page
    # instead of blindly fetching `depth` pages.
    for index in range(depth):
        num = index * 20
        # Zhihu API: `limit` must be <= 20 (larger values are rejected as
        # invalid), `offset` is the cursor — advance it by limit * page.
        url = 'https://www.zhihu.com/api/v4/questions/313825759/answers?include=data[*].is_normal,admin_closed_comment,reward_info,is_collapsed,annotation_' \
              'action,annotation_detail,collapse_reason,is_sticky,collapsed_by,suggest_edit,comment_count,can_comment,content,editable_content,voteup_count,reshipment_' \
              'settings,comment_permission,created_time,updated_time,review_info,relevant_info,question,excerpt,relationship.is_authorized,is_author,voting,is_thanked,is_' \
              'nothelp,is_labeled,is_recognized,paid_info;data[*].mark_infos[*].url;data[*].author.follower_count,badge[*].topics&limit=20&offset=' + str(num) + '&platform=desktop&sort_by=default'

        common.printStr("循环第 ", index)
        html = web.get(url, None)

        # Parse to JSON so the 'data' field can be inspected/logged.
        html = json.loads(html)
        common.printStr(html['data'])

        # Scan the stringified payload for embedded <img> tags.
        imgList = img_pattern.findall(str(html))
        print(imgList)

        imgLists.extend(imgList)
        common.printStr("元素数量:", len(imgLists))

    # Download all collected images in one batch.
    img.downImgBatch(save_dir, imgLists, True)




# Guard the entry point so importing this module does not immediately
# kick off a network crawl and file downloads.
if __name__ == "__main__":
    zhihu()