# -*-coding:utf-8 -*- python3

import re
import os
import pycurl
import json
import time
from urllib.request import urlretrieve
# from urllib.request import unquote

def mkdir(path):
    """Create directory *path* (and parents) if it does not exist.

    Returns True when the directory was created, False when it
    already existed.
    """
    target = path.strip()
    # Guard clause: nothing to do when the directory is already there.
    if os.path.exists(target):
        return False
    os.makedirs(target)
    return True

def find_last(string, sub):
    """Return the index of the last occurrence of *sub* in *string*.

    Returns -1 when *sub* does not occur. The original hand-rolled a
    forward-scanning loop and shadowed the builtin ``str`` with a
    parameter name; ``str.rfind`` implements exactly this search in C.
    (All call sites in this file pass arguments positionally.)
    """
    return string.rfind(sub)

def saveImages(imglist, name):
    """Download every URL in *imglist* into the directory *name*.

    The local filename is whatever follows the last '/' in each URL.
    Prints each destination path before downloading.
    """
    for img_url in imglist:
        slash_pos = find_last(img_url, "/")
        dest = name + "/" + img_url[slash_pos + 1:]
        print(dest)
        urlretrieve(img_url, dest)

def getAllImg(html):
    """Extract all image URLs from data-actualsrc attributes in *html*.

    Zhihu answer bodies carry the real image location in a
    data-actualsrc attribute; re.S lets the non-greedy capture span
    newlines inside the markup.
    """
    pattern = re.compile(r'data-actualsrc="(.*?)">', re.S)
    return pattern.findall(html)

class joincontents:
    """Accumulates a pycurl response body and exposes it as text.

    pycurl may invoke the write callback with chunks whose boundary
    splits a multi-byte UTF-8 sequence; decoding each chunk on its own
    (as the original did) can raise UnicodeDecodeError. We therefore
    buffer the raw bytes and decode only when ``contents`` is read.
    The read interface is unchanged: ``obj.contents`` returns a str.
    """

    def __init__(self):
        # Raw response bytes, appended chunk by chunk.
        self._buffer = bytearray()

    def callback(self, chunk):
        """pycurl WRITEFUNCTION hook: receives one raw bytes chunk."""
        self._buffer.extend(chunk)

    @property
    def contents(self):
        # Decode lazily so partial multi-byte sequences never break.
        return bytes(self._buffer).decode('utf-8')

def curlurl(url):
    """Fetch *url* with pycurl and return the response body as text.

    Uses gzip transfer encoding and a hard-coded zhihu.com session
    cookie (required for the authenticated API endpoints).
    """
    sink = joincontents()
    handle = pycurl.Curl()
    handle.setopt(pycurl.WRITEFUNCTION, sink.callback)
    handle.setopt(pycurl.ENCODING, 'gzip')
    handle.setopt(pycurl.URL, url)
    # Session cookie captured from a logged-in browser session.
    handle.setopt(pycurl.COOKIE, """_zap=8799a434-8331-4b8b-9dd2-b3cffba63730; d_c0="ADBCSzfSLgyPTlDMT_g-TElHu2t0pA2wTaQ=|1502080544"; q_c1=15bbe5222e8e446397cdee2cdeca3c9b|1508469568000|1500000696000; z_c0=Mi4xeTJZRUFBQUFBQUFBTUVKTE45SXVEQmNBQUFCaEFsVk5FVmJ0V2dEMnFUcmJ4Ty1QUGpVYnM0VHM1UkU2WTlXdFlR|1509951505|2f64790e9e9692ca5f86c8b66cdac0955efa78ba; __utma=51854390.176547422.1504862302.1505284063.1509951673.3; __utmz=51854390.1509951673.3.3.utmcsr=zhihu.com|utmccn=(referral)|utmcmd=referral|utmcct=/people/tangxiao/collections; __utmv=51854390.100-1|2=registration_date=20120522=1^3=entry_date=20120522=1; aliyungf_tc=AQAAAF3nCj9LBgsAXYJ6d3cDcKwEplwL; q_c1=15bbe5222e8e446397cdee2cdeca3c9b|1512627153000|1500000696000; _xsrf=d7bd7ab4-8f14-4b3e-8fc6-003418839d8c""")
    handle.perform()
    return sink.contents

def down(url, folder):
    """Walk the paginated Zhihu answers feed starting at *url*, saving
    every embedded image into *folder*.

    Returns 1 when the feed is exhausted or a page fetch yields no
    content (keeping the original sentinel convention); returns None
    if a page fails to parse or a download errors out.
    """
    # Iterate instead of recursing page-by-page so a long feed cannot
    # exhaust the call stack (the original recursed on paging["next"]).
    while True:
        # The API is served over TLS; upgrade plain-http page URLs.
        # Re-checked each page, as the original recursion did.
        if url.find("https") == -1:
            url = url.replace("http", "https")
        content = curlurl(url)
        if content is None:
            return 1
        try:
            data = json.loads(content)
            for answer in data["data"]:
                saveImages(getAllImg(answer["content"]), folder)
            if data["paging"]["is_end"]:
                return 1
            url = data["paging"]["next"]
        except Exception as exc:
            # The original was a bare `except: pass` that hid every
            # failure; keep the best-effort behavior but report it.
            print("down(): giving up on %s: %s" % (url, exc))
            return None

if __name__ == '__main__':
    # NOTE: question 54104076 did not finish downloading (original author's note).
    question_id = "50608844"  # Zhihu question number
    api_url = "https://www.zhihu.com/api/v4/questions/"+question_id+"/answers?sort_by=default&include=data[*].is_normal,is_sticky,collapsed_by,suggest_edit,comment_count,can_comment,content,editable_content,voteup_count,reshipment_settings,comment_permission,mark_infos,created_time,updated_time,relationship.is_authorized,is_author,voting,is_thanked,is_nothelp,upvoted_followees;data[*].author.badge[?(type=best_answerer)].topics&limit=20&offset=0"
    started = time.time()
    # Fetch the first page just to learn the question title for the folder name.
    first_page = json.loads(curlurl(api_url))
    question_title = first_page["data"][0]["question"]["title"]
    target_folder = os.path.join(os.getcwd(), "image-zhifu/" + question_id + "-" + question_title)
    mkdir(target_folder)
    down(api_url, target_folder)
    print(u"总共耗时：%f 秒" % (time.time() - started))
