# coding: utf-8
import csv
import json
import os
import re
import threading
import time

import requests


# Fetch a web page
def GET_HTML(url, key=True):
    """Fetch *url* and return its body.

    key=True  -> decoded text (forced utf-8)
    key=False -> raw bytes
    """
    headers = {'User-Agent': 'Moziall/5.0'}
    # timeout prevents the crawler from hanging forever on a stalled connection
    response = requests.get(url=url, headers=headers, timeout=30)
    response.encoding = 'utf-8'
    return response.text if key else response.content


# Module-level lock: a lock only serializes writers if it is *shared*.
# The original created a fresh Lock inside every call, which protected nothing.
_SAVE_LOCK = threading.Lock()


# Append a line to a file (thread-safe)
def SAVE_CONNECT(fileName, s):
    """Append *s* plus a trailing newline to *fileName* under a shared lock."""
    with _SAVE_LOCK:
        # 'with' guarantees the handle is closed even if write() raises
        with open(fileName, 'a+', encoding='utf-8') as fp:
            fp.write(s)
            fp.write('\n')


# Download one image
def DOWNLOAD_IMAGE(url):
    """Download *url* into the images/ directory, named by its last path segment."""
    name = url.split('/')[-1]
    # create the target directory on first use instead of crashing
    os.makedirs('images', exist_ok=True)
    r = requests.get(url, headers={'User-Agent': 'Moziall/5.0'}, timeout=30)
    # 'with' closes the handle even if write() raises
    with open('images/' + name, 'wb') as fp:
        fp.write(r.content)

    print('download image ', name)


# Extract image URLs from an answer's HTML content
def check_content(conn):
    """Return the set of .jpg URLs found in *conn*, skipping '_hd.jpg' variants.

    Tokens are split on spaces; a URL is taken from the first ="..." attribute
    value of any token that mentions '.jpg'.
    """
    urls = set()
    for token in conn.split(' '):
        if '.jpg' not in token:
            continue
        matches = re.findall(r'="(.*?)"', token)
        # a token can contain '.jpg' without any ="..." attribute
        # (e.g. bare text); the original indexed [0] and crashed here
        if not matches:
            continue
        url = matches[0]
        if '_hd.jpg' in url:
            continue
        urls.add(url)
    return urls


# Main crawl loop
def run():
    """Walk the Zhihu answers API page by page, downloading every new image URL.

    Each page's answer bodies are scanned for .jpg links; URLs already
    downloaded on a previous page are skipped (the original re-downloaded
    the entire accumulated set on every iteration).
    """
    # 'next_url' instead of 'next' to avoid shadowing the builtin
    next_url = "https://www.zhihu.com/api/v4/questions/267707433/answers?include=data%5B%2A%5D.is_normal%2Cadmin_closed_comment%2Creward_info%2Cis_collapsed%2Cannotation_action%2Cannotation_detail%2Ccollapse_reason%2Cis_sticky%2Ccollapsed_by%2Csuggest_edit%2Ccomment_count%2Ccan_comment%2Ccontent%2Ceditable_content%2Cvoteup_count%2Creshipment_settings%2Ccomment_permission%2Ccreated_time%2Cupdated_time%2Creview_info%2Crelevant_info%2Cquestion%2Cexcerpt%2Crelationship.is_authorized%2Cis_author%2Cvoting%2Cis_thanked%2Cis_nothelp%2Cis_labeled%2Cis_recognized%2Cpaid_info%2Cpaid_info_content%3Bdata%5B%2A%5D.mark_infos%5B%2A%5D.url%3Bdata%5B%2A%5D.author.follower_count%2Cbadge%5B%2A%5D.topics&limit=5&offset=10&platform=desktop&sort_by=default"

    seen = set()  # URLs already downloaded across all pages
    while next_url:
        # fetch one API page
        html = GET_HTML(next_url)
        SAVE_CONNECT('net_url.txt', next_url)
        page = json.loads(html)

        # collect image URLs from this page's answers
        page_urls = set()
        for answer in page['data']:
            content = answer.get('content')
            # 'content' can be absent/None — original crashed in check_content
            if content:
                page_urls.update(check_content(content))
        next_url = page['paging']['next']

        # save and download only URLs not seen on earlier pages
        for url in page_urls - seen:
            print(url)
            DOWNLOAD_IMAGE(url)
            SAVE_CONNECT('image_url.txt', url)
        seen |= page_urls

        # stop when the API reports the last page (hedged with .get:
        # 'is_end' presence assumed from the Zhihu paging payload — TODO confirm)
        if page['paging'].get('is_end'):
            break
        time.sleep(3)


# Script entry point: start the crawl loop when executed directly.
if __name__ == '__main__':
    run()
