# -*- coding:utf-8 -*-
import json
import random
import time
from ssl import _create_unverified_context
from json import loads

import jieba.analyse as analyse
import pyperclip as pyperclip
import requests
from lxml import etree
from selenium import webdriver
from selenium.webdriver import Keys
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import tkinter.messagebox

import urllib.request
import urllib.parse

# Fallback HTML body used for records whose search hit carries no usable content of its own.
postContent = '<p>"一天就有翻天覆地的变化！！！ 今天就吐血整理一波高质量男装<em>穿搭</em>博主平台包括微信公众号和小红薯和微博 服饰护肤发型分享的都超级全，非常适合新手！！废话不多说，直接上干货 一、微信公众号 （一）落落的<em>穿搭</em>笔记 风格：简约通勤、少年感 内容：<em>穿搭</em>、<em>穿搭</em>风格测评、干货 [图片] 博主落落是"</p>'
# JSON template for one product record; parsed per item and then overwritten field-by-field
# (keys: pics, title, content). Placeholder values come from a dummy-data generator.
postDefault = '{"pics": "http://dummyimage.com/400x400","title": "结最斯必化经","content": "cupidatat sed mollit"}'


# Converts one cached Zhihu search-result page (selected by `offset`) into product records.
def _extract_pics(post_object):
    """Return a JSON-encoded list of image URLs from *post_object*'s thumbnails.

    Returns the JSON string '[]' when no thumbnail info (or no thumbnail list)
    is present. Only entries whose 'type' is 'image' are kept.
    """
    info = post_object.get('thumbnail_info')
    if not info or not info.get('thumbnails'):
        return json.dumps([])
    return json.dumps([t['url'] for t in info['thumbnails'] if t.get('type') == 'image'])


def get_url_data_in_ranking_list2(offset=0, lc_idx=0, post=None, source_dir='../postJsonData/sourcejson'):
    """Build store-product records from a locally cached Zhihu search-result page.

    Zhihu's anti-scraping defeated the live request (the original HTTP call,
    headers and session cookies were dead code and have been removed), so this
    reads a previously saved raw page from ``{source_dir}/{offset}.json``.
    The live endpoint was:
    https://www.zhihu.com/api/v4/search_v3?...&offset={offset}&lc_idx={lc_idx}...

    Parameters:
        offset: page offset; selects which cached ``{offset}.json`` file to load.
        lc_idx: page index of the (disabled) live request; unused when reading
            from the cache, kept for interface compatibility.
        post: JSON string used as the template for each product record; must
            decode to a dict.
        source_dir: directory containing the cached raw pages.

    Returns:
        list of product dicts on success, or ``[error_message, -1]`` when any
        exception occurs (shape kept for backward compatibility with callers).
    """
    try:
        with open(f'{source_dir}/{offset}.json', 'r', encoding='UTF-8') as f:
            json_data = json.load(f)

        # Fallbacks applied when a hit is not a usable search result.
        fallback_content = '<p>"一天就有翻天覆地的变化！！！ 今天就吐血整理一波高质量男装<em>穿搭</em>博主平台包括微信公众号和小红薯和微博 服饰护肤发型分享的都超级全，非常适合新手！！废话不多说，直接上干货 一、微信公众号 （一）落落的<em>穿搭</em>笔记 风格：简约通勤、少年感 内容：<em>穿搭</em>、<em>穿搭</em>风格测评、干货 [图片] 博主落落是"</p>'
        fallback_title = "教资面试，冬天女生该怎么穿呢？"

        store_products = []
        for product in json_data['data']:
            record = loads(post)  # fresh template per record
            content = fallback_content
            title = fallback_title
            pics = json.dumps([])
            if product.get('type') == 'search_result':
                post_object = product['object']
                # 'answer' and 'article' differ only in where the title lives.
                if post_object['type'] == 'answer':
                    content = post_object['content']
                    title = post_object['question']['name']
                    pics = _extract_pics(post_object)
                elif post_object['type'] == 'article':
                    content = post_object['content']
                    title = post_object['title']
                    pics = _extract_pics(post_object)
            record['content'] = content
            record['pics'] = pics
            record['title'] = title
            store_products.append(record)
        return store_products
    except Exception as ex:
        # NOTE(review): mixed return type ([str, -1] vs list of dicts) kept for
        # backward compatibility; the caller only serializes whatever comes back.
        print(ex)
        return ["出现未知异常：{}".format(ex), -1]


# Driver: convert cached result pages 1-5 into per-page product JSON files.
limit = 25
page = 0
for page in range(1, 6):  # live crawling of further pages abandoned (anti-scraping)
    try:
        storeProductL = get_url_data_in_ranking_list2(offset=page, lc_idx=page, post=postDefault)
        # Persist this page's converted records.
        out_path = f'../postJsonData/非自动爬取知乎穿搭{page}.json'
        with open(out_path, 'w', encoding='utf-8') as out_file:
            json.dump(storeProductL, out_file, ensure_ascii=False, indent=4)
    except Exception as ex:
        print("爬取完了......")
        print(ex)
time.sleep(5)
