# -*- coding: utf-8 -*-
import os 
import time 
 

from os.path import join, getsize 
from heapq import nlargest 
 
import requests
from bs4 import BeautifulSoup
import json
import urllib.request
import re  # needed for the image-tag regular expressions below

# Session cookie copied from a logged-in browser session; sent with every
# request so zhihu.com treats us as authenticated.
# NOTE(review): contains expiring tokens (z_c0, _xsrf, BEC, ...) — this value
# must be refreshed manually from the browser when requests start failing.
standCookie = "_zap=383d9fd2-c856-4386-b8a9-27a3f09224f2; d_c0=ANDcF2kCihiPTue158SS2AD5od1Y1OiB_30=|1714358515; __snaker__id=pgkfVM5KYQuZY08t; q_c1=c747c5728d7440b29e03167a792c26d8|1714358536000|1714358536000; __zse_ck=001_/cvtvhW9mZqBI5C91HJkD8iBszyO2jB4AcXvzEY56Y4MPEY0vLw9LK2R6orRhCLIqjyYKoTm6i=EQc65N4WVlG9homyhAUe+WD6IfxTyRGyB6aa6BkTp8DTd58WI+Uhz; z_c0=2|1:0|10:1719795167|4:z_c0|80:MS4xZ2psaEFBQUFBQUFtQUFBQVlBSlZUZDlMYjJmVWp4OU5fOHRSaW9MX2NSalFmXzVYb0Z2NDFRPT0=|9e6cd340d4b98e52177501d08b0f2072a3b8bbf4b64b35c5e9d0a4f5cabef641; _xsrf=65b277e3-8674-46d4-bd18-ac73d1745758; Hm_lvt_98beee57fd2ef70ccdd5ca52b9740c49=1719571615,1719795168,1719829510,1719884391; BEC=7e33fec1f95d805b0b89c2974da3470f; SESSIONID=ItlBR5qIFhCyP6zm7cuP2f4cvfserHiRVjzwmpkCNF9; JOID=UVscAk8RSahqZYuMGxNuv0G3n-oJWDfMLTC-91pQIeEbMLr2fG02Jg5igY4WkVpqcm8bkFCX87ohUH9wTBv6zmI=; osd=W1sUA0kbSaBrY4GMExJotUG_nuwDWD_NKzq-_1tWK-ETMbz8fGU3IARiiY8Qm1pic2kRkFiW9bAhWH52Rhvyz2Q=; Hm_lpvt_98beee57fd2ef70ccdd5ca52b9740c49=1719893380; tst=r; KLBRSID=f48cb29c5180c5b0d91ded2e70103232|1719893380|1719893300"


def returnArr(url):
    """Parse a Zhihu answer URL and dispatch the question page to netWork().

    Expects a URL of the shape
    ``https://www.zhihu.com/question/<qid>/answer/<aid>`` (possibly with
    surrounding whitespace or a share-text prefix). Extracts the question id
    and answer id from the path segments following the literal ``question``
    and ``answer`` markers, normalizes the URL, and — only when both ids were
    found — calls ``netWork()`` on the canonical question URL.

    Returns None; all output is via print() and the netWork() side effect.
    """
    questionString = ''
    answerString = ''
    realURL = ''

    if 'question' in url and 'answer' in url:
        segments = url.split('/')
        # The id is the path segment immediately after each marker; if a
        # marker occurs more than once, the last occurrence wins (matches
        # the original sentinel-loop behavior). Embedded newlines from a
        # triple-quoted source string are stripped.
        for i, segment in enumerate(segments[:-1]):
            if segment == 'question':
                questionString = segments[i + 1].replace('\n', '')
            elif segment == 'answer':
                answerString = segments[i + 1].replace('\n', '')

    if '-' in url and '知乎' in url:
        # Share text like "...- 知乎 https://www.zhihu.com/..." — keep only
        # the final zhihu.com URL.
        arrURL = url.split('https://www.zhihu.com')
        realURL = 'https://www.zhihu.com' + arrURL[-1]
    else:
        realURL = url

    if questionString != '' and answerString != '' and realURL != '':
        print(questionString)
        print(answerString)
        print(realURL)
        # Fetch the question page; the answer id selects the answer inside
        # the page's embedded JSON state.
        standURL = 'https://www.zhihu.com/question/' + questionString
        netWork(standURL, questionString, answerString)



def netWork(url, questionString, answerString):
    """Fetch a Zhihu question page and scrape one answer's images.

    Downloads the page, extracts the embedded ``js-initialData`` JSON blob,
    reads the question title and the answer's HTML content, then downloads
    every image referenced via a ``data-original`` attribute (one per second,
    to be polite to the server).

    Args:
        url: canonical question URL (https://www.zhihu.com/question/<qid>).
        questionString: question id — key into initialState.entities.questions.
        answerString: answer id — key into initialState.entities.answers.

    Fixes vs. previous revision:
      * a leftover debug ``return`` made the whole download section dead code;
      * the "知乎被发现了" branch was unreachable (its guarding ``elif`` was
        the exact negation of the ``if``) — it now fires when the page has no
        ``js-initialData`` node, i.e. an anti-bot/captcha page;
      * the img regex ``<[img|IMG].*?>`` was a character-class bug that also
        matched ``<i>``/``<m>``/``<g>`` tags.
    """
    headers = {
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "accept-language": "zh-CN,zh;q=0.9",
        "priority": "u=0, i",
        "sec-ch-ua": "\"Google Chrome\";v=\"125\", \"Chromium\";v=\"125\", \"Not.A/Brand\";v=\"24\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Windows\"",
        "sec-fetch-dest": "document",
        "sec-fetch-mode": "navigate",
        "sec-fetch-site": "none",
        "sec-fetch-user": "?1",
        "upgrade-insecure-requests": "1",
        "Content-Type": "text/html; charset=utf-8",
        "cookie": standCookie
    }
    print(url)
    response = requests.get(url, headers=headers)
    response.encoding = 'utf-8'
    print('--------------------------------------->')
    try:
        if '后自动跳转至回答所在的问题页' in response.text:
            # Redirect stub page: the answer has been deleted.
            print('文章已经被删除-------------------->')
            return

        beautifulSoup = BeautifulSoup(response.text, 'html.parser', from_encoding='utf-8')
        initialData = beautifulSoup.find('script', id='js-initialData')
        if initialData is None:
            # No embedded state on the page — most likely Zhihu served an
            # anti-bot / verification page instead of the question.
            print('知乎被发现了-------------------->')
            return

        # The script tag's text content is a single JSON document.
        jsonModel = json.loads(initialData.get_text())
        print(jsonModel['initialState']['entities']['questions'])

        tmpTitle = jsonModel['initialState']['entities']['questions'][questionString]['title']
        tmpContent = jsonModel['initialState']['entities']['answers'][answerString]['content']
        print(tmpTitle)

        if 'img' in tmpContent or 'IMG' in tmpContent:
            re_img = r'<img[^>]*>'                      # any <img ...> tag, case-insensitive
            re_img_rc = r'.+?data-original="(\S+)"'     # full-size image URL attribute
            img_url_list = []
            img_list = re.findall(re_img, tmpContent, flags=re.IGNORECASE)
            print(img_list)
            for tag in img_list:
                # Thumbnails/inline icons carry no data-original attribute.
                if 'data-original' not in tag:
                    continue
                imgPath = re.findall(re_img_rc, tag)[0]
                print(imgPath)
                img_url_list.append(imgPath)

            # De-duplicate: Zhihu repeats each image as thumbnail + original.
            arrPic = list(set(img_url_list))
            for item in arrPic:
                downLoadPNG(item)
                time.sleep(1)  # throttle to avoid tripping rate limits

            print('一共发现{}图片--------------------->'.format(len(arrPic)))
    except Exception as e:
        # Boundary catch: keep the scraper alive on malformed pages/JSON.
        print('报错了-------------------->{error}'.format(error=e))

def downLoadPNG(url):
    """Download one image into the local ``trash3`` directory.

    The file name is the current unix timestamp (whole seconds) and the
    extension is taken from the last path segment of the URL, with any query
    string stripped first.

    NOTE(review): second-resolution names collide if called more than once
    per second — the caller currently sleeps 1s between downloads, which is
    what keeps this safe.
    """
    imgSavePath = 'trash3'
    print('url->{url}'.format(url=url))

    # Extension from the URL path: drop the query string, take the final
    # path segment, then the text after its last dot.
    lastSegment = url.split('?')[0].split('/')[-1]
    imgType = lastSegment.split('.')[-1]

    # Timeout added so a stalled CDN connection can't hang the scraper.
    img_data = requests.get(url, timeout=30).content

    # exist_ok avoids the exists()/makedirs() race of the old code.
    os.makedirs(imgSavePath, exist_ok=True)
    fileName = "{a}/{b}.{c}".format(a=imgSavePath, b=str(int(time.time())), c=imgType)
    with open(fileName, 'wb') as f:
        f.write(img_data)

# Target answer to scrape. The leading/trailing newlines from the
# triple-quoted string are tolerated by returnArr's segment parsing.
url = """
https://www.zhihu.com/question/636586870/answer/3339583810
"""

# Guarded entry point: importing this module no longer fires a network
# request as a side effect.
if __name__ == '__main__':
    returnArr(url)














