'''
    爬取　sogou　网站中的图片
'''

import os
import requests
from bs4 import BeautifulSoup
import jsonpath
import json


def getHtmlCode(url):
    '''
        Fetch the page at *url* and return its html source as a string.

        The response is decoded as UTF-8 regardless of what the server
        advertises, so downstream parsing sees a consistent encoding.
    '''
    request_headers = {
        'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122 UBrowser/4.0.3214.0 Safari/537.36'
    }
    response = requests.get(url, headers=request_headers)
    # Force the decode encoding before touching .text
    response.encoding = 'UTF-8'
    return response.text

def getImg(page, localPath):
    '''
        Parse the html source, extract the text of the first <script> tag
        (sogou embeds its image-result data as js/JSON rather than plain
        <img> tags) and print it for inspection.

        NOTE(review): images are not actually downloaded yet — the old
        <img>-tag scraping loop was dead commented-out code and has been
        removed. localPath is only created, ready for a future download step.

        :param page: html source string of the search-result page
        :param localPath: directory where downloaded images would be stored
    '''
    # makedirs creates intermediate directories too, and exist_ok avoids
    # the race between the existence check and the creation (os.mkdir
    # failed on nested paths and could raise if the dir appeared in between).
    os.makedirs(localPath, exist_ok=True)

    soup = BeautifulSoup(page, 'html.parser')  # parse the page as html
    jsonTemp = soup.script.text  # raw text of the first <script> tag
    print(jsonTemp)

class SaveData:
    """
    Download a url with a deliberately old User-Agent and save the raw
    response body to disk.

    The ancient MSIE 5.01 UA makes sogou serve a static, js-free page,
    which is far easier to scrape than the js-rendered modern one.
    """

    def __init__(self, url):
        """
        :param url: page url to download
        """
        self.url = url
        self.headers = {
            # 使用较老版本的请求头，该浏览器不支持js
            # (old UA: the "browser" does not support js, so the server
            # falls back to a static html page)
            "User-Agent": "Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)"
        }

    def get_data(self):
        """
        Request self.url and return the raw response body.

        :return: response body as bytes
        """
        response = requests.get(self.url, headers=self.headers)
        return response.content

    def save_data(self, data, num, localPath):
        """
        Write *data* to ``<localPath>/<num>.html``.

        :param data: bytes to write
        :param num: sequence number used as the file name
        :param localPath: target directory
        """
        # os.path.join works whether or not localPath ends with a
        # separator (plain concatenation silently produced a wrong name
        # without one); "wb" suffices — the file is never read back here,
        # so the extra read access of "wb+" was pointless.
        file_name = os.path.join(localPath, str(num) + ".html")
        with open(file_name, "wb") as f:
            f.write(data)

if __name__ == "__main__":
    # Sogou image-search result page for the (url-encoded) query.
    url = 'https://pic.sogou.com/pics?query=%E5%A5%B3%E4%BA%BA&w=05009900&p=&_asf=pic.sogou.com&_ast=1576676251&sc=index&sut=48512&sst0=1576676251303'
    localPath='/home/colby/work/python_project/reptile/tmpimg/'

    # Fetch the result page, then hand it to the extraction step.
    html_source = getHtmlCode(url)
    getImg(html_source, localPath)