import requests  # HTTP client library
import time
from bs4 import BeautifulSoup  # parses HTML pages into navigable Python objects
# The functions below fetch the search page, extract image links, and download them.
import re  # regular expressions

q = "图片"  # search keyword sent to nipic (Chinese for "image")
page = "1"  # result page number; kept as a string for URL concatenation
url = "http://soso.nipic.com/?q=" + q + "&page=" + page  # full search-results URL
down = 'E:/img/'         # local download directory (must already exist; keep trailing slash)

def getHtmlText(url, timeout=10, retry_delay=1.0):
    """Fetch *url* with HTTP GET and return the page body as text.

    Like the original, this retries until a request succeeds, but each
    attempt is now bounded by *timeout* seconds (a hung connection can
    no longer block forever) and failed attempts back off by
    *retry_delay* seconds instead of hammering the server in a tight
    loop.

    Parameters
    ----------
    url : str
        Address to request.
    timeout : float
        Per-request timeout in seconds (new, defaulted — backward
        compatible with existing callers).
    retry_delay : float
        Seconds to sleep between failed attempts.

    Returns
    -------
    str
        The decoded response body (``requests`` ``Response.text``).
    """
    htmltext = -1  # sentinel: -1 means "not fetched yet"
    while htmltext == -1:
        try:
            resp = requests.get(url, timeout=timeout)
            resp.raise_for_status()  # raise on non-2xx HTTP status
            htmltext = resp.text
        except Exception as err:
            print(err)
            time.sleep(retry_delay)  # back off before the next attempt
    return htmltext

def findImgUrl(text):
    """Extract the detail-page links from a search-results page.

    Parameters
    ----------
    text : str
        HTML of a soso.nipic.com search-results page.

    Returns
    -------
    list[str] or None
        One href per result thumbnail.  Links that wrap the real
        target in an ``hturl=<real url>`` query parameter are unwrapped
        to that target.  Returns None if parsing fails (the error is
        printed, matching the original behavior).
    """
    try:
        soup = BeautifulSoup(text, "html.parser")
        container = soup.find("ul", attrs={"id": "img-list-outer"})
        anchors = container.find_all(
            "a", attrs={"class": "search-works-thumb relative"})
        links = []
        for anchor in anchors:
            href = anchor["href"]
            # Some results are redirect links of the form
            # ...?hturl=<real url>; keep only the real target.
            if "hturl" in href:
                href = href.split("hturl=")[1]
            links.append(href)
        return links
    except Exception as err:
        print(err)

def find_img_down_url(img_url):
    """Fetch an image detail page and return the direct image URL.

    nipic serves the image in either a ``works-img`` or (on older
    pages) a ``bigimg`` <img> element; each class is probed exactly
    once, instead of the original's duplicate ``find`` per branch.

    Parameters
    ----------
    img_url : str
        URL of the image's detail page.

    Returns
    -------
    str or None
        The ``src`` attribute of the image tag, or None (with an error
        message printed) when neither known tag is present.
    """
    page_text = getHtmlText(img_url)
    soup = BeautifulSoup(page_text, "html.parser")
    for css_class in ("works-img", "bigimg"):
        img_tag = soup.find("img", attrs={"class": css_class})
        if img_tag:
            return img_tag["src"]
    print("获取图片链接发生错误" + img_url)

def print_img_url(url_list):
    """Print the direct download URL of every detail-page link.

    Fixes two defects in the original: the parameter shadowed the
    builtin ``list``, and a None from ``find_img_down_url`` (scrape
    failure) crashed the string concatenation with a TypeError — such
    entries are now skipped (the helper already printed the error).

    Parameters
    ----------
    url_list : list[str]
        Detail-page URLs as returned by ``findImgUrl``.

    Returns
    -------
    int
        Always 0 (kept for compatibility with the original).
    """
    for idx, page_url in enumerate(url_list, start=1):
        down_url = find_img_down_url(page_url)
        if down_url is not None:
            print("第" + str(idx) + "张图片：" + down_url)
        time.sleep(0.5)  # be polite to the server between requests
    print('图片链接获取完毕')
    return 0
def down_img(url_list):
    """Download every image into the module-level *down* directory.

    Fixes in the original: the parameter shadowed the builtin
    ``list``; ``f.close()`` inside the ``with`` block was redundant;
    a None download URL (scrape failure) crashed ``requests.get``;
    the image GET had no timeout.

    Parameters
    ----------
    url_list : list[str]
        Detail-page URLs as returned by ``findImgUrl``.
    """
    for idx, page_url in enumerate(url_list, start=1):
        down_url = find_img_down_url(page_url)
        if down_url is None:  # helper already printed the error
            continue
        img = requests.get(down_url, timeout=10)
        img_name = down_url.split("/")[-1]  # last path component
        img_path = down + img_name          # local target path
        # 'with' closes the file automatically on exit
        with open(img_path, "wb") as f:
            f.write(img.content)
        print('图片' + str(idx) + '【' + img_name + '】保存成功')
        time.sleep(0.1)  # throttle requests slightly
    print("全部图像下载完成")


def main():
    """Search nipic for *q* and list the direct links of the results."""
    page_html = getHtmlText(url)
    detail_links = findImgUrl(page_html)
    print_img_url(detail_links)    # show the direct image links
    # down_img(detail_links)       # uncomment to save the files locally

if __name__ == "__main__":
    # Run only when executed as a script, not when imported as a module.
    main()