
import base64
from bs4 import BeautifulSoup
import requests
import json
import urllib

def main():
    """Decode a base64-encoded image from disk, write it out as a JPEG,
    then run the two Sogou scraping helpers.

    NOTE(review): all paths are hard-coded Windows paths — confirm they
    exist before running.
    """
    with open("D:\\project\\pytest\\base64png.txt", "r") as f:
        imgdata = base64.b64decode(f.read())
    # Context manager replaces the original manual open()/close() pair,
    # so the file is closed even if the write raises.
    with open('D:\\project\\pytest\\1.jpg', 'wb') as out:
        out.write(imgdata)
    getImg()
    getSogouImag('壁纸', 3, 'D:\\project\\pytest\\')

def getSogouImag(category, length, path):
    """Download `length` recommended images for `category` from Sogou Pics.

    Args:
        category: category name, inserted directly into the query string.
        length:   number of image records to request.
        path:     directory prefix (must already end with a separator);
                  files are written as 0.jpg, 1.jpg, ...
    """
    # %E5%85%A8%E9%83%A8 is the URL-encoded tag "全部" ("all").
    url = ('http://pic.sogou.com/pics/channel/getAllRecomPicByTag.jsp'
           '?category=' + category
           + '&tag=%E5%85%A8%E9%83%A8&start=0&len=' + str(length))
    response = requests.get(url)
    # A KeyError here means the API response changed shape; let it propagate.
    items = json.loads(response.text)['all_items']
    imgs_url = [item['bthumbUrl'] for item in items]
    # enumerate() replaces the original hand-rolled `m = m + 1` counter.
    for m, img_url in enumerate(imgs_url):
        print('***** ' + str(m) + '.jpg *****' + '   Downloading...')
        urllib.request.urlretrieve(img_url, path + str(m) + '.jpg')
    print('Download complete!')

def getImg():
    """Fetch the Sogou wallpaper recommendation page and print all <img> tags."""
    page = requests.get('http://pic.sogou.com/pics/recommend?category=%B1%DA%D6%BD')
    parsed = BeautifulSoup(page.text, 'html.parser')
    print(parsed.select('img'))

# if __name__=="__main__":
    # main()


import urllib.request  #导入用于打开URL的扩展库模块
import urllib.parse
import re    #导入正则表达式模块

def open_url(url):
    """GET `url` with a browser-like User-Agent and return the body decoded as UTF-8."""
    request = urllib.request.Request(url)
    # Spoof a desktop Chrome UA so the server does not reject the script.
    request.add_header(
        'User-Agent',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36')
    # Fetch the page and decode its raw bytes into text.
    page = urllib.request.urlopen(request)
    html = page.read().decode('utf-8')
    return html

def get_img(html):
    """Extract every <img class src="..."> URL from `html` and download each file.

    Bug fixed: the output path was missing the directory separator after
    'imgs', so files were written into D:\\project\\pytest\\ with an 'imgs'
    filename prefix instead of into the imgs directory.

    Args:
        html: page source to scan for image URLs.
    """
    # ([^"]+) captures everything up to the closing quote of the src attribute.
    pattern = r'<img class src="([^"]+)"'
    imglist = re.findall(pattern, html)

    for each in imglist:
        # Last path segment of the URL becomes the local file name.
        filename = each.split("/")[-1]
        photo = urllib.request.urlopen(each)
        data = photo.read()
        # NOTE(review): '.png' is appended even though `filename` may
        # already carry an extension — confirm this is intended.
        with open('D:\\project\\pytest\\imgs\\' + filename + '.png', 'wb') as f:
            f.write(data)

def requests_view(response):
    """Write `response`'s body to tmp.html and open it in the default browser.

    A <base href=...> tag is injected right after <head> so relative links
    in the saved page resolve against the original request URL.

    Args:
        response: a requests.Response (needs `.url` and `.content`).
    """
    import webbrowser
    requests_url = response.url
    base_url = '<head><base href="%s">' % (requests_url)
    base_url = base_url.encode('utf-8')
    content = response.content.replace(b"<head>", base_url)
    # Context manager replaces the original manual open()/close() pair.
    with open('tmp.html', 'wb') as tem_html:
        tem_html.write(content)
    webbrowser.open_new_tab("tmp.html")
# Browser-like User-Agent so scraped sites serve normal HTML to the script.
headers = {"User-Agent":'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}
# Hard-coded HTTP/HTTPS proxies — NOTE(review): likely stale; verify they are still reachable.
proxies = {'https':"114.215.107.94:60443",'http':"211.147.67.150:80"}


# This module can be imported by other modules, or executed directly as a script.
if __name__=='__main__':
  # Target page whose images will be scraped.
  url="http://aa.data-image.com/#/ComicShow/?chapter_id=3359&comic_id=111"

  # Fire a probe request through the configured proxies and show it in the browser.
  requests_view(requests.get("http://www.spbeen.com/tool/request_info/", headers=headers, proxies=proxies))
  # Fetch `url` with open_url(), then feed the returned HTML to get_img().
  get_img(open_url(url))