import os
import re
import sys
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup
# Local directory where downloaded images are saved.
path='/Users/zhaomeng/mv'
# Sequential counter used to name saved files (1.jpg, 2.jpg, ...).
num=1
content='http://www.youzi4.cc/mm/meinv/index_'   # base URL prefix of the paginated gallery index
# Scrape the concrete image links from each index page.
headers = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36',
        'Referer':'http://www.youzi4.cc/',
        'Cookie':'BUSER=95b22bacda2b256ad16f4716a2cb9642; UM_distinctid=16a524cb1d670f-0c42da4818f7b-366d7e02-13c680-16a524cb1d73c7; Hm_lvt_a5380fe98a4f8ada8d996e42fd889959=1556158919; CNZZDATA1263076883=1951933051-1556159993-%7C1556159993; CNZZDATA1272874627=1426906288-1556156669-%7C1556172870; security_session_verify=f1e3941ff7da077d11d404a2e6d363dd; Hm_lpvt_a5380fe98a4f8ada8d996e42fd889959=1556175773',
 }
# Suffix appended after the page number to form each index-page URL.
html='.html'
# Exclusive upper bound of the page loop below.
# NOTE(review): shadows the builtin max(), and range(1, 2) visits only
# page 1 — confirm whether more pages were intended.
max=2
# Crawl each gallery index page, collect every <img> source URL, and save
# the images to `path` as sequentially numbered .jpg files.
os.makedirs(path, exist_ok=True)  # ensure the download directory exists
for n in range(1, max):
    url = content + str(n) + html
    # timeout so a stalled connection cannot hang the script forever
    webdata = requests.get(url, headers=headers, timeout=10).text
    soup = BeautifulSoup(webdata, 'lxml')
    # Read the src attribute from each <img> tag directly instead of
    # regex-matching str(tag) — robust against quoting and attribute order.
    for img in soup.select("img"):
        src = img.get("src")
        if not src:
            continue  # <img> without a src attribute
        # Resolve relative / protocol-relative srcs against the page URL.
        imgurl = urljoin(url, src)
        if not imgurl.startswith(("http://", "https://")):
            continue  # skip data: URIs and other non-fetchable schemes
        try:
            pic = requests.get(imgurl, headers=headers, timeout=10).content
        except requests.RequestException:
            continue  # best-effort: one failed image must not abort the crawl
        with open(os.path.join(path, str(num) + '.jpg'), 'wb') as f:
            f.write(pic)
        num = num + 1
 
    

   
 