

import os
import traceback
from urllib import request
from urllib.parse import urljoin

from bs4 import BeautifulSoup
# Create the folder that downloaded images are saved into.

# Output directory for downloaded images, created inside the current
# working directory.
path = os.getcwd()
new_path = os.path.join(path, u'暴走漫画')
# makedirs(exist_ok=True) is atomic with respect to "already exists" —
# it avoids the check-then-create race of the old isdir() + mkdir() pair.
os.makedirs(new_path, exist_ok=True)

def get_next(soup):
    """Return the href of the page's "next" link, or None if there is none.

    soup: a parsed BeautifulSoup document (anything exposing .find()).
    """
    try:
        next_url = soup.find("a", attrs={"class": "next"}).get("href")
        print("下一页：" + next_url)
        return next_url
    except AttributeError:
        # .find() returned None — no "next" anchor on the page.  The old
        # code called traceback.print_exc(e), which misuses the API (the
        # exception is not a `limit` argument and raises TypeError in
        # Python 3) and appended "None" to the message; format_exc()
        # returns the traceback as a string instead.
        print("出现异常" + traceback.format_exc())
        return None
    
def get_content(url):
    """Download the page at *url* and save every image found in its
    div.img-wrap containers into the 暴走漫画 directory.

    Bug fixes vs. the original:
    - the `with open(...)` write sat outside the for loop, so only the
      LAST image was ever saved, and an empty page raised NameError;
    - the filename slice `flink[-11:]` could contain '/' characters and
      make open() fail; os.path.basename is used instead;
    - a wrapper div without an <img> (or an <img> without src) no longer
      raises AttributeError.
    """
    content = request.urlopen(url).read()
    _soup = BeautifulSoup(content, "lxml")

    for girl in _soup.find_all('div', class_='img-wrap'):
        jokes = girl.find('img')
        if jokes is None:
            continue  # wrapper without an image — skip it
        link = jokes.get('src')
        if not link:
            continue
        print(link)
        img_data = request.urlopen(link).read()
        # Save each image as it is fetched, named after the URL's last
        # path component, into the folder created at module level.
        with open(os.path.join(u'暴走漫画', os.path.basename(link)), 'wb') as wf:
            wf.write(img_data)

   
    




def load_next(next_url):
    """Fetch *next_url*, parse it, and look up the link to the next page.

    Returns a dict {"next_url": href} where href is the next page's
    address, or the empty string when no "next" link was found.
    """
    document = BeautifulSoup(request.urlopen(next_url).read(), "lxml")
    href = get_next(document)
    return {"next_url": "" if href is None else href}
    

if __name__ == "__main__":
    # Crawl entry point: fetch the front page, discover the "next page"
    # link, then download the images from that page.
    start_url = "http://baozoumanhua.com/"
    next_page = load_next(start_url)["next_url"]
    # urljoin resolves both relative paths and absolute hrefs correctly;
    # the old `start_url + next_page` concatenation produced a broken URL
    # whenever the scraped href was already absolute.
    get_content(urljoin(start_url, next_page))
