import requests
from bs4 import BeautifulSoup
import os

class Meizi:
    """Scrape image galleries from rosmms.com and save them to local folders.

    Instantiating the class immediately crawls every index page, creates one
    folder per gallery under ``/Users/agon/Desktop/MeiziTu/`` and downloads
    all images into it.
    """

    def __init__(self):
        # Browser User-Agent header (most sites reject requests without one).
        self.headers = {'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1"}
        self.baseUrl = "https://www.rosmms.com"
        # The first index page has no numeric suffix; pages 1-21 do.
        self.firstNavList = ['https://www.rosmms.com/rosimm/index.htm']
        for i in range(1, 22):
            self.firstNavList.append('https://www.rosmms.com/rosimm/index_{}.htm'.format(i))

        self.navList = []
        for index_url in self.firstNavList:
            start_html = self.request(index_url)
            # 'lxml' is the parser backend BeautifulSoup should use.
            Soup = BeautifulSoup(start_html.text, 'lxml')
            # Each gallery is linked from a <p class="p-title"><a> element.
            all_p = Soup.find_all('p', class_='p-title')
            for p in all_p:
                a = p.a  # first <a> inside the title paragraph
                print(a['href'] + a.text)
                self.mkdir(a.text)
                print('开始保存图片 ---文件名：', a.text)
                page_url = self.baseUrl + a['href']
                self.getSourcePage(page_url)

    def getSourcePage(self, url):
        """Collect all pagination URLs of one gallery, then download each page.

        ``url`` is the gallery's first page; sibling pages are found in the
        ``div.page.page_c`` pagination block and resolved relative to it.
        """
        self.navList = []
        html = self.request(url)
        # Base URL is everything up to and including the last '/'.
        # (The original rebuilt this with a loop variable named `str`,
        # shadowing the builtin.)
        now_base_url = url.rsplit('/', 1)[0] + '/'

        soup = BeautifulSoup(html.text, 'lxml')
        navAllList = soup.find('div', class_='page page_c').find_all('a')
        for a in navAllList:
            self.navList.append(now_base_url + a['href'])
        self.navList.append(url)
        self.navList = set(self.navList)  # de-duplicate page URLs
        for nav_url in self.navList:
            print("开始下载图片-------url是：", nav_url)
            self.downImg(nav_url)

    def downImg(self, page_url):
        """Parse one gallery page and save every image found on it."""
        img_html = self.request(page_url)
        img_list = BeautifulSoup(img_html.text, 'lxml').find('p', id='imgString').find_all('img')
        for img in img_list:
            self.save(img['src'])

    def save(self, img_url):
        """Download a single image into the current working directory."""
        name = img_url.split('/')[-1]
        print('正在保存：', name)
        img = self.request(img_url)
        # BUG FIX: open with 'wb', not 'ab' — append mode corrupted files on
        # re-runs by appending duplicate bytes.  `with` guarantees the handle
        # is closed even if the write raises.
        with open(name, 'wb') as f:
            f.write(img.content)

    def mkdir(self, path):
        """Ensure the gallery folder exists and make it the working directory.

        Returns True if the folder was created, False if it already existed.
        """
        path = path.strip()
        mainPath = "/Users/agon/Desktop/MeiziTu/"
        target = os.path.join(mainPath, path)
        isExists = os.path.exists(target)
        if not isExists:
            print(u'建了一个名字叫做', path, u'的文件夹！')
            os.makedirs(target)
        else:
            print(u'名字叫做', path, u'的文件夹已经存在了！')
        # BUG FIX: chdir unconditionally.  The original only changed
        # directory when the folder was freshly created, so images of an
        # already-existing gallery were saved into whatever directory was
        # current at the time.
        os.chdir(target)
        return not isExists

    def request(self, url):
        """GET ``url`` with the browser headers and return the Response.

        A timeout keeps a single stalled connection from hanging the whole
        crawl; the site serves GBK-encoded pages, so force that decoding.
        """
        content = requests.get(url, headers=self.headers, timeout=30)
        content.encoding = 'gbk'
        return content

if __name__ == "__main__":
    # Constructing Meizi kicks off the full crawl (see Meizi.__init__);
    # the guard prevents an accidental scrape when this module is imported.
    Mz = Meizi()