
__author__='foxaaa 1234567'    

import os
import requests
import time
from selenium import webdriver
import zlib
from lxml import etree

# --- Browser setup: headless Chrome driven by Selenium ---
chrome_options = webdriver.ChromeOptions()
# 2 = block image loading inside the browser; actual image bytes are
# downloaded separately with requests, so the browser never needs them.
prefs={"profile.managed_default_content_settings.images":2}
chrome_options.add_experimental_option("prefs",prefs)
chrome_options.add_argument("--headless")

# chromedriver downloads: http://chromedriver.storage.googleapis.com/index.html
chrome_path='C:\\Program Files (x86)\\Google\\Chrome\\Application\\chromedriver'

# Destination directory for downloaded pictures (created on demand later).
PICTURES_PATH=os.path.join(os.getcwd(),'pictures\\')

# Default HTTP headers; the Referer is required to pass the site's
# hotlink-protection check when fetching image URLs.
headers ={  
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',
    'Referer': 'http://www.mmjpg.com/'
}

class Spider(object):
    """Crawl mmjpg.com listing pages and download every gallery's images.

    Workflow (see start()): build listing-page URLs -> collect gallery URLs
    from each listing page -> open each gallery in headless Chrome, expand
    all images, and download them into pictures/<gallery name>/N.jpg.
    """

    def __init__(self, page_num):
        # page_num: how many listing pages to crawl; str or int (coerced
        # with int() in get_page_urls).
        self.page_num = page_num
        # Page 1 is the site root; pages 2..N are appended later.
        self.page_urls = ['http://www.mmjpg.com/']
        self.girl_urls = []   # gallery URLs collected from listing pages
        self.girl_name = ''   # title of the gallery currently being processed
        self.pic_urls = []    # image URLs of the gallery currently being processed

    def get_page_urls(self):
        """Append listing-page URLs for pages 2..page_num to self.page_urls.

        BUG FIX: the original read the undefined module global ``page_num``
        instead of ``self.page_num``, and its elif branch evaluated
        ``int(page_url)`` on a name that was never defined (NameError when
        page_num == 1). Using the instance attribute fixes both.
        """
        for n in range(2, int(self.page_num) + 1):
            self.page_urls.append('http://www.mmjpg.com/home/' + str(n))

    def get_girl_urls(self):
        """Collect every gallery URL found on each listing page."""
        for page_url in self.page_urls:
            # FIX: send the module-level headers (previously unused) and a
            # timeout so a stalled connection cannot hang the crawl forever.
            html = requests.get(page_url, headers=headers, timeout=15).content
            selector = etree.HTML(html)
            self.girl_urls += selector.xpath('//span[@class="title"]/a/@href')

    def get_pic_urls(self):
        """Visit each gallery with headless Chrome and download its images.

        The page reveals all image URLs only after clicking the
        '//em[@class="ch all"]' element, hence the Selenium round-trip.
        """
        # FIX: renamed the 'dirver' typo and ensured the browser process is
        # always quit — the original leaked a Chrome instance on every run.
        driver = webdriver.Chrome(chrome_path, chrome_options=chrome_options)
        try:
            for girl_url in self.girl_urls:
                driver.get(girl_url)
                time.sleep(3)  # give the page time to render
                # Expand the gallery so every <img data-img=...> is in the DOM.
                driver.find_element_by_xpath('//em[@class="ch all"]').click()
                time.sleep(3)
                selector = etree.HTML(driver.page_source)
                self.girl_name = selector.xpath('//div[@class="article"]/h2/text()')[0]
                self.pic_urls = selector.xpath('//div[@id="content"]/img/@data-img')
                try:
                    self.download_pic()
                except Exception as e:
                    # Best-effort: one failed gallery must not abort the crawl.
                    print("{}保存失败".format(self.girl_name) + str(e))
        finally:
            driver.quit()

    def download_pic(self):
        """Save every URL in self.pic_urls as pictures/<girl_name>/N.jpg.

        Already-downloaded files are skipped without re-fetching them
        (the original issued the HTTP request before the existence check,
        wasting a full download per skipped image).
        """
        os.makedirs(PICTURES_PATH, exist_ok=True)
        girl_path = PICTURES_PATH + self.girl_name
        if os.path.isdir(girl_path):
            print("{}已存在".format(self.girl_name))
        else:
            os.makedirs(girl_path)
        for img_name, pic_url in enumerate(self.pic_urls, start=1):
            time.sleep(5)  # throttle requests so the site does not ban us
            print(pic_url)
            pic_path = girl_path + '\\' + str(img_name) + '.jpg'
            if os.path.isfile(pic_path):
                print('{}第{}张已存在'.format(self.girl_name, img_name))
                continue
            # FIX: reuse the shared headers (Referer satisfies hotlink
            # protection) instead of a duplicated inline dict.
            img_data = requests.get(pic_url, timeout=15, headers=headers)
            print('正在保存{}第{}张'.format(self.girl_name, img_name))
            # with-block closes the file; the original's bare `f.close`
            # (no parentheses) was a no-op.
            with open(pic_path, 'wb') as f:
                f.write(img_data.content)

    def start(self):
        """Run the full pipeline: listing pages -> gallery URLs -> images."""
        self.get_page_urls()
        self.get_girl_urls()
        self.get_pic_urls()

if __name__ == '__main__':
    # Entry point: ask how many listing pages to crawl, then run the spider.
    # The raw input string is accepted as-is; Spider coerces it with int().
    requested_pages = input("请输入页码：")
    Spider(requested_pages).start()

