# -*- coding: utf-8 -*-
import os

import requests
from lxml import etree
from selenium import webdriver

def save_imgs(path, count, imgs):
    """Write the body of a downloaded image to <path>/<count>.jpg.

    path  -- directory the image file is written into
    count -- 1-based page number, used as the file name
    imgs  -- a requests.Response whose .content holds the image bytes
    """
    file_name = os.path.join(path, str(count) + '.jpg')
    try:
        # 'wb' (not the original 'ab+'): re-running the scraper must
        # overwrite a page, not append a second JPEG onto the old file.
        with open(file_name, 'wb') as f:
            f.write(imgs.content)
    except OSError as e:
        # Best effort: report the failure but keep downloading other pages.
        print("error saving %s: %s" % (file_name, e))

def render_page(url, switch):
    """Load *url* in a headless PhantomJS browser and return the rendered HTML.

    For switch == 2 (爱问/iask) the viewer hides later pages behind a
    "load more" control, so we click it up to 15 times to force every
    page into the DOM before grabbing the page source.
    """
    driver = webdriver.PhantomJS()
    try:
        driver.get(url)
        if switch == 2:
            # Two possible positions for the "load more" control,
            # depending on the page layout.
            more_xpath = (
                '//*[@id="swfPreview"]/div/div[5]/div[2]/div/div[3]/a/p/span[2]'
                ' | //*[@id="swfPreview"]/div/div[5]/div[2]/div/div[4]/a/p/span[2]'
            )
            try:
                for _ in range(15):
                    driver.find_element_by_xpath(more_xpath).click()
            except Exception:
                # The control disappears once every page is loaded;
                # stop clicking and fall through to the page source.
                pass
        return driver.page_source
    finally:
        # quit() terminates the PhantomJS process; the original close()
        # only closed the window and could leak the browser process,
        # and was skipped entirely if an earlier call raised.
        driver.quit()

def analysis_pages(path, html, switch):
    """Extract every page image URL from the rendered *html* and save each image.

    path   -- directory passed through to save_imgs
    html   -- page source produced by render_page
    switch -- 2 selects the 爱问 (iask) DOM layout; anything else uses
              the Baidu Wenku layout
    """
    if switch == 2:
        img_nodes = etree.HTML(html).xpath('//*[@id="canvasWrapper"]/div/div/img')
    else:
        img_nodes = etree.HTML(html).xpath('//*[@id="flow-ppt-wrap"]/div/div/div/div/img')

    # Number pages from 1 in download order.
    for count, img in enumerate(img_nodes, start=1):
        # Some pages keep the real URL in data-src, others in src.
        img_url = img.xpath('@data-src') or img.xpath('@src')
        if not img_url:
            # Neither attribute present: skip instead of crashing on [0].
            continue
        # NOTE: the original reassigned the loop source name `imgs` here;
        # use a distinct name for the HTTP response to avoid the shadowing.
        response = requests.get(img_url[0])
        save_imgs(path, count, response)

def main():
    """Interactive entry point: pick the source site, fetch, and save pages."""
    print("您将爬取的内容来源于下列哪一网站:(请选择)")
    site_choice = int(input("1.百度文库     2.爱问共享资料\n"))
    target_url = input("请输入要爬取的网址：")
    rendered_html = render_page(target_url, site_choice)
    save_dir = 'E:\\百度文库下载'  # download directory
    analysis_pages(save_dir, rendered_html, site_choice)


if __name__ == '__main__':
    main()


