#!/usr/bin/env python
# -*- coding: utf-8 -*-

import datetime
import json
import os
import re
import shutil
import socket
import string
import sys
import time
import urllib.error
import urllib.parse
import urllib.request
from pathlib import Path

import selenium.webdriver.phantomjs
import selenium.webdriver.support.ui as ui
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys

save_dir = 'c:/google/'

def SaveImage(link, word):
    """Download the image at *link* into ``save_dir/word``.

    The file name is taken from the last segment of the URL path,
    URL-decoded and stripped of characters that are illegal in Windows
    file names.  Files that already exist are skipped silently; download
    errors are logged and the image is abandoned.  Sleeps 3 seconds after
    each attempt to throttle requests.

    link: direct URL of the image.
    word: search keyword, used as the sub-directory name (must exist).
    """
    word_dir = Path(save_dir) / word
    # Running tally used only for the progress message: existing files + 1.
    count = len(os.listdir(str(word_dir))) + 1
    name = link.split('/')[-1]
    # Decode twice: some result links are double-percent-encoded.
    name = urllib.parse.unquote(urllib.parse.unquote(name))
    # Strip characters that are invalid in Windows file names.
    name = re.sub(r'[|\\/><:?"*]', '', name)
    if not name.endswith(('.jpg', '.png', '.gif', '.jpeg')):
        name += '.jpg'
    filename = word_dir / name

    if filename.exists():
        return
    try:
        urllib.request.urlretrieve(link, str(filename))
    except urllib.error.HTTPError as urllib_err:
        print(urllib_err)
    except Exception as err:
        # Any other failure (network, disk, bad URL): log and give up on
        # this one image rather than aborting the whole crawl.
        print(err)
        print("产生未知错误，放弃保存:%s" % link)
    else:
        print("已下载" +word +': '+ str(count) + "张图")
    time.sleep(3)  # throttle so the server does not block us

def FindLink(InputData,word):
    """Crawl Google Image search results for a keyword and save every image.

    For the bare query plus each suffix (a-z, then 0-499) the result page
    is loaded, the "show more results" button clicked and the page scrolled
    repeatedly to force lazy-loading, then every ``.rg_meta`` JSON blob is
    parsed and its original-image URL (``'ou'``) handed to ``SaveImage``.

    InputData: URL-quoted query string inserted into the search URL.
    word: raw keyword, used as the sub-directory name under ``save_dir``.
    """
    driver = webdriver.Chrome('C:/Tools/chromedriver.exe')
    url = 'https://www.google.com.hk/search?q={0}{1}&newwindow=1&safe=strict&source=lnms&tbm=isch&sa=X&ved=0ahUKEwir1MTc6fnWAhWJjJQKHXfECE4Q_AUICigB&biw=1440&bih=769'
    try:
        for suffix in (['']+list(string.ascii_lowercase) + list(range(0,500))):
            driver.get(url.format(InputData, str(suffix)))
            # Click "show more" (if present) and scroll to the bottom,
            # repeatedly, so the page lazy-loads as many results as possible.
            for i in range(14):
                try:
                    formore = driver.find_element_by_xpath('//*[@id="smb"]')
                    formore.click()
                except Exception:
                    # Button not rendered (yet) — nothing to click.
                    pass
                time.sleep(5)
                js = "var q=document.documentElement.scrollTop=%d" % 100000
                driver.execute_script(js)
            soup = BeautifulSoup(driver.page_source, 'lxml')
            # Create the keyword folder (and save_dir itself if missing).
            os.makedirs(save_dir + word, exist_ok=True)
            for http in soup.select('.rg_meta'):
                # The blob is JSON: parse it safely. The original code used
                # eval(), which executes arbitrary scraped page content.
                try:
                    link = json.loads(http.contents[0])['ou']
                except (ValueError, KeyError):
                    continue  # malformed metadata entry — skip it
                SaveImage(link,word)
    finally:
        # quit(), not close(): close() leaves the webdriver process running.
        driver.quit()

if __name__=='__main__':
    # Previously crawled: '明星','人物','动物','卡通','桌子',
    search_terms = ['树木','杂志','设备']
    for term in search_terms:
        # The keyword must be URL-quoted before insertion into the query.
        FindLink(urllib.parse.quote(term), term)
    
