# -*- coding: utf-8 -*-
import os
import time
import urllib.error
import urllib.request

import requests
from lxml import etree
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

# Browser-like User-Agent so the site does not reject the plain requests calls.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36',
}

save_path = './中国地图/'  # base directory for downloaded images ("China map")
chrome_options = Options()
# Run Chrome headless (no visible browser window).
chrome_options.add_argument('--headless')
# Single shared driver reused for every page; shut down via driver.quit() in __main__.
# NOTE(review): executable_path= / chrome_options= are Selenium 3 style and removed
# in Selenium 4 (use service= / options=) — confirm the pinned selenium version.
driver = webdriver.Chrome(executable_path="/Users/lankun/software/chromedriver", chrome_options=chrome_options)


def FileSave(save_path, name, img_url):
    """Download the image at img_url and store it as save_path/name.

    Creates save_path (including parents) if it does not exist.  Download
    errors are reported but not raised, so one broken image does not abort
    the whole crawl.
    """
    # exist_ok avoids the exists()-then-makedirs() race of the original.
    os.makedirs(save_path, exist_ok=True)
    try:
        # urlretrieve streams the resource straight to disk.
        urllib.request.urlretrieve(img_url, os.path.join(save_path, name))
    except (urllib.error.URLError, OSError) as e:
        # Keep the original best-effort semantics, but say WHAT failed and why
        # instead of a bare "error" from a bare except.
        print("error downloading %s: %s" % (img_url, e))


def parse_img(page_url):
    """Render page_url in the shared headless browser and return the final
    (post-redirect) URL of the map image embedded in the page.

    Raises IndexError if the page has no image under the #maps container;
    spider() relies on that to detect the end of a listing.
    """
    driver.get(page_url)
    time.sleep(2)  # crude wait for the page's JS/late content to load
    html = driver.page_source
    # BUG FIX: the original called driver.close() here.  Closing the shared
    # driver's only window ends the session, so every parse_img() call after
    # the first raised and the multi-page loop could never run.  The driver
    # is torn down once, via driver.quit(), in __main__.

    dom = etree.HTML(html)
    # The first <img> inside #maps carries the (site-relative) image path.
    src_list = dom.xpath('//*[@id="maps"]/p[1]/img/@src')
    img_url = "http://m.onegreen.net" + src_list[0]
    return get_redirect_url(img_url)


def get_redirect_url(origin_url):
    """Follow HTTP redirects from origin_url and return the final URL.

    The site answers the scraped image path with a redirect to the real
    file; requests follows it automatically and response.url is the
    destination the image actually lives at.
    """
    # timeout= keeps one dead/slow server from hanging the whole crawl
    # forever (requests has NO default timeout).
    response = requests.get(origin_url, headers=headers, timeout=10)
    return response.url


def spider(title, page_url):
    """Crawl every page of one map listing and save each page's image.

    Page 1 lives at page_url; page N (N >= 2) lives at page_url with
    ".html" replaced by "_N.html".  The first page that fails to parse is
    taken as "no more pages" and ends the loop.
    """
    try:
        img_url_1 = parse_img(page_url)
        FileSave(save_path + title, title + ".jpg", img_url_1)
    except Exception:
        # Narrowed from a bare except: so Ctrl-C / SystemExit still work.
        print(title + " 第一页，解析图片地址出错")
    print(title + ' 爬取完成第1页')
    for number in range(2, 20):
        try:
            url = page_url.replace(".html", "_") + str(number) + ".html"
            img_url_more = parse_img(url)
            FileSave(save_path + title, str(number) + ".jpg", img_url_more)
            print(title + ' 爬取完成第' + str(number) + '页')
        except Exception:
            # A parse failure here means the numbered page does not exist,
            # i.e. we ran past the last page — normal termination.
            print(title + " 页面全部爬取完成，共爬取页数= " + str(number - 1))
            break


if __name__ == "__main__":
    start = time.time()
    print('start......')
    base_url = "http://m.onegreen.net/maps/List/List_787.html"
    myPage = requests.get(base_url, headers=headers).content.decode("gbk")  # 返回网页html源码
    dom = etree.HTML(myPage)
    all_title = dom.xpath('//*[@id="main3"]/table/tr/td/a[2]/@title')
    all_page = dom.xpath('//*[@id="main3"]/table/tr/td/a[2]/@href')

    for title, page in zip(all_title, all_page):
        page_url = "http://m.onegreen.net" + page
        spider(title, page_url)
    driver.quit()
    print('end')
    end = time.time()
    print('爬虫运行时间为%.4f秒' % (end - start))
