# -*- coding: utf-8 -*-
import os
import time
import urllib.request

import requests
from lxml import etree

# Browser-like request headers so the site does not reject the crawler.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36',
}

save_path = './中国疆域地图/'  # directory where downloaded map images are saved


def FileSave(save_path, title, img_url):
    """Download *img_url* and store it as ``<save_path><title>.jpg``.

    save_path: destination directory (expected to end with a path separator).
    title:     base file name for the saved image.
    img_url:   URL of the image to download.

    Failures are reported but never raised, so one bad image does not
    abort the crawl.
    """
    # makedirs(exist_ok=True) also creates intermediate dirs and avoids the
    # exists/mkdir race of the check-then-create pattern.
    os.makedirs(save_path, exist_ok=True)
    file_name = save_path + title + ".jpg"
    try:
        time.sleep(0.2)  # throttle so we do not hammer the server
        urllib.request.urlretrieve(img_url, file_name)
    except (OSError, ValueError) as e:
        # OSError covers urllib.error.URLError/HTTPError and filesystem
        # errors; ValueError covers malformed URLs. Report what failed
        # instead of a bare "error".
        print("error: %s -> %s (%s)" % (img_url, file_name, e))


def Page_Level(myPage):  # first-level (listing) page
    """Parse a listing page and crawl every detail page linked from it.

    myPage: decoded HTML text of one listing page.
    """
    dom = etree.HTML(myPage)
    # Absolute URL for each detail-page link in the listing table.
    all_urls = ['http://ditu.ps123.net' + url
                for url in dom.xpath('//*[@id="main3"]/table/tr/td/a[2]/@href')]
    try:
        Page_Level2(all_urls)
    except Exception as e:
        # Report and continue with the next listing page instead of
        # silently swallowing the failure with a bare except.
        print("本二级页爬取有错误")
        print(e)


def Page_Level2(all_urls):
    """Visit each detail page and download the map image found on it.

    all_urls: absolute URLs of detail pages produced by Page_Level.
    """
    for page_url in all_urls:
        time.sleep(0.3)  # throttle between page fetches
        # The site serves GBK-encoded HTML.
        html = requests.get(page_url, headers=headers).content.decode("gbk")
        tree = etree.HTML(html)
        titles = tree.xpath('//*[@id="main"]/font[1]/text()')
        img_srcs = tree.xpath('//*[@id="main"]/img/@src')
        if not titles or not img_srcs:
            # Page layout did not match the expected xpaths; skip it.
            print("图片解析爬取的时候出现错误，url= " + page_url)
            continue
        full_img_url = 'http://ditu.ps123.net' + img_srcs[0]
        FileSave(save_path, titles[0], full_img_url)


def spider(url):
    """Fetch one listing page and crawl all maps it links to.

    url: absolute URL of a listing page.

    The fetch itself is inside the try block: previously a network error in
    requests.get would propagate out and abort the whole run in __main__.
    """
    try:
        myPage = requests.get(url, headers=headers).content.decode("gbk")  # site is GBK-encoded
        Page_Level(myPage)
    except Exception as e:
        # Report and move on to the next listing page.
        print("本一级页爬取有错误")
        print(e)


if __name__ == "__main__":
    start = time.time()
    print('start......')
    for number in range(1, 20):
        start_url = r"http://ditu.ps123.net/china/List_665_" + str(number) + ".html"
        spider(start_url)
        print('爬取完成第' + str(number) + '页')
    start_url = r"http://ditu.ps123.net/china/List_665.html"
    spider(start_url)
    print('end')
    end = time.time()
    print('爬虫运行时间为%.4f秒' % (end - start))
