#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ----------------------------------------------------------
# -- This is the Files Download script
# -- 网络爬虫 - 文件下载
# ****************************
# Author: lmay.Zhou
# Blog: www.lmaye.com
# Email: lmay@lmaye.com
# Date: 2018/6/6 15:21 星期三
# ----------------------------------------------------------
import os
import re
import urllib.request
from bs4 import BeautifulSoup


def get_html(url):
    """Fetch *url* and return the response body decoded as UTF-8.

    Browser-like request headers are sent so the target site does not
    reject the request as an automated crawler.

    :param url: request URL (请求路径)
    :return: response body as a ``str``
    :raises urllib.error.URLError: if the request fails
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.7 Safari/537.36",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8"
    }
    request = urllib.request.Request(url, headers=headers)
    # Context manager guarantees the connection is closed even when
    # read()/decode() raises; the original leaked it on error.
    with urllib.request.urlopen(request) as page:
        return page.read().decode("utf-8")


def get_article_urls(request_url):
    """Collect article links from pages 1-6 of a paginated listing.

    :param request_url: URL template containing one ``{}`` placeholder
        for the page number.
    :return: list of ``href`` values taken from every
        ``<a class="goog">`` anchor across the six pages.
    """
    collected = []
    for page_no in range(1, 7):
        page_soup = BeautifulSoup(get_html(request_url.format(page_no)), "html.parser")
        collected.extend(anchor["href"] for anchor in page_soup.find_all("a", class_="goog"))
    return collected


def download(urls, out_path):
    """Download every URL in *urls* into the directory *out_path*.

    The destination file name is taken from the last path segment of
    each URL. The target directory is created if it does not exist.

    :param urls: iterable of absolute file URLs
    :param out_path: destination directory path
    """
    # 目录是否存在 — create the target directory if it is missing.
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    # 虚拟请求头部，防止爬虫批次下载拦截 — fake browser headers so the
    # batch download is not blocked. Hoisted out of the loop: it is
    # loop-invariant, the original rebuilt it every iteration.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.7 Safari/537.36",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8"
    }
    for ul in urls:
        # Compute the destination path once (original repeated it 3x).
        file_path = out_path + ul[ul.rfind("/"):]
        request = urllib.request.Request(ul, headers=headers)
        # ``with`` closes both the response and the file even on an
        # exception; the original leaked the file handle if write failed.
        with urllib.request.urlopen(request) as response, open(file_path, 'wb') as f:
            f.write(response.read())
        print("下载完成: {}".format(file_path))


if __name__ == "__main__":
    host = "http://www.tingroom.com"
    # Step 1: gather the article-page links from the paginated listing.
    article_urls = get_article_urls(host + "/flash/liuyinan/list_{}.html")
    # Step 2: on each article page, pick every link whose href
    # contains "swf" — these are the files to fetch.
    download_urls = []
    for path in article_urls:
        page = BeautifulSoup(get_html(host + path), "html.parser")
        download_urls.extend(
            link["href"] for link in page.find_all(href=re.compile("swf"))
        )
    # Step 3: download everything into the local target directory.
    download(download_urls, "D:\\新东方爵以词雄词汇-刘一男")
