import os
import re
import urllib.request
from urllib.parse import urlparse
from bs4 import BeautifulSoup
def load_url(url):
    """Fetch *url* and return the raw response body as bytes.

    A browser-like User-Agent header is sent because some sites reject
    requests carrying urllib's default agent string.

    Args:
        url: absolute URL to download.

    Returns:
        The undecoded response body (bytes).

    Raises:
        urllib.error.URLError: on network or HTTP failure.
    """
    print("url==", url)
    request = urllib.request.Request(url)
    request.add_header("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36")
    # Use a context manager so the connection is closed deterministically
    # (the original leaked the response object).
    with urllib.request.urlopen(request) as response:
        return response.read()

def parseurl_to_subdir(url):
    """Map *url*'s path component to a relative file path for local storage.

    Directory-style paths (empty, bare '/', or ending in '/') get the
    default document name 'index.html' appended; paths that already look
    like concrete files (contain a dot) are kept as-is; extension-less
    paths are treated as directories with an implied default document.

    Bug fixed: a path of exactly '/' previously fell through to the last
    branch and produced '//index.html' (double slash).

    Args:
        url: absolute URL.

    Returns:
        A path string starting with '/', suitable to append to a local
        base directory.
    """
    subdir = urlparse(url).path
    if subdir in ('', '/'):
        # Bare host or root path: use the site's default document.
        return '/index.html'
    if subdir.endswith('/'):
        # Directory URL: append the default document name.
        return subdir + 'index.html'
    if re.match(r'.+\..+', subdir):
        # Looks like a concrete file (has an extension-style dot).
        return subdir
    # Extension-less path: treat as a directory omitting its default file.
    return subdir + '/index.html'

def write_to_local(newdir, data):
    """Write *data* (bytes) to the file path *newdir*, creating parents.

    Args:
        newdir: destination file path.
        data: raw bytes to write.

    Raises:
        OSError: if a parent path component exists as a regular file.
    """
    parent = os.path.dirname(newdir)
    if parent:
        # stdlib replacement for the hand-rolled recursive _mkdir;
        # exist_ok makes repeated writes into the same tree a no-op.
        os.makedirs(parent, exist_ok=True)
    # The original called f2.close() again after the with-block exited;
    # the context manager alone is the correct idiom.
    with open(newdir, "wb") as fh:
        fh.write(data)
def _mkdir(newdir):
    if os.path.isdir(newdir):
        pass
    elif os.path.isfile(newdir):
        raise OSError("a file with the same name as the desired " \
                      "dir, '%s', already exists." % newdir)
    else:
        head, tail = os.path.split(newdir)
        if head and not os.path.isdir(head):
            _mkdir(head)
        #print "_mkdir %s" % repr(newdir)
        if tail:
            os.mkdir(newdir)
# Single-page download module. Input: url, locdir. Output: page written to local disk.

def scrpt_single_web(url, locdir):
    """Download the page at *url* and mirror it under *locdir*.

    Existing local files are never overwritten.

    Returns:
        The raw page bytes, so callers can parse them for links.
    """
    # Fetch the raw document bytes.
    content = load_url(url)
    # Derive the relative on-disk path from the URL's path component.
    subpath = parseurl_to_subdir(url)
    destination = locdir + subpath
    # Save only pages we have not mirrored before.
    if not os.path.exists(destination):
        write_to_local(destination, content)
    return content


# Extract the list of candidate sub-URLs from a downloaded page.
def parse_for_link(html):
    """Collect href/src attribute values from an HTML document.

    For each tag, href takes precedence over src. Protocol-relative
    links ('//host/...') are skipped.

    Args:
        html: HTML document (bytes or str) to parse.

    Returns:
        List of link strings in document order.
    """
    soup = BeautifulSoup(html, 'html.parser')
    links = []
    for tag in soup.find_all():
        # href wins when both attributes are present (or href falls back
        # to src when absent/empty), matching the original branch order.
        target = tag.get('href') or tag.get('src')
        if target and not re.match(r'//.+', target):
            links.append(target)
    return links
# Filter the extracted links down to the ones we actually want to crawl.
def select_link(link_list, url):
    """Return the subset of *link_list* pointing at *url*'s host.

    Root-relative links ('/path') are absolutized against the base
    host; absolute links are kept only when their host matches.

    Args:
        link_list: iterable of raw link strings.
        url: the page URL the links were extracted from.

    Returns:
        List of absolute same-host URLs.
    """
    base = urlparse(url)  # split out scheme and host of the source page
    host = base.scheme + "://" + base.netloc
    kept = []
    for candidate in link_list:
        if candidate.startswith('/'):
            # Root-relative: absolutize against the base host.
            kept.append(host + candidate)
        elif urlparse(candidate).netloc == base.netloc:
            # Already absolute and on the same host.
            kept.append(candidate)
    return kept


# --- Script entry: mirror the tutorials start page and its same-host links ---
# NOTE(review): runs at import time; consider an `if __name__ == "__main__":`
# guard if this module is ever imported elsewhere.
locdir = os.getcwd()
url ="https://eggjs.org/zh-cn/tutorials/"
# Download the start page; its raw bytes are reused for link extraction.
main_file=scrpt_single_web(url,locdir)
# Gather every href/src from the page, then keep only same-host links.
link_list=parse_for_link(main_file)
selected_link=select_link(link_list,url)

# Mirror each selected page next to the start page (one level deep only).
for link in selected_link:
    scrpt_single_web(link,locdir)



















