import os
import re
import urllib.request
from urllib.parse import urlparse
from os.path import join
from bs4 import BeautifulSoup

# Save fetched data to the local filesystem
def _mkdir(newdir):
    """Create *newdir*, making missing parent directories as well.

    - already exists as a directory: silently complete
    - a regular file is in the way: raise OSError
    - missing parents: created along the way
    """
    if os.path.isfile(newdir):
        raise OSError("a file with the same name as the desired " \
                      "dir, '%s', already exists." % newdir)
    if newdir:
        # os.makedirs replaces the hand-rolled recursion; exist_ok=True
        # keeps the "already exists, silently complete" contract.
        os.makedirs(newdir, exist_ok=True)
def write_to_local(root, tar, data):
    """Write *data* (bytes) to ``root/tar`` and return the file path.

    Parent directories are created as needed. A leading '/' on *tar* is
    stripped: ``os.path.join`` discards *root* entirely when the second
    argument is absolute, which would write outside the mirror tree.
    """
    file_path = os.path.join(str(root), str(tar).lstrip("/"))
    parent = os.path.dirname(file_path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    # "wb" is sufficient; the original "w+b" read capability was unused,
    # and the explicit close() after the with-block was redundant.
    with open(file_path, "wb") as f:
        f.write(data)
    return file_path
def now_write(root, base, dict):
    """Download every link in dict['url'] and save it under *root*.

    *dict* holds two parallel lists: 'url' (site-absolute links such as
    '/x/y') and 'dir' (the matching local relative paths 'x/y'), as
    produced by parse_now_page_for_download(). Files that already exist
    locally are skipped. *base* is the site prefix prepended to each link.
    """
    # NOTE(review): the parameter shadows the builtin ``dict``; name kept
    # so existing keyword callers keep working.
    for url, tar_dir in zip(dict['url'], dict['dir']):
        file_path = join(str(root), str(tar_dir))
        print(file_path)
        if not os.path.exists(file_path):
            data = load_url(base + url)
            write_to_local(root, tar_dir, data)

# Parse page addresses: sub-page links plus links to files this page depends on
def parse_for_subpage_link(html):
    """Collect sub-page links from *html*.

    Keeps href values that start with '/' followed by at least one more
    character, excluding in-page anchors ('/#...') and the /api... and
    /css... trees. The original code documented the '#' exclusion but
    never implemented it; the (?!#) lookahead adds it.
    """
    soup = BeautifulSoup(html, 'html.parser')
    url_arr = []
    for tag in soup.find_all():
        url_href = tag.get('href')
        if (url_href and re.match(r'/(?!#).+', url_href)
                and not re.match(r'/api.+', url_href)
                and not re.match(r'/css.+', url_href)):
            url_arr.append(url_href)
    return url_arr



def parse_now_page_for_download(html):
    """Extract downloadable-resource links from *html* (utf-8 bytes/str).

    Scans every tag's ``href`` and ``src`` attributes and keeps the
    site-absolute ones (starting with '/').

    Returns a dict of two parallel lists:
      'url' -- the links as found ('/path/to/file')
      'dir' -- the same links with the leading '/' stripped, usable as
               local relative paths.
    """
    soup = BeautifulSoup(html, 'html.parser')
    url_arr = []
    tar_arr = []
    for tag in soup.find_all():
        # href checked before src for each tag, matching the original order.
        for attr in ('href', 'src'):
            link = tag.get(attr)
            if link and re.match(r'/.+', link):
                url_arr.append(link)
                tar_arr.append(link[1:])  # drop the leading '/'
    return {'url': url_arr, 'dir': tar_arr}

# Parse the URL path into a local target path
def parse_url_for_dir(url, netloc):
    """Map *url* to the local file path derived from its URL path.

    Directory-style paths (ending in '/', or the bare site root where
    *url* equals *netloc*) get 'index.html' appended; any other path is
    returned unchanged.

    NOTE(review): for the bare-netloc case the parsed path is '' and the
    result is 'index.html' with no leading slash — callers must not rely
    on a uniform leading '/'.
    """
    path = urlparse(url).path
    # endswith('/') subsumes the original r'.+/$' regex and the
    # path == '/' special case; debug prints removed.
    if path.endswith('/') or url == netloc:
        path += "index.html"
    return path

# URL fetch helper
def load_url(url):
    """Fetch *url* and return the raw response body as bytes (undecoded)."""
    # Context manager closes the connection; the original leaked the
    # response object. The explicit Request wrapper added nothing.
    with urllib.request.urlopen(url) as response:
        return response.read()

# Top-level scraping entry point
def scrpt_web(url):
    """Mirror *url* into the current working directory.

    Saves the entry page, downloads every asset (href/src) it references,
    then fetches and saves each qualifying sub-page.
    """
    work_dir = os.getcwd()
    site = url

    # Fetch and store the entry page itself.
    page = load_url(url)
    write_to_local(work_dir, parse_url_for_dir(url, site), page)

    # Download every site-absolute resource the page references.
    now_write(work_dir, site, parse_now_page_for_download(page))

    # Fetch each sub-page and store it as well.
    sub_links = parse_for_subpage_link(page)
    print(sub_links)
    for link in sub_links:
        sub_page = load_url(site + link)
        write_to_local(work_dir, parse_url_for_dir(link, site), sub_page)

# Guard the entry point so importing this module does not trigger a
# network scrape as a side effect.
if __name__ == "__main__":
    scrpt_web("http://10eos.com")


