import urllib.request
import os
from os.path import join
from bs4 import BeautifulSoup
import re

def _mkdir(newdir):
    """works the way a good mkdir should :)
        - already exists, silently complete
        - regular file in the way, raise an exception
        - parent directory(ies) does not exist, make them as well
    """
    if os.path.isdir(newdir):
        pass
    elif os.path.isfile(newdir):
        raise OSError("a file with the same name as the desired " \
                      "dir, '%s', already exists." % newdir)
    else:
        head, tail = os.path.split(newdir)
        if head and not os.path.isdir(head):
            _mkdir(head)
        #print "_mkdir %s" % repr(newdir)
        if tail:
            os.mkdir(newdir)

def _collect_tags(markup):
    """Return only the markup tags of *markup*, one tag per line.

    Mirrors the original character scan exactly: a '<' restarts the
    current buffer and a '>' flushes it, so any text between tags
    (and any trailing text) is discarded.
    """
    tags = []
    current = ""
    for ch in markup:
        current += ch
        if ch == ">":
            tags.append(current + "\n")
        elif ch == "<":
            current = "<"
    return "".join(tags)

def load_url(url):
    """Fetch *url* and return the response body as bytes.

    For URLs ending in ``.html`` the body is reduced to its markup
    tags, one per line (text nodes discarded); any other URL's body
    is returned verbatim.
    """
    request = urllib.request.Request(url)
    response = urllib.request.urlopen(request)
    data = response.read()
    print(url)
    # BUG FIX: the original used re.match(r".html", url), which anchors at
    # the START of the string ('.' eats one char, then literal "html" must
    # appear at position 1) -- it never matched a real http URL, so the
    # HTML branch was dead code.  The intent was "URL ends with .html".
    if url.endswith(".html"):
        return _collect_tags(data.decode("utf-8")).encode("utf-8")
    return data

def write_to_local(root, tar, data):
    """Write *data* (bytes) to ``<root>/<tar>`` and return that path.

    Missing parent directories are created automatically.
    """
    file_path = join(str(root), str(tar))
    parent = os.path.dirname(file_path)
    # stdlib os.makedirs replaces the hand-rolled _mkdir helper; the guard
    # keeps the original behavior when the path has no directory component.
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(file_path, "w+b") as f:
        f.write(data)
    return file_path

def read_from_local(root, tar):
    """Read and return the bytes of file ``<root>/<tar>``."""
    file_path = join(str(root), str(tar))
    # "rb" instead of "r+b": the file is only read, so there is no reason
    # to require write permission.  The with-statement closes the file;
    # the original's extra f2.close() after the block was redundant.
    with open(file_path, "rb") as f:
        return f.read()
# --- crawl driver: fetch the site root, extract local links, mirror them ---
url_base = "http://10eos.com"
url_sub  = "/"
url = url_base + url_sub
data_utf8 = load_url(url)

root = os.getcwd()
tar = "baidu4.html"
file_path = write_to_local(root, tar, data_utf8)

html = read_from_local(root, tar)

soup = BeautifulSoup(html, 'html.parser')
url_arr = []
for link in soup.find_all():
    # BUG FIX: the original assigned link.get('href') and then immediately
    # overwrote it with link.get('src'), so href links were never collected.
    # Use src as a fallback when href is absent instead.
    tmp = link.get('href') or link.get('src')
    # keep site-relative paths ("/..."), skipping fragment-style links
    if tmp and (re.match(r"/.+", tmp) and not re.match(r"/.+/#", tmp) and not re.match(r"/#.+", tmp)):
        ma = re.match(r"(/)(.+)", tmp)
        url_arr.append(ma.group(2))
# print the collected link list once, instead of on every append
print(url_arr)

for url_sub in url_arr:
    url = url_base + "/" + url_sub
    data = load_url(url)
    write_to_local(root, url_sub, data)
    print(data)
