# coding=utf-8
import os
import re
import time
import urllib.parse
import urllib.request
import traceback


def searchall(content, *pattern):
    """
    Run every regular expression in *pattern* against ``content`` and
    return all matches collected into one flat list.
    """
    matches = []
    for expr in pattern:
        matches.extend(re.findall(expr, content))
    return matches


def url2request(url, headers=None):
    """
    Build a ``urllib.request.Request`` for ``url``.

    :param url: target url
    :param headers: header dict to send; when None, a default
                    browser-like header set is used
    :return: the prepared Request object
    """
    if headers is None:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)'
                          ' Chrome/60.0.3112.90 Safari/537.36',
            'charset': 'utf-8',
        }
    return urllib.request.Request(url, headers=headers)


def getresourcefromurl(url):
    """
    get the resource path in url
    """
    resource = url.split("/")[-1] if len(url.split("/")) > 0 else ""
    if resource.find("?"):
        resource = resource.split("?")[0]
    return resource


def getmaindomain(url: str):
    """
    Return the second dot-separated component of ``url`` — e.g. "ugirls"
    for "http://www.ugirls.com/..." — which the crawler later uses as a
    pattern marking the site's own domain.

    :param url: url to inspect
    :return: the main-domain token, or ``url`` unchanged when it has
             fewer than two dot-separated parts
    """
    parts = url.split('.')
    # BUG FIX: str.split never returns an empty list, so the original
    # `len(urlsplit) == 0` guard was dead code, and a dot-free url made
    # `urlsplit[1]` raise IndexError. Guard on < 2 instead.
    if len(parts) < 2:
        return url
    return parts[1]


def formarturl(url: str):
    """
    Strip a single trailing "/" from ``url``.

    :param url: url to normalise; may be None
    :return: '' when url is None, otherwise url without its trailing "/"
    """
    if url is None:
        return ''
    if url.endswith("/"):
        return url[:-1]
    return url


def getbaseurl(url: str):
    """
    Return the base url (everything up to and including the last "/").

    :param url: url to reduce
    :return: ``url`` unchanged when it does not look like an http(s) url
             or already ends with "/"; otherwise the url with its last
             path segment replaced by a trailing "/"
    """
    # BUG FIX: searchall() always returns a list (possibly empty), never
    # None, so the original `is None` test could never fire and non-http
    # urls were wrongly truncated. Test for an empty match list instead.
    if not searchall(url, r'http.?://[^\s].*') or url.endswith("/"):
        return url
    return formarturl("/".join(url.split("/")[:-1])) + "/"


def __geturlfromhtmlattr(content: str, attr: str, originalurl):
    """
    Extract the first url appearing in an 'attr="url"' pair inside
    ``content`` and resolve it against ``originalurl``.

    :param content: html fragment containing the attribute
    :param attr: attribute name to look for (e.g. "href" or "src")
    :param originalurl: url of the page the fragment came from
    :return: an absolute url, or '' when no usable url was found or it
             does not match the module-level __maindomain pattern
    """
    hits = searchall(content, r'(?<=%s=\")[^\s]*?(?=\")' % attr)
    if not hits:
        return ''
    # normalise backslashes to forward slashes
    candidate = replace(hits[0], r"\\", "/")
    # drop blank urls and anything outside the crawled main domain
    if candidate.strip() == "" or len(re.findall(__maindomain, candidate)) == 0:
        return ''
    if candidate.startswith("http"):
        return candidate
    if candidate.startswith(r"/"):
        return formarturl(getbaseurl(originalurl)) + candidate
    return formarturl(getbaseurl(originalurl)) + "/" + candidate


def getpageurl(content: str, originalurl: str):
    """
    Collect the urls from every <a> tag's 'href' attribute in ``content``,
    resolved against ``originalurl`` and deduplicated.

    :param content: html page source
    :param originalurl: url of the page the content came from
    :return: list of unique absolute urls (order not guaranteed)
    """
    collected = []
    for fragment in searchall(content, '<a.*? href[^\s\(\(]*\"'):
        resolved = __geturlfromhtmlattr(fragment, "href", originalurl)
        if resolved != '':
            collected.append(resolved)
    unique = list(set(collected))
    for entry in unique:
        print(entry)
    print(len(unique))
    return unique


def getimgurl(content: str, originalurl: str):
    """
    Collect jpg and png image urls from <a href="..."> and
    <img src="..."> attributes in ``content``.

    :param content: html page source
    :param originalurl: url of the page the content came from
    :return: list of unique absolute image urls
    """
    resultlist = []
    # BUG FIX: the docstring promises jpg *and* png but the original
    # patterns only matched "jpg"; accept both extensions.
    herf_urls = searchall(content, '<a.*? href[^\s]*?(?:jpg|png)\"')
    for res in herf_urls:
        res = __geturlfromhtmlattr(res, "href", originalurl)
        if res != '':
            resultlist.append(res)
    img_urls = searchall(content, '<img.*? src[^\s]*?(?:jpg|png)\"')
    for res in img_urls:
        res = __geturlfromhtmlattr(res, "src", originalurl)
        if res != '':
            resultlist.append(res)
    result = list(set(resultlist))
    print("-------------------imgurls--------------------------------")
    for item in result:
        print(item)
    return result


def replace(src, sub, rplstr=""):
    """
    Substitute every match of the regex ``sub`` in ``src`` with
    ``rplstr`` (deletes the matches by default).
    """
    pattern = re.compile(sub)
    return pattern.sub(rplstr, src)


def getpagecontent(url: str):
    """
    Fetch ``url`` and return the decoded page source.

    Only urls matching the module-level ``__maindomain`` pattern are
    requested; anything else is treated as an outer-domain url.

    :param url: page url to download
    :return: page source as str; '' for outer urls or request failures
    """
    # skip urls that do not belong to the crawled site
    if len(re.findall(__maindomain, url)) == 0:
        print("outer url %s" % url)
        return ''
    print("inner url %s" % url)
    req = url2request(url)
    try:
        with urllib.request.urlopen(req) as resp:
            raw = resp.read()
            # sniff the declared charset out of the raw bytes' repr;
            # fall back to utf-8 when no charset= marker is present
            marker = re.search('charset=.*?>', str(raw))
            if marker is None:
                charset = "utf-8"
            else:
                charset = re.search('(?<=charset=)[\d\w\-_]*', marker.group()).group()
                print(charset)
            return raw.decode(charset)
    except Exception as e:
        print("exception occurs in method getpagecontent()  ", e, "url>> " + req.get_full_url())
        return ""


def downloadimg(imgurl, fname=None, dowonloaddir='py_download', min_size=100 * 1024):
    """
    Download the image at ``imgurl`` into ``dowonloaddir``.

    :param imgurl: url of the image
    :param fname: file name to save as; derived from the url when None
    :param dowonloaddir: target directory, created when missing
    :param min_size: images of at most this many bytes are skipped
    :return: True when the image was saved, False otherwise
    """
    if not os.path.exists(dowonloaddir):
        print("downloaddir", dowonloaddir)
        os.makedirs(dowonloaddir)
    try:
        with urllib.request.urlopen(url2request(imgurl)) as f:
            cont = f.read()
            fname = getresourcefromurl(imgurl) if fname is None else fname
            imgsize = len(cont)
            print("img %s size is %s bytes" % (fname, str(imgsize)))
            if imgsize > min_size:
                with open(formarturl(dowonloaddir) + "/" + fname, "wb") as img:
                    img.write(cont)
                return True
            return False
    except Exception as e:
        print("exeception in download img:%s,\n error message:%s" % (imgurl, e))
        # BUG FIX: traceback.format_exc() takes no exception argument —
        # its optional parameter is a frame *limit*, so passing `e` raised
        # a TypeError and masked the original traceback.
        print(traceback.format_exc())
        return False


# Module-level crawl state shared by recursiverequest()/logresult().
__recursiveurllist = []  # every url discovered so far (may contain duplicates)
__alreadyrequest = []  # urls that have already been requested
__downloadimg = []  # image urls that were actually saved to disk
__maindomain = None  # pattern marking the crawled site's domain; assigned in __main__


def logresult():
    """
    Write the crawl results (requested urls and downloaded image urls)
    to catch.log, overwriting any previous log.
    """
    with open("catch.log", "w") as log:
        log.writelines([
            "already request urls:" + str(__alreadyrequest),
            "\n\n",
            "download imgs url:" + str(__downloadimg),
        ])


def recursiverequest(url: str, dldfolder=None, deepth=20):
    """
    Crawl ``url`` recursively: download its images, then follow the page
    links it contains until ``deepth`` pages have been requested in total.

    :param url: page to start from
    :param dldfolder: directory images are saved into (forwarded to
                      downloadimg as dowonloaddir)
    :param deepth: budget of pages to request overall — NOTE(review):
                   despite the name this caps the total request count via
                   len(__alreadyrequest), not the recursion depth
    """
    try:
        if deepth <= 0:
            deepth = 1  # guarantee at least one request
        __recursiveurllist.append(url)
        __alreadyrequest.append(url)
        content = getpagecontent(url)
        if content == '':
            # outer-domain url or failed request — nothing to crawl here
            return
        imgurls = getimgurl(content, url)
        print("get %d img urls from page :%s" % (len(imgurls), url))
        for img in imgurls:
            print("download from %s start" % img)
            # only images larger than 50 KiB are kept (see downloadimg)
            if downloadimg(img, dowonloaddir=dldfolder, min_size=50 * 1024):
                __downloadimg.append(img)
        if len(__alreadyrequest) >= deepth:
            return  # request budget exhausted
        urls = getpageurl(content, url)
        print("get %d page urls from page :%s" % (len(urls), url))
        __recursiveurllist.extend(urls)
        if len(urls) > 0:
            for item in urls:
                # skip pages that have already been requested
                if item not in __alreadyrequest:
                    print("already length:", len(__alreadyrequest))
                    if len(__alreadyrequest) >= deepth:
                        break
                    recursiverequest(item, dldfolder, deepth)
    except Exception as e:
        print("exeception in recursiverequest:error message:%s" % e)


if __name__ == '__main__':
    start = time.time()
    url = "http://www.ugirls.com/Shop/Detail/Product-387.html"
    # url = "http://pic.sogou.com/?from=result"
    # This assignment happens at module scope, so the helper functions
    # above see the value through the module-level __maindomain global.
    __maindomain = getmaindomain(url)
    # crawl at most 5 pages, saving images into ./sogou
    recursiverequest(url, "sogou", 5)
    logresult()
    end = time.time()
    print("finished in %d seconds" % (end - start))
