# coding=utf-8
import random
import urllib.request
import urllib.parse
import re
import datetime
import os


def searchurl(content, partern):
    """Return every non-overlapping match of the regex *partern* in *content*."""
    return re.findall(partern, content)


def getresource(img):
    """Derive a local file name from an image URL.

    Uses the last path segment of *img* when it is non-empty; otherwise
    (e.g. a URL ending in "/") falls back to a generated unique ".jpg" name.

    Fixes two bugs in the original lambda: ``len(img.split("/")) > 0`` was
    always True (str.split never returns an empty list), making the fallback
    unreachable and producing empty file names for trailing-slash URLs; and
    ``str(random.randint)`` stringified the function object instead of
    calling it.
    """
    name = img.split("/")[-1]
    if name:
        return name
    # Unreachable resource name: timestamp plus a random suffix.
    return "%s.jpg" % (str(datetime.datetime.now()) + str(random.randint(0, 999999)))


def replace(src, sub):
    """Delete every occurrence of *sub* from *src*.

    NOTE: *sub* is interpreted as a regular expression, not a literal
    substring — regex metacharacters in it will be treated as such.
    """
    return re.compile(sub).sub("", src)


def getpagecontent(url):
    """Fetch *url* and return (status_code, headers, body_bytes).

    Fixes a bug in the original: it returned the bound method ``f.info``
    (a method object) instead of calling ``f.info()`` to get the actual
    response headers.
    """
    with urllib.request.urlopen(url) as f:
        cont = f.read()
        return f.getcode(), f.info(), cont


def downloadimg(imgurl, fname=None):
    """Download *imgurl* into the local ``pics/`` directory.

    *fname* overrides the file name; by default it is derived from the URL
    via ``getresource``. Errors are reported to stdout, not raised — this is
    a deliberate best-effort downloader.

    Fixes a bug in the original: it crashed with FileNotFoundError (silently
    swallowed by the except) whenever ``pics/`` did not exist yet.
    """
    try:
        # Ensure the target directory exists before writing into it.
        os.makedirs("pics", exist_ok=True)
        with urllib.request.urlopen(imgurl) as f:
            cont = f.read()
            fname = getresource(imgurl) if fname is None else fname
            with open("pics/" + fname, "wb") as img:
                img.write(cont)
    except Exception as e:
        print(e)


def findurl(content):
    """Return all double-quoted absolute http:// URLs found in *content*.

    Matches include the surrounding quote characters. *content* is coerced
    to str first, so bytes or tuples are accepted.
    """
    return re.findall('"http://.*?"', str(content))


def recursiverequest(url, visited=None):
    """Recursively crawl *url*, collecting the URLs reached along the way.

    Returns a list containing the links found on each visited page plus the
    leaf URLs (pages with no outgoing links). *visited* is a set used
    internally for cycle protection; callers normally omit it.

    Fixes several defects in the original:
    - the results of the recursive calls were discarded, so nothing beyond
      the first page was ever accumulated;
    - URLs from ``findurl`` still carried their surrounding quotes and would
      have failed in ``urlopen``;
    - there was no cycle guard, so any site linking back to a visited page
      caused unbounded recursion;
    - a single failed request aborted the whole crawl.
    """
    if visited is None:
        visited = set()
    if url in visited:
        return []
    visited.add(url)
    urllist = []
    try:
        content = getpagecontent(url)
    except Exception as e:
        # Best-effort crawl: report and skip pages that fail to load.
        print(e)
        return urllist
    # findurl matches include the surrounding double quotes; strip them.
    urls = [u.strip('"') for u in findurl(content)]
    urllist.extend(urls)
    if urls:
        for item in urls:
            urllist.extend(recursiverequest(item, visited))
    else:
        urllist.append(url)
    return urllist


