import requests
import hashlib
import socket
import time
from bs4 import BeautifulSoup

# 屏蔽ssl错误
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)


# 设置socket全局超时为15秒
socket.setdefaulttimeout(15)

# Find the domain's suffix among the known candidates.
def getDomainSuffix(domain, suffixList):
    """Return the first entry of suffixList that terminates *domain*, else None.

    Comparison is done by tail-slice equality (equivalent to endswith for
    non-empty suffixes, and preserving the original empty-suffix semantics).
    """
    for candidate in suffixList:
        if domain[-len(candidate):] == candidate:
            return candidate
    return None

# Parse a URL into (registrable domain, full host, file extension).
def getUrlInfo(url):
    """Split *url* into its registrable base domain, full host and file type.

    Returns a tuple ``(baseDomain, domain, fileType)`` where ``fileType`` is
    the extension of the last path segment, or ``None`` when the path has no
    extension.
    """
    # Strip the scheme prefix by slicing, once, at the start only.
    # (The old str.replace() also clobbered any "http://" embedded in the
    # query string, e.g. redirect URLs like "/jump?u=http://x".)
    if url.startswith("http://"):
        url = url[len("http://"):]
    elif url.startswith("https://"):
        url = url[len("https://"):]

    # Normalize so there is always a "/": a bare host keeps only the host
    # part (drop any query string) and gains an empty path.
    if "/" not in url:
        if "?" in url:
            url = url[:url.find("?")]
        url += "/"

    slash = url.find("/")
    # path_1: path including the query string; path_2: path without it.
    path_1 = url[slash:]
    path_2 = path_1.split("?", 1)[0]

    # File extension: inspect only the last path segment, so a dot in a
    # directory name ("/v1.2/download") is not mistaken for an extension.
    lastSegment = path_2.rsplit("/", 1)[-1]
    if "." in lastSegment:
        fileType = lastSegment[lastSegment.rfind(".") + 1:]
    else:
        fileType = None

    # Host is everything before the first "/".  Slice instead of
    # url.replace(path_1, ''), which could strip a matching substring
    # from the host itself.
    domain = url[:slash]

    # 取根域名 — known multi-label public suffixes, matched in list order.
    hzList = ".gov.cn,.com,.com.cn,.cc,.net,.net.cn,.org,.edu.cn,.cn,.org.cn".split(",")
    domainHz = ''
    for hz in hzList:
        if domain.endswith(hz):
            domainHz = hz
            break
    if domainHz == '':
        # Unknown suffix: fall back to the last dotted label.
        domainHz = "." + domain.split(".")[-1]

    # Remove the suffix by slicing from the end.  replace() would also
    # delete an identical substring earlier in the host, e.g.
    # "com.example.com" -> "example" instead of "com.example".
    domainInfoList = domain[:-len(domainHz)].split(".")

    baseDomain = domainInfoList[-1] + domainHz
    return baseDomain, domain, fileType

# Collect every absolute http(s) link found in an HTML body.
def getBodyUrls(body):
    """Return the href of each <a> tag whose href starts with "http"."""
    soup = BeautifulSoup(body, 'html.parser')
    return [
        anchor['href']
        for anchor in soup.findAll(name='a')
        if 'href' in anchor.attrs and anchor['href'][:4] == 'http'
    ]

# Fetch a page; returns the status code and body text.
def httpGet(url):
    """GET *url* with a browser User-Agent, TLS verification disabled.

    Returns ``(status_code, body_text)``; on any request failure returns
    ``(False, '')`` so callers can treat a falsy status as an error.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.104 Safari/537.36'}
    try:
        response = requests.get(url, headers=headers, verify=False, timeout=15)
    except requests.RequestException:
        # Narrowed from a bare except: only network/HTTP errors are
        # best-effort failures; programming errors and KeyboardInterrupt
        # should propagate instead of being silently swallowed.
        return False, ''
    return response.status_code, response.text

# Filter a URL list down to links that likely return page content.
def getContentUrl(urlList):
    """Return deduplicated URLs whose extension is not a static-asset type.

    The fragment identifier is removed BEFORE the extension check, so e.g.
    "a.pdf#page=2" is still recognised (and excluded) as a pdf.
    """
    # Set membership is O(1); the source string's duplicates are harmless.
    staticTypes = set("bak,mdb,css,flv,mp3,mp4,swf,js,jpg,jpeg,png,css,gif,txt,ico,pdf,css3,txt,rar,zip,mkv,avi,mp4,swf,wmv,exe,msi,mpeg,ppt,pptx,doc,docx,xls,xlsx,woff2,woff,map,svg,ttf,m3u8,webp,tiff,bmp,7z,tgz,tar,bz,tbz,gz,apk,ipa".split(","))
    contentUrlList = []
    for url in urlList:
        # 去掉描点属性 — strip the fragment first; previously it was removed
        # after getUrlInfo, so the parsed extension ("pdf#sec") never matched
        # the static-type list and asset links slipped through the filter.
        if "#" in url:
            url = url[:url.find("#")]
        _, _, fileType = getUrlInfo(url)
        if fileType not in staticTypes and url not in contentUrlList:
            contentUrlList.append(url)
    return contentUrlList

# md5
def md5(data):
    """Return the hex MD5 digest of str(data) encoded as UTF-8."""
    digest = hashlib.md5(str(data).encode('utf-8'))
    return digest.hexdigest()

# Detect wildcard ("pan") DNS resolution for a domain.
def isPanDomain(domain):
    """Return True when a random, almost-certainly-nonexistent subdomain
    still resolves — i.e. the domain uses wildcard DNS."""
    probe = "{}.{}".format(md5(str(time.time()))[:6], domain)
    try:
        socket.gethostbyname(probe)
    except Exception:
        return False
    return True
