import requests
import re
import dataBase
import generate
import os

def download(url, timeout=0.1):
    """Fetch *url* and return its body decoded as UTF-8.

    Returns the decoded text on success, or ``False`` when the request
    fails, times out, the body is not valid UTF-8, or the response's
    Content-Type header does not contain 'text'.

    NOTE(review): the 0.1 s default timeout is extremely aggressive and
    will abort most real-world requests — kept for backward
    compatibility, but confirm it is intentional. Callers can now pass
    a larger value.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36 Edg/112.0.1722.68',
    }
    try:
        response = requests.get(url = url, headers = headers, timeout = timeout)
        try:
            # headers.get() may return None when the header is absent; the
            # original `'text' in None` would raise TypeError. Default to ''.
            if 'text' not in response.headers.get('Content-Type', ''):
                return False
            return response.content.decode('utf-8')
        finally:
            # close on every path, not only the success path
            response.close()
    except Exception:
        # network error, timeout, or a non-UTF-8 body — signal failure
        return False
    
def pick(str):
    """Extract all runs of CJK (Chinese) characters from the input string
    and return them joined by single spaces."""
    runs = []
    for match in re.finditer(r'[\u4e00-\u9fa5]+', str):
        runs.append(match.group(0))
    return ' '.join(runs)

def saveUrl(db, hashdb, urls):
    """Persist previously-unseen URLs into *db* and record their hashes in *hashdb*.

    For each URL whose hash is not already known to *hashdb*, a random
    string id is generated as its key. Both collections are written in
    one bulk insert each. Returns True on success, False if either
    insert raises.

    NOTE(review): assumes hashdb.check() returns False for unseen
    hashes — confirm against the dataBase implementation.
    """
    newurls = {}
    newhashes = {}
    for url in urls:
        # renamed from `hash`/`id` to avoid shadowing the builtins
        url_hash = generate.str_to_hash(url)
        if url_hash in newhashes:
            # duplicate within this batch — the original inserted it twice
            # under two different random ids
            continue
        if hashdb.check(url_hash) == False:
            key = generate.generate_random_string()
            newurls[key] = url
            newhashes[url_hash] = True
    try:
        db.insertBunch(newurls)
        hashdb.insertBunch(newhashes)
    except Exception as e:
        print(e)
        print('Error while dumping urls')
        return False
    return True

def saveHtml(htmlStr):
    """Write *htmlStr* to a randomly named .html file under pages/.

    Returns True on success, False if the write fails (the error is
    printed).
    """
    filename = generate.generate_random_filename() + '.html'
    # exist_ok avoids the check-then-create race of the original
    # isdir/makedirs pattern
    os.makedirs('pages', exist_ok=True)
    fileroute = os.path.join('pages', filename)
    try:
        with open(fileroute, 'w', encoding='utf-8') as f:
            f.write(htmlStr)
    except Exception as e:
        print(e)
        return False
    return True

def saveContent(content):
    """Write *content* to a randomly named .txt file under content/.

    Returns True on success, False if the write fails (the error is
    printed).
    """
    filename = generate.generate_random_filename() + '.txt'
    # exist_ok avoids the check-then-create race of the original
    # isdir/makedirs pattern
    os.makedirs('content', exist_ok=True)
    fileroute = os.path.join('content', filename)
    try:
        with open(fileroute, 'w', encoding='utf-8') as f:
            f.write(content)
    except Exception as e:
        print(e)
        return False
    return True

def analyse(htmlStr):
    """Extract absolute http(s) links from anchor tags in *htmlStr*.

    Scans every ``<a ...>`` opening tag, pulls out the href value, and
    keeps only URLs that start with 'http'. Returns the list of URLs,
    or False when none were found.

    Fixes vs. original: an empty ``href=""`` no longer raises
    IndexError (the original indexed ``url[0]`` unconditionally), and
    the filter now requires a full 'http' prefix instead of the single
    character 'h', which also admitted relative links like 'home.html'.
    """
    result = []
    for tag in re.findall(r'<a[^>]*>', htmlStr):
        m = re.search(r'href\s*=\s*[\'"]([^>\'"]*)[\'"]', tag)
        if m is not None:
            url = m.group(1)
            # startswith is safe on the empty string, unlike url[0]
            if url.startswith('http'):
                result.append(url)
    if result:
        return result
    return False