from bs4 import BeautifulSoup as bs
import requests
from config import Config

# Module-level configuration, loaded once at import time.
webConf = Config()
# NOTE(review): "@/" looks like a project-root-relative path handled by Config.load — TODO confirm.
webConf.load("@/config/web.json")
debug = webConf["debug"] # True or False
# HTTP request headers passed to requests.get() in webFetch().
header = webConf["header"]

def webFetch(url):
    '''
    Fetch the raw content of a web page.

    Args:
        url: the URL to fetch.

    Returns:
        (success, data) tuple: success is True and data is the raw response
        body (bytes) on success; success is False and data is b"" when the
        request fails.
    '''
    try:
        resp = requests.get(url, headers = header, stream = False)
        try:
            ret = resp.content
        finally:
            # Always release the connection, even if reading the body raises.
            resp.close()
        return True, ret
    except requests.RequestException:
        # Only swallow network/HTTP errors; let KeyboardInterrupt etc. propagate.
        # Return bytes (not "") so the failure value matches the success type.
        return False, b""

def getAllAttrib(content, rule):
    '''
    Collect the requested attribute values of matching tags in an HTML page.

    Args:
        content: raw HTML of the page (bytes or str), e.g. from webFetch().
        rule: mapping of tag name -> list of attribute names to collect,
            e.g. rule = {"a": ["href"], "img": ["src", "style"]}

    Returns:
        dict mapping each tag name to {attribute: [values...]}; every
        requested attribute is present, with an empty list when no tag
        carried it, e.g.
        ret = {"a": {"href": ["gitee.com", "zhuchengyang.gitee.io"]}, "img": {"src": [], "style": []}}
    '''
    # Bug fixes vs. original: parse `content` (was undefined name `r`),
    # iterate rule[tg] (was iterating the tag-name string's characters),
    # append to tgRet (was typo `rgRet`), and key the result by the tag
    # name (was the literal string "tg", which overwrote every entry).
    ret = {}
    soup = bs(content, "html.parser")
    for tg in rule:
        # Pre-seed every requested attribute so absent ones yield [].
        tgRet = {attrib: [] for attrib in rule[tg]}
        for elem in soup.find_all(tg):
            for attrib in rule[tg]:
                if elem.has_attr(attrib):
                    tgRet[attrib].append(elem.attrs[attrib])
        ret[tg] = tgRet

    return ret
