import sys
from urllib.parse import urljoin, urlparse

import requests
from bs4 import BeautifulSoup

sys.path.append('../')
from Models import Urls

# Shared HTTP request headers: present a desktop Chrome/Edge user-agent so
# the target site serves its normal HTML instead of a bot-blocked response.
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36 Edg/84.0.522.40'}


# First pass: collect every same-site URL linked from the given page.
def get_url_lists(site_url):
    """Fetch *site_url* and return the set of same-site URLs in its <a> tags.

    Relative hrefs are resolved against *site_url* with ``urljoin``;
    ``javascript:`` pseudo-links, fragment-only anchors, and links whose
    host differs from *site_url*'s host are skipped.

    :param site_url: absolute URL of the page to crawl
    :return: ``set`` of absolute, deduplicated same-site URLs
    """
    site = urlparse(site_url).netloc

    # `headers` is only read here, so no `global` declaration is needed.
    # A timeout keeps the crawler from hanging forever on a stalled server.
    rs = requests.get(site_url, headers=headers, timeout=10)
    soup = BeautifulSoup(rs.text, 'html.parser')

    found = set()  # set from the start — deduplicates as we go
    for a in soup.select('a'):
        if not a.has_attr("href"):
            # Only <a> tags with an href attribute carry a link.
            continue
        href = a["href"]
        # Skip pseudo-links and in-page anchors (they re-point to this page).
        if href.startswith("javascript:") or href.startswith("#"):
            continue
        # urljoin resolves absolute and relative hrefs uniformly; the old
        # manual scheme + netloc + path concatenation mangled relative
        # paths such as "sub/page.html".
        url = urljoin(site_url, href)
        # Keep only links that stay on the same host (exact netloc match,
        # not a substring test, to avoid false positives).
        if urlparse(url).netloc == site:
            found.add(url)

    return found


def get_sub_url(urlb):
    """Crawl every URL in *urlb* and return the union of all links found.

    :param urlb: iterable of page URLs to fetch
    :return: ``set`` of every URL discovered across all pages (deduplicated)
    """
    collected = set()
    for page_url in urlb:
        # Merge the links scraped from this page into the running set;
        # the set handles deduplication automatically.
        collected.update(get_url_lists(page_url))
    return collected


def startGetUrls(site):
    """Crawl *site* two levels deep and persist every discovered URL.

    :param site: absolute URL of the site's entry page
    """
    first_level = get_url_lists(site)
    second_level = get_sub_url(first_level)
    Urls.Urls.insertMulti(second_level)


if __name__ == "__main__":
    # Guard the crawl so importing this module does not trigger network
    # requests and database writes as a side effect.
    startGetUrls("http://www.songboy.net/")
