import requests
import re
from bs4 import BeautifulSoup
import time

# Default HTTP headers sent with every request: disable caching and present a
# desktop-Firefox User-Agent so target sites are less likely to block the spider.
headers = {
    'cache-control': "no-cache",
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0) Gecko/20100101 Firefox/56.0'
}


class SpiderHelp(object):
    """Small helper around requests + BeautifulSoup for fetching and parsing pages."""

    # Fetch a page via POST and return it as a BeautifulSoup object.
    def beautifulsoup_bypost(self, url, data_load):
        """POST ``data_load`` to ``url`` and return the parsed soup.

        On a non-200 response, waits 5 seconds and retries the same POST once.
        """
        r = requests.post(url, data=data_load, headers=headers)
        if r.status_code != 200:
            time.sleep(5)
            # Retry must re-issue the POST (the original code mistakenly
            # fell back to a GET with query params here).
            r = requests.post(url, data=data_load, headers=headers)
        return self.__response_to_soup(r)

    # Fetch a page via GET and return it as a BeautifulSoup object.
    def beautifulsoup_byget(self, url, data_load=None):
        """GET ``url`` (optionally with query params ``data_load``) and return the soup.

        On a non-200 response, waits 5 seconds and retries once.
        """
        # requests treats params=None as "no query string", so no dummy
        # placeholder dict is needed when data_load is omitted.
        r = requests.get(url, params=data_load, headers=headers)
        if r.status_code != 200:
            time.sleep(5)
            r = requests.get(url, params=data_load, headers=headers)
        return self.__response_to_soup(r)

    # Convert a fetched HTTP response into a BeautifulSoup object.
    @staticmethod
    def __response_to_soup(r):
        """Decode the response as UTF-8 and parse it with the lxml parser."""
        r.encoding = "utf-8"
        return BeautifulSoup(r.text, "lxml")

    # Collect all absolute links on the page whose URL matches ``pattern``.
    @staticmethod
    def target_links(soup, pattern):
        """Return the hrefs of all <a href="http..."> tags matched by ``pattern``.

        :param soup: a BeautifulSoup document (anything with ``find_all``).
        :param pattern: a compiled regex applied with ``search`` to each href.
        :return: list of matching URL strings, in document order.
        """
        matched = []
        # Only consider anchors whose href is an absolute http(s) URL.
        for link in soup.find_all('a', {"href": re.compile(r'^http')}):
            url = link.get('href')
            if pattern.search(url):
                matched.append(url)
        return matched
