import re
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent


def runCrawler(method: str, url: str, headers: dict=None, cssSelector: str=None, regularSelector: str=None, timeout: float=30):
    """Fetch *url* with a random User-Agent and optionally extract content.

    Args:
        method: HTTP method name passed to requests.request (e.g. 'GET').
        url: Target URL to fetch.
        headers: Optional extra request headers; the caller's dict is not modified.
        cssSelector: Optional CSS selector. When given, a list of matched
            elements (as HTML strings) is returned instead of the raw page.
        regularSelector: Optional regex applied to each matched element's HTML;
            all matches across all elements are collected into the result list.
        timeout: Seconds to wait for the server before giving up (default 30).

    Returns:
        The raw response text when no cssSelector is given; otherwise a list of
        strings (matched element HTML, or regex matches when regularSelector
        is set).

    Raises:
        requests.RequestException: on network errors or timeout.
    """
    # Copy so we never mutate the dict the caller passed in.
    headers = dict(headers) if headers else {}
    headers['User-Agent'] = UserAgent().random
    # A timeout prevents the request from blocking forever on a dead server.
    requests_result = requests.request(method, url, headers=headers, timeout=timeout)
    content = requests_result.text

    if cssSelector:
        returnRes = []

        # Parse the page with BeautifulSoup.
        soup = BeautifulSoup(content, 'html.parser')

        # CSS-selector lookup; use select_one instead to match a single element.
        links = soup.select(cssSelector)
        for link in links:
            if regularSelector:
                # Run the regex against the element's serialized HTML.
                returnRes.extend(re.findall(regularSelector, str(link)))
            else:
                returnRes.append(str(link))

        return returnRes

    return content