import requests

class BaseCrawler(object):
    """Minimal page crawler: fetches HTML, drives a caller-supplied parser,
    and downloads binary resources to disk.

    Parsing is delegated to callbacks passed into ``parserPage`` so this base
    class stays site-agnostic.
    """

    def __init__(self, targetUrl):
        """Store the target URL and build the default request headers.

        Args:
            targetUrl: root URL this crawler is associated with (kept for
                subclasses; not read by the methods visible here).
        """
        super(BaseCrawler, self).__init__()
        self.__targetUrl = targetUrl
        # A desktop-browser User-Agent so servers don't reject us as a bot.
        self.__header = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.119 Safari/537.36'
        }

    def __getPageContent(self, url):
        """Fetch *url* and return its HTML text.

        Returns an empty-document placeholder ("<html></html>") on any
        non-200 status so callers always receive parseable markup.
        """
        response = requests.get(url, headers=self.__header)
        if response.status_code == 200:
            # Force UTF-8: some servers mis-declare their charset.
            response.encoding = "UTF-8"
            html = response.text
        else:
            html = "<html></html>"
        return html

    def parserPage(self, url, **kwargs):
        """Fetch *url* and yield one parsed item per page element.

        Keyword Args:
            elementsFunc: callable(html) -> iterable of elements to parse.
            itemFunc: callable(element) -> parsed item.

        Yields:
            Whatever ``itemFunc`` returns for each element. Yields nothing
            when ``elementsFunc`` finds no elements.
        """
        page = self.__getPageContent(url)
        elements = kwargs["elementsFunc"](page)
        itemFunc = kwargs["itemFunc"]
        if elements:
            for element in elements:
                yield itemFunc(element)

    def downloader(self, link, file_path, file_name, ext):
        """Download *link* and save it as ``<file_path>/<file_name>.<ext>``.

        Uses the same User-Agent header as page fetches for consistency.
        The response body is written verbatim in binary mode.
        """
        ir = requests.get(link, headers=self.__header)
        target = "%s/%s.%s" % (file_path, file_name, ext)
        # Context manager guarantees the handle is closed even if write fails
        # (the original `open(...).write(...)` leaked the file object).
        with open(target, "wb") as fh:
            fh.write(ir.content)


