import requests
import importlib
import os

from myspider import settings as default_settings

# Resolve the active settings module: if MYSPIDER_SETTINGS names an importable
# module, use it; otherwise fall back to the bundled defaults. Only an import
# failure triggers the fallback — the original bare `except:` also swallowed
# the TypeError raised when the env var is unset and hid genuine errors
# (e.g. a syntax error) inside the user's settings module.
_settings_path = os.environ.get("MYSPIDER_SETTINGS")
if _settings_path:
    try:
        user_settings = importlib.import_module(_settings_path)
    except ImportError:
        user_settings = default_settings
else:
    user_settings = default_settings

def get_settings(name):
    """Return setting *name* from the user settings, falling back to defaults.

    The previous implementation passed ``getattr(default_settings, name)`` as
    the eagerly-evaluated default, which raised ``AttributeError`` whenever the
    name was missing from ``default_settings`` — even when ``user_settings``
    defined it. The lookup on the defaults is now performed only on a miss.

    Raises:
        AttributeError: if *name* is defined in neither settings module.
    """
    try:
        return getattr(user_settings, name)
    except AttributeError:
        return getattr(default_settings, name)



class MySpider:
    """Minimal breadth-first web crawler.

    Subclasses override :meth:`get_start_urls` to seed the crawl with
    ``(url, parse_func)`` pairs. Each parse function yields either a ``dict``
    (an item, routed through the configured processor pipeline) or another
    ``(url, parse_func)`` pair to enqueue.
    """

    def __init__(self):
        self.sess = requests.Session()
        self.processors = []
        self.urls = []
        # Load item-processor plugins. Each entry is a dotted path such as
        # "package.module.ClassName"; the class is imported, instantiated,
        # and notified via open_spider() before crawling starts.
        for processor_path in get_settings("ITEM_PROCESSORS"):
            module_path, class_name = processor_path.rsplit(".", 1)
            plugin_module = importlib.import_module(module_path)
            processor_cls = getattr(plugin_module, class_name)
            processor = processor_cls()
            processor.open_spider(self)
            self.processors.append(processor)

    @property
    def session(self):
        """The shared :class:`requests.Session` used for all requests."""
        return self.sess

    def get_start_urls(self):
        """Return an iterable of ``(url, parse_func)`` pairs.

        Must be implemented by subclasses.
        """
        raise NotImplementedError

    def crawl(self):
        """Crawl the web breadth-first starting from :meth:`get_start_urls`.

        Raises:
            requests.HTTPError: if any fetched page returns an error status.
        """
        # Seed the queue with the starting (url, parse_func) pairs.
        self.urls.extend(self.get_start_urls())

        # Appending to self.urls while iterating it is deliberate: Python's
        # list iteration is index-based, so URLs discovered during parsing
        # are visited after the current batch — i.e. breadth-first order.
        for url, parse_func in self.urls:
            response = self.sess.get(url)
            # NOTE(review): encoding is hard-coded; assumes all target sites
            # serve GBK-encoded pages — confirm before crawling other sites.
            response.encoding = "gbk"
            response.raise_for_status()

            # A parse function yields items (dicts) and/or new URL pairs.
            for result in parse_func(response):
                if isinstance(result, dict):
                    # Pass the item through each processor in order; a falsy
                    # return value drops the item from the pipeline.
                    for processor in self.processors:
                        result = processor.process(result)
                        if not result:
                            break
                else:
                    # Deduplicate: skip URL pairs already queued or visited.
                    if result not in self.urls:
                        self.urls.append(result)
