from urllib.parse import urlparse
import re
import logging
class Rule(object):
    """Wrap a crawl-task message (consumed from RabbitMQ) into spider parameters.

    Expected keys in ``data``: ``url``, ``clue_id``, ``task_id``,
    ``company_id``, ``clue_name`` (all fetched with ``.get``, so absent
    keys become ``None``).

    Derived attributes:
      - ``start_urls``: single-element list holding the scheme-normalized URL
      - ``allow_url``: ``netloc + path`` of that URL (crawl scope prefix)
      - ``allowed_domains``: hostname with any leading ``www.`` stripped
    """

    def __init__(self, data):
        # NOTE(review): data.get('url') may be None, in which case
        # .startswith below raises AttributeError — presumably upstream
        # guarantees the key; confirm against the producer.
        url = data.get('url')

        self.clue_id = data.get('clue_id')
        self.task_id = data.get('task_id')
        self.company_id = data.get('company_id')
        self.clue_name = data.get('clue_name')

        # Bare hosts such as "example.com/path" get a scheme (and a
        # trailing slash) so urlparse yields a proper netloc.
        if not url.startswith(('http://', 'https://')):
            url = 'http://{}/'.format(url)

        parsed = urlparse(url)  # parse once instead of three times
        self.allow_url = parsed.netloc + parsed.path
        self.start_urls = [url]
        # Drop a leading "www." so e.g. www.site.com and site.com share scope.
        self.allowed_domains = [re.sub(r'^www\.', '', parsed.hostname)]

class WorkerRule(object):
    """Lightweight holder for a worker-task message.

    Copies the task fields out of ``data`` (missing keys become ``None``)
    and wraps the raw URL in a one-element ``start_urls`` list.

    Example payload:
    {"url": "http://so.67.com/index.php?actionKey=search&c=all&dr=all&kw=%E3%80%8A%E7%9B%96%E4%B8%96%E8%8B%B1%E9%9B%84%E3%80%8B&method=searchResults&p=51",
    "clue_id": "914",
    "clue_name": null,
    "company_id": "187",
    "task_id": "181"}
    """
    def __init__(self, data):
        # Plain passthrough fields — copied verbatim from the message.
        for field in ('clue_id', 'clue_name', 'company_id', 'task_id'):
            setattr(self, field, data.get(field))
        self.start_urls = [data.get('url')]



