#!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'wshu'
__version__ = '1.0'
"""
    ***********************************
    *  @filename : hf.py
    *  @Author : wshu
    *  @CodeDate : 2020/3/26 11:05
    *  @Software : PyCharm
    ***********************************
    v1.0/dev update: 【三轮车改装挖掘机】
    (1) Spider为主从分布式爬取策略，由master服务器提供url分发
    (2) 多台slave服务器进行网页提取功能，slave提取url，master处理解析
"""
# import re
# import scrapy
# import base64
# import logging
# from utils.db import Db
# from goose3 import Goose
# from goose3.text import StopWordsChinese
# from scrapy_splash import SplashRequest
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
from scrapy_redis.spiders import RedisCrawlSpider
# from logging import INFO, WARNING, DEBUG, ERROR
# from hfbot_v1.items import Hflink, Hfimg, Hfbad
from utils.utils import filter_domain, allow_d, physize, log_console, download_html

class HfSpider(RedisCrawlSpider):
    """
    Distributed crawler built on scrapy-redis.

    A master pushes start URLs onto the redis list named by ``redis_key``;
    slave instances pop from it, follow links, and append every visited
    page URL and every extracted image URL to local text files.

    todo: js rendering (scrapy-splash)
    """
    name = 'hf'
    # Redis list the master pushes start urls onto.
    redis_key = 'hf:start_urls'

    # Crawl rules.
    rules = (
        # Custom page-link rule: follow every link, record each page URL.
        Rule(LinkExtractor(
            allow=()), callback='parse_url', follow=True),
        # Custom image-link rule: pull src attributes from <img> tags and
        # keep only jpg/gif/png targets.  NOTE: tags/attrs must be
        # iterables -- ('img') is just the string 'img', hence the
        # explicit one-element tuples below.
        Rule(LinkExtractor(
            allow=(r'https?://[\w./-]+\.(?:jpg|gif|png)',),
            tags=('img',), attrs=('src',),
            deny_extensions=[], canonicalize=False,
            unique=True), callback='parse_img', follow=True),
    )

    def __init__(self, *args, **kwargs):
        """
        Accept an optional ``domain`` kwarg (e.g. from ``-a domain=...``)
        and derive ``allowed_domains`` from it via ``allow_d``.
        """
        domain = kwargs.pop('domain', '')
        # Must be set before super().__init__ so the offsite filter and
        # the compiled rules see the restriction.
        self.allowed_domains = allow_d(domain)
        super(HfSpider, self).__init__(*args, **kwargs)

    def parse_url(self, response):
        """
        Record a crawled page URL: log it and append it to '链接.txt'.
        """
        self._record('链接', response.url)

    def parse_img(self, response):
        """
        Record an extracted image URL: log it and append it to '图片.txt'.
        """
        self._record('图片', response.url)

    def _record(self, label, url):
        """
        Shared sink for parse_url/parse_img: log ``<label>: <url>`` and
        append the url (one per line) to ``<label>.txt``.
        """
        self.logger.info('{}: {}'.format(label, url))
        with open('{}.txt'.format(label), 'a+', encoding='utf-8') as fp:
            fp.write(url + '\n')