#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Time: 2019/4/16  21:05
# @Author: 余浪人
# @email: yulangren520@gmail.com
from apps.models.collector_model import Collector_Model
from apps.spiders.spider import Xpath_rule, Re_rule
import time


# obj_s = Collector_Model.query.filter(Collector_Model.tim_acquisition != 0).all()  # all auto-collect collector objects

def mission(obj):
    """Run one collection job for a collector configuration object.

    Resolves the target URL (or a list of URLs for a batch ID range),
    instantiates the spider matching ``obj.rule_cls`` (1 -> Xpath_rule,
    2 -> Re_rule), and starts the crawl.

    :param obj: a Collector_Model row describing the collection rules.
    :return: result of ``spider.start_requests`` (the spiders return nothing).
    :raises ValueError: if ``obj.rule_cls`` is neither 1 nor 2 (previously
        this fell through and crashed with ``NameError`` on ``spider``).
    """
    url = obj.URL
    # Map the stored code to a codec name: 1 -> utf_8, 2 -> gbk, else big5.
    encode_code = 'utf_8' if obj.encode_code == 1 else 'gbk' if obj.encode_code == 2 else 'big5'
    if obj.URL_create and obj.create_is_range:  # batch collection over an ID range
        start, end = obj.create_range.split('|')[0], obj.create_range.split('|')[-1]
        if obj.url_is_desc:  # descending order
            # Fixed off-by-one: the old range(end + 1, start, -1) produced
            # [end+1 .. start+1] instead of mirroring the ascending [start .. end].
            id_range = range(int(end), int(start) - 1, -1)
        else:
            id_range = range(int(start), int(end) + 1)
        url = [obj.URL_create.replace('{$ID}', str(i)) for i in id_range]
    set_time = obj.set_time if obj.set_time > 0 else 1  # auto-collect interval (seconds), minimum 1

    # Keyword arguments shared by both spider variants.
    kwargs = dict(
        img_save_path=obj.img_save_path, keys_rule=obj.keys_rule,
        time_rule=obj.time_rule, author_rule=obj.author_rule,
        source_rule=obj.source_rule, replace_content=obj.replace_content,
        encode_code=encode_code,
    )
    if obj.url_relative:  # relative links need the site base URL to resolve against
        kwargs.update(goal_url=obj.goal_url, url_relative=obj.url_relative)

    rule_classes = {1: Xpath_rule, 2: Re_rule}
    try:
        spider_cls = rule_classes[obj.rule_cls]
    except KeyError:
        raise ValueError('unknown rule_cls: %r (expected 1 or 2)' % obj.rule_cls)
    # NOTE(review): ``class_lD`` (lowercase L) looks like a typo of ``class_ID``
    # but is the actual model attribute name — do not "fix" without migrating the model.
    spider = spider_cls(obj.class_lD, obj.list_rule, obj.URL_rule, obj.img_url_rule,
                        obj.title_rule, obj.content_rule, obj.restrictive, set_time,
                        **kwargs)
    return spider.start_requests(url_data=url)  # start crawling; no return value


def test_spider(obj_s):
    # while True:
    for obj in obj_s:  #TODO 爬虫调度  待调试
        pass
        # if obj.tim_acquisition and obj.is_start: mission(obj)  # 开始采集
        # time.sleep(3600)  # 2小时调一次
