#encoding:utf8

"""
 configuration for general spider
"""
import os

###################################### Database backends
# Service endpoints; if a host is left empty, results are written straight out.

# URL-queue service endpoint and auth token.
urldb_config = dict(
    # host="http://ikan.space:11011",
    host="http://localhost:11011",
    auth="auth_code",
)

# Content-store service endpoint and auth token.
db_config = dict(
    # host="http://ikan.space:11021",
    host="http://localhost:11021",
    auth="auth_code",
)

# Remote URL-opener service endpoint and auth token.
opener_config = dict(
    # host="http://ikan.space:9001",
    host="http://localhost:9001",
    auth="auth_code",
)
######################################


###################################### Spider configuration
# Name recorded as the crawl source.
source = "spider"

# Initial seed URLs for the spider.
start_urls = [
    # r"",   # accept all
]


# Patterns a URL must match to be crawled (empty list / empty pattern
# accepts everything). Applied with `match` semantics.
re_valid_urls = [
    r'',  # empty regex matches everything
]


# Patterns marking a URL as invalid. Applied with `search` semantics.
re_invalid_urls = [
    # BUG FIX: was r'[error|not\wfound]' — square brackets made this a
    # character CLASS containing \w, so it matched any URL with a word
    # character, i.e. rejected nearly everything. Rewritten as the intended
    # alternation: "error", or "not found"/"not_found"/"not-found"/"notfound".
    r'error|not[\s_-]?found',
    r'\.js$',
    r'\.css$'
]

# Opener type used when delegating to the remote URL parser.
opener = 'remote'
opener_type = 'r'


# log configuration
# Logging configuration.
log_info = dict(
    name="genyanw",
    # Log directory resolved relative to this file's location.
    path=os.path.join(os.path.dirname(os.path.abspath(__file__)), "./log/"),
    filename="run.log",
    level="debug",
    # Optional keys, left at their defaults:
    # handlers=['console'],
    # formatter="simple",
    # backupCount=30,
    # interval=1,
    # when="midnight",
    # maxBytes=10 * 1024 * 1024,
)

# Delay between parser runs (multiplied by a human-interval coefficient).
PARSER_INTERVAL = 0.1

# Seconds between status print-outs.
PRINT_INTERVAL = 5
