import re
import time
import os, sys, json

import requests
from bs4 import BeautifulSoup

# -------------------------------------------
# Load configuration from config.json next to this script; fall back to
# built-in defaults when the file is missing, unreadable, or malformed.
try:
    with open(sys.path[0] + '/config.json') as config:
        Config = json.load(config)
        roots = Config['roots']    # crawl start URLs
        whites = Config['whites']  # whitelist regex prefixes for harvested links
        # config stores hours; convert to seconds
        during = Config['during'] * 60 * 60
except (OSError, ValueError, KeyError):
    # Narrowed from a bare except: file missing/unreadable (OSError),
    # bad JSON (JSONDecodeError is a ValueError), or a missing key.
    print('配置文件不存在,使用默认配置')
    roots = ['https://www.python.org', 'https://jx3.xoyo.com/index/', 'https://browniu.gitee.io']
    # BUG FIX: the original line ended with a stray comma, turning `whites`
    # into a 1-tuple containing the list; iterating it then fed a *list*
    # to re.match(), which raises TypeError. The no-op module-level
    # `global roots, whites` statement was also dropped.
    whites = ['https://jx3', 'https://www.python', 'https://browniu']
    during = 5 * 60

# -------------------------------------------
# Global crawl state
size = 50000   # max links harvested per page
depth = 300    # max seed generations followed per root
cycle = 1      # crawl round counter (restored from logs/log.json below)
countAll = 0   # cumulative page count across all past rounds
headers = {
    'User-Agent': 'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)'
}
# BUG FIX: the original wrote `details: []` — a bare annotation, which
# never actually creates the variable. Assign a real empty list.
details = []
# -------------------------------------------
# Restore persisted state (round number / running total) from the last run.
try:
    with open('logs/log.json', 'r') as logR:
        logRobject = json.load(logR)
        cycle = logRobject['cycle']
        countAll = logRobject['count']
except (OSError, ValueError, KeyError):
    # First run or corrupt state file: keep the defaults set above.
    print('初始化')


# -------------------------------------------
# Core crawl rotor: fetch `target`, harvest whitelisted <a href> links,
# then recurse into each newly discovered link until `depth` generations
# have been visited.
#
# Reads/writes module globals: pages, pageHashs, pageNum, pageDep, logs,
# plus the configuration globals whites / size / depth / headers.
def xsjDiscoverSeed(target):
    # Swallow request-level failures so one bad URL cannot kill the crawl.
    try:
        res = requests.get(target, timeout=3, headers=headers)
        res.encoding = 'utf-8'
        if res.status_code != 200:
            return
        # Content-based de-duplication.
        # BUG FIX: the original hashed the Response object itself —
        # hash(res) is identity-based and unique per request, so the
        # duplicate check could never fire. Hash the page text instead.
        pageHash = hash(res.text)
        if pageHash in pageHashs:
            return
        pageHashs.append(pageHash)
        # Parse the DOM and collect candidate seeds for the next generation.
        soup = BeautifulSoup(res.text, "html.parser")
        global pageDep
        global pageNum
        seeds = []
        index = 0
        for link in soup.find_all('a'):
            item = link.get('href')
            # Keep only string hrefs matching a whitelist prefix pattern.
            if not isinstance(item, str):
                continue
            if not any(re.match(wh, item) for wh in whites):
                continue
            # Scheme-relative URLs ("//host/...") get an "http:" prefix.
            # NOTE(review): a root-relative path ("/x") would become the
            # malformed "http:/x" here, but every whitelist pattern starts
            # with "http", so only absolute/scheme-relative URLs arrive.
            if not re.match('http', item):
                item = 'http:' + item
            index += 1
            # Per-page capacity guard (same cut-off as the original:
            # stop once `size` whitelisted links have been counted).
            if index >= size:
                break
            pageNum += 1
            # Skip links already queued by an earlier page.
            if item not in pages:
                seeds.append(item)
        # Commit this generation of seeds, then descend one level.
        pages.extend(seeds)
        pageDep += 1
        for seed in seeds:
            if pageDep > depth:
                break
            xsjDiscoverSeed(seed)
    except requests.exceptions.RequestException as e:
        # Report the failure and keep crawling.
        logs.append({
            'error': ['error', e]
        })
        return


# -------------------------------------------
def init():
    """Run crawl rounds forever: crawl every root, report statistics,
    persist state, then sleep until the next scheduled round.

    BUG FIXES vs. the original:
      * the wait computation was `nexttime - (lasttime - firsttime)` —
        an absolute timestamp (~1e9 seconds), so the countdown loop
        effectively never ended; it now waits `nexttime - lasttime`.
      * `during` is already in seconds (hours are converted at config
        load), so the extra `10 * 60 *` scaling factor is dropped.
      * the total written to logs/log.json now accumulates `countAll`
        across rounds instead of persisting only the last round's count.
      * the tail-recursive self-call is replaced by a loop, so a
        long-running crawl no longer hits the recursion limit.
    """
    global pages, pageHashs, pageNum, pageDep, logs, cycle, count, countAll
    while True:
        logs = []
        # Round banner
        print('\r==')
        print('== 爬取启动 ============', cycle)
        print('-----------------------------')
        # Round start timestamp (for total-duration stats and scheduling)
        firsttime = time.time()
        nexttime = firsttime + during  # when the next round should begin
        count = 0
        # Crawl each configured root with fresh per-rotor state.
        for root in roots:
            pages = []
            pageNum = 0
            pageDep = 0
            pageHashs = []
            beforetime = time.time()
            xsjDiscoverSeed(root)
            aftertime = time.time()
            count += pageNum
            # Per-root report
            print('')
            print('目标：', root)
            print('足迹：', pageNum, '有效量：', len(list(set(pages))))
            print('用时：', round(aftertime - beforetime), 's')
            print('-----------------------------')
        cycle += 1
        lasttime = time.time()
        # Round summary
        print('爬取完成 总耗时', round(lasttime - firsttime), 's', '总量', count)
        print('日期：', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
        print('=============================')
        # Append a line to the human-readable run log.
        logWrite(
            '轮次：' + str(cycle) + ' | 耗时:' + str(round(lasttime - firsttime)) + 's | 爬取量：' + str(count) + ' | 异常：' + str(
                len(logs)) + ' | 日期：' + str(
                time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))) + '\n',
            sys.path[0] + '/logs/log.txt',
            'a+'
        )
        # Persist machine-readable state (overwrite), accumulating the total.
        countAll += count
        with open('logs/log.json', 'w') as logRw:
            json.dump({'cycle': cycle, 'count': countAll}, logRw)
        # Dump this round's errors to a fresh timestamped file, if any.
        if len(logs) > 0:
            logFile = sys.path[0] + '/logs/err/err_' + time.strftime('%Y%m%d_%H%M%S', time.localtime(time.time())) + '.txt'
            logWrite(logs, logFile, 'w')
            print('异常:', len(logs))
            print('详情请查看', logFile)
            print('=============================')
        # Countdown to the next round (clamped so a long round starts at once).
        waittime = max(0, round(nexttime - lasttime))
        for i in range(0, waittime):
            print('\r{0} 秒后进行下一次爬取'.format(waittime - i - 1), end='')
            time.sleep(1)
        time.sleep(1)


# -------------------------------------------
# File write helper
def logWrite(data, file, type):
    """Write str(data) to `file`, opened with mode `type` (e.g. 'w', 'a+').

    Uses a context manager so the handle is closed even if the write
    raises; utf-8 is forced because log lines contain Chinese text.
    """
    # NOTE(review): `type` shadows the builtin but is kept for caller compat.
    with open(file, type, encoding='utf-8') as fo:
        fo.write(str(data))


# -------------------------------------------
# Entry point: only start crawling when executed as a script, so the
# module can be imported (e.g. for testing) without side effects.
if __name__ == '__main__':
    init()
# -------------------------------------------
# python3 spider-py/spider.py
