# -*- coding: utf-8 -*-
import sys
import os
import requests
import re
import socket
import urllib3
from bs4 import BeautifulSoup
import time
import redis
from threading import Thread
import asyncio


"""
    介绍：1.使用Redis控制日志等级、将数据存于当前目录的data目录下.使用时请用Redis连接:   host='127.0.0.1', port=6379
                                                                            （可以再主函数中修改参数更改默认参数）                                                                         
         2.多线程爬取数据（网络访问）、数据存储（IO）、日志存储（IO），三者通过Redis类似栈的生产-消费模式实现异步
         3.并发协程网络访问
    注意：若输入路径，路径中不能有空格，否则会影响参数判断，如：C:\\Users\\Evan Ju\\OneDrive\\Desktop\\主机桌面\\test\\
                                                    会被判断为：C:\\Users\\Evan
         为解决问题，爬虫完毕需要手动退出，没有找到合适的参考变量，控制循环结束
    版本：1.0
    有疑问或者bug请联系我：13679620795,2857111062@qq.com,2590292@gmail.com
                                                    朱淳 Evan Ju
"""


# Global variables and default configuration.
text = """
    spider.py -u url -d deep -f logfile -l loglevel(1-5) --concurrency number --key "HTML5"
    ●-u指定爬虫开始地址
    ●-d指定爬虫深度
    ●-f日志文件路径，可选参数，默认spider.log
    ●-l日志记录文件记录详细程度,数字越大记录越详细，可选参数,默认1，范围1~5
    ●--concurrency指定线程池大小，多线程爬取页面，可选参数,默认1
    ●--key 页面内的关键词,保存满足该关键词的网页，可选参数,默认为所有页面  
"""
url = "https://www.csdn.net/"  # crawl start URL (overridden by -u)
deep = '1'  # crawl depth, kept as a string (overridden by -d)
logfile = os.getcwd() + "\\spider.log"  # default log file path (Windows-style separators)
log_level = '1'  # log verbosity '1'-'5', kept as a string (overridden by -l)
concurrency = '1'  # number of concurrent fetch coroutines, as a string (overridden by --concurrency)
key = "ALSDVCSRTTUYGDSS12235"  # sentinel: when key equals this string, every page is saved
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome\
    91.0.4472.114 Safari/537.36 Edg/91.0.864.59'
}
redis_kv = redis.Redis()  # placeholder client; replaced by connect_redis() at startup
start_time = time.strftime(r"%Y-%m-%d %H-%M-%S", time.localtime())  # run timestamp, used for file naming
start = time.time()  # used to compute total run time
index = 0  # counts crawled pages and names the saved data files
control_signal = 1  # master loop switch; when the three signals below reach 0 the program stops
log_signal = 1  # log-thread signal, set to 0 when finished
access_signal = 1  # page-fetch signal, set to 0 when finished
save_signal = 1  # data-save signal, set to 0 when finished
loop = asyncio.get_event_loop()  # event loop (NOTE(review): deprecated outside a running loop since 3.10)
data_signal = start_time + "data"  # Redis key of the list holding fetched page data
url_signal = start_time + "url"  # Redis key of the list holding URLs to fetch
result = ""


# 返回当前日期
# Return the current local time formatted for file names and log records.
def get_current():
    now = time.localtime()
    return time.strftime(r"%Y-%m-%d %H-%M-%S", now)


# 日志生成
# Push one log record onto the per-level Redis list for this run.
def set_log(level, name, value):
    record = get_current() + ' ------ ' + name + ' ------ ' + value
    redis_kv.rpush(start_time + '_' + str(level), record)


# redis连接
# Connect to Redis through a connection pool and replace the module-level client.
# Bug fix: the port default was the string '6379'; redis expects an int, so we
# default to the int and coerce any string argument for backward compatibility.
def connect_redis(host='127.0.0.1', port=6379):
    global redis_kv
    pool = redis.ConnectionPool(host=host, port=int(port))
    redis_kv = redis.Redis(connection_pool=pool)
    set_log(1, 'redis', '已连接')


# 获取redis存储值，转换为utf-8编码，返回value
# Pop the left-most value of a Redis list and decode it to str (utf-8 by default).
def get_value_to_str(redis_key, encoding='utf-8'):
    raw = redis_kv.lpop(redis_key)
    return str(raw, encoding=encoding)



# 检查网址是否合法，并在一定程度上进行纠正
# Validate a URL and, when the scheme is missing, try to repair it by prepending
# "http://".  Returns the (possibly corrected) URL on success and 0 on failure.
# If the corrected URL is the crawl root, the global `url` is updated too.
def inspect_url(url_slave):
    global url
    master_log = 0
    if url == url_slave:
        set_log(4, '网址检查', "根节点" + url_slave)
        master_log = 1
    if re.match(r'^https?:/{2}\w.+$', url_slave):
        set_log(4, '网址检查', "网址合法" + url_slave)
        return url_slave
    set_log(4, '网址检查', "网址输入不规范，正在修正" + url_slave)
    url_slave = 'http://' + url_slave
    try:
        # Bug fix: headers must be a keyword argument; the second positional
        # argument of requests.get() is `params`, not `headers`.
        response = requests.get(url_slave, headers=headers, allow_redirects=False)
        if response.status_code == 200:
            set_log(4, '网址检查', "修正成功" + url_slave)
            if master_log == 1:
                url = url_slave
            return url_slave
        return 0
    except (socket.gaierror,
            urllib3.exceptions.NewConnectionError,
            urllib3.exceptions.MaxRetryError,
            requests.exceptions.RequestException):
        set_log(4, '网址检查', "修正失败，网址不合法或者无法访问，正在舍弃" + url_slave)
        # Bug fix: the old code fell through and returned None here, which
        # slipped past callers that only test for 0.
        return 0


# 返回参数数据，判断参数是否符合标准，参数合法返回 1， 不合法返回 0
# Parse sys.argv, populate the module-level configuration globals and validate
# them.  Returns 1 when all parameters are valid, 0 otherwise.
def get_parameter():
    global url
    global deep
    global logfile
    global log_level
    global concurrency
    global key
    parameter_list = sys.argv
    set_log(1, '参数', " ".join(parameter_list))
    try:
        # Required parameters: start URL and crawl depth.
        url = parameter_list[parameter_list.index('-u') + 1]
        deep = parameter_list[parameter_list.index('-d') + 1]
    except ValueError:
        set_log(1, '参数', "参数设置不全！或者错误，参数名请用小写、英文输入")
        set_log(1, '提示', "日志文件存于" + logfile)
        return 0
    except IndexError:
        set_log(1, '参数', "参数值缺失！")
        return 0

    # Optional parameters: take the value following the flag when present.
    def _optional(flag, default):
        try:
            return parameter_list[parameter_list.index(flag) + 1]
        except (ValueError, IndexError):
            # Bug fix: IndexError was not caught before, so a trailing flag
            # with no value crashed the parser.
            return default

    logfile = _optional('-f', logfile)
    log_level = _optional('-l', log_level)
    concurrency = _optional('--concurrency', concurrency)
    key = _optional('--key', key)
    set_log(1, '参数', "可选参数设置: -f " + logfile + " -l " + log_level + " --concurrency " + concurrency + \
            " --key " + key)
    # Bug fix: '1.1' used to be accepted, but int(log_level) raises on it later.
    if log_level not in ['1', '2', '3', '4', '5']:
        set_log(1, '参数', "详细程度 -l 范围在1~5的整数")
        return 0
    if inspect_url(url) == 0:
        set_log(1, '参数', "网址无效，请检查")
        return 0
    if not os.path.exists(logfile):
        (file_path, file_name) = os.path.split(logfile)
        set_log(1, '参数', "输入路径不存在，正在创建" + logfile)
        # Bug fix: os.makedirs('') raises when logfile has no directory part.
        if file_path:
            try:
                os.makedirs(file_path)
            except FileExistsError:
                pass
    return 1


# 获取网页全部信息文本
# Fetch a page and return its body decoded as utf-8, falling back to gbk.
# Returns 0 when the URL is invalid, unreachable, or cannot be decoded.
def get_text(url_slave):
    url_slave = inspect_url(url_slave)
    if url_slave == 0 or url_slave is None:
        set_log(1, '提示', "无法获取数据" + str(url_slave))
        return 0
    try:
        # Bug fixes: headers must be a keyword argument (the second positional
        # argument of requests.get() is `params`), and the page is downloaded
        # once instead of once per attempted encoding.
        content = requests.get(url_slave, headers=headers, allow_redirects=False).content
    except requests.exceptions.RequestException:
        # The original logged this from an unreachable try/else branch and
        # never actually caught network errors.
        set_log(1, '网络或者其它不可抗力原因', "无法获取数据，放弃该地址，记号【Error_Connect_Failed】" + str(url_slave))
        return 0
    for encoding in ('utf-8', 'gbk'):
        try:
            return content.decode(encoding)
        except UnicodeDecodeError:
            continue
    set_log(1, '获取信息', "无法获取数据，无法解码" + str(url_slave))
    return 0


# 解析html文档为bs4对象
# Parse raw HTML text into a BeautifulSoup tree using the lxml backend.
def analysis_html(html_slave):
    soup = BeautifulSoup(html_slave, 'lxml')
    return soup


# 根据网址获取bs4对象
# Fetch a URL and return its parsed BeautifulSoup tree, or 0 on failure.
def get_bs(url_slave):
    html = get_text(url_slave)
    return 0 if html == 0 else BeautifulSoup(html, 'lxml')


# 修正网址
# Normalise a href extracted from a page into a full URL.
# Returns the corrected URL, or the string 'Error' when it cannot be fixed.
def correct_url(url_slave):
    global url
    if re.match('\\//', url_slave):
        # Protocol-relative link ("//host/path"): drop the leading slashes;
        # inspect_url() will prepend "http://" later.
        set_log(4, '修正网址', "网址不全，正在修正" + url_slave)
        url_slave = str(url_slave)[2:]
        set_log(5, '修正网址', "修正完成" + url_slave)
        return url_slave
    if re.match(r'^https?:/{2}\w.+$', url_slave):
        set_log(5, '修正网址', "网址正确，正在返回" + url_slave)
        return url_slave
    if re.match('\\/', url_slave):
        # Root-relative link: prefix the crawl root.
        set_log(4, '修正网址', "网址不全，正在修正" + url_slave)
        set_log(5, '修正网址', "修正完成" + url + str(url_slave))
        return url + str(url_slave)
    # Bug fix: the class was "[0-9a-zA-z]" — the range "A-z" also matches
    # "[", "_", "`" and other punctuation between 'Z' and 'a'.
    if re.match('^[0-9a-zA-Z].*', url_slave):
        set_log(4, '修正网址', "网址不全，正在修正" + url_slave)
        set_log(5, '修正网址', "修正完成" + url + '/' + url_slave)
        return url + '/' + url_slave
    return 'Error'


# 返回该网页下所有网址的数组, 若爬出的网址不规范，修改修正
# Collect every reachable child URL from a parsed page and return them as a list.
def get_all_slave_url(bs4):
    url_list = []
    try:
        for anchor in bs4.find_all('a'):
            judge_url = correct_url(str(anchor.attrs['href']))
            # Bug fix: `is not 'Error'` compared object identity, not equality
            # (a SyntaxWarning since Python 3.8); use != instead.
            if judge_url != 'Error' and inspect_url(judge_url) != 0:
                url_list.append(judge_url)
                set_log(5, '获取网页', "获取网页内子网页" + judge_url)
    except KeyError:
        # An <a> without an href attribute ends the scan, as before.
        pass
    set_log(4, '获取网页', "获取网页内所有可访问子网页" + str(len(url_list)) + "个")
    return url_list


# 从Redis提取数据保存至文件
def save_data():
    global index
    global start_time
    global redis_kv
    global control_signal
    global save_signal
    global data_signal
    try:
        if not os.path.exists(os.getcwd() + "\\data"):
            set_log(3, '保存数据', "没有创建目录，正在创建" + os.getcwd() + "\\data")
            os.mkdir(os.getcwd() + "\\data")
        if not os.path.exists(os.getcwd() + "data\\" + start_time):
            set_log(3, '保存数据', "没有创建目录，正在创建" + os.getcwd() + "\\data\\" + start_time)
            os.mkdir(os.getcwd() + "\\data\\" + start_time)
    except FileExistsError:
        pass
    while control_signal:
        if len(redis_kv.lrange(data_signal, 0, -1)) > 0:
            set_log(4, '获取数据', "获取成功，正在保存...第" + str(index) + "个")
            save_signal = 1
            data = get_value_to_str(data_signal)
            set_log(5, "data消费", "data缓冲区消费1个data，data剩余个数：" + str(len(redis_kv.lrange(data_signal, 0, -1))))
            data_path = os.getcwd() + "\\data\\" + start_time + '\\' + str(index) + '.txt'
            set_log(3, '保存数据', "正在创建数据文件" + data_path)
            try:
                write = open(data_path, 'w')
                write.write(str(data))
                set_log(2, '保存数据', "保存成功数据文件" + data_path)
                write.close()
            except UnicodeEncodeError:
                write = open(data_path, 'w', encoding='utf-8')
                write.write(str(data))
                set_log(2, '保存数据', "保存成功数据文件" + data_path)
                write.close()
        else:
            time.sleep(0.01)


# 启动保存线程
# Start the data-saving consumer as a daemon thread.
def start_save():
    # Bug fix: Thread.setDaemon() is deprecated; use the daemon keyword.
    thread_log = Thread(target=save_data, args=(), daemon=True)
    set_log(5, '日志', '保存线程已打开' + str(thread_log))
    thread_log.start()


# 访问网页，保存当前节点信息，上交至Redis，可并发
# Coroutine consumer: pop URLs from Redis, fetch each page, and push the page
# data back onto the Redis data list when it matches the key filter.
async def access():
    global index
    global key
    global redis_kv
    global start_time
    global access_signal
    global data_signal
    global url_signal
    while control_signal:
        if len(redis_kv.lrange(url_signal, 0, -1)) > 0:
            access_signal = 1
            url_slave = get_value_to_str(url_signal)
            set_log(5, "url消费", "url缓冲区消费1条网址，url剩余个数：" + str(len(redis_kv.lrange(url_signal, 0, -1))))
            set_log(4, '获取网址', "正在提取该网页数据" + str(url_slave))
            index += 1
            data = get_text(url_slave)
            # Bug fixes: `key is "..."` compared identity, not equality; the
            # page was downloaded a second time instead of reusing `data`; and
            # `key in data` raised TypeError when the fetch failed (data == 0).
            if key == "ALSDVCSRTTUYGDSS12235":
                redis_kv.rpush(data_signal, str(data))
                set_log(5, "data生产", "data缓冲区生产1个data，data剩余个数：" + str(len(redis_kv.lrange(data_signal, 0, -1))))
            elif data != 0 and key in data:
                redis_kv.rpush(data_signal, str(data))
                set_log(5, "data生产", "data缓冲区生产1个data，data剩余个数：" + str(len(redis_kv.lrange(data_signal, 0, -1))))
            else:
                set_log(4, '保存数据', url_slave + "该页面没有关键字" + key)
        else:
            access_signal = 0
            await asyncio.sleep(0.1)


# 启动访问线程
# Start the page-fetching coroutine on its own daemon thread.
def start_access():
    # Bug fix: access() is a coroutine function, so Thread(target=access)
    # only created a coroutine object and never executed it.  Run it inside
    # an event loop owned by the thread.  setDaemon() is also deprecated.
    thread_log = Thread(target=lambda: asyncio.run(access()), daemon=True)
    set_log(5, '日志', '启动线程已打开' + str(thread_log))
    thread_log.start()


# 日志保存，预先保留至Redis，持久化于选择目录
def log_save(data):
    global logfile
    write = open(logfile, 'a')
    write.write(data + "\n")
    write.close()
    return 1


# 日志打印控制
def log_print(sleep_time):
    global log_level
    global redis_kv
    global start_time
    global control_signal
    global log_signal
    global access_signal
    global save_signal
    global loop
    while control_signal:
        for level in range(int(log_level)):
            if len(redis_kv.lrange(start_time + '_' + str(level + 1), 0, -1)) > 0:
                log = get_value_to_str(start_time + '_' + str(level + 1))
                print(log)
                time.sleep(sleep_time)
                log_save(log)


# 启动日志线程
def start_log(sleep_time):
    global log_level
    global redis_kv
    global start_time
    thread_log = Thread(target=log_print, args=(sleep_time, ))
    thread_log.setDaemon(True)
    set_log(5, '日志', '日志线程已打开' + str(thread_log))
    thread_log.start()


# 深度优先的查询算法,将地址传到Redis队列中
def select_content(url_slave, deep_slave):
    global index
    global deep
    global redis_kv
    global start_time
    global url_signal
    deep_slave = int(deep_slave)
    # 将地址传到Redis队列中
    redis_kv.rpush(url_signal, str(url_slave))
    set_log(5, "url生产", "生产1条网址，放入url缓冲区，url剩余个数：" + str(len(redis_kv.lrange(url_signal, 0, -1))))
    set_log(3, '访问网页', str(url_slave))
    deep_slave -= 1
    url_slaves = []
    # 如果该节点有子节点从左往右遍历子节点
    if deep_slave > 0:
        bs = get_bs(url_slave)
        if bs == 0:
            set_log(5, "解析数据", "获取数据失败：" + url_slave)
        else:
            url_slaves = get_all_slave_url(bs)
        if len(url_slaves) > 0:
            for u in url_slaves:
                select_content(u, deep_slave)
    else:
        set_log(5, '遍历', '深度遍历完毕，已达到' + deep)


# 查询线程启动
def start_select(url_slave, deep_slave):
    thread_log = Thread(target=select_content, args=(url_slave, deep_slave,))
    thread_log.setDaemon(True)
    set_log(5, '查询', '查询线程已打开' + str(thread_log))
    thread_log.start()
    return thread_log


# 删除这次爬虫Redis生成主键，该功能可能会报错...如果key的value已经被全部取出了
def delete_redis_key():
    try:
        redis_kv.delete(*redis_kv.keys(pattern='*' + start_time + '*'))
    except redis.exceptions.ResponseError:
        pass


# 等待日志打印至控制台以及保存，结束程序
def wait_log_and_end():
    global log_level
    for i in range(int(log_level)):
        while len(redis_kv.lrange(start_time + '_' + str(i + 1), 0, -1)) > 0:
            time.sleep(0.001)
        sys.exit(0)


# Shutdown watcher: wait until control_signal is cleared, then hard-exit.
def close():
    global access_signal
    global save_signal
    global control_signal
    while control_signal:
        # Bug fix: the old `pass` loop busy-spun at 100% CPU.
        time.sleep(0.01)
    # Bug fix: os.exit() does not exist (AttributeError at runtime);
    # os._exit() is the hard-exit primitive that also works off-main-thread.
    os._exit(0)


# 关闭线程启动
def start_close():
    thread_log = Thread(target=close, args=())
    thread_log.setDaemon(True)
    set_log(5, '查询', '查询线程已打开' + str(thread_log))
    thread_log.start()


# Coroutine wrapper that runs the (synchronous) depth-first crawl.
async def task_select():
    global url
    global deep
    # Bug fix: select_content() is a plain function; awaiting its None return
    # value raised "object NoneType can't be used in 'await' expression".
    select_content(url, int(deep))


# Coroutine wrapper delegating to the page-fetch consumer.
async def task_access():
    return await access()


# Coroutine wrapper that runs the (synchronous) data-saving loop.
async def task_save_data():
    # Bug fix: save_data() is a plain function; awaiting its None return
    # value raised "object NoneType can't be used in 'await' expression".
    save_data()


# 异步协程，并发访问
async def thread_tasks():
    global url
    global deep
    global concurrency
    global loop
    tasks_list = []
    for i in range(int(concurrency)):
        tasks_list.append(asyncio.gather(task_access()))
    await asyncio.wait(tasks_list)

if __name__ == '__main__':
    # Connect to Redis.
    connect_redis()
    # Validate the command-line parameters.
    is_start = get_parameter()
    # Silence warnings so certificate-verification failures do not abort the crawl.
    requests.packages.urllib3.disable_warnings()
    # Start the log-printing thread.
    start_log(0.01)
    # Parameters invalid: drain the logs and shut the program down.
    if is_start == 0:
        # Wait until pending logs are printed to the console and saved.
        wait_log_and_end()
    # Start the crawl (URL producer thread).
    select_status = start_select(url, deep)
    # Start the data-saving consumer thread.
    start_save()
    # Start the concurrent page fetchers (blocks here).
    asyncio.run(thread_tasks())








