import re

import requests
from bs4 import BeautifulSoup
from urllib import parse

import config
# 导入需要的模块
from modules import sql_injection

# Module-level store of URL-signature hashes used for de-duplication.
# TODO: move this into a scheduler module instead of a global.
SIMILAR_SET = set()  # hashes of URL signatures already seen (see Crawler.url_similar_check)


class Crawler:
    """Breadth-first web crawler restricted to the target URL's domain.

    Collects same-domain links level by level up to ``max_depth``,
    de-duplicating URLs by a path + query-parameter-name signature
    (or by caller-supplied regex patterns). Discovered URLs accumulate
    in ``self.queue`` in discovery order.
    """

    def __init__(self, target, depth=2, login_info=None, inner_session=None, patterns=None):
        """
        :param target: start URL; crawling stays within its netloc
        :param depth: maximum crawl depth (number of link levels to follow)
        :param login_info: optional login data in the format
            ``{'url': <login page>, 'payload': {<field>: <value>, ...}}``
        :param inner_session: optional pre-authenticated requests session.
            When omitted a fresh session is created per instance.
            (Previously the default was ``requests.session()`` evaluated
            once at import time — a mutable default argument — so every
            Crawler silently shared one session's cookies/auth state.)
        :param patterns: optional list of regexes for URL de-duplication;
            all URLs matching the same pattern count as one URL
        """
        self.target = target
        self.max_depth = depth
        self.depth = 1  # depth of the level currently being crawled
        self.scanList = [target]  # URLs queued for the next level
        self.url_struct = parse.urlparse(target)
        # One session per instance unless the caller supplies its own.
        self.session = inner_session if inner_session is not None else requests.session()
        self.patterns = patterns
        self.queue = [target]  # every accepted URL, in discovery order
        if login_info is not None:
            try:
                payload = login_info['payload'].copy()
                r = self.session.get(login_info['url'])
                # Grab an anti-CSRF token from the first hidden <input>, if present.
                token = BeautifulSoup(r.content, 'html.parser').find('input', type='hidden')
                if token is not None:
                    payload[token.attrs.get('name')] = token.attrs.get('value')
                self.session.post(login_info['url'], data=payload)
            except Exception:
                # Best-effort login: report the failure and continue unauthenticated.
                print("\033[31m[x]登陆失败 \033[0m")

    def get_next_urls(self, s_url):
        """Fetch *s_url* and enqueue every new same-domain link found in it.

        Flow: fetch page -> collect all ``href`` attributes -> resolve
        relative URLs against *s_url* -> filter exemptions, foreign hosts
        and duplicates -> append survivors to both the next-level scan
        list and the result queue.
        """
        try:
            response = self.session.get(s_url)
        except requests.RequestException:
            # Narrowed from a bare except: only network-level failures
            # should be treated as "unreachable page".
            print("\033[31m [x]无法连接的页面 \033[0m")
            return
        if response.status_code == 404:
            return
        html = response.text  # TODO: honor the response's declared encoding
        soup = BeautifulSoup(html, features='html.parser')
        for tag in soup.find_all(href=True):
            next_url = parse.urljoin(s_url, str(tag.get('href')).strip())
            # Check exemptions BEFORE the dedup test so an exempted URL's
            # signature never enters SIMILAR_SET and blocks a similar
            # legitimate URL later.
            if any(re.search(exemption, next_url) for exemption in config.crawler_exemptions):
                continue
            if (self.url_similar_check(next_url)
                    and parse.urlparse(next_url).netloc == self.url_struct.netloc):
                # New URL inside the target domain: schedule it.
                self.scanList.append(next_url)
                self.queue.append(next_url)

    def url_similar_check(self, url):
        """Return True the first time a URL "signature" is seen, else False.

        A URL's signature is either the first user pattern it matches, or
        its path plus the concatenated query parameter *names* (values are
        ignored), so ``page.php?id=1`` and ``page.php?id=2`` are treated
        as the same URL.
        TODO: real similarity analysis.
        """
        global SIMILAR_SET
        url_hash = None
        if self.patterns is not None:
            # All URLs matching the same pattern share one signature.
            for pattern in self.patterns:
                if re.match(pattern, url):
                    url_hash = hash(str(pattern))
                    break
        if url_hash is None:
            url_struct = parse.urlparse(url)
            # Keep only parameter names; drop their values.
            para = ''.join(q.split('=')[0] for q in url_struct.query.split('&'))
            url_hash = hash(url_struct.path + para)
        if url_hash in SIMILAR_SET:
            return False
        SIMILAR_SET.add(url_hash)
        return True

    def run(self):
        """Crawl breadth-first until max depth is reached or no URLs remain."""
        while self.depth <= self.max_depth and self.scanList:
            level_list = self.scanList
            self.scanList = []  # the next level accumulates here
            for url in level_list:
                self.get_next_urls(url)
            self.depth += 1


if __name__ == "__main__":
    # Demo run: crawl the target host, logging in first with the
    # credentials held in config.login_info. (Alternative setups —
    # regex-based dedup via `patterns=`, or passing a pre-authenticated
    # session via `inner_session=` — use the same constructor.)
    crawler = Crawler('http://43.143.198.167', login_info=config.login_info)
    crawler.run()

    # Dump every URL collected during the crawl.
    print("爬取结果")
    for found_url in crawler.queue:
        print(found_url)
