import json
import os
import re
import sys
import time
from typing import Any, List
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup

# ----------------------------------------------------------
# Crawl entry point and logging switch.
target = 'http://jx2.xoyo.com'  # default site to crawl
logg = False  # verbose-logging flag; not consulted anywhere in the visible code
# ----------------------------------------------------------
# Intended crawl limits. NOTE(review): DEPTH/CAPA and their counters are
# defined but never read in the visible code — core() recurses without bound.
DEPTH = 10
CAPA = 1000
depth = 0
capa = 0
# ----------------------------------------------------------
# Spoof a Baiduspider User-Agent so sites that whitelist known crawlers respond.
headers = {
    'User-Agent': 'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)'
}
# Module-level result template. NOTE(review): init() builds and returns its
# own local dict with a different shape, so this template is never filled
# in the visible code.
result = {
    'detail': [],
    'date': '',
    'logs': [],
    'index': ''
}
logs = []  # request-error messages appended by core()
# ----------------------------------------------------------
# Timing scratch variables. NOTE(review): init() assigns time1/time_1 as
# locals, shadowing these — they stay 0 at module level.
time1 = 0
time_1 = 0
time2 = 0
time_2 = 0

# ----------------------------------------------------------
hashs: List[Any] = []  # hashes of pages already visited (dedup in core())
links: List[Any] = []  # unused in the visible code
count = 0              # unused in the visible code
track = 0              # unused in the visible code
seedStore = []         # all in-scope URLs discovered so far


# ----------------------------------------------------------
def init(target):
    """Crawl *target* and return a summary report.

    Resets the module-level crawl state (seedStore, hashs, whileName),
    runs the recursive crawl via core(), and returns a dict with a
    'base' section (target, seed count, elapsed time, timestamp) and a
    'detail' section (collected seeds, request-error logs).
    """
    # Prefix a scheme only when one is genuinely missing. The original
    # re.search('http', target) matched 'http' anywhere in the URL, so a
    # target such as 'example.com/httpdocs' would wrongly skip the prefix.
    if not target.startswith(('http://', 'https://')):
        target = 'http://' + target
    print('target', target)

    # Reset shared crawl state so repeated calls start fresh.
    global seedStore, hashs, whileName
    seedStore = []
    hashs = []
    whileName = geneWhite(target)  # host-based whitelist pattern for core()

    started = time.time()
    core(target)
    elapsed = round(time.time() - started)

    return {
        'base': {
            'target': target,
            'count': len(seedStore),
            'time': str(elapsed) + ' s',
            # NOTE(review): key 'data' holds a date string — likely a typo
            # for 'date'; kept as-is so existing consumers don't break.
            'data': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())),
        },
        'detail': {
            'seeds': seedStore,
            'logs': logs
        }
    }


# ----------------------------------------------------------
def geneWhite(tar):
    """Build the link-whitelist token from a schemed URL.

    Takes everything after '://', splits it on dots, and returns the
    first two labels joined with a dot when three or more are present
    (e.g. 'http://jx2.xoyo.com' -> 'jx2.xoyo'); otherwise just the
    first label (e.g. 'http://example.com' -> 'example').
    """
    labels = tar.split('://')[1].split('.')
    if len(labels) <= 2:
        return labels[0]
    return labels[0] + '.' + labels[1]

    # whileName = geneWhite(target)


# ----------------------------------------------------------
def core(tar):
    """Fetch *tar*, collect new whitelisted links, and recurse into them.

    Skips pages whose content has already been seen, filters anchors by
    the module-level whitelist pattern (whileName), records new URLs in
    seedStore, and appends request failures to logs. NOTE(review): there
    is still no depth/size limit — deep sites can hit the recursion limit.
    """
    try:
        res = requests.get(tar, timeout=3, headers=headers)
        res.encoding = 'utf-8'
        if res.status_code != 200:
            return

        # Deduplicate by page content. The original hashed the Response
        # object itself, which falls back to identity hashing and therefore
        # never recognised a repeated page.
        page_hash = hash(res.text)
        if page_hash in hashs:
            return
        hashs.append(page_hash)

        soup = BeautifulSoup(res.text, 'html.parser')
        seeds = []
        for anchor in set(soup.select('a[href]')):
            href = anchor.get('href')
            # Keep only links matching the whitelist pattern.
            if not re.search(whileName, href):
                continue
            # Resolve schemeless hrefs against the current page URL. The
            # original "'http:' + href" is only correct for protocol-relative
            # links ('//host/...') and produced malformed URLs otherwise;
            # urljoin handles both. Scheme check by prefix instead of the
            # original re.search('http', href), which matched anywhere.
            if not href.startswith(('http://', 'https://')):
                href = urljoin(tar, href)
            # 'in' replaces the original O(n) manual scan loops.
            if href not in seeds and href not in seedStore:
                print('\rscaning：', href, end='', flush=True)
                seeds.append(href)

        seedStore.extend(seeds)
        for seed in seeds:
            core(seed)

    except requests.exceptions.RequestException as e:
        # Best-effort crawl: record the failure and keep going.
        logs.append(str(e))

# ----------------------------------------------------------
# init()
