import threading
import time
import urllib.request
from concurrent.futures import ThreadPoolExecutor
# Scan parameters are read interactively at import time.
url = input("输入你要扫描到的网址：")  # base URL to scan, e.g. "http://example.com/"
txt = input("输入字典(php.txt)：")  # path to the wordlist file, one path suffix per line
open_url = []  # URLs that answered HTTP 200/301 — the discovered back-end pages
all_url = []  # every candidate URL built as url + wordlist entry
threads = []  # worker threads started by main()

def search_url(url, txt):
    """Build candidate URLs from a wordlist and append them to ``all_url``.

    Reads *txt* line by line.  Each line is stripped of surrounding
    whitespace — unlike ``replace('\\n', '')`` this also removes the
    ``\\r`` left by Windows-format (CRLF) dictionaries, which would
    otherwise corrupt every candidate URL.  Blank lines are skipped so
    the bare base URL is not accidentally queued for scanning.

    Args:
        url: base URL the wordlist entries are appended to.
        txt: filesystem path of the wordlist file.
    """
    with open(txt, 'r') as f:
        for line in f:
            suffix = line.strip()
            if suffix:  # ignore empty/whitespace-only dictionary lines
                all_url.append(url + suffix)



def handle_url(urllist, timeout=10):
    """Probe one candidate URL; record it in ``open_url`` on a hit.

    A hit is an HTTP 200 or 301 response.  (urlopen follows redirects
    automatically, so a 301 is rarely observed directly; the check is
    kept for parity with the original behavior.)  Unreachable or
    malformed URLs are simply not hits and are silently skipped.

    Args:
        urllist: the full URL to probe.
        timeout: socket timeout in seconds, so a dead host cannot hang
            a worker thread forever (new, backward-compatible default).
    """
    print("查找：" + urllist + '\n')
    try:
        # Context manager closes the response socket even on error paths
        # (the original leaked the connection).
        with urllib.request.urlopen(urllist, timeout=timeout) as req:
            if req.getcode() in (200, 301):
                open_url.append(urllist)
    except (OSError, ValueError):
        # URLError/HTTPError and socket errors are OSError subclasses;
        # ValueError covers malformed URLs.  Narrower than the original
        # bare `except:` so real bugs (e.g. NameError) still surface.
        pass


def main():
    """Run one full scan: build candidates, probe them all, report hits.

    Probing is I/O-bound, so threads overlap the network waits.  A
    bounded ThreadPoolExecutor replaces the original one-thread-per-
    wordlist-entry scheme, which could exhaust OS thread limits on a
    large dictionary.  Results are reported from the global ``open_url``
    list that the workers append to.
    """
    search_url(url, txt)
    # Executor submits every candidate eagerly; leaving the `with` block
    # waits for all probes to finish (shutdown(wait=True)).
    with ThreadPoolExecutor(max_workers=32) as pool:
        pool.map(handle_url, all_url)
    if open_url:
        print("扫描成功，网站存在的后台地址为：")
        for hit in open_url:
            print("[+]" + hit)
    else:
        print("没有扫描到网站后台,字典不够给力")


if __name__ == "__main__":
    # Interactive driver: scan, report elapsed time, then prompt for the
    # next target.  The loop has no exit condition; stop with Ctrl+C.
    while True:
        start = time.perf_counter()
        main()
        end = time.perf_counter()
        print("The function spend time is %.3f seconds" % (end - start))
        # Prompt for the next scan's target URL and wordlist path.
        url = input("输入你要扫描到的网址：")
        txt = input("输入字典(php.txt)：")
        # Rebind (not mutate) the shared lists so the next main() run
        # starts clean — the worker functions look these names up as
        # module globals, so rebinding here is visible to them.
        open_url = []
        all_url = []
        threads = []

