# -*- coding: utf-8 -*-
# @time     : 2022/2/18 0018 16:52
# @Author   : mingy
# @File     : domain_search.py
# @Software : PyCharm

import sys
import re
import json
import time
import requests
import argparse
import contextlib
from urllib.parse import urlparse
from fake_useragent import UserAgent

# Cookie of a logged-in Bing session (fill in before use).
bing_cookie = ""

# Cookie of a logged-in Baidu session (fill in before use).
baidu_cookie = ""

Accept = "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7"

# VirusTotal API key (fill in before use).
vt_apikey = ""

ua = UserAgent()
# Evaluate ua.random exactly once: every access yields a *different* random
# UA string, so the original "if ua.random: User_Agent = ua.random" checked
# one value and assigned another. Fall back to a fixed UA if it is falsy.
User_Agent = ua.random or (
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 "
    "Safari/537.36 Edg/98.0.1108.56"
)

def bing_search(site, page, proxy):
    """Scrape Bing search results for subdomains of *site*.

    Args:
        site:  target domain, e.g. "example.com".
        page:  number of result pages to fetch (int or numeric string).
        proxy: requests-style proxies dict, or None for a direct connection.

    Returns:
        De-duplicated list of subdomain strings (possibly empty).
    """
    subdomains = set()
    headers = {
        'User-Agent': User_Agent,
        'Cookie': bing_cookie
    }
    for p in range(int(page)):
        # Reset per page: the original kept the previous page's HTML when a
        # fetch failed and silently re-parsed the same results again.
        html = ""
        # Best effort: a failed page fetch is skipped, not fatal.
        with contextlib.suppress(Exception):
            url = "https://www.bing.com/search?q=site%3A{0}&qs=n&form=QBRE&sp=-1&pq=site%3A{0}" \
                  "&sc=2-11&sk=&cvid=C1A7FC61462345B1A71F431E60467C43&toHttps=1" \
                  "&redig=3FEC4F2BE86247E8AE3BB965A62CD454&pn=2&first={1}1&FROM=PERE".format(site, p)
            html = requests.get(url, headers=headers, timeout=5, proxies=proxy).content.decode()
        for link in re.findall('<a class="tilk" target="_blank" href="(.*?)"', html):
            domain = urlparse(link).netloc
            # Relative hrefs have no host part; the original appended "" here.
            if domain:
                subdomains.add(domain)
    print(">>> Bing 搜索引擎获取子域名: {} 个 <<<".format(len(subdomains)))
    return list(subdomains)

def baidu_search(site, page, proxy):
    """Scrape Baidu search results for subdomains of *site*.

    Args:
        site:  target domain, e.g. "example.com".
        page:  number of result pages to fetch (int or numeric string).
        proxy: requests-style proxies dict, or None for a direct connection.

    Returns:
        De-duplicated list of subdomain strings (possibly empty).
    """
    subdomains = set()
    headers = {
        'Accept': Accept,
        'User-Agent': User_Agent,
        # Fixed header name (the original 'Accept - Encoding' is invalid and
        # was ignored). 'br' is deliberately omitted: requests cannot decode
        # brotli responses unless the optional brotli package is installed.
        'Accept-Encoding': "gzip, deflate",
        'Cookie': baidu_cookie
    }
    for p in range(int(page)):
        try:
            url = "https://www.baidu.com/s?wd=site%3A{0}&pn={1}0&oq=site%3A{0}" \
                  "&tn=baiduhome_pg&ie=utf-8&rsv_idx=2&rsv_pq=d59fc7380000344c" \
                  "&rsv_t=38efmxGEvInEMk2hU6IhokqHGzr3WTIIPSDy2Kx%2FsmGphjpX6JSRFpfdGfHMYJkw3le%2B".format(site, p)
            html = requests.get(url, headers=headers, timeout=5, proxies=proxy).content.decode()
        except Exception:
            # Best effort: skip this page instead of re-parsing stale HTML
            # (the original fell through and parsed the previous page again).
            continue
        subdomains.update(
            re.findall('<span class="c-color-gray" aria-hidden="true">(.*?)/</span>', html))
    print(f">>> Baidu 搜索引擎获取子域名: {len(subdomains)} 个 <<<")
    return list(subdomains)


def vt_search(site, proxy):
    """Query the VirusTotal v3 API for subdomains of *site*.

    Args:
        site:  target domain, e.g. "example.com".
        proxy: requests-style proxies dict, or None for a direct connection.

    Returns:
        De-duplicated list of subdomain strings; empty on any failure.
        The original crashed with JSONDecodeError on json.loads("") when
        the request failed — failures are now handled gracefully.
    """
    header = {
        'x-apikey': vt_apikey
    }
    url = "https://www.virustotal.com/api/v3/domains/" + site + "/subdomains?limit=100"
    subdomains = []
    try:
        html = requests.get(url, headers=header, timeout=5, proxies=proxy).content.decode()
        # Missing "data" (e.g. an API error object) yields an empty result.
        entries = json.loads(html).get("data", [])
        subdomains = list({entry["id"] for entry in entries})
    except Exception:
        pass  # network error, bad JSON, or unexpected schema: best effort
    print(f'>>> Virustotal API获取子域名: {len(subdomains)} 个 <<<')
    return subdomains


def saveDomain(Subdomain, site):
    """Print each subdomain and append them all to "<site>.txt".

    Args:
        Subdomain: iterable of subdomain strings.
        site:      target domain; used to build the output filename.
    """
    print(">>> 子域名搜索结果 >>>")
    filename = f"{site}.txt"
    # Open the file once: the original reopened it for every entry and
    # called f.close() inside the with-block, which is redundant.
    with open(filename, "a+", encoding="utf-8") as f:
        for domain in Subdomain:
            print(domain)
            f.write(domain + "\n")
    print("<<< 子域名搜索完毕 <<<")
    print("<<< 保存子域名到文件: {} 成功 <<<".format(filename))

def main():
    """Parse CLI arguments and dispatch to the selected search backend."""
    parser = argparse.ArgumentParser(
        description=">>> Search Subdomain by mingy <<<",
        usage=f'python {sys.argv[0]} -t yijingsec.com -s baidu -pa 5 -pr 127.0.0.1:7891')
    parser.add_argument("-t", "--target", help="目标域名: yijingsec.com")
    parser.add_argument("-s", "--search", help="搜索引擎: baidu、bing、vt、all")
    # Default replaces the manual "args.page if args.page else 5" fallback.
    parser.add_argument("-pa", "--page", default=5, help="搜索页数: 5")
    parser.add_argument("-pr", "--proxy", help="使用代理: 127.0.0.1:7891")
    # Show help instead of silently doing nothing when run with no arguments.
    if len(sys.argv) == 1:
        sys.argv.append('-h')
    args = parser.parse_args()

    # Same proxy endpoint for both schemes, as requests expects.
    proxy = {'http': args.proxy, 'https': args.proxy} if args.proxy else None

    if not args.target:
        print("请指定目标域名")
        print(f"Ex: python {sys.argv[0]} -t yijingsec.com")
        # sys.exit() instead of the builtin exit(): the latter is provided by
        # the 'site' module and is not guaranteed in all interpreters.
        sys.exit()
    site = args.target
    page = args.page

    if args.search == "bing":
        saveDomain(bing_search(site, page, proxy), site)
    elif args.search == "baidu":
        saveDomain(baidu_search(site, page, proxy), site)
    elif args.search == "vt":
        saveDomain(vt_search(site, proxy), site)
    elif args.search == "all":
        combined = baidu_search(site, page, proxy) + bing_search(site, page, proxy) + vt_search(site, proxy)
        unique = list(set(combined))
        print(f">>> 去重后总共获取子域名： {len(unique)} 个 <<<")
        saveDomain(unique, site)
    else:
        print("请指定搜索引擎: baidu、bing、vt、all")
        print(f"Ex: python {sys.argv[0]} -t yijingsec.com -s baidu")
        sys.exit()

# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()