'''
0xdF zoomEye Search
'''
from time import sleep
import requests
import json
import urllib.request
from urllib import parse
import os
from concurrent import futures
from requests.exceptions import RequestException
from multiprocessing import Pool

# Accumulates "service|ip|port|..." records built by search(); consumed by is_200().
ips1 = []
# Browser-like User-Agent sent with the per-host probes in is_200().
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:104.0) Gecko/20100101 Firefox/104.0'}


def is_200(ip):
    """Probe one collected "service|ip|port|..." record and log its HTTP status.

    Prints a one-line summary ("status|host:port|field|...") and appends
    "url,status" to ip.txt.  An SSLError still counts as a live host
    (status "200"); any other failure is recorded as "error".
    """
    line = str(ip).split("|")
    # Field layout comes from search(): service|ip|port|site|title|...
    url = str(line[0]) + "://" + str(line[1]) + ":" + str(line[2]) + "/"
    try:
        requests.adapters.DEFAULT_RETRIES = 5  # retry transient connection failures
        # Context manager closes the session (and its sockets) deterministically.
        with requests.session() as s:
            s.keep_alive = False  # do not keep idle connections around
            # BUG FIX: Response has no ".status" attribute — the original
            # ".status" always raised AttributeError, and the old
            # '"status" in str(e)' fallback then mislabelled every
            # reachable host as "200".  ".status_code" is the real field.
            status = s.get(url, timeout=5, headers=headers).status_code
    except requests.exceptions.SSLError:
        # A TLS handshake problem still means the host answered.
        status = "200"
    except Exception:
        status = "error"
    print(str(status), end="|")
    for field in line:
        # The IPv4 field is joined to the next field with ":", others with "|".
        if len(field.split(".")) == 4:
            print(field, end=":")
        else:
            print(field, end="|")
    print()
    # Append mode: one "url,status" line per probed host.
    with open('ip.txt', 'a', encoding='utf-8') as f:
        f.write(str(url + "," + str(status)) + '\n')


def _parse_match(match):
    """Flatten one ZoomEye match dict into the "|"-delimited record that
    is_200() consumes: service|ip|port|site|title|country|isp|webapp|timestamp.

    The API omits keys it has no data for, so every field is extracted
    best-effort with a fallback ("NONE", or "80"/"http" for port/scheme).
    """
    def pick(getter, default):
        # Any missing key or unexpected shape falls back to the default.
        try:
            return getter()
        except Exception:
            return default

    ip = match['ip']
    site = pick(lambda: match['site'], 'NONE')
    port = pick(lambda: match['portinfo']['port'], "80")
    # Title lives either at the top level or under portinfo.
    title = pick(lambda: match['title'],
                 pick(lambda: match['portinfo']['title'], "NONE"))
    country = pick(lambda: match['geoinfo']['country']['code'], "NONE")
    isp = pick(lambda: match['geoinfo']['isp'], "NONE")
    timestamp = pick(lambda: match['timestamp'], "NONE")
    webapp = pick(lambda: match['webapp'][0]['name'], "NONE")
    # Normalize the scheme: anything mentioning https probes over https.
    service = pick(
        lambda: "https" if "https" in match['portinfo']['service'] else "http",
        "http")
    fields = (service, ip, port, site, title, country, isp, webapp, timestamp)
    return "|".join(str(f) for f in fields)


def search(endpage, search_info, headers):
    """Collect up to *endpage* result pages for *search_info* from the
    ZoomEye web search, then probe every collected host in parallel.

    Parsed records accumulate in the module-level ips1 list; is_200()
    prints and logs the per-host results.
    """
    page = 0
    while page < int(endpage):
        page += 1
        sleep(3)  # throttle requests to stay under the rate limit
        search_url = ('https://www.zoomeye.org/search?q=' + parse.quote(search_info)
                      + "&page=" + str(page) + "&pageSize=50&t=v4%2Bv6%2Bweb")
        print("正在采集:" + search_url)
        r = requests.get(url=search_url, headers=headers)
        datas = json.loads(r.text, strict=False)
        try:
            matches = datas['matches']
        except Exception:
            # A response without 'matches' means we ran past the last page.
            print("已全部爬取！")
            break
        for match in matches:
            ips1.append(_parse_match(match))
        print("采集完毕 第" + str(page) + "页")
    # Context manager terminates the worker processes instead of leaking them.
    with Pool() as pool:
        pool.map(is_200, ips1)


if __name__ == "__main__":
    search_info = input("请输入搜索标题:")
    end_page = input("请输入收集页数")
    # Browser-like request headers for the ZoomEye web API.
    # NOTE(review): Cube-Authorization / Cookie are left blank — they must be
    # filled with valid ZoomEye credentials or the API returns no matches.
    headers = {
        'Host': 'www.zoomeye.org',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:104.0) Gecko/20100101 Firefox/104.0',
        'Accept': 'application/json, text/plain, */*',
        'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
        'Accept-Encoding': 'gzip, deflate',
        'Cube-Authorization': '',
        'Connection': 'close',
        'Referer': 'https://www.zoomeye.org/',
        # BUG FIX: the comma after 'Cookie': '' was missing, so the next key
        # string-concatenated into the value and the file failed to parse
        # (SyntaxError).
        'Cookie': '',
        'Sec-Fetch-Dest': 'empty',
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Site': 'same-origin',
    }
    search(str(end_page), search_info, headers)
