'Sets IP information in colname_ipdata.'
def set_data(self, addr, force=False):
    if not force and self.get_data(addr) is not None:
        return
    for data in [self.globaldb.data.country_byip(addr),
                 self.globaldb.data.as_byip(addr),
                 self.globaldb.data.location_byip(addr)]:
        if data is not None:
            self.db[self.colname_ipdata].update({'addr': addr},
                                                {'$set': data}, upsert=True)
'Gets IP information in colname_ipdata.'
def get_data(self, addr):
    data = self.find_one(self.colname_ipdata, {'addr': addr})
    if data is not None:
        del data['_id']
    return data
'Initializes the data columns, and creates the default indexes.'
def init(self):
    self.db[self.colname_geoip_country].drop()
    self.db[self.colname_geoip_as].drop()
    self.db[self.colname_geoip_city].drop()
    self.db[self.colname_country_codes].drop()
    self.db[self.colname_city_locations].drop()
    self.create_indexes()
'Initializes the "agent" columns, i.e., drops those columns and creates the default indexes.'
def init(self):
    self.db[self.colname_agents].drop()
    self.db[self.colname_scans].drop()
    self.db[self.colname_masters].drop()
    self.create_indexes()
'The DB connection.'
@property
def db(self):
    try:
        return self._db
    except AttributeError:
        self._db = Graph(self.dburl)
        return self._db
'The tuple representing the database version.'
@property
def db_version(self):
    try:
        return self._db_version
    except AttributeError:
        self._db_version = self.db.neo4j_version
        return self._db_version
'Returns a WHERE clause (tuple (query, parameters)) from a single filter (no OR). Devs: `flt` **can** be set from an untrusted source.'
def _add_clause_from_filter(self, flt, mode='node'):
    if not flt:
        return None
    if flt[0] in '-!~':
        neg = True
        flt = flt[1:]
    else:
        neg = False
    array_mode = None
    len_mode = None
    if flt.startswith('ANY '):
        array_mode = 'ANY'
        flt = flt[4:]
    elif flt.startswith('ALL '):
        array_mode = 'ALL'
        flt = flt[4:]
    elif flt.startswith('ONE '):
        array_mode = 'SINGLE'
        flt = flt[4:]
    elif flt.startswith('NONE '):
        array_mode = 'NONE'
        flt = flt[5:]
    elif flt.startswith('LEN '):
        len_mode = 'LENGTH'
        flt = flt[4:]
    try:
        operator = self.operators_re.search(flt).group()
    except AttributeError:
        operator = None
        attr = flt
    else:
        attr, value = [elt.strip() for elt in flt.split(operator, 1)]
        value = utils.str2pyval(value)
    if attr[0] in '@#':
        qtype = attr[0]
        attr = attr[1:]
    else:
        qtype = '@'
    try:
        elements, attr = attr.rsplit('.', 1)
        if elements == 'meta':
            if mode == 'edge':
                elements = ['linkmeta']
                self.meta_link = True
            elif mode == 'node':
                elements = ['srcmeta', 'dstmeta']
                self.meta_src = True
                self.meta_dst = True
        elif elements == 'src.meta':
            elements = ['srcmeta']
            self.meta_src = True
        elif elements == 'dst.meta':
            elements = ['dstmeta']
            self.meta_dst = True
        else:
            elements = [elements]
    except ValueError:
        if mode == 'node':
            elements = ['src', 'dst']
        elif mode == 'edge':
            elements = ['link']
        else:
            raise ValueError()
    else:
        assert all(self.identifier.search(elt) for elt in elements)
        assert self.identifier.search(attr)
    if operator is None:
        if qtype == '@':
            return (
                '%s(%s)' % (
                    'NOT ' if neg else '',
                    ' OR '.join('EXISTS(`%s`.`%s`)' % (elt, attr)
                                for elt in elements),
                ),
                {},
            )
        if qtype == '#':
            identifier = self.nextid()
            return (
                '%s(%s)' % (
                    'NOT ' if neg else '',
                    ' OR '.join('{%s} IN labels(`%s`)' % (identifier, elt)
                                for elt in elements),
                ),
                {identifier: attr},
            )
    if qtype == '@':
        identifier = self.nextid()
        operator = self.operators[operator]
        clauses = []
        for elt in elements:
            attr_expr = '%s.%s' % tuple(cypher_escape(s)
                                        for s in (elt, attr))
            if array_mode is not None:
                lval = 'x'
            elif len_mode is not None:
                lval = '%s(%s)' % (len_mode, attr_expr)
            else:
                lval = attr_expr
            clause_part = '%s %s {%s}' % (lval, operator, identifier)
            if array_mode is not None:
                if array_mode in ['ALL', 'ANY', 'SINGLE']:
                    prereq = 'LENGTH(%s) <> 0 AND' % attr_expr
                elif array_mode in ['NONE']:
                    # The original was missing the "% attr_expr" here.
                    prereq = 'LENGTH(%s) = 0 OR' % attr_expr
                clause_part = '%s %s(x IN %s WHERE %s)' % (
                    prereq, array_mode, attr_expr, clause_part,
                )
            clauses.append(clause_part)
        clause = ' OR '.join(clauses)
        if neg:
            clause = '%s OR NOT (%s)' % (
                ' OR '.join('NOT EXISTS(`%s`.`%s`)' % (elt, attr)
                            for elt in elements),
                clause,
            )
        value = Neo4jDB.to_dbprop(attr, value)
        return ('%s' % clause, {identifier: value})
    raise ValueError()
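A hedged illustration of the filter grammar this method parses; the instance name `query` and the exact Cypher output are assumptions for illustration only:

# Filter strings accepted by _add_clause_from_filter() -- illustrative:
#   "dst.port = 80"        -> `dst`.`port` = {param}
#   "-src.addr"            -> NOT EXISTS(`src`.`addr`)
#   "ANY meta.ports = 80"  -> ANY(x IN ... WHERE x = {param})
#   "LEN dst.ports > 3"    -> LENGTH(`dst`.`ports`) > {param}
clause, params = query._add_clause_from_filter("dst.port = 80", mode="node")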
'Adds a WHERE clause built from a node filter. Devs: `flt` **can** be set from an untrusted source.'
def add_clause_from_filter(self, flt, mode='node'):
    clauses, params = [], {}
    for subflt in self._split_filter_or(flt):
        if subflt:
            subclause, subparams = self._add_clause_from_filter(subflt,
                                                                mode=mode)
            clauses.append(subclause)
            params.update(subparams)
    return self.add_clause(
        'WHERE %s' % ' OR '.join('(%s)' % clause for clause in clauses),
        **params
    )
'`size` is the number of inserts per commit and `retries` is the number of times to retry a failed transaction (when inserting concurrently for example). 0 is forever, 1 does not retry, 2 retries once, etc.'
def __init__(self, db, size=None, retries=0):
    self.db = db
    self.queries = []
    self.start_time = time.time()
    self.count = 0
    self.commited_count = 0
    self.size = config.NEO4J_BATCH_SIZE if size is None else size
    self.retries = retries
'Transforms a (year, month, day, hour) tuple into a datetime.'
@staticmethod
def _time_quad2date(time_quad):
    return datetime(*time_quad)
'Transforms a neo4j cursor returned by executing a query into an iterator of {src: <dict>, flow: <dict>, dst: <dict>}.'
@classmethod
def cursor2json_iter(cls, cursor):
    for src, flow, dst in cursor:
        for rec in [src, flow, dst]:
            cls._cleanup_record(rec)
        src_props = cls._get_props(src['elt'], src.get('meta'))
        src_ref = cls._get_ref(src['elt'], src_props)
        src_labels = cls._get_labels(src['elt'], src_props)
        src_node = cls._node2json(src_ref, src_labels, src_props)
        dst_props = cls._get_props(dst['elt'], dst.get('meta'))
        dst_ref = cls._get_ref(dst['elt'], dst_props)
        dst_labels = cls._get_labels(dst['elt'], dst_props)
        dst_node = cls._node2json(dst_ref, dst_labels, dst_props)
        flow_props = cls._get_props(flow['elt'], flow.get('meta'))
        flow_ref = cls._get_ref(flow['elt'], flow_props)
        flow_labels = cls._get_labels(flow['elt'], flow_props)
        flow_node = cls._edge2json(flow_ref, src_ref, dst_ref,
                                   flow_labels, flow_props)
        yield {'src': src_node, 'dst': dst_node, 'flow': flow_node}
'Transforms a cursor of (node, edge, node) triplets into a graph of hosts and flows. All elements are of the form {elt: <neo4j element-like>, meta: [<list of metadata>]}. This is an internal API that is very likely to change.'
@classmethod
def cursor2json_graph(cls, cursor):
    random.seed(0)
    g = {'nodes': [], 'edges': []}
    done = set()
    for row in cls.cursor2json_iter(cursor):
        for node, typ in ((row['src'], 'nodes'),
                          (row['flow'], 'edges'),
                          (row['dst'], 'nodes')):
            if node['id'] not in done:
                g[typ].append(node)
                done.add(node['id'])
    return g
'Returns a dict of {flow: {time_in_day: count}}. WARNING/FIXME: this mutates the query.'
def flow_daily(self, query):
    query.add_clause(
        'WITH src.elt as src, link.elt as link, dst.elt as dst\n'
        'MATCH (link)-[:SEEN]->(t:Time)\n'
        'WITH src, link, dst, t, (t.time % 86400) as time_in_day\n'
        'WITH [link.proto, COALESCE(link.dport, link.type)] AS flow,\n'
        '     time_in_day, COUNT(*) AS count\n'
    )
    query.ret = 'RETURN flow, time_in_day, count'
    query.orderby = 'ORDER BY flow[0], flow[1], time_in_day'
    counts = self._cursor2flow_daily(self.run(query))
    return counts
'Returns an iterator of: {fields: <fields>, count: <number of occurrences or sum of sumfields>, collected: <collected fields>}. WARNING/FIXME: this mutates the query.'
def top(self, query, fields, collect=None, sumfields=None):
    collect = collect or []
    sumfields = sumfields or []
    for flist in (fields, collect, sumfields):
        for i in range(len(flist)):
            # Accept "flow." as an alias for the internal "link." prefix
            # (the original condition tested "link.", which made the
            # replace a no-op).
            if flist[i].startswith('flow.'):
                flist[i] = flist[i].replace('flow.', 'link.')
            if '.' not in flist[i]:
                flist[i] = 'link.%s' % flist[i]
            flist[i] = '.'.join(cypher_escape(elt)
                                for elt in flist[i].split('.'))
    cy_fields = '[%s]' % ', '.join(fields)
    cy_collect = '[%s]' % ', '.join(collect)
    cy_sumfields = 'SUM(%s)' % ' + '.join(sumfields)
    query.add_clause(
        'WITH src.elt as src, link.elt as link, dst.elt as dst\n'
        'WITH %s as fields, %s as count, %s as collected' % (
            cy_fields,
            'COUNT(*)' if not sumfields else cy_sumfields,
            'NULL' if not collect else 'COLLECT(DISTINCT %s)' % cy_collect,
        )
    )
    query.ret = 'RETURN fields, count, collected'
    query.orderby = 'ORDER BY count DESC'
    top = self._cursor2top(self.run(query))
    return top
'Cleans up mistakes made when predicting client/server ports.'
def cleanup_flows(self):
    self._cleanup_phase1()
    self._cleanup_phase2()
    self._sanity_check()
'Subclasses can override this method to generate the CSV line from the original line.'
@staticmethod
def fixline(line):
    return line
'The DB connection.'
@property
def db(self):
    try:
        return self._db
    except AttributeError:
        self._db = create_engine(self.dburl, echo=config.DEBUG_DB)
        return self._db
'Filters records by their ObjectID. `oid` can be a single object ID or many (as a list or any iterable), specified as strings or `ObjectID`s.'
@classmethod
def searchobjectid(cls, oid, neg=False):
    if isinstance(oid, (int, basestring)):
        oid = [int(oid)]
    else:
        oid = [int(suboid) for suboid in oid]
    return cls._searchobjectid(oid, neg=neg)
'Filters (if `neg` == True, filters out) one particular host (IP address).'
@classmethod
def searchhost(cls, addr, neg=False):
    if neg:
        return Host.addr != cls.convert_ip(addr)
    return Host.addr == cls.convert_ip(addr)
'This method produces a generator of distinct values for a given field.'
def distinct(self, field, flt=None, sort=None, limit=None, skip=None, **kargs):
    if isinstance(field, basestring):
        field = self.fields[field]
    if flt is None:
        flt = self.flt_empty
    sort = [
        (self.fields[key] if isinstance(key, basestring) else key, way)
        for key, way in sort or []
    ]
    req = self._distinct_req(field, flt, **kargs)
    for key, way in sort:
        req = req.order_by(key if way >= 0 else desc(key))
    if skip is not None:
        req = req.offset(skip)
    if limit is not None:
        req = req.limit(limit)
    return (next(iter(viewvalues(res))) for res in self.db.execute(req))
'Returns a condition that is true iff all of the given conditions are true.'
@classmethod
def flt_and(cls, *args):
    return reduce(cls._flt_and, args)
'Returns a condition that is true iff any of the given conditions is true.'
@classmethod
def flt_or(cls, *args):
    return reduce(cls._flt_or, args)
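A short usage sketch combining the filters defined in this backend; the backend instance name `dbase` is an assumption, and the filter methods used are the ones defined further down:

# Hosts in Germany with port 443 open, OR anything flagged as 'https':
flt = dbase.flt_or(
    dbase.flt_and(dbase.searchcountry('DE'), dbase.searchport(443)),
    dbase.searchservice('https'),
)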
'`size` is the number of inserts per commit and `retries` is the number of times to retry a failed transaction (when inserting concurrently for example). 0 is forever, 1 does not retry, 2 retries once, etc.'
def __init__(self, db, size=None, retries=0):
    self.db = db
    self.start_time = time.time()
    self.commited_counts = {}
    self.size = config.POSTGRES_BATCH_SIZE if size is None else size
    self.retries = retries
    self.conn = db.connect()
    self.trans = self.conn.begin()
    self.queries = {}
'Returns an iterator of: {fields: <fields>, count: <number of occurrences or sum of sumfields>, collected: <collected fields>}.'
def top(self, query, fields, collect=None, sumfields=None):
    raise NotImplementedError()
'Country database has been dropped in favor of Location/City'
def feed_geoip_country(self, *_, **__):
    pass
'Returns a generator of all (start, stop) IP ranges for a country, given its ISO-3166-1 "alpha-2" code or its name.'
def ipranges_bycountry(self, code):
    if len(code) != 2:
        return self.db.execute(
            select([Location_Range.start, Location_Range.stop])
            .select_from(join(join(Location, Location_Range), Country))
            .where(Country.name == code)
        )
    return self.db.execute(
        select([Location_Range.start, Location_Range.stop])
        .select_from(join(Location, Location_Range))
        .where(Location.country_code == code)
    )
'Returns a generator of all (start, stop) IP ranges for an Autonomous System, given its number or its name.'
def ipranges_byas(self, asnum):
    if isinstance(asnum, basestring):
        try:
            if asnum.startswith('AS'):
                asnum = int(asnum[2:])
            else:
                asnum = int(asnum)
        except ValueError:
            # Not a number: look the AS up by name.
            return self.db.execute(
                select([AS_Range.start, AS_Range.stop])
                .select_from(join(AS, AS_Range))
                .where(AS.name == asnum)
            )
    return self.db.execute(
        select([AS_Range.start, AS_Range.stop])
        .where(AS_Range.aut_sys == asnum)
    )
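Usage sketch; the backend instance name `dbase` is an assumption, and the three call forms follow directly from the parsing above:

for start, stop in dbase.ipranges_byas('AS15169'):  # 'AS'-prefixed string
    print(start, stop)
dbase.ipranges_byas(15169)          # plain integer
dbase.ipranges_byas('Example AS')   # non-numeric: looked up as a name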
'Backend-specific subclasses may use this method to create some bulk insert structures.'
def start_store_hosts(self):
    self.bulk = self.start_bulk_insert()
'Backend-specific subclasses may use this method to commit bulk insert structures.'
def stop_store_hosts(self):
    self.bulk.close()
    self.bulk = None
'Removes the host scan result. "host" must be a record as yielded by .get() or a valid NmapFilter() instance. The scan files that are no longer linked to a scan are removed at the end of the call.'
def remove(self, host, archive=False):
    if isinstance(host, dict):
        base = [host['_id']]
    else:
        base = host.query(select([Scan.id]), archive=archive).cte('base')
    self.db.execute(delete(Scan).where(Scan.id.in_(base)))
    # Remove the scan files that are no longer linked to any scan.
    base = select([Association_Scan_ScanFile.scan_file]).cte('base')
    self.db.execute(delete(ScanFile).where(ScanFile.sha256.notin_(base)))
'This method makes use of the aggregation framework to produce top values for a given field or pseudo-field. Pseudo-fields are:

- category / label / asnum / country / net[:mask]
- port
- port:open / :closed / :filtered / :<servicename>
- portlist:open / :closed / :filtered
- countports:open / :closed / :filtered
- service / service:<portnbr>
- product / product:<portnbr>
- cpe / cpe.<part> / cpe:<cpe_spec> / cpe.<part>:<cpe_spec>
- devicetype / devicetype:<portnbr>
- script:<scriptid> / script:<port>:<scriptid> / script:host:<scriptid>
- cert.* / smb.* / sshkey.*
- modbus.* / s7.* / enip.*
- mongo.dbs.*
- vulns.*
- screenwords
- file.* / file.*:scriptid
- hop'
def topvalues(self, field, flt=None, topnbr=10, sort=None, limit=None, skip=None, least=False, archive=False):
    if flt is None:
        flt = NmapFilter()
    base = flt.query(
        select([Scan.id]).select_from(flt.select_from),
        archive=archive,
    ).cte('base')
    order = 'count' if least else desc('count')
    outputproc = None
    if field == 'port':
        field = (Port, [Port.protocol, Port.port], (Port.state == 'open'))
    elif field == 'ttl':
        field = (Port, [Port.state_reason_ttl],
                 (Port.state_reason_ttl != None))
    elif field == 'ttlinit':
        field = (
            Port,
            [func.least(255, func.power(2, func.ceil(
                func.log(2, Port.state_reason_ttl))))],
            (Port.state_reason_ttl != None),
        )
        outputproc = int
    elif field.startswith('port:'):
        info = field[5:]
        field = (
            Port,
            [Port.protocol, Port.port],
            (Port.state == info)
            if info in set(['open', 'filtered', 'closed', 'open|filtered'])
            else (Port.service_name == info),
        )
    elif field.startswith('countports:'):
        info = field[11:]
        return (
            {'count': result[0], '_id': result[1]}
            for result in self.db.execute(
                select([func.count().label('count'), column('cnt')])
                .select_from(
                    select([func.count().label('cnt')])
                    .select_from(Port)
                    .where(and_(
                        Port.state == info,
                        exists(
                            select([1])
                            .select_from(base)
                            .where(Port.scan == base.c.id)
                        ),
                    ))
                    .group_by(Port.scan)
                    .alias('cnt')
                )
                .group_by('cnt')
                .order_by(order)
                .limit(topnbr)
            )
        )
    elif field.startswith('portlist:'):
        info = field[9:]
        return (
            {
                'count': result[0],
                '_id': [
                    (proto, int(port)) for proto, port in (
                        elt.split(',')
                        for elt in result[1][3:-3].split(')","(')
                    )
                ],
            }
            for result in self.db.execute(
                select([func.count().label('count'), column('ports')])
                .select_from(
                    select([
                        func.array_agg(postgresql.aggregate_order_by(
                            tuple_(Port.protocol, Port.port).label('a'),
                            tuple_(Port.protocol, Port.port).label('a'),
                        )).label('ports')
                    ])
                    .where(and_(Port.state == info, Port.scan.in_(base)))
                    .group_by(Port.scan)
                    .alias('ports')
                )
                .group_by('ports')
                .order_by(order)
                .limit(topnbr)
            )
        )
    elif field == 'service':
        field = (Port, [Port.service_name], (Port.state == 'open'))
    elif field.startswith('service:'):
        info = field[8:]
        if '/' in info:
            info = info.split('/', 1)
            field = (Port, [Port.service_name],
                     and_(Port.protocol == info[0],
                          Port.port == int(info[1])))
        else:
            field = (Port, [Port.service_name], (Port.port == int(info)))
    elif field == 'product':
        field = (Port, [Port.service_name, Port.service_product],
                 (Port.state == 'open'))
    elif field.startswith('product:'):
        info = field[8:]
        if info.isdigit():
            info = int(info)
            flt = self.flt_and(flt, self.searchport(info))
            field = (Port, [Port.service_name, Port.service_product],
                     and_(Port.state == 'open', Port.port == info))
        elif info.startswith('tcp/') or info.startswith('udp/'):
            info = (info[:3], int(info[4:]))
            flt = self.flt_and(flt,
                               self.searchport(info[1], protocol=info[0]))
            field = (Port, [Port.service_name, Port.service_product],
                     and_(Port.state == 'open', Port.port == info[1],
                          Port.protocol == info[0]))
        else:
            flt = self.flt_and(flt, self.searchservice(info))
            field = (Port, [Port.service_name, Port.service_product],
                     and_(Port.state == 'open',
                          Port.service_name == info))
    elif field == 'devicetype':
        field = (Port, [Port.service_devicetype], (Port.state == 'open'))
    elif field.startswith('devicetype:'):
        info = field[11:]
        if info.isdigit():
            info = int(info)
            flt = self.flt_and(flt, self.searchport(info))
            field = (Port, [Port.service_devicetype],
                     and_(Port.state == 'open', Port.port == info))
        elif info.startswith('tcp/') or info.startswith('udp/'):
            info = (info[:3], int(info[4:]))
            flt = self.flt_and(flt,
                               self.searchport(info[1], protocol=info[0]))
            field = (Port, [Port.service_devicetype],
                     and_(Port.state == 'open', Port.port == info[1],
                          Port.protocol == info[0]))
        else:
            flt = self.flt_and(flt, self.searchservice(info))
            field = (Port, [Port.service_devicetype],
                     and_(Port.state == 'open',
                          Port.service_name == info))
    elif field == 'version':
        field = (Port, [Port.service_name, Port.service_product,
                        Port.service_version], (Port.state == 'open'))
    elif field.startswith('version:'):
        info = field[8:]
        if info.isdigit():
            info = int(info)
            flt = self.flt_and(flt, self.searchport(info))
            field = (Port, [Port.service_name, Port.service_product,
                            Port.service_version],
                     and_(Port.state == 'open', Port.port == info))
        elif info.startswith('tcp/') or info.startswith('udp/'):
            info = (info[:3], int(info[4:]))
            flt = self.flt_and(flt,
                               self.searchport(info[1], protocol=info[0]))
            field = (Port, [Port.service_name, Port.service_product,
                            Port.service_version],
                     and_(Port.state == 'open', Port.port == info[1],
                          Port.protocol == info[0]))
        elif ':' in info:
            info = info.split(':', 1)
            flt = self.flt_and(flt,
                               self.searchproduct(info[1], service=info[0]))
            field = (Port, [Port.service_name, Port.service_product,
                            Port.service_version],
                     and_(Port.state == 'open',
                          Port.service_name == info[0],
                          Port.service_product == info[1]))
        else:
            flt = self.flt_and(flt, self.searchservice(info))
            field = (Port, [Port.service_name, Port.service_product,
                            Port.service_version],
                     and_(Port.state == 'open',
                          Port.service_name == info))
    elif field == 'asnum':
        field = (Scan, [Scan.info['as_num']], None)
    elif field == 'as':
        field = (Scan, [Scan.info['as_num'], Scan.info['as_name']], None)
    elif field == 'country':
        field = (Scan, [Scan.info['country_code'],
                        Scan.info['country_name']], None)
    elif field == 'city':
        field = (Scan, [Scan.info['country_code'],
                        Scan.info['city']], None)
    elif field == 'net' or field.startswith('net:'):
        info = field[4:]
        info = int(info) if info else 24
        field = (Scan, [func.set_masklen(text('scan.addr::cidr'), info)],
                 None)
    elif field == 'script' or field.startswith('script:'):
        info = field[7:]
        if info:
            field = (Script, [Script.output], (Script.name == info))
        else:
            field = (Script, [Script.name], None)
    elif field in ['category', 'categories']:
        field = (Category, [Category.name], None)
    elif field == 'source':
        field = (Scan, [Scan.source], None)
    elif field == 'domains':
        field = (Hostname, [func.unnest(Hostname.domains)], None)
    elif field.startswith('domains:'):
        level = int(field[8:]) - 1
        base1 = select([
            func.unnest(Hostname.domains).label('domains')
        ]).where(
            exists(
                select([1])
                .select_from(base)
                .where(Hostname.scan == base.c.id)
            )
        ).cte('base1')
        return (
            {'count': result[1], '_id': result[0]}
            for result in self.db.execute(
                select([base1.c.domains, func.count().label('count')])
                .where(base1.c.domains.op('~')(
                    '^([^\\.]+\\.){%d}[^\\.]+$' % level
                ))
                .group_by(base1.c.domains)
                .order_by(order)
                .limit(topnbr)
            )
        )
    elif field == 'hop':
        field = (Hop, [Hop.ipaddr], None)
    elif field.startswith('hop') and field[3] in ':>':
        ttl = int(field[4:])
        field = (Hop, [Hop.ipaddr],
                 (Hop.ttl > ttl) if field[3] == '>' else (Hop.ttl == ttl))
    elif field == 'file' or (field.startswith('file')
                             and field[4] in '.:'):
        if field.startswith('file:'):
            scripts = field[5:]
            if '.' in scripts:
                scripts, field = scripts.split('.', 1)
            else:
                field = 'filename'
            scripts = scripts.split(',')
            flt = (Script.name == scripts[0]) if len(scripts) == 1 \
                else Script.name.in_(scripts)
        else:
            field = field[5:] or 'filename'
            flt = True
        field = (
            Script,
            [func.jsonb_array_elements(
                func.jsonb_array_elements(
                    Script.data['ls']['volumes']
                ).op('->')('files')
            ).op('->>')(field).label(field)],
            and_(flt, Script.data.op('@>')(
                '{"ls": {"volumes": [{"files": []}]}}'
            )),
        )
    elif field.startswith('modbus.'):
        subfield = field[7:]
        field = (
            Script,
            [Script.data['modbus-discover'][subfield]],
            and_(Script.name == 'modbus-discover',
                 Script.data['modbus-discover'].has_key(subfield)),
        )
    else:
        raise NotImplementedError()
    s_from = {
        Script: join(Script, Port),
        Port: Port,
        Category: join(Association_Scan_Category, Category),
        Hostname: Hostname,
        Hop: join(Trace, Hop),
    }
    where_clause = {
        Script: Port.scan == base.c.id,
        Port: Port.scan == base.c.id,
        Category: Association_Scan_Category.scan == base.c.id,
        Hostname: Hostname.scan == base.c.id,
        Hop: Trace.scan == base.c.id,
    }
    if field[0] == Scan:
        req = flt.query(
            select([func.count().label('count')] + field[1])
            .select_from(Scan)
            .group_by(*field[1]),
            archive=archive,
        )
    else:
        req = select(
            [func.count().label('count')] + field[1]
        ).select_from(s_from[field[0]]).group_by(*field[1]).where(
            exists(select([1]).select_from(base)
                   .where(where_clause[field[0]]))
        )
    if field[2] is not None:
        req = req.where(field[2])
    if outputproc is None:
        return (
            {'count': result[0],
             '_id': result[1:] if len(result) > 2 else result[1]}
            for result in self.db.execute(req.order_by(order)
                                          .limit(topnbr))
        )
    return (
        {'count': result[0],
         '_id': outputproc(result[1:] if len(result) > 2 else result[1])}
        for result in self.db.execute(req.order_by(order).limit(topnbr))
    )
'Filters (if `neg` == True, filters out) one particular host (IP address).'
@classmethod
def searchhost(cls, addr, neg=False):
    if neg:
        return NmapFilter(main=(Scan.addr != cls.convert_ip(addr)))
    return NmapFilter(main=(Scan.addr == cls.convert_ip(addr)))
'Filters (if `neg` == True, filters out) one particular country, or a list of countries.'
@classmethod
def searchcountry(cls, country, neg=False):
    country = utils.country_unalias(country)
    return NmapFilter(main=cls._searchstring_list(
        Scan.info['country_code'].astext, country, neg=neg,
    ))
'Filters (if `neg` == True, filters out) one particular city.'
@classmethod
def searchcity(cls, city, neg=False):
    return NmapFilter(main=cls._searchstring_re(
        Scan.info['city'].astext, city, neg=neg,
    ))
'Filters (if `neg` == True, filters out) one or more particular AS number(s).'
@classmethod
def searchasnum(cls, asnum, neg=False):
    return NmapFilter(main=cls._searchstring_list(
        Scan.info['as_num'], asnum, neg=neg, map_=str,
    ))
'Filters (if `neg` == True, filters out) one or more particular AS.'
@classmethod
def searchasname(cls, asname, neg=False):
    return NmapFilter(main=cls._searchstring_re(
        Scan.info['as_name'].astext, asname, neg=neg,
    ))
'Filters (if `neg` == True, filters out) records with specified protocol/port at required state. Be aware that when a host has a lot of ports filtered or closed, it will not report all of them, but only a summary, and thus the filter might not work as expected. This filter will always work to find open ports.'
@staticmethod
def searchport(port, protocol='tcp', state='open', neg=False):
    if port == 'host':
        return NmapFilter(port=[
            (True, (Port.port >= 0) if neg else (Port.port == -1)),
        ])
    return NmapFilter(port=[
        (not neg, and_(Port.port == port,
                       Port.protocol == protocol,
                       Port.state == state)),
    ])
'Filters records with at least one port other than those listed in `ports` with state `state`.'
@staticmethod
def searchportsother(ports, protocol='tcp', state='open'):
    return NmapFilter(port=[
        (True, and_(or_(Port.port.notin_(ports),
                        Port.protocol != protocol),
                    Port.state == state)),
    ])
'Filters records whose number of open ports is between `minn` and `maxn`.'
@staticmethod
def searchcountopenports(minn=None, maxn=None, neg=False):
    assert minn is not None or maxn is not None
    req = select([column('scan')]).select_from(
        select([Port.scan.label('scan'), func.count().label('count')])
        .where(Port.state == 'open')
        .group_by(Port.scan)
        .alias('pcnt')
    )
    if minn == maxn:
        req = req.where(column('count') == minn)
    else:
        if minn is not None:
            req = req.where(column('count') >= minn)
        if maxn is not None:
            req = req.where(column('count') <= maxn)
    return NmapFilter(main=Scan.id.notin_(req) if neg
                      else Scan.id.in_(req))
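Usage sketch; the backend instance name `dbase` is an assumption:

flt = dbase.searchcountopenports(minn=5, maxn=10)      # 5 to 10 open ports
exactly3 = dbase.searchcountopenports(minn=3, maxn=3)  # hits the == branch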
'Filters records with at least one open port.'
@staticmethod
def searchopenport(neg=False):
    return NmapFilter(port=[(not neg, Port.state == 'open')])
'Search an open port with a particular service.'
@classmethod
def searchservice(cls, srv, port=None, protocol=None):
    req = cls._searchstring_re(Port.service_name, srv)
    if port is not None:
        req = and_(req, Port.port == port)
    if protocol is not None:
        req = and_(req, Port.protocol == protocol)
    return NmapFilter(port=[(True, req)])
'Search a port with a particular `product`. It is (much) better to provide the `service` name and/or `port` number since those fields are indexed.'
@classmethod
def searchproduct(cls, product, version=None, service=None, port=None,
                  protocol=None):
    req = cls._searchstring_re(Port.service_product, product)
    if version is not None:
        req = and_(req, cls._searchstring_re(Port.service_version,
                                             version))
    if service is not None:
        req = and_(req, cls._searchstring_re(Port.service_name, service))
    if port is not None:
        req = and_(req, Port.port == port)
    if protocol is not None:
        req = and_(req, Port.protocol == protocol)
    return NmapFilter(port=[(True, req)])
'Search a particular content in the scripts results.'
@classmethod
def searchscript(cls, name=None, output=None, values=None):
    req = True
    if name is not None:
        req = and_(req, cls._searchstring_re(Script.name, name,
                                             neg=False))
    if output is not None:
        req = and_(req, cls._searchstring_re(Script.output, output,
                                             neg=False))
    if values:
        if name is None:
            raise TypeError('.searchscript() needs a `name` arg '
                            'when using a `values` arg')
        req = and_(req, Script.data.contains(
            {xmlnmap.ALIASES_TABLE_ELEMS.get(name, name): values}
        ))
    return NmapFilter(script=[req])
'Search shared files from a file name (either a string or a regexp), only from scripts using the "ls" NSE module.'
@classmethod
def searchfile(cls, fname=None, scripts=None):
    if fname is None:
        req = Script.data.op('@>')(
            '{"ls": {"volumes": [{"files": []}]}}'
        )
    elif isinstance(fname, utils.REGEXP_T):
        base1 = select([
            Script.port,
            func.jsonb_array_elements(
                func.jsonb_array_elements(
                    Script.data['ls']['volumes']
                ).op('->')('files')
            ).op('->>')('filename').label('filename'),
        ]).where(
            Script.data.op('@>')('{"ls": {"volumes": [{"files": []}]}}')
        ).cte('base1')
        base2 = select([column('port')]).select_from(base1).where(
            column('filename').op(
                '~*' if fname.flags & re.IGNORECASE else '~'
            )(fname.pattern)
        ).cte('base2')
        return NmapFilter(port=[(True, Port.id.in_(base2))])
    else:
        req = Script.data.op('@>')(json.dumps(
            {'ls': {'volumes': [{'files': [{'filename': fname}]}]}}
        ))
    if scripts is None:
        return NmapFilter(script=[req])
    if isinstance(scripts, basestring):
        scripts = [scripts]
    if len(scripts) == 1:
        return NmapFilter(script=[and_(Script.name == scripts.pop(),
                                       req)])
    return NmapFilter(script=[and_(Script.name.in_(scripts), req)])
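Usage sketch contrasting the exact-string and regexp branches; the backend instance name `dbase` and the file names are assumptions (`re.compile()` objects match `utils.REGEXP_T`):

import re
flt_exact = dbase.searchfile(fname='backup.zip')
flt_regexp = dbase.searchfile(fname=re.compile('\\.zip$', re.IGNORECASE),
                              scripts=['smb-ls'])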
'Queries the passive database with the provided filter "flt", and returns a generator.'
def _get(self, flt, limit=None, skip=None, sort=None):
    req = flt.query(
        select([
            Host.addr, Passive.sensor, Passive.count, Passive.firstseen,
            Passive.lastseen, Passive.info, Passive.port,
            Passive.recontype, Passive.source, Passive.targetval,
            Passive.value, Passive.moreinfo,
        ]).select_from(flt.select_from)
    )
    for key, way in sort or []:
        req = req.order_by(key if way >= 0 else desc(key))
    if skip is not None:
        req = req.offset(skip)
    if limit is not None:
        req = req.limit(limit)
    return self.db.execute(req)
'Queries the passive database with the provided filter "flt", and returns the first result, or None if no result exists.'
def get_one(self, flt, skip=None):
    return self._get(flt, limit=1, skip=skip).fetchone()
'Like `.insert_or_update()`, but `specs` parameter has to be an iterable of `(timestamp, spec)` (if `separated_timestamps` is True) or `spec` (if it is False) values. This will perform PostgreSQL COPY FROM inserts with the major drawback that the `getinfos` parameter will be called (if it is not `None`) for each spec, even when the spec already exists in the database and the call was hence unnecessary. It\'s up to you to decide whether having bulk insert is worth it or if you want to go with the regular `.insert_or_update()` method.'
def insert_or_update_bulk(self, specs, getinfos=None, separated_timestamps=True):
    more_to_read = True
    tmp = self.create_tmp_table(Passive, extracols=[
        Column('addr', postgresql.INET),
        Column('context', String(32)),
    ])
    if config.DEBUG_DB:
        total_upserted = 0
        total_start_time = time.time()
    while more_to_read:
        if config.DEBUG_DB:
            start_time = time.time()
        with PassiveCSVFile(specs, self.get_context, tmp,
                            getinfos=getinfos,
                            separated_timestamps=separated_timestamps,
                            limit=config.POSTGRES_BATCH_SIZE) as fdesc:
            self.copy_from(fdesc, tmp.name)
            more_to_read = fdesc.more_to_read
            if config.DEBUG_DB:
                count_upserted = fdesc.count
        self.db.execute(
            postgresql.insert(Context).from_select(
                ['name'],
                select([column('context')])
                .select_from(tmp)
                .where(tmp.columns['context'].isnot(null()))
                .distinct(column('context')),
            ).on_conflict_do_nothing()
        )
        insrt = postgresql.insert(Host)
        self.db.execute(
            insrt.from_select(
                [column(col) for col in
                 ['context', 'addr', 'firstseen', 'lastseen']],
                select([
                    Context.id, column('addr'),
                    func.min_(column('firstseen')),
                    func.max_(column('lastseen')),
                ]).select_from(
                    join(Context, tmp,
                         Context.name == column('context'))
                ).where(
                    tmp.columns['addr'].isnot(null())
                ).group_by(Context.id, column('addr')),
            ).on_conflict_do_update(
                index_elements=['addr', 'context'],
                set_={
                    'firstseen': func.least(Host.firstseen,
                                            insrt.excluded.firstseen),
                    'lastseen': func.greatest(Host.lastseen,
                                              insrt.excluded.lastseen),
                },
            )
        )
        insrt = postgresql.insert(Passive)
        self.db.execute(
            insrt.from_select(
                [column(col) for col in [
                    'host', 'count', 'firstseen', 'lastseen', 'sensor',
                    'port', 'recontype', 'source', 'targetval', 'value',
                    'fullvalue', 'info', 'moreinfo',
                ]],
                select([
                    Host.id,
                    func.sum_(tmp.columns['count']),
                    func.min_(tmp.columns['firstseen']),
                    func.max_(tmp.columns['lastseen']),
                ] + [tmp.columns[col] for col in [
                    'sensor', 'port', 'recontype', 'source', 'targetval',
                    'value', 'fullvalue', 'info', 'moreinfo',
                ]]).select_from(
                    join(tmp, join(Context, Host), (
                        ((Context.name == tmp.columns['context']) |
                         (Context.name.is_(null()) &
                          tmp.columns['context'].is_(null()))) &
                        ((Host.addr == tmp.columns['addr']) |
                         (Host.addr.is_(null()) &
                          tmp.columns['addr'].is_(null())))
                    ))
                ).group_by(Host.id, *(tmp.columns[col] for col in [
                    'sensor', 'port', 'recontype', 'source', 'targetval',
                    'value', 'fullvalue', 'info', 'moreinfo',
                ])),
            ).on_conflict_do_update(
                index_elements=['host', 'sensor', 'recontype', 'port',
                                'source', 'value', 'targetval', 'info'],
                set_={
                    'firstseen': func.least(Passive.firstseen,
                                            insrt.excluded.firstseen),
                    'lastseen': func.greatest(Passive.lastseen,
                                              insrt.excluded.lastseen),
                    'count': Passive.count + insrt.excluded.count,
                },
            )
        )
        self.db.execute(delete(tmp))
        if config.DEBUG_DB:
            stop_time = time.time()
            time_spent = stop_time - start_time
            total_upserted += count_upserted
            total_time_spent = stop_time - total_start_time
            utils.LOGGER.debug(
                'DB:PERFORMANCE STATS %s upserts, %f s, %s/s\n'
                '\ttotal: %s upserts, %f s, %s/s',
                utils.num2readable(count_upserted), time_spent,
                utils.num2readable(count_upserted / time_spent),
                utils.num2readable(total_upserted), total_time_spent,
                utils.num2readable(total_upserted / total_time_spent),
            )
'This function uses a MongoDB backup file as a source to feed the passive table.'
def migrate_from_mongodb_backup(self, backupfdesc):
    def _backupgen(fdesc):
        for line in fdesc:
            try:
                line = line.decode()
            except AttributeError:
                pass
            try:
                line = json.loads(line)
            except ValueError:
                utils.LOGGER.warning('ignoring line [%r]', line)
                continue
            try:
                del line['_id']
            except KeyError:
                pass
            line.update(line.pop('infos', {}))
            line.update(line.pop('fullinfos', {}))
            for key, value in viewitems(line):
                if (isinstance(value, dict) and len(value) == 1
                        and '$numberLong' in value):
                    line[key] = int(value['$numberLong'])
            yield line
    self.insert_or_update_bulk(_backupgen(backupfdesc), getinfos=None,
                               separated_timestamps=False)
'This method makes use of the aggregation framework to produce top values for a given field. If `distinct` is True (default), the top values are computed by distinct events. If it is False, they are computed based on the "count" field.'
def topvalues(self, field, flt=None, topnbr=10, sort=None, limit=None, skip=None, least=False, distinct=True):
    if isinstance(field, basestring):
        field = self.fields[field]
    outputproc = None
    if flt is None:
        flt = PassiveFilter()
    base = flt.query(
        select([Passive.id]).select_from(flt.select_from)
    ).cte('base')
    order = 'count' if least else desc('count')
    req = flt.query(
        select([
            (func.count() if distinct
             else func.sum(Passive.count)).label('count'),
            field,
        ]).select_from(flt.select_from).group_by(field)
    )
    if outputproc is None:
        outputproc = lambda val: val
    return (
        {'count': result[0],
         '_id': outputproc(result[1:] if len(result) > 2 else result[1])}
        for result in self.db.execute(req.order_by(order).limit(topnbr))
    )
'Filters (if `neg` == True, filters out) one particular host (IP address).'
@staticmethod
def searchhost(addr, neg=False):
    return PassiveFilter(main=PostgresDB.searchhost(addr, neg=neg))
'Decides whether we should log a record'
def filter(self, record):
    if record.levelno < logging.INFO:
        if record.msg.startswith('DB:'):
            return config.DEBUG_DB
        return config.DEBUG
    if record.levelno != logging.WARNING:
        return True
    if record.msg in self.warnings:
        return False
    if len(self.warnings) > self.MAX_WARNINGS_STORED:
        self.warnings = set()
    self.warnings.add(record.msg)
    return True
'Stores parent\'s arguments for later (manual) processing.'
def add_argument(self, *args, **kargs):
    self.args.append((args, kargs))
'ranges must be given in the "correct" order *and* not overlap.'
def __init__(self, ranges=None):
    self.ranges = {}
    self.length = 0
    if ranges is not None:
        for rnge in ranges:
            self.append(*rnge)
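A minimal usage sketch, assuming this is the IP-ranges container used by the target generators; the class name `IPRanges` and the integer encoding of addresses are assumptions:

rnges = IPRanges(ranges=[(3232235520, 3232235775),    # 192.168.0.0/24
                         (3232236032, 3232236287)])   # 192.168.2.0/24
rnges.append(3232236544, 3232236799)                  # 192.168.4.0/24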
'Creates the Argus object.

fdesc: a file-like object or a filename
pcap_filter: a PCAP filter to use with racluster'
def __init__(self, fdesc, pcap_filter=None):
    cmd = ['racluster', '-u', '-n', '-c', ',', '-m']
    cmd.extend(self.aggregation)
    cmd.append('-s')
    cmd.extend(self.fields)
    cmd.extend(['-r', fdesc if isinstance(fdesc, basestring) else '-'])
    if pcap_filter is not None:
        cmd.extend(['-', pcap_filter])
    super(Argus, self).__init__(
        cmd, {} if isinstance(fdesc, basestring) else {'stdin': fdesc}
    )
    self.fdesc.readline()  # skip the CSV header line
'Creates the NetFlow object.

fdesc: a file-like object or a filename
pcap_filter: a PCAP filter to use with nfdump'
def __init__(self, fdesc, pcap_filter=None):
    cmd = ['nfdump', '-aq', '-o', self.fmt]
    cmdkargs = {}
    if isinstance(fdesc, basestring):
        with open(fdesc) as fde:
            if fde.read(2) not in utils.FileOpener.FILE_OPENERS_MAGIC:
                cmd.extend(['-r', fdesc])
            else:
                cmdkargs['stdin'] = utils.open_file(fdesc)
    else:
        cmdkargs['stdin'] = fdesc
    if pcap_filter is not None:
        cmd.append(pcap_filter)
    super(NetFlow, self).__init__(cmd, cmdkargs)
'Builds an Agent instance from a description string of the form [tor:][hostname:]path.'
@classmethod def from_string(cls, string, localbase='', maxwaiting=60):
    string = string.split(':', 1)
    if string[0].lower() == 'tor':
        string = string[1].split(':', 1)
        usetor = True
    else:
        usetor = False
    if len(string) == 1:
        return cls(None, string[0],
                   os.path.join(localbase, string[0].replace('/', '_')),
                   maxwaiting=maxwaiting)
    return cls(string[0], string[1],
               os.path.join(localbase, '%s_%s' % (
                   string[0].replace('@', '_'),
                   string[1].replace('/', '_'),
               )),
               usetor=usetor, maxwaiting=maxwaiting)
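The three accepted description-string forms, per the parsing above; host names and paths are illustrative:

agent_local = Agent.from_string('/path/to/agent')
agent_remote = Agent.from_string('user@scanner:/path/to/agent')
agent_tor = Agent.from_string('tor:user@scanner:/path/to/agent')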
'Get local storage path for directory `dirname`.'
def get_local_path(self, dirname):
    return os.path.join(self.localpathbase, dirname) + '/'
'Get remote storage path for directory `dirname` as an rsync address.'
def get_remote_path(self, dirname):
    if dirname and dirname[-1] != '/':
        dirname += '/'
    return self.rsyncbase + dirname
'Create local directories used to manage the agent'
def create_local_dirs(self):
    for dirname in ['input', 'remoteinput', 'remotecur', 'remoteoutput']:
        utils.makedirs(self.get_local_path(dirname))
'Get the number of targets that can be sent to the agent (based on the total number of targets currently on hold and the `maxwaiting` attribute value).'
def may_receive(self):
    curwaiting = sum(len(os.listdir(self.get_local_path(p)))
                     for p in ['input', 'remoteinput'])
    return self.maxwaiting - curwaiting
'Add a new target (locally), given its category and address (technically, addr can be a network or a hostname that can be resolved from the agent).'
def add_target(self, category, addr):
    with open(os.path.join(
            self.get_local_path('input'),
            '%s.%s' % (category, addr.replace('/', '_')),
    ), 'w') as fdesc:
        fdesc.write('%s\n' % addr)
        return True
    return False
'Synchronize the local and remote directories, and the relevant `Campaign`s.'
def sync(self):
    subprocess.call(self.rsync + ['-a',
                                  self.get_local_path('input'),
                                  self.get_local_path('remoteinput')])
    subprocess.call(self.rsync + ['-a', '--remove-source-files',
                                  self.get_local_path('input'),
                                  self.get_remote_path('input')])
    subprocess.call(self.rsync + ['-a', '--delete',
                                  self.get_remote_path('input'),
                                  self.get_local_path('remoteinput')])
    subprocess.call(self.rsync + ['-a', '--delete',
                                  self.get_remote_path('cur'),
                                  self.get_local_path('remotecur')])
    subprocess.call(self.rsync + ['-a', '--remove-source-files',
                                  self.get_remote_path('output'),
                                  self.get_local_path('remoteoutput')])
    for campaign in self.campaigns:
        campaign.sync(self)
'This function should only be called from `agent.sync()` method. It stores the results of terminated scans according to the target status.'
def sync(self, agent):
    remout = agent.get_local_path('remoteoutput')
    for remfname in glob.glob(os.path.join(
            remout, self.visiblecategory + '.*.xml')):
        locfname = os.path.basename(remfname).split('.', 4)
        locfname[0] = self.category
        status = 'unknown'
        with open(remfname) as remfdesc:
            remfcontent = remfdesc.read()
            if '<status state="up"' in remfcontent:
                status = 'up'
            elif '<status state="down"' in remfcontent:
                if not self.storedown:
                    remfdesc.close()
                    os.unlink(remfname)
                    continue
                status = 'down'
            del remfcontent
        locfname = os.path.join(
            self.outputpath, locfname[0], status,
            re.sub('[/@:]', '_', agent.name), *locfname[1:]
        )
        utils.makedirs(os.path.dirname(locfname))
        os.rename(remfname, locfname)
'Send targets to scan to `agent`, depending on how many it can receive.'
def feed(self, agent, maxnbr=None):
    # Send at most `maxnbr` targets (if given), and never more than the
    # agent can currently receive. The original used max() here, which
    # would exceed the agent's capacity whenever `maxnbr` was set.
    count = agent.may_receive()
    if maxnbr is not None:
        count = min(count, maxnbr)
    for _ in range(count):
        addr = utils.int2ip(next(self.targiter))
        with open(os.path.join(
                agent.get_local_path('input'),
                '%s.%s' % (self.visiblecategory, addr),
        ), 'w') as fdesc:
            fdesc.write('%s\n' % addr)
'Periodically feeds the agents assigned to the `Campaign` (`self.agents`).'
def feedloop(self):
    while True:
        for agent in self.agents:
            try:
                self.feed(agent, maxnbr=self.maxfeed)
            except StopIteration:
                return
        time.sleep(self.sleep)
'Prepare binary data. Subclasses may want to do some kind of conversion here.'
@staticmethod
def _to_binary(data):
    return data
'Executed before _addhost for host object post-processing.'
def _pre_addhost(self):
    if 'cpes' in self._curhost:
        cpes = self._curhost['cpes']
        self._curhost['cpes'] = list(viewvalues(cpes))
'Subclasses may store self._curhost here.'
def _addhost(self):
    pass
'Subclasses may store self._curscan here.'
def _storescan(self):
    pass
'Subclasses may add scan information (first argument) to self._curscan here.'
def _addscaninfo(self, _):
    pass
'Adds the cpe in self._curdata to the host-wide cpe list, taking port/script/osmatch context into account.'
def _add_cpe_to_host(self):
    cpe = self._curdata
    self._curdata = None
    path = None
    if self._curport is not None:
        if self._curscript is not None and 'id' in self._curscript:
            path = 'ports{port:%s, scripts.id:%s}' % (
                self._curport['port'], self._curscript['id'])
        else:
            path = 'ports.port:%s' % self._curport['port']
    elif self._curscript is not None and 'id' in self._curscript:
        path = 'scripts.id:%s' % self._curscript['id']
    elif 'os' in self._curhost and self._curhost['os'].get('osmatch', []):
        lastosmatch = self._curhost['os']['osmatch'][-1]
        line = lastosmatch['line']
        path = 'os.osmatch.line:%s' % line
    cpes = self._curhost.setdefault('cpes', {})
    if cpe not in cpes:
        try:
            cpeobj = cpe2dict(cpe)
        except ValueError:
            utils.LOGGER.warning('Invalid cpe format (%s)', cpe)
            return
        cpes[cpe] = cpeobj
    else:
        cpeobj = cpes[cpe]
    cpeobj.setdefault('origins', []).append(path)
'Save a clip\'s frame to an image file. Saves the frame of clip corresponding to time ``t`` in \'filename\'. ``t`` can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: \'01:03:05.35\'. If ``withmask`` is ``True`` the mask is saved in the alpha layer of the picture (only works with PNGs).'
@convert_to_seconds(['t'])
@convert_masks_to_RGB
def save_frame(self, filename, t=0, withmask=True):
    im = self.get_frame(t)
    if withmask and self.mask is not None:
        mask = 255 * self.mask.get_frame(t)
        im = np.dstack([im, mask]).astype('uint8')
    else:
        im = im.astype('uint8')
    imsave(filename, im)
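Usage sketch (standard MoviePy API; the file names are illustrative):

from moviepy.editor import VideoFileClip
clip = VideoFileClip('myvideo.mp4')
clip.save_frame('frame.png', t='00:00:05.35')  # t as a string
clip.save_frame('frame2.png', t=(1, 3.5))      # t as (min, sec)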
'Write the clip to a videofile.

Parameters
----------
filename
  Name of the video file to write in. The extension must correspond to the "codec" used (see below), or simply be \'.avi\' (which will work with any codec).

fps
  Number of frames per second in the resulting video file. If None is provided, and the clip has an fps attribute, this fps will be used.

codec
  Codec to use for image encoding. Can be any codec supported by ffmpeg. If the filename has extension \'.mp4\', \'.ogv\' or \'.webm\', the codec will be set accordingly, but you can still set it if you don\'t like the default. For other extensions, the output filename must be set accordingly. Some examples of codecs are:

  - ``\'libx264\'`` (default codec for file extension ``.mp4``): makes well-compressed videos (quality tunable using \'bitrate\').
  - ``\'mpeg4\'`` (other codec for extension ``.mp4``): can be an alternative to ``\'libx264\'``, and produces higher quality videos by default.
  - ``\'rawvideo\'`` (use file extension ``.avi``): will produce a video of perfect quality, of possibly very huge size.
  - ``png`` (use file extension ``.avi``): will produce a video of perfect quality, of smaller size than with ``rawvideo``.
  - ``\'libvorbis\'`` (use file extension ``.ogv``): a nice video format, completely free/open source. However not everyone has the codecs installed by default on their machine.
  - ``\'libvpx\'`` (use file extension ``.webm``): a tiny video format well indicated for web videos (with HTML5). Open source.

audio
  Either ``True``, ``False``, or a file name. If ``True`` and the clip has an audio clip attached, this audio clip will be incorporated as a soundtrack in the movie. If ``audio`` is the name of an audio file, this audio file will be incorporated as a soundtrack in the movie.

audio_fps
  Frame rate to use when generating the sound.

temp_audiofile
  The name of the temporary audiofile to be generated and incorporated in the movie, if any.

audio_codec
  Which audio codec should be used. Examples are \'libmp3lame\' for \'.mp3\', \'libvorbis\' for \'ogg\', \'libfdk_aac\' for \'m4a\', \'pcm_s16le\' for 16-bit wav and \'pcm_s32le\' for 32-bit wav. Default is \'libmp3lame\', unless the video extension is \'ogv\' or \'webm\', in which case the default is \'libvorbis\'.

audio_bitrate
  Audio bitrate, given as a string like \'50k\', \'500k\', \'3000k\'. Will determine the size/quality of audio in the output file. Note that it is mainly an indicative goal: the bitrate won\'t necessarily be this value in the final file.

preset
  Sets the time that FFMPEG will spend optimizing the compression. Choices are: ultrafast, superfast, veryfast, faster, fast, medium, slow, slower, veryslow, placebo. Note that this does not impact the quality of the video, only the size of the video file. So choose ultrafast when you are in a hurry and file size does not matter.

threads
  Number of threads to use for ffmpeg. Can speed up the writing of the video on multicore computers.

ffmpeg_params
  Any additional ffmpeg parameters you would like to pass, as a list of terms, like [\'-option1\', \'value1\', \'-option2\', \'value2\'].

write_logfile
  If true, will write log files for the audio and the video. These will be files ending with \'.log\' with the name of the output file in them.

verbose
  Boolean indicating whether to print information.

progress_bar
  Boolean indicating whether to show the progress bar.

Examples
--------
>>> from moviepy.editor import VideoFileClip
>>> clip = VideoFileClip("myvideo.mp4").subclip(100,120)
>>> clip.write_videofile("my_new_video.mp4")'
@requires_duration
@use_clip_fps_by_default
@convert_masks_to_RGB
def write_videofile(self, filename, fps=None, codec=None, bitrate=None,
                    audio=True, audio_fps=44100, preset='medium',
                    audio_nbytes=4, audio_codec=None, audio_bitrate=None,
                    audio_bufsize=2000, temp_audiofile=None,
                    rewrite_audio=True, remove_temp=True,
                    write_logfile=False, verbose=True, threads=None,
                    ffmpeg_params=None, progress_bar=True):
    name, ext = os.path.splitext(os.path.basename(filename))
    ext = ext[1:].lower()
    if codec is None:
        try:
            codec = extensions_dict[ext]['codec'][0]
        except KeyError:
            raise ValueError("MoviePy couldn't find the codec associated "
                             "with the filename. Provide the 'codec' "
                             "parameter in write_videofile.")
    if audio_codec is None:
        if ext in ['ogv', 'webm']:
            audio_codec = 'libvorbis'
        else:
            audio_codec = 'libmp3lame'
    elif audio_codec == 'raw16':
        audio_codec = 'pcm_s16le'
    elif audio_codec == 'raw32':
        audio_codec = 'pcm_s32le'
    audiofile = audio if is_string(audio) else None
    make_audio = ((audiofile is None) and (audio == True) and
                  (self.audio is not None))
    if make_audio:
        if temp_audiofile is not None:
            audiofile = temp_audiofile
        else:
            if audio_codec in extensions_dict:
                audio_ext = audio_codec
            else:
                try:
                    audio_ext = find_extension(audio_codec)
                except ValueError:
                    raise ValueError(
                        'The audio_codec you chose is unknown by MoviePy. '
                        'You should report this. In the meantime, you can '
                        'specify a temp_audiofile with the right extension '
                        'in write_videofile.'
                    )
            audiofile = (name + Clip._TEMP_FILES_PREFIX +
                         'wvf_snd.%s' % audio_ext)
    verbose_print(verbose, '[MoviePy] >>>> Building video %s\n' % filename)
    if make_audio:
        self.audio.write_audiofile(audiofile, audio_fps, audio_nbytes,
                                   audio_bufsize, audio_codec,
                                   bitrate=audio_bitrate,
                                   write_logfile=write_logfile,
                                   verbose=verbose,
                                   progress_bar=progress_bar)
    ffmpeg_write_video(self, filename, fps, codec, bitrate=bitrate,
                       preset=preset, write_logfile=write_logfile,
                       audiofile=audiofile, verbose=verbose,
                       threads=threads, ffmpeg_params=ffmpeg_params,
                       progress_bar=progress_bar)
    if remove_temp and make_audio:
        os.remove(audiofile)
    verbose_print(verbose,
                  '[MoviePy] >>>> Video ready: %s \n\n' % filename)
'Writes the videoclip to a sequence of image files.

Parameters
----------
nameformat
  A filename specifying the numbering format and extension of the pictures. For instance "frame%03d.png" for filenames indexed with 3 digits and PNG format. Also possible: "some_folder/frame%04d.jpeg", etc.

fps
  Number of frames per second to consider when writing the clip. If not specified, the clip\'s ``fps`` attribute will be used if it has one.

withmask
  Will save the clip\'s mask (if any) as an alpha channel (PNGs only).

verbose
  Boolean indicating whether to print information.

progress_bar
  Boolean indicating whether to show the progress bar.

Returns
-------
names_list
  A list of all the files generated.

Notes
-----
The resulting image sequence can be read using e.g. the class ``ImageSequenceClip``.'
@requires_duration
@use_clip_fps_by_default
@convert_masks_to_RGB
def write_images_sequence(self, nameformat, fps=None, verbose=True,
                          withmask=True, progress_bar=True):
    verbose_print(verbose, '[MoviePy] Writing frames %s.' % nameformat)
    tt = np.arange(0, self.duration, 1.0 / fps)
    filenames = []
    # Number of frames is about duration * fps (the original divided
    # instead of multiplying, which made the progress-bar total wrong).
    total = int(self.duration * fps) + 1
    for i, t in tqdm(enumerate(tt), total=total,
                     disable=not progress_bar):
        name = nameformat % i
        filenames.append(name)
        self.save_frame(name, t, withmask=withmask)
    verbose_print(verbose,
                  '[MoviePy]: Done writing frames %s.\n\n' % nameformat)
    return filenames
'Write the VideoClip to a GIF file. Converts a VideoClip into an animated GIF using ImageMagick or ffmpeg.

Parameters
----------
filename
  Name of the resulting gif file.

fps
  Number of frames per second (see note below). If it isn\'t provided, then the function will look for the clip\'s ``fps`` attribute (VideoFileClip, for instance, have one).

program
  Software to use for the conversion, either \'imageio\' (this will use the library FreeImage through ImageIO), or \'ImageMagick\', or \'ffmpeg\'.

opt
  Optimization to apply. If program=\'imageio\', opt must be either \'wu\' (Wu) or \'nq\' (Neuquant). If program=\'ImageMagick\', either \'optimizeplus\' or \'OptimizeTransparency\'.

fuzz
  (ImageMagick only) Compresses the GIF by considering that the colors that are less than fuzz% different are in fact the same.

Notes
-----
The gif will be playing the clip in real time (you can only change the frame rate). If you want the gif to be played slower than the clip you will use ::

>>> # slow down clip 50% and make it a gif
>>> myClip.speedx(0.5).to_gif(\'myClip.gif\')'
@requires_duration
@convert_masks_to_RGB
def write_gif(self, filename, fps=None, program='imageio', opt='nq',
              fuzz=1, verbose=True, loop=0, dispose=False, colors=None,
              tempfiles=False):
    if program == 'imageio':
        write_gif_with_image_io(self, filename, fps=fps, opt=opt,
                                loop=loop, verbose=verbose, colors=colors)
    elif tempfiles:
        # Convert the "imageio" opt names to their ImageMagick
        # equivalents.
        opt1 = opt
        if opt1 == 'nq':
            opt1 = 'optimizeplus'
        else:
            opt1 = 'OptimizeTransparency'
        write_gif_with_tempfiles(self, filename, fps=fps, program=program,
                                 opt=opt1, fuzz=fuzz, verbose=verbose,
                                 loop=loop, dispose=dispose,
                                 colors=colors)
    else:
        write_gif(self, filename, fps=fps, program=program, opt=opt,
                  fuzz=fuzz, verbose=verbose, loop=loop, dispose=dispose,
                  colors=colors)
'Apply a transformation to a part of the clip. Returns a new clip in which the function ``fun`` (clip->clip) has been applied to the subclip between times `ta` and `tb` (in seconds).

Examples
--------
>>> # The scene between times t=3s and t=6s in ``clip`` will be
>>> # played twice slower in ``newclip``
>>> newclip = clip.subfx(lambda c: c.speedx(0.5), 3, 6)'
def subfx(self, fx, ta=0, tb=None, **kwargs):
    left = None if ta == 0 else self.subclip(0, ta)
    center = self.subclip(ta, tb).fx(fx, **kwargs)
    right = None if tb is None else self.subclip(t_start=tb)
    clips = [c for c in [left, center, right] if c is not None]
    from moviepy.video.compositing.concatenate import (
        concatenate_videoclips,
    )
    return concatenate_videoclips(clips).set_start(self.start)
'Modifies the images of a clip by replacing the frame `get_frame(t)` by another frame, `image_func(get_frame(t))`'
def fl_image(self, image_func, apply_to=None):
    if apply_to is None:
        apply_to = []
    return self.fl(lambda gf, t: image_func(gf(t)), apply_to)
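Usage sketch: invert the colors of every frame (frames are uint8 numpy arrays; `clip` is any VideoClip):

inverted = clip.fl_image(lambda frame: 255 - frame)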
'Returns the result of the blit of the clip\'s frame at time `t` on the given `picture`, the position of the clip being given by the clip\'s ``pos`` attribute. Meant for compositing.'
def blit_on(self, picture, t):
    hf, wf = framesize = picture.shape[:2]
    if self.ismask and picture.max() != 0:
        return np.minimum(1, picture + self.blit_on(np.zeros(framesize),
                                                    t))
    ct = t - self.start
    img = self.get_frame(ct)
    mask = None if self.mask is None else self.mask.get_frame(ct)
    if mask is not None:
        if (img.shape[0] != mask.shape[0]
                or img.shape[1] != mask.shape[1]):
            img = self.fill_array(img, mask.shape)
    hi, wi = img.shape[:2]
    pos = self.pos(ct)
    if isinstance(pos, str):
        pos = {'center': ['center', 'center'],
               'left': ['left', 'center'],
               'right': ['right', 'center'],
               'top': ['center', 'top'],
               'bottom': ['center', 'bottom']}[pos]
    else:
        pos = list(pos)
    if self.relative_pos:
        for i, dim in enumerate([wf, hf]):
            if not isinstance(pos[i], str):
                pos[i] = dim * pos[i]
    if isinstance(pos[0], str):
        D = {'left': 0, 'center': (wf - wi) / 2, 'right': wf - wi}
        pos[0] = D[pos[0]]
    if isinstance(pos[1], str):
        D = {'top': 0, 'center': (hf - hi) / 2, 'bottom': hf - hi}
        pos[1] = D[pos[1]]
    pos = map(int, pos)
    return blit(img, picture, pos, mask=mask, ismask=self.ismask)
'Add a mask VideoClip to the VideoClip. Returns a copy of the clip with a completely opaque mask (made of ones). This makes computations slower compared to having a None mask but can be useful in many cases. Set ``constant_size`` to `False` for clips with moving image size.'
def add_mask(self):
    if self.has_constant_size:
        mask = ColorClip(self.size, 1.0, ismask=True)
        return self.set_mask(mask.set_duration(self.duration))
    else:
        make_frame = lambda t: np.ones(self.get_frame(t).shape[:2],
                                       dtype=float)
        mask = VideoClip(ismask=True, make_frame=make_frame)
        return self.set_mask(mask.set_duration(self.duration))
'Place the clip on a colored background. Returns a clip made of the current clip overlaid on a color clip of a possibly bigger size. Can serve to flatten transparent clips.

Parameters
----------
size
  Size (width, height) in pixels of the final clip. By default it will be the size of the current clip.

color
  Background color of the final clip ([R,G,B]).

pos
  Position of the clip in the final clip. \'center\' is the default.

col_opacity
  Parameter in 0..1 indicating the opacity of the colored background.'
def on_color(self, size=None, color=(0, 0, 0), pos=None, col_opacity=None):
    from .compositing.CompositeVideoClip import CompositeVideoClip
    if size is None:
        size = self.size
    if pos is None:
        pos = 'center'
    colorclip = ColorClip(size, color)
    if col_opacity is not None:
        colorclip = ColorClip(
            size, color, duration=self.duration
        ).set_opacity(col_opacity)
        result = CompositeVideoClip([colorclip, self.set_pos(pos)])
    else:
        result = CompositeVideoClip([self.set_pos(pos)],
                                    size=size, bg_color=color)
    if (isinstance(self, ImageClip) and not hasattr(pos, '__call__')
            and (self.mask is None or isinstance(self.mask, ImageClip))):
        new_result = result.to_ImageClip()
        if result.mask is not None:
            new_result.mask = result.mask.to_ImageClip()
        return new_result.set_duration(result.duration)
    return result
'Change the clip\'s ``get_frame``. Returns a copy of the VideoClip instance, with the make_frame attribute set to `mf`.'
@outplace
def set_make_frame(self, mf):
    self.make_frame = mf
    self.size = self.get_frame(0).shape[:2][::-1]
'Attach an AudioClip to the VideoClip. Returns a copy of the VideoClip instance, with the `audio` attribute set to ``audio``, which must be an AudioClip instance.'
@outplace
def set_audio(self, audioclip):
    self.audio = audioclip
'Set the clip\'s mask. Returns a copy of the VideoClip with the mask attribute set to ``mask``, which must be a greyscale (values in 0-1) VideoClip'
@outplace
def set_mask(self, mask):
    assert mask is None or mask.ismask
    self.mask = mask
'Set the opacity/transparency level of the clip. Returns a semi-transparent copy of the clip where the mask is multiplied by ``op`` (any float, normally between 0 and 1).'
@add_mask_if_none
@outplace
def set_opacity(self, op):
    self.mask = self.mask.fl_image(lambda pic: op * pic)
'Set the clip\'s position in compositions. Sets the position that the clip will have when included in compositions. The argument ``pos`` can be either a couple ``(x,y)`` or a function ``t-> (x,y)``. `x` and `y` mark the location of the top left corner of the clip, and can be of several types.

Examples
--------
>>> clip.set_pos((45,150)) # x=45, y=150
>>> # clip horizontally centered, at the top of the picture
>>> clip.set_pos(("center","top"))
>>> # clip is at 40% of the width, 70% of the height:
>>> clip.set_pos((0.4,0.7), relative=True)
>>> # clip\'s position is horizontally centered, and moving up !
>>> clip.set_pos(lambda t: (\'center\', 50+t))'
@apply_to_mask @outplace def set_position(self, pos, relative=False):
self.relative_pos = relative if hasattr(pos, '__call__'): self.pos = pos else: self.pos = (lambda t: pos)
'Returns an ImageClip made out of the clip\'s frame at time ``t``, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: \'01:03:05.35\'.'
@convert_to_seconds(['t']) def to_ImageClip(self, t=0, with_mask=True):
newclip = ImageClip(self.get_frame(t), ismask=self.ismask) if (with_mask and (self.mask is not None)): newclip.mask = self.mask.to_ImageClip(t) return newclip
'Return a mask video clip made from the clip.'
def to_mask(self, canal=0):
if self.ismask: return self else: newclip = self.fl_image((lambda pic: ((1.0 * pic[:, :, canal]) / 255))) newclip.ismask = True return newclip
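The transform above keeps one colour channel and rescales 8-bit values to the 0-1 range expected of masks; a minimal numpy illustration of the same operation (array values hypothetical):

import numpy as np
frame = np.array([[[255, 0, 0], [128, 0, 0]]], dtype='uint8')  # 1x2 RGB frame
mask = 1.0 * frame[:, :, 0] / 255  # channel 0 rescaled to [0, 1]
# mask == [[1.0, 0.50196...]]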
'Return a non-mask video clip made from the mask video clip.'
def to_RGB(self):
if self.ismask: f = (lambda pic: np.dstack((3 * [(255 * pic)])).astype('uint8')) newclip = self.fl_image(f) newclip.ismask = False return newclip else: return self
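Conversely, the mask-to-RGB transform above duplicates the greyscale values into three 8-bit channels; a minimal numpy illustration (values hypothetical):

import numpy as np
mask_row = np.array([[0.0, 0.5, 1.0]])                 # greyscale mask row
rgb = np.dstack(3 * [255 * mask_row]).astype('uint8')  # shape (1, 3, 3)
# rgb == [[[0, 0, 0], [127, 127, 127], [255, 255, 255]]]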
'Remove the clip\'s audio. Return a copy of the clip with audio set to None.'
@outplace def without_audio(self):
self.audio = None
'Transform the clip\'s audio. Return a new clip whose audio has been transformed by ``fun``.'
@outplace def afx(self, fun, *a, **k):
self.audio = self.audio.fx(fun, *a, **k)
'General transformation filter. Equivalent to VideoClip.fl. The result is no longer an ImageClip; it has the class VideoClip (since it may be animated).'
def fl(self, fl, apply_to=None, keep_duration=True):
if (apply_to is None): apply_to = [] newclip = VideoClip.fl(self, fl, apply_to=apply_to, keep_duration=keep_duration) newclip.__class__ = VideoClip return newclip
'Image-transformation filter. Does the same as VideoClip.fl_image, but for ImageClip the transformed clip is computed once and for all at the beginning, and not for each \'frame\'.'
@outplace def fl_image(self, image_func, apply_to=None):
if (apply_to is None): apply_to = [] arr = image_func(self.get_frame(0)) self.size = arr.shape[:2][::(-1)] self.make_frame = (lambda t: arr) self.img = arr for attr in apply_to: if hasattr(self, attr): a = getattr(self, attr) if (a is not None): new_a = a.fl_image(image_func) setattr(self, attr, new_a)
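A brief usage example (filename hypothetical): because the clip is a still image, the transform runs exactly once rather than per frame:

from moviepy.editor import ImageClip
clip = ImageClip('picture.png')                  # hypothetical input file
inverted = clip.fl_image(lambda pic: 255 - pic)  # negative, computed once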
'Time-transformation filter. Applies a transformation to the clip\'s timeline (see Clip.fl_time). This method does nothing for ImageClips (but it may affect their masks or their audios). The result is still an ImageClip.'
@outplace def fl_time(self, time_func, apply_to=None, keep_duration=False):
if (apply_to is None): apply_to = ['mask', 'audio'] for attr in apply_to: if hasattr(self, attr): a = getattr(self, attr) if (a is not None): new_a = a.fl_time(time_func) setattr(self, attr, new_a)
'Returns the list of all valid entries for the given argument of ``TextClip`` (can be ``font``, ``color``, etc.)'
@staticmethod def list(arg):
popen_params = {'stdout': sp.PIPE, 'stderr': DEVNULL, 'stdin': DEVNULL} if (os.name == 'nt'): popen_params['creationflags'] = 134217728 process = sp.Popen([get_setting('IMAGEMAGICK_BINARY'), '-list', arg], **popen_params) result = process.communicate()[0] lines = result.splitlines() if (arg == 'font'): return [l.decode('UTF-8')[8:] for l in lines if l.startswith(b' Font:')] elif (arg == 'color'): return [l.decode('UTF-8').split(' ')[0] for l in lines[2:]] else: raise Exception("Moviepy:Error! Argument must equal 'font' or 'color'")
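A brief usage example (assumes ImageMagick is installed and configured for MoviePy):

from moviepy.editor import TextClip
print(TextClip.list('font')[:5])   # first five available font names
print(TextClip.list('color')[:5])  # first five recognized color names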
'Returns the list of all valid entries which contain ``string`` for the argument ``arg`` of ``TextClip``, for instance >>> # Find all the available fonts which contain "Courier" >>> print ( TextClip.search(\'Courier\', \'font\') )'
@staticmethod def search(string, arg):
string = string.lower() names_list = TextClip.list(arg) return [name for name in names_list if (string in name.lower())]
'Returns a sequence of [(t1,t2), txt] covering all of the given subclip from t_start to t_end. The first and last times will be cropped so as to be exactly t_start and t_end if possible.'
def in_subclip(self, t_start=None, t_end=None):
def is_in_subclip(t1, t2): try: return ((t_start <= t1 < t_end) or (t_start < t2 <= t_end)) except TypeError: return False def try_cropping(t1, t2): try: return (max(t1, t_start), min(t2, t_end)) except TypeError: return (t1, t2) return [(try_cropping(t1, t2), txt) for ((t1, t2), txt) in self.subtitles if is_in_subclip(t1, t2)]
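A brief usage example (assumes a SubtitlesClip built from a hypothetical .srt file):

from moviepy.video.tools.subtitles import SubtitlesClip
subs = SubtitlesClip('movie.srt')  # hypothetical subtitle file
# Entries overlapping the 60s-120s window, times cropped to the window:
print(subs.in_subclip(60, 120))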
'Returns a FramesMatches object obtained by filtering out the FramesMatch which do not satisfy the condition ``cond``. ``cond`` is a function (FrameMatch -> bool). Examples >>> # Only keep the matches corresponding to (> 1 second) sequences. >>> new_matches = matches.filter( lambda match: match.time_span > 1)'
def filter(self, cond):
return FramesMatches(filter(cond, self))
'Loads a FramesMatches object from a file. >>> matching_frames = FramesMatches.load("somefile")'
@staticmethod def load(filename):
arr = np.loadtxt(filename) mfs = [FramesMatch(*e) for e in arr] return FramesMatches(mfs)
'Finds all the frames that look alike in a clip, for instance to make a looping gif. This returns a FramesMatches object of all the pairs of frames with (t2-t1 < max_d) and whose distance is under dist_thr. This is a well-optimized routine and quite fast. Examples We find all matching frames in a given video and turn the best match with a duration of 1.5s or more into a GIF: >>> from moviepy.editor import VideoFileClip >>> from moviepy.video.tools.cuts import find_matching_frames >>> clip = VideoFileClip("foo.mp4").resize(width=200) >>> matches = find_matching_frames(clip, 10, 3) # will take time >>> best = matches.filter(lambda m: m.time_span > 1.5).best() >>> clip.subclip(best.t1, best.t2).write_gif("foo.gif") Parameters clip A MoviePy video clip, possibly transformed/resized dist_thr Distance above which a match is rejected max_d Maximal duration (in seconds) between two matching frames fps Frames per second (default will be clip.fps)'
@staticmethod def from_clip(clip, dist_thr, max_d, fps=None):
N_pixels = ((clip.w * clip.h) * 3) dot_product = (lambda F1, F2: ((F1 * F2).sum() / N_pixels)) F = {} def distance(t1, t2): uv = dot_product(F[t1]['frame'], F[t2]['frame']) (u, v) = (F[t1]['|F|sq'], F[t2]['|F|sq']) return np.sqrt(((u + v) - (2 * uv))) matching_frames = [] for (t, frame) in clip.iter_frames(with_times=True, progress_bar=True): flat_frame = (1.0 * frame.flatten()) F_norm_sq = dot_product(flat_frame, flat_frame) F_norm = np.sqrt(F_norm_sq) for t2 in list(F.keys()): if ((t - t2) > max_d): F.pop(t2) else: F[t2][t] = {'min': abs((F[t2]['|F|'] - F_norm)), 'max': (F[t2]['|F|'] + F_norm)} F[t2][t]['rejected'] = (F[t2][t]['min'] > dist_thr) t_F = sorted(F.keys()) F[t] = {'frame': flat_frame, '|F|sq': F_norm_sq, '|F|': F_norm} for (i, t2) in enumerate(t_F): if F[t2][t]['rejected']: continue dist = distance(t, t2) F[t2][t]['min'] = F[t2][t]['max'] = dist F[t2][t]['rejected'] = (dist >= dist_thr) for t3 in t_F[(i + 1):]: (t3t, t2t3) = (F[t3][t], F[t2][t3]) t3t['max'] = min(t3t['max'], (dist + t2t3['max'])) t3t['min'] = max(t3t['min'], (dist - t2t3['max']), (t2t3['min'] - dist)) if (t3t['min'] > dist_thr): t3t['rejected'] = True matching_frames += [(t1, t, F[t1][t]['min'], F[t1][t]['max']) for t1 in F if ((t1 != t) and (not F[t1][t]['rejected']))] return FramesMatches([FramesMatch(*e) for e in matching_frames])
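The body above avoids computing most pairwise distances exactly: for each cached pair it maintains lower and upper bounds derived from the frame norms and the triangle inequality, and only evaluates the true distance when the lower bound is still below dist_thr. A minimal sketch of the normalized-distance identity it relies on (frames hypothetical):

import numpy as np
f1 = np.random.rand(10, 10, 3).flatten()  # hypothetical flattened frames
f2 = np.random.rand(10, 10, 3).flatten()
n = f1.size
# |f1 - f2|^2 = |f1|^2 + |f2|^2 - 2*(f1 . f2); each term is divided by n:
d = np.sqrt(f1.dot(f1) / n + f2.dot(f2) / n - 2 * f1.dot(f2) / n)
assert abs(d - np.linalg.norm(f1 - f2) / np.sqrt(n)) < 1e-9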
'match_thr The smaller, the better-looping the gifs are. min_time_span Only GIFs with a duration longer than min_time_span (in seconds) will be extracted. nomatch_thr If None, then it is chosen equal to match_thr time_distance Minimum spacing (in seconds) imposed between the starts of two selected scenes'
def select_scenes(self, match_thr, min_time_span, nomatch_thr=None, time_distance=0):
if (nomatch_thr is None): nomatch_thr = match_thr dict_starts = defaultdict(list) for (start, end, d_min, d_max) in self: dict_starts[start].append([end, d_min, d_max]) starts_ends = sorted(dict_starts.items(), key=(lambda k: k[0])) result = [] min_start = 0 for (start, ends_distances) in starts_ends: if (start < min_start): continue ends = [end for (end, d_min, d_max) in ends_distances] great_matches = [(end, d_min, d_max) for (end, d_min, d_max) in ends_distances if (d_max < match_thr)] great_long_matches = [(end, d_min, d_max) for (end, d_min, d_max) in great_matches if ((end - start) > min_time_span)] if (not great_long_matches): continue poor_matches = set([end for (end, d_min, d_max) in ends_distances if (d_min > nomatch_thr)]) short_matches = [end for end in ends if ((end - start) <= 0.6)] if (len(poor_matches.intersection(short_matches)) == 0): continue end = max([end for (end, d_min, d_max) in great_long_matches]) (end, d_min, d_max) = [e for e in great_long_matches if (e[0] == end)][0] result.append(FramesMatch(start, end, d_min, d_max)) min_start = (start + time_distance) return FramesMatches(result)
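A brief usage example (filename and threshold values hypothetical): load precomputed matches and keep only well-looping scenes at least 1.5 s long, with selected starts at least 0.5 s apart:

from moviepy.video.tools.cuts import FramesMatches
matches = FramesMatches.load('matches.txt')  # hypothetical file
scenes = matches.select_scenes(match_thr=1, min_time_span=1.5,
                               nomatch_thr=4, time_distance=0.5)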