Columns:
  repo           stringclasses   3 values
  pull_number    int64           64 to 162
  instance_id    stringclasses   3 values
  issue_numbers  stringclasses   2 values
  base_commit    stringclasses   3 values
  patch          stringclasses   3 values
  created_at     stringclasses   3 values
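This schema maps directly onto the Hugging Face `datasets` API. Below is a minimal sketch of loading and inspecting rows like the three shown further down; the repository path is a placeholder, since the actual Hub name is not visible on this page.

```python
from datasets import load_dataset

# Placeholder path; substitute the real Hub repository for this dataset.
ds = load_dataset("some-org/swe-patch-instances", split="train")

for row in ds:
    # Each row mirrors the columns listed above.
    print(row["repo"], row["pull_number"], row["instance_id"])
    print(row["base_commit"], row["created_at"])
    print(row["patch"][:200])  # the unified diff, truncated for display
```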
Row 1
  repo:          mixpanel/mixpanel-python
  pull_number:   64
  instance_id:   mixpanel__mixpanel-python-64
  issue_numbers: ["63"]
  base_commit:   40c98e0b285898384cc4aa6cc803d8d0f46f6218
  patch:
diff --git a/mixpanel/__init__.py b/mixpanel/__init__.py --- a/mixpanel/__init__.py +++ b/mixpanel/__init__.py @@ -345,6 +345,7 @@ def send(self, endpoint, json_message, api_key=None): :param endpoint: the Mixpanel API endpoint appropriate for the message :type endpoint: "events" | "people" | "imports" :param str json_message: a JSON message formatted for the endpoint + :param str api_key: your Mixpanel project's API key :raises MixpanelException: if the endpoint doesn't exist, the server is unreachable, or the message cannot be processed """ @@ -412,6 +413,7 @@ def __init__(self, max_size=50, events_url=None, people_url=None, import_url=Non 'imports': [], } self._max_size = min(50, max_size) + self._api_key = None def send(self, endpoint, json_message, api_key=None): """Record an event or profile update. @@ -424,16 +426,22 @@ def send(self, endpoint, json_message, api_key=None): :param endpoint: the Mixpanel API endpoint appropriate for the message :type endpoint: "events" | "people" | "imports" :param str json_message: a JSON message formatted for the endpoint + :param str api_key: your Mixpanel project's API key :raises MixpanelException: if the endpoint doesn't exist, the server is unreachable, or any buffered message cannot be processed + + .. versionadded:: 4.3.2 + The *api_key* parameter. """ if endpoint not in self._buffers: raise MixpanelException('No such endpoint "{0}". Valid endpoints are one of {1}'.format(endpoint, self._buffers.keys())) buf = self._buffers[endpoint] buf.append(json_message) + if api_key is not None: + self._api_key = api_key if len(buf) >= self._max_size: - self._flush_endpoint(endpoint, api_key) + self._flush_endpoint(endpoint) def flush(self): """Immediately send all buffered messages to Mixpanel. @@ -444,13 +452,13 @@ def flush(self): for endpoint in self._buffers.keys(): self._flush_endpoint(endpoint) - def _flush_endpoint(self, endpoint, api_key=None): + def _flush_endpoint(self, endpoint): buf = self._buffers[endpoint] while buf: batch = buf[:self._max_size] batch_json = '[{0}]'.format(','.join(batch)) try: - self._consumer.send(endpoint, batch_json, api_key) + self._consumer.send(endpoint, batch_json, self._api_key) except MixpanelException as orig_e: mp_e = MixpanelException(orig_e) mp_e.message = batch_json
  created_at:    2016-12-22T00:07:05Z
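Each row pairs a `base_commit` with a `patch`, which suggests the usual replay workflow: check out that commit in a local clone of `repo` and apply the diff. A minimal sketch, assuming you already have a clone on disk and that the stored `patch` keeps its original newlines (this preview flattens them):

```python
import subprocess

def checkout_and_apply(repo_dir: str, base_commit: str, patch_text: str) -> None:
    """Check out the row's base commit and apply its PR patch in an existing clone."""
    subprocess.run(["git", "checkout", base_commit], cwd=repo_dir, check=True)
    subprocess.run(["git", "apply", "--whitespace=nowarn", "-"],
                   input=patch_text.encode("utf-8"), cwd=repo_dir, check=True)

# e.g. checkout_and_apply("mixpanel-python",
#                         "40c98e0b285898384cc4aa6cc803d8d0f46f6218",
#                         row["patch"])
```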
Row 2
  repo:        NVIDIA/NeMo
  pull_number: 162
  instance_id: NVIDIA__NeMo-162
  base_commit: f5f09838b96ab48f40d97c100fbcfc5b7f1ac59e
  patch:
diff --git a/collections/nemo_nlp/nemo_nlp/data/data_layers.py b/collections/nemo_nlp/nemo_nlp/data/data_layers.py --- a/collections/nemo_nlp/nemo_nlp/data/data_layers.py +++ b/collections/nemo_nlp/nemo_nlp/data/data_layers.py @@ -683,9 +683,9 @@ def _collate_fn(self, x): [np.stack(x, axis=0) for x in components] src_ids = torch.Tensor(src_ids).long().to(self._device) src_segment_ids = torch.Tensor(src_segment_ids).long().to(self._device) - src_mask = torch.Tensor(src_mask).float().to(self._device) + src_mask = torch.Tensor(src_mask).long().to(self._device) tgt_ids = torch.Tensor(tgt_ids).long().to(self._device) - tgt_mask = torch.Tensor(tgt_mask).float().to(self._device) + tgt_mask = torch.Tensor(tgt_mask).long().to(self._device) sent_ids = torch.Tensor(sent_ids).long().to(self._device) return src_ids, src_segment_ids, src_mask, tgt_ids, tgt_mask, sent_ids diff --git a/collections/nemo_nlp/nemo_nlp/data/datasets/bert_pretraining.py b/collections/nemo_nlp/nemo_nlp/data/datasets/bert_pretraining.py --- a/collections/nemo_nlp/nemo_nlp/data/datasets/bert_pretraining.py +++ b/collections/nemo_nlp/nemo_nlp/data/datasets/bert_pretraining.py @@ -249,7 +249,7 @@ def truncate_seq_pair(a, b, max_num_tokens): input_ids, output_mask = self.mask_ids(output_ids) - input_mask = np.zeros(self.max_seq_length, dtype=np.float32) + input_mask = np.zeros(self.max_seq_length, dtype=np.long) input_mask[:len(input_ids)] = 1 input_type_ids = np.zeros(self.max_seq_length, dtype=np.int) @@ -263,7 +263,7 @@ def truncate_seq_pair(a, b, max_num_tokens): # TODO: wrap the return value with () for consistent style. return np.array(input_ids), input_type_ids,\ - np.array(input_mask, dtype=np.float32), np.array(output_ids),\ + np.array(input_mask, dtype=np.long), np.array(output_ids),\ np.array(output_mask, dtype=np.float32), is_next def mask_ids(self, ids): diff --git a/collections/nemo_nlp/nemo_nlp/data/datasets/glue.py b/collections/nemo_nlp/nemo_nlp/data/datasets/glue.py --- a/collections/nemo_nlp/nemo_nlp/data/datasets/glue.py +++ b/collections/nemo_nlp/nemo_nlp/data/datasets/glue.py @@ -55,7 +55,7 @@ def __getitem__(self, idx): feature = self.features[idx] return (np.array(feature.input_ids), np.array(feature.segment_ids), - np.array(feature.input_mask, dtype=np.float32), + np.array(feature.input_mask, dtype=np.long), np.array(feature.label_id)) diff --git a/collections/nemo_nlp/nemo_nlp/data/datasets/joint_intent_slot.py b/collections/nemo_nlp/nemo_nlp/data/datasets/joint_intent_slot.py --- a/collections/nemo_nlp/nemo_nlp/data/datasets/joint_intent_slot.py +++ b/collections/nemo_nlp/nemo_nlp/data/datasets/joint_intent_slot.py @@ -214,7 +214,7 @@ def __len__(self): def __getitem__(self, idx): return (np.array(self.all_input_ids[idx]), np.array(self.all_segment_ids[idx]), - np.array(self.all_input_mask[idx]), + np.array(self.all_input_mask[idx], dtype=np.long), np.array(self.all_loss_mask[idx]), np.array(self.all_subtokens_mask[idx]), self.all_intents[idx], @@ -263,6 +263,6 @@ def __len__(self): def __getitem__(self, idx): return (np.array(self.all_input_ids[idx]), np.array(self.all_segment_ids[idx]), - np.array(self.all_input_mask[idx], dtype=np.float32), + np.array(self.all_input_mask[idx], dtype=np.long), np.array(self.all_loss_mask[idx]), np.array(self.all_subtokens_mask[idx])) diff --git a/collections/nemo_nlp/nemo_nlp/data/datasets/punctuation_capitalization.py b/collections/nemo_nlp/nemo_nlp/data/datasets/punctuation_capitalization.py --- 
a/collections/nemo_nlp/nemo_nlp/data/datasets/punctuation_capitalization.py +++ b/collections/nemo_nlp/nemo_nlp/data/datasets/punctuation_capitalization.py @@ -386,7 +386,7 @@ def __len__(self): def __getitem__(self, idx): return (np.array(self.all_input_ids[idx]), np.array(self.all_segment_ids[idx]), - np.array(self.all_input_mask[idx], dtype=np.float32), + np.array(self.all_input_mask[idx], dtype=np.long), np.array(self.all_loss_mask[idx]), np.array(self.all_subtokens_mask[idx]), np.array(self.punct_all_labels[idx]), diff --git a/collections/nemo_nlp/nemo_nlp/data/datasets/sentence_classification.py b/collections/nemo_nlp/nemo_nlp/data/datasets/sentence_classification.py --- a/collections/nemo_nlp/nemo_nlp/data/datasets/sentence_classification.py +++ b/collections/nemo_nlp/nemo_nlp/data/datasets/sentence_classification.py @@ -115,7 +115,7 @@ def __getitem__(self, idx): return (np.array(feature.input_ids), np.array(feature.segment_ids), - np.array(feature.input_mask, dtype=np.float32), + np.array(feature.input_mask, dtype=np.long), feature.sent_label) def convert_sequences_to_features(self, diff --git a/collections/nemo_nlp/nemo_nlp/data/datasets/token_classification.py b/collections/nemo_nlp/nemo_nlp/data/datasets/token_classification.py --- a/collections/nemo_nlp/nemo_nlp/data/datasets/token_classification.py +++ b/collections/nemo_nlp/nemo_nlp/data/datasets/token_classification.py @@ -333,7 +333,7 @@ def __len__(self): def __getitem__(self, idx): return (np.array(self.all_input_ids[idx]), np.array(self.all_segment_ids[idx]), - np.array(self.all_input_mask[idx], dtype=np.float32), + np.array(self.all_input_mask[idx], dtype=np.long), np.array(self.all_loss_mask[idx]), np.array(self.all_subtokens_mask[idx]), np.array(self.all_labels[idx])) @@ -377,6 +377,6 @@ def __len__(self): def __getitem__(self, idx): return (np.array(self.all_input_ids[idx]), np.array(self.all_segment_ids[idx]), - np.array(self.all_input_mask[idx], dtype=np.float32), + np.array(self.all_input_mask[idx], dtype=np.long), np.array(self.all_loss_mask[idx]), np.array(self.all_subtokens_mask[idx])) diff --git a/collections/nemo_nlp/nemo_nlp/data/tokenizers/bert_tokenizer.py b/collections/nemo_nlp/nemo_nlp/data/tokenizers/bert_tokenizer.py --- a/collections/nemo_nlp/nemo_nlp/data/tokenizers/bert_tokenizer.py +++ b/collections/nemo_nlp/nemo_nlp/data/tokenizers/bert_tokenizer.py @@ -1,5 +1,5 @@ from .tokenizer_spec import TokenizerSpec -from pytorch_transformers import BertTokenizer +from transformers import BertTokenizer import re diff --git a/collections/nemo_nlp/nemo_nlp/data/tokenizers/gpt2_tokenizer.py b/collections/nemo_nlp/nemo_nlp/data/tokenizers/gpt2_tokenizer.py --- a/collections/nemo_nlp/nemo_nlp/data/tokenizers/gpt2_tokenizer.py +++ b/collections/nemo_nlp/nemo_nlp/data/tokenizers/gpt2_tokenizer.py @@ -1,5 +1,5 @@ from .tokenizer_spec import TokenizerSpec -from pytorch_transformers import GPT2Tokenizer +from transformers import GPT2Tokenizer class NemoGPT2Tokenizer(TokenizerSpec): diff --git a/collections/nemo_nlp/nemo_nlp/huggingface/bert.py b/collections/nemo_nlp/nemo_nlp/huggingface/bert.py --- a/collections/nemo_nlp/nemo_nlp/huggingface/bert.py +++ b/collections/nemo_nlp/nemo_nlp/huggingface/bert.py @@ -1,10 +1,10 @@ # Copyright (c) 2019 NVIDIA Corporation from typing import Optional, List -from pytorch_transformers import (BertConfig, - BertModel, - BERT_PRETRAINED_MODEL_ARCHIVE_MAP, - BERT_PRETRAINED_CONFIG_ARCHIVE_MAP) +from transformers import (BertConfig, + BertModel, + BERT_PRETRAINED_MODEL_ARCHIVE_MAP, + 
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP) from nemo.backends.pytorch.nm import TrainableNM from nemo.core.neural_modules import PretrainedModelInfo @@ -18,7 +18,7 @@ class BERT(TrainableNM): """ BERT wraps around the Huggingface implementation of BERT from their - pytorch-transformers repository for easy use within NeMo. + transformers repository for easy use within NeMo. Args: pretrained_model_name (str): If using a pretrained model, this should diff --git a/collections/nemo_nlp/setup.py b/collections/nemo_nlp/setup.py --- a/collections/nemo_nlp/setup.py +++ b/collections/nemo_nlp/setup.py @@ -25,7 +25,7 @@ 'python-dateutil<2.8.1,>=2.1', 'boto3', 'unidecode', - 'pytorch-transformers', + 'transformers', 'matplotlib', 'h5py', 'youtokentome' diff --git a/examples/nlp/joint_intent_slot_infer.py b/examples/nlp/joint_intent_slot_infer.py --- a/examples/nlp/joint_intent_slot_infer.py +++ b/examples/nlp/joint_intent_slot_infer.py @@ -2,7 +2,7 @@ import os import numpy as np -from pytorch_transformers import BertTokenizer +from transformers import BertTokenizer from sklearn.metrics import confusion_matrix, classification_report import nemo diff --git a/examples/nlp/joint_intent_slot_infer_b1.py b/examples/nlp/joint_intent_slot_infer_b1.py --- a/examples/nlp/joint_intent_slot_infer_b1.py +++ b/examples/nlp/joint_intent_slot_infer_b1.py @@ -1,7 +1,7 @@ import argparse import numpy as np -from pytorch_transformers import BertTokenizer +from transformers import BertTokenizer import nemo import nemo_nlp diff --git a/examples/nlp/joint_intent_slot_with_bert.py b/examples/nlp/joint_intent_slot_with_bert.py --- a/examples/nlp/joint_intent_slot_with_bert.py +++ b/examples/nlp/joint_intent_slot_with_bert.py @@ -3,7 +3,7 @@ import os import numpy as np -from pytorch_transformers import BertTokenizer +from transformers import BertTokenizer import nemo from nemo.utils.lr_policies import get_lr_policy diff --git a/examples/nlp/sentence_classification_with_bert.py b/examples/nlp/sentence_classification_with_bert.py --- a/examples/nlp/sentence_classification_with_bert.py +++ b/examples/nlp/sentence_classification_with_bert.py @@ -2,7 +2,7 @@ import math import numpy as np -from pytorch_transformers import BertTokenizer +from transformers import BertTokenizer from torch import nn import torch diff --git a/nemo/nemo/backends/pytorch/nm.py b/nemo/nemo/backends/pytorch/nm.py --- a/nemo/nemo/backends/pytorch/nm.py +++ b/nemo/nemo/backends/pytorch/nm.py @@ -36,7 +36,7 @@ def __init__(self, **kwargs): nn.Module.__init__(self) # For PyTorch API self._device = get_cuda_device(self.placement) - def __call__(self, force_pt=False, *input, **kwargs): + def __call__(self, *input, force_pt=False, **kwargs): pt_call = len(input) > 0 or force_pt if pt_call: return nn.Module.__call__(self, *input, **kwargs) diff --git a/scripts/get_decoder_params_from_bert.py b/scripts/get_decoder_params_from_bert.py --- a/scripts/get_decoder_params_from_bert.py +++ b/scripts/get_decoder_params_from_bert.py @@ -1,6 +1,6 @@ import torch -from pytorch_transformers import BERT_PRETRAINED_MODEL_ARCHIVE_MAP -from pytorch_transformers.file_utils import cached_path +from transformers import BERT_PRETRAINED_MODEL_ARCHIVE_MAP +from transformers.file_utils import cached_path import argparse state_dict_mappings = {
  created_at:  2019-12-03T01:19:14Z
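Patches like the one above touch many files, so a small helper that lists the affected paths is handy when triaging rows. A sketch, again assuming the stored `patch` field contains real newlines rather than the flattened text shown in this preview:

```python
import re

def changed_files(patch_text: str) -> list[str]:
    """Return the b/ paths named in the 'diff --git' headers of a unified diff."""
    return re.findall(r"^diff --git a/\S+ b/(\S+)", patch_text, flags=re.MULTILINE)

# For the NVIDIA/NeMo row this lists data_layers.py, bert_pretraining.py, glue.py,
# the tokenizer modules, and the other files named in its diff headers.
```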
Row 3
  repo:        mlsecproject/combine
  pull_number: 103
  instance_id: mlsecproject__combine-103
  base_commit: d662493af9f6ee7bc36c5509c277f93980009bec
  patch:
diff --git a/baler.py b/baler.py --- a/baler.py +++ b/baler.py @@ -1,21 +1,21 @@ import ConfigParser -import csv import datetime as dt import gzip import json +import logging import os -import sys +import re import requests +import sys import time -import re -from Queue import Queue +import unicodecsv import threading from logger import get_logger -import logging - +from Queue import Queue logger = get_logger('baler') + def tiq_output(reg_file, enr_file): config = ConfigParser.SafeConfigParser() cfg_success = config.read('combine.cfg') @@ -43,8 +43,8 @@ def tiq_output(reg_file, enr_file): outbound_data = [row for row in reg_data if row[2] == 'outbound'] try: - bale_reg_csvgz(inbound_data, os.path.join(tiq_dir, 'raw', 'public_inbound', today+'.csv.gz')) - bale_reg_csvgz(outbound_data, os.path.join(tiq_dir, 'raw', 'public_outbound', today+'.csv.gz')) + bale_reg_csvgz(inbound_data, os.path.join(tiq_dir, 'raw', 'public_inbound', today + '.csv.gz')) + bale_reg_csvgz(outbound_data, os.path.join(tiq_dir, 'raw', 'public_outbound', today + '.csv.gz')) except: pass @@ -52,8 +52,8 @@ def tiq_output(reg_file, enr_file): outbound_data = [row for row in enr_data if row[2] == 'outbound'] try: - bale_enr_csvgz(inbound_data, os.path.join(tiq_dir, 'enriched', 'public_inbound', today+'.csv.gz')) - bale_enr_csvgz(outbound_data, os.path.join(tiq_dir, 'enriched', 'public_outbound', today+'.csv.gz')) + bale_enr_csvgz(inbound_data, os.path.join(tiq_dir, 'enriched', 'public_inbound', today + '.csv.gz')) + bale_enr_csvgz(outbound_data, os.path.join(tiq_dir, 'enriched', 'public_outbound', today + '.csv.gz')) except: pass @@ -64,7 +64,7 @@ def bale_reg_csvgz(harvest, output_file): """ bale the data as a gziped csv file""" logger.info('Output regular data as GZip CSV to %s' % output_file) with gzip.open(output_file, 'wb') as csv_file: - bale_writer = csv.writer(csv_file, quoting=csv.QUOTE_ALL) + bale_writer = unicodecsv.writer(csv_file, quoting=unicodecsv.QUOTE_ALL) # header row bale_writer.writerow(('entity', 'type', 'direction', 'source', 'notes', 'date')) @@ -75,7 +75,7 @@ def bale_reg_csv(harvest, output_file): """ bale the data as a csv file""" logger.info('Output regular data as CSV to %s' % output_file) with open(output_file, 'wb') as csv_file: - bale_writer = csv.writer(csv_file, quoting=csv.QUOTE_ALL) + bale_writer = unicodecsv.writer(csv_file, quoting=unicodecsv.QUOTE_ALL) # header row bale_writer.writerow(('entity', 'type', 'direction', 'source', 'notes', 'date')) @@ -86,112 +86,117 @@ def bale_enr_csv(harvest, output_file): """ output the data as an enriched csv file""" logger.info('Output enriched data as CSV to %s' % output_file) with open(output_file, 'wb') as csv_file: - bale_writer = csv.writer(csv_file, quoting=csv.QUOTE_ALL) + bale_writer = unicodecsv.writer(csv_file, quoting=unicodecsv.QUOTE_ALL) # header row bale_writer.writerow(('entity', 'type', 'direction', 'source', 'notes', 'date', 'asnumber', 'asname', 'country', 'host', 'rhost')) bale_writer.writerows(harvest) + def bale_enr_csvgz(harvest, output_file): """ output the data as an enriched gziped csv file""" logger.info('Output enriched data as GZip CSV to %s' % output_file) with gzip.open(output_file, 'wb') as csv_file: - bale_writer = csv.writer(csv_file, quoting=csv.QUOTE_ALL) + bale_writer = unicodecsv.writer(csv_file, quoting=unicodecsv.QUOTE_ALL) # header row bale_writer.writerow(('entity', 'type', 'direction', 'source', 'notes', 'date', 'asnumber', 'asname', 'country', 'host', 'rhost')) bale_writer.writerows(harvest) -def 
bale_CRITs_indicator(base_url,data,indicator_que): + +def bale_CRITs_indicator(base_url, data, indicator_que): """ One thread of adding indicators to CRITs""" while not indicator_que.empty(): - indicator=indicator_que.get() + indicator = indicator_que.get() if indicator[1] == 'IPv4': # using the IP API - url=base_url+'ips/' - data['add_indicator']="true" - data['ip']=indicator[0] - data['ip_type']='Address - ipv4-addr' - data['reference']=indicator[3] + url = base_url + 'ips/' + data['add_indicator'] = "true" + data['ip'] = indicator[0] + data['ip_type'] = 'Address - ipv4-addr' + data['reference'] = indicator[3] # getting the source automatically: - source=re.findall(r'\/\/(.*?)\/',data['reference']) + source = re.findall(r'\/\/(.*?)\/', data['reference']) if source: - data['source']=source[0] - res = requests.post(url,data=data,verify=False) - if not res.status_code in [201,200,400]: + data['source'] = source[0] + res = requests.post(url, data=data, verify=False) + if not res.status_code in [201, 200, 400]: logger.info("Issues with adding: %s" % data['ip']) elif indicator[1] == "FQDN": # using the Domain API - url=base_url+'domains/' - data['add_indicator']="true" - data['domain']=indicator[0] - data['reference']=indicator[3] + url = base_url + 'domains/' + data['add_indicator'] = "true" + data['domain'] = indicator[0] + data['reference'] = indicator[3] # getting the source automatically: - source=re.findall(r'\/\/(.*?)\/',data['reference']) + source = re.findall(r'\/\/(.*?)\/', data['reference']) if source: - data['source']=source[0] - res = requests.post(url,data=data,verify=False) - if not res.status_code in [201,200,400]: + data['source'] = source[0] + res = requests.post(url, data=data, verify=False) + if not res.status_code in [201, 200, 400]: logger.info("Issues with adding: %s" % data['domain']) else: - logger.info("don't yet know what to do with: %s[%s]" % (indicator[1],indicator[0])) + logger.info("don't yet know what to do with: %s[%s]" % (indicator[1], indicator[0])) + -def bale_CRITs(harvest,filename): +def bale_CRITs(harvest, filename): """ taking the output from combine and pushing it to the CRITs web API""" # checking the minimum requirements for parameters # it would be nice to have some metadata on the feeds that can be imported in the intel library: # -> confidence # -> type of feed (bot vs spam vs ddos, you get the picture) - data={'confidence':'medium'} - start_time=time.time() + data = {'confidence': 'medium'} + start_time = time.time() config = ConfigParser.SafeConfigParser() cfg_success = config.read('combine.cfg') if not cfg_success: logger.error('tiq_output: Could not read combine.cfg.\n') logger.error('HINT: edit combine-example.cfg and save as combine.cfg.\n') return - if config.has_option('Baler','crits_username'): - data['username']=config.get('Baler', 'crits_username') + if config.has_option('Baler', 'crits_username'): + data['username'] = config.get('Baler', 'crits_username') else: raise 'Please check the combine.cnf file for the crits_username field in the [Baler] section' - if config.has_option('Baler','crits_api_key'): - data['api_key']=config.get('Baler', 'crits_api_key') + if config.has_option('Baler', 'crits_api_key'): + data['api_key'] = config.get('Baler', 'crits_api_key') else: raise 'Please check the combine.cnf file for the crits_api_key field in the [Baler] section' - if config.has_option('Baler','crits_campaign'): - data['campaign']=config.get('Baler', 'crits_campaign') + if config.has_option('Baler', 'crits_campaign'): + data['campaign'] = 
config.get('Baler', 'crits_campaign') else: logger.info('Lacking a campaign name, we will default to "combine." Errors might ensue if it does not exist in CRITs') - data['campaign']='combine' - if config.has_option('Baler','crits_url'): - base_url=config.get('Baler','crits_url') + data['campaign'] = 'combine' + if config.has_option('Baler', 'crits_url'): + base_url = config.get('Baler', 'crits_url') else: raise 'Please check the combine.cnf file for the crits_url field in the [Baler] section' - if config.has_option('Baler','crits_maxThreads'): - maxThreads=int(config.get('Baler', 'crits_maxThreads')) + if config.has_option('Baler', 'crits_maxThreads'): + maxThreads = int(config.get('Baler', 'crits_maxThreads')) else: logger.info('No number of maximum Threads has been given, defaulting to 10') - maxThreads=10 + maxThreads = 10 - data['source']='Combine' - data['method']='trawl' + data['source'] = 'Combine' + data['method'] = 'trawl' # initializing the Queue to the list of indicators in the harvest - ioc_queue=Queue() + ioc_queue = Queue() for indicator in harvest: ioc_queue.put(indicator) - total_iocs=ioc_queue.qsize() + total_iocs = ioc_queue.qsize() for x in range(maxThreads): - th=threading.Thread(target=bale_CRITs_indicator, args=(base_url,data,ioc_queue)) + th = threading.Thread(target=bale_CRITs_indicator, args=(base_url, data, ioc_queue)) th.start() for x in threading.enumerate(): - if x.name=="MainThread": + if x.name == "MainThread": continue x.join() - logger.info('Output %d indicators to CRITs using %d threads. Operation tool %d seconds\n' % (total_iocs,maxThreads,time.time()-start_time)) + logger.info('Output %d indicators to CRITs using %d threads. Operation tool %d seconds\n' % + (total_iocs, maxThreads, time.time() - start_time)) + def bale(input_file, output_file, output_format, is_regular): config = ConfigParser.SafeConfigParser() @@ -203,13 +208,13 @@ def bale(input_file, output_file, output_format, is_regular): logger.info('Reading processed data from %s' % input_file) with open(input_file, 'rb') as f: - harvest = json.load(f) + harvest = json.load(f, encoding='utf8') # TODO: also need plugins here (cf. 
#23) if is_regular: - format_funcs = {'csv': bale_reg_csv,'crits':bale_CRITs} + format_funcs = {'csv': bale_reg_csv, 'crits': bale_CRITs} else: - format_funcs = {'csv': bale_enr_csv,'crits':bale_CRITs} + format_funcs = {'csv': bale_enr_csv, 'crits': bale_CRITs} format_funcs[output_format](harvest, output_file) if __name__ == "__main__": diff --git a/winnower.py b/winnower.py --- a/winnower.py +++ b/winnower.py @@ -9,30 +9,36 @@ import sys from netaddr import IPAddress, IPRange, IPSet +from sortedcontainers import SortedDict from logger import get_logger import logging logger = get_logger('winnower') +# from http://en.wikipedia.org/wiki/Reserved_IP_addresses: +reserved_ranges = IPSet(['0.0.0.0/8', '100.64.0.0/10', '127.0.0.0/8', '192.88.99.0/24', + '198.18.0.0/15', '198.51.100.0/24', '203.0.113.0/24', '233.252.0.0/24']) +gi_org = SortedDict() + def load_gi_org(filename): - gi_org = {} with open(filename, 'rb') as f: org_reader = csv.DictReader(f, fieldnames=['start', 'end', 'org']) for row in org_reader: - gi_org[IPRange(row['start'], row['end'])] = row['org'] + gi_org[row['start']] = (IPRange(row['start'], row['end']), unicode(row['org'], errors='replace')) + return gi_org -def org_by_addr(address, org_data): +def org_by_addr(address): as_num = None as_name = None - for iprange in org_data: - if address in iprange: - as_num, sep, as_name = org_data[iprange].partition(' ') - as_num = as_num.replace("AS", "") # Making sure the variable only has the number - break + gi_index = gi_org.bisect(str(int(address))) + gi_net = gi_org[gi_org.iloc[gi_index - 1]] + if address in gi_net[0]: + as_num, sep, as_name = gi_net[1].partition(' ') + as_num = as_num.replace("AS", "") # Making sure the variable only has the number return as_num, as_name @@ -46,8 +52,8 @@ def maxhits(dns_records): return hostname -def enrich_IPv4(address, org_data, geo_data, dnsdb=None): - as_num, as_name = org_by_addr(address, org_data) +def enrich_IPv4(address, geo_data, dnsdb=None): + as_num, as_name = org_by_addr(address) country = geo_data.country_code_by_addr('%s' % address) if dnsdb: hostname = maxhits(dnsdb.query_rdata_ip('%s' % address)) @@ -73,12 +79,9 @@ def filter_date(records, date): def reserved(address): - # from http://en.wikipedia.org/wiki/Reserved_IP_addresses: - ranges = IPSet(['0.0.0.0/8', '100.64.0.0/10', '127.0.0.0/8', '192.88.99.0/24', - '198.18.0.0/15', '198.51.100.0/24', '203.0.113.0/24', '233.252.0.0/24']) a_reserved = address.is_reserved() a_private = address.is_private() - a_inr = address in ranges + a_inr = address in reserved_ranges if a_reserved or a_private or a_inr: return True else: @@ -138,7 +141,7 @@ def winnow(in_file, out_file, enr_file): # TODO: make these locations configurable? 
logger.info('Loading GeoIP data') - org_data = load_gi_org('data/GeoIPASNum2.csv') + gi_org = load_gi_org('data/GeoIPASNum2.csv') geo_data = pygeoip.GeoIP('data/GeoIP.dat', pygeoip.MEMORY_CACHE) wheat = [] @@ -147,23 +150,21 @@ def winnow(in_file, out_file, enr_file): logger.info('Beginning winnowing process') for each in crop: (addr, addr_type, direction, source, note, date) = each - # TODO: enrich DNS indicators as well if addr_type == 'IPv4' and is_ipv4(addr): - logger.info('Enriching %s' % addr) + #logger.info('Enriching %s' % addr) ipaddr = IPAddress(addr) if not reserved(ipaddr): wheat.append(each) if enrich_ip: - e_data = (addr, addr_type, direction, source, note, date) + enrich_IPv4(ipaddr, org_data, geo_data, dnsdb) + e_data = (addr, addr_type, direction, source, note, date) + enrich_IPv4(ipaddr, geo_data, dnsdb) enriched.append(e_data) else: - e_data = (addr, addr_type, direction, source, note, date) + enrich_IPv4(ipaddr, org_data, geo_data) + e_data = (addr, addr_type, direction, source, note, date) + enrich_IPv4(ipaddr, geo_data) enriched.append(e_data) else: logger.error('Found invalid address: %s from: %s' % (addr, source)) elif addr_type == 'FQDN' and is_fqdn(addr): - # TODO: validate these (cf. https://github.com/mlsecproject/combine/issues/15 ) - logger.info('Enriching %s' % addr) + #logger.info('Enriching %s' % addr) wheat.append(each) if enrich_dns and dnsdb: e_data = (addr, addr_type, direction, source, note, date, enrich_FQDN(addr, date, dnsdb)) @@ -173,10 +174,12 @@ def winnow(in_file, out_file, enr_file): logger.info('Dumping results') with open(out_file, 'wb') as f: - json.dump(wheat, f, indent=2) + w_data = json.dumps(wheat, indent=2, ensure_ascii=False).encode('utf8') + f.write(w_data) with open(enr_file, 'wb') as f: - json.dump(enriched, f, indent=2) + e_data = json.dumps(enriched, indent=2, ensure_ascii=False).encode('utf8') + f.write(e_data) if __name__ == "__main__":
  created_at:  2014-12-26T18:31:08Z
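The remaining metadata fields are plain strings: `issue_numbers` appears to be a JSON-encoded list (the first row stores '["63"]') and `created_at` is an ISO-8601 UTC timestamp. A small parsing sketch under those assumptions:

```python
import json
from datetime import datetime, timezone

def parse_metadata(row: dict) -> tuple[list, datetime]:
    """Decode the JSON-encoded issue list and the ISO-8601 creation timestamp."""
    issues = json.loads(row["issue_numbers"]) if row.get("issue_numbers") else []
    created = datetime.strptime(row["created_at"], "%Y-%m-%dT%H:%M:%SZ")
    return issues, created.replace(tzinfo=timezone.utc)

# parse_metadata({"issue_numbers": '["63"]', "created_at": "2016-12-22T00:07:05Z"})
# -> (['63'], datetime(2016, 12, 22, 0, 7, 5, tzinfo=timezone.utc))
```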