@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Seqeval(datasets.Metric):
def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage='https://github.com/chakki-works/seqeval',
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features({
                'predictions': datasets.Sequence(datasets.Value('string', id='label'), id='sequence'),
                'references': datasets.Sequence(datasets.Value('string', id='label'), id='sequence'),
            }),
            codebase_urls=['https://github.com/chakki-works/seqeval'],
            reference_urls=['https://github.com/chakki-works/seqeval'],
        )
    def _compute(self, predictions, references, suffix: bool=False, scheme: Optional[str]=None, mode: Optional[str]=None,
                 sample_weight: Optional[List[int]]=None, zero_division: Union[str, int]='warn'):
if (scheme is not None):
try:
scheme_module = importlib.import_module('seqeval.scheme')
scheme = getattr(scheme_module, scheme)
except AttributeError:
raise ValueError(f'Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {scheme}')
report = classification_report(y_true=references, y_pred=predictions, suffix=suffix, output_dict=True, scheme=scheme, mode=mode, sample_weight=sample_weight, zero_division=zero_division)
report.pop('macro avg')
report.pop('weighted avg')
overall_score = report.pop('micro avg')
scores = {type_name: {'precision': score['precision'], 'recall': score['recall'], 'f1': score['f1-score'], 'number': score['support']} for (type_name, score) in report.items()}
scores['overall_precision'] = overall_score['precision']
scores['overall_recall'] = overall_score['recall']
scores['overall_f1'] = overall_score['f1-score']
scores['overall_accuracy'] = accuracy_score(y_true=references, y_pred=predictions)
return scores
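# Illustrative usage sketch (not part of the original file). It assumes the
# `seqeval` package is installed, shows the tag-sequence input format this
# metric expects (one list of IOB tags per sentence), and mirrors the seqeval
# calls made inside `_compute`.
if __name__ == '__main__':
    from seqeval.metrics import accuracy_score, classification_report

    references = [['B-PER', 'I-PER', 'O', 'B-LOC'], ['O', 'B-ORG', 'I-ORG']]
    predictions = [['B-PER', 'I-PER', 'O', 'B-LOC'], ['O', 'B-ORG', 'O']]

    report = classification_report(y_true=references, y_pred=predictions, output_dict=True)
    print('micro avg:', report['micro avg'])
    print('token accuracy:', accuracy_score(y_true=references, y_pred=predictions))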
|
class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        self.label_idx = label_idx
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
if isinstance(mode, Split):
mode = mode.value
file_path = os.path.join(data_dir, f'{mode}.txt')
guid_index = 1
examples = []
with open(file_path, encoding='utf-8') as f:
words = []
labels = []
for line in f:
if (line.startswith('-DOCSTART-') or (line == '') or (line == '\n')):
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}', words=words, labels=labels))
guid_index += 1
words = []
labels = []
else:
splits = line.split()
words.append(splits[0])
if (len(splits) > 1):
label = splits[self.label_idx].replace('\n', '')
labels.append((('I-' + label[2:]) if (label != 'O') else label))
else:
labels.append('O')
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}', words=words, labels=labels))
return examples
def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
example_id = 0
for line in test_input_reader:
if (line.startswith('-DOCSTART-') or (line == '') or (line == '\n')):
writer.write(line)
if (not preds_list[example_id]):
example_id += 1
elif preds_list[example_id]:
output_line = (((line.split()[0] + ' ') + preds_list[example_id].pop(0)) + '\n')
writer.write(output_line)
else:
logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])
def get_labels(self, path: str) -> List[str]:
if path:
with open(path, 'r') as f:
labels = f.read().splitlines()
if ('O' not in labels):
labels = (['O'] + labels)
return labels
else:
return ['O', 'B-MISC', 'I-MISC', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC']
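# Illustrative usage sketch (not part of the original file). It writes a tiny
# CoNLL-style file (one "token label" pair per line, blank line between
# sentences) into a temporary directory and reads it back with the NER task.
# Note that this reader coerces every non-O label to the "I-" prefix.
if __name__ == '__main__':
    import tempfile

    with tempfile.TemporaryDirectory() as data_dir:
        with open(os.path.join(data_dir, 'train.txt'), 'w', encoding='utf-8') as f:
            f.write('John B-PER\nlives O\nin O\nBerlin B-LOC\n\n')
        examples = NER().read_examples_from_file(data_dir, 'train')
        print(examples[0].words)   # ['John', 'lives', 'in', 'Berlin']
        print(examples[0].labels)  # ['I-PER', 'O', 'O', 'I-LOC']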
|
@dataclass
class InputExample():
    """
    A single training/test example for token classification.

    Args:
        guid: Unique id for the example.
        words: list. The words of the sequence.
        labels: (Optional) list. The labels for each word of the sequence. This should be
            specified for train and dev examples, but not for test examples.
    """
guid: str
words: List[str]
labels: Optional[List[str]]
|
@dataclass
class InputFeatures():
    """
    A single set of features of data.
    Property names are the same names as the corresponding inputs to a model.
    """
input_ids: List[int]
attention_mask: List[int]
token_type_ids: Optional[List[int]] = None
label_ids: Optional[List[int]] = None
|
class Split(Enum):
train = 'train'
dev = 'dev'
test = 'test'
|
class TokenClassificationTask():
def read_examples_from_file(self, data_dir, mode: Union[(Split, str)]) -> List[InputExample]:
raise NotImplementedError
def get_labels(self, path: str) -> List[str]:
raise NotImplementedError
    def convert_examples_to_features(self, examples: List[InputExample], label_list: List[str], max_seq_length: int,
                                     tokenizer: PreTrainedTokenizer, cls_token_at_end=False, cls_token='[CLS]',
                                     cls_token_segment_id=1, sep_token='[SEP]', sep_token_extra=False, pad_on_left=False,
                                     pad_token=0, pad_token_segment_id=0, pad_token_label_id=-100,
                                     sequence_a_segment_id=0, mask_padding_with_zero=True) -> List[InputFeatures]:
        """Loads a data file into a list of `InputFeatures`.
        `cls_token_at_end` defines the location of the CLS token:
            - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
            - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
        `cls_token_segment_id` defines the segment id associated with the CLS token (0 for BERT, 2 for XLNet).
        """
label_map = {label: i for (i, label) in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ((ex_index % 10000) == 0):
logger.info('Writing example %d of %d', ex_index, len(examples))
tokens = []
label_ids = []
for (word, label) in zip(example.words, example.labels):
word_tokens = tokenizer.tokenize(word)
if (len(word_tokens) > 0):
tokens.extend(word_tokens)
label_ids.extend(([label_map[label]] + ([pad_token_label_id] * (len(word_tokens) - 1))))
special_tokens_count = tokenizer.num_special_tokens_to_add()
if (len(tokens) > (max_seq_length - special_tokens_count)):
tokens = tokens[:(max_seq_length - special_tokens_count)]
label_ids = label_ids[:(max_seq_length - special_tokens_count)]
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
tokens += [sep_token]
label_ids += [pad_token_label_id]
segment_ids = ([sequence_a_segment_id] * len(tokens))
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
tokens = ([cls_token] + tokens)
label_ids = ([pad_token_label_id] + label_ids)
segment_ids = ([cls_token_segment_id] + segment_ids)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = ([(1 if mask_padding_with_zero else 0)] * len(input_ids))
padding_length = (max_seq_length - len(input_ids))
if pad_on_left:
input_ids = (([pad_token] * padding_length) + input_ids)
input_mask = (([(0 if mask_padding_with_zero else 1)] * padding_length) + input_mask)
segment_ids = (([pad_token_segment_id] * padding_length) + segment_ids)
label_ids = (([pad_token_label_id] * padding_length) + label_ids)
else:
input_ids += ([pad_token] * padding_length)
input_mask += ([(0 if mask_padding_with_zero else 1)] * padding_length)
segment_ids += ([pad_token_segment_id] * padding_length)
label_ids += ([pad_token_label_id] * padding_length)
assert (len(input_ids) == max_seq_length)
assert (len(input_mask) == max_seq_length)
assert (len(segment_ids) == max_seq_length)
assert (len(label_ids) == max_seq_length)
if (ex_index < 5):
logger.info('*** Example ***')
logger.info('guid: %s', example.guid)
logger.info('tokens: %s', ' '.join([str(x) for x in tokens]))
logger.info('input_ids: %s', ' '.join([str(x) for x in input_ids]))
logger.info('input_mask: %s', ' '.join([str(x) for x in input_mask]))
logger.info('segment_ids: %s', ' '.join([str(x) for x in segment_ids]))
logger.info('label_ids: %s', ' '.join([str(x) for x in label_ids]))
if ('token_type_ids' not in tokenizer.model_input_names):
segment_ids = None
features.append(InputFeatures(input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids))
return features
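# Illustrative usage sketch (not part of the original file). It assumes the
# `transformers` package is available, downloads a BERT tokenizer, and defines
# the module-level `logger` that the method expects (not shown in this excerpt)
# before converting a single InputExample into padded, fixed-length features.
if __name__ == '__main__':
    import logging
    from transformers import AutoTokenizer

    logger = logging.getLogger(__name__)
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    task = TokenClassificationTask()
    example = InputExample(guid='demo-1', words=['Hello', 'Berlin'], labels=['O', 'B-LOC'])
    features = task.convert_examples_to_features(
        [example], label_list=['O', 'B-LOC', 'I-LOC'], max_seq_length=16, tokenizer=tokenizer)
    print(features[0].input_ids)
    print(features[0].label_ids)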
|
def main():
    """
    Process text data and evaluate the segmentation results for different datasets.

    This script uses the `TextPreProcessor` class from the `ekphrasis` package to preprocess and tokenize text data.
    It retrieves various datasets containing hashtags and their corresponding gold-standard segmentations.

    The datasets used in this script include the BOUN and STAN-Dev datasets, the HashSet Distant-sampled dataset,
    and the HashSet Manual dataset. For each dataset, the script reads the data, preprocesses the hashtags,
    computes the segmentation, and evaluates the results using the `evaluate_df` function.

    The evaluation results for each dataset are printed to the console.
    """
    text_processor = TextPreProcessor(
        normalize=['url', 'email', 'percent', 'money', 'phone', 'user', 'time', 'date', 'number'],
        annotate={'hashtag', 'allcaps', 'elongated', 'repeated', 'emphasis', 'censored'},
        fix_html=True,
        segmenter='twitter',
        corrector='twitter',
        unpack_hashtags=True,
        unpack_contractions=True,
        spell_correct_elong=False,
        tokenizer=SocialTokenizer(lowercase=True).tokenize,
        dicts=[emoticons])
    dataset_dict = {
        'BOUN': 'https://raw.githubusercontent.com/prashantkodali/HashSet/master/datasets/boun-celebi-et-al.csv',
        'STAN-Dev': 'https://raw.githubusercontent.com/prashantkodali/HashSet/master/datasets/stan-dev-celebi-etal.csv',
    }
for (key, value) in dataset_dict.items():
df = pd.read_csv(value, header=None, names=['characters', 'gold'])
df['segmentation'] = df['characters'].apply((lambda x: ' '.join(text_processor.pre_process_doc(('#' + x))[1:(- 1)])))
result = evaluate_df(df, gold_field='gold', segmentation_field='segmentation')
print(key, result)
dataset_dict = {'Distant-sampled': 'https://raw.githubusercontent.com/prashantkodali/HashSet/master/datasets/hashset/HashSet-Distant-sampled.csv'}
for (key, value) in dataset_dict.items():
df = pd.read_csv(value, sep=',')
df_1 = df[['Unsegmented_hashtag', 'Segmented_hashtag']]
df_1 = df_1.rename(columns={'Unsegmented_hashtag': 'characters', 'Segmented_hashtag': 'gold'})
df_2 = df[['Unsegmented_hashtag_lowerCase', 'Segmented_hashtag_lowerCase']]
df_2 = df_2.rename(columns={'Unsegmented_hashtag_lowerCase': 'characters', 'Segmented_hashtag_lowerCase': 'gold'})
df_1['segmentation'] = df_1['characters'].apply((lambda x: ' '.join(text_processor.pre_process_doc(('#' + x))[1:(- 1)])))
df_2['segmentation'] = df_2['characters'].apply((lambda x: ' '.join(text_processor.pre_process_doc(('#' + x))[1:(- 1)])))
result_1 = evaluate_df(df_1, gold_field='gold', segmentation_field='segmentation')
result_2 = evaluate_df(df_2, gold_field='gold', segmentation_field='segmentation')
print('HashSet sample', result_1)
print('Hashset sample, lowercase', result_2)
dataset_dict = {'Manual': 'https://raw.githubusercontent.com/prashantkodali/HashSet/master/datasets/hashset/HashSet-Manual.csv'}
for (key, value) in dataset_dict.items():
df = pd.read_csv(value, sep=',')
df = df[['Hashtag', 'Final Segmentation']]
df = df.rename(columns={'Hashtag': 'characters', 'Final Segmentation': 'gold'})
df['segmentation'] = df['characters'].apply((lambda x: ' '.join(text_processor.pre_process_doc(('#' + x))[1:(- 1)])))
result = evaluate_df(df, gold_field='gold', segmentation_field='segmentation')
print('HashSet Manual', result)
|
class Beamsearch(ModelLM):
def __init__(self, model_name_or_path='gpt2', model_type='gpt2', device='cuda', gpu_batch_size=1000):
'\n Initializes the Beamsearch class.\n\n Args:\n model_name_or_path (str): Name of the model or path to the model to be loaded. Default is "gpt2".\n model_type (str): Type of the model. Default is "gpt2".\n device (str): Device to be used for computation. Default is \'cuda\'.\n gpu_batch_size (int): Size of the batch to be processed on the GPU. Default is 1000.\n '
super().__init__(model_name_or_path=model_name_or_path, model_type=model_type, device=device, gpu_batch_size=gpu_batch_size)
def next_step(self, list_of_candidates):
'\n Generates the next possible candidates.\n\n Args:\n list_of_candidates (List[str]): List of current candidate strings.\n \n Returns:\n List[str]: List of possible next candidates.\n '
output = []
for candidate_string in list_of_candidates:
candidates = [(((candidate_string[:pos] + ' ') + candidate_string[pos:]) if pos else candidate_string) for pos in range(len(candidate_string))]
candidates = list(filter((lambda x: (not re.findall('.*?(?=\\s{2})', x))), candidates))
output.extend(candidates)
return output
def update_probabilities(self, tree, prob_dict):
'\n Updates the probabilities in the given probability dictionary.\n\n Args:\n tree (List[str]): List of candidate strings.\n prob_dict (dict): Dictionary of probabilities of the candidates.\n \n Returns:\n dict: Updated probability dictionary.\n '
for item in tree:
current_batch = []
for word in item:
if (word in prob_dict):
continue
else:
current_batch.append(word)
if current_batch:
current_batch_probs = self.model.get_probs(current_batch)
for (idx, word) in enumerate(current_batch):
prob_dict[word] = current_batch_probs[idx]
return prob_dict
def reshape_tree(self, tree, measure):
'\n Reshapes the tree according to the provided measure.\n\n Args:\n tree (List[str]): List of candidate strings.\n measure (int): Measure to reshape the tree.\n \n Returns:\n List[List[str]]: Reshaped tree.\n '
return [tree[x:(x + measure)] for x in range(0, len(tree), measure)]
def flatten_list(self, list_):
'\n Flattens a nested list.\n\n Args:\n list_ (List[List[Any]]): Nested list to be flattened.\n \n Returns:\n List[Any]: Flattened list.\n '
return [item for sublist in list_ for item in sublist]
def trim_tree(self, tree, prob_dict, topk):
'\n Trims the tree to the top k candidates.\n\n Args:\n tree (List[str]): List of candidate strings.\n prob_dict (dict): Dictionary of probabilities of the candidates.\n topk (int): Number of top candidates to be retained.\n \n Returns:\n List[str]: List of top k candidates.\n '
output = []
probs = [prob_dict[x] for x in tree]
candidates = [Node(item, item.replace(' ', ''), probs[idx]) for (idx, item) in enumerate(tree)]
for (key, group) in itertools.groupby(candidates, key=(lambda x: x.characters)):
sorted_group = sorted(list(group), key=(lambda x: x.score))
trimmed_group = sorted_group[0:topk]
trimmed_group = [x.hypothesis for x in trimmed_group]
output.extend(trimmed_group)
return output
def run(self, dataset, topk=20, steps=13):
'\n Runs the beamsearch algorithm on the provided dataset.\n\n Args:\n dataset (List[str]): List of initial candidate strings.\n topk (int): Number of top candidates to be retained in each step. Default is 20.\n steps (int): Number of steps to run the algorithm. Default is 13.\n \n Returns:\n ProbabilityDictionary: Dictionary of final probabilities of the candidates.\n '
tree = dataset
prob_dict = {}
for i in range(steps):
tree = self.next_step(tree)
tree = self.reshape_tree(tree, self.gpu_batch_size)
prob_dict = self.update_probabilities(tree, prob_dict)
tree = self.flatten_list(tree)
tree = self.trim_tree(tree, prob_dict, topk)
return ProbabilityDictionary(prob_dict)
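# Illustrative sketch (not part of the original file). Instantiating Beamsearch
# downloads GPT-2 and expects a CUDA device, so this only demonstrates the
# candidate expansion step in isolation: a space may be inserted at each
# position of every string, and candidates with double spaces are filtered out.
if __name__ == '__main__':
    expander = Beamsearch.__new__(Beamsearch)  # skip model loading for the demo
    print(expander.next_step(['hashtag']))
    # ['hashtag', 'h ashtag', 'ha shtag', 'has htag', 'hash tag', 'hasht ag', 'hashta g']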
|
class BertLM(MiniconsLM):
    """
    Implements a BERT-based language model scorer, to compute sentence probabilities.
    This class uses a transformer-based Masked Language Model (MLM) for scoring.

    Args:
        model_name_or_path (str): Identifier for the model to be loaded, which can be a model
            name or the path to the directory where the model is stored.
        gpu_batch_size (int, optional): The size of the batch to be processed on the GPU.
            Defaults to 1.
        gpu_id (int, optional): Identifier of the GPU device to be used. Defaults to 0.
    """
def __init__(self, model_name_or_path, gpu_batch_size=1, gpu_id=0):
super().__init__(model_name_or_path=model_name_or_path, device='cuda', gpu_batch_size=gpu_batch_size, model_type='MaskedLMScorer')
|
@dataclass
class Node():
    """A dataclass for representing a Node in a segmentation task.

    Attributes:
        hypothesis (str): The hypothesis segmentation of the hashtag.
        characters (str): The characters in the hashtag.
        score (float): The score assigned to the segmentation.
    """
hypothesis: str
characters: str
score: float
|
@dataclass
class ProbabilityDictionary(object):
    """A dataclass for managing a dictionary with probability values.

    Attributes:
        dictionary (dict): The dictionary object that this class wraps around.
    """
dictionary: dict
def get_segmentations(self, astype='dict', gold_array=None):
'Fetches the segmentations from the ProbabilityDictionary.\n\n Args:\n astype (str, optional): The type of the output. Options include \'dict\' and \'list\'. Default is \'dict\'.\n gold_array (list, optional): An array of "gold standard" segmentations.\n\n Returns:\n dict/list: The segmentations, either as a dictionary or a list, depending on \'astype\'.\n '
top_1 = self.get_top_k(k=1)
if (gold_array and (astype == 'list')):
gold_df = pd.DataFrame([{'gold': x, 'characters': x.replace(' ', '')} for x in gold_array])
seg_df = pd.DataFrame([{'segmentation': x, 'characters': x.replace(' ', '')} for x in top_1])
output_df = pd.merge(gold_df, seg_df, how='left', on='characters')
output_series = output_df['segmentation'].values.tolist()
output_series = [str(x) for x in output_series]
return output_series
if (astype == 'dict'):
return {k.replace(' ', ''): k for (k, v) in top_1.items()}
elif (astype == 'list'):
return list(top_1.keys())
def get_top_k(self, k=2, characters_field='characters', segmentation_field='segmentation', score_field='score', return_dataframe=False, fill=False):
"Fetches the top-k segmentations based on their scores.\n\n Args:\n k (int, optional): The number of top segmentations to fetch. Default is 2.\n characters_field (str, optional): The name of the 'characters' field. Default is 'characters'.\n segmentation_field (str, optional): The name of the 'segmentation' field. Default is 'segmentation'.\n score_field (str, optional): The name of the 'score' field. Default is 'score'.\n return_dataframe (bool, optional): Whether to return a DataFrame or not. Default is False.\n fill (bool, optional): Whether to fill missing values or not. Default is False.\n\n Returns:\n DataFrame/dict: The top-k segmentations, either as a DataFrame or a dictionary, depending on 'return_dataframe'.\n \n Raises:\n NotImplementedError: If 'fill' is True and 'return_dataframe' is False.\n "
df = self.to_dataframe(characters_field=characters_field, segmentation_field=segmentation_field, score_field=score_field)
df = df.sort_values(by=score_field, ascending=True).groupby(characters_field).head(k)
        if not fill and return_dataframe:
            return df
        elif fill and return_dataframe:
            df['group_length'] = df.groupby(characters_field)[segmentation_field].transform(len)
            df['group_length'] = (k - df['group_length']) + 1
            len_array = df['group_length'].values
            df = df.drop(columns=['group_length'])
            records = np.array(df.to_dict('records'))
            cloned_records = list(np.repeat(records, len_array))
            df = pd.DataFrame(cloned_records)
            return df
        elif not fill and not return_dataframe:
            keys = df[segmentation_field].values
            values = df[score_field].values
            return dict(zip(keys, values))
        elif fill and not return_dataframe:
            raise NotImplementedError
def to_dataframe(self, characters_field='characters', segmentation_field='segmentation', score_field='score'):
"Converts the ProbabilityDictionary to a DataFrame.\n\n Args:\n characters_field (str, optional): The name of the 'characters' field. Default is 'characters'.\n segmentation_field (str, optional): The name of the 'segmentation' field. Default is 'segmentation'.\n score_field (str, optional): The name of the 'score' field. Default is 'score'.\n\n Returns:\n DataFrame: The DataFrame representation of the ProbabilityDictionary.\n "
df = [{characters_field: key.replace(' ', ''), segmentation_field: key, score_field: value} for (key, value) in self.dictionary.items()]
df = pd.DataFrame(df)
df = df.sort_values(by=[characters_field, score_field])
return df
def to_csv(self, filename, characters_field='characters', segmentation_field='segmentation', score_field='score'):
"Exports the ProbabilityDictionary to a CSV file.\n\n Args:\n filename (str): The name of the CSV file.\n characters_field (str, optional): The name of the 'characters' field. Default is 'characters'.\n segmentation_field (str, optional): The name of the 'segmentation' field. Default is 'segmentation'.\n score_field (str, optional): The name of the 'score' field. Default is 'score'.\n "
df = self.to_dataframe(characters_field=characters_field, segmentation_field=segmentation_field, score_field=score_field)
df.to_csv(filename)
def to_json(self, filepath):
'Exports the ProbabilityDictionary to a JSON file.\n\n Args:\n filepath (str): The path of the JSON file.\n '
with open(filepath, 'w') as f:
json.dump(self.dictionary, f)
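# Illustrative usage sketch (not part of the original file): lower scores are
# treated as better, so get_top_k / get_segmentations pick the lowest-scoring
# segmentation for each set of characters.
if __name__ == '__main__':
    demo = ProbabilityDictionary({'hash tag': 0.2, 'hashtag': 0.9, 'hasht ag': 1.5})
    print(demo.get_top_k(k=1))                    # {'hash tag': 0.2}
    print(demo.get_segmentations(astype='dict'))  # {'hashtag': 'hash tag'}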
|
def enforce_prob_dict(dictionary, score_field='score', segmentation_field='segmentation'):
"Enforces that the input dictionary is a ProbabilityDictionary.\n\n This function takes a dictionary-like object and converts it to a ProbabilityDictionary, \n if it's not already one. It can handle dict objects, lists of strings, and DataFrames.\n\n Args:\n dictionary (dict/list/DataFrame): The input dictionary-like object.\n score_field (str, optional): The name of the 'score' field. Default is 'score'.\n segmentation_field (str, optional): The name of the 'segmentation' field. Default is 'segmentation'.\n\n Returns:\n ProbabilityDictionary: The enforced ProbabilityDictionary.\n\n Raises:\n NotImplementedError: If the input dictionary-like object is of an unsupported type.\n "
if isinstance(dictionary, ProbabilityDictionary):
return dictionary
elif isinstance(dictionary, dict):
return ProbabilityDictionary(dictionary)
elif (isinstance(dictionary, list) and all((isinstance(x, str) for x in dictionary))):
dct = {k: 0.0 for k in list(set(dictionary))}
return ProbabilityDictionary(dct)
elif isinstance(dictionary, pd.DataFrame):
df = dictionary
df_scores = df[score_field].values.tolist()
df_segs = df[segmentation_field].values.tolist()
dct = {k: v for (k, v) in list(zip(df_segs, df_scores))}
return ProbabilityDictionary(dct)
else:
raise NotImplementedError
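# Illustrative sketch (not part of the original file): the helper accepts plain
# dicts, lists of strings (which get a 0.0 score), or DataFrames, and always
# hands back a ProbabilityDictionary.
if __name__ == '__main__':
    print(enforce_prob_dict({'hash tag': 0.2}).dictionary)
    print(enforce_prob_dict(['hash tag', 'big data']).dictionary)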
|
class GPT2LM(MiniconsLM):
"A Language Model (LM) scorer using GPT2.\n\n This class utilizes the PaddedGPT2LMScorer for scoring sentences.\n\n Args:\n model_name_or_path (str): Name or path of the model to be used.\n device (str): The device to run the model on. Default is 'cuda'.\n gpu_batch_size (int): The batch size for GPU processing. Default is 20.\n "
def __init__(self, model_name_or_path, device='cuda', gpu_batch_size=20):
super().__init__(model_name_or_path=model_name_or_path, device=device, gpu_batch_size=gpu_batch_size, model_type='IncrementalLMScorer')
|
class MiniconsLM(object):
def __init__(self, model_name_or_path, device='cuda', gpu_batch_size=20, model_type='IncrementalLMScorer'):
self.scorer = getattr(scorer, model_type)(model_name_or_path, device)
self.gpu_batch_size = gpu_batch_size
self.model_type = model_type
def get_probs(self, list_of_candidates):
probs = []
dl = DataLoader(list_of_candidates, batch_size=self.gpu_batch_size)
for batch in dl:
probs.extend(self.get_batch_scores(batch))
return probs
def incremental_sequence_score(self, batch):
tokens = self.scorer.prepare_text(batch, bos_token=True, eos_token=True)
stats = self.scorer.compute_stats(tokens, prob=True)
log_stats = [[math.log(x) for x in sequence] for sequence in stats]
sum_log_stats = [sum(x) for x in log_stats]
pos_sum_log_stats = [(1 - x) for x in sum_log_stats]
return pos_sum_log_stats
def get_batch_scores(self, batch):
if (self.model_type == 'IncrementalLMScorer'):
return self.incremental_sequence_score(batch)
elif (self.model_type == 'MaskedLMScorer'):
return self.scorer.sequence_score(batch, reduction=(lambda x: x.sum(0).item()))
elif (self.model_type == 'Seq2SeqScorer'):
return self.scorer.sequence_score(batch, source_format='blank')
else:
warnings.warn(f'Model type {self.model_type} not implemented. Assuming reduction = lambda x: x.sum(0).item()')
return self.scorer.sequence_score(batch, reduction=(lambda x: x.sum(0).item()))
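# Illustrative usage sketch (not part of the original file). It assumes the
# `minicons` package (imported above as `scorer`) and `torch`'s DataLoader are
# available and downloads GPT-2; running on 'cpu' keeps the example GPU-free.
# Lower scores correspond to more probable sequences here.
if __name__ == '__main__':
    lm = MiniconsLM('gpt2', device='cpu', gpu_batch_size=2, model_type='IncrementalLMScorer')
    print(lm.get_probs(['hash tag', 'hasht ag']))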
|
class ModelLM(object):
"\n A Language Model (LM) class that supports both GPT2 and BERT models.\n\n This class acts as a wrapper around the GPT2LM and BertLM classes, providing\n a unified interface for interacting with either type of model. The specific\n type of model to use is determined by the 'model_type' argument provided during\n initialization.\n\n Args:\n model_name_or_path (str, optional): The name or path of the pre-trained model.\n model_type (str, optional): The type of the model to use. Should be either 'gpt2' or 'bert'.\n device (str, optional): The device on which to run the computations. Defaults to None which implies CPU.\n gpu_batch_size (int, optional): The batch size to use when performing computations on the GPU.\n gpu_id (int, optional): The ID of the GPU to use. Only relevant if 'model_type' is 'bert'. Default is 0.\n\n Raises:\n ValueError: If an unsupported 'model_type' is provided.\n "
def __init__(self, model_name_or_path=None, model_type=None, device=None, gpu_batch_size=None, gpu_id=0):
self.gpu_batch_size = gpu_batch_size
if (model_type is None):
self.model = None
elif (model_type == 'gpt2'):
self.model = GPT2LM(model_name_or_path, device=device, gpu_batch_size=gpu_batch_size)
elif (model_type == 'bert'):
self.model = BertLM(model_name_or_path, gpu_batch_size=gpu_batch_size, gpu_id=gpu_id)
elif (model_type == 'seq2seq'):
self.model = MiniconsLM(model_name_or_path, device=device, gpu_batch_size=gpu_batch_size, model_type='Seq2SeqScorer')
elif (model_type == 'masked'):
self.model = MiniconsLM(model_name_or_path, device=device, gpu_batch_size=gpu_batch_size, model_type='MaskedLMScorer')
elif (model_type == 'incremental'):
self.model = MiniconsLM(model_name_or_path, device=device, gpu_batch_size=gpu_batch_size, model_type='IncrementalLMScorer')
else:
self.model = MiniconsLM(model_name_or_path, device=device, gpu_batch_size=gpu_batch_size, model_type=model_type)
|
class Reranker(ModelLM):
'\n A class that inherits from the ModelLM class and specializes in re-ranking given data.\n\n This class provides a method to re-rank data using a particular language model, which is determined\n during initialization from the parent ModelLM class. The re-ranking is performed based on the \n probabilities produced by the language model.\n\n Args:\n model_name_or_path (str, optional): The name or path of the pre-trained model. Default is "bert-base-cased".\n model_type (str, optional): The type of the model to use. Default is "bert".\n gpu_batch_size (int, optional): The batch size to use when performing computations on the GPU. Default is 1000.\n gpu_id (int, optional): The ID of the GPU to use. Default is 0.\n device (str, optional): The device on which to run the computations. Default is "cuda".\n\n '
def __init__(self, model_name_or_path='bert-base-cased', model_type='bert', gpu_batch_size=1000, gpu_id=0, device='cuda'):
super().__init__(model_name_or_path=model_name_or_path, model_type=model_type, device=device, gpu_batch_size=gpu_batch_size, gpu_id=gpu_id)
def rerank(self, data):
"\n Reranks the given data using the language model.\n\n The data is first converted to a ProbabilityDictionary, and then the model's probabilities \n for each candidate in the data are computed. The results are returned as a new \n ProbabilityDictionary.\n\n Args:\n data (dict or ProbabilityDictionary): The data to be reranked. If a dict is provided, it is \n converted to a ProbabilityDictionary.\n\n Returns:\n ProbabilityDictionary: A ProbabilityDictionary of the reranked data, where the keys are \n the candidates from the input data and the values are the scores computed by the \n language model.\n "
input_data = enforce_prob_dict(data)
candidates = list(input_data.dictionary.keys())
scores = self.model.get_probs(candidates)
rank = {k: v for (k, v) in list(zip(candidates, scores))}
return ProbabilityDictionary(rank)
|
def run_ensemble(a_diff, b_diff, a_rank, b_rank, alpha=0.0, beta=0.0):
"\n Computes the ensemble output using given differences and ranks with weights alpha and beta.\n\n Args:\n a_diff (array-like): Differences corresponding to 'a'.\n b_diff (array-like): Differences corresponding to 'b'.\n a_rank (array-like): Ranks corresponding to 'a'.\n b_rank (array-like): Ranks corresponding to 'b'.\n alpha (float, optional): The weight for 'a_diff'. Default is 0.0.\n beta (float, optional): The weight for 'b_diff'. Default is 0.0.\n\n Returns:\n array-like: An array-like object representing the ensemble output.\n "
delta = ((alpha * a_diff) - (beta * b_diff))
decision = (delta < 0).astype(int)
negation = (~ (delta < 0)).astype(int)
output = ((a_rank * negation) + (b_rank * decision))
return output
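# Illustrative sketch (not part of the original file): wherever alpha * a_diff
# is smaller than beta * b_diff the ensemble falls back to b_rank, otherwise it
# keeps a_rank.
if __name__ == '__main__':
    import numpy as np

    a_diff = np.array([0.5, 0.1])
    b_diff = np.array([0.2, 0.9])
    a_rank = np.array([0, 1])
    b_rank = np.array([1, 0])
    print(run_ensemble(a_diff, b_diff, a_rank, b_rank, alpha=0.2, beta=0.1))  # [0 0]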
|
def top2_ensemble(dict_1, dict_2, alpha=0.2, beta=0.1):
"\n Computes the ensemble of two given dictionaries using the specified weights alpha and beta.\n\n Args:\n dict_1 (dict): The first input dictionary.\n dict_2 (dict): The second input dictionary.\n alpha (float, optional): The weight for differences in 'dict_1'. Default is 0.2.\n beta (float, optional): The weight for differences in 'dict_2'. Default is 0.1.\n\n Returns:\n DataFrame: A pandas DataFrame representing the ensemble of the two input dictionaries.\n "
a = enforce_prob_dict(dict_1).to_dataframe(characters_field='hashtag')
b = enforce_prob_dict(dict_2).to_dataframe(characters_field='hashtag')
ensemble_df = build_ensemble_df(a, b)
ref_diff = ensemble_df['diff'].values
aux_diff = ensemble_df['diff_2'].values
ref_rank = ensemble_df['rank'].values
aux_rank = ensemble_df['rank_2'].values
ensemble_df['ensemble_rank'] = run_ensemble(ref_diff, aux_diff, ref_rank, aux_rank, alpha=alpha, beta=beta)
return ensemble_df
|
class Top2_Ensembler(object):
'\n A class that provides a method to run the ensemble of a segmenter run and a reranker run.\n\n Args:\n None\n '
def __init__(self):
pass
def run(self, segmenter_run, reranker_run, alpha=0.222, beta=0.111):
"\n Runs the ensemble of a segmenter run and a reranker run.\n\n Args:\n segmenter_run (dict or ProbabilityDictionary): The result of a segmenter run.\n reranker_run (dict or ProbabilityDictionary): The result of a reranker run.\n alpha (float, optional): The weight for differences in 'segmenter_run'. Default is 0.222.\n beta (float, optional): The weight for differences in 'reranker_run'. Default is 0.111.\n\n Returns:\n ProbabilityDictionary: A ProbabilityDictionary representing the ensemble of the segmenter and reranker runs.\n "
ensemble = top2_ensemble(segmenter_run, reranker_run, alpha=alpha, beta=beta)
ensemble_prob_dict = enforce_prob_dict(ensemble, score_field='ensemble_rank')
return ensemble_prob_dict
|
class Modeler(object):
hashtagSegmentor = None
t = 0
totals = 0
totalh = 0
p = 0
r = 0
n = 0
modelerParams = {}
def __init__(self):
pass
def loadParameters(self, args):
leftoverArgs = []
for arg in args:
if (self.loadParameter(arg) == False):
leftoverArgs.append(arg)
return leftoverArgs
def loadParameter(self, param):
pass
def getRunCode(self):
return ''
def train(self, featureFile):
pass
def segmentHashtag(self, hashtag):
pass
def segmentFile(self, fileToSegment, featureFileName, params):
pass
    def calculateScore(self, testFile, featureFileName, params):
pass
def loadModelerParams(self, params):
pass
def test(self, testFile, featureFileName, params):
(acc, precision, recall, fscore) = self.calculateScore(testFile, featureFileName, params)
print(('MAXENT ACC %f PRE %f REC %f F1 %f\n' % (acc, precision, recall, fscore)))
def isFeatureOn(self, feature):
return False
def reset(self):
self.t = 0
self.totals = 0
self.totalh = 0
self.p = 0
self.r = 0
self.n = 0
def countEntry(self, segmented, trueSegmentation):
sw = segmented.split(' ')
hw = trueSegmentation.split(' ')
for s in sw:
for h in hw:
if (s == h):
self.p = (self.p + 1)
break
for h in hw:
for s in sw:
if (s == h):
self.r = (self.r + 1)
break
self.totals = (self.totals + len(sw))
self.totalh = (self.totalh + len(hw))
self.n += 1
if (segmented == trueSegmentation):
self.t += 1
def calculatePrecision(self):
if (self.totals > 0):
return (float((self.p * 100)) / float(self.totals))
return 0
def calculateRecall(self):
if (self.totalh > 0):
return (float((self.r * 100)) / float(self.totalh))
return 0
def calculateFScore(self):
precision = self.calculatePrecision()
recall = self.calculateRecall()
if ((precision + recall) > 0):
return (((2 * precision) * recall) / (precision + recall))
return 0
def calculateAccuracy(self):
if (self.n > 0):
return (float((100 * self.t)) / float(self.n))
return 0
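# Illustrative sketch (not part of the original file): countEntry accumulates
# word-level matches. After the two hashtags below, 3 of 5 predicted words and
# 3 of 4 gold words match, giving precision 60.0, recall 75.0, and an
# exact-match accuracy of 50.0.
if __name__ == '__main__':
    m = Modeler()
    m.countEntry('hash tag', 'hash tag')                      # exact match
    m.countEntry('big data science', 'bigdata science')       # partial match
    print(m.calculatePrecision(), m.calculateRecall(), m.calculateFScore(), m.calculateAccuracy())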
|
def evaluate_dictionary(data, gold, n=10):
"\n Evaluates the input data dictionary against a gold standard using various metrics. \n It computes these metrics for the top 'n' entries of the data.\n\n Args:\n data (dict or ProbabilityDictionary): The input data to be evaluated.\n gold (list): The gold standard list of strings to compare against.\n n (int, optional): The number of top entries from the data to be considered for evaluation. Default is 10.\n\n Returns:\n dict: A dictionary containing the computed metrics (F1 score, accuracy, precision, recall) for each of the top 'n' entries.\n "
gold_dict = {}
for item in gold:
gold_dict.update({item.replace(' ', ''): item})
input_data = enforce_prob_dict(data)
final_metrics = {}
for i in range(1, (n + 1)):
df = input_data.get_top_k(k=i, characters_field='hashtag', segmentation_field='segmentation', score_field='score', return_dataframe=True)
df['gold'] = df['hashtag'].apply((lambda x: gold_dict[x]))
if (i > 1):
df['truth_field'] = df['gold'].combine(df['segmentation'], (lambda x, y: int((x == y))))
df = df.sort_values(by='truth_field', ascending=False)
df = df.groupby('gold').head(1)
records = df.to_dict('records')
modeler = Modeler()
for item in records:
modeler.countEntry(item['gold'], item['segmentation'])
metrics = {'f1': modeler.calculateFScore(), 'accuracy': modeler.calculateAccuracy(), 'precision': modeler.calculatePrecision(), 'recall': modeler.calculateRecall()}
if (i > 1):
metrics = {f'top_{i}_{key}': value for (key, value) in metrics.items()}
final_metrics.update(metrics)
return final_metrics
|
def evaluate_df(df, gold_field='gold', segmentation_field='segmentation'):
'\n Evaluates the given dataframe based on the gold_field and segmentation_field and returns various metric values.\n\n This function creates a new column "truth_value" in the dataframe by comparing gold_field and segmentation_field.\n It then sorts the dataframe by gold_field and "truth_value" and retains the first row for each gold_field group.\n The metrics calculated include F1 score, accuracy, recall and precision.\n\n Args:\n df (pandas.DataFrame): The dataframe to be evaluated.\n gold_field (str, optional): The field in the dataframe used as the \'truth\' field for evaluation. Defaults to "gold".\n segmentation_field (str, optional): The field in the dataframe used as the \'prediction\' field for evaluation. Defaults to "segmentation".\n \n Returns:\n dict: A dictionary containing F1 score, accuracy, recall, and precision metrics.\n '
evaluator = Modeler()
df['truth_value'] = df[gold_field].combine(df[segmentation_field], (lambda x, y: (x == y)))
df = df.sort_values(by=[gold_field, 'truth_value'], ascending=False).groupby(gold_field).head(1)
records = df.to_dict('records')
for row in records:
evaluator.countEntry(row[segmentation_field], row[gold_field])
metrics = {'f1': evaluator.calculateFScore(), 'acc': evaluator.calculateAccuracy(), 'recall': evaluator.calculateRecall(), 'precision': evaluator.calculatePrecision()}
return metrics
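# Illustrative sketch (not part of the original file): a two-row frame where
# one segmentation matches the gold exactly and the other does not, so the
# exact-match accuracy comes out at 50.0.
if __name__ == '__main__':
    import pandas as pd

    demo_df = pd.DataFrame({
        'gold': ['hash tag', 'big data'],
        'segmentation': ['hash tag', 'bigdata'],
    })
    print(evaluate_df(demo_df))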
|
def filter_top_k(input_df, k, gold_field='hashtag', score_field='score', segmentation_field='segmentation', fill=False):
'\n Filters the top k rows of the input_df for each group defined by the gold_field. \n\n The function sorts the input_df by score_field in ascending order and retains the first k rows for each group.\n If fill option is set to True, it also clones the records based on the length of each group. \n\n Args:\n input_df (pandas.DataFrame): The input dataframe to filter.\n k (int): The number of top records to retain for each group.\n gold_field (str, optional): The field used to define groups in the dataframe. Defaults to "hashtag".\n score_field (str, optional): The field used to sort the dataframe. Defaults to "score".\n segmentation_field (str, optional): The field used if the fill option is set to True. Defaults to "segmentation".\n fill (bool, optional): Whether to clone the records based on the length of each group. Defaults to False.\n\n Returns:\n pandas.DataFrame: The filtered dataframe.\n '
df = copy.deepcopy(input_df)
df = df.sort_values(by=score_field, ascending=True).groupby(gold_field).head(k)
if fill:
df['group_length'] = df.groupby(gold_field)[segmentation_field].transform(len)
        df['group_length'] = (k - df['group_length']) + 1
len_array = df['group_length'].values
df = df.drop(columns=['group_length'])
records = np.array(df.to_dict('records'))
cloned_records = list(np.repeat(records, len_array))
df = pd.DataFrame(cloned_records)
df = df.sort_values(by=score_field, ascending=True).groupby(gold_field).head(k)
length = df.groupby(gold_field).size().values
assert (length == k).all()
return df
|
def read_experiment_dataset(data, dataset, model):
'\n Reads and returns the dataset for a given model from a collection of datasets.\n\n The function filters the data based on the dataset and model parameters, converts the filtered data into a pandas \n DataFrame and returns it.\n\n Args:\n data (list of dicts): The collection of datasets. Each element is a dictionary which must contain "dataset" and \n "model" keys along with a "data" key which contains the actual data.\n dataset (str): The name of the dataset to read.\n model (str): The name of the model for which the dataset needs to be read.\n\n Returns:\n pandas.DataFrame: The selected dataset as a dataframe.\n '
selected_data = [x for x in data if ((x['dataset'] == dataset) and (x['model'] == model))][0]['data']
output = pd.DataFrame(selected_data)
return output
|
def project_scores(a, b, segmentation_field='segmentation', score_field='score'):
'\n Projects the score from dataframe \'b\' onto dataframe \'a\' based on the segmentation_field.\n\n It first creates a view of dataframe \'b\' with unique values of the segmentation_field.\n Then it merges this view with dataframe \'a\' and updates the score field in \'a\' with the score from \'b\'.\n The resulting dataframe is sorted by score in ascending order.\n\n Args:\n a (pandas.DataFrame): The dataframe onto which the scores are to be projected.\n b (pandas.DataFrame): The dataframe from which the scores are taken.\n segmentation_field (str, optional): The field based on which scores are projected. Defaults to "segmentation".\n score_field (str, optional): The field which contains the scores to be projected. Defaults to "score".\n\n Returns:\n pandas.DataFrame: The dataframe \'a\' with updated scores projected from dataframe \'b\'.\n '
b_view = b[[segmentation_field, score_field]].drop_duplicates(subset=[segmentation_field])
df = pd.merge(a, b_view, on=segmentation_field, how='left')
df = df.drop([(score_field + '_x')], axis=1)
df = df.rename(columns={(score_field + '_y'): score_field})
df = df.sort_values(by=score_field, ascending=True)
return df
|
def filter_and_project_scores(a, b, characters_field='hashtag', segmentation_field='segmentation'):
'\n Filters the top two records of the dataframe \'a\', projects the scores from dataframe \'b\' onto \'a\',\n and returns both the modified dataframes.\n\n Args:\n a (pandas.DataFrame): The first dataframe, which will be filtered and onto which the scores will be projected.\n b (pandas.DataFrame): The second dataframe, from which the scores are taken.\n characters_field (str, optional): The field used to sort the dataframes. Defaults to "hashtag".\n segmentation_field (str, optional): The field based on which scores are projected. Defaults to "segmentation".\n\n Returns:\n list of pandas.DataFrame: The modified dataframes \'a\' and \'b\' after filtering and projecting scores.\n '
models = copy.deepcopy([a, b])
for (idx, m) in enumerate(models):
models[idx] = models[idx].sort_values(by=[characters_field, segmentation_field])
models[0] = filter_top_k(models[0], 2, fill=True)
models[1] = project_scores(models[0], models[1])
for (idx, m) in enumerate(models):
models[idx] = models[idx].sort_values(by=[characters_field, segmentation_field]).reset_index(drop=True)
return models
|
def calculate_diff_scores(a, b, characters_field='hashtag', score_field='score'):
'\n Calculates the difference in scores between pairs of records in the dataframes \'a\' and \'b\'.\n\n Args:\n a (pandas.DataFrame): The first dataframe.\n b (pandas.DataFrame): The second dataframe.\n characters_field (str, optional): The field used to sort the dataframes. Defaults to "hashtag".\n score_field (str, optional): The field which contains the scores. Defaults to "score".\n\n Returns:\n list of pandas.DataFrame: The modified dataframes \'a\' and \'b\' with an additional \'diff\' column indicating the score difference.\n '
models = copy.deepcopy([a, b])
for (idx, m) in enumerate(models):
models[idx] = models[idx].sort_values(by=[characters_field, score_field])
score_pairs = models[idx][score_field].values.reshape((- 1), 2)
models[idx]['rank'] = score_pairs.argsort().flatten()
models[idx]['diff'] = np.repeat(np.subtract.reduce(score_pairs, axis=1).flatten(), 2)
models[idx]['diff'] = models[idx]['diff'].fillna(0.0)
return models
|
def build_ensemble_df(a, b):
"\n Builds an ensemble dataframe from the input dataframes 'a' and 'b'.\n\n It filters and projects the scores from 'b' onto 'a', calculates the score differences,\n and then merges the differences back into the 'a' dataframe. \n\n Args:\n a (pandas.DataFrame): The first dataframe.\n b (pandas.DataFrame): The second dataframe.\n\n Returns:\n pandas.DataFrame: The resulting ensemble dataframe with projected scores and score differences.\n "
models = filter_and_project_scores(a, b)
models = calculate_diff_scores(models[0], models[1])
for (idx, m) in enumerate(models):
models[idx]['diff'] = np.abs(models[idx]['diff'].values)
models[0]['diff_2'] = models[1]['diff']
models[0]['rank_2'] = models[1]['rank']
return models[0]
|
class TransformerWordSegmenter(BaseWordSegmenter):
    def __init__(self, segmenter_model_name_or_path='gpt2', segmenter_model_type='gpt2', segmenter_device='cuda',
                 segmenter_gpu_batch_size=1000, reranker_gpu_batch_size=1000, reranker_model_name_or_path=None,
                 reranker_model_type='bert', reranker_device='cuda'):
        """Word segmentation API initialization.
        A GPT-2 model must be passed to `segmenter_model_name_or_path`, and optionally a BERT model to `reranker_model_name_or_path`.
        If `reranker_model_name_or_path` is set to `False` or `None`, the word segmenter object will work without a reranker.

        Args:
            segmenter_model_name_or_path (str, optional): GPT-2 model that will be fetched from the Hugging Face Model Hub. Defaults to "gpt2".
            segmenter_model_type (str, optional): Transformer decoder model type. Defaults to "gpt2".
            segmenter_device (str, optional): Device. Defaults to "cuda".
            segmenter_gpu_batch_size (int, optional): Segmenter GPU batch size. Defaults to 1000.
            reranker_gpu_batch_size (int, optional): Reranker GPU batch size. Defaults to 1000.
            reranker_model_name_or_path (str, optional): BERT model that will be fetched from the Hugging Face Model Hub. It is possible to turn off the reranker by passing a None or False value to this argument. Defaults to None.
            reranker_model_type (str, optional): Transformer encoder model type. Defaults to "bert".
        """
segmenter_model = Beamsearch(model_name_or_path=segmenter_model_name_or_path, model_type=segmenter_model_type, device=segmenter_device, gpu_batch_size=segmenter_gpu_batch_size)
if reranker_model_name_or_path:
reranker_model = Reranker(model_name_or_path=reranker_model_name_or_path, model_type=reranker_model_type, gpu_batch_size=reranker_gpu_batch_size, device=reranker_device)
else:
reranker_model = None
ensembler = Top2_Ensembler()
super().__init__(segmenter=segmenter_model, reranker=reranker_model, ensembler=ensembler)
def segment(self, word_list, topk: int=20, steps: int=13, alpha: float=0.222, beta: float=0.111, use_reranker: bool=True, return_ranks: bool=False):
segmenter_kwargs = {'topk': topk, 'steps': steps}
ensembler_kwargs = {'alpha': alpha, 'beta': beta}
return super().segment(word_list, segmenter_kwargs=segmenter_kwargs, ensembler_kwargs=ensembler_kwargs, use_reranker=use_reranker, return_ranks=return_ranks)
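# Illustrative usage sketch (not part of the original file). It assumes a CUDA
# device is available and that the GPT-2 and BERT checkpoints named below can
# be downloaded, then segments a small batch of hashtags end to end. The
# `predict` entry point wraps the result in a WordSegmenterOutput.
if __name__ == '__main__':
    ws = TransformerWordSegmenter(
        segmenter_model_name_or_path='gpt2',
        reranker_model_name_or_path='bert-base-uncased')
    result = ws.predict(['#weneedanationalpark', '#myoldphonesucks'], topk=20, steps=13)
    print(result.output)  # e.g. ['we need a national park', 'my old phone sucks']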
|
def coerce_segmenter_objects(method):
"\n A decorator function that ensures the returned value from the decorated method is of certain types \n or converts the returned value to one of the allowed types.\n\n It also handles different types of 'inputs' passed to the decorated method. It checks whether the input is a string \n or an iterable, and in the case of unsupported input type, raises a NotImplementedError.\n\n Args:\n method (function): The method to be decorated.\n\n Returns:\n function: The decorated function which enforces specific output types and handles different input types.\n "
def wrapper(self, inputs, *args, **kwargs):
if isinstance(inputs, str):
output = method(self, [inputs], *args, **kwargs)
elif isinstance(inputs, Iterable):
output = method(self, inputs, *args, **kwargs)
else:
raise NotImplementedError(str(type(inputs)))
for allowed_type in [WordSegmenterOutput, TweetSegmenterOutput]:
if isinstance(output, allowed_type):
return output
if isinstance(output, str):
return WordSegmenterOutput(output=[output])
if isinstance(output, Iterable):
return WordSegmenterOutput(output=output)
return wrapper
|
class BaseSegmenter(object):
'\n Base class for text segmenter objects.\n '
@coerce_segmenter_objects
def predict(self, inputs, *args, **kwargs):
'\n Predict method that delegates to the segment method.\n It is decorated with coerce_segmenter_objects to handle different input types and enforce output type.\n\n Args:\n inputs (str or Iterable): The inputs to be segmented.\n *args: Variable length argument list.\n **kwargs: Arbitrary keyword arguments.\n\n Returns:\n WordSegmenterOutput or TweetSegmenterOutput: The output from the segment method.\n '
return self.segment(inputs, *args, **kwargs)
def segment(self, inputs, *args, **kwargs):
'\n Abstract method for segmentation. Should be implemented in a child class.\n\n Args:\n inputs (str or Iterable): The inputs to be segmented.\n *args: Variable length argument list.\n **kwargs: Arbitrary keyword arguments.\n\n Raises:\n NotImplementedError: If this method is not overridden in a child class.\n '
raise NotImplementedError('This method should be implemented in a child class.')
def preprocess(self, inputs, lower=False, remove_hashtag=True, hashtag_character='#'):
'\n Preprocesses the inputs based on the given parameters.\n\n Args:\n inputs (str or Iterable): The inputs to be preprocessed.\n lower (bool, optional): Whether to convert the inputs to lower case. Defaults to False.\n remove_hashtag (bool, optional): Whether to remove the hashtag character from the inputs. Defaults to True.\n hashtag_character (str, optional): The hashtag character to be removed. Defaults to "#".\n\n Returns:\n str or list of str: The preprocessed inputs.\n\n Raises:\n NotImplementedError: If the type of inputs is neither str nor Iterable.\n '
def preprocess_input(word):
if lower:
word = word.lower()
if remove_hashtag:
word = word.lstrip(hashtag_character)
return word
if isinstance(inputs, str):
inputs = preprocess_input(inputs)
elif isinstance(inputs, Iterable):
inputs = [preprocess_input(x) for x in inputs]
else:
raise NotImplementedError(str(type(inputs)))
return inputs
|
@dataclass
class WordSegmenterOutput():
output: List[str]
    segmenter_rank: Union[pd.DataFrame, None] = None
    reranker_rank: Union[pd.DataFrame, None] = None
    ensemble_rank: Union[pd.DataFrame, None] = None
|
@dataclass
class HashtagContainer():
hashtags: List[List[str]]
hashtag_set: List[str]
replacement_dict: dict
|
@dataclass
class TweetSegmenterOutput():
output: List[str]
word_segmenter_output: Any
|
class RegexWordSegmenter(BaseSegmenter):
"\n A subclass of BaseSegmenter which uses regex rules to perform segmentation.\n\n Args:\n regex_rules (list of str, optional): List of regex rules used for segmentation. \n If none are provided, it uses the default rule: [r'([A-Z]+)'].\n\n Attributes:\n regex_rules (list of _sre.SRE_Pattern): List of compiled regex rules used for segmentation.\n "
def __init__(self, regex_rules=None):
'\n Initializes the RegexWordSegmenter with given or default regex rules and compiles them.\n '
if (not regex_rules):
regex_rules = ['([A-Z]+)']
self.regex_rules = [re.compile(x) for x in regex_rules]
def segment_word(self, rule, word):
'\n Segments a word based on a given regex rule.\n\n Args:\n rule (_sre.SRE_Pattern): The compiled regex rule to be used for segmentation.\n word (str): The word to be segmented.\n\n Returns:\n str: The segmented word.\n '
return rule.sub(' \\1', word).strip()
def segmentation_generator(self, word_list):
'\n A generator that iterates over the word list and yields segmented words \n based on the regex rules.\n\n Args:\n word_list (list of str): The list of words to be segmented.\n\n Yields:\n str: The segmented word.\n '
for rule in self.regex_rules:
for (idx, word) in enumerate(word_list):
(yield self.segment_word(rule, word))
def segment(self, inputs: List[str], **kwargs):
'\n Segments a list of strings based on the regex rules. Before segmentation, \n the inputs are preprocessed using the inherited preprocess method.\n\n Args:\n inputs (List[str]): The list of strings to be segmented.\n **kwargs: Arbitrary keyword arguments for the inherited preprocess method.\n\n Returns:\n list of str: The segmented inputs.\n '
inputs = super().preprocess(inputs, **kwargs)
return list(self.segmentation_generator(inputs))
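# Illustrative sketch (not part of the original file): the default rule simply
# inserts a space before every run of capital letters, and `predict` strips the
# leading '#' and wraps the result in a WordSegmenterOutput.
if __name__ == '__main__':
    rws = RegexWordSegmenter()
    print(rws.predict(['#MyOldPhoneSucks']).output)  # ['My Old Phone Sucks']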
|
class BaseWordSegmenter(BaseSegmenter):
'\n Initializes BaseWordSegmenter class with segmenter, reranker and ensembler models.\n\n Args:\n segmenter: The model used for initial word segmentation.\n reranker: The model used for reranking the segmented words.\n ensembler: The model used for ensemble operations over the segmenter and reranker models.\n '
def __init__(self, segmenter=None, reranker=None, ensembler=None):
self.segmenter_model = segmenter
self.reranker_model = reranker
self.ensembler = ensembler
def get_segmenter(self):
'\n Returns the segmenter model.\n '
return self.segmenter_model.model
def get_reranker(self):
'\n Returns the reranker model.\n '
return self.reranker_model.model
def get_ensembler(self):
'\n Returns the ensembler model.\n '
return self.ensembler
def set_segmenter(self, segmenter):
'\n Sets the segmenter model.\n\n Args:\n segmenter: The model used for initial hashtag segmentation.\n '
self.segmenter_model.model = segmenter
def set_reranker(self, reranker):
'\n Sets the reranker model.\n\n Args:\n reranker: The model used for reranking the segmented hashtags.\n '
self.reranker_model.model = reranker
def set_ensembler(self, ensembler):
'\n Sets the ensembler model.\n\n Args:\n ensembler: The model used for ensemble operations over the segmenter and reranker models.\n '
self.ensembler = ensembler
def segment(self, word_list: List[str], segmenter_run: Any=None, preprocessing_kwargs: dict={}, segmenter_kwargs: dict={}, ensembler_kwargs: dict={}, reranker_kwargs: dict={}, use_reranker: bool=True, use_ensembler: bool=True, return_ranks: bool=False) -> Any:
'\n Segments the input list of words using the segmenter, reranker, and ensembler models.\n Allows customization of the segmenting process with multiple keyword arguments.\n\n Args:\n word_list: List of strings, where each string is a word to be segmented.\n segmenter_run: Optional argument to use a pre-existing segmenter run, defaults to None.\n preprocessing_kwargs: Keyword arguments to be used during the preprocessing phase.\n segmenter_kwargs: Keyword arguments to be used by the segmenter model.\n ensembler_kwargs: Keyword arguments to be used by the ensembler model.\n reranker_kwargs: Keyword arguments to be used by the reranker model.\n use_reranker: Boolean flag to indicate whether to use the reranker model, defaults to True.\n use_ensembler: Boolean flag to indicate whether to use the ensembler model, defaults to True.\n return_ranks: Boolean flag to indicate whether to return the ranks from the models, defaults to False.\n\n Returns:\n Returns the segmented words. If return_ranks is True, also returns the segmenter_rank, reranker_rank, and ensemble_rank.\n '
word_list = super().preprocess(word_list, **preprocessing_kwargs)
if (not isinstance(segmenter_run, pd.DataFrame)):
segmenter_run = self.segmenter_model.run(word_list, **segmenter_kwargs)
ensemble_prob_dict = None
segmenter_prob_dict = enforce_prob_dict(segmenter_run, score_field='score')
if (use_reranker and self.reranker_model):
reranker_run = self.reranker_model.rerank(segmenter_run, **reranker_kwargs)
if (use_reranker and self.reranker_model and use_ensembler and self.ensembler):
ensemble_prob_dict = self.ensembler.run(segmenter_run, reranker_run, **ensembler_kwargs)
segs = ensemble_prob_dict.get_segmentations(astype='list', gold_array=word_list)
else:
segs = segmenter_prob_dict.get_segmentations(astype='list', gold_array=word_list)
if (not return_ranks):
return segs
else:
segmenter_df = segmenter_prob_dict.to_dataframe().reset_index(drop=True)
reranker_df = None
ensembler_df = None
if use_reranker:
if self.reranker_model:
reranker_df = reranker_run.to_dataframe().reset_index(drop=True)
if (use_ensembler and self.ensembler and ensemble_prob_dict):
ensembler_df = ensemble_prob_dict.to_dataframe().reset_index(drop=True)
return WordSegmenterOutput(segmenter_rank=segmenter_df, reranker_rank=reranker_df, ensemble_rank=ensembler_df, output=segs)
|
class TwitterTextMatcher(object):
def __init__(self):
'\n Initializes TwitterTextMatcher object with a Parser from ttp module.\n '
self.parser = ttp.Parser()
def __call__(self, tweets):
'\n Makes the TwitterTextMatcher instance callable. It parses the given tweets and returns their tags.\n\n Args:\n tweets: A list of strings, where each string is a tweet.\n\n Returns:\n A list of hashtags for each tweet.\n '
return [self.parser.parse(x).tags for x in tweets]
|
class TweetSegmenter(BaseSegmenter):
def __init__(self, matcher=None, word_segmenter=None):
'\n Initializes a TweetSegmenter instance with a TwitterTextMatcher and a WordSegmenter.\n\n Args:\n matcher (TwitterTextMatcher, optional): Instance of TwitterTextMatcher used for matching text in tweets. \n Defaults to an instance of TwitterTextMatcher if not provided.\n word_segmenter (WordSegmenter, optional): Instance of WordSegmenter used for segmenting words in tweets. \n Defaults to an instance of RegexWordSegmenter if not provided.\n '
if matcher:
self.matcher = matcher
else:
self.matcher = TwitterTextMatcher()
if word_segmenter:
self.word_segmenter = word_segmenter
else:
self.word_segmenter = RegexWordSegmenter()
def extract_hashtags(self, tweets):
'\n Extracts hashtags from the provided list of tweets.\n\n Args:\n tweets: A list of strings, where each string is a tweet.\n\n Returns:\n A list of hashtags extracted from each tweet.\n '
return self.matcher(tweets)
def compile_dict(self, hashtags, segmentations, hashtag_token=None, lower=False, separator=' ', hashtag_character='#'):
'\n Compiles a dictionary mapping hashtags to their corresponding segmentations.\n\n Args:\n hashtags (list): List of hashtags extracted from tweets.\n segmentations (list): Corresponding segmentations of the hashtags.\n hashtag_token (str, optional): Token to prepend to the segmented hashtag value. If not provided, no token is prepended.\n lower (bool, optional): If True, converts the hashtag value to lowercase. Defaults to False.\n separator (str, optional): Separator used between hashtag_token and the value. Defaults to " ".\n hashtag_character (str, optional): Character representing a hashtag. Defaults to "#".\n\n Returns:\n dict: A dictionary mapping hashtags to their segmented versions.\n '
hashtag_buffer = {k: v for (k, v) in zip(hashtags, segmentations)}
replacement_dict = {}
for (key, value) in hashtag_buffer.items():
if (not key.startswith(hashtag_character)):
hashtag_key = (hashtag_character + key)
else:
hashtag_key = key
if hashtag_token:
hashtag_value = ((hashtag_token + separator) + value)
else:
hashtag_value = value
if lower:
hashtag_value = hashtag_value.lower()
replacement_dict.update({hashtag_key: hashtag_value})
return replacement_dict
def replace_hashtags(self, tweet, regex_pattern, replacement_dict):
'\n Replaces the hashtags in a tweet based on a provided replacement dictionary.\n\n Args:\n tweet (str): The tweet in which hashtags are to be replaced.\n regex_pattern (re.Pattern): Compiled regular expression pattern for matching hashtags in the tweet.\n replacement_dict (dict): Dictionary with original hashtags as keys and their replacements as values.\n\n Returns:\n str: The tweet with hashtags replaced.\n '
if (not replacement_dict):
return tweet
tweet = regex_pattern.sub((lambda m: replacement_dict[m.group(0)]), tweet)
return tweet
def segmented_tweet_generator(self, tweets, hashtags, hashtag_set, replacement_dict, flag=0):
'\n Yields segmented tweets from a provided list of tweets.\n\n Args:\n tweets (list): List of tweets to be segmented.\n hashtags (list): List of hashtags extracted from each tweet.\n hashtag_set (set): Set of unique hashtags extracted from all tweets.\n replacement_dict (dict): Dictionary with original hashtags as keys and their replacements as values.\n flag (int, optional): Flags for the regular expression compilation. Defaults to 0.\n\n Yields:\n str: Segmented version of each tweet.\n '
hashtag_set_index = {value: idx for (idx, value) in enumerate(hashtag_set)}
replacement_pairs = [(key, value) for (key, value) in replacement_dict.items()]
for (idx, tweet_hashtags) in enumerate(hashtags):
tweet_dict = [hashtag_set_index[hashtag] for hashtag in tweet_hashtags]
tweet_dict = [replacement_pairs[index] for index in tweet_dict]
tweet_dict = dict(tweet_dict)
tweet_map = map(re.escape, sorted(tweet_dict, key=len, reverse=True))
regex_pattern = re.compile('|'.join(tweet_map), flag)
tweet = self.replace_hashtags(tweets[idx], regex_pattern, tweet_dict)
(yield tweet)
def build_hashtag_container(self, tweets: List[str], preprocessing_kwargs: dict={}, segmenter_kwargs: dict={}):
'\n Constructs a HashtagContainer from a list of tweets.\n\n Args:\n tweets (list): List of tweets.\n preprocessing_kwargs (dict, optional): Keyword arguments for preprocessing. Defaults to an empty dictionary.\n segmenter_kwargs (dict, optional): Keyword arguments for the segmenter. Defaults to an empty dictionary.\n\n Returns:\n tuple: A tuple containing a HashtagContainer instance and the output from the word segmenter.\n '
hashtags = self.extract_hashtags(tweets)
hashtag_set = list(set(reduce((lambda x, y: (x + y)), hashtags)))
word_segmenter_output = self.word_segmenter.predict(hashtag_set, **segmenter_kwargs)
segmentations = word_segmenter_output.output
replacement_dict = self.compile_dict(hashtag_set, segmentations, **preprocessing_kwargs)
return (HashtagContainer(hashtags, hashtag_set, replacement_dict), word_segmenter_output)
def segment(self, tweets: List[str], regex_flag: Any=0, preprocessing_kwargs: dict={}, segmenter_kwargs: dict={}):
'\n Segments a list of tweets into individual words and replaces the hashtags based on the preprocessing and segmenter configurations.\n\n Args:\n tweets (List[str]): List of tweets to be segmented.\n regex_flag (Any, optional): Regular expression flags used in replacing the hashtags. Defaults to 0.\n preprocessing_kwargs (dict, optional): Dictionary of keyword arguments used for preprocessing the tweets. Defaults to an empty dictionary.\n segmenter_kwargs (dict, optional): Dictionary of keyword arguments used for the WordSegmenter. Defaults to an empty dictionary.\n\n Returns:\n TweetSegmenterOutput: Contains the output of WordSegmenter and the segmented tweets.\n '
(hashtag_container, word_segmenter_output) = self.build_hashtag_container(tweets, preprocessing_kwargs, segmenter_kwargs)
output = list(self.segmented_tweet_generator(tweets, *dataclasses.astuple(hashtag_container), flag=regex_flag))
return TweetSegmenterOutput(word_segmenter_output=word_segmenter_output, output=output)
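# Minimal usage sketch (illustrative, not part of the library): assuming the classes above are
# importable from this module, this is the intended end-to-end call flow for segmenting the
# hashtags inside a tweet. The tweet text below is made up for demonstration.
def example_tweet_segmentation():
    sketch_segmenter = TweetSegmenter(matcher=TwitterTextMatcher(), word_segmenter=RegexWordSegmenter())
    result = sketch_segmenter.segment(['check out #WayBackWhen'])
    # result.output is expected to hold the tweets with their hashtags segmented,
    # e.g. ['check out Way Back When'] for the camel-case-splitting RegexWordSegmenter.
    return result.output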
|
@pytest.mark.skipif((not CUDA_IS_AVAILABLE), reason='A GPU is not available.')
def test_cuda_availability():
'\n Asserts that CUDA is available for the running tests. The test is skipped when no GPU is detected, so the assertion only documents the requirement.\n '
assert CUDA_IS_AVAILABLE
|
@pytest.fixture(scope='module')
def tweet_segmenter():
'\n Initializes and returns a TweetSegmenter object with a TwitterTextMatcher and a RegexWordSegmenter.\n \n Returns:\n TweetSegmenter: An instance of the TweetSegmenter class.\n '
return TweetSegmenter(matcher=TwitterTextMatcher(), word_segmenter=RegexWordSegmenter())
|
@pytest.fixture(scope='module')
def word_segmenter_gpt2_bert():
'\n Initializes and returns a BaseWordSegmenter object with Beamsearch, Reranker, and Top2_Ensembler.\n \n Returns:\n BaseWordSegmenter: An instance of the BaseWordSegmenter class.\n '
segmenter = Beamsearch(model_name_or_path='distilgpt2', gpu_batch_size=1000)
reranker = Reranker(model_name_or_path='bert-base-cased', gpu_batch_size=1000)
ensembler = Top2_Ensembler()
ws = BaseWordSegmenter(segmenter=segmenter, reranker=reranker, ensembler=ensembler)
return ws
|
@pytest.mark.parametrize('word_segmenter', SEGMENTER_FIXTURES)
def test_word_segmenter_output(word_segmenter):
'\n Tests the predict function of the provided word_segmenter.\n \n Args:\n word_segmenter (BaseWordSegmenter): The word_segmenter to be tested.\n '
test_boun_hashtags = ['minecraf', 'ourmomentfragrance', 'waybackwhen']
predictions = word_segmenter.predict(test_boun_hashtags).output
predictions_chars = [x.replace(' ', '') for x in predictions]
assert all([(predictions_chars[0] == 'minecraf'), (predictions_chars[1] == 'ourmomentfragrance'), (predictions_chars[2] == 'waybackwhen')])
|
def test_twitter_text_matcher():
'\n Tests the functionality of the TwitterTextMatcher.\n '
matcher = TwitterTextMatcher()
result = matcher(['esto es #UnaGenialidad'])
assert (result == [['UnaGenialidad']])
|
def test_regex_word_segmentation():
'\n Tests the predict function of the RegexWordSegmenter.\n '
ws = RegexWordSegmenter()
test_case = ['UnaGenialidad']
prediction = ws.predict(test_case)
assert (prediction.output == ['Una Genialidad'])
|
def test_hashtag_container(tweet_segmenter):
'\n Tests the build_hashtag_container method of the provided tweet_segmenter.\n \n Args:\n tweet_segmenter (TweetSegmenter): The tweet_segmenter to be tested.\n '
original_tweet = 'esto es #UnaGenialidad'
(hashtag_container, word_segmenter_output) = tweet_segmenter.build_hashtag_container([original_tweet])
assert all([(hashtag_container.hashtags == [['UnaGenialidad']]), (hashtag_container.hashtag_set == ['UnaGenialidad']), (hashtag_container.replacement_dict == {'#UnaGenialidad': 'Una Genialidad'}), isinstance(word_segmenter_output, hashformers.segmenter.WordSegmenterOutput)])
|
def test_tweet_segmentation(tweet_segmenter):
'\n Tests the segmentation process of the provided tweet_segmenter.\n \n Args:\n tweet_segmenter (TweetSegmenter): The tweet_segmenter to be tested.\n '
original_tweet = 'esto es #UnaGenialidad'
expected_tweet = 'esto es Una Genialidad'
(hashtag_container, word_segmenter_output) = tweet_segmenter.build_hashtag_container([original_tweet])
tweet = list(tweet_segmenter.segmented_tweet_generator([original_tweet], *dataclasses.astuple(hashtag_container), flag=0))[0]
assert (tweet == expected_tweet)
|
def test_tweet_segmenter_output_format(tweet_segmenter):
"\n Tests the predict method's output of the provided tweet_segmenter.\n \n Args:\n tweet_segmenter (TweetSegmenter): The tweet_segmenter to be tested.\n "
original_tweet = 'esto es #UnaGenialidad'
expected_tweet = 'esto es Una Genialidad'
output_tweets = tweet_segmenter.predict([original_tweet])
output_tweets = output_tweets.output
assert (output_tweets[0] == expected_tweet)
|
class Agent():
def __init__(self):
self.name = 'randomAgent'
def act(self, stateObs, actions):
action_id = randint(0, (len(actions) - 1))
return action_id
|
def get_src(path):
source = []
for (root, _, files) in os.walk(path):
for f in files:
if f.endswith('.java'):
source.append(os.path.join(root, f))
return source
|
def main(dir):
if shutil.which('javac'):
try:
sys.path.append(dir)
import check_build
except ImportError:
raise ImportError("Can't import check_build from the given directory.")
except:
raise Exception('Invalid directory path.')
try:
path = os.path.join(dir, dest)
if os.path.isdir(path):
shutil.rmtree(path)
os.makedirs(path, exist_ok=True)
src_path = os.path.join(dir, 'src')
source = get_src(src_path)
subprocess.run((['javac', '-d', path] + source), check=True)
hash = check_build.dirHash(src_path)
check_build.saveChecksum(path, hash)
except PermissionError as e:
print("Could not edit directory '{}'. Check that you have proper permisions in the installation directory.".format(dest))
raise e
except subprocess.CalledProcessError as e:
print('Failed to build java source code. Make sure you have Java JDK installed (> 7) and javac works.')
print('This build process has not been tested on Windows. Feel free to contribute fixes to the build.py file to get this working on Windows.')
raise e
else:
raise Exception("Command 'javac' is not found. Can't compile source code. May need to install Java JDK or fix path variables.")
|
def fileHash(filepath, blocksize=4096):
md5 = hashlib.md5()
with open(filepath, 'rb') as fp:
while True:
data = fp.read(blocksize)
if data:
md5.update(data)
else:
break
return md5.hexdigest()
|
def dirHash(dirpath):
files = []
for (root, dirs, filenames) in os.walk(dirpath):
for f in filenames:
if f.endswith('.java'):
files.append(os.path.relpath(os.path.join(root, f), dirpath))
hashes = []
for f in files:
hashes.append(fileHash(os.path.join(dirpath, f)))
return hashes
|
def compare(hash1, hash2):
return (frozenset(hash1) == frozenset(hash2))
|
def saveChecksum(path, hash):
filename = os.path.join(path, 'checksum.csv')
with open(filename, 'w') as myfile:
wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
wr.writerow(hash)
|
def isCorrectBuild(src_path, build_path):
build_files = os.path.join(build_path, 'checksum.csv')
with open(build_files) as csvfile:
reader = csv.reader(csvfile)
build_hash = list(reader)[0]
src_hash = dirHash(src_path)
return compare(build_hash, src_hash)
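# Minimal sketch (not part of the build scripts above): how the checksum helpers are meant to be
# combined to record a build fingerprint and later verify it. src_dir and build_dir are
# hypothetical paths to a Java source tree and its build output directory.
def example_checksum_roundtrip(src_dir, build_dir):
    source_hashes = dirHash(src_dir)           # per-file MD5 digests of every .java source
    saveChecksum(build_dir, source_hashes)     # writes build_dir/checksum.csv
    return isCorrectBuild(src_dir, build_dir)  # True for as long as the sources are unchanged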
|
class Agent(AbstractPlayer):
def __init__(self):
'\n Method to be called at the start of an evaluation process (training & testing) for a particular game.\n '
AbstractPlayer.__init__(self)
self.alpha_base = 0.05
self.gamma = 0.99
self.lambd = 0.5
self.loss_reward = 0.0
self.win_reward = 1.0
self.action_selection_policy = 1
self.epsilon_start = 1.0
self.epsilon_final = 0.05
self.epsilon_decay = 500
self.print_information = True
self.alpha = None
self.item_types = set()
self.item_pairs_dict = []
self.running_mode = 0
self.lastSsoType = LEARNING_SSO_TYPE.JSON
self.blockSize = None
self.worldDimension = None
self.maximumDistance = None
self.n_actions = None
self.max_dimensions = [0.0, 0.0]
self.max_distance = None
self.previous_gameScore = None
self.w = [None, None]
self.e = None
self.q_old = None
self.f_length_single = 4
self.f_length_full = None
self.previous_state_features = None
self.previous_state_features_extended = None
self.previous_action = None
self.n_plays = 0
self.n_wins = 0
self.n_losses = 0
self.n_total_steps = 0
self.average_score = 0.0
def init(self, sso, elapsed_timer):
'\n Method to be called at the start of every level of a game.\n\n :param sso: observation of the current state of the game\n :param elapsed_timer: the timer\n :return:\n '
self.blockSize = sso.blockSize
self.worldDimension = sso.worldDimension
self.maximumDistance = (self.worldDimension[0] + self.worldDimension[1])
self.n_actions = len(sso.availableActions)
self.previous_state_features = None
self.previous_state_features_extended = None
self.previous_action = None
self.previous_gameScore = 0
if (self.n_plays == 3):
self.running_mode = 1
self.max_distance = (self.max_dimensions[0] + self.max_dimensions[1])
self.generate_dictionary()
self.f_length_full = (self.f_length_single * len(self.item_pairs_dict))
for (i, _) in enumerate(self.w):
self.w[i] = np.zeros(((self.f_length_full * self.n_actions),), dtype=np.float32)
self.e = np.zeros(((self.f_length_full * self.n_actions),), dtype=np.float32)
self.q_old = 0
if (self.running_mode == 0):
dimensions = [(self.worldDimension[0] / self.blockSize), (self.worldDimension[1] / self.blockSize)]
if (dimensions[0] > self.max_dimensions[0]):
self.max_dimensions[0] = dimensions[0]
if (dimensions[1] > self.max_dimensions[1]):
self.max_dimensions[1] = dimensions[1]
def learn(self, state_features, reward, action_probabilities):
'\n Executes a step of the learning algorithm.\n\n :param state_features: state features\n :param reward: transition reward\n :param action_probabilities: action probabilities from the "state_features" according to the current policy\n '
q_previous = self.w[0].dot(self.previous_state_features_extended)
q = 0.0
for (i_action, action_probability) in enumerate(action_probabilities):
q += (action_probability * self.w[1].dot(self.extend_features(state_features, i_action)))
delta = ((reward + (self.gamma * q)) - q_previous)
self.e = (((self.gamma * self.lambd) * self.e) + ((1.0 - (((self.alpha * self.gamma) * self.lambd) * self.e.dot(self.previous_state_features_extended))) * self.previous_state_features_extended))
self.w[0] += (((self.alpha * ((delta + q_previous) - self.q_old)) * self.e) - ((self.alpha * (q_previous - self.q_old)) * self.previous_state_features_extended))
self.q_old = q
np.clip(self.w[0], (- 10.0), 10.0, out=self.w[0])
if random.randint(0, 1):
(self.w[0], self.w[1]) = (self.w[1], self.w[0])
def act(self, sso, elapsed_timer):
'\n Method used to determine the next move to be performed by the agent.\n\n :param sso: observation of the current state of the game\n :param elapsed_timer: the timer\n :return: index of the action to be taken\n '
if (self.running_mode == 1):
reward = (sso.gameScore - self.previous_gameScore)
state_features = self.extract_features(sso)
(action, action_probabilities) = self.select_action(state_features)
state_features_extended = self.extend_features(state_features, action)
if (sso.gameTick > 0):
self.learn(state_features, reward, action_probabilities)
self.previous_state_features = np.copy(state_features)
self.previous_state_features_extended = np.copy(state_features_extended)
self.previous_action = action
self.n_total_steps += 1
elif (self.running_mode == 0):
self.scan_observations(sso)
if (sso.gameTick == 200):
return 'ACTION_ESCAPE'
self.previous_action = random.randint(0, (self.n_actions - 1))
self.previous_gameScore = sso.gameScore
return sso.availableActions[self.previous_action]
def softmax(self, x):
'\n Converts an input vector into an output vector of the same size whose entries lie in the range [0, 1] and sum to 1.\n\n :param x: input vector\n :return: output vector\n '
e_x = np.exp((x - np.max(x)))
return (e_x / e_x.sum(axis=0))
def select_action(self, state_features):
'\n Determines the action to be taken based on state features by using softmax policy.\n\n :param state_features: state features\n :return: selected action, probabilities for the actions\n '
action_values = np.zeros((self.n_actions,), dtype=np.float32)
for i_action in range(self.n_actions):
features_extended = self.extend_features(state_features, i_action)
action_values[i_action] = (self.w[0].dot(features_extended) + self.w[1].dot(features_extended))
selected_action = None
random_number = random.uniform(0.0, 1.0)
if (self.action_selection_policy == 0):
action_probabilities = self.softmax(action_values)
action_probabilities_cs = np.cumsum(action_probabilities)
for i_action in range(self.n_actions):
if (random_number <= action_probabilities_cs[i_action]):
selected_action = i_action
break
elif (self.action_selection_policy == 1):
epsilon = (self.epsilon_final + ((self.epsilon_start - self.epsilon_final) * math.exp((((- 1.0) * self.n_total_steps) / self.epsilon_decay))))
action_probabilities = np.zeros((self.n_actions,), dtype=np.float32)
greedy_action = np.argmax(action_values)
action_probabilities[greedy_action] = (1.0 - epsilon)
action_probabilities[(action_probabilities == 0.0)] = (epsilon / (self.n_actions - 1))
if (random_number < epsilon):
selected_action = random.randint(0, (self.n_actions - 1))
else:
selected_action = greedy_action
return (selected_action, action_probabilities)
def scan_observations(self, sso):
'\n Scans the given state observation and adds the observed item types to the set.\n\n :param sso: observation of the current state of the game\n '
if (sso.avatarType != 0):
self.item_types.add(sso.avatarType)
for observation in sso.NPCPositions:
self.item_types.add(observation[0].itype)
for observation in sso.immovablePositions:
self.item_types.add(observation[0].itype)
for observation in sso.movablePositions:
self.item_types.add(observation[0].itype)
for observation in sso.resourcesPositions:
self.item_types.add(observation[0].itype)
for observation in sso.portalsPositions:
self.item_types.add(observation[0].itype)
for observation in sso.fromAvatarSpritesPositions:
self.item_types.add(observation[0].itype)
def generate_dictionary(self):
'\n Generates the dictionary of item type pairs by using the item types set.\n Avatar-Avatar pair is possibly redundant, yet is kept for the sake of generality.\n '
item_types_list = list(self.item_types)
item_types_list.sort()
for i in range(len(item_types_list)):
for j in range(i, len(item_types_list)):
self.item_pairs_dict.append((item_types_list[i], item_types_list[j]))
def extract_features(self, sso):
'\n Extracts a set of features from a given state observation.\n\n :param sso: observation of the current state of the game\n :return: state features vector\n '
features = np.zeros((self.f_length_full,), dtype=np.float32)
all_observations = []
avatar_observation = Observation()
avatar_observation.itype = sso.avatarType
avatar_observation.position.x = sso.avatarPosition[0]
avatar_observation.position.y = sso.avatarPosition[1]
all_observations.append([avatar_observation])
for observations in sso.NPCPositions:
if (observations[0].itype in self.item_types):
all_observations.append(observations)
for observations in sso.immovablePositions:
if (observations[0].itype in self.item_types):
all_observations.append(observations)
for observations in sso.movablePositions:
if (observations[0].itype in self.item_types):
all_observations.append(observations)
for observations in sso.resourcesPositions:
if (observations[0].itype in self.item_types):
all_observations.append(observations)
for observations in sso.portalsPositions:
if (observations[0].itype in self.item_types):
all_observations.append(observations)
for observations in sso.fromAvatarSpritesPositions:
if (observations[0].itype in self.item_types):
all_observations.append(observations)
for i in range(len(all_observations)):
item_type_1 = all_observations[i][0].itype
for j in range(i, len(all_observations)):
item_type_2 = all_observations[j][0].itype
features_single = np.zeros((4,), dtype=np.float32)
min_distance = float('inf')
min_distances = [self.max_dimensions[0], self.max_dimensions[1]]
for ii in range(len(all_observations[i])):
if (all_observations[i][ii] is not None):
for jj in range(len(all_observations[j])):
if (all_observations[j][jj] is not None):
if (all_observations[i][ii].obsID != all_observations[j][jj].obsID):
distances = [((all_observations[j][jj].position.x - all_observations[i][ii].position.x) / self.blockSize), ((all_observations[j][jj].position.y - all_observations[i][ii].position.y) / self.blockSize)]
distance = (abs(distances[0]) + abs(distances[1]))
if (distance < min_distance):
min_distance = distance
min_distances = [distances[0], distances[1]]
normalized_proximities = np.array([(1.0 - abs((min_distances[0] / self.max_dimensions[0]))), (1.0 - abs((min_distances[1] / self.max_dimensions[1])))])
normalized_proximities[(normalized_proximities < 0)] = 0.0
if (min_distances[0] < 0.0):
features_single[0] = normalized_proximities[0]
elif (min_distances[0] > 0.0):
features_single[1] = normalized_proximities[0]
elif (min_distances[0] == 0.0):
features_single[0] = normalized_proximities[0]
features_single[1] = normalized_proximities[0]
if (min_distances[1] < 0.0):
features_single[2] = normalized_proximities[1]
elif (min_distances[1] > 0.0):
features_single[3] = normalized_proximities[1]
elif (min_distances[1] == 0.0):
features_single[2] = normalized_proximities[1]
features_single[3] = normalized_proximities[1]
target_tuple = (item_type_1, item_type_2)
if (item_type_2 < item_type_1):
target_tuple = (item_type_2, item_type_1)
f_index = self.item_pairs_dict.index(target_tuple)
f_start = (f_index * self.f_length_single)
f_end = (f_start + self.f_length_single)
features[f_start:f_end] = np.copy(features_single)
if (self.alpha is None):
self.alpha = (self.alpha_base / np.linalg.norm(features, ord=2))
return features
def extend_features(self, state_features, action_index):
'\n Extends the given feature vector by multiplying its size and filling up with zeros for the other actions.\n\n :param state_features: state features\n :param action_index: index of relevant action\n :return: extended state features vector\n '
extended_state_features = np.zeros(((self.n_actions * self.f_length_full),), dtype=np.float32)
if (action_index is None):
return extended_state_features
partition_start = (action_index * self.f_length_full)
partition_end = (partition_start + self.f_length_full)
extended_state_features[partition_start:partition_end] = np.copy(state_features)
return extended_state_features
def result(self, sso, elapsed_timer):
'\n Method used to perform actions in case of a game end.\n This is the last thing called when a level is played (the game is already in a terminal state).\n\n :param sso: observation of the current state of the game\n :param elapsed_timer: the timer\n :return: id of the next level to be played\n '
self.n_plays += 1
if (self.running_mode == 1):
reward = (sso.gameScore - self.previous_gameScore)
if (sso.gameTick < 2000):
if (sso.gameWinner == 'PLAYER_LOSES'):
reward += self.loss_reward
elif (sso.gameWinner == 'PLAYER_WINS'):
reward += self.win_reward
features = np.zeros((self.f_length_full,), dtype=np.float32)
action_probabilities = np.zeros((self.n_actions,), dtype=np.float32)
self.learn(features, reward, action_probabilities)
if self.print_information:
self.average_score += ((sso.gameScore - self.average_score) / (self.n_plays - 3))
if (sso.gameWinner == 'PLAYER_LOSES'):
self.n_losses += 1
print(' LOSS |', sso.gameScore, '|', int(self.average_score), '| (', self.n_wins, '-', self.n_losses, ')')
elif (sso.gameWinner == 'PLAYER_WINS'):
self.n_wins += 1
print('> WIN |', sso.gameScore, '|', int(self.average_score), '| (', self.n_wins, '-', self.n_losses, ')')
return (self.n_plays % 3)
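# Worked sketch of the exploration schedule used by select_action above (assumption: the constants
# mirror the class defaults epsilon_start=1.0, epsilon_final=0.05 and epsilon_decay=500; the step
# counts below are arbitrary examples).
import math
def example_epsilon(n_total_steps, eps_start=1.0, eps_final=0.05, eps_decay=500):
    return eps_final + ((eps_start - eps_final) * math.exp((-1.0 * n_total_steps) / eps_decay))
# example_epsilon(0)    -> 1.0    (fully random at the start of training)
# example_epsilon(500)  -> ~0.40  (decayed by a factor of e)
# example_epsilon(5000) -> ~0.05  (essentially greedy)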
|
class Agent(AbstractPlayer):
def __init__(self):
AbstractPlayer.__init__(self)
self.lastSsoType = LEARNING_SSO_TYPE.JSON
'\n * Public method to be called at the start of every level of a game.\n * Perform any level-entry initialization here.\n * @param sso Phase Observation of the current game.\n * @param elapsedTimer Timer (1s)\n '
def init(self, sso, elapsedTimer):
pass
'\n * Method used to determine the next move to be performed by the agent.\n * This method can be used to identify the current state of the game and all\n * relevant details, then to choose the desired course of action.\n *\n * @param sso Observation of the current state of the game to be used in deciding\n * the next action to be taken by the agent.\n * @param elapsedTimer Timer (40ms)\n * @return The action to be performed by the agent.\n '
def act(self, sso, elapsedTimer):
if (sso.gameTick == 1000):
return 'ACTION_ESCAPE'
else:
index = random.randint(0, (len(sso.availableActions) - 1))
return sso.availableActions[index]
'\n * Method used to perform actions in case of a game end.\n * This is the last thing called when a level is played (the game is already in a terminal state).\n * Use this for actions such as teardown or process data.\n *\n * @param sso The current state observation of the game.\n * @param elapsedTimer Timer (up to CompetitionParameters.TOTAL_LEARNING_TIME\n * or CompetitionParameters.EXTRA_LEARNING_TIME if current global time is beyond TOTAL_LEARNING_TIME)\n * @return The next level of the current game to be played.\n * The level is bound in the range of [0,2]. If the input is any different, then the level\n * chosen will be ignored, and the game will play a random one instead.\n '
def result(self, sso, elapsedTimer):
return random.randint(0, 2)
|
class AbstractPlayer():
def __init__(self):
self.lastSsoType = LEARNING_SSO_TYPE.JSON
def init(self, sso, timer):
'\n * Public method to be called at the start of every level of a game.\n * Perform any level-entry initialization here.\n * @param sso Phase Observation of the current game.\n * @param elapsedTimer Timer (1s)\n '
pass
def act(self, sso, timer):
'\n * Method used to determine the next move to be performed by the agent.\n * This method can be used to identify the current state of the game and all\n * relevant details, then to choose the desired course of action.\n *\n * @param sso Observation of the current state of the game to be used in deciding\n * the next action to be taken by the agent.\n * @param elapsedTimer Timer (40ms)\n * @return The action to be performed by the agent.\n '
pass
def result(self, sso, timer):
'\n * Method used to perform actions in case of a game end.\n * This is the last thing called when a level is played (the game is already in a terminal state).\n * Use this for actions such as teardown or process data.\n *\n * @param sso The current state observation of the game.\n * @param elapsedTimer Timer (up to CompetitionParameters.TOTAL_LEARNING_TIME\n * or CompetitionParameters.EXTRA_LEARNING_TIME if current global time is beyond TOTAL_LEARNING_TIME)\n * @return The next level of the current game to be played.\n * The level is bound in the range of [0,2]. If the input is any different, then the level\n * chosen will be ignored, and the game will play a random one instead.\n '
pass
|
class ClientComm():
'\n * Client communication, set up the socket for a given agent\n '
def __init__(self, agentName):
self.TOKEN_SEP = '#'
self.io = IOSocket(CompetitionParameters.SOCKET_PORT)
self.sso = SerializableStateObservation()
self.agentName = agentName
self.lastMessageId = 0
self.LOG = False
self.player = None
self.global_ect = None
self.lastSsoType = LEARNING_SSO_TYPE.JSON
def startComm(self):
self.io.initBuffers()
try:
self.listen()
except Exception as e:
logging.exception(e)
print('Start listen [FAILED]')
traceback.print_exc()
sys.exit()
'\n * Method that perpetually listens for messages from the server.\n * With the use of additional helper methods, this function interprets\n * messages and represents the core response-generation methodology of the agent.\n * @throws IOException\n '
def listen(self):
line = ''
while (line is not None):
line = self.io.readLine()
line = line.rstrip('\r\n')
self.processLine(line)
if (self.sso.phase == Phase.START):
self.start()
elif (self.sso.phase == 'INIT'):
self.sso.phase = Phase.INIT
self.init()
elif (self.sso.phase == Phase.INIT):
self.init()
elif (self.sso.phase == 'END'):
self.sso.phase = Phase.END
self.result()
elif (self.sso.phase == Phase.END):
self.result()
elif (self.sso.phase == 'ABORT'):
self.sso.phase = Phase.ABORT
self.result()
elif (self.sso.phase == Phase.ABORT):
self.result()
elif (self.sso.phase == 'ACT'):
self.sso.phase = Phase.ACT
self.act()
elif (self.sso.phase == Phase.ACT):
self.act()
elif (self.sso.phase == Phase.FINISH):
line = None
elif (self.sso.phase == 'FINISH'):
line = None
else:
self.io.writeToServer(self.lastMessageId, 'ERROR', self.LOG)
'\n Helper method that converts a given dictionary into\n a correct SSO type\n '
def as_sso(self, d):
self.sso.__dict__.update(d)
return self.sso
def parse_json(self, input):
parsed_input = json.loads(input)
self.sso.__dict__.update(parsed_input)
if parsed_input.get('observationGrid'):
self.sso.observationGrid = [[[None for j in range(self.sso.observationGridMaxCol)] for i in range(self.sso.observationGridMaxRow)] for k in range(self.sso.observationGridNum)]
for i in range(self.sso.observationGridNum):
for j in range(len(parsed_input['observationGrid'][i])):
for k in range(len(parsed_input['observationGrid'][i][j])):
self.sso.observationGrid[i][j][k] = Observation(parsed_input['observationGrid'][i][j][k])
if parsed_input.get('NPCPositions'):
self.sso.NPCPositions = [[None for j in range(self.sso.NPCPositionsMaxRow)] for i in range(self.sso.NPCPositionsNum)]
for i in range(self.sso.NPCPositionsNum):
for j in range(len(parsed_input['NPCPositions'][i])):
self.sso.NPCPositions[i][j] = Observation(parsed_input['NPCPositions'][i][j])
if parsed_input.get('immovablePositions'):
self.sso.immovablePositions = [[None for j in range(self.sso.immovablePositionsMaxRow)] for i in range(self.sso.immovablePositionsNum)]
for i in range(self.sso.immovablePositionsNum):
for j in range(len(parsed_input['immovablePositions'][i])):
self.sso.immovablePositions[i][j] = Observation(parsed_input['immovablePositions'][i][j])
if parsed_input.get('movablePositions'):
self.sso.movablePositions = [[None for j in range(self.sso.movablePositionsMaxRow)] for i in range(self.sso.movablePositionsNum)]
for i in range(self.sso.movablePositionsNum):
for j in range(len(parsed_input['movablePositions'][i])):
self.sso.movablePositions[i][j] = Observation(parsed_input['movablePositions'][i][j])
if parsed_input.get('resourcesPositions'):
self.sso.resourcesPositions = [[None for j in range(self.sso.resourcesPositionsMaxRow)] for i in range(self.sso.resourcesPositionsNum)]
for i in range(self.sso.resourcesPositionsNum):
for j in range(len(parsed_input['resourcesPositions'][i])):
self.sso.resourcesPositions[i][j] = Observation(parsed_input['resourcesPositions'][i][j])
if parsed_input.get('portalsPositions'):
self.sso.portalsPositions = [[None for j in range(self.sso.portalsPositionsMaxRow)] for i in range(self.sso.portalsPositionsNum)]
for i in range(self.sso.portalsPositionsNum):
for j in range(len(parsed_input['portalsPositions'][i])):
self.sso.portalsPositions[i][j] = Observation(parsed_input['portalsPositions'][i][j])
if parsed_input.get('fromAvatarSpritesPositions'):
self.sso.fromAvatarSpritesPositions = [[None for j in range(self.sso.fromAvatarSpritesPositionsMaxRow)] for i in range(self.sso.fromAvatarSpritesPositionsNum)]
for i in range(self.sso.fromAvatarSpritesPositionsNum):
for j in range(len(parsed_input['fromAvatarSpritesPositions'][i])):
self.sso.fromAvatarSpritesPositions[i][j] = Observation(parsed_input['fromAvatarSpritesPositions'][i][j])
"\n * Method that interprets the received messages from the server's side.\n * A message can either be a string (in the case of initialization), or\n * a json object containing an encapsulated state observation.\n * This method deserializes the json object into a local state observation\n * instance.\n * @param msg Message received from server to be interpreted.\n * @throws IOException\n "
def processLine(self, msg):
try:
if (msg is None):
print('Message is null')
return
message = msg.split(self.TOKEN_SEP)
if (len(message) < 2):
print('Message not complete')
return
self.lastMessageId = message[0]
js = message[1]
self.sso = SerializableStateObservation()
if (js == 'START'):
self.sso.phase = Phase.START
elif (js == 'FINISH'):
self.sso.phase = Phase.FINISH
else:
js.replace('"', '')
self.parse_json(js)
if (self.sso.phase == 'ACT'):
if ((self.lastSsoType == LEARNING_SSO_TYPE.IMAGE) or (self.lastSsoType == 'IMAGE') or (self.lastSsoType == LEARNING_SSO_TYPE.BOTH) or (self.lastSsoType == 'BOTH')):
if self.sso.imageArray:
self.sso.convertBytesToPng(self.sso.imageArray)
except Exception as e:
logging.exception(e)
print('Line processing [FAILED]')
traceback.print_exc()
sys.exit()
'\n * Manages the start of the communication. It starts the whole process, and sets up the timer for the whole run.\n '
def start(self):
self.global_ect = ElapsedCpuTimer()
self.global_ect.setMaxTimeMillis(CompetitionParameters.TOTAL_LEARNING_TIME)
ect = ElapsedCpuTimer()
ect.setMaxTimeMillis(CompetitionParameters.START_TIME)
self.startAgent()
if ect.exceededMaxTime():
self.io.writeToServer(self.lastMessageId, 'START_FAILED', self.LOG)
else:
self.io.writeToServer(self.lastMessageId, (('START_DONE' + '#') + self.lastSsoType), self.LOG)
def startAgent(self):
try:
try:
module = importlib.import_module(self.agentName, __name__)
try:
self.player = getattr(module, 'Agent')()
self.lastSsoType = self.player.lastSsoType
except AttributeError:
logging.error('ERROR: Class does not exist')
traceback.print_exc()
sys.exit()
except ImportError:
logging.error('ERROR: Module does not exist')
traceback.print_exc()
sys.exit()
print('Agent startup [OK]')
except Exception as e:
logging.exception(e)
print('Agent startup [FAILED]')
traceback.print_exc()
sys.exit()
'\n * Manages the init of a game played.\n '
def init(self):
ect = ElapsedCpuTimer()
ect.setMaxTimeMillis(CompetitionParameters.INITIALIZATION_TIME)
self.player.init(self.sso, ect.copy())
self.lastSsoType = self.player.lastSsoType
if ect.exceededMaxTime():
self.io.writeToServer(self.lastMessageId, 'INIT_FAILED', self.LOG)
else:
self.io.writeToServer(self.lastMessageId, (('INIT_DONE' + '#') + self.lastSsoType), self.LOG)
'\n * Manages the action request for an agent. The agent is requested for an action,\n * which is sent back to the server\n '
def act(self):
ect = ElapsedCpuTimer()
ect.setMaxTimeMillis(CompetitionParameters.ACTION_TIME)
action = str(self.player.act(self.sso, ect.copy()))
if ((not action) or (action == '')):
action = 'ACTION_NIL'
self.lastSsoType = self.player.lastSsoType
if ect.exceededMaxTime():
if (ect.elapsedNanos() > (CompetitionParameters.ACTION_TIME_DISQ * 1000000)):
self.io.writeToServer(self.lastMessageId, 'END_OVERSPENT', self.LOG)
else:
self.io.writeToServer(self.lastMessageId, (('ACTION_NIL' + '#') + self.lastSsoType), self.LOG)
else:
self.io.writeToServer(self.lastMessageId, ((action + '#') + self.lastSsoType), self.LOG)
"\n * Manages the aresult sent to the agent. The time limit for this call will be TOTAL_LEARNING_TIME\n * or EXTRA_LEARNING_TIME if current global time is beyond TOTAL_LEARNING_TIME.\n * The agent is assumed to return the next level to play. It will be ignored if\n * a) All training levels have not been played yet (in which case the starting sequence 0-1-2 continues).\n * b) It's outside the range [0,4] (in which case we play one at random)\n * c) or we are in the validation phase (in which case the starting sequence 3-4 continues).\n "
def result(self):
ect = ElapsedCpuTimer()
if (not self.global_ect.exceededMaxTime()):
ect = self.global_ect.copy()
else:
ect.setMaxTimeMillis(CompetitionParameters.EXTRA_LEARNING_TIME)
nextLevel = self.player.result(self.sso, ect.copy())
self.lastSsoType = self.player.lastSsoType
if ect.exceededMaxTime():
self.io.writeToServer(self.lastMessageId, 'END_OVERSPENT', self.LOG)
elif self.global_ect.exceededMaxTime():
end_message = ('END_VALIDATION' if self.sso.isValidation else 'END_TRAINING')
self.io.writeToServer(self.lastMessageId, end_message, self.LOG)
else:
self.io.writeToServer(self.lastMessageId, ((str(nextLevel) + '#') + self.lastSsoType), self.LOG)
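# Illustrative sketch of the wire format that writeToServer and processLine above exchange: every
# message is a single line of the form "<messageId>#<payload>" terminated by a newline. The
# values used here are made up for demonstration.
def example_split_message(raw_line, token_sep='#'):
    message = raw_line.rstrip('\r\n').split(token_sep)
    return (message[0], message[1])  # (lastMessageId, payload such as 'START', 'FINISH' or a JSON state)
# example_split_message('42#START\n') -> ('42', 'START')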
|
class ClientCommGYM():
'\n * Client communication, set up the socket for a given agent\n '
def __init__(self, game, version, lvl, pathStr):
self.tempDir = tempfile.TemporaryDirectory()
self.addLevel('')
self.TOKEN_SEP = '#'
self.io = IOSocket(self.tempDir.name)
self.sso = SerializableStateObservation()
self.lastMessageId = 0
self.LOG = False
self.player = None
self.global_ect = None
self.lastSsoType = LEARNING_SSO_TYPE.JSON
self.sso.Terminal = False
baseDir = os.path.join(pathStr, 'gvgai')
srcDir = os.path.join(baseDir, 'src')
buildDir = os.path.join(baseDir, 'GVGAI_Build')
gamesDir = os.path.join(pathStr, 'games', '{}_v{}'.format(game, version))
cmd = ['java', '-classpath', buildDir, 'tracks.singleLearning.utils.JavaServer', '-game', game, '-gamesDir', gamesDir, '-imgDir', baseDir, '-portNum', str(self.io.port)]
sys.path.append(baseDir)
import check_build
if (not os.path.isdir(buildDir)):
raise Exception("Couldn't find build directory. Please run build.py from the install directory or reinstall with pip.")
elif (not check_build.isCorrectBuild(srcDir, buildDir)):
raise Exception('Your build is out of date. Please run build.py from the install directory or reinstall with pip.')
else:
try:
self.java = subprocess.Popen(cmd, stdout=subprocess.DEVNULL, cwd=self.tempDir.name)
except subprocess.CalledProcessError as e:
print('exit code: {}'.format(e.returncode))
print('stderr: {}'.format(e.stderr.decode(sys.getfilesystemencoding())))
self.startComm()
self.reset(lvl)
def startComm(self):
self.io.initBuffers()
self.reset(0)
'\n * Performs a single environment step: sends the chosen action to the server, reads and parses the\n * resulting state observation, and returns the screen image, the reward, the terminal flag and an\n * info dictionary.\n '
def step(self, act):
if (not self.sso.Terminal):
if (act == 0):
self.act('')
else:
action = self.sso.availableActions[(act - 1)]
self.act(action)
self.line = self.io.readLine()
self.line = self.line.rstrip('\r\n')
self.processLine(self.line)
score = self.reward()
self.lastScore = self.sso.gameScore
else:
score = 0
if ((self.sso.isGameOver == True) or (self.sso.gameWinner == 'PLAYER_WINS') or (self.sso.phase == 'FINISH') or (self.sso.phase == 'ABORT') or (self.sso.phase == 'End')):
self.sso.image = misc.imread(os.path.join(self.tempDir.name, 'gameStateByBytes.png'))
self.sso.Terminal = True
else:
self.sso.Terminal = False
actions = self.actions()
info = {'winner': self.sso.gameWinner, 'actions': self.actions()}
return (self.sso.image, score, self.sso.Terminal, info)
def reset(self, lvl):
self.lastScore = 0
if hasattr(self, 'line'):
flag = True
restart = True
if self.sso.Terminal:
self.io.writeToServer(self.lastMessageId, ((str(lvl) + '#') + self.lastSsoType), self.LOG)
else:
self.io.writeToServer(self.lastMessageId, 'END_OVERSPENT', self.LOG)
self.line = self.io.readLine()
self.line = self.line.rstrip('\r\n')
self.processLine(self.line)
self.io.writeToServer(self.lastMessageId, ((str(lvl) + '#') + self.lastSsoType), self.LOG)
else:
restart = True
flag = True
self.line = ''
while flag:
if restart:
self.line = self.io.readLine()
self.line = self.line.rstrip('\r\n')
self.processLine(self.line)
else:
self.line = ''
if (self.sso.phase == Phase.START):
self.start()
elif (self.sso.phase == 'INIT'):
self.sso.phase = Phase.INIT
self.init()
elif (self.sso.phase == 'ACT'):
flag = False
for i in range(1):
self.act(0)
self.line = self.io.readLine()
self.line = self.line.rstrip('\r\n')
self.processLine(self.line)
if ((self.sso.isGameOver == True) or (self.sso.gameWinner == 'WINNER') or (self.sso.phase == 'FINISH') or (self.sso.phase == 'End')):
self.sso.image = misc.imread(os.path.join(self.tempDir.name, 'gameStateByBytes.png'))
self.sso.Terminal = True
self.lastScore = 0
else:
self.sso.Terminal = False
return self.sso.image
def reward(self):
scoreDelta = (self.sso.gameScore - self.lastScore)
return scoreDelta
def actions(self):
nil = ['ACTION_NIL']
return (nil + self.sso.availableActions)
def as_sso(self, d):
self.sso.__dict__.update(d)
return self.sso
def parse_json(self, input):
parsed_input = json.loads(input)
self.sso.__dict__.update(parsed_input)
if parsed_input.get('observationGrid'):
self.sso.observationGrid = [[[None for j in range(self.sso.observationGridMaxCol)] for i in range(self.sso.observationGridMaxRow)] for k in range(self.sso.observationGridNum)]
for i in range(self.sso.observationGridNum):
for j in range(len(parsed_input['observationGrid'][i])):
for k in range(len(parsed_input['observationGrid'][i][j])):
self.sso.observationGrid[i][j][k] = Observation(parsed_input['observationGrid'][i][j][k])
if parsed_input.get('NPCPositions'):
self.sso.NPCPositions = [[None for j in range(self.sso.NPCPositionsMaxRow)] for i in range(self.sso.NPCPositionsNum)]
for i in range(self.sso.NPCPositionsNum):
for j in range(len(parsed_input['NPCPositions'][i])):
self.sso.NPCPositions[i][j] = Observation(parsed_input['NPCPositions'][i][j])
if parsed_input.get('immovablePositions'):
self.sso.immovablePositions = [[None for j in range(self.sso.immovablePositionsMaxRow)] for i in range(self.sso.immovablePositionsNum)]
for i in range(self.sso.immovablePositionsNum):
for j in range(len(parsed_input['immovablePositions'][i])):
self.sso.immovablePositions[i][j] = Observation(parsed_input['immovablePositions'][i][j])
if parsed_input.get('movablePositions'):
self.sso.movablePositions = [[None for j in range(self.sso.movablePositionsMaxRow)] for i in range(self.sso.movablePositionsNum)]
for i in range(self.sso.movablePositionsNum):
for j in range(len(parsed_input['movablePositions'][i])):
self.sso.movablePositions[i][j] = Observation(parsed_input['movablePositions'][i][j])
if parsed_input.get('resourcesPositions'):
self.sso.resourcesPositions = [[None for j in range(self.sso.resourcesPositionsMaxRow)] for i in range(self.sso.resourcesPositionsNum)]
for i in range(self.sso.resourcesPositionsNum):
for j in range(len(parsed_input['resourcesPositions'][i])):
self.sso.resourcesPositions[i][j] = Observation(parsed_input['resourcesPositions'][i][j])
if parsed_input.get('portalsPositions'):
self.sso.portalsPositions = [[None for j in range(self.sso.portalsPositionsMaxRow)] for i in range(self.sso.portalsPositionsNum)]
for i in range(self.sso.portalsPositionsNum):
for j in range(len(parsed_input['portalsPositions'][i])):
self.sso.portalsPositions[i][j] = Observation(parsed_input['portalsPositions'][i][j])
if parsed_input.get('fromAvatarSpritesPositions'):
self.sso.fromAvatarSpritesPositions = [[None for j in range(self.sso.fromAvatarSpritesPositionsMaxRow)] for i in range(self.sso.fromAvatarSpritesPositionsNum)]
for i in range(self.sso.fromAvatarSpritesPositionsNum):
for j in range(len(parsed_input['fromAvatarSpritesPositions'][i])):
self.sso.fromAvatarSpritesPositions[i][j] = Observation(parsed_input['fromAvatarSpritesPositions'][i][j])
"\n * Method that interprets the received messages from the server's side.\n * A message can either be a string (in the case of initialization), or\n * a json object containing an encapsulated state observation.\n * This method deserializes the json object into a local state observation\n * instance.\n * @param msg Message received from server to be interpreted.\n * @throws IOException\n "
def processLine(self, msg):
try:
if (msg is None):
print('Message is null')
return
message = msg.split(self.TOKEN_SEP)
if (len(message) < 2):
print('Message not complete')
return
self.lastMessageId = message[0]
js = message[1]
self.sso = SerializableStateObservation()
if (js == 'START'):
self.sso.phase = Phase.START
elif (js == 'FINISH'):
self.sso.phase = Phase.FINISH
else:
js.replace('"', '')
self.parse_json(js)
if (self.sso.phase == 'ACT'):
if ((self.lastSsoType == LEARNING_SSO_TYPE.IMAGE) or (self.lastSsoType == 'IMAGE') or (self.lastSsoType == LEARNING_SSO_TYPE.BOTH) or (self.lastSsoType == 'BOTH')):
if self.sso.imageArray:
self.sso.convertBytesToPng(self.sso.imageArray, self.tempDir.name)
self.sso.image = misc.imread(os.path.join(self.tempDir.name, 'gameStateByBytes.png'))
except Exception as e:
logging.exception(e)
print('Line processing [FAILED]')
sys.exit()
'\n * Manages the start of the communication. It starts the whole process, and sets up the timer for the whole run.\n '
def start(self):
self.global_ect = ElapsedCpuTimer()
self.global_ect.setMaxTimeMillis(CompetitionParameters.TOTAL_LEARNING_TIME)
ect = ElapsedCpuTimer()
ect.setMaxTimeMillis(CompetitionParameters.START_TIME)
if ect.exceededMaxTime():
self.io.writeToServer(self.lastMessageId, 'START_FAILED', self.LOG)
else:
self.io.writeToServer(self.lastMessageId, (('START_DONE' + '#') + self.lastSsoType), self.LOG)
def init(self):
ect = ElapsedCpuTimer()
ect.setMaxTimeMillis(CompetitionParameters.INITIALIZATION_TIME)
self.lastSsoType = LEARNING_SSO_TYPE.IMAGE
actions = self.actions()
if ect.exceededMaxTime():
self.io.writeToServer(self.lastMessageId, 'INIT_FAILED', self.LOG)
else:
self.io.writeToServer(self.lastMessageId, (('INIT_DONE' + '#') + self.lastSsoType), self.LOG)
'\n * Manages the action request for an agent. The agent is requested for an action,\n * which is sent back to the server\n '
def act(self, action):
ect = ElapsedCpuTimer()
ect.setMaxTimeMillis(CompetitionParameters.ACTION_TIME)
if ((not action) or (action == '')):
action = 'ACTION_NIL'
self.lastSsoType = LEARNING_SSO_TYPE.IMAGE
if ect.exceededMaxTime():
if (ect.elapsedNanos() > (CompetitionParameters.ACTION_TIME_DISQ * 1000000)):
self.io.writeToServer(self.lastMessageId, 'END_OVERSPENT', self.LOG)
else:
self.io.writeToServer(self.lastMessageId, (('ACTION_NIL' + '#') + self.lastSsoType), self.LOG)
else:
self.io.writeToServer(self.lastMessageId, ((action + '#') + self.lastSsoType), self.LOG)
def addLevel(self, path):
lvlName = os.path.join(self.tempDir.name, 'game_lvl5.txt')
if (path == ''):
open(lvlName, 'w+').close()
else:
shutil.copyfile(path, lvlName)
def __del__(self):
try:
self.java.kill()
except:
pass
|
class CompetitionParameters():
'\n * Competition parameters, should be the same with the ones on Server side:\n * refer to core.competition.CompetitionParameters\n '
def __init__(self):
pass
if ('win32' in sys.platform):
OS_WIN = True
else:
OS_WIN = False
USE_SOCKETS = True
START_TIME = 1000
INITIALIZATION_TIME = 1000
ACTION_TIME = 40
ACTION_TIME_DISQ = 50
MILLIS_IN_MIN = (60 * 1000)
TOTAL_LEARNING_TIME = np.inf
EXTRA_LEARNING_TIME = np.inf
SOCKET_PORT = 8080
SCREENSHOT_FILENAME = 'gameStateByBytes.png'
|
class ElapsedCpuTimer():
'\n * Timer, corresponding to the server Java code tools.ElapsedCpuTimer\n '
def __init__(self):
self.oldTime = self.getTime()
self.maxTime = 0
if sys.platform.startswith('win32'):
OS_WIN = True
else:
OS_WIN = False
def copy(self):
newCpuTimer = ElapsedCpuTimer()
newCpuTimer.maxTime = self.maxTime
newCpuTimer.oldTime = self.oldTime
return newCpuTimer
def elapsed(self):
return (self.getTime() - self.oldTime)
def elapsedNanos(self):
return self.elapsed()
def elapsedMillis(self):
return (self.elapsed() / 1000000)
def elapsedSeconds(self):
return (self.elapsedMillis() / 1000)
def elapsedMinutes(self):
return (self.elapsedSeconds() / 60.0)
def elapsedHours(self):
return (self.elapsedMinutes() / 60)
def getTime(self):
return (time.time() * 1000000000)
'\n * Return current time in milliseconds\n '
def getCpuTime(self):
return int(round((time.time() * 1000)))
def setMaxTimeMillis(self, timeToSet):
self.maxTime = (timeToSet * 1000000)
self.oldTime = self.getTime()
def remainingTimeMillis(self):
diff = (self.maxTime - self.elapsed())
return diff
def exceededMaxTime(self):
return (self.elapsed() > self.maxTime)
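# Minimal usage sketch (not from the original sources): the timer above measures wall-clock time in
# nanoseconds and exceededMaxTime compares it against a budget set in milliseconds, which is how
# ClientComm enforces the 40 ms act() budget (CompetitionParameters.ACTION_TIME).
def example_time_budget():
    ect = ElapsedCpuTimer()
    ect.setMaxTimeMillis(40)       # 40 ms budget, as used for action selection
    # ... the agent's act() work would run here ...
    return ect.exceededMaxTime()   # True once more than 40 ms of wall-clock time has elapsed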
|
class IOSocket():
'\n * Socket for communication\n '
def __init__(self, tmpDir):
self.BUFF_SIZE = 8192
self.END = '\n'
self.TOKEN_SEP = '#'
(self.hostname, self.port) = self.getOpenAddress()
self.connected = False
self.socket = None
self.logfilename = os.path.normpath(os.path.join(tmpDir, 'logs', 'clientLog.txt'))
os.makedirs(os.path.join(tmpDir, 'logs'), exist_ok=True)
self.logfile = open(self.logfilename, 'a')
def initBuffers(self):
print((((('Connecting to host ' + str(self.hostname)) + ' at port ') + str(self.port)) + ' ...'))
while (not self.connected):
try:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((self.hostname, self.port))
self.connected = True
print('Client connected to server [OK]')
except Exception as e:
time.sleep(1)
def writeToFile(self, line):
sys.stdout.write((line + os.linesep))
self.logfile.write((line + os.linesep))
sys.stdout.flush()
self.logfile.flush()
def writeToServer(self, messageId, line, log):
msg = (((str(messageId) + self.TOKEN_SEP) + line) + '\n')
try:
self.socket.send(msg.encode())
if log:
self.writeToFile(msg.strip('\n'))
except Exception as e:
logging.exception(e)
print((('Write ' + self.logfilename) + ' to server [FAILED]'))
traceback.print_exc()
sys.exit()
def readLine(self):
try:
msg = self.recv_end()
return msg
except Exception as e:
logging.exception(e)
print('Read from server [FAILED]')
traceback.print_exc()
sys.exit()
def shutDown(self):
self.socket.shutdown(0)
def getOpenAddress(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('localhost', 0))
address = s.getsockname()
s.close()
return address
def recv_end(self):
total_data = []
data = ''
while True:
databuffer = self.socket.recv(self.BUFF_SIZE)
data = databuffer.decode()
if (self.END in data):
total_data.append(data[:data.find(self.END)])
break
total_data.append(data)
if (len(total_data) > 1):
last_pair = (total_data[(- 2)] + total_data[(- 1)])
if (self.END in last_pair):
total_data[(- 2)] = last_pair[:last_pair.find(self.END)]
total_data.pop()
break
return ''.join(total_data)
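# Standalone sketch of the framing recv_end above implements: chunks read from the socket are
# accumulated until the '\n' terminator appears, and anything after the terminator is dropped.
# This simplified version ignores the case of a terminator split across two chunks, which
# recv_end handles explicitly.
def example_recv_end(chunks, end='\n'):
    total_data = []
    for chunk in chunks:
        if (end in chunk):
            total_data.append(chunk[:chunk.find(end)])
            break
        total_data.append(chunk)
    return ''.join(total_data)
# example_recv_end(['12#STA', 'RT\nleftover']) -> '12#START'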
|
class PythonClient():
'\n * Python Client given an agent name (SampleRandomAgent by default)\n '
def __init__(self, args):
if (len(args) > 0):
agentName = args
else:
print('ERROR: Missing argument')
sys.exit()
print('start python client')
ccomm = ClientComm(agentName)
ccomm.startComm()
|
class SerializableStateObservation():
'\n * Serialized state observation, corresponding to the Java Client code:\n * GVGAI-JavaClient.src.serialization.SerializableStateObservation\n '
def __init__(self):
self.imageArray = bytearray([])
self.phase = Phase()
self.isValidation = True
self.winner = WINNER()
self.actions = ACTIONS()
self.gameScore = 0.0
self.gameTick = 0
self.gameWinner = self.winner.NO_WINNER
self.isGameOver = True
self.worldDimension = []
self.blockSize = 0
self.noOfPlayers = 0
self.avatarSpeed = 0.0
self.avatarOrientation = []
self.avatarPosition = []
self.avatarLastAction = None
self.avatarType = 0
self.avatarHealthPoints = 0
self.avatarMaxHealthPoints = 0
self.avatarLimitHealthPoints = 0
self.isAvatarAlive = True
self.availableActions = []
self.avatarResources = {}
self.observationGrid = []
self.NPCPositionsNum = 0
self.NPCPositionsMaxRow = 0
self.NPCPositions = []
self.immovablePositions = []
self.immovablePositionsNum = 0
self.immovablePositionsMaxRow = 0
self.movablePositions = []
self.resourcesPositions = []
self.portalsPositions = []
self.fromAvatarSpritesPositions = []
def convertBytesToPng(self, pixels, loc):
for (i, e) in enumerate(pixels):
pixels[i] = (e & 255)
image = Image.open(io.BytesIO(bytearray(pixels)))
image.save(path.join(loc, CompetitionParameters.SCREENSHOT_FILENAME))
|
class Phase():
'\n * Used to control the communication between server and client, corresponding to the Java Client code:\n * GVGAI-JavaClient.src.serialization.SerializableStateObservation\n '
def __init__(self):
pass
(START, INIT, ACT, ABORT, END, FINISH) = range(6)
|
class Observation():
'\n * Serialized state observation, corresponding to the Java Client code:\n * GVGAI-JavaClient.src.serialization.Observation\n '
def __init__(self, parse_Observation=None):
if (parse_Observation is None):
self.category = (- 1)
self.itype = (- 1)
self.obsID = (- 1)
self.position = Vector2d()
self.reference = Vector2d()
self.sqDist = (- 1)
else:
self.category = parse_Observation['category']
self.itype = parse_Observation['itype']
self.obsID = parse_Observation['obsID']
self.position = Vector2d(parse_Observation['position'])
self.reference = Vector2d(parse_Observation['reference'])
self.sqDist = parse_Observation['sqDist']
|
class Vector2d():
'\n * Serialized state observation, corresponding to the Java Client code:\n * GVGAI-JavaClient.src.serialization.Vector2d\n '
def __init__(self, position=None):
if (position is None):
self.x = (- 1)
self.y = (- 1)
else:
self.x = position['x']
self.y = position['y']
|
class ACTIONS():
'\n * All action types, corresponding to the server Java code ontology.Types\n '
def __init__(self):
pass
ACTION_NIL = 0
ACTION_UP = 1
ACTION_LEFT = 2
ACTION_DOWN = 3
ACTION_RIGHT = 4
ACTION_USE = 5
ACTION_ESCAPE = 6
|
class WINNER():
'\n * Winner/Loser types, corresponding to the server Java code ontology.Types\n '
def __init__(self):
pass
PLAYER_DISQ = (- 100)
NO_WINNER = (- 1)
PLAYER_LOSES = 0
PLAYER_WINS = 1
|
class LEARNING_SSO_TYPE():
def __init__(self):
pass
IMAGE = 'IMAGE'
JSON = 'JSON'
BOTH = 'BOTH'
|
class InstallWithJava(install):
def run(self):
'compile special java dependencies before the others.'
java.main(path)
install.run(self)
|
class DevelopWithJava(develop):
def run(self):
'compile special java dependencies before the others.'
java.main(path)
develop.run(self)
|
class EggInfoWithJava(egg_info):
def run(self):
'compile special java dependencies before the others.'
egg_info.run(self)
|
def alexnet(image):
net_data = np.load('./perceptual_models/alexnet/bvlc_alexnet.npy', allow_pickle=True).item()
k_h = 11
k_w = 11
c_o = 96
s_h = 4
s_w = 4
conv1W = tf.Variable(net_data['conv1'][0])
conv1b = tf.Variable(net_data['conv1'][1])
conv1 = relu(conv(image, conv1W, conv1b, k_h, k_w, c_o, s_h, s_w, padding='SAME', group=1))
radius = 2
alpha = 2e-05
beta = 0.75
bias = 1.0
lrn1 = tf.nn.local_response_normalization(conv1, depth_radius=radius, alpha=alpha, beta=beta, bias=bias)
k_h = 3
k_w = 3
s_h = 2
s_w = 2
padding = 'VALID'
maxpool1 = tf.nn.max_pool(lrn1, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=padding)
k_h = 5
k_w = 5
c_o = 256
s_h = 1
s_w = 1
group = 2
conv2W = tf.Variable(net_data['conv2'][0])
conv2b = tf.Variable(net_data['conv2'][1])
conv2 = relu(conv(maxpool1, conv2W, conv2b, k_h, k_w, c_o, s_h, s_w, padding='SAME', group=group))
radius = 2
alpha = 2e-05
beta = 0.75
bias = 1.0
lrn2 = tf.nn.local_response_normalization(conv2, depth_radius=radius, alpha=alpha, beta=beta, bias=bias)
k_h = 3
k_w = 3
s_h = 2
s_w = 2
padding = 'VALID'
maxpool2 = tf.nn.max_pool(lrn2, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=padding)
k_h = 3
k_w = 3
c_o = 384
s_h = 1
s_w = 1
group = 1
conv3W = tf.Variable(net_data['conv3'][0])
conv3b = tf.Variable(net_data['conv3'][1])
conv3 = relu(conv(maxpool2, conv3W, conv3b, k_h, k_w, c_o, s_h, s_w, padding='SAME', group=group))
k_h = 3
k_w = 3
c_o = 384
s_h = 1
s_w = 1
group = 2
conv4W = tf.Variable(net_data['conv4'][0])
conv4b = tf.Variable(net_data['conv4'][1])
conv4 = relu(conv(conv3, conv4W, conv4b, k_h, k_w, c_o, s_h, s_w, padding='SAME', group=group))
k_h = 3
k_w = 3
c_o = 256
s_h = 1
s_w = 1
group = 2
conv5W = tf.Variable(net_data['conv5'][0])
conv5b = tf.Variable(net_data['conv5'][1])
conv5 = relu(conv(conv4, conv5W, conv5b, k_h, k_w, c_o, s_h, s_w, padding='SAME', group=group))
return conv5
|
def conv(input_, kernel, biases, k_h, k_w, c_o, s_h, s_w, padding='VALID', group=1):
'From https://github.com/ethereon/caffe-tensorflow\n '
c_i = input_.get_shape()[(- 1)]
assert ((c_i % group) == 0)
assert ((c_o % group) == 0)
convolve = (lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding))
if (group == 1):
conv = convolve(input_, kernel)
else:
input_groups = tf.split(axis=3, num_or_size_splits=group, value=input_)
kernel_groups = tf.split(axis=3, num_or_size_splits=group, value=kernel)
output_groups = [convolve(i, k) for (i, k) in zip(input_groups, kernel_groups)]
conv = tf.concat(axis=3, values=output_groups)
return tf.reshape(tf.nn.bias_add(conv, biases), ([(- 1)] + conv.get_shape().as_list()[1:]))
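# Shape sketch for the grouped convolution above (illustrative only; NumPy is used just to show the
# channel bookkeeping, while the real graph uses tf.split and tf.nn.conv2d). With group=2, the c_i
# input channels and the c_o output channels are split into two independent halves that are
# convolved separately and concatenated back on the channel axis.
import numpy as np
def example_group_split(c_i=256, c_o=384, group=2):
    fake_input = np.zeros((1, 13, 13, c_i), dtype=np.float32)
    branches = np.split(fake_input, group, axis=3)             # two (1, 13, 13, 128) tensors
    output_channels_per_branch = c_o // group                  # 192 output channels per branch
    return ([b.shape for b in branches], output_channels_per_branch)
# example_group_split() -> ([(1, 13, 13, 128), (1, 13, 13, 128)], 192)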
|
def convBlock(numIn, numOut, inp, params, idx):
epsilon = 1e-05
beta1 = tf.Variable(params[idx])
idx = (idx + 1)
gamma1 = tf.Variable(params[idx])
idx = (idx + 1)
(batch_mean1, batch_var1) = tf.nn.moments(inp, [0, 1, 2], name='moments')
bn1 = tf.nn.batch_norm_with_global_normalization(inp, batch_mean1, batch_var1, beta1, gamma1, epsilon, scale_after_normalization=False)
bn1 = relu(bn1)
conv1 = conv2dp(bn1, (numOut // 2), params[idx:(idx + 2)])
idx = (idx + 2)
beta2 = tf.Variable(params[idx])
idx = (idx + 1)
gamma2 = tf.Variable(params[idx])
idx = (idx + 1)
(batch_mean2, batch_var2) = tf.nn.moments(conv1, [0, 1, 2], name='moments')
bn2 = tf.nn.batch_norm_with_global_normalization(conv1, batch_mean2, batch_var2, beta2, gamma2, epsilon, scale_after_normalization=False)
bn2 = relu(bn2)
conv2 = conv2dp(bn2, (numOut // 2), params[idx:(idx + 2)])
idx = (idx + 2)
beta3 = tf.Variable(params[idx])
idx = (idx + 1)
gamma3 = tf.Variable(params[idx])
idx = (idx + 1)
(batch_mean3, batch_var3) = tf.nn.moments(conv2, [0, 1, 2], name='moments')
bn3 = tf.nn.batch_norm_with_global_normalization(conv2, batch_mean3, batch_var3, beta3, gamma3, epsilon, scale_after_normalization=False)
bn3 = relu(bn3)
conv3 = conv2dp(bn3, numOut, params[idx:(idx + 2)])
idx = (idx + 2)
return (conv3, idx)
|
def skipLayer(numIn, numOut, inp, params, idx):
if (numIn == numOut):
return (inp, idx)
else:
conv1 = conv2dp(inp, numOut, params[idx:(idx + 2)])
idx = (idx + 2)
return (conv1, idx)
|
def residual(numIn, numOut, inp, params, idx):
(convb, idx) = convBlock(numIn, numOut, inp, params, idx)
(skipl, idx) = skipLayer(numIn, numOut, inp, params, idx)
return (tf.add(convb, skipl), idx)
|
def hourglass(n, numIn, numOut, inp, params, idx, layers):
(up1, idx) = residual(numIn, 256, inp, params, idx)
(up2, idx) = residual(256, 256, up1, params, idx)
(up4, idx) = residual(256, numOut, up2, params, idx)
pool1 = MaxPooling(inp, [2, 2])
(low1, idx) = residual(numIn, 256, pool1, params, idx)
(low2, idx) = residual(256, 256, low1, params, idx)
(low5, idx) = residual(256, 256, low2, params, idx)
if (n > 1):
(low6, idx, layers) = hourglass((n - 1), 256, numOut, low5, params, idx, layers)
else:
(low6, idx) = residual(256, numOut, low5, params, idx)
(low7, idx) = residual(numOut, numOut, low6, params, idx)
up5 = tf.image.resize_images(low7, [int((low7.get_shape()[1] * 2)), int((low7.get_shape()[2] * 2))])
layers.append(tf.add(up4, up5))
return (tf.add(up4, up5), idx, layers)
|
def get_params():
params = []
    for i in range(1, 801):
        p = np.load((('./perceptual_models/hourglass/hourglass_weights_' + str(i)) + '.npy'))
        if (len(p.shape) == 4):
            # Convert conv kernels from the stored (out_c, in_c, h, w) layout to
            # TensorFlow's (h, w, in_c, out_c) layout.
            p = p.swapaxes(0, 1).swapaxes(0, 2).swapaxes(1, 3)
params.append(p)
return params
|
def hg_forward(inp):
epsilon = 1e-05
params = get_params()
idx = 0
inp = tf.image.resize_images(inp, [256, 256])
conv1_ = conv2dp(inp, 64, params[idx:(idx + 2)], d_h=2, d_w=2)
idx = (idx + 2)
beta1 = tf.Variable(params[idx])
idx = (idx + 1)
gamma1 = tf.Variable(params[idx])
idx = (idx + 1)
(batch_mean1, batch_var1) = tf.nn.moments(conv1_, [0, 1, 2], name='moments')
bn1 = tf.nn.batch_norm_with_global_normalization(conv1_, batch_mean1, batch_var1, beta1, gamma1, epsilon, scale_after_normalization=False)
conv1 = relu(bn1)
(r1, idx) = residual(64, 128, conv1, params, idx)
pool = MaxPooling(r1, [2, 2])
(r4, idx) = residual(128, 128, pool, params, idx)
(r5, idx) = residual(128, 128, r4, params, idx)
(r6, idx) = residual(128, 256, r5, params, idx)
layers = []
(out, idx, layers) = hourglass(4, 256, 512, r6, params, idx, layers)
return layers
|
def batch_norm(inputs, name, train=True, reuse=False):
return tf.contrib.layers.batch_norm(inputs=inputs, is_training=train, reuse=reuse, scope=name, scale=True)
|
def local_response_norm(x):
return tf.nn.local_response_normalization(x)
|
def binary_cross_entropy(preds, targets, name=None):
    'Computes binary cross entropy given `preds`.\n\n For brevity, let `x = preds`, `z = targets`. The logistic loss is\n\n loss(x, z) = - sum_i (z[i] * log(x[i]) + (1 - z[i]) * log(1 - x[i]))\n\n Args:\n preds: A `Tensor` of type `float32` or `float64`.\n targets: A `Tensor` of the same type and shape as `preds`.\n '
eps = 1e-12
with ops.op_scope([preds, targets], name, 'bce_loss') as name:
preds = ops.convert_to_tensor(preds, name='preds')
targets = ops.convert_to_tensor(targets, name='targets')
return tf.reduce_mean((- ((targets * tf.log((preds + eps))) + ((1.0 - targets) * tf.log(((1.0 - preds) + eps))))))
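
# Numeric sanity check of the expression above, written in plain NumPy so it
# mirrors the TF formula -- a minimal sketch with made-up values: for
# preds=[0.9, 0.1] and targets=[1, 0] the loss is -log(0.9) ~= 0.105.
import numpy as np
_preds = np.array([0.9, 0.1])
_targets = np.array([1.0, 0.0])
_eps = 1e-12
print(np.mean(-(_targets * np.log(_preds + _eps) + (1.0 - _targets) * np.log(1.0 - _preds + _eps))))  # ~0.105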
|
def conv_cond_concat(x, y):
'Concatenate conditioning vector on feature map axis.'
x_shapes = x.get_shape()
y_shapes = y.get_shape()
return tf.concat(axis=3, values=[x, (y * tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]]))])
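
# Shape sketch (assumes TensorFlow 1.x): the condition y, shaped [N, 1, 1, K],
# is broadcast over the spatial grid and stacked onto the feature maps along
# the channel axis, as used for conditioning in conditional GANs.
import tensorflow as tf
feat = tf.placeholder(tf.float32, [4, 16, 16, 64])
cond = tf.placeholder(tf.float32, [4, 1, 1, 10])
print(conv_cond_concat(feat, cond).get_shape().as_list())  # [4, 16, 16, 74]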
|
def conv2d(input_, output_dim, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, name='conv2d', reuse=False, padding='SAME'):
with tf.variable_scope(name, reuse=reuse):
w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[(- 1)], output_dim], initializer=tf.contrib.layers.xavier_initializer())
conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding=padding)
biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
return conv
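
# Usage sketch (assumes TensorFlow 1.x): with the default 5x5 kernel, stride 2
# and SAME padding, the spatial resolution is halved. A fixed batch size is used
# because the reshape above requires a fully defined static shape.
import tensorflow as tf
images = tf.placeholder(tf.float32, [8, 64, 64, 3])
h0 = conv2d(images, 32, name='conv2d_example')
print(h0.get_shape().as_list())  # [8, 32, 32, 32]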
|
def sigmoid(x):
return tf.nn.sigmoid(x)
|
def conv2dp(input_, output_dim, params, d_h=1, d_w=1):
w = tf.Variable(params[0])
conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
biases = tf.Variable(params[1])
conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
return conv
|
def deconv2d(input_, output_shape, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, name='deconv2d', reuse=False, with_w=False, padding='SAME'):
with tf.variable_scope(name, reuse=reuse):
        w = tf.get_variable('w', [k_h, k_w, output_shape[(- 1)], input_.get_shape()[(- 1)]], initializer=tf.contrib.layers.xavier_initializer())
try:
deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1], padding=padding)
except AttributeError:
deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1])
biases = tf.get_variable('biases', [output_shape[(- 1)]], initializer=tf.constant_initializer(0.0))
deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())
if with_w:
return (deconv, w, biases)
else:
return deconv
|
def lrelu(x, leak=0.2, name='lrelu'):
with tf.variable_scope(name):
f1 = (0.5 * (1 + leak))
f2 = (0.5 * (1 - leak))
return ((f1 * x) + (f2 * abs(x)))
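
# The algebraic form above is the usual leaky ReLU: for x >= 0 it gives
# f1*x + f2*x = x, and for x < 0 it gives f1*x - f2*x = leak*x. Quick check
# (assumes TensorFlow 1.x):
import tensorflow as tf
vals = tf.constant([-2.0, 3.0])
with tf.Session() as sess:
    print(sess.run(lrelu(vals)))                   # [-0.4  3. ]
    print(sess.run(tf.maximum(vals, 0.2 * vals)))  # same values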
|
def relu(x):
return tf.nn.relu(x)
|
def tanh(x):
return tf.nn.tanh(x)
|
def shape2d(a):
    '\n a: an int or a tuple/list of length 2\n '
if (type(a) == int):
return [a, a]
if isinstance(a, (list, tuple)):
assert (len(a) == 2)
return list(a)
raise RuntimeError('Illegal shape: {}'.format(a))
|
def shape4d(a):
return (([1] + shape2d(a)) + [1])
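
# Quick check: shape2d/shape4d normalize an int or an [h, w] pair into the NHWC
# ksize/strides form expected by tf.nn.max_pool.
print(shape2d(3))       # [3, 3]
print(shape4d(3))       # [1, 3, 3, 1]
print(shape4d([2, 3]))  # [1, 2, 3, 1]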
|
def UnPooling2x2ZeroFilled(x):
out = tf.concat(axis=3, values=[x, tf.zeros_like(x)])
out = tf.concat(axis=2, values=[out, tf.zeros_like(out)])
sh = x.get_shape().as_list()
if (None not in sh[1:]):
out_size = [(- 1), (sh[1] * 2), (sh[2] * 2), sh[3]]
return tf.reshape(out, out_size)
else:
sh = tf.shape(x)
return tf.reshape(out, [(- 1), (sh[1] * 2), (sh[2] * 2), sh[3]])
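
# Behaviour check (assumes TensorFlow 1.x): each input value lands in the
# top-left corner of a 2x2 block and the rest is zero-filled.
import numpy as np
import tensorflow as tf
grid = tf.constant(np.arange(1, 5, dtype=np.float32).reshape(1, 2, 2, 1))
with tf.Session() as sess:
    print(sess.run(UnPooling2x2ZeroFilled(grid))[0, :, :, 0])
    # [[1. 0. 2. 0.]
    #  [0. 0. 0. 0.]
    #  [3. 0. 4. 0.]
    #  [0. 0. 0. 0.]]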
|
def MaxPooling(x, shape, stride=None, padding='VALID'):
"\n MaxPooling on images.\n :param input: NHWC tensor.\n :param shape: int or [h, w]\n :param stride: int or [h, w]. default to be shape.\n :param padding: 'valid' or 'same'. default to 'valid'\n :returns: NHWC tensor.\n "
padding = padding.upper()
shape = shape4d(shape)
if (stride is None):
stride = shape
else:
stride = shape4d(stride)
return tf.nn.max_pool(x, ksize=shape, strides=stride, padding=padding)
|
def FixedUnPooling(x, shape, unpool_mat=None):
    '\n Unpool the input with a fixed matrix, i.e. a Kronecker product of the input with `unpool_mat`.\n :param x: NHWC tensor\n :param shape: int or [h, w]\n :param unpool_mat: a tf/np matrix of size `shape`. If None, a matrix with 1 at the top-left corner is used.\n :returns: NHWC tensor\n '
shape = shape2d(shape)
if ((shape[0] == 2) and (shape[1] == 2) and (unpool_mat is None)):
return UnPooling2x2ZeroFilled(x)
input_shape = tf.shape(x)
if (unpool_mat is None):
mat = np.zeros(shape, dtype='float32')
mat[0][0] = 1
unpool_mat = tf.Variable(mat, trainable=False, name='unpool_mat')
elif isinstance(unpool_mat, np.ndarray):
unpool_mat = tf.Variable(unpool_mat, trainable=False, name='unpool_mat')
assert (unpool_mat.get_shape().as_list() == list(shape))
fx = flatten(tf.transpose(x, [0, 3, 1, 2]))
fx = tf.expand_dims(fx, (- 1))
mat = tf.expand_dims(flatten(unpool_mat), 0)
prod = tf.matmul(fx, mat)
    prod = tf.reshape(prod, tf.stack([(- 1), input_shape[3], input_shape[1], input_shape[2], shape[0], shape[1]]))
    prod = tf.transpose(prod, [0, 2, 4, 3, 5, 1])
    prod = tf.reshape(prod, tf.stack([(- 1), (input_shape[1] * shape[0]), (input_shape[2] * shape[1]), input_shape[3]]))
return prod
|
def linear(input_, output_size, name, stddev=0.02, bias_start=0.0, reuse=False, with_w=False):
shape = input_.get_shape().as_list()
with tf.variable_scope(name, reuse=reuse):
matrix = tf.get_variable('Matrix', [shape[1], output_size], tf.float32, tf.random_normal_initializer(stddev=stddev))
bias = tf.get_variable('bias', [output_size], initializer=tf.constant_initializer(bias_start))
if with_w:
return ((tf.matmul(input_, matrix) + bias), matrix, bias)
else:
return (tf.matmul(input_, matrix) + bias)
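
# Usage sketch (assumes TensorFlow 1.x): a fully connected layer mapping 128
# features to 10 outputs; pass with_w=True to also get the weight and bias.
import tensorflow as tf
feats = tf.placeholder(tf.float32, [None, 128])
logits = linear(feats, 10, name='linear_example')
print(logits.get_shape().as_list())  # [None, 10]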
|