def rebuild_cond_unit_col(valid_col_units, cond_unit, kmap):
if (cond_unit is None):
return cond_unit
(not_op, op_id, val_unit, val1, val2) = cond_unit
val_unit = rebuild_val_unit_col(valid_col_units, val_unit, kmap)
return (not_op, op_id, val_unit, val1, val2)
|
def rebuild_condition_col(valid_col_units, condition, kmap):
for idx in range(len(condition)):
if ((idx % 2) == 0):
condition[idx] = rebuild_cond_unit_col(valid_col_units, condition[idx], kmap)
return condition
|
def rebuild_select_col(valid_col_units, sel, kmap):
if (sel is None):
return sel
(distinct, _list) = sel
new_list = []
for it in _list:
(agg_id, val_unit) = it
new_list.append((agg_id, rebuild_val_unit_col(valid_col_units, val_unit, kmap)))
if DISABLE_DISTINCT:
distinct = None
return (distinct, new_list)
|
def rebuild_from_col(valid_col_units, from_, kmap):
if (from_ is None):
return from_
from_['table_units'] = [rebuild_table_unit_col(valid_col_units, table_unit, kmap) for table_unit in from_['table_units']]
from_['conds'] = rebuild_condition_col(valid_col_units, from_['conds'], kmap)
return from_
|
def rebuild_group_by_col(valid_col_units, group_by, kmap):
if (group_by is None):
return group_by
return [rebuild_col_unit_col(valid_col_units, col_unit, kmap) for col_unit in group_by]
|
def rebuild_order_by_col(valid_col_units, order_by, kmap):
if ((order_by is None) or (len(order_by) == 0)):
return order_by
(direction, val_units) = order_by
new_val_units = [rebuild_val_unit_col(valid_col_units, val_unit, kmap) for val_unit in val_units]
return (direction, new_val_units)
|
def rebuild_sql_col(valid_col_units, sql, kmap):
if (sql is None):
return sql
sql['select'] = rebuild_select_col(valid_col_units, sql['select'], kmap)
sql['from'] = rebuild_from_col(valid_col_units, sql['from'], kmap)
sql['where'] = rebuild_condition_col(valid_col_units, sql['where'], kmap)
sql['groupBy'] = rebuild_group_by_col(valid_col_units, sql['groupBy'], kmap)
sql['orderBy'] = rebuild_order_by_col(valid_col_units, sql['orderBy'], kmap)
sql['having'] = rebuild_condition_col(valid_col_units, sql['having'], kmap)
sql['intersect'] = rebuild_sql_col(valid_col_units, sql['intersect'], kmap)
sql['except'] = rebuild_sql_col(valid_col_units, sql['except'], kmap)
sql['union'] = rebuild_sql_col(valid_col_units, sql['union'], kmap)
return sql
|
def build_foreign_key_map(entry):
cols_orig = entry['column_names_original']
tables_orig = entry['table_names_original']
cols = []
for col_orig in cols_orig:
if (col_orig[0] >= 0):
t = tables_orig[col_orig[0]]
c = col_orig[1]
cols.append((((('__' + t.lower()) + '.') + c.lower()) + '__'))
else:
cols.append('__all__')
def keyset_in_list(k1, k2, k_list):
for k_set in k_list:
if ((k1 in k_set) or (k2 in k_set)):
return k_set
new_k_set = set()
k_list.append(new_k_set)
return new_k_set
foreign_key_list = []
foreign_keys = entry['foreign_keys']
for fkey in foreign_keys:
(key1, key2) = fkey
key_set = keyset_in_list(key1, key2, foreign_key_list)
key_set.add(key1)
key_set.add(key2)
foreign_key_map = {}
for key_set in foreign_key_list:
sorted_list = sorted(list(key_set))
midx = sorted_list[0]
for idx in sorted_list:
foreign_key_map[cols[idx]] = cols[midx]
return foreign_key_map
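# A minimal usage sketch (not part of the original file): a hypothetical two-table
# entry in the Spider tables.json format, where column 3 (pets.owner_id) references
# column 1 (people.id).
example_entry = {
    'table_names_original': ['people', 'pets'],
    'column_names_original': [[-1, '*'], [0, 'id'], [0, 'name'], [1, 'owner_id'], [1, 'pet_name']],
    'foreign_keys': [[3, 1]],
}
# Both '__pets.owner_id__' and '__people.id__' map to the same representative
# '__people.id__', so the two columns are treated as interchangeable during evaluation.
print(build_foreign_key_map(example_entry))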
|
def build_foreign_key_map_from_json(table):
with open(table) as f:
data = json.load(f)
tables = {}
for entry in data:
tables[entry['db_id']] = build_foreign_key_map(entry)
return tables
|
class Schema():
"""Simple schema which maps table & column to a unique identifier."""
def __init__(self, schema, table):
self._schema = schema
self._table = table
self._idMap = self._map(self._schema, self._table)
@property
def schema(self):
return self._schema
@property
def idMap(self):
return self._idMap
def _map(self, schema, table):
column_names_original = table['column_names_original']
table_names_original = table['table_names_original']
for (i, (tab_id, col)) in enumerate(column_names_original):
if (tab_id == (- 1)):
idMap = {'*': i}
else:
key = table_names_original[tab_id].lower()
val = col.lower()
idMap[((key + '.') + val)] = i
for (i, tab) in enumerate(table_names_original):
key = tab.lower()
idMap[key] = i
return idMap
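# Illustrative sketch (not in the original source) for the Schema variant defined
# above: idMap keys are '*', lower-cased 'table.column' strings and bare table
# names; values are the integer column/table ids used by the evaluator.
example_table = {
    'table_names_original': ['people'],
    'column_names_original': [[-1, '*'], [0, 'id'], [0, 'name']],
}
example_schema = {'people': ['id', 'name']}
print(Schema(example_schema, example_table).idMap)
# {'*': 0, 'people.id': 1, 'people.name': 2, 'people': 0}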
|
def get_schemas_from_json(fpath):
with open(fpath) as f:
data = json.load(f)
db_names = [db['db_id'] for db in data]
tables = {}
schemas = {}
for db in data:
db_id = db['db_id']
schema = {}
column_names_original = db['column_names_original']
table_names_original = db['table_names_original']
tables[db_id] = {'column_names_original': column_names_original, 'table_names_original': table_names_original}
for (i, tabn) in enumerate(table_names_original):
table = str(tabn.lower())
cols = [str(col.lower()) for (td, col) in column_names_original if (td == i)]
schema[table] = cols
schemas[db_id] = schema
return (schemas, db_names, tables)
|
class Schema():
"""Simple schema which maps table & column to a unique identifier."""
def __init__(self, schema):
self._schema = schema
self._idMap = self._map(self._schema)
@property
def schema(self):
return self._schema
@property
def idMap(self):
return self._idMap
def _map(self, schema):
idMap = {'*': '__all__'}
for (key, vals) in schema.items():
for val in vals:
idMap[((key.lower() + '.') + val.lower())] = (((('__' + key.lower()) + '.') + val.lower()) + '__')
for key in schema:
idMap[key.lower()] = (('__' + key.lower()) + '__')
return idMap
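# Illustrative sketch (not in the original source) for this Schema variant, which
# takes only the {table: [columns]} dict and maps every name to a string id of the
# form '__table.column__' / '__table__' instead of an integer.
print(Schema({'people': ['id', 'name']}).idMap)
# {'*': '__all__', 'people.id': '__people.id__', 'people.name': '__people.name__', 'people': '__people__'}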
|
def get_schema(db):
"\n Get database's schema, which is a dict with table name as key\n and list of column names as value\n :param db: database path\n :return: schema dict\n "
schema = {}
conn = sqlite3.connect(db)
cursor = conn.cursor()
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = [str(table[0].lower()) for table in cursor.fetchall()]
for table in tables:
cursor.execute('PRAGMA table_info({})'.format(table))
schema[table] = [str(col[1].lower()) for col in cursor.fetchall()]
return schema
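# Usage sketch (not part of the original script; the temporary database file is an
# assumption for the demo). get_schema introspects an SQLite file via sqlite_master
# and PRAGMA table_info and returns lower-cased table and column names.
import os
import sqlite3
import tempfile
demo_db = os.path.join(tempfile.mkdtemp(), 'demo.sqlite')
demo_conn = sqlite3.connect(demo_db)
demo_conn.execute('CREATE TABLE people (id INTEGER PRIMARY KEY, name TEXT)')
demo_conn.commit()
demo_conn.close()
print(get_schema(demo_db))  # {'people': ['id', 'name']}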
|
def get_schema_from_json(fpath):
with open(fpath) as f:
data = json.load(f)
schema = {}
for entry in data:
table = str(entry['table'].lower())
cols = [str(col['column_name'].lower()) for col in entry['col_data']]
schema[table] = cols
return schema
|
def tokenize(string):
string = str(string)
string = string.replace("'", '"')
quote_idxs = [idx for (idx, char) in enumerate(string) if (char == '"')]
assert ((len(quote_idxs) % 2) == 0), 'Unexpected quote'
vals = {}
for i in range((len(quote_idxs) - 1), (- 1), (- 2)):
qidx1 = quote_idxs[(i - 1)]
qidx2 = quote_idxs[i]
val = string[qidx1:(qidx2 + 1)]
key = '__val_{}_{}__'.format(qidx1, qidx2)
string = ((string[:qidx1] + key) + string[(qidx2 + 1):])
vals[key] = val
toks = [word.lower() for word in word_tokenize(string)]
for i in range(len(toks)):
if (toks[i] in vals):
toks[i] = vals[toks[i]]
eq_idxs = [idx for (idx, tok) in enumerate(toks) if (tok == '=')]
eq_idxs.reverse()
prefix = ('!', '>', '<')
for eq_idx in eq_idxs:
pre_tok = toks[(eq_idx - 1)]
if (pre_tok in prefix):
toks = ((toks[:(eq_idx - 1)] + [(pre_tok + '=')]) + toks[(eq_idx + 1):])
return toks
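# Usage sketch (not part of the original file; assumes nltk and its tokenizer data
# are installed, since tokenize relies on word_tokenize). Quoted literals are
# shielded from word_tokenize and then restored, and '<', '>', '!' followed by '='
# are re-merged into single comparison tokens.
print(tokenize("SELECT name FROM people WHERE age >= 18 AND name = 'Bob'"))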
|
def scan_alias(toks):
"Scan the index of 'as' and build the map for all alias"
as_idxs = [idx for (idx, tok) in enumerate(toks) if (tok == 'as')]
alias = {}
for idx in as_idxs:
alias[toks[(idx + 1)]] = toks[(idx - 1)]
return alias
|
def get_tables_with_alias(schema, toks):
tables = scan_alias(toks)
for key in schema:
assert (key not in tables), 'Alias {} has the same name as a table'.format(key)
tables[key] = key
return tables
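# Illustrative sketch (toy token list, not from the original file): table aliases
# introduced with 'as' are resolved, and every real table name also maps to itself.
toy_toks = ['select', 't1.name', 'from', 'people', 'as', 't1']
print(get_tables_with_alias({'people': ['id', 'name']}, toy_toks))
# {'t1': 'people', 'people': 'people'}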
|
def parse_col(toks, start_idx, tables_with_alias, schema, default_tables=None):
""":returns: next idx, column id"""
tok = toks[start_idx]
if (tok == '*'):
return ((start_idx + 1), schema.idMap[tok])
if ('.' in tok):
(alias, col) = tok.split('.')
key = ((tables_with_alias[alias] + '.') + col)
return ((start_idx + 1), schema.idMap[key])
assert ((default_tables is not None) and (len(default_tables) > 0)), 'Default tables should not be None or empty'
for alias in default_tables:
table = tables_with_alias[alias]
if (tok in schema.schema[table]):
key = ((table + '.') + tok)
return ((start_idx + 1), schema.idMap[key])
assert False, 'Error col: {}'.format(tok)
|
def parse_col_unit(toks, start_idx, tables_with_alias, schema, default_tables=None):
""":returns: next idx, (agg_op id, col_id)"""
idx = start_idx
len_ = len(toks)
isBlock = False
isDistinct = False
if (toks[idx] == '('):
isBlock = True
idx += 1
if (toks[idx] in AGG_OPS):
agg_id = AGG_OPS.index(toks[idx])
idx += 1
assert ((idx < len_) and (toks[idx] == '('))
idx += 1
if (toks[idx] == 'distinct'):
idx += 1
isDistinct = True
(idx, col_id) = parse_col(toks, idx, tables_with_alias, schema, default_tables)
assert ((idx < len_) and (toks[idx] == ')'))
idx += 1
return (idx, (agg_id, col_id, isDistinct))
if (toks[idx] == 'distinct'):
idx += 1
isDistinct = True
agg_id = AGG_OPS.index('none')
(idx, col_id) = parse_col(toks, idx, tables_with_alias, schema, default_tables)
if isBlock:
assert (toks[idx] == ')')
idx += 1
return (idx, (agg_id, col_id, isDistinct))
|
def parse_val_unit(toks, start_idx, tables_with_alias, schema, default_tables=None):
idx = start_idx
len_ = len(toks)
isBlock = False
if (toks[idx] == '('):
isBlock = True
idx += 1
col_unit1 = None
col_unit2 = None
unit_op = UNIT_OPS.index('none')
(idx, col_unit1) = parse_col_unit(toks, idx, tables_with_alias, schema, default_tables)
if ((idx < len_) and (toks[idx] in UNIT_OPS)):
unit_op = UNIT_OPS.index(toks[idx])
idx += 1
(idx, col_unit2) = parse_col_unit(toks, idx, tables_with_alias, schema, default_tables)
if isBlock:
assert (toks[idx] == ')')
idx += 1
return (idx, (unit_op, col_unit1, col_unit2))
|
def parse_table_unit(toks, start_idx, tables_with_alias, schema):
""":returns: next idx, table id, table name"""
idx = start_idx
len_ = len(toks)
key = tables_with_alias[toks[idx]]
if (((idx + 1) < len_) and (toks[(idx + 1)] == 'as')):
idx += 3
else:
idx += 1
return (idx, schema.idMap[key], key)
|
def parse_value(toks, start_idx, tables_with_alias, schema, default_tables=None):
idx = start_idx
len_ = len(toks)
isBlock = False
if (toks[idx] == '('):
isBlock = True
idx += 1
if (toks[idx] == 'select'):
(idx, val) = parse_sql(toks, idx, tables_with_alias, schema)
elif ('"' in toks[idx]):
val = toks[idx]
idx += 1
else:
try:
val = float(toks[idx])
idx += 1
except:
end_idx = idx
while ((end_idx < len_) and (toks[end_idx] != ',') and (toks[end_idx] != ')') and (toks[end_idx] != 'and') and (toks[end_idx] not in CLAUSE_KEYWORDS) and (toks[end_idx] not in JOIN_KEYWORDS)):
end_idx += 1
(idx, val) = parse_col_unit(toks[start_idx:end_idx], 0, tables_with_alias, schema, default_tables)
idx = end_idx
if isBlock:
assert (toks[idx] == ')')
idx += 1
return (idx, val)
|
def parse_condition(toks, start_idx, tables_with_alias, schema, default_tables=None):
idx = start_idx
len_ = len(toks)
conds = []
while (idx < len_):
(idx, val_unit) = parse_val_unit(toks, idx, tables_with_alias, schema, default_tables)
not_op = False
if (toks[idx] == 'not'):
not_op = True
idx += 1
assert ((idx < len_) and (toks[idx] in WHERE_OPS)), 'Error condition: idx: {}, tok: {}'.format(idx, toks[idx])
op_id = WHERE_OPS.index(toks[idx])
idx += 1
val1 = val2 = None
if (op_id == WHERE_OPS.index('between')):
(idx, val1) = parse_value(toks, idx, tables_with_alias, schema, default_tables)
assert (toks[idx] == 'and')
idx += 1
(idx, val2) = parse_value(toks, idx, tables_with_alias, schema, default_tables)
else:
(idx, val1) = parse_value(toks, idx, tables_with_alias, schema, default_tables)
val2 = None
conds.append((not_op, op_id, val_unit, val1, val2))
if ((idx < len_) and ((toks[idx] in CLAUSE_KEYWORDS) or (toks[idx] in (')', ';')) or (toks[idx] in JOIN_KEYWORDS))):
break
if ((idx < len_) and (toks[idx] in COND_OPS)):
conds.append(toks[idx])
idx += 1
return (idx, conds)
|
def parse_select(toks, start_idx, tables_with_alias, schema, default_tables=None):
idx = start_idx
len_ = len(toks)
assert (toks[idx] == 'select'), "'select' not found"
idx += 1
isDistinct = False
if ((idx < len_) and (toks[idx] == 'distinct')):
idx += 1
isDistinct = True
val_units = []
while ((idx < len_) and (toks[idx] not in CLAUSE_KEYWORDS)):
agg_id = AGG_OPS.index('none')
if (toks[idx] in AGG_OPS):
agg_id = AGG_OPS.index(toks[idx])
idx += 1
(idx, val_unit) = parse_val_unit(toks, idx, tables_with_alias, schema, default_tables)
val_units.append((agg_id, val_unit))
if ((idx < len_) and (toks[idx] == ',')):
idx += 1
return (idx, (isDistinct, val_units))
|
def parse_from(toks, start_idx, tables_with_alias, schema):
"""Assume in the from clause, all table units are combined with join"""
assert ('from' in toks[start_idx:]), "'from' not found"
len_ = len(toks)
idx = (toks.index('from', start_idx) + 1)
default_tables = []
table_units = []
conds = []
while (idx < len_):
isBlock = False
if (toks[idx] == '('):
isBlock = True
idx += 1
if (toks[idx] == 'select'):
(idx, sql) = parse_sql(toks, idx, tables_with_alias, schema)
table_units.append((TABLE_TYPE['sql'], sql))
else:
if ((idx < len_) and (toks[idx] == 'join')):
idx += 1
(idx, table_unit, table_name) = parse_table_unit(toks, idx, tables_with_alias, schema)
table_units.append((TABLE_TYPE['table_unit'], table_unit))
default_tables.append(table_name)
if ((idx < len_) and (toks[idx] == 'on')):
idx += 1
(idx, this_conds) = parse_condition(toks, idx, tables_with_alias, schema, default_tables)
if (len(conds) > 0):
conds.append('and')
conds.extend(this_conds)
if isBlock:
assert (toks[idx] == ')')
idx += 1
if ((idx < len_) and ((toks[idx] in CLAUSE_KEYWORDS) or (toks[idx] in (')', ';')))):
break
return (idx, table_units, conds, default_tables)
|
def parse_where(toks, start_idx, tables_with_alias, schema, default_tables):
idx = start_idx
len_ = len(toks)
if ((idx >= len_) or (toks[idx] != 'where')):
return (idx, [])
idx += 1
(idx, conds) = parse_condition(toks, idx, tables_with_alias, schema, default_tables)
return (idx, conds)
|
def parse_group_by(toks, start_idx, tables_with_alias, schema, default_tables):
idx = start_idx
len_ = len(toks)
col_units = []
if ((idx >= len_) or (toks[idx] != 'group')):
return (idx, col_units)
idx += 1
assert (toks[idx] == 'by')
idx += 1
while ((idx < len_) and (not ((toks[idx] in CLAUSE_KEYWORDS) or (toks[idx] in (')', ';'))))):
(idx, col_unit) = parse_col_unit(toks, idx, tables_with_alias, schema, default_tables)
col_units.append(col_unit)
if ((idx < len_) and (toks[idx] == ',')):
idx += 1
else:
break
return (idx, col_units)
|
def parse_order_by(toks, start_idx, tables_with_alias, schema, default_tables):
idx = start_idx
len_ = len(toks)
val_units = []
order_type = 'asc'
if ((idx >= len_) or (toks[idx] != 'order')):
return (idx, val_units)
idx += 1
assert (toks[idx] == 'by')
idx += 1
while ((idx < len_) and (not ((toks[idx] in CLAUSE_KEYWORDS) or (toks[idx] in (')', ';'))))):
(idx, val_unit) = parse_val_unit(toks, idx, tables_with_alias, schema, default_tables)
val_units.append(val_unit)
if ((idx < len_) and (toks[idx] in ORDER_OPS)):
order_type = toks[idx]
idx += 1
if ((idx < len_) and (toks[idx] == ',')):
idx += 1
else:
break
return (idx, (order_type, val_units))
|
def parse_having(toks, start_idx, tables_with_alias, schema, default_tables):
idx = start_idx
len_ = len(toks)
if ((idx >= len_) or (toks[idx] != 'having')):
return (idx, [])
idx += 1
(idx, conds) = parse_condition(toks, idx, tables_with_alias, schema, default_tables)
return (idx, conds)
|
def parse_limit(toks, start_idx):
idx = start_idx
len_ = len(toks)
if ((idx < len_) and (toks[idx] == 'limit')):
idx += 2
return (idx, int(toks[(idx - 1)]))
return (idx, None)
|
def parse_sql(toks, start_idx, tables_with_alias, schema):
isBlock = False
len_ = len(toks)
idx = start_idx
sql = {}
if (toks[idx] == '('):
isBlock = True
idx += 1
(from_end_idx, table_units, conds, default_tables) = parse_from(toks, start_idx, tables_with_alias, schema)
sql['from'] = {'table_units': table_units, 'conds': conds}
(_, select_col_units) = parse_select(toks, idx, tables_with_alias, schema, default_tables)
idx = from_end_idx
sql['select'] = select_col_units
(idx, where_conds) = parse_where(toks, idx, tables_with_alias, schema, default_tables)
sql['where'] = where_conds
(idx, group_col_units) = parse_group_by(toks, idx, tables_with_alias, schema, default_tables)
sql['groupBy'] = group_col_units
(idx, having_conds) = parse_having(toks, idx, tables_with_alias, schema, default_tables)
sql['having'] = having_conds
(idx, order_col_units) = parse_order_by(toks, idx, tables_with_alias, schema, default_tables)
sql['orderBy'] = order_col_units
(idx, limit_val) = parse_limit(toks, idx)
sql['limit'] = limit_val
idx = skip_semicolon(toks, idx)
if isBlock:
assert (toks[idx] == ')')
idx += 1
idx = skip_semicolon(toks, idx)
for op in SQL_OPS:
sql[op] = None
if ((idx < len_) and (toks[idx] in SQL_OPS)):
sql_op = toks[idx]
idx += 1
(idx, IUE_sql) = parse_sql(toks, idx, tables_with_alias, schema)
sql[sql_op] = IUE_sql
return (idx, sql)
|
def load_data(fpath):
with open(fpath) as f:
data = json.load(f)
return data
|
def get_sql(schema, query):
toks = tokenize(query)
tables_with_alias = get_tables_with_alias(schema.schema, toks)
(_, sql) = parse_sql(toks, 0, tables_with_alias, schema)
return sql
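# End-to-end sketch (not part of the original file): parse a query against the
# string-id Schema defined earlier. This assumes the module-level constants of
# process_sql.py (CLAUSE_KEYWORDS, JOIN_KEYWORDS, WHERE_OPS, UNIT_OPS, AGG_OPS,
# COND_OPS, SQL_OPS, ORDER_OPS, TABLE_TYPE) are in scope, since they are not shown
# in this excerpt. The result is the nested Spider sql dict with 'select', 'from',
# 'where', 'groupBy', 'orderBy', 'having', 'limit', 'intersect', 'union' and
# 'except' entries.
demo_schema = Schema({'people': ['id', 'name', 'age']})
demo_sql = get_sql(demo_schema, 'SELECT name FROM people WHERE age > 18')
print(sorted(demo_sql.keys()))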
|
def skip_semicolon(toks, start_idx):
idx = start_idx
while ((idx < len(toks)) and (toks[idx] == ';')):
idx += 1
return idx
|
class Column():
ATTRIBUTE_TXT = 'TXT'
ATTRIBUTE_NUM = 'NUM'
ATTRIBUTE_GROUP_BY_ABLE = 'GROUPBY'
def __init__(self, name, natural_name, table=None, attributes=None):
self.name = name
self.natural_name = natural_name
self.table = table
# default to an empty list so __str__ below never reads an unset attribute field
self.attributes = (attributes if (attributes is not None) else [])
def __str__(self):
return ((((self.name + '||') + self.natural_name) + '||') + str(self.attributes))
|
class Table(object):
def __init__(self, name, natural_name):
self.name = name
self.natural_name = natural_name
self.foreign_keys = []
def add_foreign_key_to(self, my_col, their_col, that_table):
self.foreign_keys.append((my_col, their_col, that_table))
def get_foreign_keys(self):
return self.foreign_keys
def __str__(self):
return ((self.name + '||') + self.natural_name)
def __repr__(self):
return ((self.name + '||') + self.natural_name)
def __hash__(self):
val = 0
for c in self.name:
val = ((val * 10) + ord(c))
return val
def __eq__(self, rhs):
return (self.name == rhs.name)
def __ne__(self, rhs):
return (not (self.name == rhs.name))
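# Illustrative sketch (not from the original script): wiring two tables together
# with a foreign key in both directions, which is what the Schema class further
# below does when it reads 'foreign_keys' from the schema json.
people = Table('people', 'people')
pets = Table('pets', 'pets')
person_id = Column('id', 'id', table=people, attributes=[Column.ATTRIBUTE_NUM])
owner_id = Column('owner_id', 'owner id', table=pets, attributes=[Column.ATTRIBUTE_NUM])
pets.add_foreign_key_to(owner_id, person_id, people)
people.add_foreign_key_to(person_id, owner_id, pets)
print(pets.get_foreign_keys())  # a single (my_col, their_col, that_table) triple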
|
class DummyTable(Table):
def add_foreign_key_to(self, my_col, their_col, that_table):
pass
def get_foreign_keys(self):
return []
|
class ColumnPlaceholder():
def __init__(self, id_in_pattern, attributes):
self.id_in_pattern = id_in_pattern
self.attributes = attributes
self.column = None
def attach_to_column(self, column):
self.column = column
|
class Pattern():
def __init__(self, schema, json_data):
self.schema = schema
self.raw_sql = json_data['SQL Pattern']
self.raw_questions = json_data['Question Patterns']
reference_id_to_original_id = json_data['Column Identity']
self.column_identity = {}
for (reference, original) in reference_id_to_original_id.items():
rid = int(reference)
oid = int(original)
self.column_identity[rid] = oid
raw_column_attributes = json_data['Column Attributes']
sorted_column_attributes = sorted([(int(column_id), attributes) for (column_id, attributes) in raw_column_attributes.items()])
self.column_id_to_column_placeholders = {}
self.column_placeholders = []
for (column_id, attributes) in sorted_column_attributes:
original_column_id = self.column_identity.get(column_id, None)
if (original_column_id is not None):
self.column_id_to_column_placeholders[column_id] = self.column_id_to_column_placeholders[original_column_id]
continue
column_placeholder = ColumnPlaceholder(column_id, attributes)
self.column_placeholders.append(column_placeholder)
self.column_id_to_column_placeholders[column_id] = column_placeholder
def populate(self):
if (self.raw_sql == 'SELECT * {FROM, 0}'):
table_name = random.choice(self.schema.orginal_table)
sql = 'SELECT * FROM {}'.format(table_name)
return (sql, ['list all information about {} .'.format(table_name), 'Show everything on {}'.format(table_name), 'Return all columns in {} .'.format(table_name)])
for column_placeholder in self.column_placeholders:
all_permissible_columns = self.schema.get_columns_with_attributes(column_placeholder.attributes)
if (len(all_permissible_columns) == 0):
raise Exception('No possible column found for column {} with required attributes: {}'.format(column_placeholder.id_in_pattern, column_placeholder.attributes))
chosen_column = random.choice(all_permissible_columns)
column_placeholder.attach_to_column(chosen_column)
column_id_to_tn = {}
generated_sql = self.raw_sql[:]
replacements = []
for match in re.finditer('{FROM,[,0-9]+}', self.raw_sql):
raw_from_token = match.group()
split = raw_from_token[1:(- 1)].split(',')[1:]
id_of_columns_involved = [int(x) for x in split]
placeholders_of_columns_involved = [self.column_id_to_column_placeholders[x] for x in id_of_columns_involved]
columns_used_for_this_from_clause = [x.column for x in placeholders_of_columns_involved]
try:
(from_clause, table_to_tn) = self.schema.generate_from_clause(columns_used_for_this_from_clause)
except:
return ('', [])
replacements.append((raw_from_token, from_clause))
for column_id in id_of_columns_involved:
column = self.column_id_to_column_placeholders[column_id].column
try:
tn = table_to_tn[column.table]
except:
global found_path_error
found_path_error += 1
return ('', [])
column_id_to_tn[column_id] = tn
for (original, new) in replacements:
generated_sql = re.sub(original, new, generated_sql)
replacements = []
val = None
table_name = None
for match in re.finditer('{[A-Z]+,[,0-9]+}', generated_sql):
raw_column_token = match.group()
(type, column_id) = raw_column_token[1:(- 1)].split(',')
column_id = int(column_id)
if (type == 'COLUMN'):
if (column_id not in column_id_to_tn):
column_id = self.column_identity[column_id]
tn = column_id_to_tn[column_id]
column_name = self.column_id_to_column_placeholders[column_id].column.name
result = 't{}.{}'.format(tn, column_name)
elif (type == 'VALUE'):
if (column_id == 1):
result = str(random.randint(1, 101))
val = result
elif (type == 'COLUMN_NAME'):
natural_name = self.column_id_to_column_placeholders[column_id].column.natural_name
result = natural_name
elif (type == 'TABLE_NAME'):
try:
natural_name = self.column_id_to_column_placeholders[column_id].column.table.natural_name
result = natural_name
except:
result = random.choice(self.schema.orginal_table)
table_name = result
else:
raise Exception('Unknown type {} in type field'.format(type))
replacements.append((raw_column_token, result))
for (original, new) in replacements:
generated_sql = re.sub(original, new, generated_sql)
generated_questions = []
for question_pattern in self.raw_questions:
generated_question = question_pattern[:]
replacements = []
for match in re.finditer('{[_A-Z]+,[0-9]+}', generated_question):
raw_column_token = match.group()
(type, column_id) = raw_column_token[1:(- 1)].split(',')
column_id = int(column_id)
if (type == 'COLUMN'):
tn = column_id_to_tn[column_id]
column_name = self.column_id_to_column_placeholders[column_id].column.name
result = 't{}.{}'.format(tn, column_name)
elif (type == 'VALUE'):
result = val
elif (type == 'COLUMN_NAME'):
natural_name = self.column_id_to_column_placeholders[column_id].column.natural_name
result = natural_name
elif (type == 'TABLE_NAME'):
try:
natural_name = self.column_id_to_column_placeholders[column_id].column.table.natural_name
result = natural_name
except:
if table_name:
result = table_name
else:
result = random.choice(self.schema.orginal_table)
else:
raise Exception('Unknown type {} in type field'.format(type))
replacements.append((raw_column_token, result))
for (original, new) in replacements:
generated_question = re.sub(original, new, generated_question)
generated_questions.append(generated_question)
return (generated_sql, generated_questions)
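# Shape sketch (hypothetical values, not taken from the real pattern files): the
# json_data consumed by Pattern is expected to provide at least these four keys;
# {FROM,...}, {COLUMN,n}, {VALUE,n}, {COLUMN_NAME,n} and {TABLE_NAME,n} are the
# placeholder tokens that populate() rewrites.
example_pattern_data = {
    'SQL Pattern': 'SELECT {COLUMN,0} {FROM,0,1} WHERE {COLUMN,1} > {VALUE,1}',
    'Question Patterns': ['What is the {COLUMN_NAME,0} of {TABLE_NAME,0} with {COLUMN_NAME,1} above {VALUE,1} ?'],
    'Column Identity': {},
    'Column Attributes': {'0': ['TXT'], '1': ['NUM']},
}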
|
class Schema():
def __init__(self, json_data):
tables = []
table_index_to_table_object = {}
table_name_to_table_object = {}
next_table_index = 0
self.orginal_table = json_data['table_names_original']
for (table_name, table_name_natural) in zip(json_data['table_names_original'], json_data['table_names']):
table = Table(table_name, table_name_natural)
tables.append(table)
table_index_to_table_object[next_table_index] = table
table_name_to_table_object[table_name] = table
next_table_index += 1
columns = []
column_and_table_name_to_column_object = {}
for ((table_index, column_name), column_type, column_names_natural) in zip(json_data['column_names_original'], json_data['column_types'], json_data['column_names']):
if (table_index == (- 1)):
continue
its_table = table_index_to_table_object[table_index]
if (column_type == 'text'):
attributes = [Column.ATTRIBUTE_TXT]
elif (column_type == 'number'):
attributes = [Column.ATTRIBUTE_NUM]
else:
attributes = []
column = Column(column_name, column_names_natural[1], table=its_table, attributes=attributes)
column_and_table_name_to_column_object[(column_name, its_table.name)] = column
columns.append(column)
for ((from_table_name, from_column_name), (to_table_name, to_column_name)) in json_data['foreign_keys']:
from_table = table_name_to_table_object[from_table_name]
from_column = column_and_table_name_to_column_object[(from_column_name, from_table_name)]
to_table = table_name_to_table_object[to_table_name]
to_column = column_and_table_name_to_column_object[(to_column_name, to_table_name)]
from_table.add_foreign_key_to(from_column, to_column, to_table)
to_table.add_foreign_key_to(to_column, from_column, from_table)
self.all_columns = columns
self.all_tables = tables
def get_columns_with_attributes(self, column_attributes=[]):
results = []
for column in self.all_columns:
if all([(attribute in column.attributes) for attribute in column_attributes]):
results.append(column)
return results
class Join():
def __init__(self, schema, starting_table):
self.schema = schema
self.starting_table = starting_table
self.table_to_tn = {starting_table: 1}
self.joins = []
def find_a_way_to_join(self, table):
if (table in self.table_to_tn):
return
frontier = []
visited_tables = set()
found_path = None
# iterate over the already-joined tables without shadowing `table`,
# the join target this method is searching for
for joined_table in self.table_to_tn.keys():
visited_tables.add(joined_table)
for (from_column, to_column, to_table) in joined_table.get_foreign_keys():
frontier.append((joined_table, from_column, to_column, to_table, []))
while (len(frontier) > 0):
(from_table, from_column, to_column, to_table, path) = frontier.pop(0)
path.append((from_table, from_column, to_column, to_table))
if (to_table == table):
found_path = path
break
else:
for (next_from_column, next_to_column, next_to_table) in to_table.get_foreign_keys():
frontier.append((to_table, next_from_column, next_to_column, next_to_table, path))
if (found_path is None):
raise Exception('A path could not be found from the current join {} to table {}'.format(self.table_to_tn.keys(), table))
for (from_table, from_column, to_column, to_table) in found_path:
if (to_table not in self.table_to_tn):
self.table_to_tn[to_table] = (len(self.table_to_tn) + 1)
self.joins.append((from_table, from_column, to_column, to_table))
def generate_from_clause(self):
if (len(self.joins) == 0):
return 'from {} as t1'.format(self.starting_table.name)
# emit every recorded join edge (the first one included) and keep the clauses
# separated by spaces so the generated SQL stays well formed
from_clause = 'from {} as t{}'.format(self.joins[0][0].name, self.table_to_tn[self.joins[0][0]])
for (from_table, from_column, to_column, to_table) in self.joins:
from_clause += ' join {} as t{} on t{}.{} = t{}.{}'.format(to_table.name, self.table_to_tn[to_table], self.table_to_tn[from_table], from_column.name, self.table_to_tn[to_table], to_column.name)
return from_clause
def generate_from_clause(self, columns):
join = self.Join(self, columns[0].table)
for next_column in columns[1:]:
join.find_a_way_to_join(next_column.table)
return (join.generate_from_clause(), join.table_to_tn)
|
def load_database_schema(path):
data = json.load(open(path, 'r'))
schema = Schema(random.choice(data))
return schema
|
def load_patterns(data, schema):
patterns = []
for pattern_data in data:
patterns.append(Pattern(schema, pattern_data))
return patterns
|
def generate_every_db(db, schemas, tables, patterns_data):
db_name = db['db_id']
col_types = db['column_types']
process_sql_schema = schema_mod.Schema(schemas[db_name], tables[db_name])
if ('number' in col_types):
try:
schema = Schema(db)
except:
traceback.print_exc()
print('skip db {}'.format(db_name))
return
patterns = load_patterns(patterns_data, schema)
questions_and_queries = []
while (len(questions_and_queries) < 10):
pattern = random.choice(patterns)
try:
(sql, questions) = pattern.populate()
if (len(questions) != 0):
question = random.choice(questions)
questions_and_queries.append((question, sql))
except:
pass
return [{'db_id': db_name, 'query': query, 'query_toks': process_sql.tokenize(query), 'query_toks_no_value': None, 'question': question, 'question_toks': nltk.word_tokenize(question), 'sql': process_sql.get_sql(process_sql_schema, query)} for (question, query) in questions_and_queries]
else:
return []
|
def convert_to_op_index(is_not, op):
op = OLD_WHERE_OPS[op]
if (is_not and (op == 'in')):
return 7
try:
return NEW_WHERE_DICT[op]
except:
print('Unsupported op: {}'.format(op))
return (- 1)
|
def index_to_column_name(index, table):
column_name = table['column_names'][index][1]
table_index = table['column_names'][index][0]
table_name = table['table_names'][table_index]
return (table_name, column_name, index)
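# Illustrative sketch (toy table dict, not from the original file): a column index
# is resolved to a (table name, column name, index) triple.
toy_table = {'table_names': ['people'], 'column_names': [[-1, '*'], [0, 'name']]}
print(index_to_column_name(1, toy_table))  # ('people', 'name', 1)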
|
def get_label_cols(with_join, fk_dict, labels):
cols = set()
ret = []
for i in range(len(labels)):
cols.add(labels[i][0][2])
if (len(cols) > 3):
break
for col in cols:
if (with_join and (len(fk_dict[col]) > 0)):
ret.append(([col] + fk_dict[col]))
else:
ret.append(col)
return ret
|
class MultiSqlPredictor():
def __init__(self, question, sql, history):
self.sql = sql
self.question = question
self.history = history
self.keywords = ('intersect', 'except', 'union')
def generate_output(self):
for key in self.sql:
if ((key in self.keywords) and self.sql[key]):
return ((self.history + ['root']), key, self.sql[key])
return ((self.history + ['root']), 'none', self.sql)
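# Behaviour sketch (hypothetical inputs, not from the original file): with no
# intersect/except/union sub-query the predictor emits the label 'none' and passes
# the same sql dict through for further decomposition.
toy_sql = {'intersect': None, 'except': None, 'union': None, 'select': (False, []), 'where': []}
hist, label, sub_sql = MultiSqlPredictor(['toy', 'question'], toy_sql, []).generate_output()
print(hist, label)  # ['root'] none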
|
class KeyWordPredictor():
def __init__(self, question, sql, history):
self.sql = sql
self.question = question
self.history = history
self.keywords = ('select', 'where', 'groupBy', 'orderBy', 'limit', 'having')
def generate_output(self):
sql_keywords = []
for key in self.sql:
if ((key in self.keywords) and self.sql[key]):
sql_keywords.append(key)
return (self.history, [len(sql_keywords), sql_keywords], self.sql)
|
class ColPredictor():
def __init__(self, question, sql, table, history, kw=None):
self.sql = sql
self.question = question
self.history = history
self.table = table
self.keywords = ('select', 'where', 'groupBy', 'orderBy', 'having')
self.kw = kw
def generate_output(self):
ret = []
candidate_keys = self.sql.keys()
if self.kw:
candidate_keys = [self.kw]
for key in candidate_keys:
if ((key in self.keywords) and self.sql[key]):
cols = []
sqls = []
if (key == 'groupBy'):
sql_cols = self.sql[key]
for col in sql_cols:
cols.append((index_to_column_name(col[1], self.table), col[2]))
sqls.append(col)
elif (key == 'orderBy'):
sql_cols = self.sql[key][1]
for col in sql_cols:
cols.append((index_to_column_name(col[1][1], self.table), col[1][2]))
sqls.append(col)
elif (key == 'select'):
sql_cols = self.sql[key][1]
for col in sql_cols:
cols.append((index_to_column_name(col[1][1][1], self.table), col[1][1][2]))
sqls.append(col)
elif ((key == 'where') or (key == 'having')):
sql_cols = self.sql[key]
for col in sql_cols:
if (not isinstance(col, list)):
continue
try:
cols.append((index_to_column_name(col[2][1][1], self.table), col[2][1][2]))
except:
print('Key:{} Col:{} Question:{}'.format(key, col, self.question))
sqls.append(col)
ret.append(((self.history + [key]), (len(cols), cols), sqls))
return ret
|
class OpPredictor():
def __init__(self, question, sql, history):
self.sql = sql
self.question = question
self.history = history
def generate_output(self):
return (self.history, convert_to_op_index(self.sql[0], self.sql[1]), (self.sql[3], self.sql[4]))
|
class AggPredictor():
def __init__(self, question, sql, history, kw=None):
self.sql = sql
self.question = question
self.history = history
self.kw = kw
def generate_output(self):
label = (- 1)
if self.kw:
key = self.kw
else:
key = self.history[(- 2)]
if (key == 'select'):
label = self.sql[0]
elif (key == 'orderBy'):
label = self.sql[1][0]
elif (key == 'having'):
label = self.sql[2][1][0]
return (self.history, label)
|
class DesAscPredictor():
def __init__(self, question, sql, table, history):
self.sql = sql
self.question = question
self.history = history
self.table = table
def generate_output(self):
for key in self.sql:
if ((key == 'orderBy') and self.sql[key]):
try:
col = self.sql[key][1][0][1][1]
except:
print('question:{} sql:{}'.format(self.question, self.sql))
if ((self.sql[key][0] == 'asc') and self.sql['limit']):
label = 0
elif ((self.sql[key][0] == 'asc') and (not self.sql['limit'])):
label = 1
elif ((self.sql[key][0] == 'desc') and self.sql['limit']):
label = 2
else:
label = 3
return ((self.history + [index_to_column_name(col, self.table), self.sql[key][1][0][1][0]]), label)
|
class AndOrPredictor():
def __init__(self, question, sql, table, history):
self.sql = sql
self.question = question
self.history = history
self.table = table
def generate_output(self):
if (('where' in self.sql) and self.sql['where'] and (len(self.sql['where']) > 1)):
return (self.history, COND_OPS[self.sql['where'][1]])
return (self.history, (- 1))
|
def parser_item_with_long_history(question_tokens, sql, table, history, dataset):
table_schema = [table['table_names'], table['column_names'], table['column_types']]
stack = [('root', sql)]
with_join = False
fk_dict = defaultdict(list)
for fk in table['foreign_keys']:
fk_dict[fk[0]].append(fk[1])
fk_dict[fk[1]].append(fk[0])
while (len(stack) > 0):
node = stack.pop()
if (node[0] == 'root'):
(history, label, sql) = MultiSqlPredictor(question_tokens, node[1], history).generate_output()
dataset['multi_sql_dataset'].append({'question_tokens': question_tokens, 'ts': table_schema, 'history': history[:], 'label': SQL_OPS[label]})
history.append(label)
if (label == 'none'):
stack.append((label, sql))
else:
node[1][label] = None
stack.append((label, node[1], sql))
elif (node[0] in ('intersect', 'except', 'union')):
stack.append(('root', node[1]))
stack.append(('root', node[2]))
elif (node[0] == 'none'):
with_join = (len(node[1]['from']['table_units']) > 1)
(history, label, sql) = KeyWordPredictor(question_tokens, node[1], history).generate_output()
label_idxs = []
for item in label[1]:
if (item in KW_DICT):
label_idxs.append(KW_DICT[item])
label_idxs.sort()
dataset['keyword_dataset'].append({'question_tokens': question_tokens, 'ts': table_schema, 'history': history[:], 'label': label_idxs})
if ('having' in label[1]):
stack.append(('having', node[1]))
if ('orderBy' in label[1]):
stack.append(('orderBy', node[1]))
if ('groupBy' in label[1]):
if ('having' in label[1]):
dataset['having_dataset'].append({'question_tokens': question_tokens, 'ts': table_schema, 'history': history[:], 'gt_col': node[1]['groupBy'][0][1], 'label': 1})
else:
dataset['having_dataset'].append({'question_tokens': question_tokens, 'ts': table_schema, 'history': history[:], 'gt_col': node[1]['groupBy'][0][1], 'label': 0})
stack.append(('groupBy', node[1]))
if ('where' in label[1]):
stack.append(('where', node[1]))
if ('select' in label[1]):
stack.append(('select', node[1]))
elif (node[0] in ('select', 'having', 'orderBy')):
history.append(node[0])
if (node[0] == 'orderBy'):
orderby_ret = DesAscPredictor(question_tokens, node[1], table, history).generate_output()
if orderby_ret:
dataset['des_asc_dataset'].append({'question_tokens': question_tokens, 'ts': table_schema, 'history': orderby_ret[0], 'gt_col': node[1]['orderBy'][1][0][1][1], 'label': orderby_ret[1]})
col_ret = ColPredictor(question_tokens, node[1], table, history, node[0]).generate_output()
agg_col_dict = dict()
op_col_dict = dict()
for (h, l, s) in col_ret:
if (l[0] == 0):
print('Warning: predicted 0 columns!')
continue
dataset['col_dataset'].append({'question_tokens': question_tokens, 'ts': table_schema, 'history': history[:], 'label': get_label_cols(with_join, fk_dict, l[1])})
for (col, sql_item) in zip(l[1], s):
key = '{}{}{}'.format(col[0][0], col[0][1], col[0][2])
if (key not in agg_col_dict):
agg_col_dict[key] = [(sql_item, col[0])]
else:
agg_col_dict[key].append((sql_item, col[0]))
if (key not in op_col_dict):
op_col_dict[key] = [(sql_item, col[0])]
else:
op_col_dict[key].append((sql_item, col[0]))
for key in agg_col_dict:
stack.append(('col', node[0], agg_col_dict[key], op_col_dict[key]))
elif (node[0] == 'col'):
history.append(node[2][0][1])
if (node[1] == 'where'):
stack.append(('op', node[2], 'where'))
else:
labels = []
for (sql_item, col) in node[2]:
(_, label) = AggPredictor(question_tokens, sql_item, history, node[1]).generate_output()
if ((label - 1) >= 0):
labels.append((label - 1))
dataset['agg_dataset'].append({'question_tokens': question_tokens, 'ts': table_schema, 'history': history[:], 'gt_col': node[2][0][1][2], 'label': labels[:min(len(labels), 3)]})
if (node[1] == 'having'):
stack.append(('op', node[2], 'having'))
if (len(labels) > 0):
history.append(AGG_OPS[(labels[0] + 1)])
elif (node[0] == 'op'):
labels = []
dataset['op_dataset'].append({'question_tokens': question_tokens, 'ts': table_schema, 'history': history[:], 'gt_col': node[1][0][1][2], 'label': labels})
for (sql_item, col) in node[1]:
(_, label, s) = OpPredictor(question_tokens, sql_item, history).generate_output()
if (label != (- 1)):
labels.append(label)
history.append(NEW_WHERE_OPS[label])
if isinstance(s[0], dict):
stack.append(('root', s[0]))
dataset['root_tem_dataset'].append({'question_tokens': question_tokens, 'ts': table_schema, 'history': history[:], 'gt_col': node[1][0][1][2], 'label': 0})
else:
dataset['root_tem_dataset'].append({'question_tokens': question_tokens, 'ts': table_schema, 'history': history[:], 'gt_col': node[1][0][1][2], 'label': 1})
if (len(labels) > 2):
print(question_tokens)
dataset['op_dataset'][(- 1)]['label'] = labels
elif (node[0] == 'where'):
history.append(node[0])
(hist, label) = AndOrPredictor(question_tokens, node[1], table, history).generate_output()
if (label != (- 1)):
dataset['andor_dataset'].append({'question_tokens': question_tokens, 'ts': table_schema, 'history': history[:], 'label': label})
col_ret = ColPredictor(question_tokens, node[1], table, history, 'where').generate_output()
op_col_dict = dict()
for (h, l, s) in col_ret:
if (l[0] == 0):
print('Warning: predicted 0 columns!')
continue
dataset['col_dataset'].append({'question_tokens': question_tokens, 'ts': table_schema, 'history': history[:], 'label': get_label_cols(with_join, fk_dict, l[1])})
for (col, sql_item) in zip(l[1], s):
key = '{}{}{}'.format(col[0][0], col[0][1], col[0][2])
if (key not in op_col_dict):
op_col_dict[key] = [(sql_item, col[0])]
else:
op_col_dict[key].append((sql_item, col[0]))
for key in op_col_dict:
stack.append(('col', 'where', op_col_dict[key]))
elif (node[0] == 'groupBy'):
history.append(node[0])
col_ret = ColPredictor(question_tokens, node[1], table, history, node[0]).generate_output()
agg_col_dict = dict()
for (h, l, s) in col_ret:
if (l[0] == 0):
print('Warning: predicted 0 columns!')
continue
dataset['col_dataset'].append({'question_tokens': question_tokens, 'ts': table_schema, 'history': history[:], 'label': get_label_cols(with_join, fk_dict, l[1])})
for (col, sql_item) in zip(l[1], s):
key = '{}{}{}'.format(col[0][0], col[0][1], col[0][2])
if (key not in agg_col_dict):
agg_col_dict[key] = [(sql_item, col[0])]
else:
agg_col_dict[key].append((sql_item, col[0]))
for key in agg_col_dict:
stack.append(('col', node[0], agg_col_dict[key]))
|
def parser_item(question_tokens, sql, table, history, dataset):
table_schema = [table['table_names'], table['column_names'], table['column_types']]
(history, label, sql) = MultiSqlPredictor(question_tokens, sql, history).generate_output()
dataset['multi_sql_dataset'].append({'question_tokens': question_tokens, 'ts': table_schema, 'history': history[:], 'label': SQL_OPS[label]})
history.append(label)
(history, label, sql) = KeyWordPredictor(question_tokens, sql, history).generate_output()
label_idxs = []
for item in label[1]:
if (item in KW_DICT):
label_idxs.append(KW_DICT[item])
label_idxs.sort()
dataset['keyword_dataset'].append({'question_tokens': question_tokens, 'ts': table_schema, 'history': history[:], 'label': label_idxs})
(hist, label) = AndOrPredictor(question_tokens, sql, table, history).generate_output()
if (label != (- 1)):
dataset['andor_dataset'].append({'question_tokens': question_tokens, 'ts': table_schema, 'history': (hist[:] + ['where']), 'label': label})
orderby_ret = DesAscPredictor(question_tokens, sql, table, history).generate_output()
if orderby_ret:
dataset['des_asc_dataset'].append({'question_tokens': question_tokens, 'ts': table_schema, 'history': orderby_ret[0][:], 'label': orderby_ret[1]})
col_ret = ColPredictor(question_tokens, sql, table, history).generate_output()
agg_candidates = []
op_candidates = []
for (h, l, s) in col_ret:
if (l[0] == 0):
print('Warning: predicted 0 columns!')
continue
dataset['col_dataset'].append({'question_tokens': question_tokens, 'ts': table_schema, 'history': h[:], 'label': list(set([l[1][i][0][2] for i in range(min(len(l[1]), 3))]))})
for (col, sql_item) in zip(l[1], s):
if (h[(- 1)] in ('where', 'having')):
op_candidates.append(((h + [col[0]]), sql_item))
if (h[(- 1)] in ('select', 'orderBy', 'having')):
agg_candidates.append(((h + [col[0]]), sql_item))
if (h[(- 1)] == 'groupBy'):
label = 0
if sql['having']:
label = 1
dataset['having_dataset'].append({'question_tokens': question_tokens, 'ts': table_schema, 'history': (h[:] + [col[0]]), 'label': label})
op_col_dict = dict()
for (h, sql_item) in op_candidates:
(_, label, s) = OpPredictor(question_tokens, sql_item, h).generate_output()
if (label == (- 1)):
continue
key = '{}{}'.format(h[(- 2)], h[(- 1)][2])
label = NEW_WHERE_OPS[label]
if (key in op_col_dict):
op_col_dict[key][1].append(label)
else:
op_col_dict[key] = [h[:], [label]]
if isinstance(s[0], dict):
dataset['root_tem_dataset'].append({'question_tokens': question_tokens, 'ts': table_schema, 'history': (h[:] + [label]), 'label': 0})
parser_item(question_tokens, s[0], table, (h[:] + [label]), dataset)
else:
dataset['root_tem_dataset'].append({'question_tokens': question_tokens, 'ts': table_schema, 'history': (h[:] + [label]), 'label': 1})
for key in op_col_dict:
dataset['op_dataset'].append({'question_tokens': question_tokens, 'ts': table_schema, 'history': op_col_dict[key][0], 'label': op_col_dict[key][1]})
agg_col_dict = dict()
for (h, sql_item) in agg_candidates:
(_, label) = AggPredictor(question_tokens, sql_item, h).generate_output()
if (label != 5):
key = '{}{}'.format(h[(- 2)], h[(- 1)][2])
if (key in agg_col_dict):
agg_col_dict[key][1].append(label)
else:
agg_col_dict[key] = [h[:], [label]]
for key in agg_col_dict:
dataset['agg_dataset'].append({'question_tokens': question_tokens, 'ts': table_schema, 'history': agg_col_dict[key][0], 'label': agg_col_dict[key][1]})
|
def get_table_dict(table_data_path):
data = json.load(open(table_data_path))
table = dict()
for item in data:
table[item['db_id']] = item
return table
|
def parse_data(data):
dataset = {'multi_sql_dataset': [], 'keyword_dataset': [], 'col_dataset': [], 'op_dataset': [], 'agg_dataset': [], 'root_tem_dataset': [], 'des_asc_dataset': [], 'having_dataset': [], 'andor_dataset': []}
table_dict = get_table_dict(table_data_path)
for item in data:
if (history_option == 'full'):
parser_item_with_long_history(item['question_toks'], item['sql'], table_dict[item['db_id']], [], dataset)
else:
parser_item(item['question_toks'], item['sql'], table_dict[item['db_id']], [], dataset)
print('finished preprocess')
for key in dataset:
print('dataset:{} size:{}'.format(key, len(dataset[key])))
json.dump(dataset[key], open('./generated_data/{}_{}_{}.json'.format(history_option, train_dev, key), 'w'), indent=2)
|
class Stack():
def __init__(self):
self.items = []
def isEmpty(self):
return (self.items == [])
def push(self, item):
self.items.append(item)
def pop(self):
return self.items.pop()
def peek(self):
return self.items[(len(self.items) - 1)]
def size(self):
return len(self.items)
def insert(self, i, x):
return self.items.insert(i, x)
|
def to_batch_tables(tables, B, table_type):
col_seq = []
ts = [tables['table_names'], tables['column_names'], tables['column_types']]
tname_toks = [x.split(' ') for x in ts[0]]
col_type = ts[2]
cols = [x.split(' ') for (xid, x) in ts[1]]
tab_seq = [xid for (xid, x) in ts[1]]
cols_add = []
for (tid, col, ct) in zip(tab_seq, cols, col_type):
col_one = [ct]
if (tid == (- 1)):
tabn = ['all']
elif (table_type == 'no'):
tabn = []
else:
tabn = tname_toks[tid]
for t in tabn:
if (t not in col):
col_one.append(t)
col_one.extend(col)
cols_add.append(col_one)
col_seq = ([cols_add] * B)
return col_seq
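# Illustrative sketch (toy input, not from the original file): every column is
# encoded as [column type] + table-name tokens + column-name tokens, and the whole
# list is repeated B times, once per batch element.
toy_tables = {
    'table_names': ['people'],
    'column_names': [[-1, '*'], [0, 'person name']],
    'column_types': ['text', 'text'],
}
print(to_batch_tables(toy_tables, 2, 'std'))
# [[['text', 'all', '*'], ['text', 'people', 'person', 'name']], [['text', 'all', '*'], ['text', 'people', 'person', 'name']]]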
|
class SuperModel(nn.Module):
def __init__(self, word_emb, N_word, N_h=300, N_depth=2, gpu=True, trainable_emb=False, table_type='std', use_hs=True):
super(SuperModel, self).__init__()
self.gpu = gpu
self.N_h = N_h
self.N_depth = N_depth
self.trainable_emb = trainable_emb
self.table_type = table_type
self.use_hs = use_hs
self.SQL_TOK = ['<UNK>', '<END>', 'WHERE', 'AND', 'EQL', 'GT', 'LT', '<BEG>']
self.embed_layer = WordEmbedding(word_emb, N_word, gpu, self.SQL_TOK, trainable=trainable_emb)
self.multi_sql = MultiSqlPredictor(N_word=N_word, N_h=N_h, N_depth=N_depth, gpu=gpu, use_hs=use_hs)
self.multi_sql.eval()
self.key_word = KeyWordPredictor(N_word=N_word, N_h=N_h, N_depth=N_depth, gpu=gpu, use_hs=use_hs)
self.key_word.eval()
self.col = ColPredictor(N_word=N_word, N_h=N_h, N_depth=N_depth, gpu=gpu, use_hs=use_hs)
self.col.eval()
self.op = OpPredictor(N_word=N_word, N_h=N_h, N_depth=N_depth, gpu=gpu, use_hs=use_hs)
self.op.eval()
self.agg = AggPredictor(N_word=N_word, N_h=N_h, N_depth=N_depth, gpu=gpu, use_hs=use_hs)
self.agg.eval()
self.root_teminal = RootTeminalPredictor(N_word=N_word, N_h=N_h, N_depth=N_depth, gpu=gpu, use_hs=use_hs)
self.root_teminal.eval()
self.des_asc = DesAscLimitPredictor(N_word=N_word, N_h=N_h, N_depth=N_depth, gpu=gpu, use_hs=use_hs)
self.des_asc.eval()
self.having = HavingPredictor(N_word=N_word, N_h=N_h, N_depth=N_depth, gpu=gpu, use_hs=use_hs)
self.having.eval()
self.andor = AndOrPredictor(N_word=N_word, N_h=N_h, N_depth=N_depth, gpu=gpu, use_hs=use_hs)
self.andor.eval()
self.softmax = nn.Softmax()
self.CE = nn.CrossEntropyLoss()
self.log_softmax = nn.LogSoftmax()
self.mlsml = nn.MultiLabelSoftMarginLoss()
self.bce_logit = nn.BCEWithLogitsLoss()
self.sigm = nn.Sigmoid()
if gpu:
self.cuda()
self.path_not_found = 0
def forward(self, q_seq, history, tables):
return self.full_forward(q_seq, history, tables)
def full_forward(self, q_seq, history, tables):
B = len(q_seq)
(q_emb_var, q_len) = self.embed_layer.gen_x_q_batch(q_seq)
col_seq = to_batch_tables(tables, B, self.table_type)
(col_emb_var, col_name_len, col_len) = self.embed_layer.gen_col_batch(col_seq)
mkw_emb_var = self.embed_layer.gen_word_list_embedding(['none', 'except', 'intersect', 'union'], B)
mkw_len = np.full(q_len.shape, 4, dtype=np.int64)
kw_emb_var = self.embed_layer.gen_word_list_embedding(['where', 'group by', 'order by'], B)
kw_len = np.full(q_len.shape, 3, dtype=np.int64)
stack = Stack()
stack.push(('root', None))
history = ([['root']] * B)
andor_cond = ''
has_limit = False
current_sql = {}
sql_stack = []
idx_stack = []
kw_stack = []
kw = ''
nested_label = ''
has_having = False
timeout = (time.time() + 2)
failed = False
while (not stack.isEmpty()):
if (time.time() > timeout):
failed = True
break
vet = stack.pop()
(hs_emb_var, hs_len) = self.embed_layer.gen_x_history_batch(history)
if ((len(idx_stack) > 0) and (stack.size() < idx_stack[(- 1)])):
idx_stack.pop()
current_sql = sql_stack.pop()
kw = kw_stack.pop()
if (isinstance(vet, tuple) and (vet[0] == 'root')):
if (history[0][(- 1)] != 'root'):
history[0].append('root')
(hs_emb_var, hs_len) = self.embed_layer.gen_x_history_batch(history)
if (vet[1] != 'original'):
idx_stack.append(stack.size())
sql_stack.append(current_sql)
kw_stack.append(kw)
else:
idx_stack.append(stack.size())
sql_stack.append(sql_stack[(- 1)])
kw_stack.append(kw)
if ('sql' in current_sql):
current_sql['nested_sql'] = {}
current_sql['nested_label'] = nested_label
current_sql = current_sql['nested_sql']
elif isinstance(vet[1], dict):
vet[1]['sql'] = {}
current_sql = vet[1]['sql']
elif (vet[1] != 'original'):
current_sql['sql'] = {}
current_sql = current_sql['sql']
if ((vet[1] == 'nested') or (vet[1] == 'original')):
stack.push('none')
history[0].append('none')
else:
score = self.multi_sql.forward(q_emb_var, q_len, hs_emb_var, hs_len, mkw_emb_var, mkw_len)
label = np.argmax(score[0].data.cpu().numpy())
label = SQL_OPS[label]
history[0].append(label)
stack.push(label)
if (label != 'none'):
nested_label = label
elif (vet in ('intersect', 'except', 'union')):
stack.push(('root', 'nested'))
stack.push(('root', 'original'))
elif (vet == 'none'):
score = self.key_word.forward(q_emb_var, q_len, hs_emb_var, hs_len, kw_emb_var, kw_len)
(kw_num_score, kw_score) = [x.data.cpu().numpy() for x in score]
num_kw = np.argmax(kw_num_score[0])
kw_score = list(np.argsort((- kw_score[0]))[:num_kw])
kw_score.sort(reverse=True)
for kw in kw_score:
stack.push(KW_OPS[kw])
stack.push('select')
elif (vet in ('select', 'orderBy', 'where', 'groupBy', 'having')):
kw = vet
current_sql[kw] = []
history[0].append(vet)
stack.push(('col', vet))
elif (isinstance(vet, tuple) and (vet[0] == 'col')):
score = self.col.forward(q_emb_var, q_len, hs_emb_var, hs_len, col_emb_var, col_len, col_name_len)
(col_num_score, col_score) = [x.data.cpu().numpy() for x in score]
col_num = (np.argmax(col_num_score[0]) + 1)
cols = np.argsort((- col_score[0]))[:col_num]
for col in cols:
if (vet[1] == 'where'):
stack.push(('op', 'where', col))
elif (vet[1] != 'groupBy'):
stack.push(('agg', vet[1], col))
elif (vet[1] == 'groupBy'):
history[0].append(index_to_column_name(col, tables))
current_sql[kw].append(index_to_column_name(col, tables))
if ((col_num > 1) and (vet[1] == 'where')):
score = self.andor.forward(q_emb_var, q_len, hs_emb_var, hs_len)
label = np.argmax(score[0].data.cpu().numpy())
andor_cond = COND_OPS[label]
current_sql[kw].append(andor_cond)
if ((vet[1] == 'groupBy') and (col_num > 0)):
score = self.having.forward(q_emb_var, q_len, hs_emb_var, hs_len, col_emb_var, col_len, col_name_len, np.full(B, cols[0], dtype=np.int64))
label = np.argmax(score[0].data.cpu().numpy())
if (label == 1):
has_having = (label == 1)
stack.push('having')
elif (isinstance(vet, tuple) and (vet[0] == 'agg')):
history[0].append(index_to_column_name(vet[2], tables))
if (vet[1] not in ('having', 'orderBy')):
try:
current_sql[kw].append(index_to_column_name(vet[2], tables))
except Exception as e:
traceback.print_exc()
print('history:{},current_sql:{} stack:{}'.format(history[0], current_sql, stack.items))
print('idx_stack:{}'.format(idx_stack))
print('sql_stack:{}'.format(sql_stack))
exit(1)
(hs_emb_var, hs_len) = self.embed_layer.gen_x_history_batch(history)
score = self.agg.forward(q_emb_var, q_len, hs_emb_var, hs_len, col_emb_var, col_len, col_name_len, np.full(B, vet[2], dtype=np.int64))
(agg_num_score, agg_score) = [x.data.cpu().numpy() for x in score]
agg_num = np.argmax(agg_num_score[0])
agg_idxs = np.argsort((- agg_score[0]))[:agg_num]
if (len(agg_idxs) > 0):
history[0].append(AGG_OPS[agg_idxs[0]])
if (vet[1] not in ('having', 'orderBy')):
current_sql[kw].append(AGG_OPS[agg_idxs[0]])
elif (vet[1] == 'orderBy'):
stack.push(('des_asc', vet[2], AGG_OPS[agg_idxs[0]]))
else:
stack.push(('op', 'having', vet[2], AGG_OPS[agg_idxs[0]]))
for agg in agg_idxs[1:]:
history[0].append(index_to_column_name(vet[2], tables))
history[0].append(AGG_OPS[agg])
if (vet[1] not in ('having', 'orderBy')):
current_sql[kw].append(index_to_column_name(vet[2], tables))
current_sql[kw].append(AGG_OPS[agg])
elif (vet[1] == 'orderBy'):
stack.push(('des_asc', vet[2], AGG_OPS[agg]))
else:
stack.push(('op', 'having', vet[2], agg_idxs))
if (len(agg_idxs) == 0):
if (vet[1] not in ('having', 'orderBy')):
current_sql[kw].append('none_agg')
elif (vet[1] == 'orderBy'):
stack.push(('des_asc', vet[2], 'none_agg'))
else:
stack.push(('op', 'having', vet[2], 'none_agg'))
elif (isinstance(vet, tuple) and (vet[0] == 'op')):
if (vet[1] == 'where'):
history[0].append(index_to_column_name(vet[2], tables))
(hs_emb_var, hs_len) = self.embed_layer.gen_x_history_batch(history)
score = self.op.forward(q_emb_var, q_len, hs_emb_var, hs_len, col_emb_var, col_len, col_name_len, np.full(B, vet[2], dtype=np.int64))
(op_num_score, op_score) = [x.data.cpu().numpy() for x in score]
op_num = (np.argmax(op_num_score[0]) + 1)
ops = np.argsort((- op_score[0]))[:op_num]
if (op_num > 0):
history[0].append(NEW_WHERE_OPS[ops[0]])
if (vet[1] == 'having'):
stack.push(('root_teminal', vet[2], vet[3], ops[0]))
else:
stack.push(('root_teminal', vet[2], ops[0]))
for op in ops[1:]:
history[0].append(index_to_column_name(vet[2], tables))
history[0].append(NEW_WHERE_OPS[op])
if (vet[1] == 'having'):
stack.push(('root_teminal', vet[2], vet[3], op))
else:
stack.push(('root_teminal', vet[2], op))
elif (isinstance(vet, tuple) and (vet[0] == 'root_teminal')):
score = self.root_teminal.forward(q_emb_var, q_len, hs_emb_var, hs_len, col_emb_var, col_len, col_name_len, np.full(B, vet[1], dtype=np.int64))
label = np.argmax(score[0].data.cpu().numpy())
label = ROOT_TERM_OPS[label]
if (len(vet) == 4):
current_sql[kw].append(index_to_column_name(vet[1], tables))
current_sql[kw].append(vet[2])
current_sql[kw].append(NEW_WHERE_OPS[vet[3]])
else:
try:
current_sql[kw].append(index_to_column_name(vet[1], tables))
except Exception as e:
traceback.print_exc()
print('history:{},current_sql:{} stack:{}'.format(history[0], current_sql, stack.items))
print('idx_stack:{}'.format(idx_stack))
print('sql_stack:{}'.format(sql_stack))
exit(1)
current_sql[kw].append(NEW_WHERE_OPS[vet[2]])
if (label == 'root'):
history[0].append('root')
current_sql[kw].append({})
stack.push(('root', current_sql[kw][(- 1)]))
else:
current_sql[kw].append('terminal')
elif (isinstance(vet, tuple) and (vet[0] == 'des_asc')):
current_sql[kw].append(index_to_column_name(vet[1], tables))
current_sql[kw].append(vet[2])
score = self.des_asc.forward(q_emb_var, q_len, hs_emb_var, hs_len, col_emb_var, col_len, col_name_len, np.full(B, vet[1], dtype=np.int64))
label = np.argmax(score[0].data.cpu().numpy())
(dec_asc, has_limit) = DEC_ASC_OPS[label]
history[0].append(dec_asc)
current_sql[kw].append(dec_asc)
current_sql[kw].append(has_limit)
if failed:
return None
print('history:{}'.format(history[0]))
if (len(sql_stack) > 0):
current_sql = sql_stack[0]
return current_sql
def gen_col(self, col, table, table_alias_dict):
colname = table['column_names_original'][col[2]][1]
table_idx = table['column_names_original'][col[2]][0]
if (table_idx not in table_alias_dict):
return colname
return 'T{}.{}'.format(table_alias_dict[table_idx], colname)
def gen_group_by(self, sql, kw, table, table_alias_dict):
ret = []
for i in range(0, len(sql)):
ret.append(self.gen_col(sql[i], table, table_alias_dict))
return '{} {}'.format(kw, ','.join(ret))
def gen_select(self, sql, kw, table, table_alias_dict):
ret = []
for i in range(0, len(sql), 2):
if ((sql[(i + 1)] == 'none_agg') or (not isinstance(sql[(i + 1)], basestring))):
ret.append(self.gen_col(sql[i], table, table_alias_dict))
else:
ret.append('{}({})'.format(sql[(i + 1)], self.gen_col(sql[i], table, table_alias_dict)))
return '{} {}'.format(kw, ','.join(ret))
def gen_where(self, sql, table, table_alias_dict):
if (len(sql) == 0):
return ''
start_idx = 0
andor = 'and'
if isinstance(sql[0], basestring):
start_idx += 1
andor = sql[0]
ret = []
for i in range(start_idx, len(sql), 3):
col = self.gen_col(sql[i], table, table_alias_dict)
op = sql[(i + 1)]
val = sql[(i + 2)]
where_item = ''
if (val == 'terminal'):
where_item = "{} {} '{}'".format(col, op, val)
else:
val = self.gen_sql(val, table)
where_item = '{} {} ({})'.format(col, op, val)
if (op == 'between'):
where_item += " and 'terminal'"
ret.append(where_item)
return 'where {}'.format(' {} '.format(andor).join(ret))
def gen_orderby(self, sql, table, table_alias_dict):
ret = []
limit = ''
if (sql[(- 1)] == True):
limit = 'limit 1'
for i in range(0, len(sql), 4):
if ((sql[(i + 1)] == 'none_agg') or (not isinstance(sql[(i + 1)], basestring))):
ret.append('{} {}'.format(self.gen_col(sql[i], table, table_alias_dict), sql[(i + 2)]))
else:
ret.append('{}({}) {}'.format(sql[(i + 1)], self.gen_col(sql[i], table, table_alias_dict), sql[(i + 2)]))
return 'order by {} {}'.format(','.join(ret), limit)
def gen_having(self, sql, table, table_alias_dict):
ret = []
for i in range(0, len(sql), 4):
if (sql[(i + 1)] == 'none_agg'):
col = self.gen_col(sql[i], table, table_alias_dict)
else:
col = '{}({})'.format(sql[(i + 1)], self.gen_col(sql[i], table, table_alias_dict))
op = sql[(i + 2)]
val = sql[(i + 3)]
if (val == 'terminal'):
ret.append("{} {} '{}'".format(col, op, val))
else:
val = self.gen_sql(val, table)
ret.append('{} {} ({})'.format(col, op, val))
return 'having {}'.format(','.join(ret))
def find_shortest_path(self, start, end, graph):
stack = [[start, []]]
visited = set()
while (len(stack) > 0):
(ele, history) = stack.pop()
if (ele == end):
return history
for node in graph[ele]:
if (node[0] not in visited):
stack.append((node[0], (history + [(node[0], node[1])])))
visited.add(node[0])
print('table {} table {}'.format(start, end))
self.path_not_found += 1
def gen_from(self, candidate_tables, table):
def find(d, col):
if (d[col] == (- 1)):
return col
return find(d, d[col])
def union(d, c1, c2):
r1 = find(d, c1)
r2 = find(d, c2)
if (r1 == r2):
return
d[r1] = r2
ret = ''
if (len(candidate_tables) <= 1):
if (len(candidate_tables) == 1):
ret = 'from {}'.format(table['table_names_original'][list(candidate_tables)[0]])
else:
ret = 'from {}'.format(table['table_names_original'][0])
return ({}, ret)
table_alias_dict = {}
uf_dict = {}
for t in candidate_tables:
uf_dict[t] = (- 1)
idx = 1
graph = defaultdict(list)
for (acol, bcol) in table['foreign_keys']:
t1 = table['column_names'][acol][0]
t2 = table['column_names'][bcol][0]
graph[t1].append((t2, (acol, bcol)))
graph[t2].append((t1, (bcol, acol)))
candidate_tables = list(candidate_tables)
start = candidate_tables[0]
table_alias_dict[start] = idx
idx += 1
ret = 'from {} as T1'.format(table['table_names_original'][start])
try:
for end in candidate_tables[1:]:
if (end in table_alias_dict):
continue
path = self.find_shortest_path(start, end, graph)
prev_table = start
if (not path):
table_alias_dict[end] = idx
idx += 1
ret = '{} join {} as T{}'.format(ret, table['table_names_original'][end], table_alias_dict[end])
continue
for (node, (acol, bcol)) in path:
if (node in table_alias_dict):
prev_table = node
continue
table_alias_dict[node] = idx
idx += 1
ret = '{} join {} as T{} on T{}.{} = T{}.{}'.format(ret, table['table_names_original'][node], table_alias_dict[node], table_alias_dict[prev_table], table['column_names_original'][acol][1], table_alias_dict[node], table['column_names_original'][bcol][1])
prev_table = node
except:
traceback.print_exc()
print('db:{}'.format(table['db_id']))
return (table_alias_dict, ret)
return (table_alias_dict, ret)
def gen_sql(self, sql, table):
select_clause = ''
from_clause = ''
groupby_clause = ''
orderby_clause = ''
having_clause = ''
where_clause = ''
nested_clause = ''
cols = {}
candidate_tables = set()
nested_sql = {}
nested_label = ''
parent_sql = sql
if ('nested_label' in sql):
nested_label = sql['nested_label']
nested_sql = sql['nested_sql']
sql = sql['sql']
elif ('sql' in sql):
sql = sql['sql']
for key in sql:
if (key not in KW_WITH_COL):
continue
for item in sql[key]:
if (isinstance(item, tuple) and (len(item) == 3)):
if (table['column_names'][item[2]][0] != (- 1)):
candidate_tables.add(table['column_names'][item[2]][0])
(table_alias_dict, from_clause) = self.gen_from(candidate_tables, table)
ret = []
if ('select' in sql):
select_clause = self.gen_select(sql['select'], 'select', table, table_alias_dict)
if (len(select_clause) > 0):
ret.append(select_clause)
else:
print('select not found:{}'.format(parent_sql))
else:
print('select not found:{}'.format(parent_sql))
if (len(from_clause) > 0):
ret.append(from_clause)
if ('where' in sql):
where_clause = self.gen_where(sql['where'], table, table_alias_dict)
if (len(where_clause) > 0):
ret.append(where_clause)
if ('groupBy' in sql):
groupby_clause = self.gen_group_by(sql['groupBy'], 'group by', table, table_alias_dict)
if (len(groupby_clause) > 0):
ret.append(groupby_clause)
if ('orderBy' in sql):
orderby_clause = self.gen_orderby(sql['orderBy'], table, table_alias_dict)
if (len(orderby_clause) > 0):
ret.append(orderby_clause)
if ('having' in sql):
having_clause = self.gen_having(sql['having'], table, table_alias_dict)
if (len(having_clause) > 0):
ret.append(having_clause)
if (len(nested_label) > 0):
nested_clause = '{} {}'.format(nested_label, self.gen_sql(nested_sql, table))
if (len(nested_clause) > 0):
ret.append(nested_clause)
return ' '.join(ret)
def check_acc(self, pred_sql, gt_sql):
pass
|
@attr.s
class TrainConfig():
eval_every_n = attr.ib(default=100)
report_every_n = attr.ib(default=100)
save_every_n = attr.ib(default=100)
keep_every_n = attr.ib(default=1000)
batch_size = attr.ib(default=32)
eval_batch_size = attr.ib(default=32)
max_steps = attr.ib(default=100000)
num_eval_items = attr.ib(default=None)
eval_on_train = attr.ib(default=True)
eval_on_val = attr.ib(default=True)
data_seed = attr.ib(default=None)
init_seed = attr.ib(default=None)
model_seed = attr.ib(default=None)
|
class Logger():
def __init__(self, log_path=None, reopen_to_flush=False):
self.log_file = None
self.reopen_to_flush = reopen_to_flush
if (log_path is not None):
os.makedirs(os.path.dirname(log_path), exist_ok=True)
self.log_file = open(log_path, 'a+')
def log(self, msg):
formatted = '[{}] {}'.format(datetime.datetime.now().replace(microsecond=0).isoformat(), msg)
print(formatted)
if self.log_file:
self.log_file.write((formatted + '\n'))
if self.reopen_to_flush:
log_path = self.log_file.name
self.log_file.close()
self.log_file = open(log_path, 'a+')
else:
self.log_file.flush()
|
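# Hedged usage sketch (not part of the original code): a minimal example of wiring the
# TrainConfig and Logger defined above together. The log path used here is an arbitrary
# placeholder, not a path the project itself uses.
def _example_logger_usage():
    config = TrainConfig(batch_size=16, eval_every_n=50)
    logger = Logger(log_path='logs/example/log.txt', reopen_to_flush=False)
    logger.log('batch_size={}, eval_every_n={}'.format(config.batch_size, config.eval_every_n))
|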
class Trainer():
def __init__(self, logger, config):
if torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
self.logger = logger
self.train_config = registry.instantiate(TrainConfig, config['train'])
self.data_random = random_state.RandomContext(self.train_config.data_seed)
self.model_random = random_state.RandomContext(self.train_config.model_seed)
self.init_random = random_state.RandomContext(self.train_config.init_seed)
with self.init_random:
self.model_preproc = registry.instantiate(registry.lookup('model', config['model']).Preproc, config['model'], unused_keys=('name',))
self.model_preproc.load()
self.model = registry.construct('model', config['model'], unused_keys=('encoder_preproc', 'decoder_preproc'), preproc=self.model_preproc, device=device)
self.model.to(device)
def train(self, config, modeldir):
with self.init_random:
optimizer = registry.construct('optimizer', config['optimizer'], params=self.model.parameters())
lr_scheduler = registry.construct('lr_scheduler', config.get('lr_scheduler', {'name': 'noop'}), optimizer=optimizer)
saver = saver_mod.Saver(self.model, optimizer, keep_every_n=self.train_config.keep_every_n)
last_step = saver.restore(modeldir)
with self.data_random:
train_data = self.model_preproc.dataset('train')
train_data_loader = self._yield_batches_from_epochs(torch.utils.data.DataLoader(train_data, batch_size=self.train_config.batch_size, shuffle=True, drop_last=True, collate_fn=(lambda x: x)))
train_eval_data_loader = torch.utils.data.DataLoader(train_data, batch_size=self.train_config.eval_batch_size, collate_fn=(lambda x: x))
val_data = self.model_preproc.dataset('val')
val_data_loader = torch.utils.data.DataLoader(val_data, batch_size=self.train_config.eval_batch_size, collate_fn=(lambda x: x))
with self.data_random:
for batch in train_data_loader:
if (last_step >= self.train_config.max_steps):
break
if ((last_step % self.train_config.eval_every_n) == 0):
if self.train_config.eval_on_train:
self._eval_model(self.logger, self.model, last_step, train_eval_data_loader, 'train', num_eval_items=self.train_config.num_eval_items)
if self.train_config.eval_on_val:
self._eval_model(self.logger, self.model, last_step, val_data_loader, 'val', num_eval_items=self.train_config.num_eval_items)
with self.model_random:
optimizer.zero_grad()
loss = self.model.compute_loss(batch)
loss.backward()
lr_scheduler.update_lr(last_step)
optimizer.step()
if ((last_step % self.train_config.report_every_n) == 0):
self.logger.log('Step {}: loss={:.4f}'.format(last_step, loss.item()))
last_step += 1
if ((last_step % self.train_config.save_every_n) == 0):
saver.save(modeldir, last_step)
@staticmethod
def _yield_batches_from_epochs(loader):
while True:
for batch in loader:
(yield batch)
@staticmethod
def _eval_model(logger, model, last_step, eval_data_loader, eval_section, num_eval_items=None):
stats = collections.defaultdict(float)
model.eval()
with torch.no_grad():
for eval_batch in eval_data_loader:
batch_res = model.eval_on_batch(eval_batch)
for (k, v) in batch_res.items():
stats[k] += v
if (num_eval_items and (stats['total'] > num_eval_items)):
break
model.train()
for k in stats:
if (k != 'total'):
stats[k] /= stats['total']
if ('total' in stats):
del stats['total']
logger.log('Step {} stats, {}: {}'.format(last_step, eval_section, ', '.join(('{} = {}'.format(k, v) for (k, v) in stats.items()))))
|
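# Minimal illustrative sketch (an assumption, not part of the original Trainer): isolates the
# step-counter cadence from Trainer.train above, so the ordering of the eval / report / save
# checks around the optimisation step and the step increment is easy to see.
def _cadence_sketch(max_steps=10, eval_every_n=4, report_every_n=2, save_every_n=5):
    last_step = 0
    while last_step < max_steps:
        if last_step % eval_every_n == 0:
            print('step {}: evaluate'.format(last_step))
        # ... one optimisation step (zero_grad / loss / backward / step) happens here ...
        if last_step % report_every_n == 0:
            print('step {}: report loss'.format(last_step))
        last_step += 1
        if last_step % save_every_n == 0:
            print('step {}: save checkpoint'.format(last_step))
|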
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--logdir', required=True)
parser.add_argument('--config', required=True)
parser.add_argument('--config-args')
args = parser.parse_args()
if args.config_args:
config = json.loads(_jsonnet.evaluate_file(args.config, tla_codes={'args': args.config_args}))
else:
config = json.loads(_jsonnet.evaluate_file(args.config))
if ('model_name' in config):
args.logdir = os.path.join(args.logdir, config['model_name'])
reopen_to_flush = config.get('log', {}).get('reopen_to_flush')
logger = Logger(os.path.join(args.logdir, 'log.txt'), reopen_to_flush)
with open(os.path.join(args.logdir, 'config-{}.json'.format(datetime.datetime.now().strftime('%Y%m%dT%H%M%S%Z'))), 'w') as f:
json.dump(config, f, sort_keys=True, indent=4)
logger.log('Logging to {}'.format(args.logdir))
trainer = Trainer(logger, config)
trainer.train(config, modeldir=args.logdir)
|
def TreeRep(d):
'\n This is a Python wrapper for the TreeRep algorithm written in Julia.\n \n Input:\n d - Distance matrix, assumed to be symmetric with 0 on the diagonal\n\n Output:\n W - Adjacency matrix of the tree. Note that W may have rows/cols full of zeros, and they should be ignored.\n\n\n Example Code:\n d = np.array([[0,2,3],[2,0,2],[3,2,0]])\n W = TreeRep(d)\n print(W)\n '
Main.d = d
(Main.G, Main.dist) = Main.eval('TreeRep.metric_to_structure(d)')
edges = Main.eval('collect(edges(G))')
W = np.zeros_like(Main.dist)
for edge in edges:
src = (edge.src - 1)
dst = (edge.dst - 1)
W[(src, dst)] = Main.dist[(src, dst)]
W[(dst, src)] = Main.dist[(dst, src)]
return W
|
def TreeRep_no_recursion(d):
'\n This is a Python wrapper for the TreeRep algorithm written in Julia.\n \n Input:\n d - Distance matrix, assumed to be symmetric with 0 on the diagonal\n Output:\n W - Adjacency matrix of the tree. Note that W may have rows/cols full of zeros, and they should be ignored.\n Example Code:\n d = np.array([[0,2,3],[2,0,2],[3,2,0]])\n W = TreeRep_no_recursion(d)\n print(W)\n '
Main.d = d
(Main.G, Main.dist) = Main.eval('TreeRep.metric_to_structure_no_recursion(d)')
edges = Main.eval('collect(edges(G))')
W = np.zeros_like(Main.dist)
for edge in edges:
src = (edge.src - 1)
dst = (edge.dst - 1)
W[(src, dst)] = Main.dist[(src, dst)]
W[(dst, src)] = Main.dist[(dst, src)]
return W
|
class BatchCoCaBO(CoCaBO_Base):
def __init__(self, objfn, initN, bounds, acq_type, C, **kwargs):
super(BatchCoCaBO, self).__init__(objfn, initN, bounds, acq_type, C, **kwargs)
self.best_val_list = []
self.C_list = self.C
self.name = 'BCoCaBO'
def runOptim(self, budget, seed, initData=None, initResult=None):
if (initData and initResult):
self.data = initData[:]
self.result = initResult[:]
else:
(self.data, self.result) = self.initialise(seed)
bestUpperBoundEstimate = ((2 * budget) / 3)
gamma_list = [np.sqrt(((C * math.log((C / self.batch_size))) / (((math.e - 1) * self.batch_size) * bestUpperBoundEstimate))) for C in self.C_list]
gamma_list = [(g if (not np.isnan(g)) else 1) for g in gamma_list]
Wc_list_init = [np.ones(C) for C in self.C_list]
Wc_list = Wc_list_init
nDim = len(self.bounds)
result_list = []
starting_best = np.max(((- 1) * self.result[0]))
result_list.append([(- 1), None, None, starting_best, None, None])
continuous_dims = list(range(len(self.C_list), nDim))
categorical_dims = list(range(len(self.C_list)))
for t in tqdm(range(budget)):
self.iteration = t
(ht_batch_list, probabilityDistribution_list, S0) = self.compute_prob_dist_and_draw_hts(Wc_list, gamma_list, self.batch_size)
ht_batch_list = ht_batch_list.astype(int)
Gt_ht_list = self.RewardperCategoryviaBO(self.f, ht_batch_list, categorical_dims, continuous_dims)
Wc_list = self.update_weights_for_all_cat_var(Gt_ht_list, ht_batch_list, Wc_list, gamma_list, probabilityDistribution_list, self.batch_size, S0=S0)
(besty, li, vi) = self.getBestVal2(self.result)
result_list.append([t, ht_batch_list, Gt_ht_list, besty, self.mix_used, self.model_hp])
self.ht_recommedations.append(ht_batch_list)
df = pd.DataFrame(result_list, columns=['iter', 'ht', 'Reward', 'best_value', 'mix_val', 'model_hp'])
bestx = self.data[li][vi]
self.best_val_list.append([self.batch_size, self.trial_num, li, besty, bestx])
return df
def RewardperCategoryviaBO(self, objfn, ht_next_batch_list, categorical_dims, continuous_dims):
Zt = self.data[0]
yt = self.result[0]
(my_kernel, hp_bounds) = self.get_kernel(categorical_dims, continuous_dims)
gp_opt_params = {'method': 'multigrad', 'num_restarts': 5, 'restart_bounds': hp_bounds, 'hp_bounds': hp_bounds, 'verbose': False}
gp_kwargs = {'y_norm': 'meanstd', 'opt_params': gp_opt_params}
gp_args = (Zt, yt, my_kernel)
gp = GP(*gp_args, **gp_kwargs)
(opt_flag, gp) = self.set_model_params_and_opt_flag(gp)
if opt_flag:
gp.optimize()
self.model_hp = gp.param_array
acq_dict = {'type': 'subspace'}
acq_opt_params = {'method': 'samplegrad', 'num_local': 5, 'num_samples': 5000, 'num_chunks': 10, 'verbose': False}
ymin_opt_params = {'method': 'standard'}
(h_unique, h_counts) = np.unique(ht_next_batch_list, return_counts=True, axis=0)
z_batch_list = []
for (idx, curr_h) in enumerate(h_unique):
gp_for_bo = GPWithSomeFixedDimsAtStart(*gp_args, fixed_dim_vals=curr_h, **gp_kwargs)
gp_for_bo.param_array = gp.param_array
curr_batch_size = h_counts[idx]
interface = JobExecutorInSeriesBlocking(curr_batch_size)
if (len(z_batch_list) > 0):
self.surrogate = gp_for_bo
self.async_infill_strategy = 'kriging_believer'
(surrogate_x_with_fake, surrogate_y_with_fake) = add_hallucinations_to_x_and_y(self, gp_for_bo.X, gp_for_bo.Y_raw, np.vstack(z_batch_list))
gp_for_bo.set_XY(X=surrogate_x_with_fake, Y=surrogate_y_with_fake)
bo = BatchBOHeuristic(objfn, gp_for_bo, self.x_bounds, async_infill_strategy='kriging_believer', offset_acq=True, async_interface=interface, batch_size=curr_batch_size, acq_dict=acq_dict, y_min_opt_params=ymin_opt_params, acq_opt_params=acq_opt_params, optimise_surrogate_model=False)
(x_batch_for_curr_h, _) = bo.get_next()
z_batch_for_curr_h = np.hstack((np.vstack(([curr_h] * curr_batch_size)), np.vstack(x_batch_for_curr_h)))
z_batch_list.append(z_batch_for_curr_h)
z_batch_next = np.vstack(z_batch_list)
y_batch_next = np.zeros((self.batch_size, 1))
for b in range(self.batch_size):
x_next = z_batch_next[(b, continuous_dims)]
ht_next_list = z_batch_next[(b, categorical_dims)]
try:
y_next = objfn(ht_next_list, x_next)
except:
print('stop')
y_batch_next[b] = y_next
self.mix_used = gp.kern.mix[0]
self.data[0] = np.row_stack((self.data[0], z_batch_next))
self.result[0] = np.row_stack((self.result[0], y_batch_next))
ht_batch_list_rewards = self.compute_reward_for_all_cat_variable(ht_next_batch_list, self.batch_size)
bestval_ht = np.max((self.result[0] * (- 1)))
print(f'arm pulled={ht_next_batch_list[:]} ; y_best = {bestval_ht}; mix={self.mix_used}')
return ht_batch_list_rewards
def get_kernel(self, categorical_dims, continuous_dims):
if self.ARD:
hp_bounds = np.array([*([[0.0001, 3]] * len(continuous_dims)), [1e-06, 1]])
else:
hp_bounds = np.array([[0.0001, 3], [1e-06, 1]])
(fix_mix_in_this_iter, mix_value, hp_bounds) = self.get_mix(hp_bounds)
k_cat = CategoryOverlapKernel(len(categorical_dims), active_dims=categorical_dims)
k_cont = GPy.kern.Matern52(len(continuous_dims), lengthscale=self.default_cont_lengthscale, active_dims=continuous_dims, ARD=self.ARD)
my_kernel = MixtureViaSumAndProduct((len(categorical_dims) + len(continuous_dims)), k_cat, k_cont, mix=mix_value, fix_inner_variances=True, fix_mix=fix_mix_in_this_iter)
return (my_kernel, hp_bounds)
|
class CoCaBO(CoCaBO_Base):
def __init__(self, objfn, initN, bounds, acq_type, C, **kwargs):
super(CoCaBO, self).__init__(objfn, initN, bounds, acq_type, C, **kwargs)
self.best_val_list = []
self.C_list = self.C
self.name = 'CoCaBO'
def runOptim(self, budget, seed, batch_size=1, initData=None, initResult=None):
if (initData and initResult):
self.data = initData[:]
self.result = initResult[:]
else:
(self.data, self.result) = self.initialise(seed)
b = batch_size
bestUpperBoundEstimate = ((2 * budget) / 3)
gamma_list = [math.sqrt(((C * math.log(C)) / ((math.e - 1) * bestUpperBoundEstimate))) for C in self.C_list]
Wc_list_init = [np.ones(C) for C in self.C_list]
Wc_list = Wc_list_init
nDim = len(self.bounds)
result_list = []
starting_best = np.max(((- 1) * self.result[0]))
result_list.append([(- 1), None, None, starting_best, None, None])
continuous_dims = list(range(len(self.C_list), nDim))
categorical_dims = list(range(len(self.C_list)))
for t in tqdm(range(budget)):
self.iteration = t
(ht_list, probabilityDistribution_list) = self.compute_prob_dist_and_draw_hts(Wc_list, gamma_list, batch_size)
Gt_ht_list = self.RewardperCategoryviaBO(self.f, ht_list, categorical_dims, continuous_dims, self.bounds, self.acq_type, b)
Wc_list = self.update_weights_for_all_cat_var(Gt_ht_list, ht_list, Wc_list, gamma_list, probabilityDistribution_list, batch_size)
(besty, li, vi) = self.getBestVal2(self.result)
result_list.append([t, ht_list, Gt_ht_list, besty, self.mix_used, self.model_hp])
self.ht_recommedations.append(ht_list)
df = pd.DataFrame(result_list, columns=['iter', 'ht', 'Reward', 'best_value', 'mix_val', 'model_hp'])
bestx = self.data[li][vi]
self.best_val_list.append([batch_size, self.trial_num, li, besty, bestx])
return df
def RewardperCategoryviaBO(self, objfn, ht_next_list, categorical_dims, continuous_dims, bounds, acq_type, b):
Zt = self.data[0]
yt = self.result[0]
(my_kernel, hp_bounds) = self.get_kernel(categorical_dims, continuous_dims)
gp_opt_params = {'method': 'multigrad', 'num_restarts': 5, 'restart_bounds': hp_bounds, 'hp_bounds': hp_bounds, 'verbose': False}
gp = GP(Zt, yt, my_kernel, y_norm='meanstd', opt_params=gp_opt_params)
(opt_flag, gp) = self.set_model_params_and_opt_flag(gp)
if opt_flag:
gp.optimize()
self.model_hp = gp.param_array
self.mix_used = gp.kern.mix[0]
x_bounds = np.array([d['domain'] for d in bounds if (d['type'] == 'continuous')])
if (acq_type == 'EI'):
acq = EI(gp, np.min(gp.Y_raw))
elif (acq_type == 'LCB'):
acq = UCB(gp, 2.0)
acq_sub = AcquisitionOnSubspace(acq, my_kernel.k2.active_dims, ht_next_list)
def optimiser_func(x):
return (- acq_sub.evaluate(np.atleast_2d(x)))
res = sample_then_minimize(optimiser_func, x_bounds, num_samples=5000, num_chunks=10, num_local=3, minimize_options=None, evaluate_sequentially=False)
x_next = res.x
z_next = np.hstack((ht_next_list, x_next))
y_next = objfn(ht_next_list, x_next)
self.data[0] = np.row_stack((self.data[0], z_next))
self.result[0] = np.row_stack((self.result[0], y_next))
ht_next_list_array = np.atleast_2d(ht_next_list)
ht_list_rewards = self.compute_reward_for_all_cat_variable(ht_next_list_array, b)
ht_list_rewards = list(ht_list_rewards.flatten())
bestval_ht = np.max((self.result[0] * (- 1)))
print(f'arm pulled={ht_next_list[:]}; y_best = {bestval_ht}; mix={self.mix_used}')
return ht_list_rewards
def get_kernel(self, categorical_dims, continuous_dims):
if self.ARD:
hp_bounds = np.array([*([[0.0001, 3]] * len(continuous_dims)), [1e-06, 1]])
else:
hp_bounds = np.array([[0.0001, 3], [1e-06, 1]])
(fix_mix_in_this_iter, mix_value, hp_bounds) = self.get_mix(hp_bounds)
k_cat = CategoryOverlapKernel(len(categorical_dims), active_dims=categorical_dims)
k_cont = GPy.kern.Matern52(len(continuous_dims), lengthscale=self.default_cont_lengthscale, active_dims=continuous_dims, ARD=self.ARD)
my_kernel = MixtureViaSumAndProduct((len(categorical_dims) + len(continuous_dims)), k_cat, k_cont, mix=mix_value, fix_inner_variances=True, fix_mix=fix_mix_in_this_iter)
return (my_kernel, hp_bounds)
|
def CoCaBO_Exps(obj_func, budget, initN=24, trials=40, kernel_mix=0.5, batch=None):
saving_path = f'data/syntheticFns/{obj_func}/'
if (not os.path.exists(saving_path)):
os.makedirs(saving_path)
if (obj_func == 'func2C'):
f = testFunctions.syntheticFunctions.func2C
categories = [3, 5]
bounds = [{'name': 'h1', 'type': 'categorical', 'domain': (0, 1, 2)}, {'name': 'h2', 'type': 'categorical', 'domain': (0, 1, 2, 3, 4)}, {'name': 'x1', 'type': 'continuous', 'domain': ((- 1), 1)}, {'name': 'x2', 'type': 'continuous', 'domain': ((- 1), 1)}]
elif (obj_func == 'func3C'):
f = testFunctions.syntheticFunctions.func3C
categories = [3, 5, 4]
bounds = [{'name': 'h1', 'type': 'categorical', 'domain': (0, 1, 2)}, {'name': 'h2', 'type': 'categorical', 'domain': (0, 1, 2, 3, 4)}, {'name': 'h3', 'type': 'categorical', 'domain': (0, 1, 2, 3)}, {'name': 'x1', 'type': 'continuous', 'domain': ((- 1), 1)}, {'name': 'x2', 'type': 'continuous', 'domain': ((- 1), 1)}]
else:
raise NotImplementedError
if (batch == 1):
mabbo = CoCaBO(objfn=f, initN=initN, bounds=bounds, acq_type='LCB', C=categories, kernel_mix=kernel_mix)
else:
mabbo = BatchCoCaBO(objfn=f, initN=initN, bounds=bounds, acq_type='LCB', C=categories, kernel_mix=kernel_mix, batch_size=batch)
mabbo.runTrials(trials, budget, saving_path)
|
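# Hedged helper sketch (not in the original code): shows how the `bounds` lists constructed in
# CoCaBO_Exps above map onto the categorical/continuous index lists and the continuous box that
# CoCaBO.runOptim and RewardperCategoryviaBO derive internally.
import numpy as np

def _split_bounds(bounds, C_list):
    categorical_dims = list(range(len(C_list)))
    continuous_dims = list(range(len(C_list), len(bounds)))
    x_bounds = np.array([d['domain'] for d in bounds if d['type'] == 'continuous'])
    return categorical_dims, continuous_dims, x_bounds

# For func2C (categories=[3, 5]): categorical_dims=[0, 1], continuous_dims=[2, 3],
# x_bounds=[[-1, 1], [-1, 1]].
|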
def DepRound(weights_p, k=1, isWeights=True):
' [[Algorithms for adversarial bandit problems with multiple plays, by T.Uchiya, A.Nakamura and M.Kudo, 2010](http://hdl.handle.net/2115/47057)] Figure 5 (page 15) is a very clean presentation of the algorithm.\n\n - Inputs: :math:`k < K` and weights_p :math:`= (p_1, \\dots, p_K)` such that :math:`\\sum_{i=1}^{K} p_i = k` (or :math:`= 1`).\n - Output: A subset of :math:`\\{1,\\dots,K\\}` with exactly :math:`k` elements. Each action :math:`i` is selected with probability exactly :math:`p_i`.\n\n Example:\n\n >>> import numpy as np; import random\n >>> np.random.seed(0); random.seed(0) # for reproductibility!\n >>> K = 5\n >>> k = 2\n\n >>> weights_p = [ 2, 2, 2, 2, 2 ] # all equal weights\n >>> DepRound(weights_p, k)\n [3, 4]\n >>> DepRound(weights_p, k)\n [3, 4]\n >>> DepRound(weights_p, k)\n [0, 1]\n\n >>> weights_p = [ 10, 8, 6, 4, 2 ] # decreasing weights\n >>> DepRound(weights_p, k)\n [0, 4]\n >>> DepRound(weights_p, k)\n [1, 2]\n >>> DepRound(weights_p, k)\n [3, 4]\n\n >>> weights_p = [ 3, 3, 0, 0, 3 ] # decreasing weights\n >>> DepRound(weights_p, k)\n [0, 4]\n >>> DepRound(weights_p, k)\n [0, 4]\n >>> DepRound(weights_p, k)\n [0, 4]\n >>> DepRound(weights_p, k)\n [0, 1]\n\n - See [[Gandhi et al, 2006](http://dl.acm.org/citation.cfm?id=1147956)] for the details.\n '
p = np.array(weights_p)
K = len(p)
assert (k < K), 'Error: k = {} should be < K = {}.'.format(k, K)
if (not np.isclose(np.sum(p), 1)):
p = (p / np.sum(p))
assert (np.all((0 <= p)) and np.all((p <= 1))), 'Error: the weights (p_1, ..., p_K) = {} should all be 0 <= p_i <= 1 ...'.format(p)
assert np.isclose(np.sum(p), 1), 'Error: the sum of weights p_1 + ... + p_K should be = 1 (= {}).'.format(np.sum(p))
possible_ij = [a for a in range(K) if (0 < p[a] < 1)]
while possible_ij:
if (len(possible_ij) == 1):
i = np.random.choice(possible_ij, size=1)
j = i
else:
(i, j) = np.random.choice(possible_ij, size=2, replace=False)
(pi, pj) = (p[i], p[j])
assert (0 < pi < 1), 'Error: pi = {} (with i = {}) is not 0 < pi < 1.'.format(pi, i)
assert (0 < pj < 1), 'Error: pj = {} (with j = {}) is not 0 < pj < 1.'.format(pj, j)
assert (i != j), 'Error: i = {} is different than with j = {}.'.format(i, j)
(alpha, beta) = (min((1 - pi), pj), min(pi, (1 - pj)))
proba = (alpha / (alpha + beta))
if with_proba(proba):
(pi, pj) = ((pi + alpha), (pj - alpha))
else:
(pi, pj) = ((pi - beta), (pj + beta))
(p[i], p[j]) = (pi, pj)
possible_ij = [a for a in range(K) if (0 < p[a] < 1)]
if (len([a for a in range(K) if np.isclose(p[a], 0)]) == (K - k)):
break
subset = [a for a in range(K) if np.isclose(p[a], 1)]
if (len(subset) < k):
subset = [a for a in range(K) if (not np.isclose(p[a], 0))]
assert (len(subset) == k), 'Error: DepRound({}, {}) is supposed to return a set of size {}, but {} has size {}...'.format(weights_p, k, k, subset, len(subset))
return subset
|
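# Hedged smoke test (illustrative only, not part of the library): checks the behaviour
# described in the DepRound docstring on its own example weights, namely that exactly k
# distinct arms are returned and that heavier arms are selected more often. Assumes DepRound
# and its helper with_proba (defined later in this file) are available.
import numpy as np

def _depround_smoke_test(n_trials=2000, k=2):
    weights = [10, 8, 6, 4, 2]
    counts = np.zeros(len(weights))
    for _ in range(n_trials):
        subset = DepRound(weights, k=k)
        assert len(subset) == k and len(set(subset)) == k
        for arm in subset:
            counts[arm] += 1
    print('selection frequencies:', counts / n_trials)  # heavier arms should dominate
|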
class AcquisitionFunction(object):
'\n Base class for acquisition functions. Used to define the interface\n '
def __init__(self, surrogate=None, verbose=False):
self.surrogate = surrogate
self.verbose = verbose
def evaluate(self, x: np.ndarray, **kwargs) -> np.ndarray:
raise NotImplementedError
|
class AcquisitionOnSubspace():
def __init__(self, acq, free_idx, fixed_vals):
self.acq = acq
self.free_idx = free_idx
self.fixed_vals = fixed_vals
def evaluate(self, x: np.ndarray, **kwargs):
x_fixed = ([self.fixed_vals] * len(x))
x_complete = np.hstack((np.vstack(x_fixed), x))
return self.acq.evaluate(x_complete)
|
class EI(AcquisitionFunction):
'\n Expected improvement acquisition function for a Gaussian model\n\n Model should return (mu, var)\n '
def __init__(self, surrogate: GP, best: np.ndarray, verbose=False):
self.best = best
super().__init__(surrogate, verbose)
def __str__(self) -> str:
return 'EI'
def evaluate(self, x: np.ndarray, **kwargs) -> np.ndarray:
'\n Evaluates the EI acquisition function.\n\n Parameters\n ----------\n x\n Input to evaluate the acquisition function at\n\n '
if self.verbose:
print('Evaluating EI at', x)
(mu, var) = self.surrogate.predict(np.atleast_2d(x))
var = np.clip(var, 1e-08, np.inf)
s = np.sqrt(var)
gamma = ((self.best - mu) / s)
return (((s * gamma) * norm.cdf(gamma)) + (s * norm.pdf(gamma))).flatten()
|
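# Hedged numeric check (illustrative only): verifies the closed-form expression used in
# EI.evaluate above, s * (gamma * Phi(gamma) + phi(gamma)) with gamma = (best - mu) / s,
# against a Monte Carlo estimate of E[max(best - Y, 0)] for Y ~ N(mu, var). No GP surrogate
# is needed; mu, var and best are arbitrary example values.
import numpy as np
from scipy.stats import norm

def _ei_closed_form_check(mu=0.3, var=0.04, best=0.2, n_samples=200000):
    s = np.sqrt(var)
    gamma = (best - mu) / s
    ei_closed = s * gamma * norm.cdf(gamma) + s * norm.pdf(gamma)
    y = np.random.normal(mu, s, size=n_samples)
    ei_mc = np.mean(np.maximum(best - y, 0.0))
    print('closed form: {:.5f}, Monte Carlo: {:.5f}'.format(ei_closed, ei_mc))
|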
class PI(AcquisitionFunction):
'\n Probability of improvement acquisition function for a Gaussian model\n\n Model should return (mu, var)\n '
def __init__(self, surrogate: GP, best: np.ndarray, tradeoff: float, verbose=False):
self.best = best
self.tradeoff = tradeoff
super().__init__(surrogate, verbose)
def __str__(self) -> str:
return f'PI-{self.tradeoff}'
def evaluate(self, x, **kwargs) -> np.ndarray:
'\n Evaluates the PI acquisition function.\n\n Parameters\n ----------\n x\n Input to evaluate the acquisition function at\n\n '
if self.verbose:
print('Evaluating PI at', x)
(mu, var) = self.surrogate.predict(x)
var = np.clip(var, 1e-08, np.inf)
s = np.sqrt(var)
gamma = (((self.best - mu) - self.tradeoff) / s)
return norm.cdf(gamma).flatten()
|
class UCB(AcquisitionFunction):
'\n Upper confidence bound acquisition function for a Gaussian model\n\n Model should return (mu, var)\n '
def __init__(self, surrogate: GP, tradeoff: float, verbose=False):
self.tradeoff = tradeoff
super().__init__(surrogate, verbose)
def __str__(self) -> str:
return f'UCB-{self.tradeoff}'
def evaluate(self, x, **kwargs) -> np.ndarray:
'\n Evaluates the UCB acquisition function.\n\n Parameters\n ----------\n x\n Input to evaluate the acquisition function at\n '
if self.verbose:
print('Evaluating UCB at', x)
(mu, var) = self.surrogate.predict(x)
var = np.clip(var, 1e-08, np.inf)
s = np.sqrt(var)
return (- (mu - (self.tradeoff * s)).flatten())
|
class AsyncBayesianOptimization(BayesianOptimisation):
"Async Bayesian optimization class\n\n Performs Bayesian optimization with a set number of busy and free workers\n\n Parameters\n ----------\n sampler : Callable\n function handle returning sample from expensive function being\n optimized\n\n surrogate : basic_gp.GP\n (GP) model that models the surface of 'objective'\n\n bounds : ndarray\n bounds of each dimension of x as a Dx2 vector (default [0, 1])\n\n async_interface : ExecutorBase\n Interface that deals with exchange of information between\n async workers and the BO loop\n\n batch_size : int\n How many tasks to suggest in one go. This will wait for the\n required number of workers to become free before evaluating the batch\n\n acq_dict : acquisition.AcquisitionFunction\n Defaults to EI\n\n starting_jobs : list(dicts)\n list of dicts in the form {'x': np.ndarray, 'f': callable, 't': float}\n\n optimise_surrogate_model : bool\n Whether to optimise the surrogate model after each BayesOpt iteration\n\n track_cond_k : bool\n Whether to keep track of cond(K) of the surrogate model across\n BayesOpt iterations\n\n y_min_opt_params : dict\n opt_params dict with the following fields:\n\n - method = 'standard', multigrad', 'direct'\n - n_direct_evals = for direct\n - num_restarts = for multigrad\n\n acq_opt_params : dict\n opt_params dict with the following fields:\n\n - method = 'multigrad', 'direct'\n - n_direct_evals = for direct\n - num_restarts = for multigrad\n\n n_bo_steps : int\n Number of BayesOpt steps\n\n min_acq : float\n cut-off threshold for acquisition function\n\n "
def __init__(self, sampler: Callable, surrogate: GP, bounds: np.ndarray, async_interface: ExecutorBase=None, starting_jobs: Optional[list]=None, **kwargs):
self.starting_jobs = starting_jobs
self.interface = async_interface
super().__init__(sampler, surrogate, bounds, **kwargs)
def _initialise_bo_df(self):
'\n Initialise the DataFrame for keeping track of the BO run\n '
self.df = pd.DataFrame(columns=['ii', 't', 'y_min', 'x_min', 'n_busy', 'x_busy', 'n_data', 'model_x', 'model_y', 'model_param_array', 'acq_at_sample', 'requested_x_sample', 'x_sample', 'y_sample', 'time_taken_opt_surrogate', 'time_taken_find_y_min', 'time_taken_get_next', 'time_taken_bo_step', 'var_at_y_min', 'cond_k'])
(self.x_min, self.y_min, self.var_at_y_min) = self._get_y_min()
if (self.starting_jobs is not None):
x_busy = np.vstack([job['x'] for job in self.starting_jobs])
else:
x_busy = None
starting_record = {'ii': (- 1), 'iteration': 0, 't': self.interface.status['t'], 'y_min': self.y_min, 'x_min': self.x_min, 'n_busy': self.interface.n_busy_workers, 'x_busy': x_busy, 'n_free': self.interface.n_free_workers, 'n_data': len(self.surrogate.X), 'model_x': self.surrogate.X, 'model_y': self.surrogate.Y, 'model_param_array': self.surrogate.param_array, 'acq_at_sample': np.nan, 'requested_x_sample': np.nan, 'y_sample': np.nan, 'x_sample': np.nan, 'time_taken_opt_surrogate': np.nan, 'time_taken_find_y_min': np.nan, 'time_taken_get_next': np.nan, 'time_taken_bo_step': np.nan, 'var_at_y_min': self.var_at_y_min, 'cond_k': np.nan}
self.df = self.df.append([starting_record], sort=True)
def _update_bo_df(self, x_batch, acq_at_x_best, new_sample_x, new_sample_y, time_dict):
"Updates the local dataframe with the current iteration's data\n\n Parameters\n ----------\n x_batch\n Best location to sample at\n acq_at_x_best\n Acquisition function value at x_best\n new_sample_x\n actual sample received\n new_sample_y\n actual sample received\n time_dict\n time taken for different parts of the algo in seconds\n\n "
current_record = {'ii': self.curr_bo_step, 't': self.interface.status['t'], 'iteration': (self.curr_bo_step + 1), 'y_min': self.y_min, 'x_min': self.x_min, 'n_busy': self.interface.n_busy_workers, 'x_busy': self.interface.get_array_of_running_jobs(), 'n_free': self.interface.n_free_workers, 'n_data': len(self.surrogate.X), 'model_x': self.surrogate.X, 'model_y': self.surrogate.Y, 'model_param_array': self.surrogate.param_array, 'acq_at_sample': acq_at_x_best, 'requested_x_sample': x_batch, 'y_sample': new_sample_y, 'x_sample': new_sample_x, 'time_taken_opt_surrogate': time_dict['time_taken_opt_surrogate'], 'time_taken_find_y_min': time_dict['time_taken_find_y_min'], 'time_taken_get_next': time_dict['time_taken_get_next'], 'time_taken_bo_step': time_dict['time_taken_bo_step'], 'var_at_y_min': self.var_at_y_min, 'cond_k': (self.cond_k_hist[self.curr_bo_step] if self.track_cond_k else None)}
self.df = self.df.append([current_record], sort=True)
def run(self):
'\n Run the Async BayesOpt loop\n '
t_starting_run = time.time()
if self.verbose:
print('Started BayesOpt.run()')
self._initialise_bo_df()
if (self.starting_jobs is not None):
for job in self.starting_jobs:
self.interface.add_job_to_queue(job)
for self.curr_bo_step in range(0, self.n_bo_steps):
(new_sample_x, new_sample_y) = (None, None)
if True:
t_beginning_of_bo_step = time.time()
if self.verbose:
print('**--** Starting BayesOpt iteration {}/{} **--**'.format((self.curr_bo_step + 1), self.n_bo_steps))
self.interface.run_until_n_free(self.batch_size)
n_free_workers = self.interface.status['n_free_workers']
completed_jobs = self.interface.get_completed_jobs()
if (len(completed_jobs) > 0):
(new_sample_x, new_sample_y) = self._add_completed_jobs_to_surrogate(completed_jobs)
assert (n_free_workers >= self.batch_size)
t_before_opt_surrogate = time.time()
self.optimize_surrogate_if_needed()
t_after_opt_surrogate = time.time()
t_before_find_y_min = time.time()
(self.x_min, self.y_min, self.var_at_y_min) = self._get_y_min()
t_after_find_y_min = t_before_get_next = time.time()
if self.verbose:
print('Selecting next point(s)...')
(x_batch, acq_at_x_batch) = self.get_next()
t_after_get_next = t_end_of_bo_step = time.time()
time_taken_opt_surrogate = (t_after_opt_surrogate - t_before_opt_surrogate)
time_taken_find_y_min = (t_after_find_y_min - t_before_find_y_min)
time_taken_get_next = (t_after_get_next - t_before_get_next)
time_taken_bo_step = (t_end_of_bo_step - t_beginning_of_bo_step)
time_taken_dict = {'time_taken_opt_surrogate': time_taken_opt_surrogate, 'time_taken_find_y_min': time_taken_find_y_min, 'time_taken_get_next': time_taken_get_next, 'time_taken_bo_step': time_taken_bo_step}
if self.create_plots:
self.plot_step(x_batch=x_batch)
jobs = []
for ii in range(len(x_batch)):
job = {'x': x_batch[ii], 'f': self.sampler}
jobs.append(job)
self.interface.add_job_to_queue(jobs)
self.save_history(None)
if (self.curr_bo_step == (self.n_bo_steps - 1)):
if (self.verbose > 1):
print('Used up budget.')
print('Minimum at', self.surrogate.X[np.argmin(self.surrogate.Y)])
self._update_bo_df(x_batch, acq_at_x_batch, new_sample_x, new_sample_y, time_taken_dict)
sys.stdout.flush()
if self.verbose:
print(f'Completed BO exp in; {round((time.time() - t_starting_run), 2)}s')
def get_next(self):
'Finds the next point(s) to sample at\n\n Returns\n -------\n x_best : np.ndarray\n Location to sample at\n acq_at_x_best : float\n Value of the acquisition function at the sampling locations\n '
raise NotImplementedError
def _add_completed_jobs_to_surrogate(self, completed_jobs):
x = []
y = []
for job in completed_jobs:
x.append(job['x'])
y.append(job['y'])
x = np.vstack(x)
y = np.vstack(y)
self._update_surrogate_with_new_data(x, y)
return (x, y)
def plot_step(self, x_batch=None, save_plots=None, **kwargs):
if (save_plots is None):
save_plots = self.save_plots
if isinstance(x_batch, list):
x_batch = np.vstack(x_batch)
(fig, axes) = super().plot_step(x_batch, external_call=True)
acq = self._create_acq_function()
if (len(self.bounds) == 1):
x_busy = self.interface.get_array_of_running_jobs()
if (x_busy is not None):
axes[0].plot(x_busy, self.surrogate.predict(x_busy)[0], 'g*', label='Busy', markersize=16)
axes[1].plot(x_busy, acq.evaluate(x_busy), 'g*', label='Busy', markersize=16)
axes[0].legend(numpoints=1)
axes[1].legend(numpoints=1)
if save_plots:
self.save_plots_to_disk(fig)
else:
fig.show()
return (fig, axes)
|
class AsyncBOHeuristicQEI(AsyncBayesianOptimization):
"Async BO with approximate q-EI\n\n Q-EI is approximated by sequentially finding the best location and\n setting its y-value using one of Ginsbourger's heuristics until the\n batch is full\n "
def __init__(self, sampler, surrogate, bounds, async_infill_strategy='kriging_believer', **kwargs):
from utils.ml_utils.models.additive_gp import GPWithSomeFixedDimsAtStart
if (async_infill_strategy is None):
self.async_infill_strategy = 'constant_liar_min'
else:
self.async_infill_strategy = async_infill_strategy
if isinstance(surrogate, GPWithSomeFixedDimsAtStart):
self.mabbo = True
elif isinstance(surrogate, GP):
self.mabbo = False
else:
raise NotImplementedError
super().__init__(sampler, surrogate, bounds, **kwargs)
def get_next(self):
'Finds the next point(s) to sample at\n\n This function interacts with the async interface to get info about\n completed and running jobs and computes the next point(s) to add\n to the queue based on the batch size\n\n Returns\n -------\n x_best : np.ndarray\n Location to sample at\n acq_at_x_best : float\n Value of the acquisition function at the sampling locations\n '
old_surrogate_x = self.surrogate.X
old_surrogate_y = self.surrogate.Y_raw
x_busy = self.interface.get_array_of_running_jobs()
if self.mabbo:
fixed_dim_vals = self.surrogate.fixed_dim_vals
else:
fixed_dim_vals = None
(surrogate_x_with_fake, surrogate_y_with_fake) = add_hallucinations_to_x_and_y(self, old_surrogate_x, old_surrogate_y, x_busy, fixed_dim_vals=fixed_dim_vals)
self.surrogate.set_XY(X=surrogate_x_with_fake, Y=surrogate_y_with_fake)
acq = self._create_acq_function()
(x_best, acq_at_x_best) = self._optimise_acq_func(acq)
x_batch = [x_best]
acq_at_each_x_batch = [acq_at_x_best]
if (self.batch_size > 1):
for ii in range((self.batch_size - 1)):
current_surrogate_x = self.surrogate.X
current_surrogate_y = self.surrogate.Y_raw
(surrogate_x_with_fake, surrogate_y_with_fake) = add_hallucinations_to_x_and_y(self, current_surrogate_x, current_surrogate_y, x_batch, fixed_dim_vals=fixed_dim_vals)
self.surrogate.set_XY(X=surrogate_x_with_fake, Y=surrogate_y_with_fake)
acq = self._create_acq_function()
(x_best, acq_at_x_best) = self._optimise_acq_func(acq)
x_batch.append(x_best)
acq_at_each_x_batch.append(acq_at_x_best)
self.surrogate.set_XY(X=old_surrogate_x, Y=old_surrogate_y)
assert (len(x_batch) == self.batch_size)
return (x_batch, acq_at_each_x_batch)
|
class BatchBOHeuristic(AsyncBOHeuristicQEI):
pass
|
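# Minimal sketch (an assumption, not the project's implementation) of the greedy batch
# construction described in the AsyncBOHeuristicQEI docstring, shown here in its constant-liar
# variant with the GP and acquisition machinery stripped out. `fit`, `acq_argmax` and the
# incumbent min(Y) are hypothetical stand-ins for the surrogate refit, acquisition maximiser
# and y_min used by the real class above.
def constant_liar_batch(X, Y, batch_size, fit, acq_argmax):
    X_aug, Y_aug = list(X), list(Y)
    batch = []
    for _ in range(batch_size):
        model = fit(X_aug, Y_aug)        # refit surrogate on real + hallucinated data
        x_next = acq_argmax(model)       # greedily maximise the acquisition function
        batch.append(x_next)
        X_aug.append(x_next)
        Y_aug.append(min(Y))             # "lie": pretend the pending point returned the incumbent
    return batch
|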
class ExecutorBase():
'Base interface for interaction with multiple parallel workers\n\n The simulator and real async interfaces will subclass this class so that\n their interfaces are the same\n\n Main way to interact with this object is to queue jobs using\n add_job_to_queue(), to wait until the desired number of jobs have completed\n using run_until_n_free() and to get the results via get_completed_jobs().\n\n Parameters\n ----------\n n_workers : int\n Number of workers allowed\n\n verbose\n Verbosity\n\n Attributes\n ----------\n n_workers : int\n Total number of workers\n\n n_free_workers : int\n Number of workers without allocated jobs\n\n n_busy_workers : int\n Number of workers currently executing a job\n\n '
def __init__(self, n_workers: int, verbose: bool=False):
self.verbose = verbose
self.n_workers = n_workers
self.n_free_workers = n_workers
self.n_busy_workers = 0
self._queue = []
self._running_tasks = []
self._completed_tasks = []
@property
def age(self) -> float:
raise NotImplementedError
@property
def is_running(self) -> bool:
all_tasks_todo = (len(self._queue) + len(self._running_tasks))
if (all_tasks_todo > 0):
return True
else:
return False
@property
def status(self) -> Dict:
"Get current state (counts) of async workers.\n\n Returns\n -------\n Dict\n Fields are 'n_free_workers', 'n_busy_workers',\n 'n_running_tasks',\n 'n_completed_tasks', n_queue, 't'.\n "
status = {'n_free_workers': self.n_free_workers, 'n_busy_workers': self.n_busy_workers, 'n_completed_tasks': len(self._completed_tasks), 'n_queue': len(self._queue), 't': self.age, 'is_running': self.is_running}
if self.verbose:
print(f'''{self.__class__.__name__}.status:
{status}''')
return status
def _validate_job(self, job: dict) -> None:
assert ('x' in job.keys())
assert ('f' in job.keys())
assert callable(job['f'])
def run_until_n_free(self, n_desired_free_workers) -> None:
'Run the simulator until a desired number of workers are free\n\n Parameters\n ----------\n n_desired_free_workers: int\n\n '
raise NotImplementedError
def run_until_empty(self) -> None:
'Run the simulator until all jobs are completed\n\n '
raise NotImplementedError
def add_job_to_queue(self, job: Union[(Dict, List)]) -> None:
"Add a job to the queue\n\n Parameters\n ----------\n job : dict\n Dictionary with a job definition that is passed to a worker.\n\n Structure:\n\n {\n 'x': location of sample,\n 'f': function executing the sample,\n }\n\n "
if self.verbose:
print(f'''{self.__class__.__name__}.queue_job: queuing job:
{job}''')
if isinstance(job, list):
for j in job:
self._queue.append(j)
else:
self._queue.append(job)
self._update_internal_state()
def _update_internal_state(self) -> None:
'\n Main function that takes care of moving jobs to the correct places\n and setting statuses and counts\n '
raise NotImplementedError
def get_completed_jobs(self) -> List:
'Get the completed tasks and clear the internal list.\n\n Returns\n -------\n list\n List with dicts of the completed tasks\n '
if self.verbose:
print(f'{self.__class__.__name__}.get_completed_jobs: Getting completed jobs')
out = self._completed_tasks
self._completed_tasks = []
return out
def get_array_of_running_jobs(self) -> np.ndarray:
'Get a numpy array with each busy location in a row\n\n Returns\n -------\n numpy array of the busy locations stacked vertically\n '
list_of_jobs = self.get_list_of_running_jobs()
if (len(list_of_jobs) > 0):
x_busy = np.vstack([job['x'] for job in list_of_jobs])
else:
x_busy = None
return x_busy
def get_list_of_running_jobs(self) -> List:
'Get the currently-running tasks\n\n Returns\n -------\n List with dicts of the currently-running tasks\n '
if self.verbose:
print(f'{self.__class__.__name__}.get_running_jobs')
return self._running_tasks
|
class JobExecutor(ExecutorBase):
"Async controller that interacts with external async function calls\n\n Will be used to run ML algorithms in parallel for synch and async BO\n\n Functions that run must take in a job dict and return the same\n job dict with the result ['y'] and runtime ['t'].\n "
def __init__(self, n_workers: int, polling_frequency=0.5, verbose=False):
super().__init__(n_workers, verbose=verbose)
self._creation_time = time.time()
self._polling_delay = polling_frequency
self._executor = futures.ProcessPoolExecutor(n_workers)
self._futures = []
@property
def age(self) -> float:
return (time.time() - self._creation_time)
def run_until_n_free(self, n_desired_free_workers) -> None:
'Wait until a desired number of workers are free\n\n Parameters\n ----------\n n_desired_free_workers: int\n\n '
if self.verbose:
print(f'{self.__class__.__name__}.run_until_free({n_desired_free_workers})')
while (self.n_free_workers < n_desired_free_workers):
time.sleep(self._polling_delay)
self._update_internal_state()
def run_until_empty(self) -> None:
'Run the simulator until all jobs are completed\n\n '
if self.verbose:
print(f'{self.__class__.__name__}.run_until_empty()')
while (self.n_free_workers < self.n_workers):
time.sleep(self._polling_delay)
self._update_internal_state()
def _update_internal_state(self) -> None:
'\n Setting internal counts\n '
self._clean_up_completed_processes()
self._begin_jobs_if_workers_free()
self.n_free_workers = (self.n_workers - len(self._running_tasks))
self.n_busy_workers = len(self._running_tasks)
def _clean_up_completed_processes(self) -> None:
'\n Remove completed jobs from the current processes and save results\n '
if (len(self._futures) > 0):
idx_complete = np.where([f.done() for f in self._futures])[0]
for ii in np.sort(idx_complete)[::(- 1)]:
f_complete = self._futures.pop(ii)
complete_job_dict = self._running_tasks.pop(ii)
complete_job_dict['y'] = f_complete.result()
self._completed_tasks.append(complete_job_dict)
def _begin_jobs_if_workers_free(self) -> None:
'\n If workers are free, start a job from the queue\n '
while ((len(self._futures) < self.n_workers) and (len(self._queue) > 0)):
self._futures.append(self._submit_job_to_executor(0))
def _submit_job_to_executor(self, index) -> futures.Future:
'Submits the chosen job from the queue to the executor\n\n Parameters\n ----------\n index\n Index in the queue of the job to be executed\n\n Returns\n -------\n Future object of the submitted job\n '
job = self._queue.pop(index)
self._validate_job(job)
self._running_tasks.append(job)
future = self._executor.submit(job['f'], job['x'])
return future
|
class JobExecutorInSeries(JobExecutor):
'Interface that runs the jobs in series\n but acts like a batch-running interface to the outside.\n\n self._futures is not a list of futures any more. This is a placeholder\n for the jobs that have yet to run to complete the batch\n '
def __init__(self, n_workers: int, polling_frequency=0.5, verbose=False):
super().__init__(n_workers, polling_frequency=polling_frequency, verbose=verbose)
self._executor = futures.ProcessPoolExecutor(1)
def _clean_up_completed_processes(self) -> None:
'\n Remove completed jobs from the current processes and save results\n '
if (len(self._futures) > 0):
is_complete = self._futures[0].done()
if is_complete:
f_complete = self._futures.pop(0)
complete_job_dict = self._running_tasks.pop(0)
complete_job_dict['y'] = f_complete.result()
self._completed_tasks.append(complete_job_dict)
def _begin_jobs_if_workers_free(self) -> None:
'\n If workers are free, start a job from the queue\n '
if (len(self._futures) == 0):
if (len(self._running_tasks) > 0):
job = self._running_tasks[0]
self._validate_job(job)
self._futures.append(self._executor.submit(job['f'], job['x']))
else:
while ((len(self._queue) > 0) and (len(self._running_tasks) < self.n_workers)):
self._running_tasks.append(self._queue.pop(0))
|
class JobExecutorInSeriesBlocking(ExecutorBase):
"Interface that runs the jobs in series and blocks execution of code\n until it's done\n "
def __init__(self, n_workers: int, verbose=False):
super().__init__(n_workers, verbose=verbose)
self._creation_time = time.time()
def run_until_n_free(self, n_desired_free_workers) -> None:
'Run the simulator until a desired number of workers are free\n\n Parameters\n ----------\n n_desired_free_workers: int\n\n '
while (self.n_free_workers < n_desired_free_workers):
self.run_next()
def run_until_empty(self) -> None:
'Run the simulator until all jobs are completed\n\n '
while (self.n_free_workers < self.n_workers):
self.run_next()
def _update_internal_state(self):
while ((len(self._running_tasks) < self.n_workers) and (len(self._queue) > 0)):
self._running_tasks.append(self._queue.pop(0))
self.n_busy_workers = len(self._running_tasks)
self.n_free_workers = (self.n_workers - self.n_busy_workers)
def run_next(self):
self._move_tasks_from_queue_to_running()
if (len(self._running_tasks) > 0):
job = self._running_tasks.pop(0)
self._validate_job(job)
result = job['f'](job['x'])
job['y'] = result
self._completed_tasks.append(job)
self._update_internal_state()
@property
def age(self):
return (time.time() - self._creation_time)
def _move_tasks_from_queue_to_running(self):
while ((len(self._running_tasks) < self.n_workers) and (len(self._queue) > 0)):
self._running_tasks.append(self._queue.pop(0))
|
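# Hedged usage sketch (not part of the original module): runs two toy jobs through the
# blocking executor defined above, which is the interface BatchCoCaBO hands to
# BatchBOHeuristic. The quadratic objective is an arbitrary stand-in.
import numpy as np

def _blocking_executor_demo():
    interface = JobExecutorInSeriesBlocking(n_workers=2)
    objective = lambda x: float(np.sum(np.asarray(x) ** 2))
    interface.add_job_to_queue([{'x': np.array([0.1, 0.2]), 'f': objective},
                                {'x': np.array([0.3, 0.4]), 'f': objective}])
    interface.run_until_empty()
    for job in interface.get_completed_jobs():
        print(job['x'], '->', job['y'])
|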
def add_hallucinations_to_x_and_y(bo, old_x, old_y, x_new, fixed_dim_vals=None) -> Tuple[(np.ndarray, np.ndarray)]:
'Add hallucinations to the data arrays.\n\n Parameters\n ----------\n old_x\n Current x values\n old_y\n Current y values\n x_new\n Locations at which to use the async infill procedure. If x_new\n is None, then nothing happens and the x and y arrays are returned\n\n Returns\n -------\n augmented_x (np.ndarray), augmented_y (list or np.ndarray)\n '
if (x_new is None):
x_out = old_x
y_out = old_y
else:
if isinstance(x_new, list):
x_new = np.vstack(x_new)
if (fixed_dim_vals is not None):
if (fixed_dim_vals.ndim == 1):
fixed_dim_vals = np.vstack(([fixed_dim_vals] * len(x_new)))
assert (len(fixed_dim_vals) == len(x_new))
x_new = np.hstack((fixed_dim_vals, x_new))
x_out = np.vstack((old_x, x_new))
fake_y = make_hallucinated_data(bo, x_new, bo.async_infill_strategy)
y_out = np.vstack((old_y, fake_y))
return (x_out, y_out)
|
def make_hallucinated_data(bo, x: np.ndarray, strat: str) -> np.ndarray:
"Returns fake y-values based on the chosen heuristic\n\n Parameters\n ----------\n x\n Used to get the value for the kriging believer. Otherwise, this\n sets the number of values returned\n\n bo\n Instance of BayesianOptimization\n\n strat\n string describing the type of hallucinated data. Choices are:\n 'constant_liar_min', 'constant_liar_median', 'kriging_believer',\n 'posterior_simple'\n\n Returns\n -------\n y : np.ndarray\n Values for the desired heuristic\n\n "
if (strat == 'constant_liar_min'):
if (x is None):
y = np.atleast_2d(bo.y_min)
else:
y = np.array(([bo.y_min] * len(x))).reshape((- 1), 1)
elif (strat == 'constant_liar_median'):
if (x is None):
y = np.atleast_2d(bo.y_min)
else:
y = np.array(([bo.y_min] * len(x))).reshape((- 1), 1)
elif (strat == 'kriging_believer'):
y = bo.surrogate.predict(x)[0]
elif (strat == 'posterior_simple'):
(mu, var) = bo.surrogate.predict(x)
y = np.random.multivariate_normal(mu.flatten(), np.diag(var.flatten())).reshape((- 1), 1)
elif (strat == 'posterior_full'):
(mu, var) = bo.surrogate.predict(x, full_cov=True)
y = np.random.multivariate_normal(mu.flatten(), var).reshape((- 1), 1)
else:
raise NotImplementedError
return y
|
def draw(weights):
choice = random.uniform(0, sum(weights))
choiceIndex = 0
for weight in weights:
choice -= weight
if (choice <= 0):
return choiceIndex
choiceIndex += 1
|
def distr(weights, gamma=0.0):
theSum = float(sum(weights))
return tuple(((((1.0 - gamma) * (w / theSum)) + (gamma / len(weights))) for w in weights))
|
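# Small illustrative check (not in the original code) of the EXP3-style mixing performed by
# distr above: p_i = (1 - gamma) * w_i / sum(w) + gamma / K. The output always sums to 1,
# and gamma = 1 collapses it to the uniform distribution.
def _distr_check():
    weights = [10.0, 5.0, 1.0]
    for gamma in (0.0, 0.3, 1.0):
        p = distr(weights, gamma)
        print(gamma, tuple(round(x, 3) for x in p), 'sum={:.3f}'.format(sum(p)))
|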
def mean(aList):
theSum = 0
count = 0
for x in aList:
theSum += x
count += 1
return (0 if (count == 0) else (theSum / count))
|
def with_proba(epsilon):
'Bernoulli test, with probability :math:`\\varepsilon`, return `True`, and with probability :math:`1 - \\varepsilon`, return `False`.\n\n Example:\n\n >>> from random import seed; seed(0) # reproducible\n >>> with_proba(0.5)\n False\n >>> with_proba(0.9)\n True\n >>> with_proba(0.1)\n False\n >>> if with_proba(0.2):\n ... print("This happens 20% of the time.")\n '
assert (0 <= epsilon <= 1), "Error: for 'with_proba(epsilon)', epsilon = {:.3g} has to be between 0 and 1 to be a valid probability.".format(epsilon)
return (random() < epsilon)
|
def log_string(out_str):
global LOG_FOUT
LOG_FOUT.write(out_str)
LOG_FOUT.flush()
|
def build_shared_mlp(mlp_spec: List[int], bn: bool=True):
layers = []
for i in range(1, len(mlp_spec)):
layers.append(nn.Conv2d(mlp_spec[(i - 1)], mlp_spec[i], kernel_size=1, bias=(not bn)))
if bn:
layers.append(nn.BatchNorm2d(mlp_spec[i]))
layers.append(nn.ReLU(True))
return nn.Sequential(*layers)
|
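# Hedged shape check (illustrative only): build_shared_mlp above stacks 1x1 Conv2d blocks, so
# it maps a grouped-feature tensor of shape (B, C_in, npoint, nsample) to
# (B, C_out, npoint, nsample); the SA modules below then max-pool over the nsample dimension.
import torch

def _shared_mlp_shape_check():
    mlp = build_shared_mlp([6, 32, 64], bn=True)
    grouped = torch.randn(2, 6, 128, 16)  # (B, C_in, npoint, nsample)
    out = mlp(grouped)
    print(out.shape)  # expected: torch.Size([2, 64, 128, 16])
|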
class _PointnetSAModuleBase(nn.Module):
def __init__(self):
super(_PointnetSAModuleBase, self).__init__()
self.npoint = None
self.groupers = None
self.mlps = None
def forward(self, xyz: torch.Tensor, features: Optional[torch.Tensor]) -> Tuple[(torch.Tensor, torch.Tensor)]:
"\n Parameters\n ----------\n xyz : torch.Tensor\n (B, N, 3) tensor of the xyz coordinates of the features\n features : torch.Tensor\n (B, C, N) tensor of the descriptors of the the features\n\n Returns\n -------\n new_xyz : torch.Tensor\n (B, npoint, 3) tensor of the new features' xyz\n new_features : torch.Tensor\n (B, \\sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors\n "
new_features_list = []
xyz_flipped = xyz.transpose(1, 2).contiguous()
new_xyz = (pointnet2_utils.gather_operation(xyz_flipped, pointnet2_utils.furthest_point_sample(xyz, self.npoint)).transpose(1, 2).contiguous() if (self.npoint is not None) else None)
for i in range(len(self.groupers)):
new_features = self.groupers[i](xyz, new_xyz, features)
new_features = self.mlps[i](new_features)
new_features = F.max_pool2d(new_features, kernel_size=[1, new_features.size(3)])
new_features = new_features.squeeze((- 1))
new_features_list.append(new_features)
return (new_xyz, torch.cat(new_features_list, dim=1))
|
class PointnetSAModuleMSG(_PointnetSAModuleBase):
'Pointnet set abstraction layer with multiscale grouping\n\n Parameters\n ----------\n npoint : int\n Number of features\n radii : list of float32\n list of radii to group with\n nsamples : list of int32\n Number of samples in each ball query\n mlps : list of list of int32\n Spec of the pointnet before the global max_pool for each scale\n bn : bool\n Use batchnorm\n '
def __init__(self, npoint, radii, nsamples, mlps, bn=True, use_xyz=True):
super(PointnetSAModuleMSG, self).__init__()
assert (len(radii) == len(nsamples) == len(mlps))
self.npoint = npoint
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append((pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz) if (npoint is not None) else pointnet2_utils.GroupAll(use_xyz)))
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
self.mlps.append(build_shared_mlp(mlp_spec, bn))
|
class PointnetSAModule(PointnetSAModuleMSG):
'Pointnet set abstraction layer\n\n Parameters\n ----------\n npoint : int\n Number of features\n radius : float\n Radius of ball\n nsample : int\n Number of samples in the ball query\n mlp : list\n Spec of the pointnet before the global max_pool\n bn : bool\n Use batchnorm\n '
def __init__(self, mlp, npoint=None, radius=None, nsample=None, bn=True, use_xyz=True):
super(PointnetSAModule, self).__init__(mlps=[mlp], npoint=npoint, radii=[radius], nsamples=[nsample], bn=bn, use_xyz=use_xyz)
|
class PointnetFPModule(nn.Module):
'Propagates the features of one set to another\n\n Parameters\n ----------\n mlp : list\n Pointnet module parameters\n bn : bool\n Use batchnorm\n '
def __init__(self, mlp, bn=True):
super(PointnetFPModule, self).__init__()
self.mlp = build_shared_mlp(mlp, bn=bn)
def forward(self, unknown, known, unknow_feats, known_feats):
'\n Parameters\n ----------\n unknown : torch.Tensor\n (B, n, 3) tensor of the xyz positions of the unknown features\n known : torch.Tensor\n (B, m, 3) tensor of the xyz positions of the known features\n unknow_feats : torch.Tensor\n (B, C1, n) tensor of the features to be propagated to\n known_feats : torch.Tensor\n (B, C2, m) tensor of features to be propagated\n\n Returns\n -------\n new_features : torch.Tensor\n (B, mlp[-1], n) tensor of the features of the unknown features\n '
if (known is not None):
(dist, idx) = pointnet2_utils.three_nn(unknown, known)
dist_recip = (1.0 / (dist + 1e-08))
norm = torch.sum(dist_recip, dim=2, keepdim=True)
weight = (dist_recip / norm)
interpolated_feats = pointnet2_utils.three_interpolate(known_feats, idx, weight)
else:
interpolated_feats = known_feats.expand(*known_feats.size()[0:2], unknown.size(1))
if (unknow_feats is not None):
new_features = torch.cat([interpolated_feats, unknow_feats], dim=1)
else:
new_features = interpolated_feats
new_features = new_features.unsqueeze((- 1))
new_features = self.mlp(new_features)
return new_features.squeeze((- 1))
|
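# Pure-NumPy sketch (an assumption for illustration; the real path is the CUDA three_nn /
# three_interpolate pair used by PointnetFPModule above) of the inverse-distance weighting:
# each unknown point receives the weighted average of the features of its 3 nearest known
# points, with weights proportional to 1 / (distance + eps), normalised to sum to 1.
import numpy as np

def _three_nn_interpolate_np(unknown_xyz, known_xyz, known_feats, eps=1e-8):
    # unknown_xyz: (n, 3), known_xyz: (m, 3), known_feats: (m, C) -> returns (n, C)
    d = np.linalg.norm(unknown_xyz[:, None, :] - known_xyz[None, :, :], axis=-1)  # (n, m)
    idx = np.argsort(d, axis=1)[:, :3]                                            # (n, 3)
    d3 = np.take_along_axis(d, idx, axis=1)                                       # (n, 3)
    w = 1.0 / (d3 + eps)
    w = w / w.sum(axis=1, keepdims=True)
    return (known_feats[idx] * w[..., None]).sum(axis=1)
|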
class FurthestPointSampling(Function):
@staticmethod
def forward(ctx, xyz, npoint):
'\n Uses iterative furthest point sampling to select a set of npoint features that have the largest\n minimum distance\n\n Parameters\n ----------\n xyz : torch.Tensor\n (B, N, 3) tensor where N > npoint\n npoint : int32\n number of features in the sampled set\n\n Returns\n -------\n torch.Tensor\n (B, npoint) tensor containing the set\n '
out = _ext.furthest_point_sampling(xyz, npoint)
ctx.mark_non_differentiable(out)
return out
@staticmethod
def backward(ctx, grad_out):
return ()
|
class GatherOperation(Function):
@staticmethod
def forward(ctx, features, idx):
'\n\n Parameters\n ----------\n features : torch.Tensor\n (B, C, N) tensor\n\n idx : torch.Tensor\n (B, npoint) tensor of the features to gather\n\n Returns\n -------\n torch.Tensor\n (B, C, npoint) tensor\n '
ctx.save_for_backward(idx, features)
return _ext.gather_points(features, idx)
@staticmethod
def backward(ctx, grad_out):
(idx, features) = ctx.saved_tensors
N = features.size(2)
grad_features = _ext.gather_points_grad(grad_out.contiguous(), idx, N)
return (grad_features, None)
|
class ThreeNN(Function):
@staticmethod
def forward(ctx, unknown, known):
'\n Find the three nearest neighbors of unknown in known\n Parameters\n ----------\n unknown : torch.Tensor\n (B, n, 3) tensor of unknown features (query points)\n known : torch.Tensor\n (B, m, 3) tensor of known features\n\n Returns\n -------\n dist : torch.Tensor\n (B, n, 3) l2 distance to the three nearest neighbors\n idx : torch.Tensor\n (B, n, 3) index of 3 nearest neighbors\n '
(dist2, idx) = _ext.three_nn(unknown, known)
dist = torch.sqrt(dist2)
ctx.mark_non_differentiable(dist, idx)
return (dist, idx)
@staticmethod
def backward(ctx, grad_dist, grad_idx):
return ()
|
class ThreeInterpolate(Function):
@staticmethod
def forward(ctx, features, idx, weight):
'\n Performs weight linear interpolation on 3 features\n Parameters\n ----------\n features : torch.Tensor\n (B, c, m) Features descriptors to be interpolated from\n idx : torch.Tensor\n (B, n, 3) three nearest neighbors of the target features in features\n weight : torch.Tensor\n (B, n, 3) weights\n\n Returns\n -------\n torch.Tensor\n (B, c, n) tensor of the interpolated features\n '
ctx.save_for_backward(idx, weight, features)
return _ext.three_interpolate(features, idx, weight)
@staticmethod
def backward(ctx, grad_out):
'\n Parameters\n ----------\n grad_out : torch.Tensor\n (B, c, n) tensor with gradients of outputs\n\n Returns\n -------\n grad_features : torch.Tensor\n (B, c, m) tensor with gradients of features\n\n None\n\n None\n '
(idx, weight, features) = ctx.saved_tensors
m = features.size(2)
grad_features = _ext.three_interpolate_grad(grad_out.contiguous(), idx, weight, m)
return (grad_features, torch.zeros_like(idx), torch.zeros_like(weight))
|
class GroupingOperation(Function):
@staticmethod
def forward(ctx, features, idx):
'\n\n Parameters\n ----------\n features : torch.Tensor\n (B, C, N) tensor of features to group\n idx : torch.Tensor\n (B, npoint, nsample) tensor containing the indices of features to group with\n\n Returns\n -------\n torch.Tensor\n (B, C, npoint, nsample) tensor\n '
ctx.save_for_backward(idx, features)
return _ext.group_points(features, idx)
@staticmethod
def backward(ctx, grad_out):
'\n\n Parameters\n ----------\n grad_out : torch.Tensor\n (B, C, npoint, nsample) tensor of the gradients of the output from forward\n\n Returns\n -------\n torch.Tensor\n (B, C, N) gradient of the features\n None\n '
(idx, features) = ctx.saved_tensors
N = features.size(2)
grad_features = _ext.group_points_grad(grad_out.contiguous(), idx, N)
return (grad_features, torch.zeros_like(idx))
|