repository_name | func_path_in_repository | func_name | language | split_name | whole_func_string | func_code_url

robmcmullen/atrcopy | atrcopy/segments.py | get_style_bits | python | train
def get_style_bits(match=False, comment=False, selected=False, data=False, diff=False, user=0):
""" Return an int value that contains the specified style bits set.
Available styles for each byte are:
match: part of the currently matched search
comment: user commented area
selected: selected region
    data: labeled in the disassembler as a data region (i.e. not disassembled)
    diff: differs from the comparison data (sets diff_bit_mask)
    user: low bits of a user-supplied style value (masked with user_bit_mask)
    """
style_bits = 0
if user:
style_bits |= (user & user_bit_mask)
if diff:
style_bits |= diff_bit_mask
if match:
style_bits |= match_bit_mask
if comment:
style_bits |= comment_bit_mask
if data:
style_bits |= (data_style & user_bit_mask)
if selected:
style_bits |= selected_bit_mask
return style_bits
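
A minimal usage sketch for the function above. The mask constants are module-level values defined elsewhere in atrcopy/segments.py; the numbers below are hypothetical stand-ins, not the real values.

```python
# Hypothetical mask values; the real constants live at module level in
# atrcopy/segments.py and may differ.
user_bit_mask = 0x07
data_style = 0x01
diff_bit_mask = 0x10
match_bit_mask = 0x20
comment_bit_mask = 0x40
selected_bit_mask = 0x80

# Styles combine by OR, so one byte can be matched and selected at once.
bits = get_style_bits(match=True, selected=True)
assert bits == match_bit_mask | selected_bit_mask
```
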
https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L22-L45

robmcmullen/atrcopy | atrcopy/segments.py | get_style_mask | python | train
def get_style_mask(**kwargs):
    """Get the bit mask that, when ANDed with a style byte, will turn off the
    specified style bits
"""
bits = get_style_bits(**kwargs)
if 'user' in kwargs and kwargs['user']:
bits |= user_bit_mask
else:
bits &= (0xff ^ user_bit_mask)
return 0xff ^ bits
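
A hedged sketch of how the mask is used, reusing the hypothetical constants from the previous example: ANDing a style byte with the mask clears exactly the requested bits.

```python
# Clear the comment bit but keep the selection bit.
style = get_style_bits(comment=True, selected=True)
cleared = style & get_style_mask(comment=True)
assert cleared == selected_bit_mask
```
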
https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L48-L57

robmcmullen/atrcopy | atrcopy/segments.py | SegmentData.byte_bounds_offset | python | train
def byte_bounds_offset(self):
"""Return start and end offsets of this segment's data into the
base array's data.
This ignores the byte order index. Arrays using the byte order index
will have the entire base array's raw data.
"""
if self.data.base is None:
if self.is_indexed:
basearray = self.data.np_data
else:
basearray = self.data
return 0, len(basearray)
return int(self.data_start - self.base_start), int(self.data_end - self.base_start)
https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L268-L281

robmcmullen/atrcopy | atrcopy/segments.py | SegmentData.get_raw_index | python | train
def get_raw_index(self, i):
"""Get index into base array's raw data, given the index into this
segment
"""
if self.is_indexed:
return int(self.order[i])
if self.data.base is None:
return int(i)
return int(self.data_start - self.base_start + i)
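
A standalone sketch of the two mappings with invented values: an indexed segment looks its index up in the order array, while a contiguous child segment adds a fixed offset into the base array.

```python
import numpy as np

order = np.array([5, 2, 7])            # indexed case: order[i] is the base index
print(int(order[1]))                    # segment index 1 -> base index 2

data_start, base_start = 0x10, 0x00     # contiguous case: fixed offset
print(data_start - base_start + 1)      # segment index 1 -> base index 17
```
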
https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L283-L291

robmcmullen/atrcopy | atrcopy/segments.py | SegmentData.get_indexes_from_base | python | train
def get_indexes_from_base(self):
"""Get array of indexes from the base array, as if this raw data were
indexed.
"""
if self.is_indexed:
        return np.copy(self.order)  # bug fix: `i` was undefined here; the indexes are the whole order array
if self.data.base is None:
i = 0
else:
i = self.get_raw_index(0)
return np.arange(i, i + len(self), dtype=np.uint32)
https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L293-L303

robmcmullen/atrcopy | atrcopy/segments.py | SegmentData.reverse_index_mapping | python | train
def reverse_index_mapping(self):
"""Get mapping from this segment's indexes to the indexes of
the base array.
If the index is < 0, the index is out of range, meaning that it doesn't
exist in this segment and is not mapped to the base array
"""
if self._reverse_index_mapping is None:
if self.is_indexed:
# Initialize array to out of range
r = np.zeros(self.base_length, dtype=np.int32) - 1
r[self.order] = np.arange(len(self.order), dtype=np.int32)
elif self.data.base is None:
# Starts at the beginning; produces the identity
r = np.arange(self.data_length, dtype=np.int32)
else:
r = np.zeros(self.base_length, dtype=np.int32) - 1
r[self.data_start - self.base_start:self.data_end - self.base_start] = np.arange(self.data_length, dtype=np.int32)
self._reverse_index_mapping = r
return self._reverse_index_mapping
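
A standalone sketch of the indexed branch with invented values: order maps segment index to base index, and the reverse map inverts it, with -1 marking base bytes that the segment does not contain.

```python
import numpy as np

base_length = 8
order = np.array([5, 2, 7], dtype=np.int32)    # hypothetical byte order
r = np.zeros(base_length, dtype=np.int32) - 1  # everything starts out of range
r[order] = np.arange(len(order), dtype=np.int32)
print(r)  # [-1 -1  1 -1 -1  0 -1  2]
```
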
https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L358-L377

robmcmullen/atrcopy | atrcopy/segments.py | SegmentData.get_reverse_index | python | train
def get_reverse_index(self, base_index):
"""Get index into this segment's data given the index into the base data
Raises IndexError if the base index doesn't map to anything in this
segment's data
"""
r = self.reverse_index_mapping[base_index]
if r < 0:
raise IndexError("index %d not mapped in this segment" % base_index)
return r
https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L379-L388

robmcmullen/atrcopy | atrcopy/segments.py | DefaultSegment.resize | python | train
def resize(self, newsize, zeros=True):
""" Resize the data arrays.
This can only be performed on the container segment. Child segments
must adjust their rawdata to point to the correct place.
Since segments don't keep references to other segments, it is the
user's responsibility to update any child segments that point to this
segment's data.
Numpy can't do an in-place resize on an array that has a view, so the
data must be replaced and all segments that point to that raw data must
also be changed. This has to happen outside this method because it
doesn't know the segment list of segments using itself as a base.
"""
if not self.can_resize:
raise ValueError("Segment %s can't be resized" % str(self))
# only makes sense for the container (outermost) object
if not self.rawdata.is_base:
raise ValueError("Only container segments can be resized")
origsize = len(self)
self.rawdata.resize(newsize)
self.set_raw(self.rawdata) # force attributes to be reset
newsize = len(self)
if zeros:
if newsize > origsize:
self.data[origsize:] = 0
self.style[origsize:] = 0
return origsize, newsize
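
A hedged usage sketch; `container` stands for any base segment whose can_resize flag is set, and the new size is invented.

```python
# Grow the container to 8 KiB; bytes past the old end are zeroed
# because zeros=True by default.
origsize, newsize = container.resize(0x2000)
```
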
https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L430-L458

robmcmullen/atrcopy | atrcopy/segments.py | DefaultSegment.restore_renamed_serializable_attributes | python | train
def restore_renamed_serializable_attributes(self):
"""Hook for the future if attributes have been renamed. The old
attribute names will have been restored in the __dict__.update in
__setstate__, so this routine should move attribute values to their new
names.
"""
if hasattr(self, 'start_addr'):
self.origin = self.start_addr
log.debug(f"moving start_addr to origin: {self.start_addr}")
delattr(self, 'start_addr')
https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L518-L527

robmcmullen/atrcopy | atrcopy/segments.py | DefaultSegment.reconstruct_raw | python | train
def reconstruct_raw(self, rawdata):
"""Reconstruct the pointers to the parent data arrays
Each segment is a view into the primary segment's data, so those
pointers and the order must be restored in the child segments.
"""
start, end = self._rawdata_bounds
r = rawdata[start:end]
delattr(self, '_rawdata_bounds')
try:
if self._order_list:
order = to_numpy_list(self._order_list)
r = r.get_indexed(order)
delattr(self, '_order_list')
except AttributeError:
pass
self.set_raw(r)
https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L529-L545

robmcmullen/atrcopy | atrcopy/segments.py | DefaultSegment.get_parallel_raw_data | python | train
def get_parallel_raw_data(self, other):
""" Get the raw data that is similar to the specified other segment
"""
start, end = other.byte_bounds_offset()
r = self.rawdata[start:end]
if other.rawdata.is_indexed:
        r = r.get_indexed(other.order)  # bug fix: get_indexed is a method and must be called, not subscripted
return r
https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L547-L554

robmcmullen/atrcopy | atrcopy/segments.py | DefaultSegment.serialize_session | python | train
def serialize_session(self, mdict):
"""Save extra metadata to a dict so that it can be serialized
This is not saved by __getstate__ because child segments will point to
the same data and this allows it to only be saved for the base segment.
As well as allowing it to be pulled out of the main json so that it can
be more easily edited by hand if desired.
"""
mdict["comment ranges"] = [list(a) for a in self.get_style_ranges(comment=True)]
mdict["data ranges"] = [list(a) for a in self.get_style_ranges(data=True)]
for i in range(1, user_bit_mask):
r = [list(a) for a in self.get_style_ranges(user=i)]
if r:
slot = "user style %d" % i
mdict[slot] = r
# json serialization doesn't allow int keys, so convert to list of
# pairs
mdict["comments"] = self.get_sorted_comments()
https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L556-L574

robmcmullen/atrcopy | atrcopy/segments.py | DefaultSegment.get_index_from_base_index | python | train
def get_index_from_base_index(self, base_index):
"""Get index into this array's data given the index into the base array
"""
r = self.rawdata
try:
index = r.get_reverse_index(base_index)
except IndexError:
raise IndexError("index %d not in this segment" % base_index)
if index < 0:
raise IndexError("index %d not in this segment" % base_index)
return int(index)
https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L652-L662

robmcmullen/atrcopy | atrcopy/segments.py | DefaultSegment.get_style_ranges | python | train
def get_style_ranges(self, **kwargs):
"""Return a list of start, end pairs that match the specified style
"""
style_bits = self.get_style_bits(**kwargs)
matches = (self.style & style_bits) == style_bits
return self.bool_to_ranges(matches)
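
A standalone sketch of the matching step: a byte matches only when every requested bit is set, which is what AND-then-compare computes.

```python
import numpy as np

style = np.array([0x00, 0x60, 0x20, 0x64])
style_bits = 0x60
print((style & style_bits) == style_bits)  # [False  True False  True]
```
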
https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L689-L694

robmcmullen/atrcopy | atrcopy/segments.py | DefaultSegment.fixup_comments | python | train
def fixup_comments(self):
"""Remove any style bytes that are marked as commented but have no
comment, and add any style bytes where there's a comment but it isn't
marked in the style data.
This happens on the base data, so only need to do this on one segment
that uses this base data.
"""
style_base = self.rawdata.style_base
comment_text_indexes = np.asarray(list(self.rawdata.extra.comments.keys()), dtype=np.uint32)
comment_mask = self.get_style_mask(comment=True)
has_comments = np.where(style_base & comment_bit_mask > 0)[0]
both = np.intersect1d(comment_text_indexes, has_comments)
log.info("fixup comments: %d correctly marked, %d without style, %d empty text" % (np.alen(both), np.alen(comment_text_indexes) - np.alen(both), np.alen(has_comments) - np.alen(both)))
style_base &= comment_mask
comment_style = self.get_style_bits(comment=True)
style_base[comment_text_indexes] |= comment_style
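
A standalone sketch of the reconciliation step with invented indexes: the intersection is the set of bytes whose style bit and comment text agree.

```python
import numpy as np

comment_text_indexes = np.array([2, 5, 9], dtype=np.uint32)  # bytes with text
has_comments = np.array([2, 7, 9], dtype=np.uint32)          # bytes with the style bit
both = np.intersect1d(comment_text_indexes, has_comments)
print(both)                                    # [2 9] correctly marked
print(len(comment_text_indexes) - len(both))   # 1 comment without style
print(len(has_comments) - len(both))           # 1 style bit with no text
```
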
https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L696-L712

robmcmullen/atrcopy | atrcopy/segments.py | DefaultSegment.get_entire_style_ranges | python | train
def get_entire_style_ranges(self, split_comments=None, **kwargs):
"""Find sections of the segment that have the same style value.
The arguments to this function are used as a mask for the style to
determine where to split the styles. Style bits that aren't included in
the list will be ignored when splitting. The returned list covers the
entire length of the segment.
Returns a list of tuples, each tuple containing two items: a start, end
tuple; and an integer with the style value.
"""
style_bits = self.get_style_bits(**kwargs)
matches = self.get_comment_locations(**kwargs)
groups = np.split(matches, np.where(np.diff(matches) != 0)[0] + 1)
if split_comments is None:
split_comments = []
# print groups
# split into groups with the same numbers
ranges = []
last_end = 0
    if len(groups) == 1 and len(groups[0]) == 0:
        # degenerate case: no data at all; return an empty list to match the documented return type
        return []
last_style = -1
for group in groups:
# each group is guaranteed to have the same style
size = len(group)
next_end = last_end + size
style = matches[last_end]
masked_style = style & style_bits
# print last_end, next_end, style, masked_style, size, group
if style & comment_bit_mask:
if masked_style in split_comments:
# print "interesting comment", last_end, next_end
ranges.append(((last_end, next_end), masked_style))
else:
# print "non-interesting comment", last_end, next_end
if last_style == masked_style:
((prev_end, _), _) = ranges.pop()
ranges.append(((prev_end, next_end), masked_style))
else:
ranges.append(((last_end, next_end), masked_style))
else:
if last_style == masked_style:
((prev_end, _), _) = ranges.pop()
ranges.append(((prev_end, next_end), masked_style))
else:
ranges.append(((last_end, next_end), masked_style))
last_style = masked_style
last_end = next_end
return ranges
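
A standalone sketch of the grouping step: np.diff marks where the style value changes, and np.split breaks the array into runs of equal values.

```python
import numpy as np

matches = np.array([0, 0, 1, 1, 1, 0, 2, 2])
groups = np.split(matches, np.where(np.diff(matches) != 0)[0] + 1)
print([g.tolist() for g in groups])  # [[0, 0], [1, 1, 1], [0], [2, 2]]
```
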
https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L725-L775

robmcmullen/atrcopy | atrcopy/segments.py | DefaultSegment.get_comments_at_indexes | python | train
def get_comments_at_indexes(self, indexes):
"""Get a list of comments at specified indexes"""
s = self.style[indexes]
has_comments = np.where(s & comment_bit_mask > 0)[0]
comments = []
for where_index in has_comments:
raw = self.get_raw_index(indexes[where_index])
try:
comment = self.rawdata.extra.comments[raw]
except KeyError:
comment = None
comments.append(comment)
return has_comments, comments
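
A standalone sketch of the selection step, using the hypothetical comment_bit_mask from earlier: np.where returns positions within the supplied index list, not raw offsets.

```python
import numpy as np

comment_bit_mask = 0x40                 # hypothetical value, as above
s = np.array([0x00, 0x40, 0x00, 0x41])  # styles at the requested indexes
print(np.where(s & comment_bit_mask > 0)[0])  # [1 3]
```
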
https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L947-L959

robmcmullen/atrcopy | atrcopy/segments.py | DefaultSegment.get_comment_restore_data | python | train
def get_comment_restore_data(self, ranges):
"""Get a chunk of data (designed to be opaque) containing comments,
styles & locations that can be used to recreate the comments on an undo
"""
restore_data = []
for start, end in ranges:
log.debug("range: %d-%d" % (start, end))
styles = self.style[start:end].copy()
items = {}
for i in range(start, end):
rawindex = self.get_raw_index(i)
try:
comment = self.rawdata.extra.comments[rawindex]
log.debug(" index: %d rawindex=%d '%s'" % (i, rawindex, comment))
items[i] = (rawindex, comment)
except KeyError:
log.debug(" index: %d rawindex=%d NO COMMENT TO SAVE" % (i, rawindex))
items[i] = (rawindex, None)
restore_data.append((start, end, styles, items))
return restore_data
https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L961-L981

robmcmullen/atrcopy | atrcopy/segments.py | DefaultSegment.restore_comments | python | train
def restore_comments(self, restore_data):
"""Restore comment styles and data
"""
for start, end, styles, items in restore_data:
log.debug("range: %d-%d" % (start, end))
self.style[start:end] = styles
for i in range(start, end):
rawindex, comment = items[i]
if comment:
log.debug(" restoring comment: rawindex=%d, '%s'" % (rawindex, comment))
self.rawdata.extra.comments[rawindex] = comment
else:
# no comment in original data, remove any if exists
try:
del self.rawdata.extra.comments[rawindex]
log.debug(" no comment in original data, removed comment in current data at rawindex=%d" % rawindex)
                except KeyError:
                    log.debug("  no comment in original data or current data at rawindex=%d" % rawindex)
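
A hedged round-trip sketch pairing this with get_comment_restore_data above; `segment` is any DefaultSegment and the edit is invented.

```python
saved = segment.get_comment_restore_data([(0, 16)])  # snapshot bytes 0..15
segment.set_comment_at(4, "patched")                 # some hypothetical edit
segment.restore_comments(saved)                      # undo: styles and text return
```
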
https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L983-L1001

robmcmullen/atrcopy | atrcopy/segments.py | DefaultSegment.get_comments_in_range | python | train
def get_comments_in_range(self, start, end):
    """Get a dict of comments, keyed by segment index, for comments in the range [start, end)"""
comments = {}
# Naive way, but maybe it's fast enough: loop over all comments
# gathering those within the bounds
for rawindex, comment in self.rawdata.extra.comments.items():
try:
index = self.get_index_from_base_index(rawindex)
except IndexError:
continue
if index >= start and index < end:
comments[index] = comment
return comments
https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L1003-L1016

robmcmullen/atrcopy | atrcopy/segments.py | DefaultSegment.copy_user_data | python | train
def copy_user_data(self, source, index_offset=0):
"""Copy comments and other user data from the source segment to this
segment.
The index offset is the offset into self based on the index of source.
"""
for index, comment in source.iter_comments_in_segment():
self.set_comment_at(index + index_offset, comment)
https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L1062-L1069

openstax/cnx-archive | cnxarchive/views/xpath.py | xpath_book | python | train
def xpath_book(request, uuid, version, return_json=True):
"""
Given a request, book UUID and version:
returns a JSON object or HTML list of results, each result containing:
module_name,
module_uuid,
xpath_results, an array of strings, each an individual xpath result.
"""
xpath_string = request.params.get('q')
results = execute_xpath(xpath_string, 'xpath', uuid, version)
if return_json:
return results
else:
return xpath_book_html(request, results)
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/xpath.py#L44-L59

openstax/cnx-archive | cnxarchive/views/xpath.py | xpath_page | python | train
def xpath_page(request, uuid, version):
"""Given a page UUID (and optional version), returns a JSON object of
results, as in xpath_book()"""
xpath_string = request.params.get('q')
return execute_xpath(xpath_string, 'xpath-module', uuid, version)
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/xpath.py#L116-L120

openstax/cnx-archive | cnxarchive/views/xpath.py | execute_xpath | python | train
def execute_xpath(xpath_string, sql_function, uuid, version):
"""Executes either xpath or xpath-module SQL function with given input
params."""
settings = get_current_registry().settings
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
try:
cursor.execute(SQL[sql_function],
{'document_uuid': uuid,
'document_version': version,
'xpath_string': xpath_string})
except psycopg2.Error as e:
exc = httpexceptions.HTTPBadRequest()
exc.explanation = e.pgerror
raise exc
for res in cursor.fetchall():
yield {'name': res[0],
'uuid': res[1],
'version': res[2],
'xpath_results': res[3]}
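
A hedged usage sketch: execute_xpath is a generator, so the query only runs against the database once it is iterated. The xpath string, uuid, and version below are invented for illustration.

```python
some_uuid = '031da8d3-b525-429c-80cf-6c8ed997733a'  # invented example id
for hit in execute_xpath('//md:title', 'xpath', some_uuid, '1.1'):
    print(hit['name'], hit['uuid'], hit['xpath_results'])
```
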
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/xpath.py#L123-L144

openstax/cnx-archive | cnxarchive/views/xpath.py | xpath | python | train
def xpath(request):
"""View for the route. Determines UUID and version from input request
and determines the type of UUID (collection or module) and executes
the corresponding method."""
ident_hash = request.params.get('id')
xpath_string = request.params.get('q')
if not ident_hash or not xpath_string:
        exc = httpexceptions.HTTPBadRequest()  # instantiate the exception; mutating the class would leak state across requests
        exc.explanation = 'You must supply both a UUID and an xpath'
        raise exc
try:
uuid, version = split_ident_hash(ident_hash)
except IdentHashShortId as e:
uuid = get_uuid(e.id)
version = e.version
except IdentHashMissingVersion as e:
uuid = e.id
version = get_latest_version(e.id)
except IdentHashSyntaxError:
        raise httpexceptions.HTTPBadRequest()
settings = get_current_registry().settings
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
result = get_content_metadata(uuid, version, cursor)
resp = request.response
if result['mediaType'] == COLLECTION_MIMETYPE:
matched_route = request.matched_route.name
results = xpath_book(request, uuid, version,
return_json=matched_route.endswith('json'))
if matched_route.endswith('json'):
results = {'results': list(results)}
resp.body = json.dumps(results)
resp.content_type = 'application/json'
else:
resp.body = results
resp.content_type = 'application/xhtml+xml'
else:
results = {'results': list(xpath_page(request, uuid, version))}
resp.body = json.dumps(results)
resp.content_type = 'application/json'
resp.status = "200 OK"
return resp
|
python
|
def xpath(request):
"""View for the route. Determines UUID and version from input request
and determines the type of UUID (collection or module) and executes
the corresponding method."""
ident_hash = request.params.get('id')
xpath_string = request.params.get('q')
if not ident_hash or not xpath_string:
        exc = httpexceptions.HTTPBadRequest()  # instantiate so .explanation is set per-request, not on the class
exc.explanation = 'You must supply both a UUID and an xpath'
raise exc
try:
uuid, version = split_ident_hash(ident_hash)
except IdentHashShortId as e:
uuid = get_uuid(e.id)
version = e.version
except IdentHashMissingVersion as e:
uuid = e.id
version = get_latest_version(e.id)
except IdentHashSyntaxError:
raise httpexceptions.HTTPBadRequest
settings = get_current_registry().settings
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
result = get_content_metadata(uuid, version, cursor)
resp = request.response
if result['mediaType'] == COLLECTION_MIMETYPE:
matched_route = request.matched_route.name
results = xpath_book(request, uuid, version,
return_json=matched_route.endswith('json'))
if matched_route.endswith('json'):
results = {'results': list(results)}
resp.body = json.dumps(results)
resp.content_type = 'application/json'
else:
resp.body = results
resp.content_type = 'application/xhtml+xml'
else:
results = {'results': list(xpath_page(request, uuid, version))}
resp.body = json.dumps(results)
resp.content_type = 'application/json'
resp.status = "200 OK"
return resp
|
[
"def",
"xpath",
"(",
"request",
")",
":",
"ident_hash",
"=",
"request",
".",
"params",
".",
"get",
"(",
"'id'",
")",
"xpath_string",
"=",
"request",
".",
"params",
".",
"get",
"(",
"'q'",
")",
"if",
"not",
"ident_hash",
"or",
"not",
"xpath_string",
":",
"exc",
"=",
"httpexceptions",
".",
"HTTPBadRequest",
"exc",
".",
"explanation",
"=",
"'You must supply both a UUID and an xpath'",
"raise",
"exc",
"try",
":",
"uuid",
",",
"version",
"=",
"split_ident_hash",
"(",
"ident_hash",
")",
"except",
"IdentHashShortId",
"as",
"e",
":",
"uuid",
"=",
"get_uuid",
"(",
"e",
".",
"id",
")",
"version",
"=",
"e",
".",
"version",
"except",
"IdentHashMissingVersion",
"as",
"e",
":",
"uuid",
"=",
"e",
".",
"id",
"version",
"=",
"get_latest_version",
"(",
"e",
".",
"id",
")",
"except",
"IdentHashSyntaxError",
":",
"raise",
"httpexceptions",
".",
"HTTPBadRequest",
"settings",
"=",
"get_current_registry",
"(",
")",
".",
"settings",
"with",
"db_connect",
"(",
")",
"as",
"db_connection",
":",
"with",
"db_connection",
".",
"cursor",
"(",
")",
"as",
"cursor",
":",
"result",
"=",
"get_content_metadata",
"(",
"uuid",
",",
"version",
",",
"cursor",
")",
"resp",
"=",
"request",
".",
"response",
"if",
"result",
"[",
"'mediaType'",
"]",
"==",
"COLLECTION_MIMETYPE",
":",
"matched_route",
"=",
"request",
".",
"matched_route",
".",
"name",
"results",
"=",
"xpath_book",
"(",
"request",
",",
"uuid",
",",
"version",
",",
"return_json",
"=",
"matched_route",
".",
"endswith",
"(",
"'json'",
")",
")",
"if",
"matched_route",
".",
"endswith",
"(",
"'json'",
")",
":",
"results",
"=",
"{",
"'results'",
":",
"list",
"(",
"results",
")",
"}",
"resp",
".",
"body",
"=",
"json",
".",
"dumps",
"(",
"results",
")",
"resp",
".",
"content_type",
"=",
"'application/json'",
"else",
":",
"resp",
".",
"body",
"=",
"results",
"resp",
".",
"content_type",
"=",
"'application/xhtml+xml'",
"else",
":",
"results",
"=",
"{",
"'results'",
":",
"list",
"(",
"xpath_page",
"(",
"request",
",",
"uuid",
",",
"version",
")",
")",
"}",
"resp",
".",
"body",
"=",
"json",
".",
"dumps",
"(",
"results",
")",
"resp",
".",
"content_type",
"=",
"'application/json'",
"resp",
".",
"status",
"=",
"\"200 OK\"",
"return",
"resp"
] |
View for the route. Determines UUID and version from input request
and determines the type of UUID (collection or module) and executes
the corresponding method.
|
[
"View",
"for",
"the",
"route",
".",
"Determines",
"UUID",
"and",
"version",
"from",
"input",
"request",
"and",
"determines",
"the",
"type",
"of",
"UUID",
"(",
"collection",
"or",
"module",
")",
"and",
"executes",
"the",
"corresponding",
"method",
"."
] |
train
|
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/xpath.py#L156-L203
|
openstax/cnx-archive
|
cnxarchive/views/content.py
|
tree_to_html
|
def tree_to_html(tree):
"""Return html list version of book tree."""
ul = etree.Element('ul')
html_listify([tree], ul)
return HTML_WRAPPER.format(etree.tostring(ul))
|
python
|
def tree_to_html(tree):
"""Return html list version of book tree."""
ul = etree.Element('ul')
html_listify([tree], ul)
return HTML_WRAPPER.format(etree.tostring(ul))
|
[
"def",
"tree_to_html",
"(",
"tree",
")",
":",
"ul",
"=",
"etree",
".",
"Element",
"(",
"'ul'",
")",
"html_listify",
"(",
"[",
"tree",
"]",
",",
"ul",
")",
"return",
"HTML_WRAPPER",
".",
"format",
"(",
"etree",
".",
"tostring",
"(",
"ul",
")",
")"
] |
Return html list version of book tree.
|
[
"Return",
"html",
"list",
"version",
"of",
"book",
"tree",
"."
] |
train
|
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/content.py#L48-L52
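For context, the ul element that html_listify later fills is plain lxml element building. A minimal sketch, assuming lxml is installed; HTML_WRAPPER is approximated here by a bare format string, which is an assumption about its shape:

from lxml import etree

ul = etree.Element('ul')
li = etree.SubElement(ul, 'li')
li.text = 'Chapter 1'
print('<html><body>{}</body></html>'.format(etree.tostring(ul).decode()))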
|
openstax/cnx-archive
|
cnxarchive/views/content.py
|
_get_content_json
|
def _get_content_json(ident_hash=None):
"""Return a content as a dict from its ident-hash (uuid@version)."""
request = get_current_request()
routing_args = request and request.matchdict or {}
if not ident_hash:
ident_hash = routing_args['ident_hash']
as_collated = asbool(request.GET.get('as_collated', True))
page_ident_hash = routing_args.get('page_ident_hash', '')
p_id, p_version = (None, None)
if page_ident_hash:
try:
p_id, p_version = split_ident_hash(page_ident_hash)
except IdentHashShortId as e:
p_id = get_uuid(e.id)
p_version = e.version
except IdentHashMissingVersion as e:
# page ident hash doesn't need a version
p_id = e.id
p_version = None
id, version = split_ident_hash(ident_hash, containing=p_id)
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
result = get_content_metadata(id, version, cursor)
# Build url for canonical link header
result['canon_url'] = get_canonical_url(result, request)
if result['mediaType'] == COLLECTION_MIMETYPE:
# Grab the collection tree.
result['tree'] = get_tree(ident_hash, cursor,
as_collated=as_collated)
result['collated'] = as_collated
if not result['tree']:
# If collated tree is not available, get the uncollated
# tree.
result['tree'] = get_tree(ident_hash, cursor)
result['collated'] = False
if page_ident_hash:
for id_ in flatten_tree_to_ident_hashes(result['tree']):
id, version = split_ident_hash(id_)
if id == p_id and (
version == p_version or not p_version):
content = None
if as_collated:
content = get_collated_content(
id_, ident_hash, cursor)
if content:
result = get_content_metadata(
id, version, cursor)
# Build url for canonical link header
result['canon_url'] = (
get_canonical_url(result, request))
result['content'] = content[:]
return result
# 302 'cause lack of baked content may be temporary
raise httpexceptions.HTTPFound(request.route_path(
request.matched_route.name,
_query=request.params,
ident_hash=join_ident_hash(id, version),
ext=routing_args['ext']),
headers=[("Cache-Control",
"max-age=60, public")])
raise httpexceptions.HTTPNotFound()
else:
result = get_content_metadata(id, version, cursor)
# Build url for canonical link header
result['canon_url'] = get_canonical_url(result, request)
# Grab the html content.
args = dict(id=id, version=result['version'],
filename='index.cnxml.html')
cursor.execute(SQL['get-resource-by-filename'], args)
try:
content = cursor.fetchone()[0]
except (TypeError, IndexError,): # None returned
logger.debug("module found, but "
"'index.cnxml.html' is missing.")
raise httpexceptions.HTTPNotFound()
result['content'] = content[:]
return result
|
python
|
def _get_content_json(ident_hash=None):
"""Return a content as a dict from its ident-hash (uuid@version)."""
request = get_current_request()
routing_args = request and request.matchdict or {}
if not ident_hash:
ident_hash = routing_args['ident_hash']
as_collated = asbool(request.GET.get('as_collated', True))
page_ident_hash = routing_args.get('page_ident_hash', '')
p_id, p_version = (None, None)
if page_ident_hash:
try:
p_id, p_version = split_ident_hash(page_ident_hash)
except IdentHashShortId as e:
p_id = get_uuid(e.id)
p_version = e.version
except IdentHashMissingVersion as e:
# page ident hash doesn't need a version
p_id = e.id
p_version = None
id, version = split_ident_hash(ident_hash, containing=p_id)
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
result = get_content_metadata(id, version, cursor)
# Build url for canonical link header
result['canon_url'] = get_canonical_url(result, request)
if result['mediaType'] == COLLECTION_MIMETYPE:
# Grab the collection tree.
result['tree'] = get_tree(ident_hash, cursor,
as_collated=as_collated)
result['collated'] = as_collated
if not result['tree']:
# If collated tree is not available, get the uncollated
# tree.
result['tree'] = get_tree(ident_hash, cursor)
result['collated'] = False
if page_ident_hash:
for id_ in flatten_tree_to_ident_hashes(result['tree']):
id, version = split_ident_hash(id_)
if id == p_id and (
version == p_version or not p_version):
content = None
if as_collated:
content = get_collated_content(
id_, ident_hash, cursor)
if content:
result = get_content_metadata(
id, version, cursor)
# Build url for canonical link header
result['canon_url'] = (
get_canonical_url(result, request))
result['content'] = content[:]
return result
# 302 'cause lack of baked content may be temporary
raise httpexceptions.HTTPFound(request.route_path(
request.matched_route.name,
_query=request.params,
ident_hash=join_ident_hash(id, version),
ext=routing_args['ext']),
headers=[("Cache-Control",
"max-age=60, public")])
raise httpexceptions.HTTPNotFound()
else:
result = get_content_metadata(id, version, cursor)
# Build url for canonical link header
result['canon_url'] = get_canonical_url(result, request)
# Grab the html content.
args = dict(id=id, version=result['version'],
filename='index.cnxml.html')
cursor.execute(SQL['get-resource-by-filename'], args)
try:
content = cursor.fetchone()[0]
except (TypeError, IndexError,): # None returned
logger.debug("module found, but "
"'index.cnxml.html' is missing.")
raise httpexceptions.HTTPNotFound()
result['content'] = content[:]
return result
|
[
"def",
"_get_content_json",
"(",
"ident_hash",
"=",
"None",
")",
":",
"request",
"=",
"get_current_request",
"(",
")",
"routing_args",
"=",
"request",
"and",
"request",
".",
"matchdict",
"or",
"{",
"}",
"if",
"not",
"ident_hash",
":",
"ident_hash",
"=",
"routing_args",
"[",
"'ident_hash'",
"]",
"as_collated",
"=",
"asbool",
"(",
"request",
".",
"GET",
".",
"get",
"(",
"'as_collated'",
",",
"True",
")",
")",
"page_ident_hash",
"=",
"routing_args",
".",
"get",
"(",
"'page_ident_hash'",
",",
"''",
")",
"p_id",
",",
"p_version",
"=",
"(",
"None",
",",
"None",
")",
"if",
"page_ident_hash",
":",
"try",
":",
"p_id",
",",
"p_version",
"=",
"split_ident_hash",
"(",
"page_ident_hash",
")",
"except",
"IdentHashShortId",
"as",
"e",
":",
"p_id",
"=",
"get_uuid",
"(",
"e",
".",
"id",
")",
"p_version",
"=",
"e",
".",
"version",
"except",
"IdentHashMissingVersion",
"as",
"e",
":",
"# page ident hash doesn't need a version",
"p_id",
"=",
"e",
".",
"id",
"p_version",
"=",
"None",
"id",
",",
"version",
"=",
"split_ident_hash",
"(",
"ident_hash",
",",
"containing",
"=",
"p_id",
")",
"with",
"db_connect",
"(",
")",
"as",
"db_connection",
":",
"with",
"db_connection",
".",
"cursor",
"(",
")",
"as",
"cursor",
":",
"result",
"=",
"get_content_metadata",
"(",
"id",
",",
"version",
",",
"cursor",
")",
"# Build url for canonical link header",
"result",
"[",
"'canon_url'",
"]",
"=",
"get_canonical_url",
"(",
"result",
",",
"request",
")",
"if",
"result",
"[",
"'mediaType'",
"]",
"==",
"COLLECTION_MIMETYPE",
":",
"# Grab the collection tree.",
"result",
"[",
"'tree'",
"]",
"=",
"get_tree",
"(",
"ident_hash",
",",
"cursor",
",",
"as_collated",
"=",
"as_collated",
")",
"result",
"[",
"'collated'",
"]",
"=",
"as_collated",
"if",
"not",
"result",
"[",
"'tree'",
"]",
":",
"# If collated tree is not available, get the uncollated",
"# tree.",
"result",
"[",
"'tree'",
"]",
"=",
"get_tree",
"(",
"ident_hash",
",",
"cursor",
")",
"result",
"[",
"'collated'",
"]",
"=",
"False",
"if",
"page_ident_hash",
":",
"for",
"id_",
"in",
"flatten_tree_to_ident_hashes",
"(",
"result",
"[",
"'tree'",
"]",
")",
":",
"id",
",",
"version",
"=",
"split_ident_hash",
"(",
"id_",
")",
"if",
"id",
"==",
"p_id",
"and",
"(",
"version",
"==",
"p_version",
"or",
"not",
"p_version",
")",
":",
"content",
"=",
"None",
"if",
"as_collated",
":",
"content",
"=",
"get_collated_content",
"(",
"id_",
",",
"ident_hash",
",",
"cursor",
")",
"if",
"content",
":",
"result",
"=",
"get_content_metadata",
"(",
"id",
",",
"version",
",",
"cursor",
")",
"# Build url for canonical link header",
"result",
"[",
"'canon_url'",
"]",
"=",
"(",
"get_canonical_url",
"(",
"result",
",",
"request",
")",
")",
"result",
"[",
"'content'",
"]",
"=",
"content",
"[",
":",
"]",
"return",
"result",
"# 302 'cause lack of baked content may be temporary",
"raise",
"httpexceptions",
".",
"HTTPFound",
"(",
"request",
".",
"route_path",
"(",
"request",
".",
"matched_route",
".",
"name",
",",
"_query",
"=",
"request",
".",
"params",
",",
"ident_hash",
"=",
"join_ident_hash",
"(",
"id",
",",
"version",
")",
",",
"ext",
"=",
"routing_args",
"[",
"'ext'",
"]",
")",
",",
"headers",
"=",
"[",
"(",
"\"Cache-Control\"",
",",
"\"max-age=60, public\"",
")",
"]",
")",
"raise",
"httpexceptions",
".",
"HTTPNotFound",
"(",
")",
"else",
":",
"result",
"=",
"get_content_metadata",
"(",
"id",
",",
"version",
",",
"cursor",
")",
"# Build url for canonical link header",
"result",
"[",
"'canon_url'",
"]",
"=",
"get_canonical_url",
"(",
"result",
",",
"request",
")",
"# Grab the html content.",
"args",
"=",
"dict",
"(",
"id",
"=",
"id",
",",
"version",
"=",
"result",
"[",
"'version'",
"]",
",",
"filename",
"=",
"'index.cnxml.html'",
")",
"cursor",
".",
"execute",
"(",
"SQL",
"[",
"'get-resource-by-filename'",
"]",
",",
"args",
")",
"try",
":",
"content",
"=",
"cursor",
".",
"fetchone",
"(",
")",
"[",
"0",
"]",
"except",
"(",
"TypeError",
",",
"IndexError",
",",
")",
":",
"# None returned",
"logger",
".",
"debug",
"(",
"\"module found, but \"",
"\"'index.cnxml.html' is missing.\"",
")",
"raise",
"httpexceptions",
".",
"HTTPNotFound",
"(",
")",
"result",
"[",
"'content'",
"]",
"=",
"content",
"[",
":",
"]",
"return",
"result"
] |
Return content as a dict from its ident-hash (uuid@version).
|
[
"Return",
"a",
"content",
"as",
"a",
"dict",
"from",
"its",
"ident",
"-",
"hash",
"(",
"uuid"
] |
train
|
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/content.py#L55-L137
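The page lookup in _get_content_json scans every ident hash in the flattened book tree. A self-contained toy version of that scan (the tree shape and ident hashes below are invented, not real CNX data):

def flatten(tree):
    # Simplified stand-in for flatten_tree_to_ident_hashes.
    yield tree['id']
    for node in tree.get('contents', []):
        for id_ in flatten(node):
            yield id_

tree = {'id': 'book@1.1',
        'contents': [{'id': 'page-a@3'}, {'id': 'page-b@2'}]}
print([i for i in flatten(tree) if i.startswith('page-b')])  # ['page-b@2']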
|
openstax/cnx-archive
|
cnxarchive/views/content.py
|
get_content_json
|
def get_content_json(request):
"""Retrieve content as JSON using the ident-hash (uuid@version)."""
result = _get_content_json()
resp = request.response
resp.status = "200 OK"
resp.content_type = 'application/json'
resp.body = json.dumps(result)
return result, resp
|
python
|
def get_content_json(request):
"""Retrieve content as JSON using the ident-hash (uuid@version)."""
result = _get_content_json()
resp = request.response
resp.status = "200 OK"
resp.content_type = 'application/json'
resp.body = json.dumps(result)
return result, resp
|
[
"def",
"get_content_json",
"(",
"request",
")",
":",
"result",
"=",
"_get_content_json",
"(",
")",
"resp",
"=",
"request",
".",
"response",
"resp",
".",
"status",
"=",
"\"200 OK\"",
"resp",
".",
"content_type",
"=",
"'application/json'",
"resp",
".",
"body",
"=",
"json",
".",
"dumps",
"(",
"result",
")",
"return",
"result",
",",
"resp"
] |
Retrieve content as JSON using the ident-hash (uuid@version).
|
[
"Retrieve",
"content",
"as",
"JSON",
"using",
"the",
"ident",
"-",
"hash",
"(",
"uuid"
] |
train
|
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/content.py#L140-L148
|
openstax/cnx-archive
|
cnxarchive/views/content.py
|
get_content_html
|
def get_content_html(request):
"""Retrieve content as HTML using the ident-hash (uuid@version)."""
result = _get_content_json()
media_type = result['mediaType']
if media_type == COLLECTION_MIMETYPE:
content = tree_to_html(result['tree'])
else:
content = result['content']
resp = request.response
resp.body = content
resp.status = "200 OK"
resp.content_type = 'application/xhtml+xml'
return result, resp
|
python
|
def get_content_html(request):
"""Retrieve content as HTML using the ident-hash (uuid@version)."""
result = _get_content_json()
media_type = result['mediaType']
if media_type == COLLECTION_MIMETYPE:
content = tree_to_html(result['tree'])
else:
content = result['content']
resp = request.response
resp.body = content
resp.status = "200 OK"
resp.content_type = 'application/xhtml+xml'
return result, resp
|
[
"def",
"get_content_html",
"(",
"request",
")",
":",
"result",
"=",
"_get_content_json",
"(",
")",
"media_type",
"=",
"result",
"[",
"'mediaType'",
"]",
"if",
"media_type",
"==",
"COLLECTION_MIMETYPE",
":",
"content",
"=",
"tree_to_html",
"(",
"result",
"[",
"'tree'",
"]",
")",
"else",
":",
"content",
"=",
"result",
"[",
"'content'",
"]",
"resp",
"=",
"request",
".",
"response",
"resp",
".",
"body",
"=",
"content",
"resp",
".",
"status",
"=",
"\"200 OK\"",
"resp",
".",
"content_type",
"=",
"'application/xhtml+xml'",
"return",
"result",
",",
"resp"
] |
Retrieve content as HTML using the ident-hash (uuid@version).
|
[
"Retrieve",
"content",
"as",
"HTML",
"using",
"the",
"ident",
"-",
"hash",
"(",
"uuid"
] |
train
|
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/content.py#L151-L165
|
openstax/cnx-archive
|
cnxarchive/views/content.py
|
html_listify
|
def html_listify(tree, root_ul_element, parent_id=None):
"""Recursively construct HTML nested list version of book tree.
The original caller should not call this function with the
`parent_id` defined.
"""
request = get_current_request()
is_first_node = parent_id is None
if is_first_node:
parent_id = tree[0]['id']
for node in tree:
li_elm = etree.SubElement(root_ul_element, 'li')
a_elm = etree.SubElement(li_elm, 'a')
a_elm.text = node['title']
if node['id'] != 'subcol':
if is_first_node:
a_elm.set('href', request.route_path(
'content', ident_hash=node['id'], ext='.html'))
else:
a_elm.set('href', request.route_path(
'content',
separator=':',
ident_hash=parent_id,
page_ident_hash=node['id'],
ext='.html'))
if 'contents' in node:
elm = etree.SubElement(li_elm, 'ul')
html_listify(node['contents'], elm, parent_id)
|
python
|
def html_listify(tree, root_ul_element, parent_id=None):
"""Recursively construct HTML nested list version of book tree.
The original caller should not call this function with the
`parent_id` defined.
"""
request = get_current_request()
is_first_node = parent_id is None
if is_first_node:
parent_id = tree[0]['id']
for node in tree:
li_elm = etree.SubElement(root_ul_element, 'li')
a_elm = etree.SubElement(li_elm, 'a')
a_elm.text = node['title']
if node['id'] != 'subcol':
if is_first_node:
a_elm.set('href', request.route_path(
'content', ident_hash=node['id'], ext='.html'))
else:
a_elm.set('href', request.route_path(
'content',
separator=':',
ident_hash=parent_id,
page_ident_hash=node['id'],
ext='.html'))
if 'contents' in node:
elm = etree.SubElement(li_elm, 'ul')
html_listify(node['contents'], elm, parent_id)
|
[
"def",
"html_listify",
"(",
"tree",
",",
"root_ul_element",
",",
"parent_id",
"=",
"None",
")",
":",
"request",
"=",
"get_current_request",
"(",
")",
"is_first_node",
"=",
"parent_id",
"is",
"None",
"if",
"is_first_node",
":",
"parent_id",
"=",
"tree",
"[",
"0",
"]",
"[",
"'id'",
"]",
"for",
"node",
"in",
"tree",
":",
"li_elm",
"=",
"etree",
".",
"SubElement",
"(",
"root_ul_element",
",",
"'li'",
")",
"a_elm",
"=",
"etree",
".",
"SubElement",
"(",
"li_elm",
",",
"'a'",
")",
"a_elm",
".",
"text",
"=",
"node",
"[",
"'title'",
"]",
"if",
"node",
"[",
"'id'",
"]",
"!=",
"'subcol'",
":",
"if",
"is_first_node",
":",
"a_elm",
".",
"set",
"(",
"'href'",
",",
"request",
".",
"route_path",
"(",
"'content'",
",",
"ident_hash",
"=",
"node",
"[",
"'id'",
"]",
",",
"ext",
"=",
"'.html'",
")",
")",
"else",
":",
"a_elm",
".",
"set",
"(",
"'href'",
",",
"request",
".",
"route_path",
"(",
"'content'",
",",
"separator",
"=",
"':'",
",",
"ident_hash",
"=",
"parent_id",
",",
"page_ident_hash",
"=",
"node",
"[",
"'id'",
"]",
",",
"ext",
"=",
"'.html'",
")",
")",
"if",
"'contents'",
"in",
"node",
":",
"elm",
"=",
"etree",
".",
"SubElement",
"(",
"li_elm",
",",
"'ul'",
")",
"html_listify",
"(",
"node",
"[",
"'contents'",
"]",
",",
"elm",
",",
"parent_id",
")"
] |
Recursively construct HTML nested list version of book tree.
The original caller should not call this function with the
`parent_id` defined.
|
[
"Recursively",
"construct",
"HTML",
"nested",
"list",
"version",
"of",
"book",
"tree",
".",
"The",
"original",
"caller",
"should",
"not",
"call",
"this",
"function",
"with",
"the",
"parent_id",
"defined",
"."
] |
train
|
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/content.py#L168-L195
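A stripped-down, runnable variant of the recursion above, with the request/route_path wiring removed so the nesting structure is easier to see (lxml assumed; titles are fabricated):

from lxml import etree

def listify(tree, ul):
    # Same shape as html_listify, minus the href generation.
    for node in tree:
        li = etree.SubElement(ul, 'li')
        li.text = node['title']
        if 'contents' in node:
            listify(node['contents'], etree.SubElement(li, 'ul'))

root = etree.Element('ul')
listify([{'title': 'Book', 'contents': [{'title': 'Ch 1'}]}], root)
print(etree.tostring(root, pretty_print=True).decode())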
|
openstax/cnx-archive
|
cnxarchive/views/content.py
|
get_export_allowable_types
|
def get_export_allowable_types(cursor, exports_dirs, id, version):
"""Return export types."""
request = get_current_request()
type_settings = request.registry.settings['_type_info']
type_names = [k for k, v in type_settings]
type_infos = [v for k, v in type_settings]
# We took the type_names directly from the setting this function uses to
# check for valid types, so it should never raise an ExportError here
file_tuples = get_export_files(cursor, id, version, type_names,
exports_dirs, read_file=False)
type_settings = dict(type_settings)
for filename, mimetype, file_size, file_created, state, file_content \
in file_tuples:
type_name = filename.rsplit('.', 1)[-1]
type_info = type_settings[type_name]
yield {
'format': type_info['user_friendly_name'],
'filename': filename,
'size': file_size,
'created': file_created and file_created.isoformat() or None,
'state': state,
'details': type_info['description'],
'path': request.route_path(
'export', ident_hash=join_ident_hash(id, version),
type=type_name, ignore=u'/{}'.format(filename))
}
|
python
|
def get_export_allowable_types(cursor, exports_dirs, id, version):
"""Return export types."""
request = get_current_request()
type_settings = request.registry.settings['_type_info']
type_names = [k for k, v in type_settings]
type_infos = [v for k, v in type_settings]
# We took the type_names directly from the setting this function uses to
# check for valid types, so it should never raise an ExportError here
file_tuples = get_export_files(cursor, id, version, type_names,
exports_dirs, read_file=False)
type_settings = dict(type_settings)
for filename, mimetype, file_size, file_created, state, file_content \
in file_tuples:
type_name = filename.rsplit('.', 1)[-1]
type_info = type_settings[type_name]
yield {
'format': type_info['user_friendly_name'],
'filename': filename,
'size': file_size,
'created': file_created and file_created.isoformat() or None,
'state': state,
'details': type_info['description'],
'path': request.route_path(
'export', ident_hash=join_ident_hash(id, version),
type=type_name, ignore=u'/{}'.format(filename))
}
|
[
"def",
"get_export_allowable_types",
"(",
"cursor",
",",
"exports_dirs",
",",
"id",
",",
"version",
")",
":",
"request",
"=",
"get_current_request",
"(",
")",
"type_settings",
"=",
"request",
".",
"registry",
".",
"settings",
"[",
"'_type_info'",
"]",
"type_names",
"=",
"[",
"k",
"for",
"k",
",",
"v",
"in",
"type_settings",
"]",
"type_infos",
"=",
"[",
"v",
"for",
"k",
",",
"v",
"in",
"type_settings",
"]",
"# We took the type_names directly from the setting this function uses to",
"# check for valid types, so it should never raise an ExportError here",
"file_tuples",
"=",
"get_export_files",
"(",
"cursor",
",",
"id",
",",
"version",
",",
"type_names",
",",
"exports_dirs",
",",
"read_file",
"=",
"False",
")",
"type_settings",
"=",
"dict",
"(",
"type_settings",
")",
"for",
"filename",
",",
"mimetype",
",",
"file_size",
",",
"file_created",
",",
"state",
",",
"file_content",
"in",
"file_tuples",
":",
"type_name",
"=",
"filename",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"[",
"-",
"1",
"]",
"type_info",
"=",
"type_settings",
"[",
"type_name",
"]",
"yield",
"{",
"'format'",
":",
"type_info",
"[",
"'user_friendly_name'",
"]",
",",
"'filename'",
":",
"filename",
",",
"'size'",
":",
"file_size",
",",
"'created'",
":",
"file_created",
"and",
"file_created",
".",
"isoformat",
"(",
")",
"or",
"None",
",",
"'state'",
":",
"state",
",",
"'details'",
":",
"type_info",
"[",
"'description'",
"]",
",",
"'path'",
":",
"request",
".",
"route_path",
"(",
"'export'",
",",
"ident_hash",
"=",
"join_ident_hash",
"(",
"id",
",",
"version",
")",
",",
"type",
"=",
"type_name",
",",
"ignore",
"=",
"u'/{}'",
".",
"format",
"(",
"filename",
")",
")",
"}"
] |
Return export types.
|
[
"Return",
"export",
"types",
"."
] |
train
|
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/content.py#L222-L247
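The export type is recovered from the filename's last extension via rsplit; a quick stand-alone check of that one step (the filename is made up):

filename = 'college-physics-1.1.pdf'   # fabricated example filename
print(filename.rsplit('.', 1)[-1])     # -> 'pdf'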
|
openstax/cnx-archive
|
cnxarchive/views/content.py
|
get_book_info
|
def get_book_info(cursor, real_dict_cursor, book_id,
book_version, page_id, page_version):
"""Return information about a given book.
Return the book's title, id, shortId, authors and revised date.
Raise HTTPNotFound if the page is not in the book.
"""
book_ident_hash = join_ident_hash(book_id, book_version)
page_ident_hash = join_ident_hash(page_id, page_version)
tree = get_tree(book_ident_hash, cursor)
# Check if the page appears in the book tree
if not tree or page_ident_hash not in flatten_tree_to_ident_hashes(tree):
# Return a 404 error if the page is not actually in the book tree
raise httpexceptions.HTTPNotFound()
sql_statement = """
SELECT m.name as title,
ident_hash(m.uuid, m.major_version, m.minor_version)
as ident_hash,
short_ident_hash(m.uuid, m.major_version, m.minor_version)
as shortId, ARRAY(
SELECT row_to_json(user_row)
FROM (
SELECT u.username, u.first_name as firstname,
u.last_name as surname, u.full_name as fullname,
u.title, u.suffix
) as user_row
) as authors,
m.revised
FROM modules m
JOIN users as u on u.username = ANY(m.authors)
WHERE ident_hash(m.uuid, m.major_version, m.minor_version) = %s
"""
real_dict_cursor.execute(sql_statement, vars=(book_ident_hash,))
return real_dict_cursor.fetchone()
|
python
|
def get_book_info(cursor, real_dict_cursor, book_id,
book_version, page_id, page_version):
"""Return information about a given book.
Return the book's title, id, shortId, authors and revised date.
Raise HTTPNotFound if the page is not in the book.
"""
book_ident_hash = join_ident_hash(book_id, book_version)
page_ident_hash = join_ident_hash(page_id, page_version)
tree = get_tree(book_ident_hash, cursor)
# Check if the page appears in the book tree
if not tree or page_ident_hash not in flatten_tree_to_ident_hashes(tree):
# Return a 404 error if the page is not actually in the book tree
raise httpexceptions.HTTPNotFound()
sql_statement = """
SELECT m.name as title,
ident_hash(m.uuid, m.major_version, m.minor_version)
as ident_hash,
short_ident_hash(m.uuid, m.major_version, m.minor_version)
as shortId, ARRAY(
SELECT row_to_json(user_row)
FROM (
SELECT u.username, u.first_name as firstname,
u.last_name as surname, u.full_name as fullname,
u.title, u.suffix
) as user_row
) as authors,
m.revised
FROM modules m
JOIN users as u on u.username = ANY(m.authors)
WHERE ident_hash(m.uuid, m.major_version, m.minor_version) = %s
"""
real_dict_cursor.execute(sql_statement, vars=(book_ident_hash,))
return real_dict_cursor.fetchone()
|
[
"def",
"get_book_info",
"(",
"cursor",
",",
"real_dict_cursor",
",",
"book_id",
",",
"book_version",
",",
"page_id",
",",
"page_version",
")",
":",
"book_ident_hash",
"=",
"join_ident_hash",
"(",
"book_id",
",",
"book_version",
")",
"page_ident_hash",
"=",
"join_ident_hash",
"(",
"page_id",
",",
"page_version",
")",
"tree",
"=",
"get_tree",
"(",
"book_ident_hash",
",",
"cursor",
")",
"# Check if the page appears in the book tree",
"if",
"not",
"tree",
"or",
"page_ident_hash",
"not",
"in",
"flatten_tree_to_ident_hashes",
"(",
"tree",
")",
":",
"# Return a 404 error if the page is not actually in the book tree",
"raise",
"httpexceptions",
".",
"HTTPNotFound",
"(",
")",
"sql_statement",
"=",
"\"\"\"\n SELECT m.name as title,\n ident_hash(m.uuid, m.major_version, m.minor_version)\n as ident_hash,\n short_ident_hash(m.uuid, m.major_version, m.minor_version)\n as shortId, ARRAY(\n SELECT row_to_json(user_row)\n FROM (\n SELECT u.username, u.first_name as firstname,\n u.last_name as surname, u.full_name as fullname,\n u.title, u.suffix\n ) as user_row\n ) as authors,\n m.revised\n FROM modules m\n JOIN users as u on u.username = ANY(m.authors)\n WHERE ident_hash(m.uuid, m.major_version, m.minor_version) = %s\n \"\"\"",
"real_dict_cursor",
".",
"execute",
"(",
"sql_statement",
",",
"vars",
"=",
"(",
"book_ident_hash",
",",
")",
")",
"return",
"real_dict_cursor",
".",
"fetchone",
"(",
")"
] |
Return information about a given book.
Return the book's title, id, shortId, authors and revised date.
Raise HTTPNotFound if the page is not in the book.
|
[
"Return",
"information",
"about",
"a",
"given",
"book",
"."
] |
train
|
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/content.py#L251-L286
|
openstax/cnx-archive
|
cnxarchive/views/content.py
|
get_portal_type
|
def get_portal_type(cursor, id, version):
"""Return the module's portal_type."""
args = join_ident_hash(id, version)
sql_statement = """
SELECT m.portal_type
FROM modules as m
WHERE ident_hash(uuid, major_version, minor_version) = %s
"""
cursor.execute(sql_statement, vars=(args,))
res = cursor.fetchone()
if res is None:
return None
else:
return res[0]
|
python
|
def get_portal_type(cursor, id, version):
"""Return the module's portal_type."""
args = join_ident_hash(id, version)
sql_statement = """
SELECT m.portal_type
FROM modules as m
WHERE ident_hash(uuid, major_version, minor_version) = %s
"""
cursor.execute(sql_statement, vars=(args,))
res = cursor.fetchone()
if res is None:
return None
else:
return res[0]
|
[
"def",
"get_portal_type",
"(",
"cursor",
",",
"id",
",",
"version",
")",
":",
"args",
"=",
"join_ident_hash",
"(",
"id",
",",
"version",
")",
"sql_statement",
"=",
"\"\"\"\n SELECT m.portal_type\n FROM modules as m\n WHERE ident_hash(uuid, major_version, minor_version) = %s\n \"\"\"",
"cursor",
".",
"execute",
"(",
"sql_statement",
",",
"vars",
"=",
"(",
"args",
",",
")",
")",
"res",
"=",
"cursor",
".",
"fetchone",
"(",
")",
"if",
"res",
"is",
"None",
":",
"return",
"None",
"else",
":",
"return",
"res",
"[",
"0",
"]"
] |
Return the module's portal_type.
|
[
"Return",
"the",
"module",
"s",
"portal_type",
"."
] |
train
|
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/content.py#L290-L304
|
openstax/cnx-archive
|
cnxarchive/views/content.py
|
get_books_containing_page
|
def get_books_containing_page(cursor, uuid, version,
context_uuid=None, context_version=None):
"""Return a list of book names and UUIDs
that contain a given module UUID."""
with db_connect() as db_connection:
# Uses a RealDictCursor instead of the regular cursor
with db_connection.cursor(
cursor_factory=psycopg2.extras.RealDictCursor
) as real_dict_cursor:
# In the future the books-containing-page SQL might handle
# all of these cases. For now we branch the code out in here.
if context_uuid and context_version:
return [get_book_info(cursor, real_dict_cursor, context_uuid,
context_version, uuid, version)]
else:
portal_type = get_portal_type(cursor, uuid, version)
if portal_type == 'Module':
real_dict_cursor.execute(SQL['get-books-containing-page'],
{'document_uuid': uuid,
'document_version': version})
return real_dict_cursor.fetchall()
else:
# Books are currently not in any other book
return []
|
python
|
def get_books_containing_page(cursor, uuid, version,
context_uuid=None, context_version=None):
"""Return a list of book names and UUIDs
that contain a given module UUID."""
with db_connect() as db_connection:
# Uses a RealDictCursor instead of the regular cursor
with db_connection.cursor(
cursor_factory=psycopg2.extras.RealDictCursor
) as real_dict_cursor:
# In the future the books-containing-page SQL might handle
# all of these cases. For now we branch the code out in here.
if context_uuid and context_version:
return [get_book_info(cursor, real_dict_cursor, context_uuid,
context_version, uuid, version)]
else:
portal_type = get_portal_type(cursor, uuid, version)
if portal_type == 'Module':
real_dict_cursor.execute(SQL['get-books-containing-page'],
{'document_uuid': uuid,
'document_version': version})
return real_dict_cursor.fetchall()
else:
# Books are currently not in any other book
return []
|
[
"def",
"get_books_containing_page",
"(",
"cursor",
",",
"uuid",
",",
"version",
",",
"context_uuid",
"=",
"None",
",",
"context_version",
"=",
"None",
")",
":",
"with",
"db_connect",
"(",
")",
"as",
"db_connection",
":",
"# Uses a RealDictCursor instead of the regular cursor",
"with",
"db_connection",
".",
"cursor",
"(",
"cursor_factory",
"=",
"psycopg2",
".",
"extras",
".",
"RealDictCursor",
")",
"as",
"real_dict_cursor",
":",
"# In the future the books-containing-page SQL might handle",
"# all of these cases. For now we branch the code out in here.",
"if",
"context_uuid",
"and",
"context_version",
":",
"return",
"[",
"get_book_info",
"(",
"cursor",
",",
"real_dict_cursor",
",",
"context_uuid",
",",
"context_version",
",",
"uuid",
",",
"version",
")",
"]",
"else",
":",
"portal_type",
"=",
"get_portal_type",
"(",
"cursor",
",",
"uuid",
",",
"version",
")",
"if",
"portal_type",
"==",
"'Module'",
":",
"real_dict_cursor",
".",
"execute",
"(",
"SQL",
"[",
"'get-books-containing-page'",
"]",
",",
"{",
"'document_uuid'",
":",
"uuid",
",",
"'document_version'",
":",
"version",
"}",
")",
"return",
"real_dict_cursor",
".",
"fetchall",
"(",
")",
"else",
":",
"# Books are currently not in any other book",
"return",
"[",
"]"
] |
Return a list of book names and UUIDs
that contain a given module UUID.
|
[
"Return",
"a",
"list",
"of",
"book",
"names",
"and",
"UUIDs",
"that",
"contain",
"a",
"given",
"module",
"UUID",
"."
] |
train
|
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/content.py#L307-L330
|
openstax/cnx-archive
|
cnxarchive/views/content.py
|
get_canonical_url
|
def get_canonical_url(metadata, request):
"""Builds canonical in book url from a pages metadata."""
slug_title = u'/{}'.format('-'.join(metadata['title'].split()))
settings = get_current_registry().settings
canon_host = settings.get('canonical-hostname',
re.sub('archive.', '', request.host))
if metadata['canonical'] is None:
canon_url = request.route_url(
'content',
ident_hash=metadata['id'],
ignore=slug_title)
else:
canon_url = request.route_url(
'content',
ident_hash=metadata['canonical'],
separator=':',
page_ident_hash=metadata['id'],
ignore=slug_title)
return re.sub(request.host, canon_host, canon_url)
|
python
|
def get_canonical_url(metadata, request):
"""Builds canonical in book url from a pages metadata."""
slug_title = u'/{}'.format('-'.join(metadata['title'].split()))
settings = get_current_registry().settings
canon_host = settings.get('canonical-hostname',
re.sub('archive.', '', request.host))
if metadata['canonical'] is None:
canon_url = request.route_url(
'content',
ident_hash=metadata['id'],
ignore=slug_title)
else:
canon_url = request.route_url(
'content',
ident_hash=metadata['canonical'],
separator=':',
page_ident_hash=metadata['id'],
ignore=slug_title)
return re.sub(request.host, canon_host, canon_url)
|
[
"def",
"get_canonical_url",
"(",
"metadata",
",",
"request",
")",
":",
"slug_title",
"=",
"u'/{}'",
".",
"format",
"(",
"'-'",
".",
"join",
"(",
"metadata",
"[",
"'title'",
"]",
".",
"split",
"(",
")",
")",
")",
"settings",
"=",
"get_current_registry",
"(",
")",
".",
"settings",
"canon_host",
"=",
"settings",
".",
"get",
"(",
"'canonical-hostname'",
",",
"re",
".",
"sub",
"(",
"'archive.'",
",",
"''",
",",
"request",
".",
"host",
")",
")",
"if",
"metadata",
"[",
"'canonical'",
"]",
"is",
"None",
":",
"canon_url",
"=",
"request",
".",
"route_url",
"(",
"'content'",
",",
"ident_hash",
"=",
"metadata",
"[",
"'id'",
"]",
",",
"ignore",
"=",
"slug_title",
")",
"else",
":",
"canon_url",
"=",
"request",
".",
"route_url",
"(",
"'content'",
",",
"ident_hash",
"=",
"metadata",
"[",
"'canonical'",
"]",
",",
"separator",
"=",
"':'",
",",
"page_ident_hash",
"=",
"metadata",
"[",
"'id'",
"]",
",",
"ignore",
"=",
"slug_title",
")",
"return",
"re",
".",
"sub",
"(",
"request",
".",
"host",
",",
"canon_host",
",",
"canon_url",
")"
] |
Builds the canonical in-book URL from a page's metadata.
|
[
"Builds",
"canonical",
"in",
"book",
"url",
"from",
"a",
"pages",
"metadata",
"."
] |
train
|
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/content.py#L333-L353
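The slug is simply the title with whitespace runs collapsed to hyphens; a quick stand-alone check of that step (the title is fabricated):

title = u'Introduction to  Physics'
print(u'/{}'.format('-'.join(title.split())))  # -> '/Introduction-to-Physics'

One design note: the closing re.sub treats request.host as a regular expression, so a dot in the hostname matches any character; wrapping it in re.escape would make the match literal.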
|
openstax/cnx-archive
|
cnxarchive/views/content.py
|
get_content
|
def get_content(request):
"""Retrieve content using the ident-hash (uuid@version).
Depending on extension or HTTP_ACCEPT header return HTML or JSON.
"""
ext = request.matchdict.get('ext')
accept = request.headers.get('ACCEPT', '')
if not ext:
if ('application/xhtml+xml' in accept):
result, resp = get_content_html(request)
else: # default to json
result, resp = get_content_json(request)
elif ext == '.html':
result, resp = get_content_html(request)
elif ext == '.json':
result, resp = get_content_json(request)
else:
raise httpexceptions.HTTPNotFound()
if result['stateid'] not in [1, 8]:
# state 1 = current, state 8 = fallback
cc = resp.cache_control
cc.prevent_auto = True
cc.no_cache = True
cc.no_store = True
cc.must_revalidate = True
else:
resp.cache_control.public = True
# Build the canonical link
resp.headerlist.append(
('Link', '<{}> ;rel="Canonical"'.format(result['canon_url'])))
return resp
|
python
|
def get_content(request):
"""Retrieve content using the ident-hash (uuid@version).
Depending on extension or HTTP_ACCEPT header return HTML or JSON.
"""
ext = request.matchdict.get('ext')
accept = request.headers.get('ACCEPT', '')
if not ext:
if ('application/xhtml+xml' in accept):
result, resp = get_content_html(request)
else: # default to json
result, resp = get_content_json(request)
elif ext == '.html':
result, resp = get_content_html(request)
elif ext == '.json':
result, resp = get_content_json(request)
else:
raise httpexceptions.HTTPNotFound()
if result['stateid'] not in [1, 8]:
# state 1 = current, state 8 = fallback
cc = resp.cache_control
cc.prevent_auto = True
cc.no_cache = True
cc.no_store = True
cc.must_revalidate = True
else:
resp.cache_control.public = True
# Build the canonical link
resp.headerlist.append(
('Link', '<{}> ;rel="Canonical"'.format(result['canon_url'])))
return resp
|
[
"def",
"get_content",
"(",
"request",
")",
":",
"ext",
"=",
"request",
".",
"matchdict",
".",
"get",
"(",
"'ext'",
")",
"accept",
"=",
"request",
".",
"headers",
".",
"get",
"(",
"'ACCEPT'",
",",
"''",
")",
"if",
"not",
"ext",
":",
"if",
"(",
"'application/xhtml+xml'",
"in",
"accept",
")",
":",
"result",
",",
"resp",
"=",
"get_content_html",
"(",
"request",
")",
"else",
":",
"# default to json",
"result",
",",
"resp",
"=",
"get_content_json",
"(",
"request",
")",
"elif",
"ext",
"==",
"'.html'",
":",
"result",
",",
"resp",
"=",
"get_content_html",
"(",
"request",
")",
"elif",
"ext",
"==",
"'.json'",
":",
"result",
",",
"resp",
"=",
"get_content_json",
"(",
"request",
")",
"else",
":",
"raise",
"httpexceptions",
".",
"HTTPNotFound",
"(",
")",
"if",
"result",
"[",
"'stateid'",
"]",
"not",
"in",
"[",
"1",
",",
"8",
"]",
":",
"# state 1 = current, state 8 = fallback",
"cc",
"=",
"resp",
".",
"cache_control",
"cc",
".",
"prevent_auto",
"=",
"True",
"cc",
".",
"no_cache",
"=",
"True",
"cc",
".",
"no_store",
"=",
"True",
"cc",
".",
"must_revalidate",
"=",
"True",
"else",
":",
"resp",
".",
"cache_control",
".",
"public",
"=",
"True",
"# Build the canonical link",
"resp",
".",
"headerlist",
".",
"append",
"(",
"(",
"'Link'",
",",
"'<{}> ;rel=\"Canonical\"'",
".",
"format",
"(",
"result",
"[",
"'canon_url'",
"]",
")",
")",
")",
"return",
"resp"
] |
Retrieve content using the ident-hash (uuid@version).
Depending on extension or HTTP_ACCEPT header return HTML or JSON.
|
[
"Retrieve",
"content",
"using",
"the",
"ident",
"-",
"hash",
"(",
"uuid@version",
")",
"."
] |
train
|
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/content.py#L362-L395
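The dispatch on extension and Accept header reduces to a small pure function; a toy version for clarity (the function name and return strings are mine, not the module's):

def pick_renderer(ext, accept):
    # Mirrors get_content's branching: no extension -> content negotiation.
    if not ext:
        return 'html' if 'application/xhtml+xml' in accept else 'json'
    if ext in ('.html', '.json'):
        return ext.lstrip('.')
    raise LookupError(ext)

print(pick_renderer(None, 'application/xhtml+xml,text/html'))  # html
print(pick_renderer('.json', ''))                              # json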
|
openstax/cnx-archive
|
cnxarchive/views/content.py
|
get_extra
|
def get_extra(request):
"""Return information about a module / collection that cannot be cached."""
settings = get_current_registry().settings
exports_dirs = settings['exports-directories'].split()
args = request.matchdict
if args['page_ident_hash']:
context_id, context_version = split_ident_hash(args['ident_hash'])
try:
id, version = split_ident_hash(args['page_ident_hash'])
except IdentHashShortId as e:
id = get_uuid(e.id)
version = e.version
except IdentHashMissingVersion as e:
# Ideally we would find the page version
# that is in the book instead of latest
id = e.id
version = get_latest_version(e.id)
else:
context_id = context_version = None
id, version = split_ident_hash(args['ident_hash'])
results = {}
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
results['downloads'] = \
list(get_export_allowable_types(cursor, exports_dirs,
id, version))
results['isLatest'] = is_latest(id, version)
results['latestVersion'] = get_latest_version(id)
results['headVersion'] = get_head_version(id)
results['canPublish'] = get_module_can_publish(cursor, id)
results['state'] = get_state(cursor, id, version)
results['books'] = get_books_containing_page(cursor, id, version,
context_id,
context_version)
formatAuthors(results['books'])
resp = request.response
resp.content_type = 'application/json'
resp.body = json.dumps(results, default=json_serial)
return resp
|
python
|
def get_extra(request):
"""Return information about a module / collection that cannot be cached."""
settings = get_current_registry().settings
exports_dirs = settings['exports-directories'].split()
args = request.matchdict
if args['page_ident_hash']:
context_id, context_version = split_ident_hash(args['ident_hash'])
try:
id, version = split_ident_hash(args['page_ident_hash'])
except IdentHashShortId as e:
id = get_uuid(e.id)
version = e.version
except IdentHashMissingVersion as e:
# Ideally we would find the page version
# that is in the book instead of latest
id = e.id
version = get_latest_version(e.id)
else:
context_id = context_version = None
id, version = split_ident_hash(args['ident_hash'])
results = {}
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
results['downloads'] = \
list(get_export_allowable_types(cursor, exports_dirs,
id, version))
results['isLatest'] = is_latest(id, version)
results['latestVersion'] = get_latest_version(id)
results['headVersion'] = get_head_version(id)
results['canPublish'] = get_module_can_publish(cursor, id)
results['state'] = get_state(cursor, id, version)
results['books'] = get_books_containing_page(cursor, id, version,
context_id,
context_version)
formatAuthors(results['books'])
resp = request.response
resp.content_type = 'application/json'
resp.body = json.dumps(results, default=json_serial)
return resp
|
[
"def",
"get_extra",
"(",
"request",
")",
":",
"settings",
"=",
"get_current_registry",
"(",
")",
".",
"settings",
"exports_dirs",
"=",
"settings",
"[",
"'exports-directories'",
"]",
".",
"split",
"(",
")",
"args",
"=",
"request",
".",
"matchdict",
"if",
"args",
"[",
"'page_ident_hash'",
"]",
":",
"context_id",
",",
"context_version",
"=",
"split_ident_hash",
"(",
"args",
"[",
"'ident_hash'",
"]",
")",
"try",
":",
"id",
",",
"version",
"=",
"split_ident_hash",
"(",
"args",
"[",
"'page_ident_hash'",
"]",
")",
"except",
"IdentHashShortId",
"as",
"e",
":",
"id",
"=",
"get_uuid",
"(",
"e",
".",
"id",
")",
"version",
"=",
"e",
".",
"version",
"except",
"IdentHashMissingVersion",
"as",
"e",
":",
"# Ideally we would find the page version",
"# that is in the book instead of latest",
"id",
"=",
"e",
".",
"id",
"version",
"=",
"get_latest_version",
"(",
"e",
".",
"id",
")",
"else",
":",
"context_id",
"=",
"context_version",
"=",
"None",
"id",
",",
"version",
"=",
"split_ident_hash",
"(",
"args",
"[",
"'ident_hash'",
"]",
")",
"results",
"=",
"{",
"}",
"with",
"db_connect",
"(",
")",
"as",
"db_connection",
":",
"with",
"db_connection",
".",
"cursor",
"(",
")",
"as",
"cursor",
":",
"results",
"[",
"'downloads'",
"]",
"=",
"list",
"(",
"get_export_allowable_types",
"(",
"cursor",
",",
"exports_dirs",
",",
"id",
",",
"version",
")",
")",
"results",
"[",
"'isLatest'",
"]",
"=",
"is_latest",
"(",
"id",
",",
"version",
")",
"results",
"[",
"'latestVersion'",
"]",
"=",
"get_latest_version",
"(",
"id",
")",
"results",
"[",
"'headVersion'",
"]",
"=",
"get_head_version",
"(",
"id",
")",
"results",
"[",
"'canPublish'",
"]",
"=",
"get_module_can_publish",
"(",
"cursor",
",",
"id",
")",
"results",
"[",
"'state'",
"]",
"=",
"get_state",
"(",
"cursor",
",",
"id",
",",
"version",
")",
"results",
"[",
"'books'",
"]",
"=",
"get_books_containing_page",
"(",
"cursor",
",",
"id",
",",
"version",
",",
"context_id",
",",
"context_version",
")",
"formatAuthors",
"(",
"results",
"[",
"'books'",
"]",
")",
"resp",
"=",
"request",
".",
"response",
"resp",
".",
"content_type",
"=",
"'application/json'",
"resp",
".",
"body",
"=",
"json",
".",
"dumps",
"(",
"results",
",",
"default",
"=",
"json_serial",
")",
"return",
"resp"
] |
Return information about a module / collection that cannot be cached.
|
[
"Return",
"information",
"about",
"a",
"module",
"/",
"collection",
"that",
"cannot",
"be",
"cached",
"."
] |
train
|
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/content.py#L400-L439
|
ZELLMECHANIK-DRESDEN/dclab
|
dclab/rtdc_dataset/fmt_hierarchy.py
|
map_indices_child2parent
|
def map_indices_child2parent(child, child_indices):
"""Map child RTDCBase event indices to parent RTDCBase
Parameters
----------
child: RTDC_Hierarchy
hierarchy child with `child_indices`
child_indices: 1d ndarray
child indices to map
Returns
-------
parent_indices: 1d ndarray
hierarchy parent indices
"""
parent = child.hparent
# filters
pf = parent.filter.all
# indices corresponding to all child events
idx = np.where(pf)[0] # True means present in the child
# indices corresponding to selected child events
parent_indices = idx[child_indices]
return parent_indices
|
python
|
def map_indices_child2parent(child, child_indices):
"""Map child RTDCBase event indices to parent RTDCBase
Parameters
----------
child: RTDC_Hierarchy
hierarchy child with `child_indices`
child_indices: 1d ndarray
child indices to map
Returns
-------
parent_indices: 1d ndarray
hierarchy parent indices
"""
parent = child.hparent
# filters
pf = parent.filter.all
# indices corresponding to all child events
idx = np.where(pf)[0] # True means present in the child
# indices corresponding to selected child events
parent_indices = idx[child_indices]
return parent_indices
|
[
"def",
"map_indices_child2parent",
"(",
"child",
",",
"child_indices",
")",
":",
"parent",
"=",
"child",
".",
"hparent",
"# filters",
"pf",
"=",
"parent",
".",
"filter",
".",
"all",
"# indices corresponding to all child events",
"idx",
"=",
"np",
".",
"where",
"(",
"pf",
")",
"[",
"0",
"]",
"# True means present in the child",
"# indices corresponding to selected child events",
"parent_indices",
"=",
"idx",
"[",
"child_indices",
"]",
"return",
"parent_indices"
] |
Map child RTDCBase event indices to parent RTDCBase
Parameters
----------
child: RTDC_Hierarchy
hierarchy child with `child_indices`
child_indices: 1d ndarray
child indices to map
Returns
-------
parent_indices: 1d ndarray
hierarchy parent indices
|
[
"Map",
"child",
"RTDCBase",
"event",
"indices",
"to",
"parent",
"RTDCBase"
] |
train
|
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_hierarchy.py#L310-L332
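A worked numeric example makes the child-to-parent mapping concrete; everything below is a toy filter, not dclab API:

import numpy as np

# Parent has 6 events; the child keeps those where the filter is True.
pf = np.array([True, False, False, True, True, False])
idx = np.where(pf)[0]            # parent indices of child events: [0, 3, 4]
child_indices = np.array([1, 2])
print(idx[child_indices])        # -> [3 4]: child events 1, 2 live at parent 3, 4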
|
ZELLMECHANIK-DRESDEN/dclab
|
dclab/rtdc_dataset/fmt_hierarchy.py
|
map_indices_child2root
|
def map_indices_child2root(child, child_indices):
"""Map RTDC_Hierarchy event indices to root RTDCBase
Parameters
----------
child: RTDC_Hierarchy
RTDCBase hierarchy child
child_indices: 1d ndarray
child indices to map
Returns
-------
root_indices: 1d ndarray
hierarchy root indices
(not necessarily the indices of `parent`)
"""
while True:
indices = map_indices_child2parent(child=child,
child_indices=child_indices)
if isinstance(child.hparent, RTDC_Hierarchy):
child = child.hparent
child_indices = indices
else:
break
return indices
|
python
|
def map_indices_child2root(child, child_indices):
"""Map RTDC_Hierarchy event indices to root RTDCBase
Parameters
----------
child: RTDC_Hierarchy
RTDCBase hierarchy child
child_indices: 1d ndarray
child indices to map
Returns
-------
root_indices: 1d ndarray
hierarchy root indices
(not necessarily the indices of `parent`)
"""
while True:
indices = map_indices_child2parent(child=child,
child_indices=child_indices)
if isinstance(child.hparent, RTDC_Hierarchy):
child = child.hparent
child_indices = indices
else:
break
return indices
|
[
"def",
"map_indices_child2root",
"(",
"child",
",",
"child_indices",
")",
":",
"while",
"True",
":",
"indices",
"=",
"map_indices_child2parent",
"(",
"child",
"=",
"child",
",",
"child_indices",
"=",
"child_indices",
")",
"if",
"isinstance",
"(",
"child",
".",
"hparent",
",",
"RTDC_Hierarchy",
")",
":",
"child",
"=",
"child",
".",
"hparent",
"child_indices",
"=",
"indices",
"else",
":",
"break",
"return",
"indices"
] |
Map RTDC_Hierarchy event indices to root RTDCBase
Parameters
----------
child: RTDC_Hierarchy
RTDCBase hierarchy child
child_indices: 1d ndarray
child indices to map
Returns
-------
root_indices: 1d ndarray
hierarchy root indices
(not necessarily the indices of `parent`)
|
[
"Map",
"RTDC_Hierarchy",
"event",
"indices",
"to",
"root",
"RTDCBase"
] |
train
|
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_hierarchy.py#L335-L359
|
ZELLMECHANIK-DRESDEN/dclab
|
dclab/rtdc_dataset/fmt_hierarchy.py
|
map_indices_parent2child
|
def map_indices_parent2child(child, parent_indices):
"""Map parent RTDCBase event indices to RTDC_Hierarchy
Parameters
----------
    child: RTDC_Hierarchy
        hierarchy child
parent_indices: 1d ndarray
hierarchy parent (`child.hparent`) indices to map
Returns
-------
child_indices: 1d ndarray
child indices
"""
parent = child.hparent
# filters
pf = parent.filter.all
# indices in child
child_indices = []
count = 0
for ii in range(len(pf)):
if pf[ii]:
# only append indices if they exist in child
if ii in parent_indices:
# current child event count is the child index
child_indices.append(count)
# increment child event count
count += 1
return np.array(child_indices)
|
python
|
def map_indices_parent2child(child, parent_indices):
"""Map parent RTDCBase event indices to RTDC_Hierarchy
Parameters
----------
    child: RTDC_Hierarchy
        hierarchy child
parent_indices: 1d ndarray
hierarchy parent (`child.hparent`) indices to map
Returns
-------
child_indices: 1d ndarray
child indices
"""
parent = child.hparent
# filters
pf = parent.filter.all
# indices in child
child_indices = []
count = 0
for ii in range(len(pf)):
if pf[ii]:
# only append indices if they exist in child
if ii in parent_indices:
# current child event count is the child index
child_indices.append(count)
# increment child event count
count += 1
return np.array(child_indices)
|
[
"def",
"map_indices_parent2child",
"(",
"child",
",",
"parent_indices",
")",
":",
"parent",
"=",
"child",
".",
"hparent",
"# filters",
"pf",
"=",
"parent",
".",
"filter",
".",
"all",
"# indices in child",
"child_indices",
"=",
"[",
"]",
"count",
"=",
"0",
"for",
"ii",
"in",
"range",
"(",
"len",
"(",
"pf",
")",
")",
":",
"if",
"pf",
"[",
"ii",
"]",
":",
"# only append indices if they exist in child",
"if",
"ii",
"in",
"parent_indices",
":",
"# current child event count is the child index",
"child_indices",
".",
"append",
"(",
"count",
")",
"# increment child event count",
"count",
"+=",
"1",
"return",
"np",
".",
"array",
"(",
"child_indices",
")"
] |
Map parent RTDCBase event indices to RTDC_Hierarchy
Parameters
----------
child: RTDC_Hierarchy
    hierarchy child
parent_indices: 1d ndarray
hierarchy parent (`child.hparent`) indices to map
Returns
-------
child_indices: 1d ndarray
child indices
|
[
"Map",
"parent",
"RTDCBase",
"event",
"indices",
"to",
"RTDC_Hierarchy"
] |
train
|
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_hierarchy.py#L362-L392
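And the inverse direction, continuing the toy filter from the previous example: parent indices map back to the positions they occupy among the kept events. Note the original loop is O(n*m) because of the `in parent_indices` membership test; a dict lookup gives the same answer:

import numpy as np

pf = np.array([True, False, False, True, True, False])
pos = {p: c for c, p in enumerate(np.where(pf)[0])}  # parent idx -> child idx
print([pos[p] for p in (3, 4)])  # -> [1, 2]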
|
ZELLMECHANIK-DRESDEN/dclab
|
dclab/rtdc_dataset/fmt_hierarchy.py
|
map_indices_root2child
|
def map_indices_root2child(child, root_indices):
"""Map root RTDCBase event indices to child RTDCBase
Parameters
----------
    child: RTDC_Hierarchy
        hierarchy child
root_indices: 1d ndarray
hierarchy root indices to map
(not necessarily the indices of `parent`)
Returns
-------
child_indices: 1d ndarray
child indices
"""
# construct hierarchy tree containing only RTDC_Hierarchy instances
hierarchy = [child]
while True:
if isinstance(child.hparent, RTDC_Hierarchy):
# the parent is a hierarchy tree
hierarchy.append(child.hparent)
child = child.hparent
else:
break
indices = root_indices
for hp in hierarchy[::-1]: # reverse order
# For each hierarchy parent, map the indices down the
# hierarchy tree.
indices = map_indices_parent2child(child=hp,
parent_indices=indices)
return indices
|
python
|
def map_indices_root2child(child, root_indices):
"""Map root RTDCBase event indices to child RTDCBase
Parameters
----------
    child: RTDC_Hierarchy
        hierarchy child
root_indices: 1d ndarray
hierarchy root indices to map
(not necessarily the indices of `parent`)
Returns
-------
child_indices: 1d ndarray
child indices
"""
# construct hierarchy tree containing only RTDC_Hierarchy instances
hierarchy = [child]
while True:
if isinstance(child.hparent, RTDC_Hierarchy):
# the parent is a hierarchy tree
hierarchy.append(child.hparent)
child = child.hparent
else:
break
indices = root_indices
for hp in hierarchy[::-1]: # reverse order
# For each hierarchy parent, map the indices down the
# hierarchy tree.
indices = map_indices_parent2child(child=hp,
parent_indices=indices)
return indices
|
[
"def",
"map_indices_root2child",
"(",
"child",
",",
"root_indices",
")",
":",
"# construct hierarchy tree containing only RTDC_Hierarchy instances",
"hierarchy",
"=",
"[",
"child",
"]",
"while",
"True",
":",
"if",
"isinstance",
"(",
"child",
".",
"hparent",
",",
"RTDC_Hierarchy",
")",
":",
"# the parent is a hierarchy tree",
"hierarchy",
".",
"append",
"(",
"child",
".",
"hparent",
")",
"child",
"=",
"child",
".",
"hparent",
"else",
":",
"break",
"indices",
"=",
"root_indices",
"for",
"hp",
"in",
"hierarchy",
"[",
":",
":",
"-",
"1",
"]",
":",
"# reverse order",
"# For each hierarchy parent, map the indices down the",
"# hierarchy tree.",
"indices",
"=",
"map_indices_parent2child",
"(",
"child",
"=",
"hp",
",",
"parent_indices",
"=",
"indices",
")",
"return",
"indices"
] |
Map root RTDCBase event indices to child RTDCBase
Parameters
----------
child: RTDC_Hierarchy
    hierarchy child
root_indices: 1d ndarray
hierarchy root indices to map
(not necessarily the indices of `parent`)
Returns
-------
child_indices: 1d ndarray
child indices
|
[
"Map",
"root",
"RTDCBase",
"event",
"indices",
"to",
"child",
"RTDCBase"
] |
train
|
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_hierarchy.py#L395-L427
|
ZELLMECHANIK-DRESDEN/dclab
|
dclab/rtdc_dataset/fmt_hierarchy.py
|
HierarchyFilter.apply_manual_indices
|
def apply_manual_indices(self, manual_indices):
"""Write to `self.manual`
Write `manual_indices` to the boolean array `self.manual`
and also store the indices as `self._man_root_ids`.
Notes
-----
If `self.parent_changed` is `True`, i.e. the parent applied
a filter and the child did not yet hear about this, then
`HierarchyFilterError` is raised. This is important, because
the size of the current filter would not match the size of
the filtered events of the parent and thus index-mapping
would not work.
"""
if self.parent_changed:
msg = "Cannot apply filter, because parent changed: " \
+ "dataset {}. ".format(self.rtdc_ds) \
+ "Run `RTDC_Hierarchy.apply_filter()` first!"
raise HierarchyFilterError(msg)
else:
self._man_root_ids = list(manual_indices)
cidx = map_indices_root2child(child=self.rtdc_ds,
root_indices=manual_indices)
if len(cidx):
self.manual[cidx] = False
|
python
|
def apply_manual_indices(self, manual_indices):
"""Write to `self.manual`
Write `manual_indices` to the boolean array `self.manual`
and also store the indices as `self._man_root_ids`.
Notes
-----
If `self.parent_changed` is `True`, i.e. the parent applied
a filter and the child did not yet hear about this, then
`HierarchyFilterError` is raised. This is important, because
the size of the current filter would not match the size of
the filtered events of the parent and thus index-mapping
would not work.
"""
if self.parent_changed:
msg = "Cannot apply filter, because parent changed: " \
+ "dataset {}. ".format(self.rtdc_ds) \
+ "Run `RTDC_Hierarchy.apply_filter()` first!"
raise HierarchyFilterError(msg)
else:
self._man_root_ids = list(manual_indices)
cidx = map_indices_root2child(child=self.rtdc_ds,
root_indices=manual_indices)
if len(cidx):
self.manual[cidx] = False
|
[
"def",
"apply_manual_indices",
"(",
"self",
",",
"manual_indices",
")",
":",
"if",
"self",
".",
"parent_changed",
":",
"msg",
"=",
"\"Cannot apply filter, because parent changed: \"",
"+",
"\"dataset {}. \"",
".",
"format",
"(",
"self",
".",
"rtdc_ds",
")",
"+",
"\"Run `RTDC_Hierarchy.apply_filter()` first!\"",
"raise",
"HierarchyFilterError",
"(",
"msg",
")",
"else",
":",
"self",
".",
"_man_root_ids",
"=",
"list",
"(",
"manual_indices",
")",
"cidx",
"=",
"map_indices_root2child",
"(",
"child",
"=",
"self",
".",
"rtdc_ds",
",",
"root_indices",
"=",
"manual_indices",
")",
"if",
"len",
"(",
"cidx",
")",
":",
"self",
".",
"manual",
"[",
"cidx",
"]",
"=",
"False"
] |
Write to `self.manual`
Write `manual_indices` to the boolean array `self.manual`
and also store the indices as `self._man_root_ids`.
Notes
-----
If `self.parent_changed` is `True`, i.e. the parent applied
a filter and the child did not yet hear about this, then
`HierarchyFilterError` is raised. This is important, because
the size of the current filter would not match the size of
the filtered events of the parent and thus index-mapping
would not work.
|
[
"Write",
"to",
"self",
".",
"manual"
] |
train
|
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_hierarchy.py#L93-L118
|
ZELLMECHANIK-DRESDEN/dclab
|
dclab/rtdc_dataset/fmt_hierarchy.py
|
HierarchyFilter.retrieve_manual_indices
|
def retrieve_manual_indices(self):
"""Read from self.manual
Read from the boolean array `self.manual`, index all
        occurrences of `False` and find the corresponding indices
in the root hierarchy parent, return those and store them
in `self._man_root_ids` as well.
Notes
-----
This method also retrieves hidden indices, i.e. events
that are not part of the current hierarchy child but
which have been manually excluded before and are now
        hidden because a hierarchy parent filtered them out.
        If `self.parent_changed` is `True`, i.e. the parent applied
        a filter and the child did not yet hear about this, then
        nothing is computed and `self._man_root_ids` is returned as-is. This
is important, because the size of the current filter would
not match the size of the filtered events of the parent and
thus index-mapping would not work.
"""
if self.parent_changed:
# ignore
pass
else:
# indices from boolean array
pbool = map_indices_child2root(
child=self.rtdc_ds,
child_indices=np.where(~self.manual)[0]).tolist()
# retrieve all indices that are currently not visible
# previous indices
pold = self._man_root_ids
# all indices previously selected either via
# - self.manual or
# - self.apply_manual_indices
pall = sorted(list(set(pbool + pold)))
# visible indices (only available child indices are returned)
pvis_c = map_indices_root2child(child=self.rtdc_ds,
root_indices=pall).tolist()
# map visible child indices back to root indices
pvis_p = map_indices_child2root(child=self.rtdc_ds,
child_indices=pvis_c).tolist()
# hidden indices
phid = list(set(pall) - set(pvis_p))
# Why not set `all_idx` to `pall`:
# - pbool is considered to be correct
# - pold contains hidden indices, but also might contain
# excess indices from before, i.e. if self.apply_manual_indices
# is called, self.manual is also updated. If however,
# self.manual is updated, self._man_root_ids are not updated.
# Thus, we trust pbool (self.manual) and only use pold
# (self._man_root_ids) to determine hidden indices.
all_idx = list(set(pbool + phid))
self._man_root_ids = sorted(all_idx)
return self._man_root_ids
|
python
|
def retrieve_manual_indices(self):
"""Read from self.manual
Read from the boolean array `self.manual`, index all
        occurrences of `False` and find the corresponding indices
in the root hierarchy parent, return those and store them
in `self._man_root_ids` as well.
Notes
-----
This method also retrieves hidden indices, i.e. events
that are not part of the current hierarchy child but
which have been manually excluded before and are now
        hidden because a hierarchy parent filtered them out.
        If `self.parent_changed` is `True`, i.e. the parent applied
        a filter and the child did not yet hear about this, then
        nothing is computed and `self._man_root_ids` is returned as-is. This
is important, because the size of the current filter would
not match the size of the filtered events of the parent and
thus index-mapping would not work.
"""
if self.parent_changed:
# ignore
pass
else:
# indices from boolean array
pbool = map_indices_child2root(
child=self.rtdc_ds,
child_indices=np.where(~self.manual)[0]).tolist()
# retrieve all indices that are currently not visible
# previous indices
pold = self._man_root_ids
# all indices previously selected either via
# - self.manual or
# - self.apply_manual_indices
pall = sorted(list(set(pbool + pold)))
# visible indices (only available child indices are returned)
pvis_c = map_indices_root2child(child=self.rtdc_ds,
root_indices=pall).tolist()
# map visible child indices back to root indices
pvis_p = map_indices_child2root(child=self.rtdc_ds,
child_indices=pvis_c).tolist()
# hidden indices
phid = list(set(pall) - set(pvis_p))
# Why not set `all_idx` to `pall`:
# - pbool is considered to be correct
# - pold contains hidden indices, but also might contain
# excess indices from before, i.e. if self.apply_manual_indices
# is called, self.manual is also updated. If however,
# self.manual is updated, self._man_root_ids are not updated.
# Thus, we trust pbool (self.manual) and only use pold
# (self._man_root_ids) to determine hidden indices.
all_idx = list(set(pbool + phid))
self._man_root_ids = sorted(all_idx)
return self._man_root_ids
|
[
"def",
"retrieve_manual_indices",
"(",
"self",
")",
":",
"if",
"self",
".",
"parent_changed",
":",
"# ignore",
"pass",
"else",
":",
"# indices from boolean array",
"pbool",
"=",
"map_indices_child2root",
"(",
"child",
"=",
"self",
".",
"rtdc_ds",
",",
"child_indices",
"=",
"np",
".",
"where",
"(",
"~",
"self",
".",
"manual",
")",
"[",
"0",
"]",
")",
".",
"tolist",
"(",
")",
"# retrieve all indices that are currently not visible",
"# previous indices",
"pold",
"=",
"self",
".",
"_man_root_ids",
"# all indices previously selected either via",
"# - self.manual or",
"# - self.apply_manual_indices",
"pall",
"=",
"sorted",
"(",
"list",
"(",
"set",
"(",
"pbool",
"+",
"pold",
")",
")",
")",
"# visible indices (only available child indices are returned)",
"pvis_c",
"=",
"map_indices_root2child",
"(",
"child",
"=",
"self",
".",
"rtdc_ds",
",",
"root_indices",
"=",
"pall",
")",
".",
"tolist",
"(",
")",
"# map visible child indices back to root indices",
"pvis_p",
"=",
"map_indices_child2root",
"(",
"child",
"=",
"self",
".",
"rtdc_ds",
",",
"child_indices",
"=",
"pvis_c",
")",
".",
"tolist",
"(",
")",
"# hidden indices",
"phid",
"=",
"list",
"(",
"set",
"(",
"pall",
")",
"-",
"set",
"(",
"pvis_p",
")",
")",
"# Why not set `all_idx` to `pall`:",
"# - pbool is considered to be correct",
"# - pold contains hidden indices, but also might contain",
"# excess indices from before, i.e. if self.apply_manual_indices",
"# is called, self.manual is also updated. If however,",
"# self.manual is updated, self._man_root_ids are not updated.",
"# Thus, we trust pbool (self.manual) and only use pold",
"# (self._man_root_ids) to determine hidden indices.",
"all_idx",
"=",
"list",
"(",
"set",
"(",
"pbool",
"+",
"phid",
")",
")",
"self",
".",
"_man_root_ids",
"=",
"sorted",
"(",
"all_idx",
")",
"return",
"self",
".",
"_man_root_ids"
] |
Read from self.manual
Read from the boolean array `self.manual`, index all
occurrences of `False` and find the corresponding indices
in the root hierarchy parent, return those and store them
in `self._man_root_ids` as well.
Notes
-----
This method also retrieves hidden indices, i.e. events
that are not part of the current hierarchy child but
which have been manually excluded before and are now
hidden because a hierarchy parent filtered them out.
If `self.parent_changed` is `True`, i.e. the parent applied
a filter and the child did not yet hear about this, then
nothing is computed and `self._man_root_ids` is returned as-is. This
is important, because the size of the current filter would
not match the size of the filtered events of the parent and
thus index-mapping would not work.
|
[
"Read",
"from",
"self",
".",
"manual"
] |
train
|
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_hierarchy.py#L120-L175
|
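The hidden-index bookkeeping above can be checked in isolation with plain Python sets; the index values below are invented, mirroring the pbool/pold/phid steps of the method body:

pbool = [2, 5]        # root indices currently deselected via self.manual
pold = [2, 5, 7]      # previously stored root ids (7 is hidden by a parent filter)
pall = sorted(set(pbool + pold))        # -> [2, 5, 7]
pvis_p = [2, 5]                         # the subset still visible in the child
phid = list(set(pall) - set(pvis_p))    # -> [7], the hidden indices
all_idx = sorted(set(pbool + phid))     # -> [2, 5, 7], the stored result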
ZELLMECHANIK-DRESDEN/dclab
|
dclab/rtdc_dataset/fmt_hierarchy.py
|
RTDC_Hierarchy.apply_filter
|
def apply_filter(self, *args, **kwargs):
"""Overridden `apply_filter` to perform tasks for hierarchy child"""
if self.filter is not None:
# make sure self.filter knows about root manual indices
self.filter.retrieve_manual_indices()
# Copy event data from hierarchy parent
self.hparent.apply_filter()
# update event index
event_count = np.sum(self.hparent._filter)
self._events = {}
self._events["index"] = np.arange(1, event_count + 1)
# set non-scalar column data
if "contour" in self.hparent:
self._events["contour"] = ChildContour(self)
if "image" in self.hparent:
self._events["image"] = ChildImage(self)
if "mask" in self.hparent:
self._events["mask"] = ChildMask(self)
if "trace" in self.hparent:
trdict = {}
for flname in dfn.FLUOR_TRACES:
if flname in self.hparent["trace"]:
trdict[flname] = ChildTrace(self, flname)
self._events["trace"] = trdict
# update config
self.config["experiment"]["event count"] = event_count
self._init_filters()
super(RTDC_Hierarchy, self).apply_filter(*args, **kwargs)
|
python
|
def apply_filter(self, *args, **kwargs):
"""Overridden `apply_filter` to perform tasks for hierarchy child"""
if self.filter is not None:
# make sure self.filter knows about root manual indices
self.filter.retrieve_manual_indices()
# Copy event data from hierarchy parent
self.hparent.apply_filter()
# update event index
event_count = np.sum(self.hparent._filter)
self._events = {}
self._events["index"] = np.arange(1, event_count + 1)
# set non-scalar column data
if "contour" in self.hparent:
self._events["contour"] = ChildContour(self)
if "image" in self.hparent:
self._events["image"] = ChildImage(self)
if "mask" in self.hparent:
self._events["mask"] = ChildMask(self)
if "trace" in self.hparent:
trdict = {}
for flname in dfn.FLUOR_TRACES:
if flname in self.hparent["trace"]:
trdict[flname] = ChildTrace(self, flname)
self._events["trace"] = trdict
# update config
self.config["experiment"]["event count"] = event_count
self._init_filters()
super(RTDC_Hierarchy, self).apply_filter(*args, **kwargs)
|
[
"def",
"apply_filter",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"filter",
"is",
"not",
"None",
":",
"# make sure self.filter knows about root manual indices",
"self",
".",
"filter",
".",
"retrieve_manual_indices",
"(",
")",
"# Copy event data from hierarchy parent",
"self",
".",
"hparent",
".",
"apply_filter",
"(",
")",
"# update event index",
"event_count",
"=",
"np",
".",
"sum",
"(",
"self",
".",
"hparent",
".",
"_filter",
")",
"self",
".",
"_events",
"=",
"{",
"}",
"self",
".",
"_events",
"[",
"\"index\"",
"]",
"=",
"np",
".",
"arange",
"(",
"1",
",",
"event_count",
"+",
"1",
")",
"# set non-scalar column data",
"if",
"\"contour\"",
"in",
"self",
".",
"hparent",
":",
"self",
".",
"_events",
"[",
"\"contour\"",
"]",
"=",
"ChildContour",
"(",
"self",
")",
"if",
"\"image\"",
"in",
"self",
".",
"hparent",
":",
"self",
".",
"_events",
"[",
"\"image\"",
"]",
"=",
"ChildImage",
"(",
"self",
")",
"if",
"\"mask\"",
"in",
"self",
".",
"hparent",
":",
"self",
".",
"_events",
"[",
"\"mask\"",
"]",
"=",
"ChildMask",
"(",
"self",
")",
"if",
"\"trace\"",
"in",
"self",
".",
"hparent",
":",
"trdict",
"=",
"{",
"}",
"for",
"flname",
"in",
"dfn",
".",
"FLUOR_TRACES",
":",
"if",
"flname",
"in",
"self",
".",
"hparent",
"[",
"\"trace\"",
"]",
":",
"trdict",
"[",
"flname",
"]",
"=",
"ChildTrace",
"(",
"self",
",",
"flname",
")",
"self",
".",
"_events",
"[",
"\"trace\"",
"]",
"=",
"trdict",
"# update config",
"self",
".",
"config",
"[",
"\"experiment\"",
"]",
"[",
"\"event count\"",
"]",
"=",
"event_count",
"self",
".",
"_init_filters",
"(",
")",
"super",
"(",
"RTDC_Hierarchy",
",",
"self",
")",
".",
"apply_filter",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Overridden `apply_filter` to perform tasks for hierarchy child
|
[
"Overridden",
"apply_filter",
"to",
"perform",
"tasks",
"for",
"hierarchy",
"child"
] |
train
|
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_hierarchy.py#L269-L298
|
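A hedged end-to-end sketch of driving a hierarchy child; the file name and the filtering key are assumptions, but the core point is visible in the body above: the child re-pulls its events from the parent inside apply_filter.

import dclab

root = dclab.new_dataset("data.rtdc")         # hypothetical input file
root.config["filtering"]["area_um min"] = 30  # assumed dclab filtering key
root.apply_filter()
child = dclab.new_dataset(root)               # RTDC_Hierarchy child (assumed idiom)
child.apply_filter()  # copies filtered events, rebuilds "index", contours, traces
print(child.config["experiment"]["event count"])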
ZELLMECHANIK-DRESDEN/dclab
|
dclab/rtdc_dataset/fmt_hierarchy.py
|
RTDC_Hierarchy.hash
|
def hash(self):
"""Hashes of a hierarchy child changes if the parent changes"""
# Do not apply filters here (speed)
hph = self.hparent.hash
hpfilt = hashobj(self.hparent._filter)
dhash = hashobj(hph + hpfilt)
return dhash
|
python
|
def hash(self):
"""Hashes of a hierarchy child changes if the parent changes"""
# Do not apply filters here (speed)
hph = self.hparent.hash
hpfilt = hashobj(self.hparent._filter)
dhash = hashobj(hph + hpfilt)
return dhash
|
[
"def",
"hash",
"(",
"self",
")",
":",
"# Do not apply filters here (speed)",
"hph",
"=",
"self",
".",
"hparent",
".",
"hash",
"hpfilt",
"=",
"hashobj",
"(",
"self",
".",
"hparent",
".",
"_filter",
")",
"dhash",
"=",
"hashobj",
"(",
"hph",
"+",
"hpfilt",
")",
"return",
"dhash"
] |
The hash of a hierarchy child changes if the parent changes
|
[
"Hashes",
"of",
"a",
"hierarchy",
"child",
"changes",
"if",
"the",
"parent",
"changes"
] |
train
|
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_hierarchy.py#L301-L307
|
ZELLMECHANIK-DRESDEN/dclab
|
dclab/rtdc_dataset/fmt_tdms/event_image.py
|
ImageColumn.find_video_file
|
def find_video_file(rtdc_dataset):
"""Tries to find a video file that belongs to an RTDC dataset
Returns None if no video file is found.
"""
video = None
if rtdc_dataset._fdir.exists():
# Cell images (video)
videos = [v.name for v in rtdc_dataset._fdir.rglob("*.avi")]
# Filter videos according to measurement number
meas_id = rtdc_dataset._mid
videos = [v for v in videos if v.split("_")[0] == meas_id]
videos.sort()
if len(videos) != 0:
# Defaults to first avi file
video = videos[0]
# g/q video file names. q comes first.
for v in videos:
if v.endswith("imag.avi"):
video = v
break
# add this here, because fRT-DC measurements also contain
# videos ..._proc.avi
elif v.endswith("imaq.avi"):
video = v
break
if video is None:
return None
else:
return rtdc_dataset._fdir / video
|
python
|
def find_video_file(rtdc_dataset):
"""Tries to find a video file that belongs to an RTDC dataset
Returns None if no video file is found.
"""
video = None
if rtdc_dataset._fdir.exists():
# Cell images (video)
videos = [v.name for v in rtdc_dataset._fdir.rglob("*.avi")]
# Filter videos according to measurement number
meas_id = rtdc_dataset._mid
videos = [v for v in videos if v.split("_")[0] == meas_id]
videos.sort()
if len(videos) != 0:
# Defaults to first avi file
video = videos[0]
# g/q video file names. q comes first.
for v in videos:
if v.endswith("imag.avi"):
video = v
break
# add this here, because fRT-DC measurements also contain
# videos ..._proc.avi
elif v.endswith("imaq.avi"):
video = v
break
if video is None:
return None
else:
return rtdc_dataset._fdir / video
|
[
"def",
"find_video_file",
"(",
"rtdc_dataset",
")",
":",
"video",
"=",
"None",
"if",
"rtdc_dataset",
".",
"_fdir",
".",
"exists",
"(",
")",
":",
"# Cell images (video)",
"videos",
"=",
"[",
"v",
".",
"name",
"for",
"v",
"in",
"rtdc_dataset",
".",
"_fdir",
".",
"rglob",
"(",
"\"*.avi\"",
")",
"]",
"# Filter videos according to measurement number",
"meas_id",
"=",
"rtdc_dataset",
".",
"_mid",
"videos",
"=",
"[",
"v",
"for",
"v",
"in",
"videos",
"if",
"v",
".",
"split",
"(",
"\"_\"",
")",
"[",
"0",
"]",
"==",
"meas_id",
"]",
"videos",
".",
"sort",
"(",
")",
"if",
"len",
"(",
"videos",
")",
"!=",
"0",
":",
"# Defaults to first avi file",
"video",
"=",
"videos",
"[",
"0",
"]",
"# g/q video file names. q comes first.",
"for",
"v",
"in",
"videos",
":",
"if",
"v",
".",
"endswith",
"(",
"\"imag.avi\"",
")",
":",
"video",
"=",
"v",
"break",
"# add this here, because fRT-DC measurements also contain",
"# videos ..._proc.avi",
"elif",
"v",
".",
"endswith",
"(",
"\"imaq.avi\"",
")",
":",
"video",
"=",
"v",
"break",
"if",
"video",
"is",
"None",
":",
"return",
"None",
"else",
":",
"return",
"rtdc_dataset",
".",
"_fdir",
"/",
"video"
] |
Tries to find a video file that belongs to an RTDC dataset
Returns None if no video file is found.
|
[
"Tries",
"to",
"find",
"a",
"video",
"file",
"that",
"belongs",
"to",
"an",
"RTDC",
"dataset"
] |
train
|
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_tdms/event_image.py#L66-L95
|
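The selection order in the loop above (alphabetically first file ending in imag.avi or imaq.avi, falling back to the first .avi overall) can be exercised without a dataset object; the file names are invented:

def pick(videos):
    videos = sorted(videos)
    video = videos[0] if videos else None
    for v in videos:
        if v.endswith("imag.avi") or v.endswith("imaq.avi"):
            return v
    return video

print(pick(["M1_proc.avi", "M1_imaq.avi"]))  # -> M1_imaq.avi
print(pick(["M1_proc.avi"]))                 # -> M1_proc.avi (fallback)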
ZELLMECHANIK-DRESDEN/dclab
|
dclab/rtdc_dataset/fmt_tdms/event_image.py
|
ImageMap._get_image_workaround_seek
|
def _get_image_workaround_seek(self, idx):
"""Same as __getitem__ but seek through the video beforehand
This is a workaround for an all-zero image returned by `imageio`.
"""
warnings.warn("imageio workaround used!")
cap = self.video_handle
mult = 50
for ii in range(idx//mult):
cap.get_data(ii*mult)
final = cap.get_data(idx)
return final
|
python
|
def _get_image_workaround_seek(self, idx):
"""Same as __getitem__ but seek through the video beforehand
This is a workaround for an all-zero image returned by `imageio`.
"""
warnings.warn("imageio workaround used!")
cap = self.video_handle
mult = 50
for ii in range(idx//mult):
cap.get_data(ii*mult)
final = cap.get_data(idx)
return final
|
[
"def",
"_get_image_workaround_seek",
"(",
"self",
",",
"idx",
")",
":",
"warnings",
".",
"warn",
"(",
"\"imageio workaround used!\"",
")",
"cap",
"=",
"self",
".",
"video_handle",
"mult",
"=",
"50",
"for",
"ii",
"in",
"range",
"(",
"idx",
"//",
"mult",
")",
":",
"cap",
".",
"get_data",
"(",
"ii",
"*",
"mult",
")",
"final",
"=",
"cap",
".",
"get_data",
"(",
"idx",
")",
"return",
"final"
] |
Same as __getitem__ but seek through the video beforehand
This is a workaround for an all-zero image returned by `imageio`.
|
[
"Same",
"as",
"__getitem__",
"but",
"seek",
"through",
"the",
"video",
"beforehand"
] |
train
|
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_tdms/event_image.py#L146-L157
|
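The workaround amounts to coarse sequential seeking before the real read. The same pattern against the public imageio reader API, standalone (file name invented):

import imageio

cap = imageio.get_reader("M1_imaq.avi")  # hypothetical video file
idx, mult = 1234, 50
# Touch every 50th frame so the decoder's position is near `idx`
# before the actual read; avoids the all-zero-frame bug noted above.
for ii in range(idx // mult):
    cap.get_data(ii * mult)
frame = cap.get_data(idx)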
mhostetter/nhl
|
nhl/list.py
|
List.select
|
def select(self, attr, default=None):
"""
        Select a given attribute (or chain of attributes) from the objects within the
list.
Args:
attr (str): attributes to be selected (with initial `.` omitted)
default (any): value to return if given element in list doesn't contain
desired attribute
Returns:
nhl.List: list of selected attribute values
"""
return List([_select(item, attr, default) for item in self])
|
python
|
def select(self, attr, default=None):
"""
        Select a given attribute (or chain of attributes) from the objects within the
list.
Args:
attr (str): attributes to be selected (with initial `.` omitted)
default (any): value to return if given element in list doesn't contain
desired attribute
Returns:
nhl.List: list of selected attribute values
"""
return List([_select(item, attr, default) for item in self])
|
[
"def",
"select",
"(",
"self",
",",
"attr",
",",
"default",
"=",
"None",
")",
":",
"return",
"List",
"(",
"[",
"_select",
"(",
"item",
",",
"attr",
",",
"default",
")",
"for",
"item",
"in",
"self",
"]",
")"
] |
Select a given attribute (or chain of attributes) from the objects within the
list.
Args:
attr (str): attributes to be selected (with initial `.` omitted)
default (any): value to return if given element in list doesn't contain
desired attribute
Returns:
nhl.List: list of selected attribute values
|
[
"Select",
"a",
"given",
"attribute",
"(",
"or",
"chain",
"or",
"attributes",
")",
"from",
"the",
"objects",
"within",
"the",
"list",
"."
] |
train
|
https://github.com/mhostetter/nhl/blob/32c91cc392826e9de728563d57ab527421734ee1/nhl/list.py#L44-L57
|
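A usage sketch, assuming (as the docstring implies) that the private _select helper resolves dotted attribute chains; the objects and the nhl.list import path are illustrative:

from types import SimpleNamespace
from nhl.list import List  # import path per the record above

players = List([
    SimpleNamespace(name="A", stats=SimpleNamespace(goals=35)),
    SimpleNamespace(name="B", stats=SimpleNamespace(goals=48)),
])
players.select("name")              # -> List(['A', 'B'])
players.select("stats.goals")       # -> List([35, 48])
players.select("stats.assists", 0)  # missing attribute falls back to default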
mhostetter/nhl
|
nhl/list.py
|
List.filter
|
def filter(self, attr, value, compare="=="):
"""
        Filter list by a comparison of a given attribute (or chain of attributes).
Args:
attr (str): attributes to be compared (with initial `.` omitted)
value (any): value to compare attr against
compare (str): comparison type
"=": `attr == value`
"==": `attr == value`
">": `attr > value`
">=": `attr >= value`
"<": `attr < value`
"<=": `attr <= value`
"in": `value in attr`
"contains": `value in attr`
Returns:
nhl.List: reduced list with items that satisfy filter criterion
"""
return List([item for item in self if _filter(_select(item, attr), value, compare)])
|
python
|
def filter(self, attr, value, compare="=="):
"""
        Filter list by a comparison of a given attribute (or chain of attributes).
Args:
attr (str): attributes to be compared (with initial `.` omitted)
value (any): value to compare attr against
compare (str): comparison type
"=": `attr == value`
"==": `attr == value`
">": `attr > value`
">=": `attr >= value`
"<": `attr < value`
"<=": `attr <= value`
"in": `value in attr`
"contains": `value in attr`
Returns:
nhl.List: reduced list with items that satisfy filter criterion
"""
return List([item for item in self if _filter(_select(item, attr), value, compare)])
|
[
"def",
"filter",
"(",
"self",
",",
"attr",
",",
"value",
",",
"compare",
"=",
"\"==\"",
")",
":",
"return",
"List",
"(",
"[",
"item",
"for",
"item",
"in",
"self",
"if",
"_filter",
"(",
"_select",
"(",
"item",
",",
"attr",
")",
",",
"value",
",",
"compare",
")",
"]",
")"
] |
Filter list by a comparison of a given attribute (or chain of attributes).
Args:
attr (str): attributes to be compared (with initial `.` omitted)
value (any): value to compare attr against
compare (str): comparison type
"=": `attr == value`
"==": `attr == value`
">": `attr > value`
">=": `attr >= value`
"<": `attr < value`
"<=": `attr <= value`
"in": `value in attr`
"contains": `value in attr`
Returns:
nhl.List: reduced list with items that satisfy filter criterion
|
[
"Filter",
"list",
"by",
"a",
"comparison",
"of",
"a",
"given",
"attribute",
"(",
"or",
"chain",
"or",
"attributes",
")",
"."
] |
train
|
https://github.com/mhostetter/nhl/blob/32c91cc392826e9de728563d57ab527421734ee1/nhl/list.py#L59-L79
|
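Continuing the sketch from select above, the docstring's comparison table maps directly onto calls like these:

players.filter("stats.goals", 40, compare=">")  # items with goals > 40
players.filter("name", "A")                     # default "==" comparison
players.filter("name", "A", compare="in")       # tests "A" in item.name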
robmcmullen/atrcopy
|
atrcopy/ataridos.py
|
AtariDosDiskImage.as_new_format
|
def as_new_format(self, format="ATR"):
""" Create a new disk image in the specified format
"""
first_data = len(self.header)
raw = self.rawdata[first_data:]
data = add_atr_header(raw)
newraw = SegmentData(data)
image = self.__class__(newraw)
return image
|
python
|
def as_new_format(self, format="ATR"):
""" Create a new disk image in the specified format
"""
first_data = len(self.header)
raw = self.rawdata[first_data:]
data = add_atr_header(raw)
newraw = SegmentData(data)
image = self.__class__(newraw)
return image
|
[
"def",
"as_new_format",
"(",
"self",
",",
"format",
"=",
"\"ATR\"",
")",
":",
"first_data",
"=",
"len",
"(",
"self",
".",
"header",
")",
"raw",
"=",
"self",
".",
"rawdata",
"[",
"first_data",
":",
"]",
"data",
"=",
"add_atr_header",
"(",
"raw",
")",
"newraw",
"=",
"SegmentData",
"(",
"data",
")",
"image",
"=",
"self",
".",
"__class__",
"(",
"newraw",
")",
"return",
"image"
] |
Create a new disk image in the specified format
|
[
"Create",
"a",
"new",
"disk",
"image",
"in",
"the",
"specified",
"format"
] |
train
|
https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/ataridos.py#L489-L497
|
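Note that despite the format argument, the body above always prepends an ATR header via add_atr_header. A hedged sketch; the input file name is invented and the import locations follow the module paths in this dump:

import numpy as np
from atrcopy.segments import SegmentData
from atrcopy.ataridos import AtariDosDiskImage

data = np.fromfile("game.atr", dtype=np.uint8)  # hypothetical disk image
image = AtariDosDiskImage(SegmentData(data))
atr = image.as_new_format("ATR")  # re-wrapped copy with a fresh ATR header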
simse/pymitv
|
pymitv/tv.py
|
TV.set_source
|
def set_source(self, source):
"""Selects and saves source."""
route = Navigator(source=self.source).navigate_to_source(source)
# Save new source
self.source = source
return self._send_keystroke(route, wait=True)
|
python
|
def set_source(self, source):
"""Selects and saves source."""
route = Navigator(source=self.source).navigate_to_source(source)
# Save new source
self.source = source
return self._send_keystroke(route, wait=True)
|
[
"def",
"set_source",
"(",
"self",
",",
"source",
")",
":",
"route",
"=",
"Navigator",
"(",
"source",
"=",
"self",
".",
"source",
")",
".",
"navigate_to_source",
"(",
"source",
")",
"# Save new source\r",
"self",
".",
"source",
"=",
"source",
"return",
"self",
".",
"_send_keystroke",
"(",
"route",
",",
"wait",
"=",
"True",
")"
] |
Selects and saves source.
|
[
"Selects",
"and",
"saves",
"source",
"."
] |
train
|
https://github.com/simse/pymitv/blob/03213f591d70fbf90ba2b6af372e474c9bfb99f6/pymitv/tv.py#L100-L107
|
openstax/cnx-archive
|
cnxarchive/__init__.py
|
find_migrations_directory
|
def find_migrations_directory():
"""Finds and returns the location of the database migrations directory.
This function is used from a setuptools entry-point for db-migrator.
"""
here = os.path.abspath(os.path.dirname(__file__))
return os.path.join(here, 'sql/migrations')
|
python
|
def find_migrations_directory():
"""Finds and returns the location of the database migrations directory.
This function is used from a setuptools entry-point for db-migrator.
"""
here = os.path.abspath(os.path.dirname(__file__))
return os.path.join(here, 'sql/migrations')
|
[
"def",
"find_migrations_directory",
"(",
")",
":",
"here",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"here",
",",
"'sql/migrations'",
")"
] |
Finds and returns the location of the database migrations directory.
This function is used from a setuptools entry-point for db-migrator.
|
[
"Finds",
"and",
"returns",
"the",
"location",
"of",
"the",
"database",
"migrations",
"directory",
".",
"This",
"function",
"is",
"used",
"from",
"a",
"setuptools",
"entry",
"-",
"point",
"for",
"db",
"-",
"migrator",
"."
] |
train
|
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/__init__.py#L34-L39
|
openstax/cnx-archive
|
cnxarchive/__init__.py
|
declare_api_routes
|
def declare_api_routes(config):
"""Declare routes, with a custom pregenerator."""
# The pregenerator makes sure we can generate a path using
# request.route_path even if we don't have all the variables.
#
# For example, instead of having to do this:
# request.route_path('resource', hash=hash, ignore='')
# it's possible to do this:
# request.route_path('resource', hash=hash)
def pregenerator(path):
# find all the variables in the path
variables = [(s.split(':')[0], '') for s in path.split('{')[1:]]
def wrapper(request, elements, kwargs):
modified_kwargs = dict(variables)
modified_kwargs.update(kwargs)
return elements, modified_kwargs
return wrapper
def add_route(name, path, *args, **kwargs):
return config.add_route(name, path, *args,
pregenerator=pregenerator(path), **kwargs)
add_route('content', '/contents/{ident_hash:([^:/@.]+(@[0-9.]*[0-9]+)?)}{separator:(:?)}{page_ident_hash:([^:/@.]+(@[0-9.]*[0-9]+)?)?}{ignore:(/[^/.]*?/?)?}{ext:([.](html|json))?}') # noqa cnxarchive.views:get_content
add_route('resource', '/resources/{hash}{ignore:(/.*)?}') # noqa cnxarchive.views:get_resource
add_route('export', '/exports/{ident_hash}.{type}{ignore:(/.*)?}') # noqa cnxarchive.views:get_export
add_route('extras', '/extras{key:(/(featured|messages|licenses|subjects|languages))?}') # noqa cnxarchive.views:extras
add_route('content-extras', '/extras/{ident_hash:([^:/@.]+(@[0-9.]*[0-9]+)?)}{separator:(:?)}{page_ident_hash:([^:/@.]+(@[0-9.]*[0-9]+)?)?}') # noqa cnxarchive.views:get_extra
add_route('search', '/search') # cnxarchive.views:search
add_route('in-book-search', '/search/{ident_hash:([^:/]+)}') # noqa cnxarchive.views:in-book-search
add_route('in-book-search-page', '/search/{ident_hash:([^:/]+)}:{page_ident_hash}') # noqa cnxarchive.views:in_book_search_highlighted_results
add_route('sitemap-index', '/sitemap_index.xml') # noqa cnxarchive.views:sitemap
add_route('sitemap', '/sitemap-{from_id}.xml') # noqa cnxarchive.views:sitemap
add_route('robots', '/robots.txt') # noqa cnxarchive.views:robots
add_route('legacy-redirect', '/content/{objid}{ignore:(/)?}') # noqa cnxarchive.views:redirect_legacy_content
add_route('legacy-redirect-latest', '/content/{objid}/latest{ignore:(/)?}{filename:(.+)?}') # noqa cnxarchive.views:redirect_legacy_content
add_route('legacy-redirect-w-version', '/content/{objid}/{objver}{ignore:(/)?}{filename:(.+)?}') # noqa cnxarchive.views:redirect_legacy_content
add_route('recent', '/feeds/recent.rss') # cnxarchive.views:recent
add_route('oai', '/feeds/oai') # cnxarchive.views:oai
add_route('xpath', '/xpath.html') # cnxarchive.views.xpath
add_route('xpath-json', '/xpath.json')
|
python
|
def declare_api_routes(config):
"""Declare routes, with a custom pregenerator."""
# The pregenerator makes sure we can generate a path using
# request.route_path even if we don't have all the variables.
#
# For example, instead of having to do this:
# request.route_path('resource', hash=hash, ignore='')
# it's possible to do this:
# request.route_path('resource', hash=hash)
def pregenerator(path):
# find all the variables in the path
variables = [(s.split(':')[0], '') for s in path.split('{')[1:]]
def wrapper(request, elements, kwargs):
modified_kwargs = dict(variables)
modified_kwargs.update(kwargs)
return elements, modified_kwargs
return wrapper
def add_route(name, path, *args, **kwargs):
return config.add_route(name, path, *args,
pregenerator=pregenerator(path), **kwargs)
add_route('content', '/contents/{ident_hash:([^:/@.]+(@[0-9.]*[0-9]+)?)}{separator:(:?)}{page_ident_hash:([^:/@.]+(@[0-9.]*[0-9]+)?)?}{ignore:(/[^/.]*?/?)?}{ext:([.](html|json))?}') # noqa cnxarchive.views:get_content
add_route('resource', '/resources/{hash}{ignore:(/.*)?}') # noqa cnxarchive.views:get_resource
add_route('export', '/exports/{ident_hash}.{type}{ignore:(/.*)?}') # noqa cnxarchive.views:get_export
add_route('extras', '/extras{key:(/(featured|messages|licenses|subjects|languages))?}') # noqa cnxarchive.views:extras
add_route('content-extras', '/extras/{ident_hash:([^:/@.]+(@[0-9.]*[0-9]+)?)}{separator:(:?)}{page_ident_hash:([^:/@.]+(@[0-9.]*[0-9]+)?)?}') # noqa cnxarchive.views:get_extra
add_route('search', '/search') # cnxarchive.views:search
add_route('in-book-search', '/search/{ident_hash:([^:/]+)}') # noqa cnxarchive.views:in-book-search
add_route('in-book-search-page', '/search/{ident_hash:([^:/]+)}:{page_ident_hash}') # noqa cnxarchive.views:in_book_search_highlighted_results
add_route('sitemap-index', '/sitemap_index.xml') # noqa cnxarchive.views:sitemap
add_route('sitemap', '/sitemap-{from_id}.xml') # noqa cnxarchive.views:sitemap
add_route('robots', '/robots.txt') # noqa cnxarchive.views:robots
add_route('legacy-redirect', '/content/{objid}{ignore:(/)?}') # noqa cnxarchive.views:redirect_legacy_content
add_route('legacy-redirect-latest', '/content/{objid}/latest{ignore:(/)?}{filename:(.+)?}') # noqa cnxarchive.views:redirect_legacy_content
add_route('legacy-redirect-w-version', '/content/{objid}/{objver}{ignore:(/)?}{filename:(.+)?}') # noqa cnxarchive.views:redirect_legacy_content
add_route('recent', '/feeds/recent.rss') # cnxarchive.views:recent
add_route('oai', '/feeds/oai') # cnxarchive.views:oai
add_route('xpath', '/xpath.html') # cnxarchive.views.xpath
add_route('xpath-json', '/xpath.json')
|
[
"def",
"declare_api_routes",
"(",
"config",
")",
":",
"# The pregenerator makes sure we can generate a path using",
"# request.route_path even if we don't have all the variables.",
"#",
"# For example, instead of having to do this:",
"# request.route_path('resource', hash=hash, ignore='')",
"# it's possible to do this:",
"# request.route_path('resource', hash=hash)",
"def",
"pregenerator",
"(",
"path",
")",
":",
"# find all the variables in the path",
"variables",
"=",
"[",
"(",
"s",
".",
"split",
"(",
"':'",
")",
"[",
"0",
"]",
",",
"''",
")",
"for",
"s",
"in",
"path",
".",
"split",
"(",
"'{'",
")",
"[",
"1",
":",
"]",
"]",
"def",
"wrapper",
"(",
"request",
",",
"elements",
",",
"kwargs",
")",
":",
"modified_kwargs",
"=",
"dict",
"(",
"variables",
")",
"modified_kwargs",
".",
"update",
"(",
"kwargs",
")",
"return",
"elements",
",",
"modified_kwargs",
"return",
"wrapper",
"def",
"add_route",
"(",
"name",
",",
"path",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"config",
".",
"add_route",
"(",
"name",
",",
"path",
",",
"*",
"args",
",",
"pregenerator",
"=",
"pregenerator",
"(",
"path",
")",
",",
"*",
"*",
"kwargs",
")",
"add_route",
"(",
"'content'",
",",
"'/contents/{ident_hash:([^:/@.]+(@[0-9.]*[0-9]+)?)}{separator:(:?)}{page_ident_hash:([^:/@.]+(@[0-9.]*[0-9]+)?)?}{ignore:(/[^/.]*?/?)?}{ext:([.](html|json))?}'",
")",
"# noqa cnxarchive.views:get_content",
"add_route",
"(",
"'resource'",
",",
"'/resources/{hash}{ignore:(/.*)?}'",
")",
"# noqa cnxarchive.views:get_resource",
"add_route",
"(",
"'export'",
",",
"'/exports/{ident_hash}.{type}{ignore:(/.*)?}'",
")",
"# noqa cnxarchive.views:get_export",
"add_route",
"(",
"'extras'",
",",
"'/extras{key:(/(featured|messages|licenses|subjects|languages))?}'",
")",
"# noqa cnxarchive.views:extras",
"add_route",
"(",
"'content-extras'",
",",
"'/extras/{ident_hash:([^:/@.]+(@[0-9.]*[0-9]+)?)}{separator:(:?)}{page_ident_hash:([^:/@.]+(@[0-9.]*[0-9]+)?)?}'",
")",
"# noqa cnxarchive.views:get_extra",
"add_route",
"(",
"'search'",
",",
"'/search'",
")",
"# cnxarchive.views:search",
"add_route",
"(",
"'in-book-search'",
",",
"'/search/{ident_hash:([^:/]+)}'",
")",
"# noqa cnxarchive.views:in-book-search",
"add_route",
"(",
"'in-book-search-page'",
",",
"'/search/{ident_hash:([^:/]+)}:{page_ident_hash}'",
")",
"# noqa cnxarchive.views:in_book_search_highlighted_results",
"add_route",
"(",
"'sitemap-index'",
",",
"'/sitemap_index.xml'",
")",
"# noqa cnxarchive.views:sitemap",
"add_route",
"(",
"'sitemap'",
",",
"'/sitemap-{from_id}.xml'",
")",
"# noqa cnxarchive.views:sitemap",
"add_route",
"(",
"'robots'",
",",
"'/robots.txt'",
")",
"# noqa cnxarchive.views:robots",
"add_route",
"(",
"'legacy-redirect'",
",",
"'/content/{objid}{ignore:(/)?}'",
")",
"# noqa cnxarchive.views:redirect_legacy_content",
"add_route",
"(",
"'legacy-redirect-latest'",
",",
"'/content/{objid}/latest{ignore:(/)?}{filename:(.+)?}'",
")",
"# noqa cnxarchive.views:redirect_legacy_content",
"add_route",
"(",
"'legacy-redirect-w-version'",
",",
"'/content/{objid}/{objver}{ignore:(/)?}{filename:(.+)?}'",
")",
"# noqa cnxarchive.views:redirect_legacy_content",
"add_route",
"(",
"'recent'",
",",
"'/feeds/recent.rss'",
")",
"# cnxarchive.views:recent",
"add_route",
"(",
"'oai'",
",",
"'/feeds/oai'",
")",
"# cnxarchive.views:oai",
"add_route",
"(",
"'xpath'",
",",
"'/xpath.html'",
")",
"# cnxarchive.views.xpath",
"add_route",
"(",
"'xpath-json'",
",",
"'/xpath.json'",
")"
] |
Declare routes, with a custom pregenerator.
|
[
"Declare",
"routes",
"with",
"a",
"custom",
"pregenerator",
"."
] |
train
|
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/__init__.py#L42-L82
|
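What the pregenerator precomputes is easy to see in isolation; this reproduces the variables expression from the body above on the declared resource route:

path = '/resources/{hash}{ignore:(/.*)?}'
variables = [(s.split(':')[0], '') for s in path.split('{')[1:]]
print(variables)  # -> [('hash}', ''), ('ignore', '')]
# The stray brace on 'hash}' is harmless: callers always pass hash
# explicitly, and the '' default for 'ignore' is exactly what lets
# request.route_path('resource', hash=h) omit ignore=''.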
openstax/cnx-archive
|
cnxarchive/__init__.py
|
declare_type_info
|
def declare_type_info(config):
"""Lookup type info from app configuration."""
settings = config.registry.settings
settings['_type_info'] = []
for line in settings['exports-allowable-types'].splitlines():
if not line.strip():
continue
type_name, type_info = line.strip().split(':', 1)
type_info = type_info.split(',', 3)
settings['_type_info'].append((type_name, {
'type_name': type_name,
'file_extension': type_info[0],
'mimetype': type_info[1],
'user_friendly_name': type_info[2],
'description': type_info[3],
}))
|
python
|
def declare_type_info(config):
"""Lookup type info from app configuration."""
settings = config.registry.settings
settings['_type_info'] = []
for line in settings['exports-allowable-types'].splitlines():
if not line.strip():
continue
type_name, type_info = line.strip().split(':', 1)
type_info = type_info.split(',', 3)
settings['_type_info'].append((type_name, {
'type_name': type_name,
'file_extension': type_info[0],
'mimetype': type_info[1],
'user_friendly_name': type_info[2],
'description': type_info[3],
}))
|
[
"def",
"declare_type_info",
"(",
"config",
")",
":",
"settings",
"=",
"config",
".",
"registry",
".",
"settings",
"settings",
"[",
"'_type_info'",
"]",
"=",
"[",
"]",
"for",
"line",
"in",
"settings",
"[",
"'exports-allowable-types'",
"]",
".",
"splitlines",
"(",
")",
":",
"if",
"not",
"line",
".",
"strip",
"(",
")",
":",
"continue",
"type_name",
",",
"type_info",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"':'",
",",
"1",
")",
"type_info",
"=",
"type_info",
".",
"split",
"(",
"','",
",",
"3",
")",
"settings",
"[",
"'_type_info'",
"]",
".",
"append",
"(",
"(",
"type_name",
",",
"{",
"'type_name'",
":",
"type_name",
",",
"'file_extension'",
":",
"type_info",
"[",
"0",
"]",
",",
"'mimetype'",
":",
"type_info",
"[",
"1",
"]",
",",
"'user_friendly_name'",
":",
"type_info",
"[",
"2",
"]",
",",
"'description'",
":",
"type_info",
"[",
"3",
"]",
",",
"}",
")",
")"
] |
Look up type info from app configuration.
|
[
"Lookup",
"type",
"info",
"from",
"app",
"configuration",
"."
] |
train
|
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/__init__.py#L85-L100
|
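The expected shape of one exports-allowable-types line follows from the two split calls above; the concrete values here are invented for illustration:

line = "pdf:pdf,application/pdf,PDF,Portable Document Format file"
type_name, type_info = line.strip().split(':', 1)
type_info = type_info.split(',', 3)
print(type_name, type_info)
# -> pdf ['pdf', 'application/pdf', 'PDF', 'Portable Document Format file']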
openstax/cnx-archive
|
cnxarchive/__init__.py
|
main
|
def main(global_config, **settings):
"""Main WSGI application factory."""
initialize_sentry_integration()
config = Configurator(settings=settings)
declare_api_routes(config)
declare_type_info(config)
# allowing the pyramid templates to render rss and xml
config.include('pyramid_jinja2')
config.add_jinja2_renderer('.rss')
config.add_jinja2_renderer('.xml')
mandatory_settings = ['exports-directories', 'exports-allowable-types']
for setting in mandatory_settings:
if not settings.get(setting, None):
raise ValueError('Missing {} config setting.'.format(setting))
config.scan(ignore='.tests')
config.include('cnxarchive.events.main')
config.add_tween('cnxarchive.tweens.conditional_http_tween_factory')
return config.make_wsgi_app()
|
python
|
def main(global_config, **settings):
"""Main WSGI application factory."""
initialize_sentry_integration()
config = Configurator(settings=settings)
declare_api_routes(config)
declare_type_info(config)
# allowing the pyramid templates to render rss and xml
config.include('pyramid_jinja2')
config.add_jinja2_renderer('.rss')
config.add_jinja2_renderer('.xml')
mandatory_settings = ['exports-directories', 'exports-allowable-types']
for setting in mandatory_settings:
if not settings.get(setting, None):
raise ValueError('Missing {} config setting.'.format(setting))
config.scan(ignore='.tests')
config.include('cnxarchive.events.main')
config.add_tween('cnxarchive.tweens.conditional_http_tween_factory')
return config.make_wsgi_app()
|
[
"def",
"main",
"(",
"global_config",
",",
"*",
"*",
"settings",
")",
":",
"initialize_sentry_integration",
"(",
")",
"config",
"=",
"Configurator",
"(",
"settings",
"=",
"settings",
")",
"declare_api_routes",
"(",
"config",
")",
"declare_type_info",
"(",
"config",
")",
"# allowing the pyramid templates to render rss and xml",
"config",
".",
"include",
"(",
"'pyramid_jinja2'",
")",
"config",
".",
"add_jinja2_renderer",
"(",
"'.rss'",
")",
"config",
".",
"add_jinja2_renderer",
"(",
"'.xml'",
")",
"mandatory_settings",
"=",
"[",
"'exports-directories'",
",",
"'exports-allowable-types'",
"]",
"for",
"setting",
"in",
"mandatory_settings",
":",
"if",
"not",
"settings",
".",
"get",
"(",
"setting",
",",
"None",
")",
":",
"raise",
"ValueError",
"(",
"'Missing {} config setting.'",
".",
"format",
"(",
"setting",
")",
")",
"config",
".",
"scan",
"(",
"ignore",
"=",
"'.tests'",
")",
"config",
".",
"include",
"(",
"'cnxarchive.events.main'",
")",
"config",
".",
"add_tween",
"(",
"'cnxarchive.tweens.conditional_http_tween_factory'",
")",
"return",
"config",
".",
"make_wsgi_app",
"(",
")"
] |
Main WSGI application factory.
|
[
"Main",
"WSGI",
"application",
"factory",
"."
] |
train
|
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/__init__.py#L137-L159
|
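A sketch of the two settings the factory hard-requires (values invented); a real deployment ini also supplies database settings and more, so this shows the shape of the check rather than a runnable deployment:

settings = {
    'exports-directories': '/var/cnx/exports',
    'exports-allowable-types': 'pdf:pdf,application/pdf,PDF,PDF file',
}
for setting in ['exports-directories', 'exports-allowable-types']:
    assert settings.get(setting), 'Missing {} config setting.'.format(setting)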
lucasmaystre/choix
|
choix/lsr.py
|
_init_lsr
|
def _init_lsr(n_items, alpha, initial_params):
"""Initialize the LSR Markov chain and the weights."""
if initial_params is None:
weights = np.ones(n_items)
else:
weights = exp_transform(initial_params)
chain = alpha * np.ones((n_items, n_items), dtype=float)
return weights, chain
|
python
|
def _init_lsr(n_items, alpha, initial_params):
"""Initialize the LSR Markov chain and the weights."""
if initial_params is None:
weights = np.ones(n_items)
else:
weights = exp_transform(initial_params)
chain = alpha * np.ones((n_items, n_items), dtype=float)
return weights, chain
|
[
"def",
"_init_lsr",
"(",
"n_items",
",",
"alpha",
",",
"initial_params",
")",
":",
"if",
"initial_params",
"is",
"None",
":",
"weights",
"=",
"np",
".",
"ones",
"(",
"n_items",
")",
"else",
":",
"weights",
"=",
"exp_transform",
"(",
"initial_params",
")",
"chain",
"=",
"alpha",
"*",
"np",
".",
"ones",
"(",
"(",
"n_items",
",",
"n_items",
")",
",",
"dtype",
"=",
"float",
")",
"return",
"weights",
",",
"chain"
] |
Initialize the LSR Markov chain and the weights.
|
[
"Initialize",
"the",
"LSR",
"Markov",
"chain",
"and",
"the",
"weights",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/lsr.py#L10-L17
|
lucasmaystre/choix
|
choix/lsr.py
|
_ilsr
|
def _ilsr(fun, params, max_iter, tol):
"""Iteratively refine LSR estimates until convergence.
Raises
------
RuntimeError
If the algorithm does not converge after ``max_iter`` iterations.
"""
converged = NormOfDifferenceTest(tol, order=1)
for _ in range(max_iter):
params = fun(initial_params=params)
if converged(params):
return params
raise RuntimeError("Did not converge after {} iterations".format(max_iter))
|
python
|
def _ilsr(fun, params, max_iter, tol):
"""Iteratively refine LSR estimates until convergence.
Raises
------
RuntimeError
If the algorithm does not converge after ``max_iter`` iterations.
"""
converged = NormOfDifferenceTest(tol, order=1)
for _ in range(max_iter):
params = fun(initial_params=params)
if converged(params):
return params
raise RuntimeError("Did not converge after {} iterations".format(max_iter))
|
[
"def",
"_ilsr",
"(",
"fun",
",",
"params",
",",
"max_iter",
",",
"tol",
")",
":",
"converged",
"=",
"NormOfDifferenceTest",
"(",
"tol",
",",
"order",
"=",
"1",
")",
"for",
"_",
"in",
"range",
"(",
"max_iter",
")",
":",
"params",
"=",
"fun",
"(",
"initial_params",
"=",
"params",
")",
"if",
"converged",
"(",
"params",
")",
":",
"return",
"params",
"raise",
"RuntimeError",
"(",
"\"Did not converge after {} iterations\"",
".",
"format",
"(",
"max_iter",
")",
")"
] |
Iteratively refine LSR estimates until convergence.
Raises
------
RuntimeError
If the algorithm does not converge after ``max_iter`` iterations.
|
[
"Iteratively",
"refine",
"LSR",
"estimates",
"until",
"convergence",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/lsr.py#L20-L33
|
lucasmaystre/choix
|
choix/lsr.py
|
lsr_pairwise
|
def lsr_pairwise(n_items, data, alpha=0.0, initial_params=None):
"""Compute the LSR estimate of model parameters.
This function implements the Luce Spectral Ranking inference algorithm
[MG15]_ for pairwise-comparison data (see :ref:`data-pairwise`).
The argument ``initial_params`` can be used to iteratively refine an
existing parameter estimate (see the implementation of
:func:`~choix.ilsr_pairwise` for an idea on how this works). If it is set
to `None` (the default), the all-ones vector is used.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Pairwise-comparison data.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to build the transition rates of the LSR Markov chain.
Returns
-------
params : numpy.ndarray
An estimate of model parameters.
"""
weights, chain = _init_lsr(n_items, alpha, initial_params)
for winner, loser in data:
chain[loser, winner] += 1 / (weights[winner] + weights[loser])
chain -= np.diag(chain.sum(axis=1))
return log_transform(statdist(chain))
|
python
|
def lsr_pairwise(n_items, data, alpha=0.0, initial_params=None):
"""Compute the LSR estimate of model parameters.
This function implements the Luce Spectral Ranking inference algorithm
[MG15]_ for pairwise-comparison data (see :ref:`data-pairwise`).
The argument ``initial_params`` can be used to iteratively refine an
existing parameter estimate (see the implementation of
:func:`~choix.ilsr_pairwise` for an idea on how this works). If it is set
to `None` (the default), the all-ones vector is used.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Pairwise-comparison data.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to build the transition rates of the LSR Markov chain.
Returns
-------
params : numpy.ndarray
An estimate of model parameters.
"""
weights, chain = _init_lsr(n_items, alpha, initial_params)
for winner, loser in data:
chain[loser, winner] += 1 / (weights[winner] + weights[loser])
chain -= np.diag(chain.sum(axis=1))
return log_transform(statdist(chain))
|
[
"def",
"lsr_pairwise",
"(",
"n_items",
",",
"data",
",",
"alpha",
"=",
"0.0",
",",
"initial_params",
"=",
"None",
")",
":",
"weights",
",",
"chain",
"=",
"_init_lsr",
"(",
"n_items",
",",
"alpha",
",",
"initial_params",
")",
"for",
"winner",
",",
"loser",
"in",
"data",
":",
"chain",
"[",
"loser",
",",
"winner",
"]",
"+=",
"1",
"/",
"(",
"weights",
"[",
"winner",
"]",
"+",
"weights",
"[",
"loser",
"]",
")",
"chain",
"-=",
"np",
".",
"diag",
"(",
"chain",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
")",
"return",
"log_transform",
"(",
"statdist",
"(",
"chain",
")",
")"
] |
Compute the LSR estimate of model parameters.
This function implements the Luce Spectral Ranking inference algorithm
[MG15]_ for pairwise-comparison data (see :ref:`data-pairwise`).
The argument ``initial_params`` can be used to iteratively refine an
existing parameter estimate (see the implementation of
:func:`~choix.ilsr_pairwise` for an idea on how this works). If it is set
to `None` (the default), the all-ones vector is used.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Pairwise-comparison data.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to build the transition rates of the LSR Markov chain.
Returns
-------
params : numpy.ndarray
An estimate of model parameters.
|
[
"Compute",
"the",
"LSR",
"estimate",
"of",
"model",
"parameters",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/lsr.py#L36-L71
|
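A small end-to-end sketch; the win/lose pairs are invented:

import numpy as np
import choix

n_items = 4
data = [(0, 1), (0, 2), (1, 3), (2, 3), (3, 0)]  # (winner, loser) pairs
params = choix.lsr_pairwise(n_items, data, alpha=0.01)
ranking = np.argsort(-params)  # item indices, strongest first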
lucasmaystre/choix
|
choix/lsr.py
|
ilsr_pairwise
|
def ilsr_pairwise(
n_items, data, alpha=0.0, initial_params=None, max_iter=100, tol=1e-8):
"""Compute the ML estimate of model parameters using I-LSR.
This function computes the maximum-likelihood (ML) estimate of model
parameters given pairwise-comparison data (see :ref:`data-pairwise`), using
the iterative Luce Spectral Ranking algorithm [MG15]_.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Pairwise-comparison data.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
"""
fun = functools.partial(
lsr_pairwise, n_items=n_items, data=data, alpha=alpha)
return _ilsr(fun, initial_params, max_iter, tol)
|
python
|
def ilsr_pairwise(
n_items, data, alpha=0.0, initial_params=None, max_iter=100, tol=1e-8):
"""Compute the ML estimate of model parameters using I-LSR.
This function computes the maximum-likelihood (ML) estimate of model
parameters given pairwise-comparison data (see :ref:`data-pairwise`), using
the iterative Luce Spectral Ranking algorithm [MG15]_.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Pairwise-comparison data.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
"""
fun = functools.partial(
lsr_pairwise, n_items=n_items, data=data, alpha=alpha)
return _ilsr(fun, initial_params, max_iter, tol)
|
[
"def",
"ilsr_pairwise",
"(",
"n_items",
",",
"data",
",",
"alpha",
"=",
"0.0",
",",
"initial_params",
"=",
"None",
",",
"max_iter",
"=",
"100",
",",
"tol",
"=",
"1e-8",
")",
":",
"fun",
"=",
"functools",
".",
"partial",
"(",
"lsr_pairwise",
",",
"n_items",
"=",
"n_items",
",",
"data",
"=",
"data",
",",
"alpha",
"=",
"alpha",
")",
"return",
"_ilsr",
"(",
"fun",
",",
"initial_params",
",",
"max_iter",
",",
"tol",
")"
] |
Compute the ML estimate of model parameters using I-LSR.
This function computes the maximum-likelihood (ML) estimate of model
parameters given pairwise-comparison data (see :ref:`data-pairwise`), using
the iterative Luce Spectral Ranking algorithm [MG15]_.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Pairwise-comparison data.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
|
[
"Compute",
"the",
"ML",
"estimate",
"of",
"model",
"parameters",
"using",
"I",
"-",
"LSR",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/lsr.py#L74-L109
|
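ilsr_pairwise is the fixed-point iteration of the spectral estimate; with the toy data from the sketch above, the docstring's refinement idea spelled out by hand looks like this (the manual loop omits the convergence test that _ilsr adds):

params = choix.ilsr_pairwise(n_items, data, alpha=0.01)
# Morally equivalent manual refinement:
est = None
for _ in range(20):
    est = choix.lsr_pairwise(n_items, data, alpha=0.01, initial_params=est)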
lucasmaystre/choix
|
choix/lsr.py
|
lsr_pairwise_dense
|
def lsr_pairwise_dense(comp_mat, alpha=0.0, initial_params=None):
"""Compute the LSR estimate of model parameters given dense data.
This function implements the Luce Spectral Ranking inference algorithm
[MG15]_ for dense pairwise-comparison data.
The data is described by a pairwise-comparison matrix ``comp_mat`` such
that ``comp_mat[i,j]`` contains the number of times that item ``i`` wins
against item ``j``.
In comparison to :func:`~choix.lsr_pairwise`, this function is particularly
efficient for dense pairwise-comparison datasets (i.e., containing many
comparisons for a large fraction of item pairs).
The argument ``initial_params`` can be used to iteratively refine an
existing parameter estimate (see the implementation of
:func:`~choix.ilsr_pairwise` for an idea on how this works). If it is set
to `None` (the default), the all-ones vector is used.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
comp_mat : np.array
2D square matrix describing the pairwise-comparison outcomes.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to build the transition rates of the LSR Markov chain.
Returns
-------
params : np.array
An estimate of model parameters.
"""
n_items = comp_mat.shape[0]
ws, chain = _init_lsr(n_items, alpha, initial_params)
denom = np.tile(ws, (n_items, 1))
chain += comp_mat.T / (denom + denom.T)
chain -= np.diag(chain.sum(axis=1))
return log_transform(statdist(chain))
|
python
|
def lsr_pairwise_dense(comp_mat, alpha=0.0, initial_params=None):
"""Compute the LSR estimate of model parameters given dense data.
This function implements the Luce Spectral Ranking inference algorithm
[MG15]_ for dense pairwise-comparison data.
The data is described by a pairwise-comparison matrix ``comp_mat`` such
that ``comp_mat[i,j]`` contains the number of times that item ``i`` wins
against item ``j``.
In comparison to :func:`~choix.lsr_pairwise`, this function is particularly
efficient for dense pairwise-comparison datasets (i.e., containing many
comparisons for a large fraction of item pairs).
The argument ``initial_params`` can be used to iteratively refine an
existing parameter estimate (see the implementation of
:func:`~choix.ilsr_pairwise` for an idea on how this works). If it is set
to `None` (the default), the all-ones vector is used.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
comp_mat : np.array
2D square matrix describing the pairwise-comparison outcomes.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to build the transition rates of the LSR Markov chain.
Returns
-------
params : np.array
An estimate of model parameters.
"""
n_items = comp_mat.shape[0]
ws, chain = _init_lsr(n_items, alpha, initial_params)
denom = np.tile(ws, (n_items, 1))
chain += comp_mat.T / (denom + denom.T)
chain -= np.diag(chain.sum(axis=1))
return log_transform(statdist(chain))
|
[
"def",
"lsr_pairwise_dense",
"(",
"comp_mat",
",",
"alpha",
"=",
"0.0",
",",
"initial_params",
"=",
"None",
")",
":",
"n_items",
"=",
"comp_mat",
".",
"shape",
"[",
"0",
"]",
"ws",
",",
"chain",
"=",
"_init_lsr",
"(",
"n_items",
",",
"alpha",
",",
"initial_params",
")",
"denom",
"=",
"np",
".",
"tile",
"(",
"ws",
",",
"(",
"n_items",
",",
"1",
")",
")",
"chain",
"+=",
"comp_mat",
".",
"T",
"/",
"(",
"denom",
"+",
"denom",
".",
"T",
")",
"chain",
"-=",
"np",
".",
"diag",
"(",
"chain",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
")",
"return",
"log_transform",
"(",
"statdist",
"(",
"chain",
")",
")"
] |
Compute the LSR estimate of model parameters given dense data.
This function implements the Luce Spectral Ranking inference algorithm
[MG15]_ for dense pairwise-comparison data.
The data is described by a pairwise-comparison matrix ``comp_mat`` such
that ``comp_mat[i,j]`` contains the number of times that item ``i`` wins
against item ``j``.
In comparison to :func:`~choix.lsr_pairwise`, this function is particularly
efficient for dense pairwise-comparison datasets (i.e., containing many
comparisons for a large fraction of item pairs).
The argument ``initial_params`` can be used to iteratively refine an
existing parameter estimate (see the implementation of
:func:`~choix.ilsr_pairwise` for an idea on how this works). If it is set
to `None` (the default), the all-ones vector is used.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
comp_mat : np.array
2D square matrix describing the pairwise-comparison outcomes.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to build the transition rates of the LSR Markov chain.
Returns
-------
params : np.array
An estimate of model parameters.
|
[
"Compute",
"the",
"LSR",
"estimate",
"of",
"model",
"parameters",
"given",
"dense",
"data",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/lsr.py#L112-L154
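A minimal usage sketch for the function above (not part of the source): it assumes ``choix`` is installed and exposes ``lsr_pairwise_dense`` at the package level, as the docstring's cross-references suggest, and the comparison counts below are made up for illustration.

import numpy as np
import choix

# Illustrative counts: comp_mat[i, j] = number of times item i beat item j.
comp_mat = np.array([
    [0., 3., 1.],
    [1., 0., 4.],
    [2., 1., 0.],
])

params = choix.lsr_pairwise_dense(comp_mat, alpha=0.1)
print(params)                    # one log-strength parameter per item
print(np.argsort(params)[::-1])  # items from strongest to weakest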
|
lucasmaystre/choix
|
choix/lsr.py
|
ilsr_pairwise_dense
|
def ilsr_pairwise_dense(
comp_mat, alpha=0.0, initial_params=None, max_iter=100, tol=1e-8):
"""Compute the ML estimate of model parameters given dense data.
This function computes the maximum-likelihood (ML) estimate of model
parameters given dense pairwise-comparison data.
The data is described by a pairwise-comparison matrix ``comp_mat`` such
that ``comp_mat[i,j]`` contains the number of times that item ``i`` wins
against item ``j``.
In comparison to :func:`~choix.ilsr_pairwise`, this function is
particularly efficient for dense pairwise-comparison datasets (i.e.,
containing many comparisons for a large fraction of item pairs).
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
comp_mat : np.array
2D square matrix describing the pairwise-comparison outcomes.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
"""
fun = functools.partial(
lsr_pairwise_dense, comp_mat=comp_mat, alpha=alpha)
return _ilsr(fun, initial_params, max_iter, tol)
|
python
|
def ilsr_pairwise_dense(
comp_mat, alpha=0.0, initial_params=None, max_iter=100, tol=1e-8):
"""Compute the ML estimate of model parameters given dense data.
This function computes the maximum-likelihood (ML) estimate of model
parameters given dense pairwise-comparison data.
The data is described by a pairwise-comparison matrix ``comp_mat`` such
that ``comp_mat[i,j]`` contains the number of times that item ``i`` wins
against item ``j``.
In comparison to :func:`~choix.ilsr_pairwise`, this function is
particularly efficient for dense pairwise-comparison datasets (i.e.,
containing many comparisons for a large fraction of item pairs).
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
comp_mat : np.array
2D square matrix describing the pairwise-comparison outcomes.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
"""
fun = functools.partial(
lsr_pairwise_dense, comp_mat=comp_mat, alpha=alpha)
return _ilsr(fun, initial_params, max_iter, tol)
|
[
"def",
"ilsr_pairwise_dense",
"(",
"comp_mat",
",",
"alpha",
"=",
"0.0",
",",
"initial_params",
"=",
"None",
",",
"max_iter",
"=",
"100",
",",
"tol",
"=",
"1e-8",
")",
":",
"fun",
"=",
"functools",
".",
"partial",
"(",
"lsr_pairwise_dense",
",",
"comp_mat",
"=",
"comp_mat",
",",
"alpha",
"=",
"alpha",
")",
"return",
"_ilsr",
"(",
"fun",
",",
"initial_params",
",",
"max_iter",
",",
"tol",
")"
] |
Compute the ML estimate of model parameters given dense data.
This function computes the maximum-likelihood (ML) estimate of model
parameters given dense pairwise-comparison data.
The data is described by a pairwise-comparison matrix ``comp_mat`` such
that ``comp_mat[i,j]`` contains the number of times that item ``i`` wins
against item ``j``.
In comparison to :func:`~choix.ilsr_pairwise`, this function is
particularly efficient for dense pairwise-comparison datasets (i.e.,
containing many comparisons for a large fraction of item pairs).
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
comp_mat : np.array
2D square matrix describing the pairwise-comparison outcomes.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
|
[
"Compute",
"the",
"ML",
"estimate",
"of",
"model",
"parameters",
"given",
"dense",
"data",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/lsr.py#L157-L197
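An illustrative call to the iterative variant, reusing the made-up ``comp_mat`` from the previous sketch; the warm-start via ``initial_params`` mirrors the refinement pattern the docstring describes, and ``max_iter`` and ``tol`` are the documented convergence knobs.

import numpy as np
import choix

comp_mat = np.array([[0., 3., 1.], [1., 0., 4.], [2., 1., 0.]])

# Warm-start the ML estimate from a single LSR pass.
start = choix.lsr_pairwise_dense(comp_mat, alpha=0.1)
params = choix.ilsr_pairwise_dense(
    comp_mat, alpha=0.1, initial_params=start, max_iter=100, tol=1e-8)
print(params)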
|
lucasmaystre/choix
|
choix/lsr.py
|
rank_centrality
|
def rank_centrality(n_items, data, alpha=0.0):
"""Compute the Rank Centrality estimate of model parameters.
This function implements Negahban et al.'s Rank Centrality algorithm
[NOS12]_. The algorithm is similar to :func:`~choix.ilsr_pairwise`, but
considers the *ratio* of wins for each pair (instead of the total count).
The transition rates of the Rank Centrality Markov chain are initialized
with ``alpha``. When ``alpha > 0``, this corresponds to a form of
regularization (see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Pairwise-comparison data.
alpha : float, optional
Regularization parameter.
Returns
-------
params : numpy.ndarray
An estimate of model parameters.
"""
_, chain = _init_lsr(n_items, alpha, None)
for winner, loser in data:
chain[loser, winner] += 1.0
# Transform the counts into ratios.
idx = chain > 0 # Indices (i,j) of non-zero entries.
chain[idx] = chain[idx] / (chain + chain.T)[idx]
# Finalize the Markov chain by adding the self-transition rate.
chain -= np.diag(chain.sum(axis=1))
return log_transform(statdist(chain))
|
python
|
def rank_centrality(n_items, data, alpha=0.0):
"""Compute the Rank Centrality estimate of model parameters.
This function implements Negahban et al.'s Rank Centrality algorithm
[NOS12]_. The algorithm is similar to :func:`~choix.ilsr_pairwise`, but
considers the *ratio* of wins for each pair (instead of the total count).
The transition rates of the Rank Centrality Markov chain are initialized
with ``alpha``. When ``alpha > 0``, this corresponds to a form of
regularization (see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Pairwise-comparison data.
alpha : float, optional
Regularization parameter.
Returns
-------
params : numpy.ndarray
An estimate of model parameters.
"""
_, chain = _init_lsr(n_items, alpha, None)
for winner, loser in data:
chain[loser, winner] += 1.0
# Transform the counts into ratios.
idx = chain > 0 # Indices (i,j) of non-zero entries.
chain[idx] = chain[idx] / (chain + chain.T)[idx]
# Finalize the Markov chain by adding the self-transition rate.
chain -= np.diag(chain.sum(axis=1))
return log_transform(statdist(chain))
|
[
"def",
"rank_centrality",
"(",
"n_items",
",",
"data",
",",
"alpha",
"=",
"0.0",
")",
":",
"_",
",",
"chain",
"=",
"_init_lsr",
"(",
"n_items",
",",
"alpha",
",",
"None",
")",
"for",
"winner",
",",
"loser",
"in",
"data",
":",
"chain",
"[",
"loser",
",",
"winner",
"]",
"+=",
"1.0",
"# Transform the counts into ratios.",
"idx",
"=",
"chain",
">",
"0",
"# Indices (i,j) of non-zero entries.",
"chain",
"[",
"idx",
"]",
"=",
"chain",
"[",
"idx",
"]",
"/",
"(",
"chain",
"+",
"chain",
".",
"T",
")",
"[",
"idx",
"]",
"# Finalize the Markov chain by adding the self-transition rate.",
"chain",
"-=",
"np",
".",
"diag",
"(",
"chain",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
")",
"return",
"log_transform",
"(",
"statdist",
"(",
"chain",
")",
")"
] |
Compute the Rank Centrality estimate of model parameters.
This function implements Negahban et al.'s Rank Centrality algorithm
[NOS12]_. The algorithm is similar to :func:`~choix.ilsr_pairwise`, but
considers the *ratio* of wins for each pair (instead of the total count).
The transition rates of the Rank Centrality Markov chain are initialized
with ``alpha``. When ``alpha > 0``, this corresponds to a form of
regularization (see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Pairwise-comparison data.
alpha : float, optional
Regularization parameter.
Returns
-------
params : numpy.ndarray
An estimate of model parameters.
|
[
"Compute",
"the",
"Rank",
"Centrality",
"estimate",
"of",
"model",
"parameters",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/lsr.py#L200-L233
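A hedged usage sketch with made-up data, assuming ``rank_centrality`` is exposed at the package level. A small positive ``alpha`` keeps the Markov chain irreducible when some pairs are never compared (the regularization the docstring mentions).

import choix

# Pairwise outcomes as (winner, loser) pairs, matching the loop above.
data = [(0, 1), (0, 1), (1, 2), (2, 0), (0, 2)]
params = choix.rank_centrality(3, data, alpha=0.01)
print(params)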
|
lucasmaystre/choix
|
choix/lsr.py
|
lsr_rankings
|
def lsr_rankings(n_items, data, alpha=0.0, initial_params=None):
"""Compute the LSR estimate of model parameters.
This function implements the Luce Spectral Ranking inference algorithm
[MG15]_ for ranking data (see :ref:`data-rankings`).
The argument ``initial_params`` can be used to iteratively refine an
existing parameter estimate (see the implementation of
:func:`~choix.ilsr_rankings` for an idea on how this works). If it is set
to `None` (the default), the all-ones vector is used.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Ranking data.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to build the transition rates of the LSR Markov chain.
Returns
-------
params : numpy.ndarray
An estimate of model parameters.
"""
weights, chain = _init_lsr(n_items, alpha, initial_params)
for ranking in data:
sum_ = weights.take(ranking).sum()
for i, winner in enumerate(ranking[:-1]):
val = 1.0 / sum_
for loser in ranking[i+1:]:
chain[loser, winner] += val
sum_ -= weights[winner]
chain -= np.diag(chain.sum(axis=1))
return log_transform(statdist(chain))
|
python
|
def lsr_rankings(n_items, data, alpha=0.0, initial_params=None):
"""Compute the LSR estimate of model parameters.
This function implements the Luce Spectral Ranking inference algorithm
[MG15]_ for ranking data (see :ref:`data-rankings`).
The argument ``initial_params`` can be used to iteratively refine an
existing parameter estimate (see the implementation of
:func:`~choix.ilsr_rankings` for an idea on how this works). If it is set
to `None` (the default), the all-ones vector is used.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Ranking data.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to build the transition rates of the LSR Markov chain.
Returns
-------
params : numpy.ndarray
An estimate of model parameters.
"""
weights, chain = _init_lsr(n_items, alpha, initial_params)
for ranking in data:
sum_ = weights.take(ranking).sum()
for i, winner in enumerate(ranking[:-1]):
val = 1.0 / sum_
for loser in ranking[i+1:]:
chain[loser, winner] += val
sum_ -= weights[winner]
chain -= np.diag(chain.sum(axis=1))
return log_transform(statdist(chain))
|
[
"def",
"lsr_rankings",
"(",
"n_items",
",",
"data",
",",
"alpha",
"=",
"0.0",
",",
"initial_params",
"=",
"None",
")",
":",
"weights",
",",
"chain",
"=",
"_init_lsr",
"(",
"n_items",
",",
"alpha",
",",
"initial_params",
")",
"for",
"ranking",
"in",
"data",
":",
"sum_",
"=",
"weights",
".",
"take",
"(",
"ranking",
")",
".",
"sum",
"(",
")",
"for",
"i",
",",
"winner",
"in",
"enumerate",
"(",
"ranking",
"[",
":",
"-",
"1",
"]",
")",
":",
"val",
"=",
"1.0",
"/",
"sum_",
"for",
"loser",
"in",
"ranking",
"[",
"i",
"+",
"1",
":",
"]",
":",
"chain",
"[",
"loser",
",",
"winner",
"]",
"+=",
"val",
"sum_",
"-=",
"weights",
"[",
"winner",
"]",
"chain",
"-=",
"np",
".",
"diag",
"(",
"chain",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
")",
"return",
"log_transform",
"(",
"statdist",
"(",
"chain",
")",
")"
] |
Compute the LSR estimate of model parameters.
This function implements the Luce Spectral Ranking inference algorithm
[MG15]_ for ranking data (see :ref:`data-rankings`).
The argument ``initial_params`` can be used to iteratively refine an
existing parameter estimate (see the implementation of
:func:`~choix.ilsr_rankings` for an idea on how this works). If it is set
to `None` (the default), the all-ones vector is used.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Ranking data.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to build the transition rates of the LSR Markov chain.
Returns
-------
params : numpy.ndarray
An estimate of model parameters.
|
[
"Compute",
"the",
"LSR",
"estimate",
"of",
"model",
"parameters",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/lsr.py#L236-L276
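A usage sketch with invented rankings. Per the loop above, each ranking lists item indices from most preferred to least preferred: every item beats all items that appear after it.

import choix

data = [
    [0, 2, 1],  # item 0 first, then item 2, then item 1
    [1, 0, 2],
    [0, 1, 2],
]
params = choix.lsr_rankings(3, data, alpha=0.05)
print(params)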
|
lucasmaystre/choix
|
choix/lsr.py
|
ilsr_rankings
|
def ilsr_rankings(
n_items, data, alpha=0.0, initial_params=None, max_iter=100, tol=1e-8):
"""Compute the ML estimate of model parameters using I-LSR.
This function computes the maximum-likelihood (ML) estimate of model
parameters given ranking data (see :ref:`data-rankings`), using the
iterative Luce Spectral Ranking algorithm [MG15]_.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Ranking data.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
"""
fun = functools.partial(
lsr_rankings, n_items=n_items, data=data, alpha=alpha)
return _ilsr(fun, initial_params, max_iter, tol)
|
python
|
def ilsr_rankings(
n_items, data, alpha=0.0, initial_params=None, max_iter=100, tol=1e-8):
"""Compute the ML estimate of model parameters using I-LSR.
This function computes the maximum-likelihood (ML) estimate of model
parameters given ranking data (see :ref:`data-rankings`), using the
iterative Luce Spectral Ranking algorithm [MG15]_.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Ranking data.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
"""
fun = functools.partial(
lsr_rankings, n_items=n_items, data=data, alpha=alpha)
return _ilsr(fun, initial_params, max_iter, tol)
|
[
"def",
"ilsr_rankings",
"(",
"n_items",
",",
"data",
",",
"alpha",
"=",
"0.0",
",",
"initial_params",
"=",
"None",
",",
"max_iter",
"=",
"100",
",",
"tol",
"=",
"1e-8",
")",
":",
"fun",
"=",
"functools",
".",
"partial",
"(",
"lsr_rankings",
",",
"n_items",
"=",
"n_items",
",",
"data",
"=",
"data",
",",
"alpha",
"=",
"alpha",
")",
"return",
"_ilsr",
"(",
"fun",
",",
"initial_params",
",",
"max_iter",
",",
"tol",
")"
] |
Compute the ML estimate of model parameters using I-LSR.
This function computes the maximum-likelihood (ML) estimate of model
parameters given ranking data (see :ref:`data-rankings`), using the
iterative Luce Spectral Ranking algorithm [MG15]_.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Ranking data.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
|
[
"Compute",
"the",
"ML",
"estimate",
"of",
"model",
"parameters",
"using",
"I",
"-",
"LSR",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/lsr.py#L279-L314
|
lucasmaystre/choix
|
choix/lsr.py
|
lsr_top1
|
def lsr_top1(n_items, data, alpha=0.0, initial_params=None):
"""Compute the LSR estimate of model parameters.
This function implements the Luce Spectral Ranking inference algorithm
[MG15]_ for top-1 data (see :ref:`data-top1`).
The argument ``initial_params`` can be used to iteratively refine an
existing parameter estimate (see the implementation of
:func:`~choix.ilsr_top1` for an idea on how this works). If it is set to
`None` (the default), the all-ones vector is used.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Top-1 data.
    alpha : float, optional
        Regularization parameter.
    initial_params : array_like, optional
        Parameters used to build the transition rates of the LSR Markov chain.
Returns
-------
params : numpy.ndarray
An estimate of model parameters.
"""
weights, chain = _init_lsr(n_items, alpha, initial_params)
for winner, losers in data:
val = 1 / (weights.take(losers).sum() + weights[winner])
for loser in losers:
chain[loser, winner] += val
chain -= np.diag(chain.sum(axis=1))
return log_transform(statdist(chain))
|
python
|
def lsr_top1(n_items, data, alpha=0.0, initial_params=None):
"""Compute the LSR estimate of model parameters.
This function implements the Luce Spectral Ranking inference algorithm
[MG15]_ for top-1 data (see :ref:`data-top1`).
The argument ``initial_params`` can be used to iteratively refine an
existing parameter estimate (see the implementation of
:func:`~choix.ilsr_top1` for an idea on how this works). If it is set to
`None` (the default), the all-ones vector is used.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Top-1 data.
    alpha : float, optional
        Regularization parameter.
    initial_params : array_like, optional
        Parameters used to build the transition rates of the LSR Markov chain.
Returns
-------
params : numpy.ndarray
An estimate of model parameters.
"""
weights, chain = _init_lsr(n_items, alpha, initial_params)
for winner, losers in data:
val = 1 / (weights.take(losers).sum() + weights[winner])
for loser in losers:
chain[loser, winner] += val
chain -= np.diag(chain.sum(axis=1))
return log_transform(statdist(chain))
|
[
"def",
"lsr_top1",
"(",
"n_items",
",",
"data",
",",
"alpha",
"=",
"0.0",
",",
"initial_params",
"=",
"None",
")",
":",
"weights",
",",
"chain",
"=",
"_init_lsr",
"(",
"n_items",
",",
"alpha",
",",
"initial_params",
")",
"for",
"winner",
",",
"losers",
"in",
"data",
":",
"val",
"=",
"1",
"/",
"(",
"weights",
".",
"take",
"(",
"losers",
")",
".",
"sum",
"(",
")",
"+",
"weights",
"[",
"winner",
"]",
")",
"for",
"loser",
"in",
"losers",
":",
"chain",
"[",
"loser",
",",
"winner",
"]",
"+=",
"val",
"chain",
"-=",
"np",
".",
"diag",
"(",
"chain",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
")",
"return",
"log_transform",
"(",
"statdist",
"(",
"chain",
")",
")"
] |
Compute the LSR estimate of model parameters.
This function implements the Luce Spectral Ranking inference algorithm
[MG15]_ for top-1 data (see :ref:`data-top1`).
The argument ``initial_params`` can be used to iteratively refine an
existing parameter estimate (see the implementation of
:func:`~choix.ilsr_top1` for an idea on how this works). If it is set to
`None` (the default), the all-ones vector is used.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Top-1 data.
alpha : float, optional
    Regularization parameter.
initial_params : array_like, optional
    Parameters used to build the transition rates of the LSR Markov chain.
Returns
-------
params : numpy.ndarray
An estimate of model parameters.
|
[
"Compute",
"the",
"LSR",
"estimate",
"of",
"model",
"parameters",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/lsr.py#L317-L354
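A usage sketch with invented observations. Per the loop above, each observation pairs the chosen item with the alternatives it was chosen over.

import choix

data = [
    (1, [0, 2]),  # item 1 chosen out of {0, 1, 2}
    (0, [1]),
    (2, [0, 1]),
]
params = choix.lsr_top1(3, data, alpha=0.05)
print(params)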
|
lucasmaystre/choix
|
choix/lsr.py
|
ilsr_top1
|
def ilsr_top1(
n_items, data, alpha=0.0, initial_params=None, max_iter=100, tol=1e-8):
"""Compute the ML estimate of model parameters using I-LSR.
This function computes the maximum-likelihood (ML) estimate of model
parameters given top-1 data (see :ref:`data-top1`), using the
iterative Luce Spectral Ranking algorithm [MG15]_.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Top-1 data.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
"""
fun = functools.partial(lsr_top1, n_items=n_items, data=data, alpha=alpha)
return _ilsr(fun, initial_params, max_iter, tol)
|
python
|
def ilsr_top1(
n_items, data, alpha=0.0, initial_params=None, max_iter=100, tol=1e-8):
"""Compute the ML estimate of model parameters using I-LSR.
This function computes the maximum-likelihood (ML) estimate of model
parameters given top-1 data (see :ref:`data-top1`), using the
iterative Luce Spectral Ranking algorithm [MG15]_.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Top-1 data.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
"""
fun = functools.partial(lsr_top1, n_items=n_items, data=data, alpha=alpha)
return _ilsr(fun, initial_params, max_iter, tol)
|
[
"def",
"ilsr_top1",
"(",
"n_items",
",",
"data",
",",
"alpha",
"=",
"0.0",
",",
"initial_params",
"=",
"None",
",",
"max_iter",
"=",
"100",
",",
"tol",
"=",
"1e-8",
")",
":",
"fun",
"=",
"functools",
".",
"partial",
"(",
"lsr_top1",
",",
"n_items",
"=",
"n_items",
",",
"data",
"=",
"data",
",",
"alpha",
"=",
"alpha",
")",
"return",
"_ilsr",
"(",
"fun",
",",
"initial_params",
",",
"max_iter",
",",
"tol",
")"
] |
Compute the ML estimate of model parameters using I-LSR.
This function computes the maximum-likelihood (ML) estimate of model
parameters given top-1 data (see :ref:`data-top1`), using the
iterative Luce Spectral Ranking algorithm [MG15]_.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Top-1 data.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
|
[
"Compute",
"the",
"ML",
"estimate",
"of",
"model",
"parameters",
"using",
"I",
"-",
"LSR",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/lsr.py#L357-L391
|
lucasmaystre/choix
|
choix/ep.py
|
ep_pairwise
|
def ep_pairwise(n_items, data, alpha, model="logit", max_iter=100,
initial_state=None):
"""Compute a distribution of model parameters using the EP algorithm.
This function computes an approximate Bayesian posterior probability
distribution over model parameters, given pairwise-comparison data (see
:ref:`data-pairwise`). It uses the expectation propagation algorithm, as
presented, e.g., in [CG05]_.
The prior distribution is assumed to be isotropic Gaussian with variance
    ``1 / alpha``. The posterior is approximated by a general multivariate
Gaussian distribution, described by a mean vector and a covariance matrix.
Two different observation models are available. ``logit`` (default) assumes
that pairwise-comparison outcomes follow from a Bradley-Terry model.
``probit`` assumes that the outcomes follow from Thurstone's model.
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Pairwise-comparison data.
alpha : float
Inverse variance of the (isotropic) prior.
model : str, optional
Observation model. Either "logit" or "probit".
max_iter : int, optional
Maximum number of iterations allowed.
initial_state : tuple of array_like, optional
Natural parameters used to initialize the EP algorithm.
Returns
-------
mean : numpy.ndarray
The mean vector of the approximate Gaussian posterior.
cov : numpy.ndarray
The covariance matrix of the approximate Gaussian posterior.
Raises
------
ValueError
If the observation model is not "logit" or "probit".
"""
if model == "logit":
match_moments = _match_moments_logit
elif model == "probit":
match_moments = _match_moments_probit
else:
raise ValueError("unknown model '{}'".format(model))
return _ep_pairwise(
n_items, data, alpha, match_moments, max_iter, initial_state)
|
python
|
def ep_pairwise(n_items, data, alpha, model="logit", max_iter=100,
initial_state=None):
"""Compute a distribution of model parameters using the EP algorithm.
This function computes an approximate Bayesian posterior probability
distribution over model parameters, given pairwise-comparison data (see
:ref:`data-pairwise`). It uses the expectation propagation algorithm, as
presented, e.g., in [CG05]_.
The prior distribution is assumed to be isotropic Gaussian with variance
    ``1 / alpha``. The posterior is approximated by a general multivariate
Gaussian distribution, described by a mean vector and a covariance matrix.
Two different observation models are available. ``logit`` (default) assumes
that pairwise-comparison outcomes follow from a Bradley-Terry model.
``probit`` assumes that the outcomes follow from Thurstone's model.
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Pairwise-comparison data.
alpha : float
Inverse variance of the (isotropic) prior.
model : str, optional
Observation model. Either "logit" or "probit".
max_iter : int, optional
Maximum number of iterations allowed.
initial_state : tuple of array_like, optional
Natural parameters used to initialize the EP algorithm.
Returns
-------
mean : numpy.ndarray
The mean vector of the approximate Gaussian posterior.
cov : numpy.ndarray
The covariance matrix of the approximate Gaussian posterior.
Raises
------
ValueError
If the observation model is not "logit" or "probit".
"""
if model == "logit":
match_moments = _match_moments_logit
elif model == "probit":
match_moments = _match_moments_probit
else:
raise ValueError("unknown model '{}'".format(model))
return _ep_pairwise(
n_items, data, alpha, match_moments, max_iter, initial_state)
|
[
"def",
"ep_pairwise",
"(",
"n_items",
",",
"data",
",",
"alpha",
",",
"model",
"=",
"\"logit\"",
",",
"max_iter",
"=",
"100",
",",
"initial_state",
"=",
"None",
")",
":",
"if",
"model",
"==",
"\"logit\"",
":",
"match_moments",
"=",
"_match_moments_logit",
"elif",
"model",
"==",
"\"probit\"",
":",
"match_moments",
"=",
"_match_moments_probit",
"else",
":",
"raise",
"ValueError",
"(",
"\"unknown model '{}'\"",
".",
"format",
"(",
"model",
")",
")",
"return",
"_ep_pairwise",
"(",
"n_items",
",",
"data",
",",
"alpha",
",",
"match_moments",
",",
"max_iter",
",",
"initial_state",
")"
] |
Compute a distribution of model parameters using the EP algorithm.
This function computes an approximate Bayesian posterior probability
distribution over model parameters, given pairwise-comparison data (see
:ref:`data-pairwise`). It uses the expectation propagation algorithm, as
presented, e.g., in [CG05]_.
The prior distribution is assumed to be isotropic Gaussian with variance
``1 / alpha``. The posterior is approximated by a general multivariate
Gaussian distribution, described by a mean vector and a covariance matrix.
Two different observation models are available. ``logit`` (default) assumes
that pairwise-comparison outcomes follow from a Bradley-Terry model.
``probit`` assumes that the outcomes follow from Thurstone's model.
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Pairwise-comparison data.
alpha : float
Inverse variance of the (isotropic) prior.
model : str, optional
Observation model. Either "logit" or "probit".
max_iter : int, optional
Maximum number of iterations allowed.
initial_state : tuple of array_like, optional
Natural parameters used to initialize the EP algorithm.
Returns
-------
mean : numpy.ndarray
The mean vector of the approximate Gaussian posterior.
cov : numpy.ndarray
The covariance matrix of the approximate Gaussian posterior.
Raises
------
ValueError
If the observation model is not "logit" or "probit".
|
[
"Compute",
"a",
"distribution",
"of",
"model",
"parameters",
"using",
"the",
"EP",
"algorithm",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/ep.py#L32-L83
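A usage sketch with invented comparisons. The function returns the mean and covariance of the Gaussian posterior approximation, so the diagonal of ``cov`` gives per-item uncertainty.

import numpy as np
import choix

data = [(0, 1), (1, 2), (0, 2), (2, 1)]
mean, cov = choix.ep_pairwise(3, data, alpha=1.0, model="logit")

print(mean)                   # posterior mean of the item parameters
print(np.sqrt(np.diag(cov)))  # per-item posterior standard deviations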
|
lucasmaystre/choix
|
choix/ep.py
|
_ep_pairwise
|
def _ep_pairwise(
n_items, comparisons, alpha, match_moments, max_iter, initial_state):
"""Compute a distribution of model parameters using the EP algorithm.
Raises
------
RuntimeError
If the algorithm does not converge after ``max_iter`` iterations.
"""
    # Static variable that allows checking the number of iterations after the call.
_ep_pairwise.iterations = 0
m = len(comparisons)
prior_inv = alpha * np.eye(n_items)
if initial_state is None:
# Initially, mean and covariance come from the prior.
mean = np.zeros(n_items)
cov = (1 / alpha) * np.eye(n_items)
# Initialize the natural params in the function space.
tau = np.zeros(m)
nu = np.zeros(m)
# Initialize the natural params in the space of thetas.
prec = np.zeros((n_items, n_items))
xs = np.zeros(n_items)
else:
tau, nu = initial_state
mean, cov, xs, prec = _init_ws(
n_items, comparisons, prior_inv, tau, nu)
for _ in range(max_iter):
_ep_pairwise.iterations += 1
# Keep a copy of the old parameters for convergence testing.
tau_old = np.array(tau, copy=True)
nu_old = np.array(nu, copy=True)
for i in nprand.permutation(m):
a, b = comparisons[i]
# Update mean and variance in function space.
f_var = cov[a,a] + cov[b,b] - 2 * cov[a,b]
f_mean = mean[a] - mean[b]
# Cavity distribution.
tau_tot = 1.0 / f_var
nu_tot = tau_tot * f_mean
tau_cav = tau_tot - tau[i]
nu_cav = nu_tot - nu[i]
cov_cav = 1.0 / tau_cav
mean_cav = cov_cav * nu_cav
# Moment matching.
logpart, dlogpart, d2logpart = match_moments(mean_cav, cov_cav)
# Update factor params in the function space.
tau[i] = -d2logpart / (1 + d2logpart / tau_cav)
delta_tau = tau[i] - tau_old[i]
nu[i] = ((dlogpart - (nu_cav / tau_cav) * d2logpart)
/ (1 + d2logpart / tau_cav))
delta_nu = nu[i] - nu_old[i]
# Update factor params in the weight space.
prec[(a, a, b, b), (a, b, a, b)] += delta_tau * MAT_ONE_FLAT
xs[a] += delta_nu
xs[b] -= delta_nu
# Update mean and covariance.
if abs(delta_tau) > 0:
phi = -1.0 / ((1.0 / delta_tau) + f_var) * MAT_ONE
upd_mat = cov.take([a, b], axis=0)
cov = cov + upd_mat.T.dot(phi).dot(upd_mat)
mean = cov.dot(xs)
# Recompute the global parameters for stability.
cov = inv_posdef(prior_inv + prec)
mean = cov.dot(xs)
if _converged((tau, nu), (tau_old, nu_old)):
return mean, cov
raise RuntimeError(
"EP did not converge after {} iterations".format(max_iter))
|
python
|
def _ep_pairwise(
n_items, comparisons, alpha, match_moments, max_iter, initial_state):
"""Compute a distribution of model parameters using the EP algorithm.
Raises
------
RuntimeError
If the algorithm does not converge after ``max_iter`` iterations.
"""
    # Static variable that allows checking the number of iterations after the call.
_ep_pairwise.iterations = 0
m = len(comparisons)
prior_inv = alpha * np.eye(n_items)
if initial_state is None:
# Initially, mean and covariance come from the prior.
mean = np.zeros(n_items)
cov = (1 / alpha) * np.eye(n_items)
# Initialize the natural params in the function space.
tau = np.zeros(m)
nu = np.zeros(m)
# Initialize the natural params in the space of thetas.
prec = np.zeros((n_items, n_items))
xs = np.zeros(n_items)
else:
tau, nu = initial_state
mean, cov, xs, prec = _init_ws(
n_items, comparisons, prior_inv, tau, nu)
for _ in range(max_iter):
_ep_pairwise.iterations += 1
# Keep a copy of the old parameters for convergence testing.
tau_old = np.array(tau, copy=True)
nu_old = np.array(nu, copy=True)
for i in nprand.permutation(m):
a, b = comparisons[i]
# Update mean and variance in function space.
f_var = cov[a,a] + cov[b,b] - 2 * cov[a,b]
f_mean = mean[a] - mean[b]
# Cavity distribution.
tau_tot = 1.0 / f_var
nu_tot = tau_tot * f_mean
tau_cav = tau_tot - tau[i]
nu_cav = nu_tot - nu[i]
cov_cav = 1.0 / tau_cav
mean_cav = cov_cav * nu_cav
# Moment matching.
logpart, dlogpart, d2logpart = match_moments(mean_cav, cov_cav)
# Update factor params in the function space.
tau[i] = -d2logpart / (1 + d2logpart / tau_cav)
delta_tau = tau[i] - tau_old[i]
nu[i] = ((dlogpart - (nu_cav / tau_cav) * d2logpart)
/ (1 + d2logpart / tau_cav))
delta_nu = nu[i] - nu_old[i]
# Update factor params in the weight space.
prec[(a, a, b, b), (a, b, a, b)] += delta_tau * MAT_ONE_FLAT
xs[a] += delta_nu
xs[b] -= delta_nu
# Update mean and covariance.
if abs(delta_tau) > 0:
phi = -1.0 / ((1.0 / delta_tau) + f_var) * MAT_ONE
upd_mat = cov.take([a, b], axis=0)
cov = cov + upd_mat.T.dot(phi).dot(upd_mat)
mean = cov.dot(xs)
# Recompute the global parameters for stability.
cov = inv_posdef(prior_inv + prec)
mean = cov.dot(xs)
if _converged((tau, nu), (tau_old, nu_old)):
return mean, cov
raise RuntimeError(
"EP did not converge after {} iterations".format(max_iter))
|
[
"def",
"_ep_pairwise",
"(",
"n_items",
",",
"comparisons",
",",
"alpha",
",",
"match_moments",
",",
"max_iter",
",",
"initial_state",
")",
":",
"# Static variable that allows to check the # of iterations after the call.",
"_ep_pairwise",
".",
"iterations",
"=",
"0",
"m",
"=",
"len",
"(",
"comparisons",
")",
"prior_inv",
"=",
"alpha",
"*",
"np",
".",
"eye",
"(",
"n_items",
")",
"if",
"initial_state",
"is",
"None",
":",
"# Initially, mean and covariance come from the prior.",
"mean",
"=",
"np",
".",
"zeros",
"(",
"n_items",
")",
"cov",
"=",
"(",
"1",
"/",
"alpha",
")",
"*",
"np",
".",
"eye",
"(",
"n_items",
")",
"# Initialize the natural params in the function space.",
"tau",
"=",
"np",
".",
"zeros",
"(",
"m",
")",
"nu",
"=",
"np",
".",
"zeros",
"(",
"m",
")",
"# Initialize the natural params in the space of thetas.",
"prec",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_items",
",",
"n_items",
")",
")",
"xs",
"=",
"np",
".",
"zeros",
"(",
"n_items",
")",
"else",
":",
"tau",
",",
"nu",
"=",
"initial_state",
"mean",
",",
"cov",
",",
"xs",
",",
"prec",
"=",
"_init_ws",
"(",
"n_items",
",",
"comparisons",
",",
"prior_inv",
",",
"tau",
",",
"nu",
")",
"for",
"_",
"in",
"range",
"(",
"max_iter",
")",
":",
"_ep_pairwise",
".",
"iterations",
"+=",
"1",
"# Keep a copy of the old parameters for convergence testing.",
"tau_old",
"=",
"np",
".",
"array",
"(",
"tau",
",",
"copy",
"=",
"True",
")",
"nu_old",
"=",
"np",
".",
"array",
"(",
"nu",
",",
"copy",
"=",
"True",
")",
"for",
"i",
"in",
"nprand",
".",
"permutation",
"(",
"m",
")",
":",
"a",
",",
"b",
"=",
"comparisons",
"[",
"i",
"]",
"# Update mean and variance in function space.",
"f_var",
"=",
"cov",
"[",
"a",
",",
"a",
"]",
"+",
"cov",
"[",
"b",
",",
"b",
"]",
"-",
"2",
"*",
"cov",
"[",
"a",
",",
"b",
"]",
"f_mean",
"=",
"mean",
"[",
"a",
"]",
"-",
"mean",
"[",
"b",
"]",
"# Cavity distribution.",
"tau_tot",
"=",
"1.0",
"/",
"f_var",
"nu_tot",
"=",
"tau_tot",
"*",
"f_mean",
"tau_cav",
"=",
"tau_tot",
"-",
"tau",
"[",
"i",
"]",
"nu_cav",
"=",
"nu_tot",
"-",
"nu",
"[",
"i",
"]",
"cov_cav",
"=",
"1.0",
"/",
"tau_cav",
"mean_cav",
"=",
"cov_cav",
"*",
"nu_cav",
"# Moment matching.",
"logpart",
",",
"dlogpart",
",",
"d2logpart",
"=",
"match_moments",
"(",
"mean_cav",
",",
"cov_cav",
")",
"# Update factor params in the function space.",
"tau",
"[",
"i",
"]",
"=",
"-",
"d2logpart",
"/",
"(",
"1",
"+",
"d2logpart",
"/",
"tau_cav",
")",
"delta_tau",
"=",
"tau",
"[",
"i",
"]",
"-",
"tau_old",
"[",
"i",
"]",
"nu",
"[",
"i",
"]",
"=",
"(",
"(",
"dlogpart",
"-",
"(",
"nu_cav",
"/",
"tau_cav",
")",
"*",
"d2logpart",
")",
"/",
"(",
"1",
"+",
"d2logpart",
"/",
"tau_cav",
")",
")",
"delta_nu",
"=",
"nu",
"[",
"i",
"]",
"-",
"nu_old",
"[",
"i",
"]",
"# Update factor params in the weight space.",
"prec",
"[",
"(",
"a",
",",
"a",
",",
"b",
",",
"b",
")",
",",
"(",
"a",
",",
"b",
",",
"a",
",",
"b",
")",
"]",
"+=",
"delta_tau",
"*",
"MAT_ONE_FLAT",
"xs",
"[",
"a",
"]",
"+=",
"delta_nu",
"xs",
"[",
"b",
"]",
"-=",
"delta_nu",
"# Update mean and covariance.",
"if",
"abs",
"(",
"delta_tau",
")",
">",
"0",
":",
"phi",
"=",
"-",
"1.0",
"/",
"(",
"(",
"1.0",
"/",
"delta_tau",
")",
"+",
"f_var",
")",
"*",
"MAT_ONE",
"upd_mat",
"=",
"cov",
".",
"take",
"(",
"[",
"a",
",",
"b",
"]",
",",
"axis",
"=",
"0",
")",
"cov",
"=",
"cov",
"+",
"upd_mat",
".",
"T",
".",
"dot",
"(",
"phi",
")",
".",
"dot",
"(",
"upd_mat",
")",
"mean",
"=",
"cov",
".",
"dot",
"(",
"xs",
")",
"# Recompute the global parameters for stability.",
"cov",
"=",
"inv_posdef",
"(",
"prior_inv",
"+",
"prec",
")",
"mean",
"=",
"cov",
".",
"dot",
"(",
"xs",
")",
"if",
"_converged",
"(",
"(",
"tau",
",",
"nu",
")",
",",
"(",
"tau_old",
",",
"nu_old",
")",
")",
":",
"return",
"mean",
",",
"cov",
"raise",
"RuntimeError",
"(",
"\"EP did not converge after {} iterations\"",
".",
"format",
"(",
"max_iter",
")",
")"
] |
Compute a distribution of model parameters using the EP algorithm.
Raises
------
RuntimeError
If the algorithm does not converge after ``max_iter`` iterations.
|
[
"Compute",
"a",
"distribution",
"of",
"model",
"parameters",
"using",
"the",
"EP",
"algorithm",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/ep.py#L86-L154
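The "static variable" trick in the first line of the function above can be used for diagnostics after a call. A sketch, assuming the private module is imported directly (an internal API that may change):

from choix import ep

data = [(0, 1), (1, 2), (0, 2)]
mean, cov = ep.ep_pairwise(3, data, alpha=1.0)

# ep_pairwise delegates to _ep_pairwise, which records its iteration
# count on itself; inspect it after the call.
print(ep._ep_pairwise.iterations)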
|
lucasmaystre/choix
|
choix/ep.py
|
_log_phi
|
def _log_phi(z):
"""Stable computation of the log of the Normal CDF and its derivative."""
# Adapted from the GPML function `logphi.m`.
if z * z < 0.0492:
# First case: z close to zero.
coef = -z / SQRT2PI
val = functools.reduce(lambda acc, c: coef * (c + acc), CS, 0)
res = -2 * val - log(2)
dres = exp(-(z * z) / 2 - res) / SQRT2PI
elif z < -11.3137:
# Second case: z very small.
num = functools.reduce(
lambda acc, r: -z * acc / SQRT2 + r, RS, 0.5641895835477550741)
den = functools.reduce(lambda acc, q: -z * acc / SQRT2 + q, QS, 1.0)
res = log(num / (2 * den)) - (z * z) / 2
dres = abs(den / num) * sqrt(2.0 / pi)
else:
res = log(normal_cdf(z))
dres = exp(-(z * z) / 2 - res) / SQRT2PI
return res, dres
|
python
|
def _log_phi(z):
"""Stable computation of the log of the Normal CDF and its derivative."""
# Adapted from the GPML function `logphi.m`.
if z * z < 0.0492:
# First case: z close to zero.
coef = -z / SQRT2PI
val = functools.reduce(lambda acc, c: coef * (c + acc), CS, 0)
res = -2 * val - log(2)
dres = exp(-(z * z) / 2 - res) / SQRT2PI
elif z < -11.3137:
# Second case: z very small.
num = functools.reduce(
lambda acc, r: -z * acc / SQRT2 + r, RS, 0.5641895835477550741)
den = functools.reduce(lambda acc, q: -z * acc / SQRT2 + q, QS, 1.0)
res = log(num / (2 * den)) - (z * z) / 2
dres = abs(den / num) * sqrt(2.0 / pi)
else:
res = log(normal_cdf(z))
dres = exp(-(z * z) / 2 - res) / SQRT2PI
return res, dres
|
[
"def",
"_log_phi",
"(",
"z",
")",
":",
"# Adapted from the GPML function `logphi.m`.",
"if",
"z",
"*",
"z",
"<",
"0.0492",
":",
"# First case: z close to zero.",
"coef",
"=",
"-",
"z",
"/",
"SQRT2PI",
"val",
"=",
"functools",
".",
"reduce",
"(",
"lambda",
"acc",
",",
"c",
":",
"coef",
"*",
"(",
"c",
"+",
"acc",
")",
",",
"CS",
",",
"0",
")",
"res",
"=",
"-",
"2",
"*",
"val",
"-",
"log",
"(",
"2",
")",
"dres",
"=",
"exp",
"(",
"-",
"(",
"z",
"*",
"z",
")",
"/",
"2",
"-",
"res",
")",
"/",
"SQRT2PI",
"elif",
"z",
"<",
"-",
"11.3137",
":",
"# Second case: z very small.",
"num",
"=",
"functools",
".",
"reduce",
"(",
"lambda",
"acc",
",",
"r",
":",
"-",
"z",
"*",
"acc",
"/",
"SQRT2",
"+",
"r",
",",
"RS",
",",
"0.5641895835477550741",
")",
"den",
"=",
"functools",
".",
"reduce",
"(",
"lambda",
"acc",
",",
"q",
":",
"-",
"z",
"*",
"acc",
"/",
"SQRT2",
"+",
"q",
",",
"QS",
",",
"1.0",
")",
"res",
"=",
"log",
"(",
"num",
"/",
"(",
"2",
"*",
"den",
")",
")",
"-",
"(",
"z",
"*",
"z",
")",
"/",
"2",
"dres",
"=",
"abs",
"(",
"den",
"/",
"num",
")",
"*",
"sqrt",
"(",
"2.0",
"/",
"pi",
")",
"else",
":",
"res",
"=",
"log",
"(",
"normal_cdf",
"(",
"z",
")",
")",
"dres",
"=",
"exp",
"(",
"-",
"(",
"z",
"*",
"z",
")",
"/",
"2",
"-",
"res",
")",
"/",
"SQRT2PI",
"return",
"res",
",",
"dres"
] |
Stable computation of the log of the Normal CDF and its derivative.
|
[
"Stable",
"computation",
"of",
"the",
"log",
"of",
"the",
"Normal",
"CDF",
"and",
"its",
"derivative",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/ep.py#L157-L176
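A cross-check sketch (not from the source; assumes SciPy is available): the naive ``log(normal_cdf(z))`` of the third branch underflows once the CDF rounds to zero, which is exactly the regime the second branch above handles.

import numpy as np
from scipy.stats import norm

z = -40.0
print(norm.logcdf(z))       # stable reference value, about -804.6
print(np.log(norm.cdf(z)))  # naive version: -inf, since cdf(z) underflows to 0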
|
lucasmaystre/choix
|
choix/ep.py
|
_init_ws
|
def _init_ws(n_items, comparisons, prior_inv, tau, nu):
"""Initialize parameters in the weight space."""
prec = np.zeros((n_items, n_items))
xs = np.zeros(n_items)
for i, (a, b) in enumerate(comparisons):
prec[(a, a, b, b), (a, b, a, b)] += tau[i] * MAT_ONE_FLAT
xs[a] += nu[i]
xs[b] -= nu[i]
cov = inv_posdef(prior_inv + prec)
mean = cov.dot(xs)
    return mean, cov, xs, prec
|
python
|
def _init_ws(n_items, comparisons, prior_inv, tau, nu):
"""Initialize parameters in the weight space."""
prec = np.zeros((n_items, n_items))
xs = np.zeros(n_items)
for i, (a, b) in enumerate(comparisons):
prec[(a, a, b, b), (a, b, a, b)] += tau[i] * MAT_ONE_FLAT
xs[a] += nu[i]
xs[b] -= nu[i]
cov = inv_posdef(prior_inv + prec)
mean = cov.dot(xs)
    return mean, cov, xs, prec
|
[
"def",
"_init_ws",
"(",
"n_items",
",",
"comparisons",
",",
"prior_inv",
",",
"tau",
",",
"nu",
")",
":",
"prec",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_items",
",",
"n_items",
")",
")",
"xs",
"=",
"np",
".",
"zeros",
"(",
"n_items",
")",
"for",
"i",
",",
"(",
"a",
",",
"b",
")",
"in",
"enumerate",
"(",
"comparisons",
")",
":",
"prec",
"[",
"(",
"a",
",",
"a",
",",
"b",
",",
"b",
")",
",",
"(",
"a",
",",
"b",
",",
"a",
",",
"b",
")",
"]",
"+=",
"tau",
"[",
"i",
"]",
"*",
"MAT_ONE_FLAT",
"xs",
"[",
"a",
"]",
"+=",
"nu",
"[",
"i",
"]",
"xs",
"[",
"b",
"]",
"-=",
"nu",
"[",
"i",
"]",
"cov",
"=",
"inv_posdef",
"(",
"prior_inv",
"+",
"prec",
")",
"mean",
"=",
"cov",
".",
"dot",
"(",
"xs",
")",
"return",
"mean",
",",
"cov",
",",
"xs",
",",
"prec"
] |
Initialize parameters in the weight space.
|
[
"Initialize",
"parameters",
"in",
"the",
"weight",
"space",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/ep.py#L227-L237
|
lucasmaystre/choix
|
choix/utils.py
|
exp_transform
|
def exp_transform(params):
"""Transform parameters into exp-scale weights."""
weights = np.exp(np.asarray(params) - np.mean(params))
return (len(weights) / weights.sum()) * weights
|
python
|
def exp_transform(params):
"""Transform parameters into exp-scale weights."""
weights = np.exp(np.asarray(params) - np.mean(params))
return (len(weights) / weights.sum()) * weights
|
[
"def",
"exp_transform",
"(",
"params",
")",
":",
"weights",
"=",
"np",
".",
"exp",
"(",
"np",
".",
"asarray",
"(",
"params",
")",
"-",
"np",
".",
"mean",
"(",
"params",
")",
")",
"return",
"(",
"len",
"(",
"weights",
")",
"/",
"weights",
".",
"sum",
"(",
")",
")",
"*",
"weights"
] |
Transform parameters into exp-scale weights.
|
[
"Transform",
"parameters",
"into",
"exp",
"-",
"scale",
"weights",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/utils.py#L22-L25
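A round-trip sketch, assuming both ``exp_transform`` and the ``log_transform`` used throughout ``lsr.py`` live in ``choix.utils``, and that ``log_transform`` mean-centers the log-weights (an assumption, not confirmed by the excerpt).

import numpy as np
from choix.utils import exp_transform, log_transform

params = np.array([0.5, -0.2, 1.3])
weights = exp_transform(params)

print(weights.sum())           # normalized so the weights sum to len(weights)
print(log_transform(weights))  # should give back a mean-centered copy of params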
|
lucasmaystre/choix
|
choix/utils.py
|
softmax
|
def softmax(xs):
"""Stable implementation of the softmax function."""
ys = xs - np.max(xs)
exps = np.exp(ys)
return exps / exps.sum(axis=0)
|
python
|
def softmax(xs):
"""Stable implementation of the softmax function."""
ys = xs - np.max(xs)
exps = np.exp(ys)
return exps / exps.sum(axis=0)
|
[
"def",
"softmax",
"(",
"xs",
")",
":",
"ys",
"=",
"xs",
"-",
"np",
".",
"max",
"(",
"xs",
")",
"exps",
"=",
"np",
".",
"exp",
"(",
"ys",
")",
"return",
"exps",
"/",
"exps",
".",
"sum",
"(",
"axis",
"=",
"0",
")"
] |
Stable implementation of the softmax function.
|
[
"Stable",
"implementation",
"of",
"the",
"softmax",
"function",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/utils.py#L28-L32
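A sketch of why the ``np.max`` shift above matters: with large inputs the naive ``np.exp(xs)`` overflows, while the shifted version stays finite.

import numpy as np
from choix.utils import softmax

xs = np.array([1000.0, 1001.0, 1002.0])
print(softmax(xs))  # finite probabilities; np.exp(xs) alone would overflow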
|
lucasmaystre/choix
|
choix/utils.py
|
inv_posdef
|
def inv_posdef(mat):
"""Stable inverse of a positive definite matrix."""
# See:
# - http://www.seas.ucla.edu/~vandenbe/103/lectures/chol.pdf
# - http://scicomp.stackexchange.com/questions/3188
chol = np.linalg.cholesky(mat)
ident = np.eye(mat.shape[0])
res = solve_triangular(chol, ident, lower=True, overwrite_b=True)
return np.transpose(res).dot(res)
|
python
|
def inv_posdef(mat):
"""Stable inverse of a positive definite matrix."""
# See:
# - http://www.seas.ucla.edu/~vandenbe/103/lectures/chol.pdf
# - http://scicomp.stackexchange.com/questions/3188
chol = np.linalg.cholesky(mat)
ident = np.eye(mat.shape[0])
res = solve_triangular(chol, ident, lower=True, overwrite_b=True)
return np.transpose(res).dot(res)
|
[
"def",
"inv_posdef",
"(",
"mat",
")",
":",
"# See:",
"# - http://www.seas.ucla.edu/~vandenbe/103/lectures/chol.pdf",
"# - http://scicomp.stackexchange.com/questions/3188",
"chol",
"=",
"np",
".",
"linalg",
".",
"cholesky",
"(",
"mat",
")",
"ident",
"=",
"np",
".",
"eye",
"(",
"mat",
".",
"shape",
"[",
"0",
"]",
")",
"res",
"=",
"solve_triangular",
"(",
"chol",
",",
"ident",
",",
"lower",
"=",
"True",
",",
"overwrite_b",
"=",
"True",
")",
"return",
"np",
".",
"transpose",
"(",
"res",
")",
".",
"dot",
"(",
"res",
")"
] |
Stable inverse of a positive definite matrix.
|
[
"Stable",
"inverse",
"of",
"a",
"positive",
"definite",
"matrix",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/utils.py#L46-L54
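A quick consistency sketch on an invented symmetric positive-definite matrix (any ``A.T @ A`` plus a small ridge qualifies), comparing against NumPy's general-purpose inverse.

import numpy as np
from choix.utils import inv_posdef

rng = np.random.default_rng(0)
a = rng.normal(size=(4, 4))
mat = a.T @ a + 0.1 * np.eye(4)  # symmetric positive definite by construction

print(np.allclose(inv_posdef(mat), np.linalg.inv(mat)))  # True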
|
lucasmaystre/choix
|
choix/utils.py
|
footrule_dist
|
def footrule_dist(params1, params2=None):
r"""Compute Spearman's footrule distance between two models.
This function computes Spearman's footrule distance between the rankings
induced by two parameter vectors. Let :math:`\sigma_i` be the rank of item
``i`` in the model described by ``params1``, and :math:`\tau_i` be its rank
in the model described by ``params2``. Spearman's footrule distance is
defined by
.. math::
\sum_{i=1}^N | \sigma_i - \tau_i |
By convention, items with the lowest parameters are ranked first (i.e.,
sorted using the natural order).
If the argument ``params2`` is ``None``, the second model is assumed to
rank the items by their index: item ``0`` has rank 1, item ``1`` has rank
2, etc.
Parameters
----------
params1 : array_like
Parameters of the first model.
params2 : array_like, optional
Parameters of the second model.
Returns
-------
dist : float
Spearman's footrule distance.
"""
assert params2 is None or len(params1) == len(params2)
ranks1 = rankdata(params1, method="average")
if params2 is None:
ranks2 = np.arange(1, len(params1) + 1, dtype=float)
else:
ranks2 = rankdata(params2, method="average")
return np.sum(np.abs(ranks1 - ranks2))
|
python
|
def footrule_dist(params1, params2=None):
r"""Compute Spearman's footrule distance between two models.
This function computes Spearman's footrule distance between the rankings
induced by two parameter vectors. Let :math:`\sigma_i` be the rank of item
``i`` in the model described by ``params1``, and :math:`\tau_i` be its rank
in the model described by ``params2``. Spearman's footrule distance is
defined by
.. math::
\sum_{i=1}^N | \sigma_i - \tau_i |
By convention, items with the lowest parameters are ranked first (i.e.,
sorted using the natural order).
If the argument ``params2`` is ``None``, the second model is assumed to
rank the items by their index: item ``0`` has rank 1, item ``1`` has rank
2, etc.
Parameters
----------
params1 : array_like
Parameters of the first model.
params2 : array_like, optional
Parameters of the second model.
Returns
-------
dist : float
Spearman's footrule distance.
"""
assert params2 is None or len(params1) == len(params2)
ranks1 = rankdata(params1, method="average")
if params2 is None:
ranks2 = np.arange(1, len(params1) + 1, dtype=float)
else:
ranks2 = rankdata(params2, method="average")
return np.sum(np.abs(ranks1 - ranks2))
|
[
"def",
"footrule_dist",
"(",
"params1",
",",
"params2",
"=",
"None",
")",
":",
"assert",
"params2",
"is",
"None",
"or",
"len",
"(",
"params1",
")",
"==",
"len",
"(",
"params2",
")",
"ranks1",
"=",
"rankdata",
"(",
"params1",
",",
"method",
"=",
"\"average\"",
")",
"if",
"params2",
"is",
"None",
":",
"ranks2",
"=",
"np",
".",
"arange",
"(",
"1",
",",
"len",
"(",
"params1",
")",
"+",
"1",
",",
"dtype",
"=",
"float",
")",
"else",
":",
"ranks2",
"=",
"rankdata",
"(",
"params2",
",",
"method",
"=",
"\"average\"",
")",
"return",
"np",
".",
"sum",
"(",
"np",
".",
"abs",
"(",
"ranks1",
"-",
"ranks2",
")",
")"
] |
r"""Compute Spearman's footrule distance between two models.
This function computes Spearman's footrule distance between the rankings
induced by two parameter vectors. Let :math:`\sigma_i` be the rank of item
``i`` in the model described by ``params1``, and :math:`\tau_i` be its rank
in the model described by ``params2``. Spearman's footrule distance is
defined by
.. math::
\sum_{i=1}^N | \sigma_i - \tau_i |
By convention, items with the lowest parameters are ranked first (i.e.,
sorted using the natural order).
If the argument ``params2`` is ``None``, the second model is assumed to
rank the items by their index: item ``0`` has rank 1, item ``1`` has rank
2, etc.
Parameters
----------
params1 : array_like
Parameters of the first model.
params2 : array_like, optional
Parameters of the second model.
Returns
-------
dist : float
Spearman's footrule distance.
|
[
"r",
"Compute",
"Spearman",
"s",
"footrule",
"distance",
"between",
"two",
"models",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/utils.py#L57-L95
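A worked instance of the formula above (made-up parameters): swapping the ranks of two items moves each by one place while the third stays put, so the distance is 2.

from choix.utils import footrule_dist

# params1 ranks the items (0, 1, 2); params2 ranks them (1, 0, 2).
print(footrule_dist([0.1, 0.5, 0.9], [0.5, 0.1, 0.9]))  # 2.0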
|
lucasmaystre/choix
|
choix/utils.py
|
kendalltau_dist
|
def kendalltau_dist(params1, params2=None):
r"""Compute the Kendall tau distance between two models.
This function computes the Kendall tau distance between the rankings
induced by two parameter vectors. Let :math:`\sigma_i` be the rank of item
``i`` in the model described by ``params1``, and :math:`\tau_i` be its rank
in the model described by ``params2``. The Kendall tau distance is defined
as the number of pairwise disagreements between the two rankings, i.e.,
.. math::
\sum_{i=1}^N \sum_{j=1}^N
\mathbf{1} \{ \sigma_i > \sigma_j \wedge \tau_i < \tau_j \}
By convention, items with the lowest parameters are ranked first (i.e.,
sorted using the natural order).
If the argument ``params2`` is ``None``, the second model is assumed to
rank the items by their index: item ``0`` has rank 1, item ``1`` has rank
2, etc.
If some values are equal within a parameter vector, all items are given a
distinct rank, corresponding to the order in which the values occur.
Parameters
----------
params1 : array_like
Parameters of the first model.
params2 : array_like, optional
Parameters of the second model.
Returns
-------
dist : float
Kendall tau distance.
"""
assert params2 is None or len(params1) == len(params2)
ranks1 = rankdata(params1, method="ordinal")
if params2 is None:
ranks2 = np.arange(1, len(params1) + 1, dtype=float)
else:
ranks2 = rankdata(params2, method="ordinal")
tau, _ = kendalltau(ranks1, ranks2)
n_items = len(params1)
n_pairs = n_items * (n_items - 1) / 2
return round((n_pairs - n_pairs * tau) / 2)
|
python
|
def kendalltau_dist(params1, params2=None):
r"""Compute the Kendall tau distance between two models.
This function computes the Kendall tau distance between the rankings
induced by two parameter vectors. Let :math:`\sigma_i` be the rank of item
``i`` in the model described by ``params1``, and :math:`\tau_i` be its rank
in the model described by ``params2``. The Kendall tau distance is defined
as the number of pairwise disagreements between the two rankings, i.e.,
.. math::
\sum_{i=1}^N \sum_{j=1}^N
\mathbf{1} \{ \sigma_i > \sigma_j \wedge \tau_i < \tau_j \}
By convention, items with the lowest parameters are ranked first (i.e.,
sorted using the natural order).
If the argument ``params2`` is ``None``, the second model is assumed to
rank the items by their index: item ``0`` has rank 1, item ``1`` has rank
2, etc.
If some values are equal within a parameter vector, all items are given a
distinct rank, corresponding to the order in which the values occur.
Parameters
----------
params1 : array_like
Parameters of the first model.
params2 : array_like, optional
Parameters of the second model.
Returns
-------
dist : float
Kendall tau distance.
"""
assert params2 is None or len(params1) == len(params2)
ranks1 = rankdata(params1, method="ordinal")
if params2 is None:
ranks2 = np.arange(1, len(params1) + 1, dtype=float)
else:
ranks2 = rankdata(params2, method="ordinal")
tau, _ = kendalltau(ranks1, ranks2)
n_items = len(params1)
n_pairs = n_items * (n_items - 1) / 2
return round((n_pairs - n_pairs * tau) / 2)
|
[
"def",
"kendalltau_dist",
"(",
"params1",
",",
"params2",
"=",
"None",
")",
":",
"assert",
"params2",
"is",
"None",
"or",
"len",
"(",
"params1",
")",
"==",
"len",
"(",
"params2",
")",
"ranks1",
"=",
"rankdata",
"(",
"params1",
",",
"method",
"=",
"\"ordinal\"",
")",
"if",
"params2",
"is",
"None",
":",
"ranks2",
"=",
"np",
".",
"arange",
"(",
"1",
",",
"len",
"(",
"params1",
")",
"+",
"1",
",",
"dtype",
"=",
"float",
")",
"else",
":",
"ranks2",
"=",
"rankdata",
"(",
"params2",
",",
"method",
"=",
"\"ordinal\"",
")",
"tau",
",",
"_",
"=",
"kendalltau",
"(",
"ranks1",
",",
"ranks2",
")",
"n_items",
"=",
"len",
"(",
"params1",
")",
"n_pairs",
"=",
"n_items",
"*",
"(",
"n_items",
"-",
"1",
")",
"/",
"2",
"return",
"round",
"(",
"(",
"n_pairs",
"-",
"n_pairs",
"*",
"tau",
")",
"/",
"2",
")"
] |
r"""Compute the Kendall tau distance between two models.
This function computes the Kendall tau distance between the rankings
induced by two parameter vectors. Let :math:`\sigma_i` be the rank of item
``i`` in the model described by ``params1``, and :math:`\tau_i` be its rank
in the model described by ``params2``. The Kendall tau distance is defined
as the number of pairwise disagreements between the two rankings, i.e.,
.. math::
\sum_{i=1}^N \sum_{j=1}^N
\mathbf{1} \{ \sigma_i > \sigma_j \wedge \tau_i < \tau_j \}
By convention, items with the lowest parameters are ranked first (i.e.,
sorted using the natural order).
If the argument ``params2`` is ``None``, the second model is assumed to
rank the items by their index: item ``0`` has rank 1, item ``1`` has rank
2, etc.
If some values are equal within a parameter vector, all items are given a
distinct rank, corresponding to the order in which the values occur.
Parameters
----------
params1 : array_like
Parameters of the first model.
params2 : array_like, optional
Parameters of the second model.
Returns
-------
dist : float
Kendall tau distance.
|
[
"r",
"Compute",
"the",
"Kendall",
"tau",
"distance",
"between",
"two",
"models",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/utils.py#L98-L143
|
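A minimal usage sketch for this record, assuming `choix` is installed and re-exports `kendalltau_dist` at the package level; the parameter values are made up:

import numpy as np
import choix

params1 = np.array([0.1, 2.0, -0.5, 1.0])  # induced ranking: 2, 0, 3, 1
params2 = np.array([0.0, 1.0, 2.0, 3.0])   # induced ranking: 0, 1, 2, 3
# Counts the pairwise disagreements between the two induced rankings.
print(choix.kendalltau_dist(params1, params2))  # 3: three of six pairs disagree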
lucasmaystre/choix
|
choix/utils.py
|
rmse
|
def rmse(params1, params2):
r"""Compute the root-mean-squared error between two models.
Parameters
----------
params1 : array_like
Parameters of the first model.
params2 : array_like
Parameters of the second model.
Returns
-------
error : float
Root-mean-squared error.
"""
assert len(params1) == len(params2)
params1 = np.asarray(params1) - np.mean(params1)
params2 = np.asarray(params2) - np.mean(params2)
sqrt_n = math.sqrt(len(params1))
return np.linalg.norm(params1 - params2, ord=2) / sqrt_n
|
python
|
def rmse(params1, params2):
r"""Compute the root-mean-squared error between two models.
Parameters
----------
params1 : array_like
Parameters of the first model.
params2 : array_like
Parameters of the second model.
Returns
-------
error : float
Root-mean-squared error.
"""
assert len(params1) == len(params2)
params1 = np.asarray(params1) - np.mean(params1)
params2 = np.asarray(params2) - np.mean(params2)
sqrt_n = math.sqrt(len(params1))
return np.linalg.norm(params1 - params2, ord=2) / sqrt_n
|
[
"def",
"rmse",
"(",
"params1",
",",
"params2",
")",
":",
"assert",
"len",
"(",
"params1",
")",
"==",
"len",
"(",
"params2",
")",
"params1",
"=",
"np",
".",
"asarray",
"(",
"params1",
")",
"-",
"np",
".",
"mean",
"(",
"params1",
")",
"params2",
"=",
"np",
".",
"asarray",
"(",
"params2",
")",
"-",
"np",
".",
"mean",
"(",
"params2",
")",
"sqrt_n",
"=",
"math",
".",
"sqrt",
"(",
"len",
"(",
"params1",
")",
")",
"return",
"np",
".",
"linalg",
".",
"norm",
"(",
"params1",
"-",
"params2",
",",
"ord",
"=",
"2",
")",
"/",
"sqrt_n"
] |
r"""Compute the root-mean-squared error between two models.
Parameters
----------
params1 : array_like
Parameters of the first model.
params2 : array_like
Parameters of the second model.
Returns
-------
error : float
Root-mean-squared error.
|
[
"r",
"Compute",
"the",
"root",
"-",
"mean",
"-",
"squared",
"error",
"between",
"two",
"models",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/utils.py#L146-L165
|
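A short sketch, assuming `choix` re-exports `rmse` at the package level; both vectors are mean-centered first, so a constant shift does not count as error:

import numpy as np
import choix

params1 = np.array([0.0, 1.0, 2.0])
params2 = params1 + 0.5              # same model, shifted by a constant
print(choix.rmse(params1, params2))  # ~0.0: centering removes the shift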
lucasmaystre/choix
|
choix/utils.py
|
log_likelihood_pairwise
|
def log_likelihood_pairwise(data, params):
"""Compute the log-likelihood of model parameters."""
loglik = 0
for winner, loser in data:
loglik -= np.logaddexp(0, -(params[winner] - params[loser]))
return loglik
|
python
|
def log_likelihood_pairwise(data, params):
"""Compute the log-likelihood of model parameters."""
loglik = 0
for winner, loser in data:
loglik -= np.logaddexp(0, -(params[winner] - params[loser]))
return loglik
|
[
"def",
"log_likelihood_pairwise",
"(",
"data",
",",
"params",
")",
":",
"loglik",
"=",
"0",
"for",
"winner",
",",
"loser",
"in",
"data",
":",
"loglik",
"-=",
"np",
".",
"logaddexp",
"(",
"0",
",",
"-",
"(",
"params",
"[",
"winner",
"]",
"-",
"params",
"[",
"loser",
"]",
")",
")",
"return",
"loglik"
] |
Compute the log-likelihood of model parameters.
|
[
"Compute",
"the",
"log",
"-",
"likelihood",
"of",
"model",
"parameters",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/utils.py#L168-L173
|
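A sketch with made-up data, assuming package-level export; the manual sum mirrors the loop above, where each comparison contributes the log-sigmoid of the parameter difference:

import numpy as np
import choix

data = [(0, 1), (1, 2), (0, 2)]      # (winner, loser) pairs
params = np.array([1.0, 0.0, -1.0])
ll = choix.log_likelihood_pairwise(data, params)
manual = sum(-np.logaddexp(0, -(params[w] - params[l])) for w, l in data)
print(np.isclose(ll, manual))        # True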
lucasmaystre/choix
|
choix/utils.py
|
log_likelihood_rankings
|
def log_likelihood_rankings(data, params):
"""Compute the log-likelihood of model parameters."""
loglik = 0
params = np.asarray(params)
for ranking in data:
for i, winner in enumerate(ranking[:-1]):
loglik -= logsumexp(params.take(ranking[i:]) - params[winner])
return loglik
|
python
|
def log_likelihood_rankings(data, params):
"""Compute the log-likelihood of model parameters."""
loglik = 0
params = np.asarray(params)
for ranking in data:
for i, winner in enumerate(ranking[:-1]):
loglik -= logsumexp(params.take(ranking[i:]) - params[winner])
return loglik
|
[
"def",
"log_likelihood_rankings",
"(",
"data",
",",
"params",
")",
":",
"loglik",
"=",
"0",
"params",
"=",
"np",
".",
"asarray",
"(",
"params",
")",
"for",
"ranking",
"in",
"data",
":",
"for",
"i",
",",
"winner",
"in",
"enumerate",
"(",
"ranking",
"[",
":",
"-",
"1",
"]",
")",
":",
"loglik",
"-=",
"logsumexp",
"(",
"params",
".",
"take",
"(",
"ranking",
"[",
"i",
":",
"]",
")",
"-",
"params",
"[",
"winner",
"]",
")",
"return",
"loglik"
] |
Compute the log-likelihood of model parameters.
|
[
"Compute",
"the",
"log",
"-",
"likelihood",
"of",
"model",
"parameters",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/utils.py#L176-L183
|
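A sketch with a single made-up ranking, assuming package-level export; each position contributes the chosen item's parameter minus the logsumexp over the items still in contention:

import numpy as np
from scipy.special import logsumexp
import choix

data = [(1, 0, 2)]                   # item 1 first, then 0, then 2
params = np.array([0.5, 1.5, -1.0])
ll = choix.log_likelihood_rankings(data, params)
manual = sum(
    params[r[i]] - logsumexp(params.take(r[i:]))
    for r in data
    for i in range(len(r) - 1)
)
print(np.isclose(ll, manual))        # True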
lucasmaystre/choix
|
choix/utils.py
|
log_likelihood_top1
|
def log_likelihood_top1(data, params):
"""Compute the log-likelihood of model parameters."""
loglik = 0
params = np.asarray(params)
for winner, losers in data:
idx = np.append(winner, losers)
loglik -= logsumexp(params.take(idx) - params[winner])
return loglik
|
python
|
def log_likelihood_top1(data, params):
"""Compute the log-likelihood of model parameters."""
loglik = 0
params = np.asarray(params)
for winner, losers in data:
idx = np.append(winner, losers)
loglik -= logsumexp(params.take(idx) - params[winner])
return loglik
|
[
"def",
"log_likelihood_top1",
"(",
"data",
",",
"params",
")",
":",
"loglik",
"=",
"0",
"params",
"=",
"np",
".",
"asarray",
"(",
"params",
")",
"for",
"winner",
",",
"losers",
"in",
"data",
":",
"idx",
"=",
"np",
".",
"append",
"(",
"winner",
",",
"losers",
")",
"loglik",
"-=",
"logsumexp",
"(",
"params",
".",
"take",
"(",
"idx",
")",
"-",
"params",
"[",
"winner",
"]",
")",
"return",
"loglik"
] |
Compute the log-likelihood of model parameters.
|
[
"Compute",
"the",
"log",
"-",
"likelihood",
"of",
"model",
"parameters",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/utils.py#L186-L193
|
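The same pattern for top-1 observations (made-up data, assuming package-level export); each observation is a Plackett-Luce choice out of the winner plus its alternatives:

import numpy as np
import choix

data = [(0, [1, 2]), (2, [0])]       # (winner, losers) observations
params = np.array([0.2, -0.3, 0.1])
print(choix.log_likelihood_top1(data, params))
# Each term equals params[winner] minus logsumexp over winner and losers.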
lucasmaystre/choix
|
choix/utils.py
|
log_likelihood_network
|
def log_likelihood_network(
digraph, traffic_in, traffic_out, params, weight=None):
"""
Compute the log-likelihood of model parameters.
If ``weight`` is not ``None``, the log-likelihood is correct only up to a
constant (independent of the parameters).
"""
loglik = 0
for i in range(len(traffic_in)):
loglik += traffic_in[i] * params[i]
if digraph.out_degree(i) > 0:
neighbors = list(digraph.successors(i))
if weight is None:
loglik -= traffic_out[i] * logsumexp(params.take(neighbors))
else:
weights = [digraph[i][j][weight] for j in neighbors]
loglik -= traffic_out[i] * logsumexp(
params.take(neighbors), b=weights)
return loglik
|
python
|
def log_likelihood_network(
digraph, traffic_in, traffic_out, params, weight=None):
"""
Compute the log-likelihood of model parameters.
If ``weight`` is not ``None``, the log-likelihood is correct only up to a
constant (independent of the parameters).
"""
loglik = 0
for i in range(len(traffic_in)):
loglik += traffic_in[i] * params[i]
if digraph.out_degree(i) > 0:
neighbors = list(digraph.successors(i))
if weight is None:
loglik -= traffic_out[i] * logsumexp(params.take(neighbors))
else:
weights = [digraph[i][j][weight] for j in neighbors]
loglik -= traffic_out[i] * logsumexp(
params.take(neighbors), b=weights)
return loglik
|
[
"def",
"log_likelihood_network",
"(",
"digraph",
",",
"traffic_in",
",",
"traffic_out",
",",
"params",
",",
"weight",
"=",
"None",
")",
":",
"loglik",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"traffic_in",
")",
")",
":",
"loglik",
"+=",
"traffic_in",
"[",
"i",
"]",
"*",
"params",
"[",
"i",
"]",
"if",
"digraph",
".",
"out_degree",
"(",
"i",
")",
">",
"0",
":",
"neighbors",
"=",
"list",
"(",
"digraph",
".",
"successors",
"(",
"i",
")",
")",
"if",
"weight",
"is",
"None",
":",
"loglik",
"-=",
"traffic_out",
"[",
"i",
"]",
"*",
"logsumexp",
"(",
"params",
".",
"take",
"(",
"neighbors",
")",
")",
"else",
":",
"weights",
"=",
"[",
"digraph",
"[",
"i",
"]",
"[",
"j",
"]",
"[",
"weight",
"]",
"for",
"j",
"in",
"neighbors",
"]",
"loglik",
"-=",
"traffic_out",
"[",
"i",
"]",
"*",
"logsumexp",
"(",
"params",
".",
"take",
"(",
"neighbors",
")",
",",
"b",
"=",
"weights",
")",
"return",
"loglik"
] |
Compute the log-likelihood of model parameters.
If ``weight`` is not ``None``, the log-likelihood is correct only up to a
constant (independent of the parameters).
|
[
"Compute",
"the",
"log",
"-",
"likelihood",
"of",
"model",
"parameters",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/utils.py#L196-L215
|
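A sketch on a toy graph, assuming `networkx` is available and the function is exported at the package level; the traffic counts are made up and need not come from real observations for the evaluation to run:

import networkx as nx
import numpy as np
import choix

digraph = nx.DiGraph([(0, 1), (1, 2), (2, 0)])  # a 3-cycle
traffic_in = [2, 1, 1]                           # hypothetical counts
traffic_out = [1, 2, 1]
params = np.array([0.1, -0.2, 0.1])
print(choix.log_likelihood_network(digraph, traffic_in, traffic_out, params))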
lucasmaystre/choix
|
choix/utils.py
|
statdist
|
def statdist(generator):
"""Compute the stationary distribution of a Markov chain.
Parameters
----------
generator : array_like
Infinitesimal generator matrix of the Markov chain.
Returns
-------
dist : numpy.ndarray
The unnormalized stationary distribution of the Markov chain.
Raises
------
ValueError
If the Markov chain does not have a unique stationary distribution.
"""
generator = np.asarray(generator)
n = generator.shape[0]
with warnings.catch_warnings():
# The LU decomposition raises a warning when the generator matrix is
# singular (which it, by construction, is!).
warnings.filterwarnings("ignore")
lu, piv = spl.lu_factor(generator.T, check_finite=False)
# The last row contains 0's only.
left = lu[:-1,:-1]
right = -lu[:-1,-1]
# Solves system `left * x = right`. Assumes that `left` is
# upper-triangular (ignores lower triangle).
try:
res = spl.solve_triangular(left, right, check_finite=False)
except:
# Ideally we would like to catch `spl.LinAlgError` only, but there seems
# to be a bug in scipy, in the code that raises the LinAlgError (!!).
raise ValueError(
"stationary distribution could not be computed. "
"Perhaps the Markov chain has more than one absorbing class?")
res = np.append(res, 1.0)
return (n / res.sum()) * res
|
python
|
def statdist(generator):
"""Compute the stationary distribution of a Markov chain.
Parameters
----------
generator : array_like
Infinitesimal generator matrix of the Markov chain.
Returns
-------
dist : numpy.ndarray
The unnormalized stationary distribution of the Markov chain.
Raises
------
ValueError
If the Markov chain does not have a unique stationary distribution.
"""
generator = np.asarray(generator)
n = generator.shape[0]
with warnings.catch_warnings():
# The LU decomposition raises a warning when the generator matrix is
# singular (which it, by construction, is!).
warnings.filterwarnings("ignore")
lu, piv = spl.lu_factor(generator.T, check_finite=False)
# The last row contains 0's only.
left = lu[:-1,:-1]
right = -lu[:-1,-1]
# Solves system `left * x = right`. Assumes that `left` is
# upper-triangular (ignores lower triangle).
try:
res = spl.solve_triangular(left, right, check_finite=False)
except:
# Ideally we would like to catch `spl.LinAlgError` only, but there seems
# to be a bug in scipy, in the code that raises the LinAlgError (!!).
raise ValueError(
"stationary distribution could not be computed. "
"Perhaps the Markov chain has more than one absorbing class?")
res = np.append(res, 1.0)
return (n / res.sum()) * res
|
[
"def",
"statdist",
"(",
"generator",
")",
":",
"generator",
"=",
"np",
".",
"asarray",
"(",
"generator",
")",
"n",
"=",
"generator",
".",
"shape",
"[",
"0",
"]",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"# The LU decomposition raises a warning when the generator matrix is",
"# singular (which it, by construction, is!).",
"warnings",
".",
"filterwarnings",
"(",
"\"ignore\"",
")",
"lu",
",",
"piv",
"=",
"spl",
".",
"lu_factor",
"(",
"generator",
".",
"T",
",",
"check_finite",
"=",
"False",
")",
"# The last row contains 0's only.",
"left",
"=",
"lu",
"[",
":",
"-",
"1",
",",
":",
"-",
"1",
"]",
"right",
"=",
"-",
"lu",
"[",
":",
"-",
"1",
",",
"-",
"1",
"]",
"# Solves system `left * x = right`. Assumes that `left` is",
"# upper-triangular (ignores lower triangle).",
"try",
":",
"res",
"=",
"spl",
".",
"solve_triangular",
"(",
"left",
",",
"right",
",",
"check_finite",
"=",
"False",
")",
"except",
":",
"# Ideally we would like to catch `spl.LinAlgError` only, but there seems",
"# to be a bug in scipy, in the code that raises the LinAlgError (!!).",
"raise",
"ValueError",
"(",
"\"stationary distribution could not be computed. \"",
"\"Perhaps the Markov chain has more than one absorbing class?\"",
")",
"res",
"=",
"np",
".",
"append",
"(",
"res",
",",
"1.0",
")",
"return",
"(",
"n",
"/",
"res",
".",
"sum",
"(",
")",
")",
"*",
"res"
] |
Compute the stationary distribution of a Markov chain.
Parameters
----------
generator : array_like
Infinitesimal generator matrix of the Markov chain.
Returns
-------
dist : numpy.ndarray
The unnormalized stationary distribution of the Markov chain.
Raises
------
ValueError
If the Markov chain does not have a unique stationary distribution.
|
[
"Compute",
"the",
"stationary",
"distribution",
"of",
"a",
"Markov",
"chain",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/utils.py#L218-L257
|
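A sketch for a two-state chain; `statdist` lives in `choix/utils.py` (per this record) but is not necessarily part of the documented top-level API, so it is imported from the module directly. The generator rows sum to zero, as required:

import numpy as np
from choix.utils import statdist

Q = np.array([[-1.0,  1.0],
              [ 2.0, -2.0]])
dist = statdist(Q)
print(dist / dist.sum())  # ~[0.667, 0.333]; pi @ Q = 0 holds for this pi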
lucasmaystre/choix
|
choix/utils.py
|
generate_params
|
def generate_params(n_items, interval=5.0, ordered=False):
r"""Generate random model parameters.
This function samples a parameter independently and uniformly for each
item. ``interval`` defines the width of the uniform distribution.
Parameters
----------
n_items : int
Number of distinct items.
interval : float
Sampling interval.
ordered : bool, optional
If true, the parameters are ordered from lowest to highest.
Returns
-------
params : numpy.ndarray
Model parameters.
"""
params = np.random.uniform(low=0, high=interval, size=n_items)
if ordered:
params.sort()
return params - params.mean()
|
python
|
def generate_params(n_items, interval=5.0, ordered=False):
r"""Generate random model parameters.
This function samples a parameter independently and uniformly for each
item. ``interval`` defines the width of the uniform distribution.
Parameters
----------
n_items : int
Number of distinct items.
interval : float
Sampling interval.
ordered : bool, optional
If true, the parameters are ordered from lowest to highest.
Returns
-------
params : numpy.ndarray
Model parameters.
"""
params = np.random.uniform(low=0, high=interval, size=n_items)
if ordered:
params.sort()
return params - params.mean()
|
[
"def",
"generate_params",
"(",
"n_items",
",",
"interval",
"=",
"5.0",
",",
"ordered",
"=",
"False",
")",
":",
"params",
"=",
"np",
".",
"random",
".",
"uniform",
"(",
"low",
"=",
"0",
",",
"high",
"=",
"interval",
",",
"size",
"=",
"n_items",
")",
"if",
"ordered",
":",
"params",
".",
"sort",
"(",
")",
"return",
"params",
"-",
"params",
".",
"mean",
"(",
")"
] |
r"""Generate random model parameters.
This function samples a parameter independently and uniformly for each
item. ``interval`` defines the width of the uniform distribution.
Parameters
----------
n_items : int
Number of distinct items.
interval : float
Sampling interval.
ordered : bool, optional
If true, the parameters are ordered from lowest to highest.
Returns
-------
params : numpy.ndarray
Model parameters.
|
[
"r",
"Generate",
"random",
"model",
"parameters",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/utils.py#L260-L283
|
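A quick sketch (seed chosen arbitrarily, assuming package-level export):

import numpy as np
import choix

np.random.seed(42)
params = choix.generate_params(n_items=5, interval=3.0, ordered=True)
print(params)                          # sorted, mean-centered parameters
print(np.isclose(params.mean(), 0.0))  # True: the mean is subtracted off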
lucasmaystre/choix
|
choix/utils.py
|
generate_pairwise
|
def generate_pairwise(params, n_comparisons=10):
"""Generate pairwise comparisons from a Bradley--Terry model.
This function samples comparison pairs independently and uniformly at
random over the ``len(params)`` choose 2 possibilities, and samples the
corresponding comparison outcomes from a Bradley--Terry model parametrized
by ``params``.
Parameters
----------
params : array_like
Model parameters.
n_comparisons : int
Number of comparisons to be returned.
Returns
-------
data : list of (int, int)
Pairwise-comparison samples (see :ref:`data-pairwise`).
"""
n = len(params)
items = tuple(range(n))
params = np.asarray(params)
data = list()
for _ in range(n_comparisons):
# Pick the pair uniformly at random.
a, b = random.sample(items, 2)
if compare((a, b), params) == a:
data.append((a, b))
else:
data.append((b, a))
return tuple(data)
|
python
|
def generate_pairwise(params, n_comparisons=10):
"""Generate pairwise comparisons from a Bradley--Terry model.
This function samples comparison pairs independently and uniformly at
random over the ``len(params)`` choose 2 possibilities, and samples the
corresponding comparison outcomes from a Bradley--Terry model parametrized
by ``params``.
Parameters
----------
params : array_like
Model parameters.
n_comparisons : int
Number of comparisons to be returned.
Returns
-------
data : list of (int, int)
Pairwise-comparison samples (see :ref:`data-pairwise`).
"""
n = len(params)
items = tuple(range(n))
params = np.asarray(params)
data = list()
for _ in range(n_comparisons):
# Pick the pair uniformly at random.
a, b = random.sample(items, 2)
if compare((a, b), params) == a:
data.append((a, b))
else:
data.append((b, a))
return tuple(data)
|
[
"def",
"generate_pairwise",
"(",
"params",
",",
"n_comparisons",
"=",
"10",
")",
":",
"n",
"=",
"len",
"(",
"params",
")",
"items",
"=",
"tuple",
"(",
"range",
"(",
"n",
")",
")",
"params",
"=",
"np",
".",
"asarray",
"(",
"params",
")",
"data",
"=",
"list",
"(",
")",
"for",
"_",
"in",
"range",
"(",
"n_comparisons",
")",
":",
"# Pick the pair uniformly at random.",
"a",
",",
"b",
"=",
"random",
".",
"sample",
"(",
"items",
",",
"2",
")",
"if",
"compare",
"(",
"(",
"a",
",",
"b",
")",
",",
"params",
")",
"==",
"a",
":",
"data",
".",
"append",
"(",
"(",
"a",
",",
"b",
")",
")",
"else",
":",
"data",
".",
"append",
"(",
"(",
"b",
",",
"a",
")",
")",
"return",
"tuple",
"(",
"data",
")"
] |
Generate pairwise comparisons from a Bradley--Terry model.
This function samples comparison pairs independently and uniformly at
random over the ``len(params)`` choose 2 possibilities, and samples the
corresponding comparison outcomes from a Bradley--Terry model parametrized
by ``params``.
Parameters
----------
params : array_like
Model parameters.
n_comparisons : int
Number of comparisons to be returned.
Returns
-------
data : list of (int, int)
Pairwise-comparison samples (see :ref:`data-pairwise`).
|
[
"Generate",
"pairwise",
"comparisons",
"from",
"a",
"Bradley",
"--",
"Terry",
"model",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/utils.py#L286-L317
|
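A sketch combining the two generators (assuming package-level exports). Note that the pairs are drawn with the stdlib `random` module while the outcomes use NumPy's generator, so `np.random.seed` alone does not make the output fully reproducible:

import numpy as np
import choix

np.random.seed(0)
params = choix.generate_params(n_items=4)
data = choix.generate_pairwise(params, n_comparisons=5)
print(data)  # e.g. ((2, 0), (1, 3), ...): (winner, loser) tuples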
lucasmaystre/choix
|
choix/utils.py
|
generate_rankings
|
def generate_rankings(params, n_rankings, size=3):
"""Generate rankings according to a Plackett--Luce model.
This function samples subsets of items (of size ``size``) independently and
uniformly at random, and samples the corresponding partial ranking from a
Plackett--Luce model parametrized by ``params``.
Parameters
----------
params : array_like
Model parameters.
n_rankings : int
Number of rankings to generate.
size : int, optional
Number of items to include in each ranking.
Returns
-------
data : list of numpy.ndarray
A list of (partial) rankings generated according to a Plackett--Luce
model with the specified model parameters.
"""
n = len(params)
items = tuple(range(n))
params = np.asarray(params)
data = list()
for _ in range(n_rankings):
# Pick the alternatives uniformly at random.
alts = random.sample(items, size)
ranking = compare(alts, params, rank=True)
data.append(ranking)
return tuple(data)
|
python
|
def generate_rankings(params, n_rankings, size=3):
"""Generate rankings according to a Plackett--Luce model.
This function samples subsets of items (of size ``size``) independently and
uniformly at random, and samples the corresponding partial ranking from a
Plackett--Luce model parametrized by ``params``.
Parameters
----------
params : array_like
Model parameters.
n_rankings : int
Number of rankings to generate.
size : int, optional
Number of items to include in each ranking.
Returns
-------
data : list of numpy.ndarray
A list of (partial) rankings generated according to a Plackett--Luce
model with the specified model parameters.
"""
n = len(params)
items = tuple(range(n))
params = np.asarray(params)
data = list()
for _ in range(n_rankings):
# Pick the alternatives uniformly at random.
alts = random.sample(items, size)
ranking = compare(alts, params, rank=True)
data.append(ranking)
return tuple(data)
|
[
"def",
"generate_rankings",
"(",
"params",
",",
"n_rankings",
",",
"size",
"=",
"3",
")",
":",
"n",
"=",
"len",
"(",
"params",
")",
"items",
"=",
"tuple",
"(",
"range",
"(",
"n",
")",
")",
"params",
"=",
"np",
".",
"asarray",
"(",
"params",
")",
"data",
"=",
"list",
"(",
")",
"for",
"_",
"in",
"range",
"(",
"n_rankings",
")",
":",
"# Pick the alternatives uniformly at random.",
"alts",
"=",
"random",
".",
"sample",
"(",
"items",
",",
"size",
")",
"ranking",
"=",
"compare",
"(",
"alts",
",",
"params",
",",
"rank",
"=",
"True",
")",
"data",
".",
"append",
"(",
"ranking",
")",
"return",
"tuple",
"(",
"data",
")"
] |
Generate rankings according to a Plackett--Luce model.
This function samples subsets of items (of size ``size``) independently and
uniformly at random, and samples the corresponding partial ranking from a
Plackett--Luce model parametrized by ``params``.
Parameters
----------
params : array_like
Model parameters.
n_rankings : int
Number of rankings to generate.
size : int, optional
Number of items to include in each ranking.
Returns
-------
data : list of numpy.ndarray
A list of (partial) rankings generated according to a Plackett--Luce
model with the specified model parameters.
|
[
"Generate",
"rankings",
"according",
"to",
"a",
"Plackett",
"--",
"Luce",
"model",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/utils.py#L320-L351
|
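A sketch along the same lines (assumed exports, arbitrary sizes):

import choix

params = choix.generate_params(n_items=6)
rankings = choix.generate_rankings(params, n_rankings=4, size=3)
for r in rankings:
    print(r)  # a numpy array of 3 item indices, most preferred first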
lucasmaystre/choix
|
choix/utils.py
|
compare
|
def compare(items, params, rank=False):
"""Generate a comparison outcome that follows Luce's axiom.
This function samples an outcome for the comparison of a subset of items,
from a model parametrized by ``params``. If ``rank`` is True, it returns a
ranking over the items, otherwise it returns a single item.
Parameters
----------
items : list
Subset of items to compare.
params : array_like
Model parameters.
rank : bool, optional
If true, returns a ranking over the items instead of a single item.
Returns
-------
outcome : int or list of int
The chosen item, or a ranking over ``items``.
"""
probs = probabilities(items, params)
if rank:
return np.random.choice(items, size=len(items), replace=False, p=probs)
else:
return np.random.choice(items, p=probs)
|
python
|
def compare(items, params, rank=False):
"""Generate a comparison outcome that follows Luce's axiom.
This function samples an outcome for the comparison of a subset of items,
from a model parametrized by ``params``. If ``rank`` is True, it returns a
ranking over the items, otherwise it returns a single item.
Parameters
----------
items : list
Subset of items to compare.
params : array_like
Model parameters.
rank : bool, optional
If true, returns a ranking over the items instead of a single item.
Returns
-------
outcome : int or list of int
The chosen item, or a ranking over ``items``.
"""
probs = probabilities(items, params)
if rank:
return np.random.choice(items, size=len(items), replace=False, p=probs)
else:
return np.random.choice(items, p=probs)
|
[
"def",
"compare",
"(",
"items",
",",
"params",
",",
"rank",
"=",
"False",
")",
":",
"probs",
"=",
"probabilities",
"(",
"items",
",",
"params",
")",
"if",
"rank",
":",
"return",
"np",
".",
"random",
".",
"choice",
"(",
"items",
",",
"size",
"=",
"len",
"(",
"items",
")",
",",
"replace",
"=",
"False",
",",
"p",
"=",
"probs",
")",
"else",
":",
"return",
"np",
".",
"random",
".",
"choice",
"(",
"items",
",",
"p",
"=",
"probs",
")"
] |
Generate a comparison outcome that follows Luce's axiom.
This function samples an outcome for the comparison of a subset of items,
from a model parametrized by ``params``. If ``rank`` is True, it returns a
ranking over the items, otherwise it returns a single item.
Parameters
----------
items : list
Subset of items to compare.
params : array_like
Model parameters.
rank : bool, optional
If true, returns a ranking over the items instead of a single item.
Returns
-------
outcome : int or list of int
The chosen item, or a ranking over ``items``.
|
[
"Generate",
"a",
"comparison",
"outcome",
"that",
"follows",
"Luce",
"s",
"axiom",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/utils.py#L354-L379
|
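A sketch with strongly separated parameters (assuming package-level export), so the most likely outcomes are easy to anticipate:

import numpy as np
import choix

params = np.array([2.0, 0.0, -2.0])
print(choix.compare([0, 1, 2], params))             # usually 0 (largest parameter)
print(choix.compare([0, 1, 2], params, rank=True))  # a full ranking, best first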
lucasmaystre/choix
|
choix/utils.py
|
probabilities
|
def probabilities(items, params):
"""Compute the comparison outcome probabilities given a subset of items.
This function computes, for each item in ``items``, the probability that it
would win (i.e., be chosen) in a comparison involving the items, given
model parameters.
Parameters
----------
items : list
Subset of items to compare.
params : array_like
Model parameters.
Returns
-------
probs : numpy.ndarray
A probability distribution over ``items``.
"""
params = np.asarray(params)
return softmax(params.take(items))
|
python
|
def probabilities(items, params):
"""Compute the comparison outcome probabilities given a subset of items.
This function computes, for each item in ``items``, the probability that it
would win (i.e., be chosen) in a comparison involving the items, given
model parameters.
Parameters
----------
items : list
Subset of items to compare.
params : array_like
Model parameters.
Returns
-------
probs : numpy.ndarray
A probability distribution over ``items``.
"""
params = np.asarray(params)
return softmax(params.take(items))
|
[
"def",
"probabilities",
"(",
"items",
",",
"params",
")",
":",
"params",
"=",
"np",
".",
"asarray",
"(",
"params",
")",
"return",
"softmax",
"(",
"params",
".",
"take",
"(",
"items",
")",
")"
] |
Compute the comparison outcome probabilities given a subset of items.
This function computes, for each item in ``items``, the probability that it
would win (i.e., be chosen) in a comparison involving the items, given
model parameters.
Parameters
----------
items : list
Subset of items to compare.
params : array_like
Model parameters.
Returns
-------
probs : numpy.ndarray
A probability distribution over ``items``.
|
[
"Compute",
"the",
"comparison",
"outcome",
"probabilities",
"given",
"a",
"subset",
"of",
"items",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/utils.py#L382-L402
|
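A sketch verifying that the result is a softmax over the selected parameters (made-up values, assumed export):

import numpy as np
import choix

params = np.array([1.0, 0.0, -1.0, 0.5])
probs = choix.probabilities([0, 2], params)
manual = np.exp([1.0, -1.0]) / np.exp([1.0, -1.0]).sum()
print(np.allclose(probs, manual))  # True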
lucasmaystre/choix
|
choix/opt.py
|
opt_pairwise
|
def opt_pairwise(n_items, data, alpha=1e-6, method="Newton-CG",
initial_params=None, max_iter=None, tol=1e-5):
"""Compute the ML estimate of model parameters using ``scipy.optimize``.
This function computes the maximum-likelihood estimate of model parameters
given pairwise-comparison data (see :ref:`data-pairwise`), using optimizers
provided by the ``scipy.optimize`` module.
If ``alpha > 0``, the function returns the maximum a-posteriori (MAP)
estimate under an isotropic Gaussian prior with variance ``1 / alpha``. See
:ref:`regularization` for details.
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Pairwise-comparison data.
alpha : float, optional
Regularization strength.
method : str, optional
Optimization method. Either "BFGS" or "Newton-CG".
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Tolerance for termination (method-specific).
Returns
-------
params : numpy.ndarray
The (penalized) ML estimate of model parameters.
Raises
------
ValueError
If the method is not "BFGS" or "Newton-CG".
"""
fcts = PairwiseFcts(data, alpha)
return _opt(n_items, fcts, method, initial_params, max_iter, tol)
|
python
|
def opt_pairwise(n_items, data, alpha=1e-6, method="Newton-CG",
initial_params=None, max_iter=None, tol=1e-5):
"""Compute the ML estimate of model parameters using ``scipy.optimize``.
This function computes the maximum-likelihood estimate of model parameters
given pairwise-comparison data (see :ref:`data-pairwise`), using optimizers
provided by the ``scipy.optimize`` module.
If ``alpha > 0``, the function returns the maximum a-posteriori (MAP)
estimate under an isotropic Gaussian prior with variance ``1 / alpha``. See
:ref:`regularization` for details.
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Pairwise-comparison data.
alpha : float, optional
Regularization strength.
method : str, optional
Optimization method. Either "BFGS" or "Newton-CG".
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Tolerance for termination (method-specific).
Returns
-------
params : numpy.ndarray
The (penalized) ML estimate of model parameters.
Raises
------
ValueError
If the method is not "BFGS" or "Newton-CG".
"""
fcts = PairwiseFcts(data, alpha)
return _opt(n_items, fcts, method, initial_params, max_iter, tol)
|
[
"def",
"opt_pairwise",
"(",
"n_items",
",",
"data",
",",
"alpha",
"=",
"1e-6",
",",
"method",
"=",
"\"Newton-CG\"",
",",
"initial_params",
"=",
"None",
",",
"max_iter",
"=",
"None",
",",
"tol",
"=",
"1e-5",
")",
":",
"fcts",
"=",
"PairwiseFcts",
"(",
"data",
",",
"alpha",
")",
"return",
"_opt",
"(",
"n_items",
",",
"fcts",
",",
"method",
",",
"initial_params",
",",
"max_iter",
",",
"tol",
")"
] |
Compute the ML estimate of model parameters using ``scipy.optimize``.
This function computes the maximum-likelihood estimate of model parameters
given pairwise-comparison data (see :ref:`data-pairwise`), using optimizers
provided by the ``scipy.optimize`` module.
If ``alpha > 0``, the function returns the maximum a-posteriori (MAP)
estimate under an isotropic Gaussian prior with variance ``1 / alpha``. See
:ref:`regularization` for details.
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Pairwise-comparison data.
alpha : float, optional
Regularization strength.
method : str, optional
Optimization method. Either "BFGS" or "Newton-CG".
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Tolerance for termination (method-specific).
Returns
-------
params : numpy.ndarray
The (penalized) ML estimate of model parameters.
Raises
------
ValueError
If the method is not "BFGS" or "Newton-CG".
|
[
"Compute",
"the",
"ML",
"estimate",
"of",
"model",
"parameters",
"using",
"scipy",
".",
"optimize",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/opt.py#L126-L166
|
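A sketch on made-up comparisons (assuming package-level export); the comparison graph is strongly connected, so the estimate stays finite even with the tiny default `alpha`:

import choix

data = [(0, 1), (0, 2), (1, 2), (2, 3), (3, 1), (1, 0)]
params = choix.opt_pairwise(n_items=4, data=data)
print(params)  # item 0, which wins most and loses least, should rank on top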
lucasmaystre/choix
|
choix/opt.py
|
opt_rankings
|
def opt_rankings(n_items, data, alpha=1e-6, method="Newton-CG",
initial_params=None, max_iter=None, tol=1e-5):
"""Compute the ML estimate of model parameters using ``scipy.optimize``.
This function computes the maximum-likelihood estimate of model parameters
given ranking data (see :ref:`data-rankings`), using optimizers provided by
the ``scipy.optimize`` module.
If ``alpha > 0``, the function returns the maximum a-posteriori (MAP)
estimate under an isotropic Gaussian prior with variance ``1 / alpha``. See
:ref:`regularization` for details.
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Ranking data.
alpha : float, optional
Regularization strength.
method : str, optional
Optimization method. Either "BFGS" or "Newton-CG".
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Tolerance for termination (method-specific).
Returns
-------
params : numpy.ndarray
The (penalized) ML estimate of model parameters.
Raises
------
ValueError
If the method is not "BFGS" or "Newton-CG".
"""
fcts = Top1Fcts.from_rankings(data, alpha)
return _opt(n_items, fcts, method, initial_params, max_iter, tol)
|
python
|
def opt_rankings(n_items, data, alpha=1e-6, method="Newton-CG",
initial_params=None, max_iter=None, tol=1e-5):
"""Compute the ML estimate of model parameters using ``scipy.optimize``.
This function computes the maximum-likelihood estimate of model parameters
given ranking data (see :ref:`data-rankings`), using optimizers provided by
the ``scipy.optimize`` module.
If ``alpha > 0``, the function returns the maximum a-posteriori (MAP)
estimate under an isotropic Gaussian prior with variance ``1 / alpha``. See
:ref:`regularization` for details.
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Ranking data.
alpha : float, optional
Regularization strength.
method : str, optional
Optimization method. Either "BFGS" or "Newton-CG".
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Tolerance for termination (method-specific).
Returns
-------
params : numpy.ndarray
The (penalized) ML estimate of model parameters.
Raises
------
ValueError
If the method is not "BFGS" or "Newton-CG".
"""
fcts = Top1Fcts.from_rankings(data, alpha)
return _opt(n_items, fcts, method, initial_params, max_iter, tol)
|
[
"def",
"opt_rankings",
"(",
"n_items",
",",
"data",
",",
"alpha",
"=",
"1e-6",
",",
"method",
"=",
"\"Newton-CG\"",
",",
"initial_params",
"=",
"None",
",",
"max_iter",
"=",
"None",
",",
"tol",
"=",
"1e-5",
")",
":",
"fcts",
"=",
"Top1Fcts",
".",
"from_rankings",
"(",
"data",
",",
"alpha",
")",
"return",
"_opt",
"(",
"n_items",
",",
"fcts",
",",
"method",
",",
"initial_params",
",",
"max_iter",
",",
"tol",
")"
] |
Compute the ML estimate of model parameters using ``scipy.optimize``.
This function computes the maximum-likelihood estimate of model parameters
given ranking data (see :ref:`data-rankings`), using optimizers provided by
the ``scipy.optimize`` module.
If ``alpha > 0``, the function returns the maximum a-posteriori (MAP)
estimate under an isotropic Gaussian prior with variance ``1 / alpha``. See
:ref:`regularization` for details.
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Ranking data.
alpha : float, optional
Regularization strength.
method : str, optional
Optimization method. Either "BFGS" or "Newton-CG".
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Tolerance for termination (method-specific).
Returns
-------
params : numpy.ndarray
The (penalized) ML estimate of model parameters.
Raises
------
ValueError
If the method is not "BFGS" or "Newton-CG".
|
[
"Compute",
"the",
"ML",
"estimate",
"of",
"model",
"parameters",
"using",
"scipy",
".",
"optimize",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/opt.py#L169-L209
|
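The same workflow with (made-up) ranking data, each ranking listing items from most to least preferred:

import choix

data = [(0, 1, 2, 3), (1, 0, 3, 2), (0, 2, 1, 3)]
params = choix.opt_rankings(n_items=4, data=data)
print(params)  # item 0 tops two of the three rankings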
lucasmaystre/choix
|
choix/opt.py
|
opt_top1
|
def opt_top1(n_items, data, alpha=1e-6, method="Newton-CG",
initial_params=None, max_iter=None, tol=1e-5):
"""Compute the ML estimate of model parameters using ``scipy.optimize``.
This function computes the maximum-likelihood estimate of model parameters
given top-1 data (see :ref:`data-top1`), using optimizers provided by the
``scipy.optimize`` module.
If ``alpha > 0``, the function returns the maximum a-posteriori (MAP)
estimate under an isotropic Gaussian prior with variance ``1 / alpha``. See
:ref:`regularization` for details.
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Top-1 data.
alpha : float, optional
Regularization strength.
method : str, optional
Optimization method. Either "BFGS" or "Newton-CG".
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Tolerance for termination (method-specific).
Returns
-------
params : numpy.ndarray
The (penalized) ML estimate of model parameters.
Raises
------
ValueError
If the method is not "BFGS" or "Newton-CG".
"""
fcts = Top1Fcts(data, alpha)
return _opt(n_items, fcts, method, initial_params, max_iter, tol)
|
python
|
def opt_top1(n_items, data, alpha=1e-6, method="Newton-CG",
initial_params=None, max_iter=None, tol=1e-5):
"""Compute the ML estimate of model parameters using ``scipy.optimize``.
This function computes the maximum-likelihood estimate of model parameters
given top-1 data (see :ref:`data-top1`), using optimizers provided by the
``scipy.optimize`` module.
If ``alpha > 0``, the function returns the maximum a-posteriori (MAP)
estimate under an isotropic Gaussian prior with variance ``1 / alpha``. See
:ref:`regularization` for details.
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Top-1 data.
alpha : float, optional
Regularization strength.
method : str, optional
Optimization method. Either "BFGS" or "Newton-CG".
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Tolerance for termination (method-specific).
Returns
-------
params : numpy.ndarray
The (penalized) ML estimate of model parameters.
Raises
------
ValueError
If the method is not "BFGS" or "Newton-CG".
"""
fcts = Top1Fcts(data, alpha)
return _opt(n_items, fcts, method, initial_params, max_iter, tol)
|
[
"def",
"opt_top1",
"(",
"n_items",
",",
"data",
",",
"alpha",
"=",
"1e-6",
",",
"method",
"=",
"\"Newton-CG\"",
",",
"initial_params",
"=",
"None",
",",
"max_iter",
"=",
"None",
",",
"tol",
"=",
"1e-5",
")",
":",
"fcts",
"=",
"Top1Fcts",
"(",
"data",
",",
"alpha",
")",
"return",
"_opt",
"(",
"n_items",
",",
"fcts",
",",
"method",
",",
"initial_params",
",",
"max_iter",
",",
"tol",
")"
] |
Compute the ML estimate of model parameters using ``scipy.optimize``.
This function computes the maximum-likelihood estimate of model parameters
given top-1 data (see :ref:`data-top1`), using optimizers provided by the
``scipy.optimize`` module.
If ``alpha > 0``, the function returns the maximum a-posteriori (MAP)
estimate under an isotropic Gaussian prior with variance ``1 / alpha``. See
:ref:`regularization` for details.
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Top-1 data.
alpha : float, optional
Regularization strength.
method : str, optional
Optimization method. Either "BFGS" or "Newton-CG".
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Tolerance for termination (method-specific).
Returns
-------
params : numpy.ndarray
The (penalized) ML estimate of model parameters.
Raises
------
ValueError
If the method is not "BFGS" or "Newton-CG".
|
[
"Compute",
"the",
"ML",
"estimate",
"of",
"model",
"parameters",
"using",
"scipy",
".",
"optimize",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/opt.py#L212-L252
|
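And with (made-up) top-1 data; each observation pairs a winner with the tuple of alternatives it beat:

import choix

data = [(0, (1, 2)), (1, (2,)), (2, (0,))]
params = choix.opt_top1(n_items=3, data=data)
print(params)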
lucasmaystre/choix
|
choix/opt.py
|
PairwiseFcts.objective
|
def objective(self, params):
"""Compute the negative penalized log-likelihood."""
val = self._penalty * np.sum(params**2)
for win, los in self._data:
val += np.logaddexp(0, -(params[win] - params[los]))
return val
|
python
|
def objective(self, params):
"""Compute the negative penalized log-likelihood."""
val = self._penalty * np.sum(params**2)
for win, los in self._data:
val += np.logaddexp(0, -(params[win] - params[los]))
return val
|
[
"def",
"objective",
"(",
"self",
",",
"params",
")",
":",
"val",
"=",
"self",
".",
"_penalty",
"*",
"np",
".",
"sum",
"(",
"params",
"**",
"2",
")",
"for",
"win",
",",
"los",
"in",
"self",
".",
"_data",
":",
"val",
"+=",
"np",
".",
"logaddexp",
"(",
"0",
",",
"-",
"(",
"params",
"[",
"win",
"]",
"-",
"params",
"[",
"los",
"]",
")",
")",
"return",
"val"
] |
Compute the negative penalized log-likelihood.
|
[
"Compute",
"the",
"negative",
"penalized",
"log",
"-",
"likelihood",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/opt.py#L28-L33
|
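This objective is just the L2 penalty minus the pairwise log-likelihood from choix/utils.py; a numeric sketch of that identity (the penalty value is made up, standing in for `self._penalty`):

import numpy as np
import choix

penalty = 0.5
data = [(0, 1), (1, 2)]
params = np.array([0.3, -0.1, -0.2])
obj = penalty * np.sum(params ** 2) - choix.log_likelihood_pairwise(data, params)
print(obj)  # the value objective() would return for these inputs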
lucasmaystre/choix
|
choix/opt.py
|
Top1Fcts.from_rankings
|
def from_rankings(cls, data, penalty):
"""Alternative constructor for ranking data."""
top1 = list()
for ranking in data:
for i, winner in enumerate(ranking[:-1]):
top1.append((winner, ranking[i+1:]))
return cls(top1, penalty)
|
python
|
def from_rankings(cls, data, penalty):
"""Alternative constructor for ranking data."""
top1 = list()
for ranking in data:
for i, winner in enumerate(ranking[:-1]):
top1.append((winner, ranking[i+1:]))
return cls(top1, penalty)
|
[
"def",
"from_rankings",
"(",
"cls",
",",
"data",
",",
"penalty",
")",
":",
"top1",
"=",
"list",
"(",
")",
"for",
"ranking",
"in",
"data",
":",
"for",
"i",
",",
"winner",
"in",
"enumerate",
"(",
"ranking",
"[",
":",
"-",
"1",
"]",
")",
":",
"top1",
".",
"append",
"(",
"(",
"winner",
",",
"ranking",
"[",
"i",
"+",
"1",
":",
"]",
")",
")",
"return",
"cls",
"(",
"top1",
",",
"penalty",
")"
] |
Alternative constructor for ranking data.
|
[
"Alternative",
"constructor",
"for",
"ranking",
"data",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/opt.py#L69-L75
|
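The transformation is easy to trace by hand; this standalone snippet reproduces the decomposition of one ranking into successive top-1 observations:

ranking = (2, 0, 1)  # item 2 first, then 0, then 1
top1 = [(winner, ranking[i + 1:]) for i, winner in enumerate(ranking[:-1])]
print(top1)  # [(2, (0, 1)), (0, (1,))]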
lucasmaystre/choix
|
choix/opt.py
|
Top1Fcts.objective
|
def objective(self, params):
"""Compute the negative penalized log-likelihood."""
val = self._penalty * np.sum(params**2)
for winner, losers in self._data:
idx = np.append(winner, losers)
val += logsumexp(params.take(idx) - params[winner])
return val
|
python
|
def objective(self, params):
"""Compute the negative penalized log-likelihood."""
val = self._penalty * np.sum(params**2)
for winner, losers in self._data:
idx = np.append(winner, losers)
val += logsumexp(params.take(idx) - params[winner])
return val
|
[
"def",
"objective",
"(",
"self",
",",
"params",
")",
":",
"val",
"=",
"self",
".",
"_penalty",
"*",
"np",
".",
"sum",
"(",
"params",
"**",
"2",
")",
"for",
"winner",
",",
"losers",
"in",
"self",
".",
"_data",
":",
"idx",
"=",
"np",
".",
"append",
"(",
"winner",
",",
"losers",
")",
"val",
"+=",
"logsumexp",
"(",
"params",
".",
"take",
"(",
"idx",
")",
"-",
"params",
"[",
"winner",
"]",
")",
"return",
"val"
] |
Compute the negative penalized log-likelihood.
|
[
"Compute",
"the",
"negative",
"penalized",
"log",
"-",
"likelihood",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/opt.py#L77-L83
|
lucasmaystre/choix
|
choix/mm.py
|
_mm
|
def _mm(n_items, data, initial_params, alpha, max_iter, tol, mm_fun):
"""
Iteratively refine MM estimates until convergence.
Raises
------
RuntimeError
If the algorithm does not converge after `max_iter` iterations.
"""
if initial_params is None:
params = np.zeros(n_items)
else:
params = initial_params
converged = NormOfDifferenceTest(tol=tol, order=1)
for _ in range(max_iter):
nums, denoms = mm_fun(n_items, data, params)
params = log_transform((nums + alpha) / (denoms + alpha))
if converged(params):
return params
raise RuntimeError("Did not converge after {} iterations".format(max_iter))
|
python
|
def _mm(n_items, data, initial_params, alpha, max_iter, tol, mm_fun):
"""
Iteratively refine MM estimates until convergence.
Raises
------
RuntimeError
If the algorithm does not converge after `max_iter` iterations.
"""
if initial_params is None:
params = np.zeros(n_items)
else:
params = initial_params
converged = NormOfDifferenceTest(tol=tol, order=1)
for _ in range(max_iter):
nums, denoms = mm_fun(n_items, data, params)
params = log_transform((nums + alpha) / (denoms + alpha))
if converged(params):
return params
raise RuntimeError("Did not converge after {} iterations".format(max_iter))
|
[
"def",
"_mm",
"(",
"n_items",
",",
"data",
",",
"initial_params",
",",
"alpha",
",",
"max_iter",
",",
"tol",
",",
"mm_fun",
")",
":",
"if",
"initial_params",
"is",
"None",
":",
"params",
"=",
"np",
".",
"zeros",
"(",
"n_items",
")",
"else",
":",
"params",
"=",
"initial_params",
"converged",
"=",
"NormOfDifferenceTest",
"(",
"tol",
"=",
"tol",
",",
"order",
"=",
"1",
")",
"for",
"_",
"in",
"range",
"(",
"max_iter",
")",
":",
"nums",
",",
"denoms",
"=",
"mm_fun",
"(",
"n_items",
",",
"data",
",",
"params",
")",
"params",
"=",
"log_transform",
"(",
"(",
"nums",
"+",
"alpha",
")",
"/",
"(",
"denoms",
"+",
"alpha",
")",
")",
"if",
"converged",
"(",
"params",
")",
":",
"return",
"params",
"raise",
"RuntimeError",
"(",
"\"Did not converge after {} iterations\"",
".",
"format",
"(",
"max_iter",
")",
")"
] |
Iteratively refine MM estimates until convergence.
Raises
------
RuntimeError
If the algorithm does not converge after `max_iter` iterations.
|
[
"Iteratively",
"refine",
"MM",
"estimates",
"until",
"convergence",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/mm.py#L10-L29
|
lucasmaystre/choix
|
choix/mm.py
|
_mm_pairwise
|
def _mm_pairwise(n_items, data, params):
"""Inner loop of MM algorithm for pairwise data."""
weights = exp_transform(params)
wins = np.zeros(n_items, dtype=float)
denoms = np.zeros(n_items, dtype=float)
for winner, loser in data:
wins[winner] += 1.0
val = 1.0 / (weights[winner] + weights[loser])
denoms[winner] += val
denoms[loser] += val
return wins, denoms
|
python
|
def _mm_pairwise(n_items, data, params):
"""Inner loop of MM algorithm for pairwise data."""
weights = exp_transform(params)
wins = np.zeros(n_items, dtype=float)
denoms = np.zeros(n_items, dtype=float)
for winner, loser in data:
wins[winner] += 1.0
val = 1.0 / (weights[winner] + weights[loser])
denoms[winner] += val
denoms[loser] += val
return wins, denoms
|
[
"def",
"_mm_pairwise",
"(",
"n_items",
",",
"data",
",",
"params",
")",
":",
"weights",
"=",
"exp_transform",
"(",
"params",
")",
"wins",
"=",
"np",
".",
"zeros",
"(",
"n_items",
",",
"dtype",
"=",
"float",
")",
"denoms",
"=",
"np",
".",
"zeros",
"(",
"n_items",
",",
"dtype",
"=",
"float",
")",
"for",
"winner",
",",
"loser",
"in",
"data",
":",
"wins",
"[",
"winner",
"]",
"+=",
"1.0",
"val",
"=",
"1.0",
"/",
"(",
"weights",
"[",
"winner",
"]",
"+",
"weights",
"[",
"loser",
"]",
")",
"denoms",
"[",
"winner",
"]",
"+=",
"val",
"denoms",
"[",
"loser",
"]",
"+=",
"val",
"return",
"wins",
",",
"denoms"
] |
Inner loop of MM algorithm for pairwise data.
|
[
"Inner",
"loop",
"of",
"MM",
"algorithm",
"for",
"pairwise",
"data",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/mm.py#L32-L42
|
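One MM update can be reproduced with plain NumPy (a sketch mirroring the loop above, up to the normalization hidden inside `exp_transform`; the data are made up):

import numpy as np

data = [(0, 1), (1, 0), (0, 1)]   # item 0 wins twice, item 1 once
params = np.zeros(2)
weights = np.exp(params)
wins = np.zeros(2)
denoms = np.zeros(2)
for winner, loser in data:
    wins[winner] += 1.0
    val = 1.0 / (weights[winner] + weights[loser])
    denoms[winner] += val
    denoms[loser] += val
new = np.log(wins / denoms)
print(new - new.mean())  # next iterate, mean-centered: item 0 moves up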
lucasmaystre/choix
|
choix/mm.py
|
mm_pairwise
|
def mm_pairwise(
n_items, data, initial_params=None, alpha=0.0,
max_iter=10000, tol=1e-8):
"""Compute the ML estimate of model parameters using the MM algorithm.
This function computes the maximum-likelihood (ML) estimate of model
parameters given pairwise-comparison data (see :ref:`data-pairwise`), using
the minorization-maximization (MM) algorithm [Hun04]_, [CD12]_.
If ``alpha > 0``, the function returns the maximum a-posteriori (MAP)
estimate under a (peaked) Dirichlet prior. See :ref:`regularization` for
details.
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Pairwise-comparison data.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
alpha : float, optional
Regularization parameter.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
"""
return _mm(
n_items, data, initial_params, alpha, max_iter, tol, _mm_pairwise)
|
python
|
def mm_pairwise(
n_items, data, initial_params=None, alpha=0.0,
max_iter=10000, tol=1e-8):
"""Compute the ML estimate of model parameters using the MM algorithm.
This function computes the maximum-likelihood (ML) estimate of model
parameters given pairwise-comparison data (see :ref:`data-pairwise`), using
the minorization-maximization (MM) algorithm [Hun04]_, [CD12]_.
If ``alpha > 0``, the function returns the maximum a-posteriori (MAP)
estimate under a (peaked) Dirichlet prior. See :ref:`regularization` for
details.
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Pairwise-comparison data.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
alpha : float, optional
Regularization parameter.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
"""
return _mm(
n_items, data, initial_params, alpha, max_iter, tol, _mm_pairwise)
|
[
"def",
"mm_pairwise",
"(",
"n_items",
",",
"data",
",",
"initial_params",
"=",
"None",
",",
"alpha",
"=",
"0.0",
",",
"max_iter",
"=",
"10000",
",",
"tol",
"=",
"1e-8",
")",
":",
"return",
"_mm",
"(",
"n_items",
",",
"data",
",",
"initial_params",
",",
"alpha",
",",
"max_iter",
",",
"tol",
",",
"_mm_pairwise",
")"
] |
Compute the ML estimate of model parameters using the MM algorithm.
This function computes the maximum-likelihood (ML) estimate of model
parameters given pairwise-comparison data (see :ref:`data-pairwise`), using
the minorization-maximization (MM) algorithm [Hun04]_, [CD12]_.
If ``alpha > 0``, the function returns the maximum a-posteriori (MAP)
estimate under a (peaked) Dirichlet prior. See :ref:`regularization` for
details.
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Pairwise-comparison data.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
alpha : float, optional
Regularization parameter.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
|
[
"Compute",
"the",
"ML",
"estimate",
"of",
"model",
"parameters",
"using",
"the",
"MM",
"algorithm",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/mm.py#L45-L80
|
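A sketch on made-up data (assuming package-level export); every item wins at least once and the comparison graph is strongly connected, so the pure ML estimate exists and the MM iteration converges:

import choix

data = [(0, 1), (0, 2), (1, 2), (2, 0), (1, 0), (0, 1)]
params = choix.mm_pairwise(n_items=3, data=data)
print(params)
# Should agree with the scipy-based optimizer to a few decimal places
# (the two differ only in regularization and stopping tolerances):
print(choix.opt_pairwise(3, data))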
lucasmaystre/choix
|
choix/mm.py
|
_mm_rankings
|
def _mm_rankings(n_items, data, params):
"""Inner loop of MM algorithm for ranking data."""
weights = exp_transform(params)
wins = np.zeros(n_items, dtype=float)
denoms = np.zeros(n_items, dtype=float)
for ranking in data:
sum_ = weights.take(ranking).sum()
for i, winner in enumerate(ranking[:-1]):
wins[winner] += 1
val = 1.0 / sum_
for item in ranking[i:]:
denoms[item] += val
sum_ -= weights[winner]
return wins, denoms
|
python
|
def _mm_rankings(n_items, data, params):
"""Inner loop of MM algorithm for ranking data."""
weights = exp_transform(params)
wins = np.zeros(n_items, dtype=float)
denoms = np.zeros(n_items, dtype=float)
for ranking in data:
sum_ = weights.take(ranking).sum()
for i, winner in enumerate(ranking[:-1]):
wins[winner] += 1
val = 1.0 / sum_
for item in ranking[i:]:
denoms[item] += val
sum_ -= weights[winner]
return wins, denoms
|
[
"def",
"_mm_rankings",
"(",
"n_items",
",",
"data",
",",
"params",
")",
":",
"weights",
"=",
"exp_transform",
"(",
"params",
")",
"wins",
"=",
"np",
".",
"zeros",
"(",
"n_items",
",",
"dtype",
"=",
"float",
")",
"denoms",
"=",
"np",
".",
"zeros",
"(",
"n_items",
",",
"dtype",
"=",
"float",
")",
"for",
"ranking",
"in",
"data",
":",
"sum_",
"=",
"weights",
".",
"take",
"(",
"ranking",
")",
".",
"sum",
"(",
")",
"for",
"i",
",",
"winner",
"in",
"enumerate",
"(",
"ranking",
"[",
":",
"-",
"1",
"]",
")",
":",
"wins",
"[",
"winner",
"]",
"+=",
"1",
"val",
"=",
"1.0",
"/",
"sum_",
"for",
"item",
"in",
"ranking",
"[",
"i",
":",
"]",
":",
"denoms",
"[",
"item",
"]",
"+=",
"val",
"sum_",
"-=",
"weights",
"[",
"winner",
"]",
"return",
"wins",
",",
"denoms"
] |
Inner loop of MM algorithm for ranking data.
|
[
"Inner",
"loop",
"of",
"MM",
"algorithm",
"for",
"ranking",
"data",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/mm.py#L83-L96
|
lucasmaystre/choix
|
choix/mm.py
|
mm_rankings
|
def mm_rankings(n_items, data, initial_params=None, alpha=0.0,
max_iter=10000, tol=1e-8):
"""Compute the ML estimate of model parameters using the MM algorithm.
This function computes the maximum-likelihood (ML) estimate of model
parameters given ranking data (see :ref:`data-rankings`), using the
minorization-maximization (MM) algorithm [Hun04]_, [CD12]_.
If ``alpha > 0``, the function returns the maximum a-posteriori (MAP)
estimate under a (peaked) Dirichlet prior. See :ref:`regularization` for
details.
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Ranking data.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
alpha : float, optional
Regularization parameter.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
"""
return _mm(n_items, data, initial_params, alpha, max_iter, tol,
_mm_rankings)
|
python
|
def mm_rankings(n_items, data, initial_params=None, alpha=0.0,
max_iter=10000, tol=1e-8):
"""Compute the ML estimate of model parameters using the MM algorithm.
This function computes the maximum-likelihood (ML) estimate of model
parameters given ranking data (see :ref:`data-rankings`), using the
minorization-maximization (MM) algorithm [Hun04]_, [CD12]_.
If ``alpha > 0``, the function returns the maximum a-posteriori (MAP)
estimate under a (peaked) Dirichlet prior. See :ref:`regularization` for
details.
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Ranking data.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
alpha : float, optional
Regularization parameter.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
"""
return _mm(n_items, data, initial_params, alpha, max_iter, tol,
_mm_rankings)
|
[
"def",
"mm_rankings",
"(",
"n_items",
",",
"data",
",",
"initial_params",
"=",
"None",
",",
"alpha",
"=",
"0.0",
",",
"max_iter",
"=",
"10000",
",",
"tol",
"=",
"1e-8",
")",
":",
"return",
"_mm",
"(",
"n_items",
",",
"data",
",",
"initial_params",
",",
"alpha",
",",
"max_iter",
",",
"tol",
",",
"_mm_rankings",
")"
] |
Compute the ML estimate of model parameters using the MM algorithm.
This function computes the maximum-likelihood (ML) estimate of model
parameters given ranking data (see :ref:`data-rankings`), using the
minorization-maximization (MM) algorithm [Hun04]_, [CD12]_.
If ``alpha > 0``, the function returns the maximum a-posteriori (MAP)
estimate under a (peaked) Dirichlet prior. See :ref:`regularization` for
details.
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Ranking data.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
alpha : float, optional
Regularization parameter.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
|
[
"Compute",
"the",
"ML",
"estimate",
"of",
"model",
"parameters",
"using",
"the",
"MM",
"algorithm",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/mm.py#L99-L133
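A minimal usage sketch for the function above, assuming the choix package is installed. Per the inner-loop code, each ranking is a list of item indices ordered from most to least preferred; the sample data here is illustrative.

import choix

n_items = 4
# Each ranking lists item indices from most to least preferred
# (the `data` format consumed by the inner loop above).
data = [
    [0, 2, 1, 3],
    [1, 0, 3, 2],
    [0, 1, 2, 3],
]

# A small alpha gives the MAP estimate under a peaked Dirichlet prior,
# which also keeps the estimate finite when some item never wins.
params = choix.mm_rankings(n_items, data, alpha=0.01)
print(params)                  # log-strength parameters, one per item
print(params.argsort()[::-1])  # items sorted from strongest to weakest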
|
lucasmaystre/choix
|
choix/mm.py
|
_mm_top1
|
def _mm_top1(n_items, data, params):
"""Inner loop of MM algorithm for top1 data."""
weights = exp_transform(params)
wins = np.zeros(n_items, dtype=float)
denoms = np.zeros(n_items, dtype=float)
for winner, losers in data:
wins[winner] += 1
val = 1 / (weights.take(losers).sum() + weights[winner])
for item in itertools.chain([winner], losers):
denoms[item] += val
return wins, denoms
|
python
|
def _mm_top1(n_items, data, params):
"""Inner loop of MM algorithm for top1 data."""
weights = exp_transform(params)
wins = np.zeros(n_items, dtype=float)
denoms = np.zeros(n_items, dtype=float)
for winner, losers in data:
wins[winner] += 1
val = 1 / (weights.take(losers).sum() + weights[winner])
for item in itertools.chain([winner], losers):
denoms[item] += val
return wins, denoms
|
[
"def",
"_mm_top1",
"(",
"n_items",
",",
"data",
",",
"params",
")",
":",
"weights",
"=",
"exp_transform",
"(",
"params",
")",
"wins",
"=",
"np",
".",
"zeros",
"(",
"n_items",
",",
"dtype",
"=",
"float",
")",
"denoms",
"=",
"np",
".",
"zeros",
"(",
"n_items",
",",
"dtype",
"=",
"float",
")",
"for",
"winner",
",",
"losers",
"in",
"data",
":",
"wins",
"[",
"winner",
"]",
"+=",
"1",
"val",
"=",
"1",
"/",
"(",
"weights",
".",
"take",
"(",
"losers",
")",
".",
"sum",
"(",
")",
"+",
"weights",
"[",
"winner",
"]",
")",
"for",
"item",
"in",
"itertools",
".",
"chain",
"(",
"[",
"winner",
"]",
",",
"losers",
")",
":",
"denoms",
"[",
"item",
"]",
"+=",
"val",
"return",
"wins",
",",
"denoms"
] |
Inner loop of MM algorithm for top1 data.
|
[
"Inner",
"loop",
"of",
"MM",
"algorithm",
"for",
"top1",
"data",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/mm.py#L136-L146
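The shared `_mm` driver that consumes these (wins, denoms) pairs is not reproduced in this record. Below is a sketch of what such an outer loop typically looks like for this family of MM algorithms, using the Hunter-style update in which the new weight of an item is proportional to its wins over its accumulated denominator, smoothed by `alpha`. All names and details here are illustrative assumptions, not the library's internals.

import numpy as np

def mm_outer_loop(n_items, data, inner_fun, alpha=0.0,
                  max_iter=10000, tol=1e-8):
    # Illustrative sketch only: the real `_mm` driver in choix may differ.
    params = np.zeros(n_items)
    for _ in range(max_iter):
        wins, denoms = inner_fun(n_items, data, params)
        # Hunter-style MM update: weight_i <- (wins_i + alpha) / (denoms_i + alpha),
        # re-centered in log space so the parameters stay identifiable.
        new_params = np.log((wins + alpha) / (denoms + alpha))
        new_params -= new_params.mean()
        if np.linalg.norm(new_params - params, ord=1) < tol:
            return new_params
        params = new_params
    raise RuntimeError("MM algorithm did not converge")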
|
lucasmaystre/choix
|
choix/mm.py
|
mm_top1
|
def mm_top1(
n_items, data, initial_params=None, alpha=0.0,
max_iter=10000, tol=1e-8):
"""Compute the ML estimate of model parameters using the MM algorithm.
This function computes the maximum-likelihood (ML) estimate of model
parameters given top-1 data (see :ref:`data-top1`), using the
minorization-maximization (MM) algorithm [Hun04]_, [CD12]_.
If ``alpha > 0``, the function returns the maximum a-posteriori (MAP)
estimate under a (peaked) Dirichlet prior. See :ref:`regularization` for
details.
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Top-1 data.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
alpha : float, optional
Regularization parameter.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
"""
return _mm(n_items, data, initial_params, alpha, max_iter, tol, _mm_top1)
|
python
|
def mm_top1(
n_items, data, initial_params=None, alpha=0.0,
max_iter=10000, tol=1e-8):
"""Compute the ML estimate of model parameters using the MM algorithm.
This function computes the maximum-likelihood (ML) estimate of model
parameters given top-1 data (see :ref:`data-top1`), using the
minorization-maximization (MM) algorithm [Hun04]_, [CD12]_.
If ``alpha > 0``, the function returns the maximum a-posteriori (MAP)
estimate under a (peaked) Dirichlet prior. See :ref:`regularization` for
details.
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Top-1 data.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
alpha : float, optional
Regularization parameter.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
"""
return _mm(n_items, data, initial_params, alpha, max_iter, tol, _mm_top1)
|
[
"def",
"mm_top1",
"(",
"n_items",
",",
"data",
",",
"initial_params",
"=",
"None",
",",
"alpha",
"=",
"0.0",
",",
"max_iter",
"=",
"10000",
",",
"tol",
"=",
"1e-8",
")",
":",
"return",
"_mm",
"(",
"n_items",
",",
"data",
",",
"initial_params",
",",
"alpha",
",",
"max_iter",
",",
"tol",
",",
"_mm_top1",
")"
] |
Compute the ML estimate of model parameters using the MM algorithm.
This function computes the maximum-likelihood (ML) estimate of model
parameters given top-1 data (see :ref:`data-top1`), using the
minorization-maximization (MM) algorithm [Hun04]_, [CD12]_.
If ``alpha > 0``, the function returns the maximum a-posteriori (MAP)
estimate under a (peaked) Dirichlet prior. See :ref:`regularization` for
details.
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Top-1 data.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
alpha : float, optional
Regularization parameter.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
|
[
"Compute",
"the",
"ML",
"estimate",
"of",
"model",
"parameters",
"using",
"the",
"MM",
"algorithm",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/mm.py#L149-L183
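A minimal usage sketch for the function above, assuming the choix package is installed. The data format follows the unpacking `for winner, losers in data` in the top-1 inner loop; the sample observations are illustrative.

import choix

n_items = 3
# Top-1 data: (winner, losers) pairs.
data = [
    (0, [1, 2]),  # item 0 chosen over items 1 and 2
    (1, [0]),     # item 1 chosen over item 0
    (0, [2]),
]

params = choix.mm_top1(n_items, data, alpha=0.01)
print(params)  # log-strength parameters, one per item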
|
lucasmaystre/choix
|
choix/mm.py
|
_choicerank
|
def _choicerank(n_items, data, params):
"""Inner loop of ChoiceRank algorithm."""
weights = exp_transform(params)
adj, adj_t, traffic_in, traffic_out = data
# First phase of message passing.
zs = adj.dot(weights)
# Second phase of message passing.
with np.errstate(invalid="ignore"):
denoms = adj_t.dot(traffic_out / zs)
return traffic_in, denoms
|
python
|
def _choicerank(n_items, data, params):
"""Inner loop of ChoiceRank algorithm."""
weights = exp_transform(params)
adj, adj_t, traffic_in, traffic_out = data
# First phase of message passing.
zs = adj.dot(weights)
# Second phase of message passing.
with np.errstate(invalid="ignore"):
denoms = adj_t.dot(traffic_out / zs)
return traffic_in, denoms
|
[
"def",
"_choicerank",
"(",
"n_items",
",",
"data",
",",
"params",
")",
":",
"weights",
"=",
"exp_transform",
"(",
"params",
")",
"adj",
",",
"adj_t",
",",
"traffic_in",
",",
"traffic_out",
"=",
"data",
"# First phase of message passing.",
"zs",
"=",
"adj",
".",
"dot",
"(",
"weights",
")",
"# Second phase of message passing.",
"with",
"np",
".",
"errstate",
"(",
"invalid",
"=",
"\"ignore\"",
")",
":",
"denoms",
"=",
"adj_t",
".",
"dot",
"(",
"traffic_out",
"/",
"zs",
")",
"return",
"traffic_in",
",",
"denoms"
] |
Inner loop of ChoiceRank algorithm.
|
[
"Inner",
"loop",
"of",
"ChoiceRank",
"algorithm",
"."
] |
train
|
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/mm.py#L186-L195
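The inner loop above unpacks `data` into (adj, adj_t, traffic_in, traffic_out). The sketch below replays that two-phase message-passing step on a tiny three-node graph with scipy sparse matrices; the graph, traffic counts, and the hand-built arrays are assumptions for illustration (in the library, a higher-level entry point assembles these from a network).

import numpy as np
from scipy import sparse

# Tiny 3-node graph: adj[i, j] = 1 if there is an edge i -> j.
adj = sparse.csr_matrix(np.array([
    [0, 1, 1],
    [1, 0, 1],
    [0, 1, 0],
], dtype=float))
adj_t = adj.T.tocsr()

traffic_out = np.array([10.0, 6.0, 4.0])  # transitions leaving each node
traffic_in = np.array([5.0, 9.0, 6.0])    # transitions arriving at each node

params = np.zeros(3)
weights = np.exp(params)  # the library derives this via exp_transform(params)

# First phase: each node aggregates the weights of its out-neighbors.
zs = adj.dot(weights)
# Second phase: denominators flow back along reversed edges.
with np.errstate(invalid="ignore"):
    denoms = adj_t.dot(traffic_out / zs)

print(traffic_in)  # plays the role of `wins` in the MM update
print(denoms)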
|