diff --git "a/codeparrot-valid_1020.txt" "b/codeparrot-valid_1020.txt" new file mode 100644--- /dev/null +++ "b/codeparrot-valid_1020.txt" @@ -0,0 +1,10000 @@ + to _hashables. + Attribues: + id: The object's identifier, a 24-character uppercase hexadecimal string. + Usually, objects being created should not set id until the entire + project file structure is built. At that point, UpdateIDs() should + be called on the root object to assign deterministic values for id to + each object in the tree. + parent: The object's parent. This is set by a parent XCObject when a child + object is added to it. + _properties: The object's property dictionary. An object's properties are + described by its class' _schema variable. + """ + + _schema = {} + _should_print_single_line = False + + # See _EncodeString. + _encode_transforms = [] + i = 0 + while i < ord(' '): + _encode_transforms.append('\\U%04x' % i) + i = i + 1 + _encode_transforms[7] = '\\a' + _encode_transforms[8] = '\\b' + _encode_transforms[9] = '\\t' + _encode_transforms[10] = '\\n' + _encode_transforms[11] = '\\v' + _encode_transforms[12] = '\\f' + _encode_transforms[13] = '\\n' + + _alternate_encode_transforms = list(_encode_transforms) + _alternate_encode_transforms[9] = chr(9) + _alternate_encode_transforms[10] = chr(10) + _alternate_encode_transforms[11] = chr(11) + + def __init__(self, properties=None, id=None, parent=None): + self.id = id + self.parent = parent + self._properties = {} + self._hashables = [] + self._SetDefaultsFromSchema() + self.UpdateProperties(properties) + + def __repr__(self): + try: + name = self.Name() + except NotImplementedError: + return '<%s at 0x%x>' % (self.__class__.__name__, id(self)) + return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self)) + + def Copy(self): + """Make a copy of this object. + + The new object will have its own copy of lists and dicts. Any XCObject + objects owned by this object (marked "strong") will be copied in the + new object, even those found in lists. If this object has any weak + references to other XCObjects, the same references are added to the new + object without making a copy. + """ + + that = self.__class__(id=self.id, parent=self.parent) + for key, value in self._properties.iteritems(): + is_strong = self._schema[key][2] + + if isinstance(value, XCObject): + if is_strong: + new_value = value.Copy() + new_value.parent = that + that._properties[key] = new_value + else: + that._properties[key] = value + elif isinstance(value, str) or isinstance(value, unicode) or \ + isinstance(value, int): + that._properties[key] = value + elif isinstance(value, list): + if is_strong: + # If is_strong is True, each element is an XCObject, so it's safe to + # call Copy. + that._properties[key] = [] + for item in value: + new_item = item.Copy() + new_item.parent = that + that._properties[key].append(new_item) + else: + that._properties[key] = value[:] + elif isinstance(value, dict): + # dicts are never strong. + if is_strong: + raise TypeError, 'Strong dict for key ' + key + ' in ' + \ + self.__class__.__name__ + else: + that._properties[key] = value.copy() + else: + raise TypeError, 'Unexpected type ' + value.__class__.__name__ + \ + ' for key ' + key + ' in ' + self.__class__.__name__ + + return that + + def Name(self): + """Return the name corresponding to an object. + + Not all objects necessarily need to be nameable, and not all that do have + a "name" property. Override as needed. 
+ """ + + # If the schema indicates that "name" is required, try to access the + # property even if it doesn't exist. This will result in a KeyError + # being raised for the property that should be present, which seems more + # appropriate than NotImplementedError in this case. + if 'name' in self._properties or \ + ('name' in self._schema and self._schema['name'][3]): + return self._properties['name'] + + raise NotImplementedError, \ + self.__class__.__name__ + ' must implement Name' + + def Comment(self): + """Return a comment string for the object. + + Most objects just use their name as the comment, but PBXProject uses + different values. + + The returned comment is not escaped and does not have any comment marker + strings applied to it. + """ + + return self.Name() + + def Hashables(self): + hashables = [self.__class__.__name__] + + name = self.Name() + if name != None: + hashables.append(name) + + hashables.extend(self._hashables) + + return hashables + + def ComputeIDs(self, recursive=True, overwrite=True, hash=None): + """Set "id" properties deterministically. + + An object's "id" property is set based on a hash of its class type and + name, as well as the class type and name of all ancestor objects. As + such, it is only advisable to call ComputeIDs once an entire project file + tree is built. + + If recursive is True, recurse into all descendant objects and update their + hashes. + + If overwrite is True, any existing value set in the "id" property will be + replaced. + """ + + def _HashUpdate(hash, data): + """Update hash with data's length and contents. + + If the hash were updated only with the value of data, it would be + possible for clowns to induce collisions by manipulating the names of + their objects. By adding the length, it's exceedingly less likely that + ID collisions will be encountered, intentionally or not. + """ + + hash.update(struct.pack('>i', len(data))) + hash.update(data) + + if hash == None: + hash = _new_sha1() + + hashables = self.Hashables() + assert len(hashables) > 0 + for hashable in hashables: + _HashUpdate(hash, hashable) + + if recursive: + for child in self.Children(): + child.ComputeIDs(recursive, overwrite, hash.copy()) + + if overwrite or self.id == None: + # Xcode IDs are only 96 bits (24 hex characters), but a SHA-1 digest is + # is 160 bits. Instead of throwing out 64 bits of the digest, xor them + # into the portion that gets used. + assert hash.digest_size % 4 == 0 + digest_int_count = hash.digest_size / 4 + digest_ints = struct.unpack('>' + 'I' * digest_int_count, hash.digest()) + id_ints = [0, 0, 0] + for index in xrange(0, digest_int_count): + id_ints[index % 3] ^= digest_ints[index] + self.id = '%08X%08X%08X' % tuple(id_ints) + + def EnsureNoIDCollisions(self): + """Verifies that no two objects have the same ID. Checks all descendants. 
+ """ + + ids = {} + descendants = self.Descendants() + for descendant in descendants: + if descendant.id in ids: + other = ids[descendant.id] + raise KeyError, \ + 'Duplicate ID %s, objects "%s" and "%s" in "%s"' % \ + (descendant.id, str(descendant._properties), + str(other._properties), self._properties['rootObject'].Name()) + ids[descendant.id] = descendant + + def Children(self): + """Returns a list of all of this object's owned (strong) children.""" + + children = [] + for property, attributes in self._schema.iteritems(): + (is_list, property_type, is_strong) = attributes[0:3] + if is_strong and property in self._properties: + if not is_list: + children.append(self._properties[property]) + else: + children.extend(self._properties[property]) + return children + + def Descendants(self): + """Returns a list of all of this object's descendants, including this + object. + """ + + children = self.Children() + descendants = [self] + for child in children: + descendants.extend(child.Descendants()) + return descendants + + def PBXProjectAncestor(self): + # The base case for recursion is defined at PBXProject.PBXProjectAncestor. + if self.parent: + return self.parent.PBXProjectAncestor() + return None + + def _EncodeComment(self, comment): + """Encodes a comment to be placed in the project file output, mimicing + Xcode behavior. + """ + + # This mimics Xcode behavior by wrapping the comment in "/*" and "*/". If + # the string already contains a "*/", it is turned into "(*)/". This keeps + # the file writer from outputting something that would be treated as the + # end of a comment in the middle of something intended to be entirely a + # comment. + + return '/* ' + comment.replace('*/', '(*)/') + ' */' + + def _EncodeTransform(self, match): + # This function works closely with _EncodeString. It will only be called + # by re.sub with match.group(0) containing a character matched by the + # the _escaped expression. + char = match.group(0) + + # Backslashes (\) and quotation marks (") are always replaced with a + # backslash-escaped version of the same. Everything else gets its + # replacement from the class' _encode_transforms array. + if char == '\\': + return '\\\\' + if char == '"': + return '\\"' + return self._encode_transforms[ord(char)] + + def _EncodeString(self, value): + """Encodes a string to be placed in the project file output, mimicing + Xcode behavior. + """ + + # Use quotation marks when any character outside of the range A-Z, a-z, 0-9, + # $ (dollar sign), . (period), and _ (underscore) is present. Also use + # quotation marks to represent empty strings. + # + # Escape " (double-quote) and \ (backslash) by preceding them with a + # backslash. + # + # Some characters below the printable ASCII range are encoded specially: + # 7 ^G BEL is encoded as "\a" + # 8 ^H BS is encoded as "\b" + # 11 ^K VT is encoded as "\v" + # 12 ^L NP is encoded as "\f" + # 127 ^? DEL is passed through as-is without escaping + # - In PBXFileReference and PBXBuildFile objects: + # 9 ^I HT is passed through as-is without escaping + # 10 ^J NL is passed through as-is without escaping + # 13 ^M CR is passed through as-is without escaping + # - In other objects: + # 9 ^I HT is encoded as "\t" + # 10 ^J NL is encoded as "\n" + # 13 ^M CR is encoded as "\n" rendering it indistinguishable from + # 10 ^J NL + # All other nonprintable characters within the ASCII range (0 through 127 + # inclusive) are encoded as "\U001f" referring to the Unicode code point in + # hexadecimal. 
+    # For example, character 14 (^N SO) is encoded as "\U000e".
+    # Characters above the ASCII range are passed through to the output
+    # encoded as UTF-8 without any escaping. These mappings are contained in
+    # the class' _encode_transforms list.
+
+    if _unquoted.search(value) and not _quoted.search(value):
+      return value
+
+    return '"' + _escaped.sub(self._EncodeTransform, value) + '"'
+
+  def _XCPrint(self, file, tabs, line):
+    file.write('\t' * tabs + line)
+
+  def _XCPrintableValue(self, tabs, value, flatten_list=False):
+    """Returns a representation of value that may be printed in a project
+    file, mimicking Xcode's behavior.
+
+    _XCPrintableValue can handle str and int values, XCObjects (which are
+    made printable by returning their id property), and list and dict objects
+    composed of any of the above types. When printing a list or dict, and
+    _should_print_single_line is False, the tabs parameter is used to
+    determine how much to indent the lines corresponding to the items in the
+    list or dict.
+
+    If flatten_list is True, single-element lists will be transformed into
+    strings.
+    """
+
+    printable = ''
+    comment = None
+
+    if self._should_print_single_line:
+      sep = ' '
+      element_tabs = ''
+      end_tabs = ''
+    else:
+      sep = '\n'
+      element_tabs = '\t' * (tabs + 1)
+      end_tabs = '\t' * tabs
+
+    if isinstance(value, XCObject):
+      printable += value.id
+      comment = value.Comment()
+    elif isinstance(value, str):
+      printable += self._EncodeString(value)
+    elif isinstance(value, unicode):
+      printable += self._EncodeString(value.encode('utf-8'))
+    elif isinstance(value, int):
+      printable += str(value)
+    elif isinstance(value, list):
+      if flatten_list and len(value) <= 1:
+        if len(value) == 0:
+          printable += self._EncodeString('')
+        else:
+          printable += self._EncodeString(value[0])
+      else:
+        printable = '(' + sep
+        for item in value:
+          printable += element_tabs + \
+                       self._XCPrintableValue(tabs + 1, item, flatten_list) + \
+                       ',' + sep
+        printable += end_tabs + ')'
+    elif isinstance(value, dict):
+      printable = '{' + sep
+      for item_key, item_value in sorted(value.iteritems()):
+        printable += element_tabs + \
+            self._XCPrintableValue(tabs + 1, item_key, flatten_list) + ' = ' + \
+            self._XCPrintableValue(tabs + 1, item_value, flatten_list) + ';' + \
+            sep
+      printable += end_tabs + '}'
+    else:
+      raise TypeError, "Can't make " + value.__class__.__name__ + ' printable'
+
+    if comment != None:
+      printable += ' ' + self._EncodeComment(comment)
+
+    return printable
+
+  def _XCKVPrint(self, file, tabs, key, value):
+    """Prints a key and value, members of an XCObject's _properties
+    dictionary, to file.
+
+    tabs is an int identifying the indentation level. If the class'
+    _should_print_single_line variable is True, tabs is ignored and the
+    key-value pair will be followed by a space instead of a newline.
+    """
+
+    if self._should_print_single_line:
+      printable = ''
+      after_kv = ' '
+    else:
+      printable = '\t' * tabs
+      after_kv = '\n'
+
+    # Xcode usually prints remoteGlobalIDString values in PBXContainerItemProxy
+    # objects without comments. Sometimes it prints them with comments, but
+    # the majority of the time, it doesn't. To avoid unnecessary changes to
+    # the project file after Xcode opens it, don't write comments for
+    # remoteGlobalIDString.
+    # This is a sucky hack and it would certainly be cleaner to extend the
+    # schema to indicate whether or not a comment should be printed, but
+    # since this is the only case where the problem occurs and Xcode itself
+    # can't seem to make up its mind, the hack will suffice.
+    #
+    # Also see PBXContainerItemProxy._schema['remoteGlobalIDString'].
+    if key == 'remoteGlobalIDString' and isinstance(self,
+                                                    PBXContainerItemProxy):
+      value_to_print = value.id
+    else:
+      value_to_print = value
+
+    # In another one-off, let's set flatten_list on buildSettings properties
+    # of XCBuildConfiguration objects, because that's how Xcode treats them.
+    if key == 'buildSettings' and isinstance(self, XCBuildConfiguration):
+      flatten_list = True
+    else:
+      flatten_list = False
+
+    try:
+      printable += self._XCPrintableValue(tabs, key, flatten_list) + ' = ' + \
+                   self._XCPrintableValue(tabs, value_to_print,
+                                          flatten_list) + \
+                   ';' + after_kv
+    except TypeError, e:
+      gyp.common.ExceptionAppend(e,
+                                 'while printing key "%s"' % key)
+      raise
+
+    self._XCPrint(file, 0, printable)
+
+  def Print(self, file=sys.stdout):
+    """Prints a representation of this object to file, adhering to Xcode
+    output formatting.
+    """
+
+    self.VerifyHasRequiredProperties()
+
+    if self._should_print_single_line:
+      # When printing an object in a single line, Xcode doesn't put any space
+      # between the beginning of a dictionary (or presumably a list) and the
+      # first contained item, so you wind up with snippets like
+      #   ...CDEF = {isa = PBXFileReference; fileRef = 0123...
+      # If it were me, I would have put a space in there after the opening
+      # curly, but I guess this is just another one of those inconsistencies
+      # between how Xcode prints PBXFileReference and PBXBuildFile objects as
+      # compared to other objects. Mimic Xcode's behavior here by using an
+      # empty string for sep.
+      sep = ''
+      end_tabs = 0
+    else:
+      sep = '\n'
+      end_tabs = 2
+
+    # Start the object. For example, '\t\tPBXProject = {\n'.
+    self._XCPrint(file, 2, self._XCPrintableValue(2, self) + ' = {' + sep)
+
+    # "isa" isn't in the _properties dictionary, it's an intrinsic property
+    # of the class which the object belongs to. Xcode always outputs "isa"
+    # as the first element of an object dictionary.
+    self._XCKVPrint(file, 3, 'isa', self.__class__.__name__)
+
+    # The remaining elements of an object dictionary are sorted alphabetically.
+    for property, value in sorted(self._properties.iteritems()):
+      self._XCKVPrint(file, 3, property, value)
+
+    # End the object.
+    self._XCPrint(file, end_tabs, '};\n')
+
+  def UpdateProperties(self, properties, do_copy=False):
+    """Merge the supplied properties into the _properties dictionary.
+
+    The input properties must adhere to the class schema or a KeyError or
+    TypeError exception will be raised. If adding an object of an XCObject
+    subclass and the schema indicates a strong relationship, the object's
+    parent will be set to this object.
+
+    If do_copy is True, then lists, dicts, strong-owned XCObjects, and
+    strong-owned XCObjects in lists will be copied instead of having their
+    references added.
+    """
+
+    if properties == None:
+      return
+
+    for property, value in properties.iteritems():
+      # Make sure the property is in the schema.
+      if not property in self._schema:
+        raise KeyError, property + ' not in ' + self.__class__.__name__
+
+      # Make sure the property conforms to the schema.
+ (is_list, property_type, is_strong) = self._schema[property][0:3] + if is_list: + if value.__class__ != list: + raise TypeError, \ + property + ' of ' + self.__class__.__name__ + \ + ' must be list, not ' + value.__class__.__name__ + for item in value: + if not isinstance(item, property_type) and \ + not (item.__class__ == unicode and property_type == str): + # Accept unicode where str is specified. str is treated as + # UTF-8-encoded. + raise TypeError, \ + 'item of ' + property + ' of ' + self.__class__.__name__ + \ + ' must be ' + property_type.__name__ + ', not ' + \ + item.__class__.__name__ + elif not isinstance(value, property_type) and \ + not (value.__class__ == unicode and property_type == str): + # Accept unicode where str is specified. str is treated as + # UTF-8-encoded. + raise TypeError, \ + property + ' of ' + self.__class__.__name__ + ' must be ' + \ + property_type.__name__ + ', not ' + value.__class__.__name__ + + # Checks passed, perform the assignment. + if do_copy: + if isinstance(value, XCObject): + if is_strong: + self._properties[property] = value.Copy() + else: + self._properties[property] = value + elif isinstance(value, str) or isinstance(value, unicode) or \ + isinstance(value, int): + self._properties[property] = value + elif isinstance(value, list): + if is_strong: + # If is_strong is True, each element is an XCObject, so it's safe + # to call Copy. + self._properties[property] = [] + for item in value: + self._properties[property].append(item.Copy()) + else: + self._properties[property] = value[:] + elif isinstance(value, dict): + self._properties[property] = value.copy() + else: + raise TypeError, "Don't know how to copy a " + \ + value.__class__.__name__ + ' object for ' + \ + property + ' in ' + self.__class__.__name__ + else: + self._properties[property] = value + + # Set up the child's back-reference to this object. Don't use |value| + # any more because it may not be right if do_copy is true. + if is_strong: + if not is_list: + self._properties[property].parent = self + else: + for item in self._properties[property]: + item.parent = self + + def HasProperty(self, key): + return key in self._properties + + def GetProperty(self, key): + return self._properties[key] + + def SetProperty(self, key, value): + self.UpdateProperties({key: value}) + + def DelProperty(self, key): + if key in self._properties: + del self._properties[key] + + def AppendProperty(self, key, value): + # TODO(mark): Support ExtendProperty too (and make this call that)? + + # Schema validation. + if not key in self._schema: + raise KeyError, key + ' not in ' + self.__class__.__name__ + + (is_list, property_type, is_strong) = self._schema[key][0:3] + if not is_list: + raise TypeError, key + ' of ' + self.__class__.__name__ + ' must be list' + if not isinstance(value, property_type): + raise TypeError, 'item of ' + key + ' of ' + self.__class__.__name__ + \ + ' must be ' + property_type.__name__ + ', not ' + \ + value.__class__.__name__ + + # If the property doesn't exist yet, create a new empty list to receive the + # item. + if not key in self._properties: + self._properties[key] = [] + + # Set up the ownership link. + if is_strong: + value.parent = self + + # Store the item. + self._properties[key].append(value) + + def VerifyHasRequiredProperties(self): + """Ensure that all properties identified as required by the schema are + set. + """ + + # TODO(mark): A stronger verification mechanism is needed. Some + # subclasses need to perform validation beyond what the schema can enforce. 
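+    # For reference, each _schema entry maps a property name to a list of
+    # the form [is_list, property_type, is_strong, is_required], with an
+    # optional fifth element holding a default value. For example, PBXGroup
+    # below declares 'children': [1, XCHierarchicalElement, 1, 1, []], a
+    # strong, required list property whose default is an empty list.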
+ for property, attributes in self._schema.iteritems(): + (is_list, property_type, is_strong, is_required) = attributes[0:4] + if is_required and not property in self._properties: + raise KeyError, self.__class__.__name__ + ' requires ' + property + + def _SetDefaultsFromSchema(self): + """Assign object default values according to the schema. This will not + overwrite properties that have already been set.""" + + defaults = {} + for property, attributes in self._schema.iteritems(): + (is_list, property_type, is_strong, is_required) = attributes[0:4] + if is_required and len(attributes) >= 5 and \ + not property in self._properties: + default = attributes[4] + + defaults[property] = default + + if len(defaults) > 0: + # Use do_copy=True so that each new object gets its own copy of strong + # objects, lists, and dicts. + self.UpdateProperties(defaults, do_copy=True) + + +class XCHierarchicalElement(XCObject): + """Abstract base for PBXGroup and PBXFileReference. Not represented in a + project file.""" + + # TODO(mark): Do name and path belong here? Probably so. + # If path is set and name is not, name may have a default value. Name will + # be set to the basename of path, if the basename of path is different from + # the full value of path. If path is already just a leaf name, name will + # not be set. + _schema = XCObject._schema.copy() + _schema.update({ + 'comments': [0, str, 0, 0], + 'fileEncoding': [0, str, 0, 0], + 'includeInIndex': [0, int, 0, 0], + 'indentWidth': [0, int, 0, 0], + 'lineEnding': [0, int, 0, 0], + 'sourceTree': [0, str, 0, 1, ''], + 'tabWidth': [0, int, 0, 0], + 'usesTabs': [0, int, 0, 0], + 'wrapsLines': [0, int, 0, 0], + }) + + def __init__(self, properties=None, id=None, parent=None): + # super + XCObject.__init__(self, properties, id, parent) + if 'path' in self._properties and not 'name' in self._properties: + path = self._properties['path'] + name = posixpath.basename(path) + if name != '' and path != name: + self.SetProperty('name', name) + + if 'path' in self._properties and \ + (not 'sourceTree' in self._properties or \ + self._properties['sourceTree'] == ''): + # If the pathname begins with an Xcode variable like "$(SDKROOT)/", take + # the variable out and make the path be relative to that variable by + # assigning the variable name as the sourceTree. + (source_tree, path) = SourceTreeAndPathFromPath(self._properties['path']) + if source_tree != None: + self._properties['sourceTree'] = source_tree + if path != None: + self._properties['path'] = path + if source_tree != None and path == None and \ + not 'name' in self._properties: + # The path was of the form "$(SDKROOT)" with no path following it. + # This object is now relative to that variable, so it has no path + # attribute of its own. It does, however, keep a name. + del self._properties['path'] + self._properties['name'] = source_tree + + def Name(self): + if 'name' in self._properties: + return self._properties['name'] + elif 'path' in self._properties: + return self._properties['path'] + else: + # This happens in the case of the root PBXGroup. + return None + + def Hashables(self): + """Custom hashables for XCHierarchicalElements. + + XCHierarchicalElements are special. Generally, their hashes shouldn't + change if the paths don't change. 
The normal XCObject implementation of + Hashables adds a hashable for each object, which means that if + the hierarchical structure changes (possibly due to changes caused when + TakeOverOnlyChild runs and encounters slight changes in the hierarchy), + the hashes will change. For example, if a project file initially contains + a/b/f1 and a/b becomes collapsed into a/b, f1 will have a single parent + a/b. If someone later adds a/f2 to the project file, a/b can no longer be + collapsed, and f1 winds up with parent b and grandparent a. That would + be sufficient to change f1's hash. + + To counteract this problem, hashables for all XCHierarchicalElements except + for the main group (which has neither a name nor a path) are taken to be + just the set of path components. Because hashables are inherited from + parents, this provides assurance that a/b/f1 has the same set of hashables + whether its parent is b or a/b. + + The main group is a special case. As it is permitted to have no name or + path, it is permitted to use the standard XCObject hash mechanism. This + is not considered a problem because there can be only one main group. + """ + + if self == self.PBXProjectAncestor()._properties['mainGroup']: + # super + return XCObject.Hashables(self) + + hashables = [] + + # Put the name in first, ensuring that if TakeOverOnlyChild collapses + # children into a top-level group like "Source", the name always goes + # into the list of hashables without interfering with path components. + if 'name' in self._properties: + # Make it less likely for people to manipulate hashes by following the + # pattern of always pushing an object type value onto the list first. + hashables.append(self.__class__.__name__ + '.name') + hashables.append(self._properties['name']) + + # NOTE: This still has the problem that if an absolute path is encountered, + # including paths with a sourceTree, they'll still inherit their parents' + # hashables, even though the paths aren't relative to their parents. This + # is not expected to be much of a problem in practice. + path = self.PathFromSourceTreeAndPath() + if path != None: + components = path.split(posixpath.sep) + for component in components: + hashables.append(self.__class__.__name__ + '.path') + hashables.append(component) + + hashables.extend(self._hashables) + + return hashables + + def Compare(self, other): + # Allow comparison of these types. PBXGroup has the highest sort rank; + # PBXVariantGroup is treated as equal to PBXFileReference. + valid_class_types = { + PBXFileReference: 'file', + PBXGroup: 'group', + PBXVariantGroup: 'file', + } + self_type = valid_class_types[self.__class__] + other_type = valid_class_types[other.__class__] + + if self_type == other_type: + # If the two objects are of the same sort rank, compare their names. + return cmp(self.Name(), other.Name()) + + # Otherwise, sort groups before everything else. + if self_type == 'group': + return -1 + return 1 + + def CompareRootGroup(self, other): + # This function should be used only to compare direct children of the + # containing PBXProject's mainGroup. These groups should appear in the + # listed order. + # TODO(mark): "Build" is used by gyp.generator.xcode, perhaps the + # generator should have a way of influencing this list rather than having + # to hardcode for the generator here. + order = ['Source', 'Intermediates', 'Projects', 'Frameworks', 'Products', + 'Build'] + + # If the groups aren't in the listed order, do a name comparison. 
+    # Otherwise, groups in the listed order should come before those that
+    # aren't.
+    self_name = self.Name()
+    other_name = other.Name()
+    self_in = isinstance(self, PBXGroup) and self_name in order
+    other_in = isinstance(other, PBXGroup) and other_name in order
+    if not self_in and not other_in:
+      return self.Compare(other)
+    if self_name in order and not other_name in order:
+      return -1
+    if other_name in order and not self_name in order:
+      return 1
+
+    # If both groups are in the listed order, go by the defined order.
+    self_index = order.index(self_name)
+    other_index = order.index(other_name)
+    if self_index < other_index:
+      return -1
+    if self_index > other_index:
+      return 1
+    return 0
+
+  def PathFromSourceTreeAndPath(self):
+    # Turn the object's sourceTree and path properties into a single flat
+    # string of a form comparable to the path parameter. If there's a
+    # sourceTree property other than "", wrap it in $(...) for the
+    # comparison.
+    components = []
+    if self._properties['sourceTree'] != '':
+      components.append('$(' + self._properties['sourceTree'] + ')')
+    if 'path' in self._properties:
+      components.append(self._properties['path'])
+
+    if len(components) > 0:
+      return posixpath.join(*components)
+
+    return None
+
+  def FullPath(self):
+    # Returns a full path to self relative to the project file, or relative
+    # to some other source tree. Start with self, and walk up the chain of
+    # parents prepending their paths, if any, until no more parents are
+    # available (project-relative path) or until a path relative to some
+    # source tree is found.
+    xche = self
+    path = None
+    while isinstance(xche, XCHierarchicalElement) and \
+          (path == None or \
+           (not path.startswith('/') and not path.startswith('$'))):
+      this_path = xche.PathFromSourceTreeAndPath()
+      if this_path != None and path != None:
+        path = posixpath.join(this_path, path)
+      elif this_path != None:
+        path = this_path
+      xche = xche.parent
+
+    return path
+
+
+class PBXGroup(XCHierarchicalElement):
+  """
+  Attributes:
+    _children_by_path: Maps pathnames of children of this PBXGroup to the
+      actual child XCHierarchicalElement objects.
+    _variant_children_by_name_and_path: Maps (name, path) tuples of
+      PBXVariantGroup children to the actual child PBXVariantGroup objects.
+  """
+
+  _schema = XCHierarchicalElement._schema.copy()
+  _schema.update({
+    'children': [1, XCHierarchicalElement, 1, 1, []],
+    'name': [0, str, 0, 0],
+    'path': [0, str, 0, 0],
+  })
+
+  def __init__(self, properties=None, id=None, parent=None):
+    # super
+    XCHierarchicalElement.__init__(self, properties, id, parent)
+    self._children_by_path = {}
+    self._variant_children_by_name_and_path = {}
+    for child in self._properties.get('children', []):
+      self._AddChildToDicts(child)
+
+  def _AddChildToDicts(self, child):
+    # Sets up this PBXGroup object's dicts to reference the child properly.
+    child_path = child.PathFromSourceTreeAndPath()
+    if child_path:
+      if child_path in self._children_by_path:
+        raise ValueError, 'Found multiple children with path ' + child_path
+      self._children_by_path[child_path] = child
+
+    if isinstance(child, PBXVariantGroup):
+      child_name = child._properties.get('name', None)
+      key = (child_name, child_path)
+      if key in self._variant_children_by_name_and_path:
+        raise ValueError, 'Found multiple PBXVariantGroup children with ' + \
+                          'name ' + str(child_name) + ' and path ' + \
+                          str(child_path)
+      self._variant_children_by_name_and_path[key] = child
+
+  def AppendChild(self, child):
+    # Callers should use this instead of calling
+    # AppendProperty('children', child) directly because this function
+    # maintains the group's dicts.
+    self.AppendProperty('children', child)
+    self._AddChildToDicts(child)
+
+  def GetChildByName(self, name):
+    # This is not currently optimized with a dict as GetChildByPath is because
+    # it has few callers. Most callers probably want GetChildByPath. This
+    # function is only useful to get children that have names but no paths,
+    # which is rare. The children of the main group ("Source", "Products",
+    # etc.) are pretty much the only case where this is likely to come up.
+    #
+    # TODO(mark): Maybe this should raise an error if more than one child is
+    # present with the same name.
+    if not 'children' in self._properties:
+      return None
+
+    for child in self._properties['children']:
+      if child.Name() == name:
+        return child
+
+    return None
+
+  def GetChildByPath(self, path):
+    if not path:
+      return None
+
+    if path in self._children_by_path:
+      return self._children_by_path[path]
+
+    return None
+
+  def GetChildByRemoteObject(self, remote_object):
+    # This method is a little bit esoteric. Given a remote_object, which
+    # should be a PBXFileReference in another project file, this method will
+    # return this group's PBXReferenceProxy object serving as a local proxy
+    # for the remote PBXFileReference.
+    #
+    # This function might benefit from a dict optimization as GetChildByPath
+    # for some workloads, but profiling shows that it's not currently a
+    # problem.
+    if not 'children' in self._properties:
+      return None
+
+    for child in self._properties['children']:
+      if not isinstance(child, PBXReferenceProxy):
+        continue
+
+      container_proxy = child._properties['remoteRef']
+      if container_proxy._properties['remoteGlobalIDString'] == remote_object:
+        return child
+
+    return None
+
+  def AddOrGetFileByPath(self, path, hierarchical):
+    """Returns an existing or new file reference corresponding to path.
+
+    If hierarchical is True, this method will create or use the necessary
+    hierarchical group structure corresponding to path. Otherwise, it will
+    look in and create an item in the current group only.
+
+    If an existing matching reference is found, it is returned, otherwise, a
+    new one will be created, added to the correct group, and returned.
+
+    If path identifies a directory by virtue of carrying a trailing slash,
+    this method returns a PBXFileReference of "folder" type. If path
+    identifies a variant, by virtue of it identifying a file inside a
+    directory with an ".lproj" extension, this method returns a
+    PBXVariantGroup containing the variant named by path, and possibly other
+    variants. For all other paths, a "normal" PBXFileReference will be
+    returned.
+    """
+
+    # Adding or getting a directory? Directories end with a trailing slash.
+ is_dir = False + if path.endswith('/'): + is_dir = True + normpath = posixpath.normpath(path) + if is_dir: + normpath = path + '/' + else: + normpath = path + + # Adding or getting a variant? Variants are files inside directories + # with an ".lproj" extension. Xcode uses variants for localization. For + # a variant path/to/Language.lproj/MainMenu.nib, put a variant group named + # MainMenu.nib inside path/to, and give it a variant named Language. In + # this example, grandparent would be set to path/to and parent_root would + # be set to Language. + variant_name = None + parent = posixpath.dirname(path) + grandparent = posixpath.dirname(parent) + parent_basename = posixpath.basename(parent) + (parent_root, parent_ext) = posixpath.splitext(parent_basename) + if parent_ext == '.lproj': + variant_name = parent_root + if grandparent == '': + grandparent = None + + # Putting a directory inside a variant group is not currently supported. + assert not is_dir or variant_name == None + + path_split = path.split(posixpath.sep) + if len(path_split) == 1 or \ + ((is_dir or variant_name != None) and len(path_split) == 2) or \ + not hierarchical: + # The PBXFileReference or PBXVariantGroup will be added to or gotten from + # this PBXGroup, no recursion necessary. + if variant_name == None: + # Add or get a PBXFileReference. + file_ref = self.GetChildByPath(normpath) + if file_ref != None: + assert file_ref.__class__ == PBXFileReference + else: + file_ref = PBXFileReference({'path': path}) + self.AppendChild(file_ref) + else: + # Add or get a PBXVariantGroup. The variant group name is the same + # as the basename (MainMenu.nib in the example above). grandparent + # specifies the path to the variant group itself, and path_split[-2:] + # is the path of the specific variant relative to its group. + variant_group_name = posixpath.basename(path) + variant_group_ref = self.AddOrGetVariantGroupByNameAndPath( + variant_group_name, grandparent) + variant_path = posixpath.sep.join(path_split[-2:]) + variant_ref = variant_group_ref.GetChildByPath(variant_path) + if variant_ref != None: + assert variant_ref.__class__ == PBXFileReference + else: + variant_ref = PBXFileReference({'name': variant_name, + 'path': variant_path}) + variant_group_ref.AppendChild(variant_ref) + # The caller is interested in the variant group, not the specific + # variant file. + file_ref = variant_group_ref + return file_ref + else: + # Hierarchical recursion. Add or get a PBXGroup corresponding to the + # outermost path component, and then recurse into it, chopping off that + # path component. + next_dir = path_split[0] + group_ref = self.GetChildByPath(next_dir) + if group_ref != None: + assert group_ref.__class__ == PBXGroup + else: + group_ref = PBXGroup({'path': next_dir}) + self.AppendChild(group_ref) + return group_ref.AddOrGetFileByPath(posixpath.sep.join(path_split[1:]), + hierarchical) + + def AddOrGetVariantGroupByNameAndPath(self, name, path): + """Returns an existing or new PBXVariantGroup for name and path. + + If a PBXVariantGroup identified by the name and path arguments is already + present as a child of this object, it is returned. Otherwise, a new + PBXVariantGroup with the correct properties is created, added as a child, + and returned. + + This method will generally be called by AddOrGetFileByPath, which knows + when to create a variant group based on the structure of the pathnames + passed to it. 
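+
+    For example, when AddOrGetFileByPath handles the variant file
+    "path/to/Language.lproj/MainMenu.nib" described above, it calls this
+    method with name "MainMenu.nib" and path "path/to".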
+ """ + + key = (name, path) + if key in self._variant_children_by_name_and_path: + variant_group_ref = self._variant_children_by_name_and_path[key] + assert variant_group_ref.__class__ == PBXVariantGroup + return variant_group_ref + + variant_group_properties = {'name': name} + if path != None: + variant_group_properties['path'] = path + variant_group_ref = PBXVariantGroup(variant_group_properties) + self.AppendChild(variant_group_ref) + + return variant_group_ref + + def TakeOverOnlyChild(self, recurse=False): + """If this PBXGroup has only one child and it's also a PBXGroup, take + it over by making all of its children this object's children. + + This function will continue to take over only children when those children + are groups. If there are three PBXGroups representing a, b, and c, with + c inside b and b inside a, and a and b have no other children, this will + result in a taking over both b and c, forming a PBXGroup for a/b/c. + + If recurse is True, this function will recurse into children and ask them + to collapse themselves by taking over only children as well. Assuming + an example hierarchy with files at a/b/c/d1, a/b/c/d2, and a/b/c/d3/e/f + (d1, d2, and f are files, the rest are groups), recursion will result in + a group for a/b/c containing a group for d3/e. + """ + + # At this stage, check that child class types are PBXGroup exactly, + # instead of using isinstance. The only subclass of PBXGroup, + # PBXVariantGroup, should not participate in reparenting in the same way: + # reparenting by merging different object types would be wrong. + while len(self._properties['children']) == 1 and \ + self._properties['children'][0].__class__ == PBXGroup: + # Loop to take over the innermost only-child group possible. + + child = self._properties['children'][0] + + # Assume the child's properties, including its children. Save a copy + # of this object's old properties, because they'll still be needed. + # This object retains its existing id and parent attributes. + old_properties = self._properties + self._properties = child._properties + self._children_by_path = child._children_by_path + + if not 'sourceTree' in self._properties or \ + self._properties['sourceTree'] == '': + # The child was relative to its parent. Fix up the path. Note that + # children with a sourceTree other than "" are not relative to + # their parents, so no path fix-up is needed in that case. + if 'path' in old_properties: + if 'path' in self._properties: + # Both the original parent and child have paths set. + self._properties['path'] = posixpath.join(old_properties['path'], + self._properties['path']) + else: + # Only the original parent has a path, use it. + self._properties['path'] = old_properties['path'] + if 'sourceTree' in old_properties: + # The original parent had a sourceTree set, use it. + self._properties['sourceTree'] = old_properties['sourceTree'] + + # If the original parent had a name set, keep using it. If the original + # parent didn't have a name but the child did, let the child's name + # live on. If the name attribute seems unnecessary now, get rid of it. + if 'name' in old_properties and old_properties['name'] != None and \ + old_properties['name'] != self.Name(): + self._properties['name'] = old_properties['name'] + if 'name' in self._properties and 'path' in self._properties and \ + self._properties['name'] == self._properties['path']: + del self._properties['name'] + + # Notify all children of their new parent. 
+ for child in self._properties['children']: + child.parent = self + + # If asked to recurse, recurse. + if recurse: + for child in self._properties['children']: + if child.__class__ == PBXGroup: + child.TakeOverOnlyChild(recurse) + + def SortGroup(self): + self._properties['children'] = \ + sorted(self._properties['children'], cmp=lambda x,y: x.Compare(y)) + + # Recurse. + for child in self._properties['children']: + if isinstance(child, PBXGroup): + child.SortGroup() + + +class XCFileLikeElement(XCHierarchicalElement): + # Abstract base for objects that can be used as the fileRef property of + # PBXBuildFile. + + def PathHashables(self): + # A PBXBuildFile that refers to this object will call this method to + # obtain additional hashables specific to this XCFileLikeElement. Don't + # just use this object's hashables, they're not specific and unique enough + # on their own (without access to the parent hashables.) Instead, provide + # hashables that identify this object by path by getting its hashables as + # well as the hashables of ancestor XCHierarchicalElement objects. + + hashables = [] + xche = self + while xche != None and isinstance(xche, XCHierarchicalElement): + xche_hashables = xche.Hashables() + for index in xrange(0, len(xche_hashables)): + hashables.insert(index, xche_hashables[index]) + xche = xche.parent + return hashables + + +class XCContainerPortal(XCObject): + # Abstract base for objects that can be used as the containerPortal property + # of PBXContainerItemProxy. + pass + + +class XCRemoteObject(XCObject): + # Abstract base for objects that can be used as the remoteGlobalIDString + # property of PBXContainerItemProxy. + pass + + +class PBXFileReference(XCFileLikeElement, XCContainerPortal, XCRemoteObject): + _schema = XCFileLikeElement._schema.copy() + _schema.update({ + 'explicitFileType': [0, str, 0, 0], + 'lastKnownFileType': [0, str, 0, 0], + 'name': [0, str, 0, 0], + 'path': [0, str, 0, 1], + }) + + # Weird output rules for PBXFileReference. + _should_print_single_line = True + # super + _encode_transforms = XCFileLikeElement._alternate_encode_transforms + + def __init__(self, properties=None, id=None, parent=None): + # super + XCFileLikeElement.__init__(self, properties, id, parent) + if 'path' in self._properties and self._properties['path'].endswith('/'): + self._properties['path'] = self._properties['path'][:-1] + is_dir = True + else: + is_dir = False + + if 'path' in self._properties and \ + not 'lastKnownFileType' in self._properties and \ + not 'explicitFileType' in self._properties: + # TODO(mark): This is the replacement for a replacement for a quick hack. + # It is no longer incredibly sucky, but this list needs to be extended. 
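+      # Lookups are performed on the lowercased extension with its leading
+      # dot stripped; anything not found in this map falls back to 'text'
+      # below.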
+ extension_map = { + 'a': 'archive.ar', + 'app': 'wrapper.application', + 'bdic': 'file', + 'bundle': 'wrapper.cfbundle', + 'c': 'sourcecode.c.c', + 'cc': 'sourcecode.cpp.cpp', + 'cpp': 'sourcecode.cpp.cpp', + 'css': 'text.css', + 'cxx': 'sourcecode.cpp.cpp', + 'dylib': 'compiled.mach-o.dylib', + 'framework': 'wrapper.framework', + 'h': 'sourcecode.c.h', + 'hxx': 'sourcecode.cpp.h', + 'icns': 'image.icns', + 'js': 'sourcecode.javascript', + 'm': 'sourcecode.c.objc', + 'mm': 'sourcecode.cpp.objcpp', + 'nib': 'wrapper.nib', + 'pdf': 'image.pdf', + 'pl': 'text.script.perl', + 'plist': 'text.plist.xml', + 'pm': 'text.script.perl', + 'png': 'image.png', + 'py': 'text.script.python', + 'r': 'sourcecode.rez', + 'rez': 'sourcecode.rez', + 's': 'sourcecode.asm', + 'strings': 'text.plist.strings', + 'ttf': 'file', + 'xcconfig': 'text.xcconfig', + 'xib': 'file.xib', + 'y': 'sourcecode.yacc', + } + + if is_dir: + file_type = 'folder' + else: + basename = posixpath.basename(self._properties['path']) + (root, ext) = posixpath.splitext(basename) + # Check the map using a lowercase extension. + # TODO(mark): Maybe it should try with the original case first and fall + # back to lowercase, in case there are any instances where case + # matters. There currently aren't. + if ext != '': + ext = ext[1:].lower() + + # TODO(mark): "text" is the default value, but "file" is appropriate + # for unrecognized files not containing text. Xcode seems to choose + # based on content. + file_type = extension_map.get(ext, 'text') + + self._properties['lastKnownFileType'] = file_type + + +class PBXVariantGroup(PBXGroup, XCFileLikeElement): + """PBXVariantGroup is used by Xcode to represent localizations.""" + # No additions to the schema relative to PBXGroup. + pass + + +# PBXReferenceProxy is also an XCFileLikeElement subclass. It is defined below +# because it uses PBXContainerItemProxy, defined below. + + +class XCBuildConfiguration(XCObject): + _schema = XCObject._schema.copy() + _schema.update({ + 'baseConfigurationReference': [0, PBXFileReference, 0, 0], + 'buildSettings': [0, dict, 0, 1, {}], + 'name': [0, str, 0, 1], + }) + + def HasBuildSetting(self, key): + return key in self._properties['buildSettings'] + + def GetBuildSetting(self, key): + return self._properties['buildSettings'][key] + + def SetBuildSetting(self, key, value): + # TODO(mark): If a list, copy? + self._properties['buildSettings'][key] = value + + def AppendBuildSetting(self, key, value): + if not key in self._properties['buildSettings']: + self._properties['buildSettings'][key] = [] + self._properties['buildSettings'][key].append(value) + + def DelBuildSetting(self, key): + if key in self._properties['buildSettings']: + del self._properties['buildSettings'][key] + + +class XCConfigurationList(XCObject): + # _configs is the default list of configurations. 
+ _configs = [ XCBuildConfiguration({'name': 'Debug'}), + XCBuildConfiguration({'name': 'Release'}) ] + + _schema = XCObject._schema.copy() + _schema.update({ + 'buildConfigurations': [1, XCBuildConfiguration, 1, 1, _configs], + 'defaultConfigurationIsVisible': [0, int, 0, 1, 1], + 'defaultConfigurationName': [0, str, 0, 1, 'Release'], + }) + + def Name(self): + return 'Build configuration list for ' + \ + self.parent.__class__.__name__ + ' "' + self.parent.Name() + '"' + + def ConfigurationNamed(self, name): + """Convenience accessor to obtain an XCBuildConfiguration by name.""" + for configuration in self._properties['buildConfigurations']: + if configuration._properties['name'] == name: + return configuration + + raise KeyError, name + + def DefaultConfiguration(self): + """Convenience accessor to obtain the default XCBuildConfiguration.""" + return self.ConfigurationNamed(self._properties['defaultConfigurationName']) + + def HasBuildSetting(self, key): + """Determines the state of a build setting in all XCBuildConfiguration + child objects. + + If all child objects have key in their build settings, and the value is the + same in all child objects, returns 1. + + If no child objects have the key in their build settings, returns 0. + + If some, but not all, child objects have the key in their build settings, + or if any children have different values for the key, returns -1. + """ + + has = None + value = None + for configuration in self._properties['buildConfigurations']: + configuration_has = configuration.HasBuildSetting(key) + if has == None: + has = configuration_has + elif has != configuration_has: + return -1 + + if configuration_has: + configuration_value = configuration.GetBuildSetting(key) + if value == None: + value = configuration_value + elif value != configuration_value: + return -1 + + if not has: + return 0 + + return 1 + + def GetBuildSetting(self, key): + """Gets the build setting for key. + + All child XCConfiguration objects must have the same value set for the + setting, or a ValueError will be raised. + """ + + # TODO(mark): This is wrong for build settings that are lists. The list + # contents should be compared (and a list copy returned?) + + value = None + for configuration in self._properties['buildConfigurations']: + configuration_value = configuration.GetBuildSetting(key) + if value == None: + value = configuration_value + else: + if value != configuration_value: + raise ValueError, 'Variant values for ' + key + + return value + + def SetBuildSetting(self, key, value): + """Sets the build setting for key to value in all child + XCBuildConfiguration objects. + """ + + for configuration in self._properties['buildConfigurations']: + configuration.SetBuildSetting(key, value) + + def AppendBuildSetting(self, key, value): + """Appends value to the build setting for key, which is treated as a list, + in all child XCBuildConfiguration objects. + """ + + for configuration in self._properties['buildConfigurations']: + configuration.AppendBuildSetting(key, value) + + def DelBuildSetting(self, key): + """Deletes the build setting key from all child XCBuildConfiguration + objects. + """ + + for configuration in self._properties['buildConfigurations']: + configuration.DelBuildSetting(key) + + +class PBXBuildFile(XCObject): + _schema = XCObject._schema.copy() + _schema.update({ + 'fileRef': [0, XCFileLikeElement, 0, 1], + }) + + # Weird output rules for PBXBuildFile. 
+ _should_print_single_line = True + _encode_transforms = XCObject._alternate_encode_transforms + + def Name(self): + # Example: "main.cc in Sources" + return self._properties['fileRef'].Name() + ' in ' + self.parent.Name() + + def Hashables(self): + # super + hashables = XCObject.Hashables(self) + + # It is not sufficient to just rely on Name() to get the + # XCFileLikeElement's name, because that is not a complete pathname. + # PathHashables returns hashables unique enough that no two + # PBXBuildFiles should wind up with the same set of hashables, unless + # someone adds the same file multiple times to the same target. That + # would be considered invalid anyway. + hashables.extend(self._properties['fileRef'].PathHashables()) + + return hashables + + +class XCBuildPhase(XCObject): + """Abstract base for build phase classes. Not represented in a project + file. + + Attributes: + _files_by_path: A dict mapping each path of a child in the files list by + path (keys) to the corresponding PBXBuildFile children (values). + _files_by_xcfilelikeelement: A dict mapping each XCFileLikeElement (keys) + to the corresponding PBXBuildFile children (values). + """ + + # TODO(mark): Some build phase types, like PBXShellScriptBuildPhase, don't + # actually have a "files" list. XCBuildPhase should not have "files" but + # another abstract subclass of it should provide this, and concrete build + # phase types that do have "files" lists should be derived from that new + # abstract subclass. XCBuildPhase should only provide buildActionMask and + # runOnlyForDeploymentPostprocessing, and not files or the various + # file-related methods and attributes. + + _schema = XCObject._schema.copy() + _schema.update({ + 'buildActionMask': [0, int, 0, 1, 0x7fffffff], + 'files': [1, PBXBuildFile, 1, 1, []], + 'runOnlyForDeploymentPostprocessing': [0, int, 0, 1, 0], + }) + + def __init__(self, properties=None, id=None, parent=None): + # super + XCObject.__init__(self, properties, id, parent) + + self._files_by_path = {} + self._files_by_xcfilelikeelement = {} + for pbxbuildfile in self._properties.get('files', []): + self._AddBuildFileToDicts(pbxbuildfile) + + def FileGroup(self, path): + # Subclasses must override this by returning a two-element tuple. The + # first item in the tuple should be the PBXGroup to which "path" should be + # added, either as a child or deeper descendant. The second item should + # be a boolean indicating whether files should be added into hierarchical + # groups or one single flat group. + raise NotImplementedError, \ + self.__class__.__name__ + ' must implement FileGroup' + + def _AddPathToDict(self, pbxbuildfile, path): + """Adds path to the dict tracking paths belonging to this build phase. + + If the path is already a member of this build phase, raises an exception. + """ + + if path in self._files_by_path: + raise ValueError, 'Found multiple build files with path ' + path + self._files_by_path[path] = pbxbuildfile + + def _AddBuildFileToDicts(self, pbxbuildfile, path=None): + """Maintains the _files_by_path and _files_by_xcfilelikeelement dicts. + + If path is specified, then it is the path that is being added to the + phase, and pbxbuildfile must contain either a PBXFileReference directly + referencing that path, or it must contain a PBXVariantGroup that itself + contains a PBXFileReference referencing the path. + + If path is not specified, either the PBXFileReference's path or the paths + of all children of the PBXVariantGroup are taken as being added to the + phase. 
+ + If the path is already present in the phase, raises an exception. + + If the PBXFileReference or PBXVariantGroup referenced by pbxbuildfile + are already present in the phase, referenced by a different PBXBuildFile + object, raises an exception. This does not raise an exception when + a PBXFileReference or PBXVariantGroup reappear and are referenced by the + same PBXBuildFile that has already introduced them, because in the case + of PBXVariantGroup objects, they may correspond to multiple paths that are + not all added simultaneously. When this situation occurs, the path needs + to be added to _files_by_path, but nothing needs to change in + _files_by_xcfilelikeelement, and the caller should have avoided adding + the PBXBuildFile if it is already present in the list of children. + """ + + xcfilelikeelement = pbxbuildfile._properties['fileRef'] + + paths = [] + if path != None: + # It's best when the caller provides the path. + if isinstance(xcfilelikeelement, PBXVariantGroup): + paths.append(path) + else: + # If the caller didn't provide a path, there can be either multiple + # paths (PBXVariantGroup) or one. + if isinstance(xcfilelikeelement, PBXVariantGroup): + for variant in xcfilelikeelement._properties['children']: + paths.append(variant.FullPath()) + else: + paths.append(xcfilelikeelement.FullPath()) + + # Add the paths first, because if something's going to raise, the + # messages provided by _AddPathToDict are more useful owing to its + # having access to a real pathname and not just an object's Name(). + for a_path in paths: + self._AddPathToDict(pbxbuildfile, a_path) + + # If another PBXBuildFile references this XCFileLikeElement, there's a + # problem. + if xcfilelikeelement in self._files_by_xcfilelikeelement and \ + self._files_by_xcfilelikeelement[xcfilelikeelement] != pbxbuildfile: + raise ValueError, 'Found multiple build files for ' + \ + xcfilelikeelement.Name() + self._files_by_xcfilelikeelement[xcfilelikeelement] = pbxbuildfile + + def AppendBuildFile(self, pbxbuildfile, path=None): + # Callers should use this instead of calling + # AppendProperty('files', pbxbuildfile) directly because this function + # maintains the object's dicts. Better yet, callers can just call AddFile + # with a pathname and not worry about building their own PBXBuildFile + # objects. + self.AppendProperty('files', pbxbuildfile) + self._AddBuildFileToDicts(pbxbuildfile, path) + + def AddFile(self, path): + (file_group, hierarchical) = self.FileGroup(path) + file_ref = file_group.AddOrGetFileByPath(path, hierarchical) + + if file_ref in self._files_by_xcfilelikeelement and \ + isinstance(file_ref, PBXVariantGroup): + # There's already a PBXBuildFile in this phase corresponding to the + # PBXVariantGroup. path just provides a new variant that belongs to + # the group. Add the path to the dict. + pbxbuildfile = self._files_by_xcfilelikeelement[file_ref] + self._AddBuildFileToDicts(pbxbuildfile, path) + else: + # Add a new PBXBuildFile to get file_ref into the phase. + pbxbuildfile = PBXBuildFile({'fileRef': file_ref}) + self.AppendBuildFile(pbxbuildfile, path) + + +class PBXHeadersBuildPhase(XCBuildPhase): + # No additions to the schema relative to XCBuildPhase. + + def Name(self): + return 'Headers' + + def FileGroup(self, path): + return self.PBXProjectAncestor().RootGroupForPath(path) + + +class PBXResourcesBuildPhase(XCBuildPhase): + # No additions to the schema relative to XCBuildPhase. 
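+  #
+  # A hypothetical usage sketch, assuming "target" is an XCTarget already
+  # rooted in a PBXProject (both defined elsewhere in this module):
+  #   phase = PBXResourcesBuildPhase()
+  #   target.AppendProperty('buildPhases', phase)
+  #   phase.AddFile('Resources/Language.lproj/MainMenu.nib')
+  # AddFile routes the path through FileGroup below to find the right
+  # PBXGroup, creating PBXFileReference, PBXVariantGroup, and PBXBuildFile
+  # objects as needed.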
+ + def Name(self): + return 'Resources' + + def FileGroup(self, path): + return self.PBXProjectAncestor().RootGroupForPath(path) + + +class PBXSourcesBuildPhase(XCBuildPhase): + # No additions to the schema relative to XCBuildPhase. + + def Name(self): + return 'Sources' + + def FileGroup(self, path): + return self.PBXProjectAncestor().RootGroupForPath(path) + + +class PBXFrameworksBuildPhase(XCBuildPhase): + # No additions to the schema relative to XCBuildPhase. + + def Name(self): + return 'Frameworks' + + def FileGroup(self, path): + return (self.PBXProjectAncestor().FrameworksGroup(), False) + + +class PBXShellScriptBuildPhase(XCBuildPhase): + _schema = XCBuildPhase._schema.copy() + _schema.update({ + 'inputPaths': [1, str, 0, 1, []], + 'name': [0, str, 0, 0], + 'outputPaths': [1, str, 0, 1, []], + 'shellPath': [0, str, 0, 1, '/bin/sh'], + 'shellScript': [0, str, 0, 1], + 'showEnvVarsInLog': [0, int, 0, 0], + }) + + def Name(self): + if 'name' in self._properties: + return self._properties['name'] + + return 'ShellScript' + + +class PBXCopyFilesBuildPhase(XCBuildPhase): + _schema = XCBuildPhase._schema.copy() + _schema.update({ + 'dstPath': [0, str, 0, 1], + 'dstSubfolderSpec': [0, int, 0, 1], + 'name': [0, str, 0, 0], + }) + + # path_tree_re matches "$(DIR)/path" or just "$(DIR)". Match group 1 is + # "DIR", match group 3 is "path" or None. + path_tree_re = re.compile('^\\$\\((.*)\\)(/(.*)|)$') + + # path_tree_to_subfolder maps names of Xcode variables to the associated + # dstSubfolderSpec property value used in a PBXCopyFilesBuildPhase object. + path_tree_to_subfolder = { + 'BUILT_PRODUCTS_DIR': 16, # Products Directory + # Other types that can be chosen via the Xcode UI. + # TODO(mark): Map Xcode variable names to these. + # : 1, # Wrapper + # : 6, # Executables: 6 + # : 7, # Resources + # : 15, # Java Resources + # : 10, # Frameworks + # : 11, # Shared Frameworks + # : 12, # Shared Support + # : 13, # PlugIns + } + + def Name(self): + if 'name' in self._properties: + return self._properties['name'] + + return 'CopyFiles' + + def FileGroup(self, path): + return self.PBXProjectAncestor().RootGroupForPath(path) + + def SetDestination(self, path): + """Set the dstSubfolderSpec and dstPath properties from path. + + path may be specified in the same notation used for XCHierarchicalElements, + specifically, "$(DIR)/path". + """ + + path_tree_match = self.path_tree_re.search(path) + if path_tree_match: + # Everything else needs to be relative to an Xcode variable. + path_tree = path_tree_match.group(1) + relative_path = path_tree_match.group(3) + + if path_tree in self.path_tree_to_subfolder: + subfolder = self.path_tree_to_subfolder[path_tree] + if relative_path == None: + relative_path = '' + else: + # The path starts with an unrecognized Xcode variable + # name like $(SRCROOT). Xcode will still handle this + # as an "absolute path" that starts with the variable. + subfolder = 0 + relative_path = path + elif path.startswith('/'): + # Special case. Absolute paths are in dstSubfolderSpec 0. 
+      subfolder = 0
+      relative_path = path[1:]
+    else:
+      raise ValueError, 'Can\'t use path %s in a %s' % \
+                        (path, self.__class__.__name__)
+
+    self._properties['dstPath'] = relative_path
+    self._properties['dstSubfolderSpec'] = subfolder
+
+
+class PBXBuildRule(XCObject):
+  _schema = XCObject._schema.copy()
+  _schema.update({
+    'compilerSpec': [0, str, 0, 1],
+    'filePatterns': [0, str, 0, 0],
+    'fileType': [0, str, 0, 1],
+    'isEditable': [0, int, 0, 1, 1],
+    'outputFiles': [1, str, 0, 1, []],
+    'script': [0, str, 0, 0],
+  })
+
+  def Name(self):
+    # Not very inspired, but it's what Xcode uses.
+    return self.__class__.__name__
+
+  def Hashables(self):
+    # super
+    hashables = XCObject.Hashables(self)
+
+    # Use the hashables of the weak objects that this object refers to.
+    hashables.append(self._properties['fileType'])
+    if 'filePatterns' in self._properties:
+      hashables.append(self._properties['filePatterns'])
+    return hashables
+
+
+class PBXContainerItemProxy(XCObject):
+  # When referencing an item in this project file, containerPortal is the
+  # PBXProject root object of this project file.  When referencing an item in
+  # another project file, containerPortal is a PBXFileReference identifying
+  # the other project file.
+  #
+  # When serving as a proxy to an XCTarget (in this project file or another),
+  # proxyType is 1.  When serving as a proxy to a PBXFileReference (in another
+  # project file), proxyType is 2.  Type 2 is used for references to the
+  # products of the other project file's targets.
+  #
+  # Xcode is weird about remoteGlobalIDString.  Usually, it's printed without
+  # a comment, indicating that it's tracked internally simply as a string, but
+  # sometimes it's printed with a comment (usually when the object is initially
+  # created), indicating that it's tracked as a project file object at least
+  # sometimes.  This module always tracks it as an object, but contains a hack
+  # to prevent it from printing the comment in the project file output.  See
+  # _XCKVPrint.
+  _schema = XCObject._schema.copy()
+  _schema.update({
+    'containerPortal': [0, XCContainerPortal, 0, 1],
+    'proxyType': [0, int, 0, 1],
+    'remoteGlobalIDString': [0, XCRemoteObject, 0, 1],
+    'remoteInfo': [0, str, 0, 1],
+  })
+
+  def __repr__(self):
+    props = self._properties
+    name = '%s.gyp:%s' % (props['containerPortal'].Name(), props['remoteInfo'])
+    return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
+
+  def Name(self):
+    # Admittedly not the best name, but it's what Xcode uses.
+    return self.__class__.__name__
+
+  def Hashables(self):
+    # super
+    hashables = XCObject.Hashables(self)
+
+    # Use the hashables of the weak objects that this object refers to.
+    hashables.extend(self._properties['containerPortal'].Hashables())
+    hashables.extend(self._properties['remoteGlobalIDString'].Hashables())
+    return hashables
+
+
+class PBXTargetDependency(XCObject):
+  # The "target" property accepts an XCTarget object, and obviously not
+  # NoneType.  But XCTarget is defined below, so it can't be put into the
+  # schema yet.  The definition of PBXTargetDependency can't be moved below
+  # XCTarget because XCTarget's own schema references PBXTargetDependency.
+  # Python doesn't deal well with this circular relationship, and doesn't have
+  # a real way to do forward declarations.  To work around, the type of
+  # the "target" property is reset below, after XCTarget is defined.
+  #
+  # At least one of "name" and "target" is required.
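+  #
+  # As elsewhere in this module, each schema value is a list of the form
+  # [is_list, property_type, is_strong, is_required] with an optional
+  # trailing default, so 'targetProxy': [0, PBXContainerItemProxy, 1, 1]
+  # below declares a required, strongly-owned, non-list property.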
+ _schema = XCObject._schema.copy() + _schema.update({ + 'name': [0, str, 0, 0], + 'target': [0, None.__class__, 0, 0], + 'targetProxy': [0, PBXContainerItemProxy, 1, 1], + }) + + def __repr__(self): + name = self._properties.get('name') or self._properties['target'].Name() + return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self)) + + def Name(self): + # Admittedly not the best name, but it's what Xcode uses. + return self.__class__.__name__ + + def Hashables(self): + # super + hashables = XCObject.Hashables(self) + + # Use the hashables of the weak objects that this object refers to. + hashables.extend(self._properties['targetProxy'].Hashables()) + return hashables + + +class PBXReferenceProxy(XCFileLikeElement): + _schema = XCFileLikeElement._schema.copy() + _schema.update({ + 'fileType': [0, str, 0, 1], + 'path': [0, str, 0, 1], + 'remoteRef': [0, PBXContainerItemProxy, 1, 1], + }) + + +class XCTarget(XCRemoteObject): + # An XCTarget is really just an XCObject, the XCRemoteObject thing is just + # to allow PBXProject to be used in the remoteGlobalIDString property of + # PBXContainerItemProxy. + # + # Setting a "name" property at instantiation may also affect "productName", + # which may in turn affect the "PRODUCT_NAME" build setting in children of + # "buildConfigurationList". See __init__ below. + _schema = XCRemoteObject._schema.copy() + _schema.update({ + 'buildConfigurationList': [0, XCConfigurationList, 1, 1, + XCConfigurationList()], + 'buildPhases': [1, XCBuildPhase, 1, 1, []], + 'dependencies': [1, PBXTargetDependency, 1, 1, []], + 'name': [0, str, 0, 1], + 'productName': [0, str, 0, 1], + }) + + def __init__(self, properties=None, id=None, parent=None, + force_extension=None): + # super + XCRemoteObject.__init__(self, properties, id, parent) + + # Set up additional defaults not expressed in the schema. If a "name" + # property was supplied, set "productName" if it is not present. Also set + # the "PRODUCT_NAME" build setting in each configuration, but only if + # the setting is not present in any build configuration. + if 'name' in self._properties: + if not 'productName' in self._properties: + self.SetProperty('productName', self._properties['name']) + + if 'productName' in self._properties: + if 'buildConfigurationList' in self._properties: + configs = self._properties['buildConfigurationList'] + if configs.HasBuildSetting('PRODUCT_NAME') == 0: + configs.SetBuildSetting('PRODUCT_NAME', + self._properties['productName']) + + def AddDependency(self, other): + pbxproject = self.PBXProjectAncestor() + other_pbxproject = other.PBXProjectAncestor() + if pbxproject == other_pbxproject: + # The easy case. Add a dependency to another target in the same + # project file. + container = PBXContainerItemProxy({'containerPortal': pbxproject, + 'proxyType': 1, + 'remoteGlobalIDString': other, + 'remoteInfo': other.Name()}) + dependency = PBXTargetDependency({'target': other, + 'targetProxy': container}) + self.AppendProperty('dependencies', dependency) + else: + # The hard case. Add a dependency to a target in a different project + # file. Actually, this case isn't really so hard. 
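+      # AddOrGetProjectReference returns [ProductGroup, ProjectRef]; only
+      # element [1], the PBXFileReference to the other .xcodeproj, is needed
+      # here to serve as the containerPortal of the cross-project proxy.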
+ other_project_ref = \ + pbxproject.AddOrGetProjectReference(other_pbxproject)[1] + container = PBXContainerItemProxy({ + 'containerPortal': other_project_ref, + 'proxyType': 1, + 'remoteGlobalIDString': other, + 'remoteInfo': other.Name(), + }) + dependency = PBXTargetDependency({'name': other.Name(), + 'targetProxy': container}) + self.AppendProperty('dependencies', dependency) + + # Proxy all of these through to the build configuration list. + + def ConfigurationNamed(self, name): + return self._properties['buildConfigurationList'].ConfigurationNamed(name) + + def DefaultConfiguration(self): + return self._properties['buildConfigurationList'].DefaultConfiguration() + + def HasBuildSetting(self, key): + return self._properties['buildConfigurationList'].HasBuildSetting(key) + + def GetBuildSetting(self, key): + return self._properties['buildConfigurationList'].GetBuildSetting(key) + + def SetBuildSetting(self, key, value): + return self._properties['buildConfigurationList'].SetBuildSetting(key, \ + value) + + def AppendBuildSetting(self, key, value): + return self._properties['buildConfigurationList'].AppendBuildSetting(key, \ + value) + + def DelBuildSetting(self, key): + return self._properties['buildConfigurationList'].DelBuildSetting(key) + + +# Redefine the type of the "target" property. See PBXTargetDependency._schema +# above. +PBXTargetDependency._schema['target'][1] = XCTarget + + +class PBXNativeTarget(XCTarget): + # buildPhases is overridden in the schema to be able to set defaults. + # + # NOTE: Contrary to most objects, it is advisable to set parent when + # constructing PBXNativeTarget. A parent of an XCTarget must be a PBXProject + # object. A parent reference is required for a PBXNativeTarget during + # construction to be able to set up the target defaults for productReference, + # because a PBXBuildFile object must be created for the target and it must + # be added to the PBXProject's mainGroup hierarchy. + _schema = XCTarget._schema.copy() + _schema.update({ + 'buildPhases': [1, XCBuildPhase, 1, 1, + [PBXSourcesBuildPhase(), PBXFrameworksBuildPhase()]], + 'buildRules': [1, PBXBuildRule, 1, 1, []], + 'productReference': [0, PBXFileReference, 0, 1], + 'productType': [0, str, 0, 1], + }) + + _product_filetypes = { + 'com.apple.product-type.application': ['wrapper.application', + '', '.app'], + 'com.apple.product-type.bundle': ['wrapper.cfbundle', + '', '.bundle'], + 'com.apple.product-type.framework': ['wrapper.framework', + '', '.framework'], + 'com.apple.product-type.library.dynamic': ['compiled.mach-o.dylib', + 'lib', '.dylib'], + 'com.apple.product-type.library.static': ['archive.ar', 'lib', '.a'], + 'com.apple.product-type.tool': ['compiled.mach-o.executable', + '', ''], + } + + def __init__(self, properties=None, id=None, parent=None, + force_extension=None): + # super + XCTarget.__init__(self, properties, id, parent) + + if 'productName' in self._properties and \ + 'productType' in self._properties and \ + not 'productReference' in self._properties and \ + self._properties['productType'] in self._product_filetypes: + products_group = None + pbxproject = self.PBXProjectAncestor() + if pbxproject != None: + products_group = pbxproject.ProductsGroup() + + if products_group != None: + (filetype, prefix, suffix) = \ + self._product_filetypes[self._properties['productType']] + + if force_extension is not None: + # Extension override. + suffix = '.' + force_extension + + # If it's a wrapper (bundle), set WRAPPER_EXTENSION. 
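+          # Illustrative example (names hypothetical): a target with
+          # productType 'com.apple.product-type.bundle', productName 'Foo',
+          # and force_extension='plugin' yields a 'Foo.plugin' product
+          # reference and, via the branch below, a WRAPPER_EXTENSION build
+          # setting of 'plugin'.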
+ if filetype.startswith('wrapper.'): + self.SetBuildSetting('WRAPPER_EXTENSION', force_extension) + + ref_props = { + 'explicitFileType': filetype, + 'includeInIndex': 0, + 'path': prefix + self._properties['productName'] + suffix, + 'sourceTree': 'BUILT_PRODUCTS_DIR', + } + file_ref = PBXFileReference(ref_props) + products_group.AppendChild(file_ref) + self.SetProperty('productReference', file_ref) + + def GetBuildPhaseByType(self, type): + if not 'buildPhases' in self._properties: + return None + + the_phase = None + for phase in self._properties['buildPhases']: + if isinstance(phase, type): + # Some phases may be present in multiples in a well-formed project file, + # but phases like PBXSourcesBuildPhase may only be present singly, and + # this function is intended as an aid to GetBuildPhaseByType. Loop + # over the entire list of phases and assert if more than one of the + # desired type is found. + assert the_phase == None + the_phase = phase + + return the_phase + + def ResourcesPhase(self): + resources_phase = self.GetBuildPhaseByType(PBXResourcesBuildPhase) + if resources_phase == None: + resources_phase = PBXResourcesBuildPhase() + + # The resources phase should come before the sources and frameworks + # phases, if any. + insert_at = len(self._properties['buildPhases']) + for index in xrange(0, len(self._properties['buildPhases'])): + phase = self._properties['buildPhases'][index] + if isinstance(phase, PBXSourcesBuildPhase) or \ + isinstance(phase, PBXFrameworksBuildPhase): + insert_at = index + break + + self._properties['buildPhases'].insert(insert_at, resources_phase) + resources_phase.parent = self + + return resources_phase + + def SourcesPhase(self): + sources_phase = self.GetBuildPhaseByType(PBXSourcesBuildPhase) + if sources_phase == None: + sources_phase = PBXSourcesBuildPhase() + self.AppendProperty('buildPhases', sources_phase) + + return sources_phase + + def FrameworksPhase(self): + frameworks_phase = self.GetBuildPhaseByType(PBXFrameworksBuildPhase) + if frameworks_phase == None: + frameworks_phase = PBXFrameworksBuildPhase() + self.AppendProperty('buildPhases', frameworks_phase) + + return frameworks_phase + + def AddDependency(self, other): + # super + XCTarget.AddDependency(self, other) + + static_library_type = 'com.apple.product-type.library.static' + shared_library_type = 'com.apple.product-type.library.dynamic' + framework_type = 'com.apple.product-type.framework' + if isinstance(other, PBXNativeTarget) and \ + 'productType' in self._properties and \ + self._properties['productType'] != static_library_type and \ + 'productType' in other._properties and \ + (other._properties['productType'] == static_library_type or \ + ((other._properties['productType'] == shared_library_type or \ + other._properties['productType'] == framework_type) and \ + ((not other.HasBuildSetting('MACH_O_TYPE')) or + other.GetBuildSetting('MACH_O_TYPE') != 'mh_bundle'))): + + file_ref = other.GetProperty('productReference') + + pbxproject = self.PBXProjectAncestor() + other_pbxproject = other.PBXProjectAncestor() + if pbxproject != other_pbxproject: + other_project_product_group = \ + pbxproject.AddOrGetProjectReference(other_pbxproject)[0] + file_ref = other_project_product_group.GetChildByRemoteObject(file_ref) + + self.FrameworksPhase().AppendProperty('files', + PBXBuildFile({'fileRef': file_ref})) + + +class PBXAggregateTarget(XCTarget): + pass + + +class PBXProject(XCContainerPortal): + # A PBXProject is really just an XCObject, the XCContainerPortal thing is + # just to allow 
PBXProject to be used in the containerPortal property of + # PBXContainerItemProxy. + """ + + Attributes: + path: "sample.xcodeproj". TODO(mark) Document me! + _other_pbxprojects: A dictionary, keyed by other PBXProject objects. Each + value is a reference to the dict in the + projectReferences list associated with the keyed + PBXProject. + """ + + _schema = XCContainerPortal._schema.copy() + _schema.update({ + 'attributes': [0, dict, 0, 0], + 'buildConfigurationList': [0, XCConfigurationList, 1, 1, + XCConfigurationList()], + 'compatibilityVersion': [0, str, 0, 1, 'Xcode 3.1'], + 'hasScannedForEncodings': [0, int, 0, 1, 1], + 'mainGroup': [0, PBXGroup, 1, 1, PBXGroup()], + 'projectDirPath': [0, str, 0, 1, ''], + 'projectReferences': [1, dict, 0, 0], + 'projectRoot': [0, str, 0, 1, ''], + 'targets': [1, XCTarget, 1, 1, []], + }) + + def __init__(self, properties=None, id=None, parent=None, path=None): + self.path = path + self._other_pbxprojects = {} + # super + return XCContainerPortal.__init__(self, properties, id, parent) + + def Name(self): + name = self.path + if name[-10:] == '.xcodeproj': + name = name[:-10] + return posixpath.basename(name) + + def Path(self): + return self.path + + def Comment(self): + return 'Project object' + + def Children(self): + # super + children = XCContainerPortal.Children(self) + + # Add children that the schema doesn't know about. Maybe there's a more + # elegant way around this, but this is the only case where we need to own + # objects in a dictionary (that is itself in a list), and three lines for + # a one-off isn't that big a deal. + if 'projectReferences' in self._properties: + for reference in self._properties['projectReferences']: + children.append(reference['ProductGroup']) + + return children + + def PBXProjectAncestor(self): + return self + + def _GroupByName(self, name): + if not 'mainGroup' in self._properties: + self.SetProperty('mainGroup', PBXGroup()) + + main_group = self._properties['mainGroup'] + group = main_group.GetChildByName(name) + if group == None: + group = PBXGroup({'name': name}) + main_group.AppendChild(group) + + return group + + # SourceGroup and ProductsGroup are created by default in Xcode's own + # templates. + def SourceGroup(self): + return self._GroupByName('Source') + + def ProductsGroup(self): + return self._GroupByName('Products') + + # IntermediatesGroup is used to collect source-like files that are generated + # by rules or script phases and are placed in intermediate directories such + # as DerivedSources. + def IntermediatesGroup(self): + return self._GroupByName('Intermediates') + + # FrameworksGroup and ProjectsGroup are top-level groups used to collect + # frameworks and projects. + def FrameworksGroup(self): + return self._GroupByName('Frameworks') + + def ProjectsGroup(self): + return self._GroupByName('Projects') + + def RootGroupForPath(self, path): + """Returns a PBXGroup child of this object to which path should be added. + + This method is intended to choose between SourceGroup and + IntermediatesGroup on the basis of whether path is present in a source + directory or an intermediates directory. For the purposes of this + determination, any path located within a derived file directory such as + PROJECT_DERIVED_FILE_DIR is treated as being in an intermediates + directory. + + The returned value is a two-element tuple. The first element is the + PBXGroup, and the second element specifies whether that group should be + organized hierarchically (True) or as a single flat list (False). 
+ """ + + # TODO(mark): make this a class variable and bind to self on call? + # Also, this list is nowhere near exhaustive. + # INTERMEDIATE_DIR and SHARED_INTERMEDIATE_DIR are used by + # gyp.generator.xcode. There should probably be some way for that module + # to push the names in, rather than having to hard-code them here. + source_tree_groups = { + 'DERIVED_FILE_DIR': (self.IntermediatesGroup, True), + 'INTERMEDIATE_DIR': (self.IntermediatesGroup, True), + 'PROJECT_DERIVED_FILE_DIR': (self.IntermediatesGroup, True), + 'SHARED_INTERMEDIATE_DIR': (self.IntermediatesGroup, True), + } + + (source_tree, path) = SourceTreeAndPathFromPath(path) + if source_tree != None and source_tree in source_tree_groups: + (group_func, hierarchical) = source_tree_groups[source_tree] + group = group_func() + return (group, hierarchical) + + # TODO(mark): make additional choices based on file extension. + + return (self.SourceGroup(), True) + + def AddOrGetFileInRootGroup(self, path): + """Returns a PBXFileReference corresponding to path in the correct group + according to RootGroupForPath's heuristics. + + If an existing PBXFileReference for path exists, it will be returned. + Otherwise, one will be created and returned. + """ + + (group, hierarchical) = self.RootGroupForPath(path) + return group.AddOrGetFileByPath(path, hierarchical) + + def RootGroupsTakeOverOnlyChildren(self, recurse=False): + """Calls TakeOverOnlyChild for all groups in the main group.""" + + for group in self._properties['mainGroup']._properties['children']: + if isinstance(group, PBXGroup): + group.TakeOverOnlyChild(recurse) + + def SortGroups(self): + # Sort the children of the mainGroup (like "Source" and "Products") + # according to their defined order. + self._properties['mainGroup']._properties['children'] = \ + sorted(self._properties['mainGroup']._properties['children'], + cmp=lambda x,y: x.CompareRootGroup(y)) + + # Sort everything else by putting group before files, and going + # alphabetically by name within sections of groups and files. SortGroup + # is recursive. + for group in self._properties['mainGroup']._properties['children']: + if not isinstance(group, PBXGroup): + continue + + if group.Name() == 'Products': + # The Products group is a special case. Instead of sorting + # alphabetically, sort things in the order of the targets that + # produce the products. To do this, just build up a new list of + # products based on the targets. + products = [] + for target in self._properties['targets']: + if not isinstance(target, PBXNativeTarget): + continue + product = target._properties['productReference'] + # Make sure that the product is already in the products group. + assert product in group._properties['children'] + products.append(product) + + # Make sure that this process doesn't miss anything that was already + # in the products group. + assert len(products) == len(group._properties['children']) + group._properties['children'] = products + else: + group.SortGroup() + + def AddOrGetProjectReference(self, other_pbxproject): + """Add a reference to another project file (via PBXProject object) to this + one. + + Returns [ProductGroup, ProjectRef]. ProductGroup is a PBXGroup object in + this project file that contains a PBXReferenceProxy object for each + product of each PBXNativeTarget in the other project file. ProjectRef is + a PBXFileReference to the other project file. + + If this project file already references the other project file, the + existing ProductGroup and ProjectRef are returned. 
The ProductGroup will
+    still be updated if necessary.
+    """
+
+    if not 'projectReferences' in self._properties:
+      self._properties['projectReferences'] = []
+
+    product_group = None
+    project_ref = None
+
+    if not other_pbxproject in self._other_pbxprojects:
+      # This project file isn't yet linked to the other one.  Establish the
+      # link.
+      product_group = PBXGroup({'name': 'Products'})
+
+      # ProductGroup is strong.
+      product_group.parent = self
+
+      # There's nothing unique about this PBXGroup, and if left alone, it will
+      # wind up with the same set of hashables as all other PBXGroup objects
+      # owned by the projectReferences list.  Add the hashables of the
+      # remote PBXProject that it's related to.
+      product_group._hashables.extend(other_pbxproject.Hashables())
+
+      # The other project reports its path as relative to the same directory
+      # that this project's path is relative to.  The other project's path
+      # is not necessarily already relative to this project.  Figure out the
+      # pathname that this project needs to use to refer to the other one.
+      this_path = posixpath.dirname(self.Path())
+      projectDirPath = self.GetProperty('projectDirPath')
+      if projectDirPath:
+        if posixpath.isabs(projectDirPath[0]):
+          this_path = projectDirPath
+        else:
+          this_path = posixpath.join(this_path, projectDirPath)
+      other_path = gyp.common.RelativePath(other_pbxproject.Path(), this_path)
+
+      # ProjectRef is weak (it's owned by the mainGroup hierarchy).
+      project_ref = PBXFileReference({
+            'lastKnownFileType': 'wrapper.pb-project',
+            'path': other_path,
+            'sourceTree': 'SOURCE_ROOT',
+          })
+      self.ProjectsGroup().AppendChild(project_ref)
+
+      ref_dict = {'ProductGroup': product_group, 'ProjectRef': project_ref}
+      self._other_pbxprojects[other_pbxproject] = ref_dict
+      self.AppendProperty('projectReferences', ref_dict)
+
+      # Xcode seems to sort this list by the name of the linked project.
+      self._properties['projectReferences'] = \
+          sorted(self._properties['projectReferences'], cmp=lambda x,y:
+                 cmp(x['ProjectRef'].Name(), y['ProjectRef'].Name()))
+    else:
+      # The link already exists.  Pull out the relevant data.
+      project_ref_dict = self._other_pbxprojects[other_pbxproject]
+      product_group = project_ref_dict['ProductGroup']
+      project_ref = project_ref_dict['ProjectRef']
+
+    self._SetUpProductReferences(other_pbxproject, product_group, project_ref)
+
+    return [product_group, project_ref]
+
+  def _SetUpProductReferences(self, other_pbxproject, product_group,
+                              project_ref):
+    # TODO(mark): This only adds references to products in other_pbxproject
+    # when they don't exist in this pbxproject.  Perhaps it should also
+    # remove references from this pbxproject that are no longer present in
+    # other_pbxproject.  Perhaps it should update various properties if they
+    # change.
+    for target in other_pbxproject._properties['targets']:
+      if not isinstance(target, PBXNativeTarget):
+        continue
+
+      other_fileref = target._properties['productReference']
+      if product_group.GetChildByRemoteObject(other_fileref) == None:
+        # Xcode sets remoteInfo to the name of the target and not the name
+        # of its product, despite this proxy being a reference to the product.
+        container_item = PBXContainerItemProxy({
+              'containerPortal': project_ref,
+              'proxyType': 2,
+              'remoteGlobalIDString': other_fileref,
+              'remoteInfo': target.Name()
+            })
+        # TODO(mark): Does sourceTree get copied straight over from the other
+        # project?  Can the other project ever have lastKnownFileType here
+        # instead of explicitFileType?  (Use it if so?)  Can path ever be
+        # unset?  (I don't think so.)  Can other_fileref have name set, and
+        # does it impact the PBXReferenceProxy if so?  These are the questions
+        # that perhaps will be answered one day.
+        reference_proxy = PBXReferenceProxy({
+              'fileType': other_fileref._properties['explicitFileType'],
+              'path': other_fileref._properties['path'],
+              'sourceTree': other_fileref._properties['sourceTree'],
+              'remoteRef': container_item,
+            })
+
+        product_group.AppendChild(reference_proxy)
+
+  def SortRemoteProductReferences(self):
+    # For each remote project file, sort the associated ProductGroup in the
+    # same order that the targets are sorted in the remote project file.  This
+    # is the sort order used by Xcode.
+
+    def CompareProducts(x, y, remote_products):
+      # x and y are PBXReferenceProxy objects.  Go through their associated
+      # PBXContainerItemProxy to get the remote PBXFileReference, which will
+      # be present in the remote_products list.
+      x_remote = x._properties['remoteRef']._properties['remoteGlobalIDString']
+      y_remote = y._properties['remoteRef']._properties['remoteGlobalIDString']
+      x_index = remote_products.index(x_remote)
+      y_index = remote_products.index(y_remote)
+
+      # Use the order of each remote PBXFileReference in remote_products to
+      # determine the sort order.
+      return cmp(x_index, y_index)
+
+    for other_pbxproject, ref_dict in self._other_pbxprojects.iteritems():
+      # Build up a list of products in the remote project file, ordered the
+      # same as the targets that produce them.
+      remote_products = []
+      for target in other_pbxproject._properties['targets']:
+        if not isinstance(target, PBXNativeTarget):
+          continue
+        remote_products.append(target._properties['productReference'])
+
+      # Sort the PBXReferenceProxy children according to the list of remote
+      # products.
+      product_group = ref_dict['ProductGroup']
+      product_group._properties['children'] = sorted(
+          product_group._properties['children'],
+          cmp=lambda x, y: CompareProducts(x, y, remote_products))
+
+
+class XCProjectFile(XCObject):
+  _schema = XCObject._schema.copy()
+  _schema.update({
+    'archiveVersion': [0, int, 0, 1, 1],
+    'classes': [0, dict, 0, 1, {}],
+    'objectVersion': [0, int, 0, 1, 45],
+    'rootObject': [0, PBXProject, 1, 1],
+  })
+
+  def ComputeIDs(self, recursive=True, overwrite=True, hash=None):
+    # Although XCProjectFile is implemented here as an XCObject, it's not a
+    # proper object in the Xcode sense, and it certainly doesn't have its own
+    # ID.  Pass through an attempt to update IDs to the real root object.
+    if recursive:
+      self._properties['rootObject'].ComputeIDs(recursive, overwrite, hash)
+
+  def Print(self, file=sys.stdout):
+    self.VerifyHasRequiredProperties()
+
+    # Add the special "objects" property, which will be caught and handled
+    # separately during printing.  This structure allows a fairly standard
+    # loop to do the normal printing.
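+    # For orientation, the output produced below is a plist that looks
+    # roughly like this (sketch, heavily abbreviated):
+    #   // !$*UTF8*$!
+    #   {
+    #     archiveVersion = 1;
+    #     objectVersion = 45;
+    #     objects = { /* one Begin/End section per object class */ };
+    #     rootObject = <24-hex-char id> /* Project object */;
+    #   }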
+
+    self._properties['objects'] = {}
+    self._XCPrint(file, 0, '// !$*UTF8*$!\n')
+    if self._should_print_single_line:
+      self._XCPrint(file, 0, '{ ')
+    else:
+      self._XCPrint(file, 0, '{\n')
+    for property, value in sorted(self._properties.iteritems(),
+                                  cmp=lambda x, y: cmp(x, y)):
+      if property == 'objects':
+        self._PrintObjects(file)
+      else:
+        self._XCKVPrint(file, 1, property, value)
+    self._XCPrint(file, 0, '}\n')
+    del self._properties['objects']
+
+  def _PrintObjects(self, file):
+    if self._should_print_single_line:
+      self._XCPrint(file, 0, 'objects = {')
+    else:
+      self._XCPrint(file, 1, 'objects = {\n')
+
+    objects_by_class = {}
+    for object in self.Descendants():
+      if object == self:
+        continue
+      class_name = object.__class__.__name__
+      if not class_name in objects_by_class:
+        objects_by_class[class_name] = []
+      objects_by_class[class_name].append(object)
+
+    for class_name in sorted(objects_by_class):
+      self._XCPrint(file, 0, '\n')
+      self._XCPrint(file, 0, '/* Begin ' + class_name + ' section */\n')
+      for object in sorted(objects_by_class[class_name],
+                           cmp=lambda x, y: cmp(x.id, y.id)):
+        object.Print(file)
+      self._XCPrint(file, 0, '/* End ' + class_name + ' section */\n')
+
+    if self._should_print_single_line:
+      self._XCPrint(file, 0, '}; ')
+    else:
+      self._XCPrint(file, 1, '};\n')
+
+#!/usr/bin/env python
+# coding=utf-8
+# author=ff0000team
+
+"""
+Site: http://www.beebeeto.com/
+Framework: https://github.com/ff0000team/Beebeeto-framework
+"""
+
+import sys
+sys.path.append('../')
+
+import threadpool
+
+from copy import deepcopy
+from gevent import socket, monkey
+from utils.http import normalize_url
+
+try:
+    import simplejson as json
+except ImportError:
+    import json
+
+
+monkey.patch_socket()
+# setdefaulttimeout is a function; it must be called, not reassigned.
+socket.setdefaulttimeout(5)
+
+
+class BatchTest(object):
+    default_options = {
+        'target': None,
+        'verify': True,
+        'verbose': False,
+    }
+
+    def __init__(self, seed_file, func2run, options=None,
+                 result_file='result.txt',
+                 thread_num=100, verbose=True):
+        self.func2run = func2run
+        self.options = options if options else self.default_options
+        self.seed_iter = open(seed_file, 'rbU')
+        self.total_num = 0
+        self.result_fobj = open(result_file, 'wb')
+        self.finished_num = 0
+        self.err_num = 0
+        self.success_num = 0
+        self.tp = threadpool.ThreadPool(num_workers=thread_num)
+
+
+    def cbSaveResult(self, request, result):
+        self.finished_num += 1
+        print '%d : %s' % (self.err_num+self.finished_num, str(result))
+        if result['success']:
+            self.success_num += 1
+        self.result_fobj.write(json.dumps(result) + '\n')
+
+    def cbHandleErr(self, request, exc_info):
+        self.err_num += 1
+        result = deepcopy(request.args[0])
+        result['exception'] = str(exc_info[1])
+        self.result_fobj.write(json.dumps(result) + '\n')
+
+    def batchTest(self, norm_target_func=None, *args, **kwds):
+        '''
+        the func must be the run() function in a poc class.
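+        Each worker receives a dict of the form {'options': {...},
+        'success': None, 'poc_ret': {}} (built by argsGenerator below);
+        norm_target_func, when given, rewrites each seed line before it is
+        stored in options['target'].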
+ ''' + def argsGenerator(): + func_args = { + 'options': self.options, + 'success': None, + 'poc_ret': {}, + } + for seed in self.seed_iter: + if norm_target_func: + func_args['options']['target'] = norm_target_func(seed.strip(), *args, **kwds) + else: + func_args['options']['target'] = seed.strip() + yield deepcopy(func_args) + + requests = threadpool.makeRequests(callable_=self.func2run, + args_list = argsGenerator(), + callback=self.cbSaveResult, + exc_callback=self.cbHandleErr) + [self.tp.putRequest(req) for req in requests] + self.tp.wait() + self.tp.dismissWorkers(100, do_join=True) + return self.total_num, self.finished_num, self.err_num + + +if __name__ == '__main__': + import time + # run poc_id + from poc_20140007 import MyPoc + + start_time = time.time() + bt = BatchTest(seed_file='website.txt', + func2run=MyPoc.verify, + options=None, + result_file='result.txt', + thread_num=100, + verbose=True) + bt.batchTest(norm_target_func=normalize_url, https=False) + print 'total number: %d, finished number: %d, success number: %d, error number: %d'\ + % (bt.total_num, bt.finished_num, bt.success_num, bt.err_num) + print 'cost %f seconds.' % (time.time() - start_time) + +# coding: utf-8 +"""Interface to sourmash""" + +import os +import numpy as np +import pandas as pd +import shutil + +import anvio +import anvio.utils as utils +import anvio.terminal as terminal +import anvio.filesnpaths as filesnpaths + +from scipy.stats import entropy, skew, kurtosis + +from anvio.errors import ConfigError + + +__author__ = "Developers of anvi'o (see AUTHORS.txt)" +__copyright__ = "Copyleft 2015-2019, the Meren Lab (http://merenlab.org/)" +__credits__ = [] +__license__ = "GPL 3.0" +__version__ = anvio.__version__ +__maintainer__ = "Mahmoud Yousef" +__email__ = "mahmoudyousef@uchicago.edu" + + +class Sourmash: + """This calculates a single kmer signature, and computes similarities. + + Feel free to buff this to suit your needs + """ + + def __init__(self, args={}, run=terminal.Run(), progress=terminal.Progress(), program_name='sourmash'): + self.run = run + self.progress = progress + self.program_name = program_name + self.check_program() + + self.results = {} + + A = lambda x: args.__dict__[x] if x in args.__dict__ else None + self.log_file_path = os.path.abspath(A('log_file') or filesnpaths.get_temp_file_path()) + self.num_threads = A('num_threads') or 1 + self.kmer_size = A('kmer_size') or 51 + self.scale = A('scale') or 1000 + + self.run.warning("Anvi'o will use 'sourmash' by Brown et al. (DOI: 10.21105/joss.00027) to compute kmer sequences and determine mash distances. " + "If you publish your findings, please do not forget to properly credit their work", + lc='green', header="CITATION") + + if self.num_threads != 1: + self.num_threads = 1 + self.run.warning("Anvi'o speaking: sourmash currently doesn't support multithreading. 
" + "Anvi'o will have to reduce your number of threads to one :(") + + self.run.info('[sourmash] Log file path', self.log_file_path, nl_after=1) + + + def check_program(self): + utils.is_program_exists(self.program_name) + + + def process(self, input_path, fasta_files): + self.run.info('[sourmash] Kmer size', self.kmer_size, nl_before=1) + self.run.info('[sourmash] Compression ratio', self.scale) + + report_name = 'kmer_%d_mash_similarity' % self.kmer_size + + # backup the old working directory before changing the directory + old_wd = os.getcwd() + os.chdir(input_path) + if not os.path.exists('output'): + os.mkdir('output') + else: + pass + + self.progress.new('Sourmash') + self.progress.update('Computing fasta signatures for kmer=%d, scale=%d' % (self.kmer_size, self.scale)) + + scale = '--scaled=%i' % self.scale + compute_command = [self.program_name, 'compute', + '-k', self.kmer_size, + '-f', scale] + compute_command.extend(fasta_files) + + exit_code = utils.run_command(compute_command, self.log_file_path, remove_log_file_if_exists=False) + if int(exit_code): + self.progress.end() + raise ConfigError("sourmash returned with non-zero exit code, there may be some errors. " + "Please check the log file `%s` for details. Offending command: " + "`%s` ..." % (self.log_file_path, ' '.join([str(x) for x in compute_command[:7]]))) + + self.progress.update('Computing similarity matrix for kmer=%d, scale=%d' % (self.kmer_size, self.scale)) + compare_command = [self.program_name, 'compare', + '-k', self.kmer_size, + '--csv', os.path.join('output', report_name + '.txt')] + for f in fasta_files: + compare_command.append(f + ".sig") + + exit_code = utils.run_command(compare_command, self.log_file_path, remove_log_file_if_exists=False) + if int(exit_code): + self.progress.end() + raise ConfigError("sourmash returned with non-zero exit code, there may be some errors. " + "Please check the log file `%s` for details. Offending command: " + "`%s` ..." % (self.log_file_path, ' '.join([str(x) for x in compute_command[:7]]))) + + self.results[report_name] = utils.get_TAB_delimited_file_as_dictionary(os.path.join('output', report_name + '.txt'), + indexing_field=-1, + separator=',') + + self.progress.end() + + # restore old working directory + os.chdir(old_wd) + + return self.results + +# +# ElGamal.py : ElGamal encryption/decryption and signatures +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. +# + +__revision__ = "$Id: ElGamal.py,v 1.9 2003/04/04 19:44:26 akuchling Exp $" + +from Crypto.PublicKey.pubkey import * +from Crypto.Util import number + +class error (Exception): + pass + +# Generate an ElGamal key with N bits +def generate(bits, randfunc, progress_func=None): + """generate(bits:int, randfunc:callable, progress_func:callable) + + Generate an ElGamal key of length 'bits', using 'randfunc' to get + random data and 'progress_func', if present, to display + the progress of the key generation. 
+    """
+    obj=ElGamalobj()
+    # Generate prime p
+    if progress_func:
+        progress_func('p\n')
+    obj.p=bignum(getPrime(bits, randfunc))
+    # Generate random number g
+    if progress_func:
+        progress_func('g\n')
+    size=bits-1-(ord(randfunc(1)) & 63) # g will be from 1--64 bits smaller than p
+    if size<1:
+        size=bits-1
+    while (1):
+        obj.g=bignum(getPrime(size, randfunc))
+        if obj.g < obj.p:
+            break
+        size=(size+1) % bits
+        if size==0:
+            size=4
+    # Generate random number x
+    if progress_func:
+        progress_func('x\n')
+    while (1):
+        size=bits-1-ord(randfunc(1)) # x will be from 1 to 256 bits smaller than p
+        if size>2:
+            break
+    while (1):
+        obj.x=bignum(getPrime(size, randfunc))
+        if obj.x < obj.p:
+            break
+        size = (size+1) % bits
+        if size==0:
+            size=4
+    if progress_func:
+        progress_func('y\n')
+    obj.y = pow(obj.g, obj.x, obj.p)
+    return obj
+
+def construct(tuple):
+    """construct(tuple:(long,long,long)|(long,long,long,long))
+             : ElGamalobj
+    Construct an ElGamal key from a 3- or 4-tuple of numbers.
+    """
+
+    obj=ElGamalobj()
+    if len(tuple) not in [3,4]:
+        raise error, 'argument for construct() wrong length'
+    for i in range(len(tuple)):
+        field = obj.keydata[i]
+        setattr(obj, field, tuple[i])
+    return obj
+
+class ElGamalobj(pubkey):
+    keydata=['p', 'g', 'y', 'x']
+
+    def _encrypt(self, M, K):
+        a=pow(self.g, K, self.p)
+        b=( M*pow(self.y, K, self.p) ) % self.p
+        return ( a,b )
+
+    def _decrypt(self, M):
+        if (not hasattr(self, 'x')):
+            raise error, 'Private key not available in this object'
+        ax=pow(M[0], self.x, self.p)
+        plaintext=(M[1] * inverse(ax, self.p ) ) % self.p
+        return plaintext
+
+    def _sign(self, M, K):
+        if (not hasattr(self, 'x')):
+            raise error, 'Private key not available in this object'
+        p1=self.p-1
+        if (GCD(K, p1)!=1):
+            raise error, 'Bad K value: GCD(K,p-1)!=1'
+        a=pow(self.g, K, self.p)
+        t=(M-self.x*a) % p1
+        while t<0: t=t+p1
+        b=(t*inverse(K, p1)) % p1
+        return (a, b)
+
+    def _verify(self, M, sig):
+        v1=pow(self.y, sig[0], self.p)
+        v1=(v1*pow(sig[0], sig[1], self.p)) % self.p
+        v2=pow(self.g, M, self.p)
+        if v1==v2:
+            return 1
+        return 0
+
+    def size(self):
+        "Return the maximum number of bits that can be handled by this key."
+        return number.size(self.p) - 1
+
+    def has_private(self):
+        """Return a Boolean denoting whether the object contains
+        private components."""
+        if hasattr(self, 'x'):
+            return 1
+        else:
+            return 0
+
+    def publickey(self):
+        """Return a new key object containing only the public information."""
+        return construct((self.p, self.g, self.y))
+
+
+object=ElGamalobj
+
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+# Copyright 2019 Fortinet, Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <https://www.gnu.org/licenses/>.
+ +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: fortios_system_storage +short_description: Configure logical storage in Fortinet's FortiOS and FortiGate. +description: + - This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the + user to set and modify system feature and storage category. + Examples include all parameters and values need to be adjusted to datasources before usage. + Tested with FOS v6.0.5 +version_added: "2.9" +author: + - Miguel Angel Munoz (@mamunozgonzalez) + - Nicolas Thomas (@thomnico) +notes: + - Requires fortiosapi library developed by Fortinet + - Run as a local_action in your playbook +requirements: + - fortiosapi>=0.9.8 +options: + host: + description: + - FortiOS or FortiGate IP address. + type: str + required: false + username: + description: + - FortiOS or FortiGate username. + type: str + required: false + password: + description: + - FortiOS or FortiGate password. + type: str + default: "" + vdom: + description: + - Virtual domain, among those defined previously. A vdom is a + virtual instance of the FortiGate that can be configured and + used as a different unit. + type: str + default: root + https: + description: + - Indicates if the requests towards FortiGate must use HTTPS protocol. + type: bool + default: true + ssl_verify: + description: + - Ensures FortiGate certificate must be verified by a proper CA. + type: bool + default: true + state: + description: + - Indicates whether to create or remove the object. + type: str + required: true + choices: + - present + - absent + system_storage: + description: + - Configure logical storage. + default: null + type: dict + suboptions: + device: + description: + - Partition device. + type: str + media_status: + description: + - The physical status of current media. + type: str + choices: + - enable + - disable + - fail + name: + description: + - Storage name. + required: true + type: str + order: + description: + - Set storage order. + type: int + partition: + description: + - Label of underlying partition. + type: str + size: + description: + - Partition size. + type: int + status: + description: + - Enable/disable storage. + type: str + choices: + - enable + - disable + usage: + description: + - Use hard disk for logging or WAN Optimization . + type: str + choices: + - log + - wanopt + wanopt_mode: + description: + - WAN Optimization mode . + type: str + choices: + - mix + - wanopt + - webcache +''' + +EXAMPLES = ''' +- hosts: localhost + vars: + host: "192.168.122.40" + username: "admin" + password: "" + vdom: "root" + ssl_verify: "False" + tasks: + - name: Configure logical storage. 
+ fortios_system_storage: + host: "{{ host }}" + username: "{{ username }}" + password: "{{ password }}" + vdom: "{{ vdom }}" + https: "False" + state: "present" + system_storage: + device: "" + media_status: "enable" + name: "default_name_5" + order: "6" + partition: "" + size: "8" + status: "enable" + usage: "log" + wanopt_mode: "mix" +''' + +RETURN = ''' +build: + description: Build number of the fortigate image + returned: always + type: str + sample: '1547' +http_method: + description: Last method used to provision the content into FortiGate + returned: always + type: str + sample: 'PUT' +http_status: + description: Last result given by FortiGate on last operation applied + returned: always + type: str + sample: "200" +mkey: + description: Master key (id) used in the last call to FortiGate + returned: success + type: str + sample: "id" +name: + description: Name of the table used to fulfill the request + returned: always + type: str + sample: "urlfilter" +path: + description: Path of the table used to fulfill the request + returned: always + type: str + sample: "webfilter" +revision: + description: Internal revision number + returned: always + type: str + sample: "17.0.2.10658" +serial: + description: Serial number of the unit + returned: always + type: str + sample: "FGVMEVYYQT3AB5352" +status: + description: Indication of the operation's result + returned: always + type: str + sample: "success" +vdom: + description: Virtual domain used + returned: always + type: str + sample: "root" +version: + description: Version of the FortiGate + returned: always + type: str + sample: "v5.6.3" + +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import Connection +from ansible.module_utils.network.fortios.fortios import FortiOSHandler +from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG + + +def login(data, fos): + host = data['host'] + username = data['username'] + password = data['password'] + ssl_verify = data['ssl_verify'] + + fos.debug('on') + if 'https' in data and not data['https']: + fos.https('off') + else: + fos.https('on') + + fos.login(host, username, password, verify=ssl_verify) + + +def filter_system_storage_data(json): + option_list = ['device', 'media_status', 'name', + 'order', 'partition', 'size', + 'status', 'usage', 'wanopt_mode'] + dictionary = {} + + for attribute in option_list: + if attribute in json and json[attribute] is not None: + dictionary[attribute] = json[attribute] + + return dictionary + + +def underscore_to_hyphen(data): + if isinstance(data, list): + for elem in data: + elem = underscore_to_hyphen(elem) + elif isinstance(data, dict): + new_data = {} + for k, v in data.items(): + new_data[k.replace('_', '-')] = underscore_to_hyphen(v) + data = new_data + + return data + + +def system_storage(data, fos): + vdom = data['vdom'] + state = data['state'] + system_storage_data = data['system_storage'] + filtered_data = underscore_to_hyphen(filter_system_storage_data(system_storage_data)) + + if state == "present": + return fos.set('system', + 'storage', + data=filtered_data, + vdom=vdom) + + elif state == "absent": + return fos.delete('system', + 'storage', + mkey=filtered_data['name'], + vdom=vdom) + + +def is_successful_status(status): + return status['status'] == "success" or \ + status['http_method'] == "DELETE" and status['http_status'] == 404 + + +def fortios_system(data, fos): + + if data['system_storage']: + resp = system_storage(data, fos) + + return not is_successful_status(resp), \ + 
resp['status'] == "success", \ + resp + + +def main(): + fields = { + "host": {"required": False, "type": "str"}, + "username": {"required": False, "type": "str"}, + "password": {"required": False, "type": "str", "default": "", "no_log": True}, + "vdom": {"required": False, "type": "str", "default": "root"}, + "https": {"required": False, "type": "bool", "default": True}, + "ssl_verify": {"required": False, "type": "bool", "default": True}, + "state": {"required": True, "type": "str", + "choices": ["present", "absent"]}, + "system_storage": { + "required": False, "type": "dict", "default": None, + "options": { + "device": {"required": False, "type": "str"}, + "media_status": {"required": False, "type": "str", + "choices": ["enable", "disable", "fail"]}, + "name": {"required": True, "type": "str"}, + "order": {"required": False, "type": "int"}, + "partition": {"required": False, "type": "str"}, + "size": {"required": False, "type": "int"}, + "status": {"required": False, "type": "str", + "choices": ["enable", "disable"]}, + "usage": {"required": False, "type": "str", + "choices": ["log", "wanopt"]}, + "wanopt_mode": {"required": False, "type": "str", + "choices": ["mix", "wanopt", "webcache"]} + + } + } + } + + module = AnsibleModule(argument_spec=fields, + supports_check_mode=False) + + # legacy_mode refers to using fortiosapi instead of HTTPAPI + legacy_mode = 'host' in module.params and module.params['host'] is not None and \ + 'username' in module.params and module.params['username'] is not None and \ + 'password' in module.params and module.params['password'] is not None + + if not legacy_mode: + if module._socket_path: + connection = Connection(module._socket_path) + fos = FortiOSHandler(connection) + + is_error, has_changed, result = fortios_system(module.params, fos) + else: + module.fail_json(**FAIL_SOCKET_MSG) + else: + try: + from fortiosapi import FortiOSAPI + except ImportError: + module.fail_json(msg="fortiosapi module is required") + + fos = FortiOSAPI() + + login(module.params, fos) + is_error, has_changed, result = fortios_system(module.params, fos) + fos.logout() + + if not is_error: + module.exit_json(changed=has_changed, meta=result) + else: + module.fail_json(msg="Error in repo", meta=result) + + +if __name__ == '__main__': + main() + +from django.contrib.gis.gdal.base import GDALBase +from django.contrib.gis.gdal.error import GDALException, OGRIndexError +from django.contrib.gis.gdal.field import Field +from django.contrib.gis.gdal.geometries import OGRGeometry, OGRGeomType +from django.contrib.gis.gdal.prototypes import ds as capi, geom as geom_api +from django.utils import six +from django.utils.encoding import force_bytes, force_text +from django.utils.six.moves import range + + +# For more information, see the OGR C API source code: +# http://www.gdal.org/ogr/ogr__api_8h.html +# +# The OGR_F_* routines are relevant here. +class Feature(GDALBase): + """ + This class that wraps an OGR Feature, needs to be instantiated + from a Layer object. + """ + + def __init__(self, feat, layer): + """ + Initializes Feature from a pointer and its Layer object. + """ + if not feat: + raise GDALException('Cannot create OGR Feature, invalid pointer given.') + self.ptr = feat + self._layer = layer + + def __del__(self): + "Releases a reference to this object." 
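+        # At interpreter shutdown, module-level names such as capi may
+        # already have been torn down, hence the except clause below.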
+        try:
+            capi.destroy_feature(self._ptr)
+        except (AttributeError, TypeError):
+            pass  # Some part might already have been garbage collected
+
+    def __getitem__(self, index):
+        """
+        Gets the Field object at the specified index, which may be either
+        an integer or the Field's string label. Note that the Field object
+        is not the field's _value_ -- use the `get` method instead to
+        retrieve the value (e.g. an integer) instead of a Field instance.
+        """
+        if isinstance(index, six.string_types):
+            i = self.index(index)
+        else:
+            if index < 0 or index >= self.num_fields:
+                raise OGRIndexError('index out of range')
+            i = index
+        return Field(self, i)
+
+    def __iter__(self):
+        "Iterates over each field in the Feature."
+        for i in range(self.num_fields):
+            yield self[i]
+
+    def __len__(self):
+        "Returns the count of fields in this feature."
+        return self.num_fields
+
+    def __str__(self):
+        "The string name of the feature."
+        return 'Feature FID %d in Layer<%s>' % (self.fid, self.layer_name)
+
+    def __eq__(self, other):
+        "Does equivalence testing on the features."
+        return bool(capi.feature_equal(self.ptr, other._ptr))
+
+    # #### Feature Properties ####
+    @property
+    def encoding(self):
+        return self._layer._ds.encoding
+
+    @property
+    def fid(self):
+        "Returns the feature identifier."
+        return capi.get_fid(self.ptr)
+
+    @property
+    def layer_name(self):
+        "Returns the name of the layer for the feature."
+        name = capi.get_feat_name(self._layer._ldefn)
+        return force_text(name, self.encoding, strings_only=True)
+
+    @property
+    def num_fields(self):
+        "Returns the number of fields in the Feature."
+        return capi.get_feat_field_count(self.ptr)
+
+    @property
+    def fields(self):
+        "Returns a list of fields in the Feature."
+        return [capi.get_field_name(capi.get_field_defn(self._layer._ldefn, i))
+                for i in range(self.num_fields)]
+
+    @property
+    def geom(self):
+        "Returns the OGR Geometry for this Feature."
+        # Retrieving the geometry pointer for the feature.
+        geom_ptr = capi.get_feat_geom_ref(self.ptr)
+        return OGRGeometry(geom_api.clone_geom(geom_ptr))
+
+    @property
+    def geom_type(self):
+        "Returns the OGR Geometry Type for this Feature."
+        return OGRGeomType(capi.get_fd_geom_type(self._layer._ldefn))
+
+    # #### Feature Methods ####
+    def get(self, field):
+        """
+        Returns the value of the field, instead of an instance of the Field
+        object. May take a string of the field name or a Field object as
+        parameters.
+        """
+        field_name = getattr(field, 'name', field)
+        return self[field_name].value
+
+    def index(self, field_name):
+        "Returns the index of the given field name."
+ i = capi.get_field_index(self.ptr, force_bytes(field_name)) + if i < 0: + raise OGRIndexError('invalid OFT field name given: "%s"' % field_name) + return i + +# urllib3/poolmanager.py +# Copyright 2008-2014 Andrey Petrov and contributors (see CONTRIBUTORS.txt) +# +# This module is part of urllib3 and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +import logging + +try: # Python 3 + from urllib.parse import urljoin +except ImportError: + from urlparse import urljoin + +from ._collections import RecentlyUsedContainer +from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool +from .connectionpool import port_by_scheme +from .request import RequestMethods +from .util import parse_url + + +__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url'] + + +pool_classes_by_scheme = { + 'http': HTTPConnectionPool, + 'https': HTTPSConnectionPool, +} + +log = logging.getLogger(__name__) + +SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs', + 'ssl_version') + + +class PoolManager(RequestMethods): + """ + Allows for arbitrary requests while transparently keeping track of + necessary connection pools for you. + + :param num_pools: + Number of connection pools to cache before discarding the least + recently used pool. + + :param headers: + Headers to include with all requests, unless other headers are given + explicitly. + + :param \**connection_pool_kw: + Additional parameters are used to create fresh + :class:`urllib3.connectionpool.ConnectionPool` instances. + + Example: :: + + >>> manager = PoolManager(num_pools=2) + >>> r = manager.request('GET', 'http://google.com/') + >>> r = manager.request('GET', 'http://google.com/mail') + >>> r = manager.request('GET', 'http://yahoo.com/') + >>> len(manager.pools) + 2 + + """ + + proxy = None + + def __init__(self, num_pools=10, headers=None, **connection_pool_kw): + RequestMethods.__init__(self, headers) + self.connection_pool_kw = connection_pool_kw + self.pools = RecentlyUsedContainer(num_pools, + dispose_func=lambda p: p.close()) + + def _new_pool(self, scheme, host, port): + """ + Create a new :class:`ConnectionPool` based on host, port and scheme. + + This method is used to actually create the connection pools handed out + by :meth:`connection_from_url` and companion methods. It is intended + to be overridden for customization. + """ + pool_cls = pool_classes_by_scheme[scheme] + kwargs = self.connection_pool_kw + if scheme == 'http': + kwargs = self.connection_pool_kw.copy() + for kw in SSL_KEYWORDS: + kwargs.pop(kw, None) + + return pool_cls(host, port, **kwargs) + + def clear(self): + """ + Empty our store of pools and direct them all to close. + + This will not affect in-flight connections, but they will not be + re-used after completion. + """ + self.pools.clear() + + def connection_from_host(self, host, port=None, scheme='http'): + """ + Get a :class:`ConnectionPool` based on the host, port, and scheme. + + If ``port`` isn't given, it will be derived from the ``scheme`` using + ``urllib3.connectionpool.port_by_scheme``. + """ + + scheme = scheme or 'http' + + port = port or port_by_scheme.get(scheme, 80) + + pool_key = (scheme, host, port) + + with self.pools.lock: + # If the scheme, host, or port doesn't match existing open + # connections, open a new ConnectionPool. 
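+            # pool_key is a tuple such as ('http', 'example.com', 80); the
+            # container lock makes this get-or-create sequence atomic for
+            # threads sharing the manager.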
+            pool = self.pools.get(pool_key)
+            if pool:
+                return pool
+
+            # Make a fresh ConnectionPool of the desired type
+            pool = self._new_pool(scheme, host, port)
+            self.pools[pool_key] = pool
+        return pool
+
+    def connection_from_url(self, url):
+        """
+        Similar to :func:`urllib3.connectionpool.connection_from_url` but
+        doesn't pass any additional parameters to the
+        :class:`urllib3.connectionpool.ConnectionPool` constructor.
+
+        Additional parameters are taken from the :class:`.PoolManager`
+        constructor.
+        """
+        u = parse_url(url)
+        return self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
+
+    def urlopen(self, method, url, redirect=True, **kw):
+        """
+        Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
+        with custom cross-host redirect logic and only sends the request-uri
+        portion of the ``url``.
+
+        The given ``url`` parameter must be absolute, such that an appropriate
+        :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
+        """
+        u = parse_url(url)
+        conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
+
+        kw['assert_same_host'] = False
+        kw['redirect'] = False
+        if 'headers' not in kw:
+            kw['headers'] = self.headers
+
+        if self.proxy is not None and u.scheme == "http":
+            response = conn.urlopen(method, url, **kw)
+        else:
+            response = conn.urlopen(method, u.request_uri, **kw)
+
+        redirect_location = redirect and response.get_redirect_location()
+        if not redirect_location:
+            return response
+
+        # Support relative URLs for redirecting.
+        redirect_location = urljoin(url, redirect_location)
+
+        # RFC 2616, Section 10.3.4
+        if response.status == 303:
+            method = 'GET'
+
+        log.info("Redirecting %s -> %s" % (url, redirect_location))
+        kw['retries'] = kw.get('retries', 3) - 1  # Persist retries countdown
+        kw['redirect'] = redirect
+        return self.urlopen(method, redirect_location, **kw)
+
+
+class ProxyManager(PoolManager):
+    """
+    Behaves just like :class:`PoolManager`, but sends all requests through
+    the defined proxy, using the CONNECT method for HTTPS URLs.
+
+    :param proxy_url:
+        The URL of the proxy to be used.
+
+    :param proxy_headers:
+        A dictionary containing headers that will be sent to the proxy. In
+        case of HTTP they are being sent with each request, while in the
+        HTTPS/CONNECT case they are sent only once. Could be used for proxy
+        authentication.
+ + Example: + >>> proxy = urllib3.ProxyManager('http://localhost:3128/') + >>> r1 = proxy.request('GET', 'http://google.com/') + >>> r2 = proxy.request('GET', 'http://httpbin.org/') + >>> len(proxy.pools) + 1 + >>> r3 = proxy.request('GET', 'https://httpbin.org/') + >>> r4 = proxy.request('GET', 'https://twitter.com/') + >>> len(proxy.pools) + 3 + + """ + + def __init__(self, proxy_url, num_pools=10, headers=None, + proxy_headers=None, **connection_pool_kw): + + if isinstance(proxy_url, HTTPConnectionPool): + proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host, + proxy_url.port) + proxy = parse_url(proxy_url) + if not proxy.port: + port = port_by_scheme.get(proxy.scheme, 80) + proxy = proxy._replace(port=port) + self.proxy = proxy + self.proxy_headers = proxy_headers or {} + assert self.proxy.scheme in ("http", "https"), \ + 'Not supported proxy scheme %s' % self.proxy.scheme + connection_pool_kw['_proxy'] = self.proxy + connection_pool_kw['_proxy_headers'] = self.proxy_headers + super(ProxyManager, self).__init__( + num_pools, headers, **connection_pool_kw) + + def connection_from_host(self, host, port=None, scheme='http'): + if scheme == "https": + return super(ProxyManager, self).connection_from_host( + host, port, scheme) + + return super(ProxyManager, self).connection_from_host( + self.proxy.host, self.proxy.port, self.proxy.scheme) + + def _set_proxy_headers(self, url, headers=None): + """ + Sets headers needed by proxies: specifically, the Accept and Host + headers. Only sets headers not provided by the user. + """ + headers_ = {'Accept': '*/*'} + + netloc = parse_url(url).netloc + if netloc: + headers_['Host'] = netloc + + if headers: + headers_.update(headers) + return headers_ + + def urlopen(self, method, url, redirect=True, **kw): + "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute." + u = parse_url(url) + + if u.scheme == "http": + # For proxied HTTPS requests, httplib sets the necessary headers + # on the CONNECT to the proxy. For HTTP, we'll definitely + # need to set 'Host' at the very least. + kw['headers'] = self._set_proxy_headers(url, kw.get('headers', + self.headers)) + + return super(ProxyManager, self).urlopen(method, url, redirect, **kw) + + +def proxy_from_url(url, **kw): + return ProxyManager(proxy_url=url, **kw) + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +from itertools import zip_longest, combinations +import json +import os +import warnings + +import numpy as np + +import tvm +from tvm import relay +from tvm import rpc +from tvm.contrib import graph_runtime +from tvm.relay.op.contrib import arm_compute_lib +from tvm.contrib import util +from tvm.autotvm.measure import request_remote + + +class Device: + """ + Configuration for Arm Compute Library tests. 
+
+    Check tests/python/contrib/arm_compute_lib/ for the presence of a test_config.json file.
+    This file can be used to override the default configuration here, which attempts to run the Arm
+    Compute Library runtime tests locally if the runtime is available. Changing the configuration
+    allows these runtime tests to be offloaded to a remote Arm device, for example via a tracker.
+
+    Notes
+    -----
+    The test configuration will be loaded once when the class is created. If the configuration
+    changes between tests, any changes will not be picked up.
+
+    Parameters
+    ----------
+    device : RPCSession
+        Allows tests to connect to and use a remote device.
+
+    Attributes
+    ----------
+    connection_type : str
+        Details the type of RPC connection to use. Options:
+        local - Use the local device,
+        tracker - Connect to a tracker to request a remote device,
+        remote - Connect to a remote device directly.
+    host : str
+        Specify IP address or hostname of remote target.
+    port : int
+        Specify port number of remote target.
+    target : str
+        The compilation target.
+    device_key : str
+        The device key of the remote target. Use when connecting to a remote device via a tracker.
+    cross_compile : str
+        Specify path to cross compiler to use when connecting to a remote device from a non-Arm platform.
+    """
+
+    connection_type = "local"
+    host = "localhost"
+    port = 9090
+    target = "llvm -mtriple=aarch64-linux-gnu -mattr=+neon"
+    device_key = ""
+    cross_compile = ""
+
+    def __init__(self):
+        """Keep remote device for lifetime of object."""
+        self.device = self._get_remote()
+
+    @classmethod
+    def _get_remote(cls):
+        """Get a remote (or local) device to use for testing."""
+        if cls.connection_type == "tracker":
+            device = request_remote(cls.device_key, cls.host, cls.port, timeout=1000)
+        elif cls.connection_type == "remote":
+            device = rpc.connect(cls.host, cls.port)
+        elif cls.connection_type == "local":
+            device = rpc.LocalSession()
+        else:
+            raise ValueError(
+                "connection_type in test_config.json should be one of: local, tracker, remote."
+            )
+
+        return device
+
+    @classmethod
+    def load(cls, file_name):
+        """Load test config
+
+        Load the test configuration by looking for file_name relative
+        to the test_arm_compute_lib directory.
+        """
+        location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
+        config_file = os.path.join(location, file_name)
+        if not os.path.exists(config_file):
+            warnings.warn(
+                "Config file doesn't exist, resuming Arm Compute Library tests with default config."
+            )
+            return
+        with open(config_file, mode="r") as config:
+            test_config = json.load(config)
+
+        cls.connection_type = test_config["connection_type"]
+        cls.host = test_config["host"]
+        cls.port = test_config["port"]
+        cls.target = test_config["target"]
+        cls.device_key = test_config.get("device_key") or ""
+        cls.cross_compile = test_config.get("cross_compile") or ""
+
+
+def get_cpu_op_count(mod):
+    """Traverse graph counting ops offloaded to TVM."""
+
+    class Counter(tvm.relay.ExprVisitor):
+        def __init__(self):
+            super().__init__()
+            self.count = 0
+
+        def visit_call(self, call):
+            if isinstance(call.op, tvm.ir.Op):
+                self.count += 1
+
+            super().visit_call(call)
+
+    c = Counter()
+    c.visit(mod["main"])
+    return c.count
+
+
+def skip_runtime_test():
+    """Skip test if it requires the runtime and it's not present."""
+    # ACL codegen not present.
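+    # (the second argument of tvm.get_global_func allows the function to be
+    # missing, returning None instead of raising)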
+    if not tvm.get_global_func("relay.ext.arm_compute_lib", True):
+        print("Skip because Arm Compute Library codegen is not available.")
+        return True
+
+    # Remote device is in use or ACL runtime not present
+    # Note: Ensure that the device config has been loaded before this check
+    if (
+        Device.connection_type == "local"
+        and not arm_compute_lib.is_arm_compute_runtime_enabled()
+    ):
+        print("Skip because runtime isn't present or a remote device isn't being used.")
+        return True
+
+
+def skip_codegen_test():
+    """Skip test if it requires the ACL codegen and it's not present."""
+    if not tvm.get_global_func("relay.ext.arm_compute_lib", True):
+        print("Skip because Arm Compute Library codegen is not available.")
+        return True
+
+
+def build_module(mod, target, params=None, enable_acl=True, tvm_ops=0, acl_partitions=1):
+    """Build module with option to build for ACL."""
+    if isinstance(mod, tvm.relay.expr.Call):
+        mod = tvm.IRModule.from_expr(mod)
+    with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
+        if enable_acl:
+            mod = arm_compute_lib.partition_for_arm_compute_lib(mod, params)
+            tvm_op_count = get_cpu_op_count(mod)
+            assert tvm_op_count == tvm_ops, "Got {} TVM operators, expected {}".format(
+                tvm_op_count, tvm_ops
+            )
+            partition_count = 0
+            for global_var in mod.get_global_vars():
+                if "arm_compute_lib" in global_var.name_hint:
+                    partition_count += 1
+
+            assert (
+                acl_partitions == partition_count
+            ), "Got {} Arm Compute Library partitions, expected {}".format(
+                partition_count, acl_partitions
+            )
+        relay.backend.compile_engine.get().clear()
+        return relay.build(mod, target=target, params=params)
+
+
+def build_and_run(
+    mod,
+    inputs,
+    outputs,
+    params,
+    device,
+    enable_acl=True,
+    no_runs=1,
+    tvm_ops=0,
+    acl_partitions=1,
+    config=None,
+):
+    """Build and run the relay module."""
+    if config is None:
+        config = {}
+
+    try:
+        lib = build_module(mod, device.target, params, enable_acl, tvm_ops, acl_partitions)
+    except Exception as e:
+        err_msg = "The module could not be built.\n"
+        if config:
+            err_msg += f"The test failed with the following parameters: {config}\n"
+        err_msg += str(e)
+        raise Exception(err_msg)
+
+    lib = update_lib(lib, device.device, device.cross_compile)
+    gen_module = graph_runtime.GraphModule(lib["default"](device.device.cpu(0)))
+    gen_module.set_input(**inputs)
+    out = []
+    for _ in range(no_runs):
+        gen_module.run()
+        out.append([gen_module.get_output(i) for i in range(outputs)])
+    return out
+
+
+def update_lib(lib, device, cross_compile):
+    """Export the library to the remote/local device."""
+    lib_name = "mod.so"
+    temp = util.tempdir()
+    lib_path = temp.relpath(lib_name)
+    if cross_compile:
+        lib.export_library(lib_path, cc=cross_compile)
+    else:
+        lib.export_library(lib_path)
+    device.upload(lib_path)
+    lib = device.load_module(lib_name)
+    return lib
+
+
+def verify(answers, atol, rtol, verify_saturation=False, config=None):
+    """Compare the array of answers. Each entry is a list of outputs."""
+    if config is None:
+        config = {}
+
+    if len(answers) < 2:
+        raise RuntimeError(f"No results to compare: expected at least two, found {len(answers)}")
+    for answer in zip_longest(*answers):
+        for outs in combinations(answer, 2):
+            try:
+                if verify_saturation:
+                    assert (
+                        np.count_nonzero(outs[0].asnumpy() == 255) < 0.25 * outs[0].asnumpy().size
+                    ), "Output is saturated: {}".format(outs[0])
+                    assert (
+                        np.count_nonzero(outs[0].asnumpy() == 0) < 0.25 * outs[0].asnumpy().size
+                    ), "Output is saturated: {}".format(outs[0])
+                tvm.testing.assert_allclose(
+                    outs[0].asnumpy(), outs[1].asnumpy(), rtol=rtol, atol=atol
+                )
+            except AssertionError as e:
+                err_msg = "Results not within the acceptable tolerance.\n"
+                if config:
+                    err_msg += f"The test failed with the following parameters: {config}\n"
+                err_msg += str(e)
+                raise AssertionError(err_msg)
+
+
+def extract_acl_modules(module):
+    """Get the ACL module(s) from llvm module."""
+    return list(
+        filter(lambda mod: mod.type_key == "arm_compute_lib", module.get_lib().imported_modules)
+    )
+
+
+def verify_codegen(
+    module,
+    known_good_codegen,
+    num_acl_modules,
+    target="llvm -mtriple=aarch64-linux-gnu -mattr=+neon",
+):
+    """Check acl codegen against a known good output."""
+    module = build_module(module, target)
+    acl_modules = extract_acl_modules(module)
+
+    assert len(acl_modules) == num_acl_modules, (
+        f"The number of Arm Compute Library modules produced ({len(acl_modules)}) does not "
+        f"match the expected value ({num_acl_modules})."
+    )
+
+    for mod in acl_modules:
+        source = mod.get_source("json")
+        codegen = json.loads(source)["nodes"]
+        # remove input and const names as these cannot be predetermined
+        for node in range(len(codegen)):
+            if codegen[node]["op"] == "input" or codegen[node]["op"] == "const":
+                codegen[node]["name"] = ""
+        codegen_str = json.dumps(codegen, sort_keys=True, indent=2)
+        known_good_codegen_str = json.dumps(known_good_codegen, sort_keys=True, indent=2)
+
+        assert codegen_str == known_good_codegen_str, (
+            f"The JSON produced by codegen does not match the expected result. \n"
+            f"Actual={codegen_str} \n"
+            f"Expected={known_good_codegen_str}"
+        )
+
+
+def generate_trials(space, r_factor=3):
+    """Generates a series of trials.
+
+    This algorithm generates a series of non-deterministic trials given a
+    space of options to test. A trial is generated by pulling a value from
+    each option in the space. On some occasions the values are shuffled to
+    ensure a different trial on each r_factor iteration. The algorithm ensures
+    that each value from an option is used at least once. The total number of
+    trials is r_factor multiplied by the number of values in the largest
+    option.
+
+    Parameters
+    ----------
+    space: List[List[Any]]
+        A list of different options with varying values to test.
+    r_factor: (optional) int
+        The repeat factor.
+
+    Returns
+    -------
+    A list of trials specifying values for each option.
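+
+    Examples
+    --------
+    The values picked for each trial depend on the shuffling, but the number
+    of trials is always r_factor multiplied by the size of the largest
+    option (a small illustrative run):
+
+    >>> trials = generate_trials([[1, 2], ["a", "b", "c"]], r_factor=1)
+    >>> len(trials)
+    3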
+
+    """
+    np.random.seed(0)
+    max_len = 1
+    for option in space:
+        max_len = max(max_len, len(option))
+
+    num_trials = r_factor * max_len
+    trials = []
+    for i in range(num_trials):
+        trial = []
+        for option in space:
+            if i % len(option) == 0:
+                np.random.shuffle(option)
+            trial.append(option[i % len(option)])
+
+        trials.append(trial)
+
+    return trials
+
+from __future__ import absolute_import, division, unicode_literals
+
+import os
+import sys
+import codecs
+import glob
+import xml.sax.handler
+
+base_path = os.path.split(__file__)[0]
+
+test_dir = os.path.join(base_path, 'testdata')
+sys.path.insert(0, os.path.abspath(os.path.join(base_path,
+                                                os.path.pardir,
+                                                os.path.pardir)))
+
+from html5lib import treebuilders
+del base_path
+
+# Build a dict of available trees
+treeTypes = {"DOM": treebuilders.getTreeBuilder("dom")}
+
+# Try whatever etree implementations are available from a list that are
+# "supposed" to work
+try:
+    import xml.etree.ElementTree as ElementTree
+    treeTypes['ElementTree'] = treebuilders.getTreeBuilder("etree", ElementTree, fullTree=True)
+except ImportError:
+    try:
+        import elementtree.ElementTree as ElementTree
+        treeTypes['ElementTree'] = treebuilders.getTreeBuilder("etree", ElementTree, fullTree=True)
+    except ImportError:
+        pass
+
+try:
+    import xml.etree.cElementTree as cElementTree
+    treeTypes['cElementTree'] = treebuilders.getTreeBuilder("etree", cElementTree, fullTree=True)
+except ImportError:
+    try:
+        import cElementTree
+        treeTypes['cElementTree'] = treebuilders.getTreeBuilder("etree", cElementTree, fullTree=True)
+    except ImportError:
+        pass
+
+try:
+    import lxml.etree as lxml  # flake8: noqa
+except ImportError:
+    pass
+else:
+    treeTypes['lxml'] = treebuilders.getTreeBuilder("lxml")
+
+
+def get_data_files(subdirectory, files='*.dat'):
+    return glob.glob(os.path.join(test_dir, subdirectory, files))
+
+
+class DefaultDict(dict):
+    def __init__(self, default, *args, **kwargs):
+        self.default = default
+        dict.__init__(self, *args, **kwargs)
+
+    def __getitem__(self, key):
+        return dict.get(self, key, self.default)
+
+
+class TestData(object):
+    def __init__(self, filename, newTestHeading="data", encoding="utf8"):
+        if encoding is None:
+            self.f = open(filename, mode="rb")
+        else:
+            self.f = codecs.open(filename, encoding=encoding)
+        self.encoding = encoding
+        self.newTestHeading = newTestHeading
+
+    def __del__(self):
+        self.f.close()
+
+    def __iter__(self):
+        data = DefaultDict(None)
+        key = None
+        for line in self.f:
+            heading = self.isSectionHeading(line)
+            if heading:
+                if data and heading == self.newTestHeading:
+                    # Remove trailing newline
+                    data[key] = data[key][:-1]
+                    yield self.normaliseOutput(data)
+                    data = DefaultDict(None)
+                key = heading
+                data[key] = "" if self.encoding else b""
+            elif key is not None:
+                data[key] += line
+        if data:
+            yield self.normaliseOutput(data)
+
+    def isSectionHeading(self, line):
+        """If the current heading is a test section heading return the heading,
+        otherwise return False"""
+        # print(line)
+        if line.startswith("#" if self.encoding else b"#"):
+            return line[1:].strip()
+        else:
+            return False
+
+    def normaliseOutput(self, data):
+        # Remove trailing newlines
+        for key, value in data.items():
+            if value.endswith("\n" if self.encoding else b"\n"):
+                data[key] = value[:-1]
+        return data
+
+
+def convert(stripChars):
+    def convertData(data):
+        """convert the output of str(document) to the format used in the testcases"""
+        data = data.split("\n")
+        rv = []
+        for line in data:
+            # tree-format lines start with "|"; strip the leading marker characters
+            if line.startswith("|"):
+                rv.append(line[stripChars:])
+            else:
+                rv.append(line)
+        return "\n".join(rv)
+    return convertData
+
+convertExpected = convert(2)
+
+
+def errorMessage(input, expected, actual):
+    msg = ("Input:\n%s\nExpected:\n%s\nReceived\n%s\n" %
+           (repr(input), repr(expected), repr(actual)))
+    if sys.version_info.major == 2:
+        msg = msg.encode("ascii", "backslashreplace")
+    return msg
+
+
+class TracingSaxHandler(xml.sax.handler.ContentHandler):
+    def __init__(self):
+        xml.sax.handler.ContentHandler.__init__(self)
+        self.visited = []
+
+    def startDocument(self):
+        self.visited.append('startDocument')
+
+    def endDocument(self):
+        self.visited.append('endDocument')
+
+    def startPrefixMapping(self, prefix, uri):
+        # These are ignored as their order is not guaranteed
+        pass
+
+    def endPrefixMapping(self, prefix):
+        # These are ignored as their order is not guaranteed
+        pass
+
+    def startElement(self, name, attrs):
+        self.visited.append(('startElement', name, attrs))
+
+    def endElement(self, name):
+        self.visited.append(('endElement', name))
+
+    def startElementNS(self, name, qname, attrs):
+        self.visited.append(('startElementNS', name, qname, dict(attrs)))
+
+    def endElementNS(self, name, qname):
+        self.visited.append(('endElementNS', name, qname))
+
+    def characters(self, content):
+        self.visited.append(('characters', content))
+
+    def ignorableWhitespace(self, whitespace):
+        self.visited.append(('ignorableWhitespace', whitespace))
+
+    def processingInstruction(self, target, data):
+        self.visited.append(('processingInstruction', target, data))
+
+    def skippedEntity(self, name):
+        self.visited.append(('skippedEntity', name))
+
+"""Test OpenBabel executables from Python
+
+Note: Python bindings not used
+
+On Windows or Linux, you can run these tests at the commandline
+in the build folder with:
+"C:\Program Files\CMake 2.6\bin\ctest.exe" -C CTestTestfile.cmake
+   -R pytest -VV
+
+You could also "chdir" into build/test and run the test file directly:
+python ../../../test/testsym.py
+
+In both cases, the test file is run directly from the source folder,
+and so you can quickly develop the tests and try them out.
+""" + +import os +import glob +import unittest + +from testbabel import run_exec, executable, log, BaseTest + +here = os.path.dirname(__file__) + +class TestReactionInChIWriter(BaseTest): + """A series of tests relating to writing Reaction InChI""" + + def testRSMItoRINCHI(self): + data = [ + ("C>N>O", "RInChI=1.00.1S/CH4/h1H4<>H2O/h1H2<>H3N/h1H3/d+"), + ("O>N>C", "RInChI=1.00.1S/CH4/h1H4<>H2O/h1H2<>H3N/h1H3/d-"), + ("O>>C", "RInChI=1.00.1S/CH4/h1H4<>H2O/h1H2/d-"), + # The following is assumed to be d+ by analogy with + # the empty reaction which is d+ + ("O>>O", "RInChI=1.00.1S/H2O/h1H2<>H2O/h1H2/d+"), + # Example: esterification of acetic acid + ("OCC.CC(=O)O>S(=O)(=O)(O)O>CC(=O)OCC.O", "RInChI=1.00.1S/C2H4O2/c1-2(3)4/h1H3,(H,3,4)!C2H6O/c1-2-3/h3H,2H2,1H3<>C4H8O2/c1-3-6-4(2)5/h3H2,1-2H3!H2O/h1H2<>H2O4S/c1-5(2,3)4/h(H2,1,2,3,4)/d+"), + # Example: alkaline ring opening + ("CC[C@]1(C)O[C@H]1C.[OH-]>>CC[C@](C)(O)[C@@H](C)O", "RInChI=1.00.1S/C6H12O/c1-4-6(3)5(2)7-6/h5H,4H2,1-3H3/t5-,6-/m0/s1!H2O/h1H2/p-1<>C6H14O2/c1-4-6(3,8)5(2)7/h5,7-8H,4H2,1-3H3/t5-,6+/m1/s1/d+"), + # Partial reactions + (">>C1CC=C(O)CC1", "RInChI=1.00.1S/<>C6H10O/c7-6-4-2-1-3-5-6/h4,7H,1-3,5H2/d+"), + ("C1CC=C(O)CC1>>", "RInChI=1.00.1S/<>C6H10O/c7-6-4-2-1-3-5-6/h4,7H,1-3,5H2/d-"), + # The empty reaction + (">>", "RInChI=1.00.1S//d+"), + # Test 'no-structure' + ("c1ccccc1C=C>>*", "RInChI=1.00.1S/<>C8H8/c1-2-8-6-4-3-5-7-8/h2-7H,1H2/d-/u1-0-0"), + ("*>>C1CC=C(O)CC1", "RInChI=1.00.1S/<>C6H10O/c7-6-4-2-1-3-5-6/h4,7H,1-3,5H2/d+/u1-0-0"), + ("O>*>C", "RInChI=1.00.1S/CH4/h1H4<>H2O/h1H2/d-/u0-0-1"), + ("*.O>>C", "RInChI=1.00.1S/CH4/h1H4<>H2O/h1H2/d-/u0-1-0"), + # Empty except for 'no-structures' (assumed) + ("*>*>*", "RInChI=1.00.1S//d+/u1-1-1"), + ] + for eqm in [False, True]: + for rsmi, rinchi in data: + if eqm: + output, error = run_exec('obabel -:%s -ismi -orinchi -xe' % rsmi) + ans = rinchi.replace("/d-", "/d=").replace("/d+", "/d=") + self.assertEqual(output.rstrip(), ans) + else: + output, error = run_exec('obabel -:%s -ismi -orinchi' % rsmi) + self.assertEqual(output.rstrip(), rinchi) + + def testRInChIOfficialExamples(self): + """These test RXN to RInChI using the examples in the RInChI distrib""" + for rxnfile in glob.glob(os.path.join(here, "rinchi", "*.rxn")): + dirname, fname = os.path.split(rxnfile) + output, error = run_exec('obabel %s -orinchi' % rxnfile) + with open(os.path.join(dirname, fname.split(".")[0]+".txt")) as inp: + ans = inp.readlines()[0] + self.assertEqual(output.rstrip(), ans.rstrip()) + +if __name__ == "__main__": + unittest.main() + +"""Helper functions for XML. + +This module has misc. 
helper functions for working with XML DOM nodes.""" + +import re +from compat import * + +import os +if os.name != "java": + from xml.dom import minidom + from xml.sax import saxutils + + def parseDocument(s): + return minidom.parseString(s) +else: + from javax.xml.parsers import * + import java + + builder = DocumentBuilderFactory.newInstance().newDocumentBuilder() + + def parseDocument(s): + stream = java.io.ByteArrayInputStream(java.lang.String(s).getBytes()) + return builder.parse(stream) + +def parseAndStripWhitespace(s): + try: + element = parseDocument(s).documentElement + except BaseException, e: + raise SyntaxError(str(e)) + stripWhitespace(element) + return element + +#Goes through a DOM tree and removes whitespace besides child elements, +#as long as this whitespace is correctly tab-ified +def stripWhitespace(element, tab=0): + element.normalize() + + lastSpacer = "\n" + ("\t"*tab) + spacer = lastSpacer + "\t" + + #Zero children aren't allowed (i.e. ) + #This makes writing output simpler, and matches Canonical XML + if element.childNodes.length==0: #DON'T DO len(element.childNodes) - doesn't work in Jython + raise SyntaxError("Empty XML elements not allowed") + + #If there's a single child, it must be text context + if element.childNodes.length==1: + if element.firstChild.nodeType == element.firstChild.TEXT_NODE: + #If it's an empty element, remove + if element.firstChild.data == lastSpacer: + element.removeChild(element.firstChild) + return + #If not text content, give an error + elif element.firstChild.nodeType == element.firstChild.ELEMENT_NODE: + raise SyntaxError("Bad whitespace under '%s'" % element.tagName) + else: + raise SyntaxError("Unexpected node type in XML document") + + #Otherwise there's multiple child element + child = element.firstChild + while child: + if child.nodeType == child.ELEMENT_NODE: + stripWhitespace(child, tab+1) + child = child.nextSibling + elif child.nodeType == child.TEXT_NODE: + if child == element.lastChild: + if child.data != lastSpacer: + raise SyntaxError("Bad whitespace under '%s'" % element.tagName) + elif child.data != spacer: + raise SyntaxError("Bad whitespace under '%s'" % element.tagName) + next = child.nextSibling + element.removeChild(child) + child = next + else: + raise SyntaxError("Unexpected node type in XML document") + + +def checkName(element, name): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Missing element: '%s'" % name) + + if name == None: + return + + if element.tagName != name: + raise SyntaxError("Wrong element name: should be '%s', is '%s'" % (name, element.tagName)) + +def getChild(element, index, name=None): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Wrong node type in getChild()") + + child = element.childNodes.item(index) + if child == None: + raise SyntaxError("Missing child: '%s'" % name) + checkName(child, name) + return child + +def getChildIter(element, index): + class ChildIter: + def __init__(self, element, index): + self.element = element + self.index = index + + def next(self): + if self.index < len(self.element.childNodes): + retVal = self.element.childNodes.item(self.index) + self.index += 1 + else: + retVal = None + return retVal + + def checkEnd(self): + if self.index != len(self.element.childNodes): + raise SyntaxError("Too many elements under: '%s'" % self.element.tagName) + return ChildIter(element, index) + +def getChildOrNone(element, index): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Wrong node type in getChild()") + 
child = element.childNodes.item(index) + return child + +def getLastChild(element, index, name=None): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Wrong node type in getLastChild()") + + child = element.childNodes.item(index) + if child == None: + raise SyntaxError("Missing child: '%s'" % name) + if child != element.lastChild: + raise SyntaxError("Too many elements under: '%s'" % element.tagName) + checkName(child, name) + return child + +#Regular expressions for syntax-checking attribute and element content +nsRegEx = "http://trevp.net/cryptoID\Z" +cryptoIDRegEx = "([a-km-z3-9]{5}\.){3}[a-km-z3-9]{5}\Z" +urlRegEx = "http(s)?://.{1,100}\Z" +sha1Base64RegEx = "[A-Za-z0-9+/]{27}=\Z" +base64RegEx = "[A-Za-z0-9+/]+={0,4}\Z" +certsListRegEx = "(0)?(1)?(2)?(3)?(4)?(5)?(6)?(7)?(8)?(9)?\Z" +keyRegEx = "[A-Z]\Z" +keysListRegEx = "(A)?(B)?(C)?(D)?(E)?(F)?(G)?(H)?(I)?(J)?(K)?(L)?(M)?(N)?(O)?(P)?(Q)?(R)?(S)?(T)?(U)?(V)?(W)?(X)?(Y)?(Z)?\Z" +dateTimeRegEx = "\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\dZ\Z" +shortStringRegEx = ".{1,100}\Z" +exprRegEx = "[a-zA-Z0-9 ,()]{1,200}\Z" +notAfterDeltaRegEx = "0|([1-9][0-9]{0,8})\Z" #A number from 0 to (1 billion)-1 +booleanRegEx = "(true)|(false)" + +def getReqAttribute(element, attrName, regEx=""): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Wrong node type in getReqAttribute()") + + value = element.getAttribute(attrName) + if not value: + raise SyntaxError("Missing Attribute: " + attrName) + if not re.match(regEx, value): + raise SyntaxError("Bad Attribute Value for '%s': '%s' " % (attrName, value)) + element.removeAttribute(attrName) + return str(value) #de-unicode it; this is needed for bsddb, for example + +def getAttribute(element, attrName, regEx=""): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Wrong node type in getAttribute()") + + value = element.getAttribute(attrName) + if value: + if not re.match(regEx, value): + raise SyntaxError("Bad Attribute Value for '%s': '%s' " % (attrName, value)) + element.removeAttribute(attrName) + return str(value) #de-unicode it; this is needed for bsddb, for example + +def checkNoMoreAttributes(element): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Wrong node type in checkNoMoreAttributes()") + + if element.attributes.length!=0: + raise SyntaxError("Extra attributes on '%s'" % element.tagName) + +def getText(element, regEx=""): + textNode = element.firstChild + if textNode == None: + raise SyntaxError("Empty element '%s'" % element.tagName) + if textNode.nodeType != textNode.TEXT_NODE: + raise SyntaxError("Non-text node: '%s'" % element.tagName) + if not re.match(regEx, textNode.data): + raise SyntaxError("Bad Text Value for '%s': '%s' " % (element.tagName, textNode.data)) + return str(textNode.data) #de-unicode it; this is needed for bsddb, for example + +#Function for adding tabs to a string +def indent(s, steps, ch="\t"): + tabs = ch*steps + if s[-1] != "\n": + s = tabs + s.replace("\n", "\n"+tabs) + else: + s = tabs + s.replace("\n", "\n"+tabs) + s = s[ : -len(tabs)] + return s + +def escape(s): + return saxutils.escape(s) + +import os, shutil +from update import Update +import subprocess, zipfile +from bz2 import BZ2File + +def test_delta_exists(env): + for de in env.get_deltaZipPaths(): + assert os.path.exists(de) + + for dm in env.get_deltaManifestPaths(): + assert os.path.exists(dm) + +def test_delta_manifest_parse(env): + for dm in env.get_deltaManifestPaths(): + u = Update.parse(BZ2File(dm).read()) + +def test_patch_delta(env): 
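+    """Extract the delta zip, apply each bsdiff patch to a copy of v1.0 and
+    verify the patched file contents."""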
+ # first make a copy of the version we want to patch + targetdir = os.path.join(env.workdir, "target") + patchdir = os.path.join(env.workdir, "patches") + shutil.copytree(os.path.join(env.workdir, "v1.0"), targetdir) + + os.makedirs(patchdir) + zf = zipfile.ZipFile(env.get_deltaZipPaths()[0]) + zf.extractall(patchdir) + zf.close() + + deltamanifest = Update.parse(BZ2File(env.get_deltaManifestPaths()[0], "r").read()) + for p in deltamanifest.patches: + cmd = ["../build/external/bsdiff/bspatch-endsley", + os.path.join(targetdir, p.name), + os.path.join(targetdir, p.name + ".new"), + os.path.join(patchdir, p.patchName)] + print cmd + res = subprocess.call(cmd) + assert res == 0 + assert file(os.path.join(targetdir, p.name + ".new")).read() == "This file changes with versions v3.0" + + shutil.rmtree(targetdir) + shutil.rmtree(patchdir) +# -*- encoding: utf-8 -*- +# This file is distributed under the same license as the Django package. +# +from __future__ import unicode_literals + +# The *_FORMAT strings use the Django date format syntax, +# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date +DATE_FORMAT = r'Y \m. E j \d.' +TIME_FORMAT = 'H:i' +DATETIME_FORMAT = r'Y \m. E j \d., H:i' +YEAR_MONTH_FORMAT = r'Y \m. F' +MONTH_DAY_FORMAT = r'E j \d.' +SHORT_DATE_FORMAT = 'Y-m-d' +SHORT_DATETIME_FORMAT = 'Y-m-d H:i' +FIRST_DAY_OF_WEEK = 1 # Monday + +# The *_INPUT_FORMATS strings use the Python strftime format syntax, +# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior +DATE_INPUT_FORMATS = [ + '%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06' +] +TIME_INPUT_FORMATS = [ + '%H:%M:%S', # '14:30:59' + '%H:%M:%S.%f', # '14:30:59.000200' + '%H:%M', # '14:30' + '%H.%M.%S', # '14.30.59' + '%H.%M.%S.%f', # '14.30.59.000200' + '%H.%M', # '14.30' +] +DATETIME_INPUT_FORMATS = [ + '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59' + '%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200' + '%Y-%m-%d %H:%M', # '2006-10-25 14:30' + '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59' + '%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200' + '%d.%m.%Y %H:%M', # '25.10.2006 14:30' + '%d.%m.%Y', # '25.10.2006' + '%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59' + '%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200' + '%d.%m.%y %H:%M', # '25.10.06 14:30' + '%d.%m.%y %H.%M.%S', # '25.10.06 14.30.59' + '%d.%m.%y %H.%M.%S.%f', # '25.10.06 14.30.59.000200' + '%d.%m.%y %H.%M', # '25.10.06 14.30' + '%d.%m.%y', # '25.10.06' +] +DECIMAL_SEPARATOR = ',' +THOUSAND_SEPARATOR = '.' +NUMBER_GROUPING = 3 + +# -*- coding: utf-8 -*- +############################################################################## +# +# OpenERP, Open Source Management Solution +# Copyright (C) 2010-2011 OpenERP s.a. (). +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+# +############################################################################## + +import res_users +import controllers + +# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: + + +import sys, re + +def postProcess(text): + text = re.sub(r'-LRB-', '(', text) + text = re.sub(r'-RRB-', ')', text) + # drop parenthesed content + text = re.sub(r'\([^\)]+\s[^\)]+\)', ' ', text) + text = re.sub(r' +', ' ', text) + text = re.sub(r'^ ', '', text) + text = re.sub(r' $', '', text) + text = re.sub(r' ?,( ?,)+ ?', ' ', text) + text = re.sub(r'^([^a-zA-Z\d]*)[,;.?!] *', r'\1', text) + text = re.sub(r"`` *''", ' ', text) + text = re.sub(r" ''", '"', text) + text = re.sub(r'`` ', '"', text) + text = re.sub(r' \'\'', '"', text) + text = re.sub(r' n\'t', 'n\'t', text) + text = re.sub(r'\$ ([0-9])', r'$\1', text) + text = re.sub(r" '([sdm]|ll|re|ve)\b", r"'\1", text) + text = re.sub(r'(\d) , (\d\d\d([^\d]|$))', r'\1,\2', text) + text = re.sub(r' ([;:,.?!\)\]\}]["\'\)\]\}]?) ', r'\1 ', text) + text = re.sub(r'([\)\]\}]+) ([;:,.?!]+) ', r'\1\2 ', text) + text = re.sub(r'( |^)(["\'\(\[\{]) ([a-zA-Z\d])', r' \2\3', text) # " a => "a + text = re.sub(r' ([^a-zA-Z\d]+)$', r'\1', text) # " ." => "." + text = re.sub(r'"[^a-zA-Z\d]*"', '', text) + text = re.sub(r'\([^a-zA-Z\d]*\)', '', text) # (,) + text = re.sub(r'\[[^a-zA-Z\d]*\]', '', text) + text = re.sub(r'\{[^a-zA-Z\d]*\}', '', text) + text = re.sub(r'\'[^a-zA-Z\d]*\'', '', text) + text = re.sub(' +', ' ', text) + text = re.sub('^ ', '', text) + text = re.sub(' $', '', text) + text = re.sub(' +\.\.\.', '...', text) + text = re.sub('! !( !)+', '!!!', text) + text = re.sub(r'\s*[,;]+\s*([.!?]["\'\)\]\}]?|["\'\)\]\}][.!?])$', r'\1', text) # ,. => . + while re.search(r'[A-Z]\. [A-Z]\.', text): + text = re.sub(r'\b([A-Z]\.) ([A-Z].)', r'\1\2', text) # A. B. C. + text = re.sub(r'([A-Z]) & ([A-Z])', r'\1&\2', text) # AT & T + text = re.sub(r'([A-Za-z0-9])', (lambda x: x.group(1).capitalize()), text, 1) # ^a => A + text = re.sub(r'([a-zA-Z0-9])(["\)\]\}])$', r'\1.\2', text) # a" => a." + text = re.sub(r'([a-zA-Z0-9])$', r'\1.', text) # a$ => a. + text = re.sub(r'^([^"]*)"([^"]*)$', r'\1\2', text) # lonely quotes + return text + +if __name__ == "__main__": + for line in sys.stdin: + print postProcess(line.strip()) + +import socket +import logging +logger = logging.getLogger("debugging") + + +def check_port(host, port_number, content=None): + logger.info(u"Port check, host: %s, port: %s, content: '%s'" % (host, port_number, content)) + + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.settimeout(10) + + # connect_ex returns an error number instead of raising an exception... 
in theory
+    try:
+        result = s.connect_ex((host, port_number))
+    except Exception as e:
+        logger.error(u"Error: %s" % e)
+        return {'valid': False}
+
+    logger.info(u"Port check, connection errno: %i" % result)
+
+    if result == 0:
+        ret_obj = {'status_ok': True, 'valid': True}
+
+        if content:
+            try:
+                recv_content = s.recv(512)
+
+            except Exception as e:
+                logger.error(u"Error: %s" % e)
+                return {'valid': False}
+
+            logger.info(u"Received: %s" % recv_content)
+
+            if content.lower() not in recv_content.lower():
+                ret_obj['status_ok'] = False
+
+        return ret_obj
+
+    else:
+        return {'valid': False}
+
+"""Build a sentiment analysis / polarity model
+
+Sentiment analysis can be cast as a binary text classification problem,
+that is fitting a linear classifier on features extracted from the text
+of the user messages so as to guess whether the opinion of the author is
+positive or negative.
+
+In this example we will use a movie review dataset.
+
+"""
+# Author: Olivier Grisel
+# License: Simplified BSD
+
+import sys
+from sklearn.feature_extraction.text import TfidfVectorizer
+from sklearn.svm import LinearSVC
+from sklearn.pipeline import Pipeline
+from sklearn.grid_search import GridSearchCV
+from sklearn.datasets import load_files
+from sklearn.cross_validation import train_test_split
+from sklearn import metrics
+
+
+if __name__ == "__main__":
+    # NOTE: we put the following in a 'if __name__ == "__main__"' protected
+    # block to be able to use a multi-core grid search that also works under
+    # Windows, see: http://docs.python.org/library/multiprocessing.html#windows
+    # The multiprocessing module is used as the backend of joblib.Parallel
+    # that is used when n_jobs != 1 in GridSearchCV
+
+    # the training data folder must be passed as first argument
+    movie_reviews_data_folder = sys.argv[1]
+    dataset = load_files(movie_reviews_data_folder, shuffle=False)
+    print("n_samples: %d" % len(dataset.data))
+
+    # split the dataset in training and test set:
+    docs_train, docs_test, y_train, y_test = train_test_split(
+        dataset.data, dataset.target, test_size=0.25, random_state=None)
+
+    # TASK: Build a vectorizer / classifier pipeline that filters out tokens
+    # that are too rare or too frequent
+    pipeline = Pipeline([
+        ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
+        ('clf', LinearSVC(C=1000)),
+    ])
+
+    # TASK: Build a grid search to find out whether unigrams or bigrams are
+    # more useful.
+    # Fit the pipeline on the training set using grid search for the parameters
+    parameters = {
+        'vect__ngram_range': [(1, 1), (1, 2)],
+    }
+    grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
+    grid_search.fit(docs_train, y_train)
+
+    # TASK: print the cross-validated scores for each parameter set
+    # explored by the grid search
+    print(grid_search.grid_scores_)
+
+    # TASK: Predict the outcome on the testing set and store it in a variable
+    # named y_predicted
+    y_predicted = grid_search.predict(docs_test)
+
+    # Print the classification report
+    print(metrics.classification_report(y_test, y_predicted,
+                                        target_names=dataset.target_names))
+
+    # Print and plot the confusion matrix
+    cm = metrics.confusion_matrix(y_test, y_predicted)
+    print(cm)
+
+    # import matplotlib.pyplot as plt
+    # plt.matshow(cm)
+    # plt.show()
+
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+Website property.
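+It matches known top-level domains, safe subdomains and prefixes inside
+release names.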
+""" +from pkg_resources import resource_stream # @UnresolvedImport +from rebulk.remodule import re + +from rebulk import Rebulk, Rule, RemoveMatch +from ..common import seps +from ..common.formatters import cleanup +from ..common.pattern import is_disabled +from ..common.validators import seps_surround +from ...reutils import build_or_pattern + + +def website(config): + """ + Builder for rebulk object. + + :param config: rule configuration + :type config: dict + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk(disabled=lambda context: is_disabled(context, 'website')) + rebulk = rebulk.regex_defaults(flags=re.IGNORECASE).string_defaults(ignore_case=True) + rebulk.defaults(name="website") + + with resource_stream('guessit', 'tlds-alpha-by-domain.txt') as tld_file: + tlds = [ + tld.strip().decode('utf-8') + for tld in tld_file.readlines() + if b'--' not in tld + ][1:] # All registered domain extension + + safe_tlds = config['safe_tlds'] # For sure a website extension + safe_subdomains = config['safe_subdomains'] # For sure a website subdomain + safe_prefix = config['safe_prefixes'] # Those words before a tlds are sure + website_prefixes = config['prefixes'] + + rebulk.regex(r'(?:[^a-z0-9]|^)((?:'+build_or_pattern(safe_subdomains) + + r'\.)+(?:[a-z-]+\.)+(?:'+build_or_pattern(tlds) + + r'))(?:[^a-z0-9]|$)', + children=True) + rebulk.regex(r'(?:[^a-z0-9]|^)((?:'+build_or_pattern(safe_subdomains) + + r'\.)*[a-z-]+\.(?:'+build_or_pattern(safe_tlds) + + r'))(?:[^a-z0-9]|$)', + safe_subdomains=safe_subdomains, safe_tlds=safe_tlds, children=True) + rebulk.regex(r'(?:[^a-z0-9]|^)((?:'+build_or_pattern(safe_subdomains) + + r'\.)*[a-z-]+\.(?:'+build_or_pattern(safe_prefix) + + r'\.)+(?:'+build_or_pattern(tlds) + + r'))(?:[^a-z0-9]|$)', + safe_subdomains=safe_subdomains, safe_prefix=safe_prefix, tlds=tlds, children=True) + + rebulk.string(*website_prefixes, + validator=seps_surround, private=True, tags=['website.prefix']) + + class PreferTitleOverWebsite(Rule): + """ + If found match is more likely a title, remove website. 
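+        A website match is kept when it starts with a safe subdomain or
+        prefix; otherwise it is removed if it is immediately followed by a
+        season, episode or year match.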
+        """
+        consequence = RemoveMatch
+
+        @staticmethod
+        def valid_followers(match):
+            """
+            Validator for next website matches
+            """
+            return any(name in ['season', 'episode', 'year'] for name in match.names)
+
+        def when(self, matches, context):
+            to_remove = []
+            for website_match in matches.named('website'):
+                safe = False
+                for safe_start in safe_subdomains + safe_prefix:
+                    if website_match.value.lower().startswith(safe_start):
+                        safe = True
+                        break
+                if not safe:
+                    suffix = matches.next(website_match, PreferTitleOverWebsite.valid_followers, 0)
+                    if suffix:
+                        to_remove.append(website_match)
+            return to_remove
+
+    rebulk.rules(PreferTitleOverWebsite, ValidateWebsitePrefix)
+
+    return rebulk
+
+
+class ValidateWebsitePrefix(Rule):
+    """
+    Validate website prefixes
+    """
+    priority = 64
+    consequence = RemoveMatch
+
+    def when(self, matches, context):
+        to_remove = []
+        for prefix in matches.tagged('website.prefix'):
+            website_match = matches.next(prefix, predicate=lambda match: match.name == 'website', index=0)
+            if (not website_match or
+                    matches.holes(prefix.end, website_match.start,
+                                  formatter=cleanup, seps=seps, predicate=lambda match: match.value)):
+                to_remove.append(prefix)
+        return to_remove
+
+
+class CharInfoWord(object):
+    def __init__(self, word):
+        b1, b2, b3, b4 = (word >> 24,
+                          (word & 0xff0000) >> 16,
+                          (word & 0xff00) >> 8,
+                          word & 0xff)
+
+        self.width_index = b1
+        self.height_index = b2 >> 4
+        self.depth_index = b2 & 0x0f
+        self.italic_index = (b3 & 0b11111100) >> 2
+        self.tag = b3 & 0b11
+        self.remainder = b4
+
+    def has_ligkern(self):
+        return self.tag == 1
+
+    def ligkern_start(self):
+        return self.remainder
+
+
+class LigKernProgram(object):
+    def __init__(self, program):
+        self.program = program
+
+    def execute(self, start, next_char):
+        curr_instruction = start
+        while True:
+            instruction = self.program[curr_instruction]
+            (skip, inst_next_char, op, remainder) = instruction
+
+            if inst_next_char == next_char:
+                if op < 128:
+                    # Don't worry about ligatures for now, we only need kerns
+                    return None
+                else:
+                    return 256 * (op - 128) + remainder
+            elif skip >= 128:
+                return None
+            else:
+                curr_instruction += 1 + skip
+
+
+class TfmCharMetrics(object):
+    def __init__(self, width, height, depth, italic, kern_table):
+        self.width = width
+        self.height = height
+        self.depth = depth
+        self.italic_correction = italic
+        self.kern_table = kern_table
+
+
+class TfmFile(object):
+    def __init__(self, start_char, end_char, char_info, width_table,
+                 height_table, depth_table, italic_table, ligkern_table,
+                 kern_table):
+        self.start_char = start_char
+        self.end_char = end_char
+        self.char_info = char_info
+        self.width_table = width_table
+        self.height_table = height_table
+        self.depth_table = depth_table
+        self.italic_table = italic_table
+        self.ligkern_program = LigKernProgram(ligkern_table)
+        self.kern_table = kern_table
+
+    def get_char_metrics(self, char_num):
+        if char_num < self.start_char or char_num > self.end_char:
+            raise RuntimeError("Invalid character number")
+
+        # char_info[0] corresponds to start_char, so index by the offset
+        # from start_char.
+        info = self.char_info[char_num - self.start_char]
+
+        char_kern_table = {}
+        if info.has_ligkern():
+            for char in range(self.start_char, self.end_char + 1):
+                kern = self.ligkern_program.execute(info.ligkern_start(), char)
+                if kern:
+                    char_kern_table[char] = self.kern_table[kern]
+
+        return TfmCharMetrics(
+            self.width_table[info.width_index],
+            self.height_table[info.height_index],
+            self.depth_table[info.depth_index],
+            self.italic_table[info.italic_index],
+            char_kern_table)
+
+
+class TfmReader(object):
+    
def __init__(self, f): + self.f = f + + def read_byte(self): + return ord(self.f.read(1)) + + def read_halfword(self): + b1 = self.read_byte() + b2 = self.read_byte() + return (b1 << 8) | b2 + + def read_word(self): + b1 = self.read_byte() + b2 = self.read_byte() + b3 = self.read_byte() + b4 = self.read_byte() + return (b1 << 24) | (b2 << 16) | (b3 << 8) | b4 + + def read_fixword(self): + word = self.read_word() + + neg = False + if word & 0x80000000: + neg = True + word = (-word & 0xffffffff) + + return (-1 if neg else 1) * word / float(1 << 20) + + def read_bcpl(self, length): + str_length = self.read_byte() + data = self.f.read(length - 1) + return data[:str_length] + + +def read_tfm_file(file_name): + with open(file_name, 'rb') as f: + reader = TfmReader(f) + + # file_size + reader.read_halfword() + header_size = reader.read_halfword() + + start_char = reader.read_halfword() + end_char = reader.read_halfword() + + width_table_size = reader.read_halfword() + height_table_size = reader.read_halfword() + depth_table_size = reader.read_halfword() + italic_table_size = reader.read_halfword() + + ligkern_table_size = reader.read_halfword() + kern_table_size = reader.read_halfword() + + # extensible_table_size + reader.read_halfword() + # parameter_table_size + reader.read_halfword() + + # checksum + reader.read_word() + # design_size + reader.read_fixword() + + if header_size > 2: + # coding_scheme + reader.read_bcpl(40) + + if header_size > 12: + # font_family + reader.read_bcpl(20) + + for i in range(header_size - 17): + reader.read_word() + + char_info = [] + for i in range(start_char, end_char + 1): + char_info.append(CharInfoWord(reader.read_word())) + + width_table = [] + for i in range(width_table_size): + width_table.append(reader.read_fixword()) + + height_table = [] + for i in range(height_table_size): + height_table.append(reader.read_fixword()) + + depth_table = [] + for i in range(depth_table_size): + depth_table.append(reader.read_fixword()) + + italic_table = [] + for i in range(italic_table_size): + italic_table.append(reader.read_fixword()) + + ligkern_table = [] + for i in range(ligkern_table_size): + skip = reader.read_byte() + next_char = reader.read_byte() + op = reader.read_byte() + remainder = reader.read_byte() + + ligkern_table.append((skip, next_char, op, remainder)) + + kern_table = [] + for i in range(kern_table_size): + kern_table.append(reader.read_fixword()) + + # There is more information, like the ligkern, kern, extensible, and + # param table, but we don't need these for now + + return TfmFile(start_char, end_char, char_info, width_table, + height_table, depth_table, italic_table, + ligkern_table, kern_table) + +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""A class to store named variables and a scope operator to manage sharing.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections as collections_lib +import contextlib +import functools +import traceback + +import six +from six.moves import xrange # pylint: disable=redefined-builtin + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import init_ops +from tensorflow.python.ops import variables +from tensorflow.python.platform import tf_logging as logging + +__all__ = ["VariableScope", "get_variable_scope", + "get_variable", "get_local_variable", "variable_scope", + "variable_op_scope", "no_regularizer"] + + +class _PartitionInfo(object): + """Holds partition info used by initializer functions. + """ + + def __init__(self, full_shape, var_offset): + """Constructor. + + Args: + full_shape: Tuple or list of `int` indicating the full combined shape + of the partitioned variables. + var_offset: Tuple or list of `int` specifying offset of this partition + with respect to the full variable for each dimension. + + Raises: + TypeError: If `full_shape` or `var_offset` is not a sequence. + ValueError: If `full_shape` or `var_offset` differ in length. If + `var_offset` exceeds `full_shape` in any dimension. + """ + if not isinstance(full_shape, collections_lib.Sequence) or isinstance( + full_shape, six.string_types): + raise TypeError( + "`full_shape` must be a sequence (like tuple or list) instead of " + + type(full_shape).__name__) + + if not isinstance(var_offset, collections_lib.Sequence) or isinstance( + var_offset, six.string_types): + raise TypeError( + "`var_offset` must be a sequence (like tuple or list) instead of " + + type(var_offset).__name__) + + if len(var_offset) != len(full_shape): + raise ValueError( + "Expected equal length, but `var_offset` is of length {} while " + "full_shape is of length {}.".format( + len(var_offset), len(full_shape))) + + for i in xrange(len(full_shape)): + offset = var_offset[i] + shape = full_shape[i] + if offset < 0 or offset >= shape: + raise ValueError( + "Expected 0 <= offset < shape but found offset={}, shape={} for " + "var_offset={}, full_shape={}".format(offset, shape, var_offset, + full_shape)) + + self._full_shape = full_shape + self._var_offset = var_offset + + @property + def full_shape(self): + return self._full_shape + + @property + def var_offset(self): + return self._var_offset + + def single_offset(self, shape): + """Returns the offset when the variable is partitioned in at most one dim. + + Args: + shape: Tuple or list of `int` indicating the shape of one specific + variable partition. + + Returns: + `int` representing the offset in the dimension along which the variable is + partitioned. Returns 0 if the variable is not being partitioned. + + Raises: + ValueError: Depending on self.single_slice_dim(). + """ + + single_slice_dim = self.single_slice_dim(shape) + # If this variable is not being partitioned at all, single_slice_dim() could + # return None. + if single_slice_dim is None: + return 0 + return self.var_offset[single_slice_dim] + + def single_slice_dim(self, shape): + """Returns the slice dim when the variable is partitioned only in one dim. 
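+
+    For example, with `self.full_shape = [20, 5]`, a partition of shape
+    `[10, 5]` differs from the full shape only in dimension 0, so this
+    returns 0.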
+ + Args: + shape: Tuple or list of `int` indicating the shape of one specific + variable partition. + + Returns: + `int` representing the dimension that the variable is partitioned in, or + `None` if the variable doesn't seem to be partitioned at all. + + Raises: + TypeError: If `shape` is not a sequence. + ValueError: If `shape` is not the same length as `self.full_shape`. If + the variable is partitioned in more than one dimension. + """ + if not isinstance(shape, collections_lib.Sequence) or isinstance( + shape, six.string_types): + raise TypeError( + "`shape` must be a sequence (like tuple or list) instead of " + + type(shape).__name__) + + if len(shape) != len(self.full_shape): + raise ValueError( + "Expected equal length, but received shape={} of length {} while " + "self.full_shape={} is of length {}.".format(shape, len( + shape), self.full_shape, len(self.full_shape))) + + for i in xrange(len(shape)): + if self.var_offset[i] + shape[i] > self.full_shape[i]: + raise ValueError( + "With self.var_offset={}, a partition of shape={} would exceed " + "self.full_shape={} in dimension {}.".format( + self.var_offset, shape, self.full_shape, i)) + + slice_dim = None + for i in xrange(len(shape)): + if shape[i] == self.full_shape[i]: + continue + if slice_dim is not None: + raise ValueError( + "Cannot use single_slice_dim() with shape={} and " + "self.full_shape={} since slice dim could be either dimension {} " + "or {}.".format(shape, self.full_shape, i, slice_dim)) + slice_dim = i + + return slice_dim + + +class _VariableStore(object): + """Variable store that carries a number of named Variables. + + New variable names and new variables can be created; all stored + variables are initialized with the initializer passed to __init__. + + Attributes: + vars: a dictionary with string names (same as passed in GetVar) as keys + and the corresponding TensorFlow Variables as values. + """ + + def __init__(self): + """Create a variable store.""" + self._vars = {} # A dictionary of the stored TensorFlow variables. + self._partitioned_vars = {} # A dict of the stored PartitionedVariables. + self._variable_scopes_count = {} # Count re-used variable scopes. + + def open_variable_scope(self, scope_name): + if scope_name in self._variable_scopes_count: + self._variable_scopes_count[scope_name] += 1 + else: + self._variable_scopes_count[scope_name] = 1 + + def close_variable_subscopes(self, scope_name): + for k in self._variable_scopes_count: + if not scope_name or k.startswith(scope_name + "/"): + self._variable_scopes_count[k] = 0 + + def variable_scope_count(self, scope_name): + return self._variable_scopes_count.get(scope_name, 0) + + def get_variable(self, name, shape=None, dtype=dtypes.float32, + initializer=None, regularizer=None, reuse=None, + trainable=True, collections=None, caching_device=None, + partitioner=None, validate_shape=True, custom_getter=None): + """Gets an existing variable with these parameters or create a new one. + + If a variable with the given name is already stored, we return the stored + variable. Otherwise, we create a new one. + + Set `reuse` to `True` when you only want to reuse existing Variables. + Set `reuse` to `False` when you only want to create new Variables. + If `reuse` is `None` (the default), both new and existing variables are + returned. + + If initializer is `None` (the default), the default initializer passed in + the constructor is used. If that one is `None` too, we use a new + `uniform_unit_scaling_initializer`. 
If initializer is a Tensor, we use + it as a value and derive the shape from the initializer. + + If a partitioner is provided, a `PartitionedVariable` is returned. + Accessing this object as a `Tensor` returns the shards concatenated along + the partition axis. + + Some useful partitioners are available. See, e.g., + `variable_axis_size_partitioner` and `min_max_variable_partitioner`. + + Args: + name: The name of the new or existing variable. + shape: Shape of the new or existing variable. + dtype: Type of the new or existing variable (defaults to `DT_FLOAT`). + initializer: Initializer for the variable. + regularizer: A (Tensor -> Tensor or None) function; the result of + applying it on a newly created variable will be added to the collection + GraphKeys.REGULARIZATION_LOSSES and can be used for regularization. + reuse: a Boolean or `None`. Controls reuse or creation of variables. + trainable: If `True` also add the variable to the graph collection + `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). + collections: List of graph collections keys to add the `Variable` to. + Defaults to `[GraphKeys.VARIABLES]` (see `tf.Variable`). + caching_device: Optional device string or function describing where the + Variable should be cached for reading. Defaults to the Variable's + device. If not `None`, caches on another device. Typical use is to + cache on the device where the Ops using the `Variable` reside, to + deduplicate copying through `Switch` and other conditional statements. + partitioner: Optional callable that accepts a fully defined `TensorShape` + and dtype of the `Variable` to be created, and returns a list of + partitions for each axis (currently only one axis can be partitioned). + validate_shape: If False, allows the variable to be initialized with a + value of unknown shape. If True, the default, the shape of initial_value + must be known. + custom_getter: Callable that takes as a first argument the true getter, + and allows overwriting the internal get_variable method. + The signature of `custom_getter` should match that of this method, + but the most future-proof version will allow for changes: + `def custom_getter(getter, *args, **kwargs)`. Direct access to + all `get_variable` parameters is also allowed: + `def custom_getter(getter, name, *args, **kwargs)`. A simple identity + custom getter that simply creates variables with modified names is: + ```python + def custom_getter(getter, name, *args, **kwargs): + return getter(name + '_suffix', *args, **kwargs) + ``` + + Returns: + The created or existing `Variable` (or `PartitionedVariable`, if a + partitioner was used). + + Raises: + ValueError: when creating a new variable and shape is not declared, + when reusing a variable and specifying a conflicting shape, + or when violating reuse during variable creation. + """ + if custom_getter is not None and not callable(custom_getter): + raise ValueError( + "Passed a custom_getter which is not callable: %s" % custom_getter) + + # This is the main logic of get_variable. However, custom_getter + # may override this logic. So we save it as a callable and pass + # it to custom_getter. + # Note: the parameters of _true_getter, and their documentation, match + # *exactly* item-for-item with the docstring of this method. 
+ def _true_getter(name, shape=None, dtype=dtypes.float32, # pylint: disable=missing-docstring + initializer=None, regularizer=None, reuse=None, + trainable=True, collections=None, caching_device=None, + partitioner=None, validate_shape=True): + # Partitioned variable case + if partitioner is not None: + if not callable(partitioner): + raise ValueError( + "Partitioner must be callable, but received: %s" % partitioner) + with ops.name_scope(None): + return self._get_partitioned_variable(name=name, + shape=shape, + dtype=dtype, + initializer=initializer, + regularizer=regularizer, + reuse=reuse, + trainable=trainable, + collections=collections, + caching_device=caching_device, + partitioner=partitioner, + validate_shape=validate_shape) + + # Special case for partitioned variable to allow reuse without having to + # specify partitioner. + if (reuse is True and partitioner is None + and name in self._partitioned_vars): + return self._get_partitioned_variable(name=name, + shape=shape, + dtype=dtype, + initializer=initializer, + regularizer=regularizer, + reuse=reuse, + trainable=trainable, + collections=collections, + caching_device=caching_device, + partitioner=None, + validate_shape=validate_shape) + + # Single variable case + if "%s/part_0" % name in self._vars: + raise ValueError( + "No partitioner was provided, but a partitioned version of the " + "variable was found: %s/part_0. Perhaps a variable of the same " + "name was already created with partitioning?" % name) + + return self._get_single_variable( + name=name, shape=shape, dtype=dtype, + initializer=initializer, regularizer=regularizer, reuse=reuse, + trainable=trainable, collections=collections, + caching_device=caching_device, validate_shape=validate_shape) + + if custom_getter is not None: + return custom_getter( + getter=_true_getter, name=name, shape=shape, dtype=dtype, + initializer=initializer, regularizer=regularizer, + reuse=reuse, trainable=trainable, collections=collections, + caching_device=caching_device, partitioner=partitioner, + validate_shape=validate_shape) + else: + return _true_getter( + name, shape=shape, dtype=dtype, + initializer=initializer, regularizer=regularizer, + reuse=reuse, trainable=trainable, collections=collections, + caching_device=caching_device, partitioner=partitioner, + validate_shape=validate_shape) + + def _get_partitioned_variable( + self, name, partitioner, shape=None, dtype=dtypes.float32, + initializer=None, regularizer=None, reuse=None, + trainable=True, collections=None, caching_device=None, + validate_shape=True): + """Gets or creates a sharded variable list with these parameters. + + The `partitioner` must be a callable that accepts a fully defined + `TensorShape` and returns a sequence of integers (the `partitions`). + These integers describe how to partition the given sharded `Variable` + along the given dimension. That is, `partitions[1] = 3` means split + the `Variable` into 3 shards along dimension 1. Currently, sharding along + only one axis is supported. + + If the list of variables with the given name (prefix) is already stored, + we return the stored variables. Otherwise, we create a new one. + + Set `reuse` to `True` when you only want to reuse existing Variables. + Set `reuse` to `False` when you only want to create new Variables. + If `reuse` is `None` (the default), both new and existing variables are + returned. + + If initializer is `None` (the default), the default initializer passed in + the constructor is used. 
If that one is `None` too, we use a new + `uniform_unit_scaling_initializer`. If initializer is a Tensor, we use + it as a value and derive the shape from the initializer. + + If the initializer is a callable, then it will be called for each + shard. Otherwise the initializer should match the shape of the entire + sharded Variable, and it will be sliced accordingly for each shard. + + Some useful partitioners are available. See, e.g., + `variable_axis_size_partitioner` and `min_max_variable_partitioner`. + + Args: + name: the name of the new or existing sharded variable. + partitioner: Optional callable that accepts a fully defined `TensorShape` + and `dtype` of the Variable to be created, and returns a list of + partitions for each axis (currently only one axis can be partitioned). + shape: shape of the new or existing sharded variable. + dtype: type of the new or existing sharded variable + (defaults to `DT_FLOAT`). + initializer: initializer for the sharded variable. + regularizer: a (Tensor -> Tensor or None) function; the result of + applying it on a newly created variable will be added to the collection + GraphKeys.REGULARIZATION_LOSSES and can be used for regularization. + reuse: a Boolean or `None`. Controls reuse or creation of variables. + trainable: If `True` also add the variable to the graph collection + `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). + collections: List of graph collections keys to add the Variable to. + Defaults to `[GraphKeys.VARIABLES]` (see `tf.Variable`). + caching_device: Optional device string or function describing where the + Variable should be cached for reading. Defaults to the Variable's + device. If not `None`, caches on another device. Typical use is to + cache on the device where the Ops using the Variable reside, to + deduplicate copying through `Switch` and other conditional statements. + validate_shape: If False, allows the variable to be initialized with a + value of unknown shape. If True, the default, the shape of initial_value + must be known. + + Returns: + A `PartitionedVariable` object. + + Raises: + ValueError: when creating a new variable and shape is not declared, + when reusing a variable and specifying a conflicting shape, + when violating reuse during variable creation, or if an existing + sharded variable exists for the given name but with different sharding. + """ + + initializing_from_value = initializer is not None and isinstance( + initializer, ops.Tensor) + reuse_without_partition = reuse is True and partitioner is None + + if name in self._vars: + raise ValueError( + "A partitioner was provided, but an unpartitioned version of the " + "variable was found: %s. Perhaps a variable of the same name was " + "already created without partitioning?" % name) + + shape = tensor_shape.as_shape(shape) + if initializing_from_value: + shape = shape.merge_with(initializer.get_shape()) + + if not reuse_without_partition: + if not shape.is_fully_defined(): + raise ValueError("Shape of a new partitioned variable (%s) must be " + "fully defined, but instead was %s." 
% (name, shape)) + + if shape.ndims < 1: + raise ValueError("A partitioned Variable must have rank at least 1, " + "shape: %s" % shape) + + partitions = partitioner(shape=shape, dtype=dtype) + + if not isinstance(partitions, collections_lib.Sequence): + raise ValueError("Partitioner must return a sequence, but saw: %s" + % partitions) + + if len(partitions) != shape.ndims: + raise ValueError( + "Partitioner returned a partition list that does not match the " + "Variable's rank: %s vs. %s" % (partitions, shape)) + + if any([p < 1 for p in partitions]): + raise ValueError( + "Partitioner returned zero partitions for some axes: %s" % + partitions) + + should_check = reuse is not None + + if name in self._partitioned_vars: + if should_check and not reuse: + raise ValueError( + "Partitioned variable with name %s already exists. Did you mean to " + "set reuse=True in VarScope?" + % name) + + existing_var = self._partitioned_vars[name] + if not shape.is_compatible_with(existing_var.get_shape()): + raise ValueError( + "Trying to reuse partitioned variable %s, but specified shape %s " + "and found shape %s." + % (name, shape, existing_var.get_shape())) + if not dtype.is_compatible_with(existing_var.dtype): + raise ValueError( + "Trying to reuse partitioned variable %s, but specified dtype %s " + "and found dtype %s." + % (name, dtype.name, existing_var.dtype.name)) + + # pylint: disable=protected-access + if (not reuse_without_partition and + existing_var._get_partitions() != partitions): + raise ValueError( + "Trying to reuse partitioned variable %s, but specified partitions " + "%s and found partitions %s." % + (name, partitions, existing_var._get_partitions())) + # pylint: enable=protected-access + + return existing_var + + if should_check and reuse: + raise ValueError("PartitionedVariable %s does not exist, or was not " + "created with tf.get_variable(). Did you mean to set " + "reuse=None in VarScope?" % name) + + slice_dim, slice_shape = _compute_slice_dim_and_shape( + shape.as_list(), partitions) + + vs = [] + num_slices = partitions[slice_dim] + num_slices_with_excess = shape[slice_dim].value % num_slices + + slice_offset = [0] * shape.ndims + + if "%s/part_0" % name in self._vars: + if "%s/part_%d" % (name, num_slices - 1) not in self._vars: + raise ValueError( + "Partitioner returned a different partitioning than what was " + "already found. Partitioner returned %d shards, and shard " + "%s/part_0 was found, but %s/part_%d was not." + % (num_slices, name, name, num_slices - 1)) + if "%s/part_%d" % (name, num_slices) in self._vars: + raise ValueError( + "Partitioner returned a different partitioning than what was " + "already found. Partitioner returned %d shards, and shard " + "%s/part_0 was found, but so was the extra shard %s/part_%d." 
+ % (num_slices, name, name, num_slices)) + + for i in xrange(num_slices): + var_shape = slice_shape[:] + var_offset = slice_offset[:] + partition_info = _PartitionInfo( + full_shape=shape.as_list(), var_offset=var_offset) + if i < num_slices_with_excess: + var_shape[slice_dim] += 1 + slice_offset[slice_dim] += var_shape[slice_dim] + + var_full_name = "%s/part_%d" % (name, i) + with ops.name_scope(var_full_name + "/PartitionedInitializer"): + if initializer is None: + init = init_ops.uniform_unit_scaling_initializer() + init_shape = var_shape + elif callable(initializer): + init = initializer + init_shape = var_shape + elif isinstance(initializer, ops.Tensor): + init = array_ops.slice(initializer, var_offset, var_shape) + # Use the dtype of the given tensor. + dtype = init.dtype.base_dtype + init_shape = None + else: + init = ops.convert_to_tensor(initializer, dtype=dtype) + init = array_ops.slice(init, var_offset, var_shape) + init_shape = None + + with ops.name_scope(None): + var = self._get_single_variable( + name=var_full_name, + shape=init_shape, + dtype=dtype, + initializer=init, + partition_info=partition_info, + regularizer=regularizer, + reuse=reuse, + trainable=trainable, + collections=collections, + caching_device=caching_device, + validate_shape=validate_shape) + + # pylint: disable=protected-access + var._set_save_slice_info(variables.Variable.SaveSliceInfo( + name, shape.as_list(), var_offset, var_shape)) + vs.append(var) + # pylint: enable=protected-access + + # pylint: disable=protected-access + partitioned_var = variables.PartitionedVariable(name=name, + shape=shape, + dtype=dtype, + variable_list=vs, + partitions=partitions) + # pylint: enable=protected-access + + self._partitioned_vars[name] = partitioned_var + return partitioned_var + + def _get_single_variable(self, + name, + shape=None, + dtype=dtypes.float32, + initializer=None, + regularizer=None, + partition_info=None, + reuse=None, + trainable=True, + collections=None, + caching_device=None, + validate_shape=True): + """Get or create a single Variable (e.g. a shard or entire variable). + + See the documentation of get_variable above (ignore partitioning components) + for details. + + Args: + name: see get_variable. + shape: see get_variable. + dtype: see get_variable. + initializer: see get_variable. + regularizer: see get_variable. + partition_info: _PartitionInfo object. + reuse: see get_variable. + trainable: see get_variable. + collections: see get_variable. + caching_device: see get_variable. + validate_shape: see get_variable. + + Returns: + A Variable. See documentation of get_variable above. + + Raises: + ValueError: See documentation of get_variable above. + """ + + # Set to true if initializer is a constant. + initializing_from_value = False + if initializer is not None and not callable(initializer): + initializing_from_value = True + if shape is not None and initializing_from_value: + raise ValueError("If initializer is a constant, do not specify shape.") + + should_check = reuse is not None + dtype = dtypes.as_dtype(dtype) + shape = tensor_shape.as_shape(shape) + + if name in self._vars: + # Here we handle the case when returning an existing variable. + if should_check and not reuse: + tb = self._vars[name].op.traceback[::-1] + # Throw away internal tf entries and only take a few lines. + tb = [x for x in tb if "tensorflow/python" not in x[0]][:3] + raise ValueError("Variable %s already exists, disallowed." + " Did you mean to set reuse=True in VarScope? 
" + "Originally defined at:\n\n%s" % ( + name, "".join(traceback.format_list(tb)))) + found_var = self._vars[name] + if not shape.is_compatible_with(found_var.get_shape()): + raise ValueError("Trying to share variable %s, but specified shape %s" + " and found shape %s." % (name, shape, + found_var.get_shape())) + if not dtype.is_compatible_with(found_var.dtype): + dtype_str = dtype.name + found_type_str = found_var.dtype.name + raise ValueError("Trying to share variable %s, but specified dtype %s" + " and found dtype %s." % (name, dtype_str, + found_type_str)) + return found_var + + # The code below handles only the case of creating a new variable. + if should_check and reuse: + raise ValueError("Variable %s does not exist, or was not created with " + "tf.get_variable(). Did you mean to set reuse=None in " + "VarScope?" % name) + if not shape.is_fully_defined() and not initializing_from_value: + raise ValueError("Shape of a new variable (%s) must be fully defined, " + "but instead was %s." % (name, shape)) + + # Create the tensor to initialize the variable. + if initializer is None: + initializer = init_ops.uniform_unit_scaling_initializer() + # Clear control dependencies while creating the initializer. + with ops.control_dependencies(None): + if initializing_from_value: + init_val = initializer + variable_dtype = None + else: + init_val = lambda: initializer( + shape.as_list(), dtype=dtype, partition_info=partition_info) + variable_dtype = dtype.base_dtype + + # Create the variable. + v = variables.Variable(initial_value=init_val, + name=name, + trainable=trainable, + collections=collections, + caching_device=caching_device, + dtype=variable_dtype, + validate_shape=validate_shape) + self._vars[name] = v + logging.vlog(1, "Created variable %s with shape %s and init %s", v.name, + format(shape), initializer) + + # Run the regularizer if requested and save the resulting loss. + if regularizer: + with ops.colocate_with(v.op): + with ops.name_scope(name + "/Regularizer/"): + loss = regularizer(v) + if loss is not None: + logging.vlog(1, "Applied regularizer to %s and added the result %s " + "to REGULARIZATION_LOSSES.", v.name, loss.name) + ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, loss) + + return v + + +# To stop regularization, use this regularizer +def no_regularizer(_): + """Use this function to prevent regularization of variables.""" + return None + + +class VariableScope(object): + """Variable scope object to carry defaults to provide to get_variable. + + Many of the arguments we need for get_variable in a variable store are most + easily handled with a context. This object is used for the defaults. + + Attributes: + name: name of the current scope, used as prefix in get_variable. + initializer: default initializer passed to get_variable. + regularizer: default regularizer passed to get_variable. + reuse: Boolean or None, setting the reuse in get_variable. + caching_device: string, callable, or None: the caching device passed to + get_variable. + partitioner: callable or `None`: the partitioner passed to `get_variable`. + custom_getter: default custom getter passed to get_variable. + name_scope: The name passed to `tf.name_scope`. + dtype: default type passed to get_variable (defaults to DT_FLOAT). 
+ """ + + def __init__(self, + reuse, + name="", + initializer=None, + regularizer=None, + caching_device=None, + partitioner=None, + custom_getter=None, + name_scope="", + dtype=dtypes.float32): + """Creates a new VariableScope with the given properties.""" + self._name = name + self._initializer = initializer + self._regularizer = regularizer + self._reuse = reuse + self._caching_device = caching_device + self._partitioner = partitioner + self._custom_getter = custom_getter + self._name_scope = name_scope + self._dtype = dtype + + @property + def name(self): + return self._name + + @property + def original_name_scope(self): + return self._name_scope + + @property + def reuse(self): + return self._reuse + + @property + def initializer(self): + return self._initializer + + @property + def dtype(self): + return self._dtype + + @property + def regularizer(self): + return self._regularizer + + @property + def caching_device(self): + return self._caching_device + + @property + def partitioner(self): + return self._partitioner + + @property + def custom_getter(self): + return self._custom_getter + + def reuse_variables(self): + """Reuse variables in this scope.""" + self._reuse = True + + def set_initializer(self, initializer): + """Set initializer for this scope.""" + self._initializer = initializer + + def set_dtype(self, dtype): + """Set data type for this scope.""" + self._dtype = dtype + + def set_regularizer(self, regularizer): + """Set regularizer for this scope.""" + self._regularizer = regularizer + + def set_caching_device(self, caching_device): + """Set caching_device for this scope.""" + self._caching_device = caching_device + + def set_partitioner(self, partitioner): + """Set partitioner for this scope.""" + self._partitioner = partitioner + + def set_custom_getter(self, custom_getter): + """Set custom getter for this scope.""" + self._custom_getter = custom_getter + + def get_variable(self, + var_store, + name, + shape=None, + dtype=None, + initializer=None, + regularizer=None, + trainable=True, + collections=None, + caching_device=None, + partitioner=None, + validate_shape=True, + custom_getter=None): + """Gets an existing variable with this name or create a new one.""" + if regularizer is None: + regularizer = self._regularizer + if caching_device is None: + caching_device = self._caching_device + if partitioner is None: + partitioner = self._partitioner + if custom_getter is None: + custom_getter = self._custom_getter + + full_name = self.name + "/" + name if self.name else name + # Variable names only depend on variable_scope (full_name here), + # not name_scope, so we reset it below for the time of variable creation. + with ops.name_scope(None): + # Check that `initializer` dtype and `dtype` are consistent before + # replacing them with defaults. + if (dtype is not None and initializer is not None and + not callable(initializer)): + init_dtype = ops.convert_to_tensor(initializer).dtype.base_dtype + if init_dtype != dtype: + raise ValueError("Initializer type '%s' and explicit dtype '%s' " + "don't match." 
% (init_dtype, dtype))
+      if initializer is None:
+        initializer = self._initializer
+      if dtype is None:
+        dtype = self._dtype
+
+      return var_store.get_variable(
+          full_name, shape=shape, dtype=dtype, initializer=initializer,
+          regularizer=regularizer, reuse=self.reuse, trainable=trainable,
+          collections=collections, caching_device=caching_device,
+          partitioner=partitioner, validate_shape=validate_shape,
+          custom_getter=custom_getter)
+
+  def _get_partitioned_variable(self,
+                                var_store,
+                                name,
+                                shape=None,
+                                dtype=None,
+                                initializer=None,
+                                regularizer=None,
+                                trainable=True,
+                                collections=None,
+                                caching_device=None,
+                                partitioner=None,
+                                validate_shape=True):
+    """Gets an existing variable with this name or create a new one."""
+    if initializer is None:
+      initializer = self._initializer
+    if regularizer is None:
+      regularizer = self._regularizer
+    if caching_device is None:
+      caching_device = self._caching_device
+    if partitioner is None:
+      partitioner = self._partitioner
+    if dtype is None:
+      dtype = self._dtype
+
+    if self._custom_getter is not None:
+      raise ValueError(
+          "Private access to _get_partitioned_variable is not allowed when "
+          "a custom getter is set.  Current custom getter: %s.  "
+          "It is likely that you're using create_partitioned_variables.  "
+          "If so, consider using get_variable with a non-empty "
+          "partitioner parameter instead." % self._custom_getter)
+
+    if partitioner is None:
+      raise ValueError("No partitioner was specified")
+
+    # This allows the variable scope name to be used as the variable name if
+    # this function is invoked with an empty name arg, for backward
+    # compatibility with create_partitioned_variables().
+    full_name_list = []
+    if self.name:
+      full_name_list.append(self.name)
+    if name:
+      full_name_list.append(name)
+    full_name = "/".join(full_name_list)
+
+    # Variable names only depend on variable_scope (full_name here),
+    # not name_scope, so we reset it below for the time of variable creation.
+    with ops.name_scope(None):
+      # pylint: disable=protected-access
+      return var_store._get_partitioned_variable(
+          full_name, shape=shape, dtype=dtype, initializer=initializer,
+          regularizer=regularizer, reuse=self.reuse, trainable=trainable,
+          collections=collections, caching_device=caching_device,
+          partitioner=partitioner, validate_shape=validate_shape)
+      # pylint: enable=protected-access
+
+
+_VARSTORE_KEY = ("__variable_store",)
+_VARSCOPE_KEY = ("__varscope",)
+
+
+def get_variable_scope():
+  """Returns the current variable scope."""
+  scope = ops.get_collection(_VARSCOPE_KEY)
+  if scope:  # This collection has at most 1 element, the default scope at [0].
+    return scope[0]
+  scope = VariableScope(False)
+  ops.add_to_collection(_VARSCOPE_KEY, scope)
+  return scope
+
+
+def _get_default_variable_store():
+  store = ops.get_collection(_VARSTORE_KEY)
+  if store:
+    return store[0]
+  store = _VariableStore()
+  ops.add_to_collection(_VARSTORE_KEY, store)
+  return store
+
+
+def get_variable(name,
+                 shape=None,
+                 dtype=None,
+                 initializer=None,
+                 regularizer=None,
+                 trainable=True,
+                 collections=None,
+                 caching_device=None,
+                 partitioner=None,
+                 validate_shape=True,
+                 custom_getter=None):
+  """Gets an existing variable with these parameters or create a new one.
+
+  This function prefixes the name with the current variable scope
+  and performs reuse checks. See the
+  [Variable Scope How To](../../how_tos/variable_scope/index.md)
+  for an extensive description of how reusing works.
+  Here is a basic example:
+
+  ```python
+  with tf.variable_scope("foo"):
+    v = tf.get_variable("v", [1])  # v.name == "foo/v:0"
+    w = tf.get_variable("w", [1])  # w.name == "foo/w:0"
+  with tf.variable_scope("foo", reuse=True):
+    v1 = tf.get_variable("v")  # The same as v above.
+  ```
+
+  If initializer is `None` (the default), the default initializer passed in
+  the variable scope will be used. If that one is `None` too, a
+  `uniform_unit_scaling_initializer` will be used. The initializer can also be
+  a Tensor, in which case the variable is initialized to this value and shape.
+
+  Similarly, if the regularizer is `None` (the default), the default
+  regularizer passed in the variable scope will be used (if that is `None` too,
+  then by default no regularization is performed).
+
+  If a partitioner is provided, a `PartitionedVariable` is returned.
+  Accessing this object as a `Tensor` returns the shards concatenated along
+  the partition axis.
+
+  Some useful partitioners are available.  See, e.g.,
+  `variable_axis_size_partitioner` and `min_max_variable_partitioner`.
+
+  Args:
+    name: The name of the new or existing variable.
+    shape: Shape of the new or existing variable.
+    dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
+    initializer: Initializer for the variable if one is created.
+    regularizer: A (Tensor -> Tensor or None) function; the result of
+      applying it on a newly created variable will be added to the collection
+      GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
+    trainable: If `True` also add the variable to the graph collection
+      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
+    collections: List of graph collections keys to add the Variable to.
+      Defaults to `[GraphKeys.VARIABLES]` (see `tf.Variable`).
+    caching_device: Optional device string or function describing where the
+      Variable should be cached for reading.  Defaults to the Variable's
+      device.  If not `None`, caches on another device.  Typical use is to
+      cache on the device where the Ops using the Variable reside, to
+      deduplicate copying through `Switch` and other conditional statements.
+    partitioner: Optional callable that accepts a fully defined `TensorShape`
+      and `dtype` of the Variable to be created, and returns a list of
+      partitions for each axis (currently only one axis can be partitioned).
+    validate_shape: If False, allows the variable to be initialized with a
+      value of unknown shape. If True, the default, the shape of initial_value
+      must be known.
+    custom_getter: Callable that takes as a first argument the true getter, and
+      allows overwriting the internal get_variable method.
+      The signature of `custom_getter` should match that of this method,
+      but the most future-proof version will allow for changes:
+      `def custom_getter(getter, *args, **kwargs)`.  Direct access to
+      all `get_variable` parameters is also allowed:
+      `def custom_getter(getter, name, *args, **kwargs)`.  A simple custom
+      getter that creates variables with modified names is:
+      ```python
+      def custom_getter(getter, name, *args, **kwargs):
+        return getter(name + '_suffix', *args, **kwargs)
+      ```
+
+  Returns:
+    The created or existing `Variable` (or `PartitionedVariable`, if a
+    partitioner was used).
+
+  Raises:
+    ValueError: when creating a new variable and shape is not declared,
+      when violating reuse during variable creation, or when `initializer`
+      dtype and `dtype` don't match. Reuse is set inside `variable_scope`.
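+
+  A short partitioned-variable sketch (illustrative; assumes the partitioner
+  helpers named above are exported as `tf.variable_axis_size_partitioner`):
+
+  ```python
+  partitioner = tf.variable_axis_size_partitioner(max_shard_bytes=1 << 20)
+  with tf.variable_scope("embedding", partitioner=partitioner):
+    # v is a PartitionedVariable; reading it as a Tensor concatenates the
+    # shards along the partitioned axis.
+    v = tf.get_variable("weights", shape=[4096, 128], dtype=tf.float32)
+  ```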
+ """ + return get_variable_scope().get_variable( + _get_default_variable_store(), name, shape=shape, dtype=dtype, + initializer=initializer, regularizer=regularizer, trainable=trainable, + collections=collections, caching_device=caching_device, + partitioner=partitioner, validate_shape=validate_shape, + custom_getter=custom_getter) + + +@functools.wraps(get_variable) +def get_local_variable(*args, **kwargs): + kwargs["trainable"] = False + if "collections" in kwargs: + kwargs["collections"] += [ops.GraphKeys.LOCAL_VARIABLES] + else: + kwargs["collections"] = [ops.GraphKeys.LOCAL_VARIABLES] + get_local_variable.__doc__ = ( + "Gets an existing local variable or creates a new one.\n\n" + + get_local_variable.__doc__) + return get_variable(*args, **kwargs) + + +def _get_partitioned_variable(name, + shape=None, + dtype=None, + initializer=None, + regularizer=None, + trainable=True, + collections=None, + caching_device=None, + partitioner=None, + validate_shape=True): + """Gets or creates a sharded variable list with these parameters. + + The `partitioner` must be a callable that accepts a fully defined + `TensorShape` and returns a sequence of integers (the `partitions`). + These integers describe how to partition the given sharded `Variable` + along the given dimension. That is, `partitions[1] = 3` means split + the `Variable` into 3 shards along dimension 1. Currently, sharding along + only one axis is supported. + + If the list of variables with the given name (prefix) is already stored, + we return the stored variables. Otherwise, we create a new one. + + Set `reuse` to `True` when you only want to reuse existing Variables. + Set `reuse` to `False` when you only want to create new Variables. + If `reuse` is `None` (the default), both new and existing variables are + returned. + + If initializer is `None` (the default), the default initializer passed in + the constructor is used. If that one is `None` too, we use a new + `uniform_unit_scaling_initializer`. If initializer is a Tensor, we use + it as a value and derive the shape from the initializer. + + If the initializer is a callable, then it will be called for each + shard. Otherwise the initializer should match the shape of the entire + sharded Variable, and it will be sliced accordingly for each shard. + + Some useful partitioners are available. See, e.g., + `variable_axis_size_partitioner` and `min_max_variable_partitioner`. + + Args: + name: The name of the new or existing variable. + shape: Shape of the new or existing variable. + dtype: Type of the new or existing variable (defaults to `DT_FLOAT`). + initializer: Initializer for the variable if one is created. + regularizer: A (Tensor -> Tensor or None) function; the result of + applying it on a newly created variable will be added to the collection + GraphKeys.REGULARIZATION_LOSSES and can be used for regularization. + trainable: If `True` also add the variable to the graph collection + `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). + collections: List of graph collections keys to add the Variable to. + Defaults to `[GraphKeys.VARIABLES]` (see `tf.Variable`). + caching_device: Optional device string or function describing where the + Variable should be cached for reading. Defaults to the Variable's + device. If not `None`, caches on another device. Typical use is to + cache on the device where the Ops using the Variable reside, to + deduplicate copying through `Switch` and other conditional statements. 
+    partitioner: Optional callable that accepts a fully defined `TensorShape`
+      and `dtype` of the Variable to be created, and returns a list of
+      partitions for each axis (currently only one axis can be partitioned).
+    validate_shape: If False, allows the variable to be initialized with a
+      value of unknown shape. If True, the default, the shape of initial_value
+      must be known.
+
+  Returns:
+    A tuple `(shards, partitions)` where `shards` is the list of `Variable`
+    shards and `partitions` is the output of the partitioner on the input
+    shape.
+
+  Raises:
+    ValueError: when creating a new variable and shape is not declared,
+      or when violating reuse during variable creation. Reuse is set inside
+      `variable_scope`.
+  """
+  # pylint: disable=protected-access
+  scope = get_variable_scope()
+  if scope.custom_getter is not None:
+    raise ValueError(
+        "Private access to _get_partitioned_variable is not allowed when "
+        "a custom getter is set.  Current custom getter: %s.  "
+        "It is likely that you're using create_partitioned_variables.  "
+        "If so, consider using get_variable with a non-empty "
+        "partitioner parameter instead." % scope.custom_getter)
+  return scope._get_partitioned_variable(
+      _get_default_variable_store(), name, shape=shape, dtype=dtype,
+      initializer=initializer, regularizer=regularizer, trainable=trainable,
+      collections=collections, caching_device=caching_device,
+      partitioner=partitioner, validate_shape=validate_shape)
+  # pylint: enable=protected-access
+
+
+@contextlib.contextmanager
+def _pure_variable_scope(name_or_scope,
+                         reuse=None,
+                         initializer=None,
+                         regularizer=None,
+                         caching_device=None,
+                         partitioner=None,
+                         custom_getter=None,
+                         old_name_scope=None,
+                         dtype=dtypes.float32):
+  """Creates a context for the variable_scope, see `variable_scope` for docs.
+
+  Note: this does not create a name scope.
+
+  Args:
+    name_or_scope: `string` or `VariableScope`: the scope to open.
+    reuse: `True` or `None`; if `True`, we go into reuse mode for this scope as
+      well as all sub-scopes; if `None`, we just inherit the parent scope reuse.
+    initializer: default initializer for variables within this scope.
+    regularizer: default regularizer for variables within this scope.
+    caching_device: default caching device for variables within this scope.
+    partitioner: default partitioner for variables within this scope.
+    custom_getter: default custom getter for variables within this scope.
+    old_name_scope: the original name scope when re-entering a variable scope.
+    dtype: type of the variables within this scope (defaults to `DT_FLOAT`).
+
+  Yields:
+    A scope that can be captured and reused.
+
+  Raises:
+    ValueError: when trying to reuse within a create scope, or create within
+      a reuse scope, or if reuse is not `None` or `True`.
+    TypeError: when the types of some arguments are not appropriate.
+
+  """
+  get_variable_scope()  # Ensure that a default exists, then get a pointer.
+  # Get the reference to the collection as we want to modify it in place.
+  default_varscope = ops.get_collection_ref(_VARSCOPE_KEY)
+  old = default_varscope[0]
+  var_store = _get_default_variable_store()
+  if isinstance(name_or_scope, VariableScope):
+    new_name = name_or_scope.name
+  else:
+    new_name = old.name + "/" + name_or_scope if old.name else name_or_scope
+  try:
+    var_store.open_variable_scope(new_name)
+    if isinstance(name_or_scope, VariableScope):
+      name_scope = name_or_scope._name_scope  # pylint: disable=protected-access
+      # Handler for the case when we jump to a shared scope.
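+      # (The caller passed a captured VariableScope object rather than a
+      # string, so we re-enter that scope under its original full name.)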
+      # We create a new VariableScope (default_varscope[0]) that contains
+      # a copy of the provided shared scope, possibly with changed reuse
+      # and initializer, if the user requested this.
+      default_varscope[0] = VariableScope(
+          name_or_scope.reuse if reuse is None else reuse,
+          name=new_name,
+          initializer=name_or_scope.initializer,
+          regularizer=name_or_scope.regularizer,
+          caching_device=name_or_scope.caching_device,
+          partitioner=name_or_scope.partitioner,
+          dtype=name_or_scope.dtype,
+          custom_getter=name_or_scope.custom_getter,
+          name_scope=name_scope)
+      if initializer is not None:
+        default_varscope[0].set_initializer(initializer)
+      if regularizer is not None:
+        default_varscope[0].set_regularizer(regularizer)
+      if caching_device is not None:
+        default_varscope[0].set_caching_device(caching_device)
+      if partitioner is not None:
+        default_varscope[0].set_partitioner(partitioner)
+      if custom_getter is not None:
+        default_varscope[0].set_custom_getter(custom_getter)
+      if dtype is not None:
+        default_varscope[0].set_dtype(dtype)
+      yield default_varscope[0]
+    else:
+      # Handler for the case when we just prolong current variable scope.
+      # VariableScope with name extended by the provided one, and inherited
+      # reuse and initializer (except if the user provided values to set).
+      reuse = reuse or old.reuse  # Re-using is inherited by sub-scopes.
+      default_varscope[0] = VariableScope(
+          reuse,
+          name=new_name,
+          initializer=old.initializer,
+          regularizer=old.regularizer,
+          caching_device=old.caching_device,
+          partitioner=old.partitioner,
+          dtype=old.dtype,
+          custom_getter=old.custom_getter,
+          name_scope=old_name_scope or name_or_scope)
+      if initializer is not None:
+        default_varscope[0].set_initializer(initializer)
+      if regularizer is not None:
+        default_varscope[0].set_regularizer(regularizer)
+      if caching_device is not None:
+        default_varscope[0].set_caching_device(caching_device)
+      if partitioner is not None:
+        default_varscope[0].set_partitioner(partitioner)
+      if custom_getter is not None:
+        default_varscope[0].set_custom_getter(custom_getter)
+      if dtype is not None:
+        default_varscope[0].set_dtype(dtype)
+      yield default_varscope[0]
+  finally:
+    var_store.close_variable_subscopes(new_name)
+    default_varscope[0] = old
+
+
+def _get_unique_variable_scope(prefix):
+  """Get a name with the given prefix unique in the current variable scope."""
+  var_store = _get_default_variable_store()
+  current_scope = get_variable_scope()
+  name = current_scope.name + "/" + prefix if current_scope.name else prefix
+  if var_store.variable_scope_count(name) == 0:
+    return prefix
+  idx = 1
+  while var_store.variable_scope_count(name + ("_%d" % idx)) > 0:
+    idx += 1
+  return prefix + ("_%d" % idx)
+
+
+# pylint: disable=g-doc-return-or-yield
+@contextlib.contextmanager
+def variable_scope(name_or_scope,
+                   default_name=None,
+                   values=None,
+                   initializer=None,
+                   regularizer=None,
+                   caching_device=None,
+                   partitioner=None,
+                   custom_getter=None,
+                   reuse=None,
+                   dtype=None):
+  """Returns a context manager for defining ops that create variables (layers).
+
+  This context manager validates that the (optional) `values` are from
+  the same graph, ensures that graph is the default graph, and pushes a
+  name scope and a variable scope.
+
+  If `name_or_scope` is not None, it is used as is. If `name_or_scope` is
+  None, then `default_name` is used.  In that case, if the same name has been
+  previously used in the same scope, it will be made unique by appending `_N`
+  to it.
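+
+  For example (an illustrative sketch of the uniquifying behavior):
+
+  ```python
+  with tf.variable_scope(None, default_name="foo") as scope1:
+    pass  # scope1.name == "foo"
+  with tf.variable_scope(None, default_name="foo") as scope2:
+    pass  # scope2.name == "foo_1"
+  ```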
+
+  Variable scope allows you to create new variables and to share already
+  created ones while providing checks to not create or share by accident.
+  For details, see the
+  [Variable Scope How To](../../how_tos/variable_scope/index.md);
+  here we present only a few basic examples.
+
+  Simple example of how to create a new variable:
+
+  ```python
+  with tf.variable_scope("foo"):
+    with tf.variable_scope("bar"):
+      v = tf.get_variable("v", [1])
+      assert v.name == "foo/bar/v:0"
+  ```
+
+  Basic example of sharing a variable:
+
+  ```python
+  with tf.variable_scope("foo"):
+    v = tf.get_variable("v", [1])
+  with tf.variable_scope("foo", reuse=True):
+    v1 = tf.get_variable("v", [1])
+  assert v1 == v
+  ```
+
+  Sharing a variable by capturing a scope and setting reuse:
+
+  ```python
+  with tf.variable_scope("foo") as scope:
+    v = tf.get_variable("v", [1])
+    scope.reuse_variables()
+    v1 = tf.get_variable("v", [1])
+  assert v1 == v
+  ```
+
+  To prevent accidental sharing of variables, we raise an exception when
+  getting an existing variable in a non-reusing scope.
+
+  ```python
+  with tf.variable_scope("foo"):
+    v = tf.get_variable("v", [1])
+    v1 = tf.get_variable("v", [1])
+    #  Raises ValueError("... v already exists ...").
+  ```
+
+  Similarly, we raise an exception when trying to get a variable that
+  does not exist in reuse mode.
+
+  ```python
+  with tf.variable_scope("foo", reuse=True):
+    v = tf.get_variable("v", [1])
+    #  Raises ValueError("... v does not exist ...").
+  ```
+
+  Note that the `reuse` flag is inherited: if we open a reusing scope,
+  then all its sub-scopes become reusing as well.
+
+  Args:
+    name_or_scope: `string` or `VariableScope`: the scope to open.
+    default_name: The default name to use if the `name_or_scope` argument is
+      `None`; this name will be uniquified. If name_or_scope is provided, it
+      won't be used and therefore it is not required and can be None.
+    values: The list of `Tensor` arguments that are passed to the op function.
+    initializer: default initializer for variables within this scope.
+    regularizer: default regularizer for variables within this scope.
+    caching_device: default caching device for variables within this scope.
+    partitioner: default partitioner for variables within this scope.
+    custom_getter: default custom getter for variables within this scope.
+    reuse: `True` or `None`; if `True`, we go into reuse mode for this scope as
+      well as all sub-scopes; if `None`, we just inherit the parent scope reuse.
+    dtype: type of variables created in this scope (defaults to the type
+      in the passed scope, or inherited from parent scope).
+
+  Returns:
+    A scope that can be captured and reused.
+
+  Raises:
+    ValueError: when trying to reuse within a create scope, or create within
+      a reuse scope, or if reuse is not `None` or `True`.
+    TypeError: when the types of some arguments are not appropriate.
+ """ + if default_name is None and name_or_scope is None: + raise TypeError("If default_name is None then name_or_scope is required") + if values is None: + values = [] + g = ops._get_graph_from_inputs(values) # pylint: disable=protected-access + with g.as_default(): + if name_or_scope is not None: + if not isinstance(name_or_scope, (VariableScope,) + six.string_types): + raise TypeError("VariableScope: name_or_scope must be a string or " + "VariableScope.") + if isinstance(name_or_scope, six.string_types): + name_scope = name_or_scope + else: + name_scope = name_or_scope.name.split("/")[-1] + if name_scope: + with ops.name_scope(name_scope) as cur_name_scope: + if isinstance(name_or_scope, six.string_types): + old_name_scope = cur_name_scope + else: + old_name_scope = name_or_scope.original_name_scope + with _pure_variable_scope( + name_or_scope, + reuse=reuse, + initializer=initializer, + regularizer=regularizer, + caching_device=caching_device, + partitioner=partitioner, + custom_getter=custom_getter, + old_name_scope=old_name_scope, + dtype=dtype) as vs: + yield vs + else: + # This can only happen if someone is entering the root variable scope. + with _pure_variable_scope( + name_or_scope, + reuse=reuse, + initializer=initializer, + regularizer=regularizer, + caching_device=caching_device, + partitioner=partitioner, + custom_getter=custom_getter, + dtype=dtype) as vs: + yield vs + else: # Here name_or_scope is None. Using default name, but made unique. + if reuse: + raise ValueError("reuse=True cannot be used without a name_or_scope") + with ops.name_scope(default_name) as scope: + unique_default_name = _get_unique_variable_scope(default_name) + with _pure_variable_scope( + unique_default_name, + initializer=initializer, + regularizer=regularizer, + caching_device=caching_device, + partitioner=partitioner, + custom_getter=custom_getter, + old_name_scope=scope, + dtype=dtype) as vs: + yield vs + + +# pylint: disable=g-doc-return-or-yield +@contextlib.contextmanager +def variable_op_scope(values, + name_or_scope, + default_name=None, + initializer=None, + regularizer=None, + caching_device=None, + partitioner=None, + custom_getter=None, + reuse=None, + dtype=None): + """Deprecated: context manager for defining an op that creates variables.""" + logging.warn("tf.variable_op_scope(values, name, default_name) is deprecated," + " use tf.variable_scope(name, default_name, values)") + with variable_scope(name_or_scope, + default_name=default_name, + values=values, + initializer=initializer, + regularizer=regularizer, + caching_device=caching_device, + partitioner=partitioner, + custom_getter=custom_getter, + reuse=reuse, + dtype=dtype) as scope: + yield scope + + +def _compute_slice_dim_and_shape(full_shape, slicing): + """Computes which dimension is being sliced and the typical slice shape.""" + + slice_shape = [0] * len(full_shape) + slice_dim = None + for dim, num_slices in enumerate(slicing): + dim_size = full_shape[dim] + if num_slices <= 0 or dim_size < num_slices: + raise ValueError("Cannot create %d slices for size %d. shape: %s, " + "slicing: %s" % + (num_slices, full_shape[dim], full_shape, slicing)) + if num_slices == 1: + # Not slicing in this dimension. + slice_shape[dim] = dim_size + elif slice_dim is not None: + # We only support slicing along one of the dimensions. + raise ValueError("Can only slice a variable along one dimension: " + "shape: %s, slicing: %s" % (full_shape, slicing)) + else: + # Note: We will add any extras onto the last slice, later. 
+      slice_dim = dim
+      slice_shape[dim] = dim_size // num_slices
+
+  # Degenerate case: If "slicing" was all ones, pretend we are slicing along
+  # the first dimension.
+  if slice_dim is None:
+    slice_dim = 0
+  return slice_dim, slice_shape
+
+# -*- coding: utf-8 -*-
+##############################################################################
+#
+#    OpenERP, Open Source Management Solution
+#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU Affero General Public License as
+#    published by the Free Software Foundation, either version 3 of the
+#    License, or (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU Affero General Public License for more details.
+#
+#    You should have received a copy of the GNU Affero General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+##############################################################################
+
+from osv import fields, osv
+
+class res_partner(osv.osv):
+    _inherit = 'res.partner'
+    _columns = {
+        'property_delivery_carrier': fields.property(
+            'delivery.carrier',
+            type='many2one',
+            relation='delivery.carrier',
+            string="Delivery Method",
+            view_load=True,
+            help="This delivery method will be used when invoicing from picking."),
+    }
+res_partner()
+
+
+
+# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
+
+
+import nose
+
+from datetime import datetime
+from numpy.random import randn
+import numpy as np
+
+from pandas.core.api import Series, DataFrame, date_range
+import pandas.util.testing as tm
+import pandas.stats.math as pmath
+from pandas import ols
+
+N, K = 100, 10
+
+_have_statsmodels = True
+try:
+    import statsmodels.api as sm
+except ImportError:
+    try:
+        import scikits.statsmodels.api as sm  # noqa
+    except ImportError:
+        _have_statsmodels = False
+
+
+class TestMath(tm.TestCase):
+
+    _nan_locs = np.arange(20, 40)
+    _inf_locs = np.array([])
+
+    def setUp(self):
+        arr = randn(N)
+        arr[self._nan_locs] = np.NaN
+
+        self.arr = arr
+        self.rng = date_range(datetime(2009, 1, 1), periods=N)
+
+        self.series = Series(arr.copy(), index=self.rng)
+
+        self.frame = DataFrame(randn(N, K), index=self.rng,
+                               columns=np.arange(K))
+
+    def test_rank_1d(self):
+        self.assertEqual(1, pmath.rank(self.series))
+        self.assertEqual(0, pmath.rank(Series(0, self.series.index)))
+
+    def test_solve_rect(self):
+        if not _have_statsmodels:
+            raise nose.SkipTest("no statsmodels")
+
+        b = Series(np.random.randn(N), self.frame.index)
+        result = pmath.solve(self.frame, b)
+        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+            expected = ols(y=b, x=self.frame, intercept=False).beta
+        self.assertTrue(np.allclose(result, expected))
+
+    def test_inv_illformed(self):
+        singular = DataFrame(np.array([[1, 1], [2, 2]]))
+        rs = pmath.inv(singular)
+        expected = np.array([[0.1, 0.2], [0.1, 0.2]])
+        self.assertTrue(np.allclose(rs, expected))
+
+if __name__ == '__main__':
+    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
+                   exit=False)
+
+#-*- encoding: utf8 -*-
+"""Retrieve user id from ldap server."""
+import logging
+import sys
+
+logging.basicConfig(level=logging.INFO)
+LOGGER = logging.getLogger(__name__)
+
+try:
+    import ldap
+except ImportError, exception:
+    LOGGER.error(str(exception))
+    sys.exit()
+
+LDAP_SERVER_URL = 'ldap://directory.lvl.intranet'
+LDAP_QUERY = 'uid={},ou=smile,ou=users,dc=smile,dc=fr'
+
+
+def get_user_id_from_ldap(pentagram, ldap_url=LDAP_SERVER_URL,
+                          ldap_query=LDAP_QUERY):
+    """Get user id from pentagram.
+
+    :param pentagram: string, a five-character user name
+    :returns: user_id: string
+
+    """
+    if not len(pentagram) == 5:
+        LOGGER.error('Invalid user name, skipping...')
+        return None
+    try:
+        ldap_server = ldap.initialize(ldap_url)
+        ldap_server.simple_bind()
+    except ldap.LDAPError:
+        LOGGER.error('Error while connecting to LDAP server, skipping...')
+        return None
+    try:
+        results = ldap_server.search_s(
+            ldap_query.format(pentagram), ldap.SCOPE_SUBTREE,
+            attrlist=['uidNumber'])
+    except ldap.NO_SUCH_OBJECT:
+        LOGGER.error('No match found, skipping...')
+        return None
+    if not len(results) == 1:
+        LOGGER.error('No unique user match, skipping...')
+        return None
+    _, arr = results[0]
+    return arr['uidNumber'][0]
+
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+
+from keystone.openstack.common import importutils
+
+
+class Manager(object):
+    """Base class for intermediary request layer.
+
+    The Manager layer exists to support additional logic that applies to all
+    or some of the methods exposed by a service that are not specific to the
+    HTTP interface.
+
+    It also provides a stable entry point to dynamic backends.
+
+    An example of a probable use case is logging all the calls.
+
+    """
+
+    def __init__(self, driver_name):
+        self.driver = importutils.import_object(driver_name)
+
+    def __getattr__(self, name):
+        """Forward calls to the underlying driver."""
+        f = getattr(self.driver, name)
+
+        @functools.wraps(f)
+        def _wrapper(*args, **kw):
+            return f(*args, **kw)
+        setattr(self, name, _wrapper)
+        return _wrapper
+
+# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"). You
+# may not use this file except in compliance with the License. A copy of
+# the License is located at
+#
+#     http://aws.amazon.com/apache2.0/
+#
+# or in the "license" file accompanying this file. This file is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+import inspect
+import sys
+import os
+import errno
+import socket
+
+from botocore.compat import six
+
+
+if sys.platform.startswith('win'):
+    def rename_file(current_filename, new_filename):
+        try:
+            os.remove(new_filename)
+        except OSError as e:
+            if not e.errno == errno.ENOENT:
+                # We only want to ignore trying to remove
+                # a file that does not exist.  If it fails
+                # for any other reason we should be propagating
+                # that exception.
+                raise
+        os.rename(current_filename, new_filename)
+else:
+    rename_file = os.rename
+
+if six.PY3:
+    def accepts_kwargs(func):
+        # In python3.4.1, there are backwards-incompatible
+        # changes when using getargspec with functools.partials.
+        return inspect.getfullargspec(func)[2]
+
+    # In python3, socket.error is OSError, which is too general
+    # for what we want (i.e FileNotFoundError is a subclass of OSError).
+    # In py3 all the socket related errors are in a newly created
+    # ConnectionError
+    SOCKET_ERROR = ConnectionError
+    MAXINT = None
+else:
+    def accepts_kwargs(func):
+        return inspect.getargspec(func)[2]
+
+    SOCKET_ERROR = socket.error
+    MAXINT = sys.maxint
+
+
+def seekable(fileobj):
+    """Backwards compat function to determine if a fileobj is seekable
+
+    :param fileobj: The file-like object to determine if seekable
+
+    :returns: True, if seekable.  False, otherwise.
+    """
+    # If the fileobj has a seekable attr, try calling the seekable()
+    # method on it.
+    if hasattr(fileobj, 'seekable'):
+        return fileobj.seekable()
+    # If there is no seekable attr, check if the object has seek and tell
+    # methods.  If it does, try seeking to the current position.
+    elif hasattr(fileobj, 'seek') and hasattr(fileobj, 'tell'):
+        try:
+            fileobj.seek(0, 1)
+            return True
+        except (OSError, IOError):
+            # If an io related error was thrown then it is not seekable.
+            return False
+    # Else, the fileobj is not seekable
+    return False
+
+
+def readable(fileobj):
+    """Determines whether or not a file-like object is readable.
+
+    :param fileobj: The file-like object to determine if readable
+
+    :returns: True, if readable.  False otherwise.
+    """
+    if hasattr(fileobj, 'readable'):
+        return fileobj.readable()
+
+    return hasattr(fileobj, 'read')
+
+#! /usr/bin/env python
+
+"""
+SVN helper script.
+
+Try to set the svn:eol-style property to "native" on every .py, .txt, .c and
+.h file in the directory tree rooted at the current directory.
+
+Files with the svn:eol-style property already set (to anything) are skipped.
+
+svn will itself refuse to set this property on a file that's not under SVN
+control, or that has a binary mime-type property set.  This script inherits
+that behavior, and passes on whatever warning message the failing "svn
+propset" command produces.
+
+In the Python project, it's safe to invoke this script from the root of
+a checkout.
+
+No output is produced for files that are ignored.
For a file that gets +svn:eol-style set, output looks like: + + property 'svn:eol-style' set on 'Lib\ctypes\__init__.py' + +For a file not under version control: + + svn: warning: 'patch-finalizer.txt' is not under version control + +and for a file with a binary mime-type property: + + svn: File 'Lib\test\test_pep263.py' has binary mime type property +""" + +import re +import os + +def propfiles(root, fn): + default = os.path.join(root, ".svn", "props", fn+".svn-work") + try: + format = int(open(os.path.join(root, ".svn", "format")).read().strip()) + except IOError: + return [] + if format in (8, 9): + # In version 8 and 9, committed props are stored in prop-base, local + # modifications in props + return [os.path.join(root, ".svn", "prop-base", fn+".svn-base"), + os.path.join(root, ".svn", "props", fn+".svn-work")] + raise ValueError, "Unknown repository format" + +def proplist(root, fn): + "Return a list of property names for file fn in directory root" + result = [] + for path in propfiles(root, fn): + try: + f = open(path) + except IOError: + # no properties file: not under version control, + # or no properties set + continue + while 1: + # key-value pairs, of the form + # K + # NL + # V length + # NL + # END + line = f.readline() + if line.startswith("END"): + break + assert line.startswith("K ") + L = int(line.split()[1]) + key = f.read(L) + result.append(key) + f.readline() + line = f.readline() + assert line.startswith("V ") + L = int(line.split()[1]) + value = f.read(L) + f.readline() + f.close() + return result + +possible_text_file = re.compile(r"\.([hc]|py|txt|sln|vcproj)$").search + +for root, dirs, files in os.walk('.'): + if '.svn' in dirs: + dirs.remove('.svn') + for fn in files: + if possible_text_file(fn): + if 'svn:eol-style' not in proplist(root, fn): + path = os.path.join(root, fn) + os.system('svn propset svn:eol-style native "%s"' % path) + +#!/usr/bin/env python +# +# Copyright 2005,2008,2010 Free Software Foundation, Inc. +# +# This file is part of GNU Radio +# +# GNU Radio is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3, or (at your option) +# any later version. +# +# GNU Radio is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Radio; see the file COPYING. If not, write to +# the Free Software Foundation, Inc., 51 Franklin Street, +# Boston, MA 02110-1301, USA. 
+# + +from gnuradio import gr, gr_unittest + +class test_kludged_imports (gr_unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_blks_import(self): + # make sure that this somewhat magic import works + from gnuradio import blks2 + + def test_gru_import(self): + # make sure that this somewhat magic import works + from gnuradio import gru + + +if __name__ == '__main__': + gr_unittest.run(test_kludged_imports, "test_kludged_imports.xml") + +#!/usr/bin/env python +"""Tests for grr.lib.timeseries.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import unicode_literals + +from absl import app +from future.builtins import range + +from grr_response_server import timeseries +from grr.test_lib import test_lib + + +class TimeseriesTest(test_lib.GRRBaseTest): + + def makeSeries(self): + s = timeseries.Timeseries() + for i in range(1, 101): + s.Append(i, (i + 5) * 10000) + return s + + def testAppendFilterRange(self): + s = self.makeSeries() + self.assertLen(s.data, 100) + self.assertEqual([1, 60000], s.data[0]) + self.assertEqual([100, 1050000], s.data[-1]) + + s.FilterRange(100000, 200000) + self.assertLen(s.data, 10) + self.assertEqual([5, 100000], s.data[0]) + self.assertEqual([14, 190000], s.data[-1]) + + def testNormalize(self): + s = self.makeSeries() + s.Normalize(10 * 10000, 100000, 600000) + self.assertLen(s.data, 5) + self.assertEqual([9.5, 100000], s.data[0]) + self.assertEqual([49.5, 500000], s.data[-1]) + + s = timeseries.Timeseries() + for i in range(0, 1000): + s.Append(0.5, i * 10) + s.Normalize(200, 5000, 10000) + self.assertLen(s.data, 25) + self.assertListEqual(s.data[0], [0.5, 5000]) + self.assertListEqual(s.data[24], [0.5, 9800]) + + s = timeseries.Timeseries() + for i in range(0, 1000): + s.Append(i, i * 10) + s.Normalize(200, 5000, 10000, mode=timeseries.NORMALIZE_MODE_COUNTER) + self.assertLen(s.data, 25) + self.assertListEqual(s.data[0], [519, 5000]) + self.assertListEqual(s.data[24], [999, 9800]) + + def testToDeltas(self): + s = self.makeSeries() + self.assertLen(s.data, 100) + s.ToDeltas() + self.assertLen(s.data, 99) + self.assertEqual([1, 60000], s.data[0]) + self.assertEqual([1, 1040000], s.data[-1]) + + s = timeseries.Timeseries() + for i in range(0, 1000): + s.Append(i, i * 1e6) + s.Normalize( + 20 * 1e6, 500 * 1e6, 1000 * 1e6, mode=timeseries.NORMALIZE_MODE_COUNTER) + self.assertLen(s.data, 25) + self.assertListEqual(s.data[0], [519, int(500 * 1e6)]) + s.ToDeltas() + self.assertLen(s.data, 24) + self.assertListEqual(s.data[0], [20, int(500 * 1e6)]) + self.assertListEqual(s.data[23], [20, int(960 * 1e6)]) + + def testNormalizeFillsGapsWithNone(self): + s = timeseries.Timeseries() + for i in range(21, 51): + s.Append(i, (i + 5) * 10000) + for i in range(81, 101): + s.Append(i, (i + 5) * 10000) + s.Normalize(10 * 10000, 10 * 10000, 120 * 10000) + self.assertLen(s.data, 11) + self.assertEqual([None, 100000], s.data[0]) + self.assertEqual([22.5, 200000], s.data[1]) + self.assertEqual([None, 600000], s.data[5]) + self.assertEqual([None, 1100000], s.data[-1]) + + def testMakeIncreasing(self): + s = timeseries.Timeseries() + for i in range(0, 5): + s.Append(i, i * 1000) + for i in range(0, 5): + s.Append(i, (i + 6) * 1000) + self.assertLen(s.data, 10) + self.assertEqual([4, 10000], s.data[-1]) + s.MakeIncreasing() + self.assertLen(s.data, 10) + self.assertEqual([8, 10000], s.data[-1]) + + def testAddRescale(self): + s1 = timeseries.Timeseries() + for i in range(0, 5): + s1.Append(i, i * 
1000) + s2 = timeseries.Timeseries() + for i in range(0, 5): + s2.Append(2 * i, i * 1000) + s1.Add(s2) + + for i in range(0, 5): + self.assertEqual(3 * i, s1.data[i][0]) + + s1.Rescale(1 / 3.0) + for i in range(0, 5): + self.assertEqual(i, s1.data[i][0]) + + def testMean(self): + s = timeseries.Timeseries() + self.assertEqual(None, s.Mean()) + + s = self.makeSeries() + self.assertLen(s.data, 100) + self.assertEqual(50, s.Mean()) + + +def main(argv): + test_lib.main(argv) + + +if __name__ == "__main__": + app.run(main) + +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2010 Radim Rehurek +# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html + + +"""Corpus in SVMlight format.""" + + +from __future__ import with_statement + +import logging + +from gensim import utils +from gensim.corpora import IndexedCorpus + + +logger = logging.getLogger(__name__) + + +class SvmLightCorpus(IndexedCorpus): + """Corpus in SVMlight format. + + Quoting http://svmlight.joachims.org/: + The input file contains the training examples. The first lines may contain comments and are ignored + if they start with #. Each of the following lines represents one training example + and is of the following format:: + + <line> .=. <target> <feature>:<value> <feature>:<value> ... <feature>:<value> # <info> + <target> .=. +1 | -1 | 0 | <float> + <feature> .=. <integer> | "qid" + <value> .=. <float> + <info> .=. <string> + + The "qid" feature (used for SVMlight ranking), if present, is ignored. + + Notes + ----- + Although not mentioned in the specification above, SVMlight also expects its feature ids to be 1-based + (counting starts at 1). We convert features to 0-based internally by decrementing all ids when loading an SVMlight + input file, and increment them again when saving as SVMlight. + + """ + + def __init__(self, fname, store_labels=True): + """ + + Parameters + ---------- + fname : str + Path to corpus. + store_labels : bool, optional + Whether to store labels (~SVM target class). They currently have no application, but are stored + in `self.labels` for convenience by default. + + """ + IndexedCorpus.__init__(self, fname) + logger.info("loading corpus from %s", fname) + + self.fname = fname # input file, see class doc for format + self.length = None + self.store_labels = store_labels + self.labels = [] + + def __iter__(self): + """Iterate over the corpus, returning one sparse (BoW) vector at a time. + + Yields + ------ + list of (int, float) + Document in BoW format. + + """ + lineno = -1 + self.labels = [] + with utils.open(self.fname, 'rb') as fin: + for lineno, line in enumerate(fin): + doc = self.line2doc(line) + if doc is not None: + if self.store_labels: + self.labels.append(doc[1]) + yield doc[0] + self.length = lineno + 1 + + @staticmethod + def save_corpus(fname, corpus, id2word=None, labels=False, metadata=False): + """Save a corpus in the SVMlight format. + + The SVMlight `<target>` class tag is taken from the `labels` array, or set to 0 for all documents + if `labels` is not supplied. + + Parameters + ---------- + fname : str + Path to output file. + corpus : iterable of iterable of (int, float) + Corpus in BoW format. + id2word : dict of (str, str), optional + Mapping id -> word. + labels : list or False + A list of SVMlight `<target>` class tags, or False if not present. + metadata : bool + ARGUMENT WILL BE IGNORED. + + Returns + ------- + list of int + Offsets for each line in file (in bytes). + + """ + logger.info("converting corpus to SVMlight format: %s", fname) + + if labels is not False: + # Cast any sequence (incl. a numpy array) to a list, to simplify the processing below.
+ labels = list(labels) + offsets = [] + with utils.open(fname, 'wb') as fout: + for docno, doc in enumerate(corpus): + label = labels[docno] if labels else 0 # target class is 0 by default + offsets.append(fout.tell()) + fout.write(utils.to_utf8(SvmLightCorpus.doc2line(doc, label))) + return offsets + + def docbyoffset(self, offset): + """Get the document stored at file position `offset`. + + Parameters + ---------- + offset : int + Document's position. + + Returns + ------- + tuple of (int, float) + + """ + with utils.open(self.fname, 'rb') as f: + f.seek(offset) + return self.line2doc(f.readline())[0] + # TODO: this breaks if line2doc returns None + + def line2doc(self, line): + """Get a document from a single line in SVMlight format. + This method is the inverse of :meth:`~gensim.corpora.svmlightcorpus.SvmLightCorpus.doc2line`. + + Parameters + ---------- + line : str + Line in SVMlight format. + + Returns + ------- + (list of (int, float), str) + Document in BoW format and target class label. + + """ + line = utils.to_unicode(line) + line = line[: line.find('#')].strip() + if not line: + return None # ignore comments and empty lines + parts = line.split() + if not parts: + raise ValueError('invalid line format in %s' % self.fname) + target, fields = parts[0], [part.rsplit(':', 1) for part in parts[1:]] + # ignore 'qid' features, convert 1-based feature ids to 0-based + doc = [(int(p1) - 1, float(p2)) for p1, p2 in fields if p1 != 'qid'] + return doc, target + + @staticmethod + def doc2line(doc, label=0): + """Convert a document in BoW representation to a line in SVMlight format. + This method is the inverse of :meth:`~gensim.corpora.svmlightcorpus.SvmLightCorpus.line2doc`. + + Parameters + ---------- + doc : list of (int, float) + Document in BoW format. + label : int, optional + Document label (if provided). + + Returns + ------- + str + `doc` in SVMlight format.
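Examples
--------
An illustrative example (not from the original documentation), using the
implementation below; 0-based term ids 0 and 2 become the 1-based SVMlight
feature ids 1 and 3::

    SvmLightCorpus.doc2line([(0, 0.5), (2, 1.0)], label=1)
    # -> "1 1:0.5 3:1.0" plus a trailing newline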
+ + """ + pairs = ' '.join("%i:%s" % (termid + 1, termval) for termid, termval in doc) # +1 to convert 0-base to 1-base + return "%s %s\n" % (label, pairs) + +import torch +import torch.nn as nn +import torchvision +import torchvision.transforms as transforms + + +# Device configuration +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +# Hyper-parameters +sequence_length = 28 +input_size = 28 +hidden_size = 128 +num_layers = 2 +num_classes = 10 +batch_size = 100 +num_epochs = 2 +learning_rate = 0.01 + +# MNIST dataset +train_dataset = torchvision.datasets.MNIST(root='../../data/', + train=True, + transform=transforms.ToTensor(), + download=True) + +test_dataset = torchvision.datasets.MNIST(root='../../data/', + train=False, + transform=transforms.ToTensor()) + +# Data loader +train_loader = torch.utils.data.DataLoader(dataset=train_dataset, + batch_size=batch_size, + shuffle=True) + +test_loader = torch.utils.data.DataLoader(dataset=test_dataset, + batch_size=batch_size, + shuffle=False) + +# Recurrent neural network (many-to-one) +class RNN(nn.Module): + def __init__(self, input_size, hidden_size, num_layers, num_classes): + super(RNN, self).__init__() + self.hidden_size = hidden_size + self.num_layers = num_layers + self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True) + self.fc = nn.Linear(hidden_size, num_classes) + + def forward(self, x): + # Set initial hidden and cell states + h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device) + c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device) + + # Forward propagate LSTM + out, _ = self.lstm(x, (h0, c0)) # out: tensor of shape (batch_size, seq_length, hidden_size) + + # Decode the hidden state of the last time step + out = self.fc(out[:, -1, :]) + return out + +model = RNN(input_size, hidden_size, num_layers, num_classes).to(device) + + +# Loss and optimizer +criterion = nn.CrossEntropyLoss() +optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) + +# Train the model +total_step = len(train_loader) +for epoch in range(num_epochs): + for i, (images, labels) in enumerate(train_loader): + images = images.reshape(-1, sequence_length, input_size).to(device) + labels = labels.to(device) + + # Forward pass + outputs = model(images) + loss = criterion(outputs, labels) + + # Backward and optimize + optimizer.zero_grad() + loss.backward() + optimizer.step() + + if (i+1) % 100 == 0: + print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}' + .format(epoch+1, num_epochs, i+1, total_step, loss.item())) + +# Test the model +model.eval() +with torch.no_grad(): + correct = 0 + total = 0 + for images, labels in test_loader: + images = images.reshape(-1, sequence_length, input_size).to(device) + labels = labels.to(device) + outputs = model(images) + _, predicted = torch.max(outputs.data, 1) + total += labels.size(0) + correct += (predicted == labels).sum().item() + + print('Test Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total)) + +# Save the model checkpoint +torch.save(model.state_dict(), 'model.ckpt') +""" Python Character Mapping Codec cp1250 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1250.TXT' with gencodec.py. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp1250', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + u'\x00' # 0x00 -> NULL + u'\x01' # 0x01 -> START OF HEADING + u'\x02' # 0x02 -> START OF TEXT + u'\x03' # 0x03 -> END OF TEXT + u'\x04' # 0x04 -> END OF TRANSMISSION + u'\x05' # 0x05 -> ENQUIRY + u'\x06' # 0x06 -> ACKNOWLEDGE + u'\x07' # 0x07 -> BELL + u'\x08' # 0x08 -> BACKSPACE + u'\t' # 0x09 -> HORIZONTAL TABULATION + u'\n' # 0x0A -> LINE FEED + u'\x0b' # 0x0B -> VERTICAL TABULATION + u'\x0c' # 0x0C -> FORM FEED + u'\r' # 0x0D -> CARRIAGE RETURN + u'\x0e' # 0x0E -> SHIFT OUT + u'\x0f' # 0x0F -> SHIFT IN + u'\x10' # 0x10 -> DATA LINK ESCAPE + u'\x11' # 0x11 -> DEVICE CONTROL ONE + u'\x12' # 0x12 -> DEVICE CONTROL TWO + u'\x13' # 0x13 -> DEVICE CONTROL THREE + u'\x14' # 0x14 -> DEVICE CONTROL FOUR + u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + u'\x16' # 0x16 -> SYNCHRONOUS IDLE + u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK + u'\x18' # 0x18 -> CANCEL + u'\x19' # 0x19 -> END OF MEDIUM + u'\x1a' # 0x1A -> SUBSTITUTE + u'\x1b' # 0x1B -> ESCAPE + u'\x1c' # 0x1C -> FILE SEPARATOR + u'\x1d' # 0x1D -> GROUP SEPARATOR + u'\x1e' # 0x1E -> RECORD SEPARATOR + u'\x1f' # 0x1F -> UNIT SEPARATOR + u' ' # 0x20 -> SPACE + u'!' # 0x21 -> EXCLAMATION MARK + u'"' # 0x22 -> QUOTATION MARK + u'#' # 0x23 -> NUMBER SIGN + u'$' # 0x24 -> DOLLAR SIGN + u'%' # 0x25 -> PERCENT SIGN + u'&' # 0x26 -> AMPERSAND + u"'" # 0x27 -> APOSTROPHE + u'(' # 0x28 -> LEFT PARENTHESIS + u')' # 0x29 -> RIGHT PARENTHESIS + u'*' # 0x2A -> ASTERISK + u'+' # 0x2B -> PLUS SIGN + u',' # 0x2C -> COMMA + u'-' # 0x2D -> HYPHEN-MINUS + u'.' # 0x2E -> FULL STOP + u'/' # 0x2F -> SOLIDUS + u'0' # 0x30 -> DIGIT ZERO + u'1' # 0x31 -> DIGIT ONE + u'2' # 0x32 -> DIGIT TWO + u'3' # 0x33 -> DIGIT THREE + u'4' # 0x34 -> DIGIT FOUR + u'5' # 0x35 -> DIGIT FIVE + u'6' # 0x36 -> DIGIT SIX + u'7' # 0x37 -> DIGIT SEVEN + u'8' # 0x38 -> DIGIT EIGHT + u'9' # 0x39 -> DIGIT NINE + u':' # 0x3A -> COLON + u';' # 0x3B -> SEMICOLON + u'<' # 0x3C -> LESS-THAN SIGN + u'=' # 0x3D -> EQUALS SIGN + u'>' # 0x3E -> GREATER-THAN SIGN + u'?' 
# 0x3F -> QUESTION MARK + u'@' # 0x40 -> COMMERCIAL AT + u'A' # 0x41 -> LATIN CAPITAL LETTER A + u'B' # 0x42 -> LATIN CAPITAL LETTER B + u'C' # 0x43 -> LATIN CAPITAL LETTER C + u'D' # 0x44 -> LATIN CAPITAL LETTER D + u'E' # 0x45 -> LATIN CAPITAL LETTER E + u'F' # 0x46 -> LATIN CAPITAL LETTER F + u'G' # 0x47 -> LATIN CAPITAL LETTER G + u'H' # 0x48 -> LATIN CAPITAL LETTER H + u'I' # 0x49 -> LATIN CAPITAL LETTER I + u'J' # 0x4A -> LATIN CAPITAL LETTER J + u'K' # 0x4B -> LATIN CAPITAL LETTER K + u'L' # 0x4C -> LATIN CAPITAL LETTER L + u'M' # 0x4D -> LATIN CAPITAL LETTER M + u'N' # 0x4E -> LATIN CAPITAL LETTER N + u'O' # 0x4F -> LATIN CAPITAL LETTER O + u'P' # 0x50 -> LATIN CAPITAL LETTER P + u'Q' # 0x51 -> LATIN CAPITAL LETTER Q + u'R' # 0x52 -> LATIN CAPITAL LETTER R + u'S' # 0x53 -> LATIN CAPITAL LETTER S + u'T' # 0x54 -> LATIN CAPITAL LETTER T + u'U' # 0x55 -> LATIN CAPITAL LETTER U + u'V' # 0x56 -> LATIN CAPITAL LETTER V + u'W' # 0x57 -> LATIN CAPITAL LETTER W + u'X' # 0x58 -> LATIN CAPITAL LETTER X + u'Y' # 0x59 -> LATIN CAPITAL LETTER Y + u'Z' # 0x5A -> LATIN CAPITAL LETTER Z + u'[' # 0x5B -> LEFT SQUARE BRACKET + u'\\' # 0x5C -> REVERSE SOLIDUS + u']' # 0x5D -> RIGHT SQUARE BRACKET + u'^' # 0x5E -> CIRCUMFLEX ACCENT + u'_' # 0x5F -> LOW LINE + u'`' # 0x60 -> GRAVE ACCENT + u'a' # 0x61 -> LATIN SMALL LETTER A + u'b' # 0x62 -> LATIN SMALL LETTER B + u'c' # 0x63 -> LATIN SMALL LETTER C + u'd' # 0x64 -> LATIN SMALL LETTER D + u'e' # 0x65 -> LATIN SMALL LETTER E + u'f' # 0x66 -> LATIN SMALL LETTER F + u'g' # 0x67 -> LATIN SMALL LETTER G + u'h' # 0x68 -> LATIN SMALL LETTER H + u'i' # 0x69 -> LATIN SMALL LETTER I + u'j' # 0x6A -> LATIN SMALL LETTER J + u'k' # 0x6B -> LATIN SMALL LETTER K + u'l' # 0x6C -> LATIN SMALL LETTER L + u'm' # 0x6D -> LATIN SMALL LETTER M + u'n' # 0x6E -> LATIN SMALL LETTER N + u'o' # 0x6F -> LATIN SMALL LETTER O + u'p' # 0x70 -> LATIN SMALL LETTER P + u'q' # 0x71 -> LATIN SMALL LETTER Q + u'r' # 0x72 -> LATIN SMALL LETTER R + u's' # 0x73 -> LATIN SMALL LETTER S + u't' # 0x74 -> LATIN SMALL LETTER T + u'u' # 0x75 -> LATIN SMALL LETTER U + u'v' # 0x76 -> LATIN SMALL LETTER V + u'w' # 0x77 -> LATIN SMALL LETTER W + u'x' # 0x78 -> LATIN SMALL LETTER X + u'y' # 0x79 -> LATIN SMALL LETTER Y + u'z' # 0x7A -> LATIN SMALL LETTER Z + u'{' # 0x7B -> LEFT CURLY BRACKET + u'|' # 0x7C -> VERTICAL LINE + u'}' # 0x7D -> RIGHT CURLY BRACKET + u'~' # 0x7E -> TILDE + u'\x7f' # 0x7F -> DELETE + u'\u20ac' # 0x80 -> EURO SIGN + u'\ufffe' # 0x81 -> UNDEFINED + u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK + u'\ufffe' # 0x83 -> UNDEFINED + u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK + u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS + u'\u2020' # 0x86 -> DAGGER + u'\u2021' # 0x87 -> DOUBLE DAGGER + u'\ufffe' # 0x88 -> UNDEFINED + u'\u2030' # 0x89 -> PER MILLE SIGN + u'\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON + u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK + u'\u015a' # 0x8C -> LATIN CAPITAL LETTER S WITH ACUTE + u'\u0164' # 0x8D -> LATIN CAPITAL LETTER T WITH CARON + u'\u017d' # 0x8E -> LATIN CAPITAL LETTER Z WITH CARON + u'\u0179' # 0x8F -> LATIN CAPITAL LETTER Z WITH ACUTE + u'\ufffe' # 0x90 -> UNDEFINED + u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK + u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK + u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK + u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK + u'\u2022' # 0x95 -> BULLET + u'\u2013' # 0x96 -> EN DASH + u'\u2014' # 0x97 -> EM DASH + u'\ufffe' # 0x98 -> UNDEFINED + u'\u2122' # 0x99 -> TRADE MARK SIGN + u'\u0161' 
# 0x9A -> LATIN SMALL LETTER S WITH CARON + u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK + u'\u015b' # 0x9C -> LATIN SMALL LETTER S WITH ACUTE + u'\u0165' # 0x9D -> LATIN SMALL LETTER T WITH CARON + u'\u017e' # 0x9E -> LATIN SMALL LETTER Z WITH CARON + u'\u017a' # 0x9F -> LATIN SMALL LETTER Z WITH ACUTE + u'\xa0' # 0xA0 -> NO-BREAK SPACE + u'\u02c7' # 0xA1 -> CARON + u'\u02d8' # 0xA2 -> BREVE + u'\u0141' # 0xA3 -> LATIN CAPITAL LETTER L WITH STROKE + u'\xa4' # 0xA4 -> CURRENCY SIGN + u'\u0104' # 0xA5 -> LATIN CAPITAL LETTER A WITH OGONEK + u'\xa6' # 0xA6 -> BROKEN BAR + u'\xa7' # 0xA7 -> SECTION SIGN + u'\xa8' # 0xA8 -> DIAERESIS + u'\xa9' # 0xA9 -> COPYRIGHT SIGN + u'\u015e' # 0xAA -> LATIN CAPITAL LETTER S WITH CEDILLA + u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + u'\xac' # 0xAC -> NOT SIGN + u'\xad' # 0xAD -> SOFT HYPHEN + u'\xae' # 0xAE -> REGISTERED SIGN + u'\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE + u'\xb0' # 0xB0 -> DEGREE SIGN + u'\xb1' # 0xB1 -> PLUS-MINUS SIGN + u'\u02db' # 0xB2 -> OGONEK + u'\u0142' # 0xB3 -> LATIN SMALL LETTER L WITH STROKE + u'\xb4' # 0xB4 -> ACUTE ACCENT + u'\xb5' # 0xB5 -> MICRO SIGN + u'\xb6' # 0xB6 -> PILCROW SIGN + u'\xb7' # 0xB7 -> MIDDLE DOT + u'\xb8' # 0xB8 -> CEDILLA + u'\u0105' # 0xB9 -> LATIN SMALL LETTER A WITH OGONEK + u'\u015f' # 0xBA -> LATIN SMALL LETTER S WITH CEDILLA + u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + u'\u013d' # 0xBC -> LATIN CAPITAL LETTER L WITH CARON + u'\u02dd' # 0xBD -> DOUBLE ACUTE ACCENT + u'\u013e' # 0xBE -> LATIN SMALL LETTER L WITH CARON + u'\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE + u'\u0154' # 0xC0 -> LATIN CAPITAL LETTER R WITH ACUTE + u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE + u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + u'\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE + u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS + u'\u0139' # 0xC5 -> LATIN CAPITAL LETTER L WITH ACUTE + u'\u0106' # 0xC6 -> LATIN CAPITAL LETTER C WITH ACUTE + u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA + u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON + u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE + u'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK + u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS + u'\u011a' # 0xCC -> LATIN CAPITAL LETTER E WITH CARON + u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE + u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX + u'\u010e' # 0xCF -> LATIN CAPITAL LETTER D WITH CARON + u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE + u'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE + u'\u0147' # 0xD2 -> LATIN CAPITAL LETTER N WITH CARON + u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE + u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + u'\u0150' # 0xD5 -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE + u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS + u'\xd7' # 0xD7 -> MULTIPLICATION SIGN + u'\u0158' # 0xD8 -> LATIN CAPITAL LETTER R WITH CARON + u'\u016e' # 0xD9 -> LATIN CAPITAL LETTER U WITH RING ABOVE + u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE + u'\u0170' # 0xDB -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE + u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS + u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE + u'\u0162' # 0xDE -> LATIN CAPITAL LETTER T WITH CEDILLA + u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S + u'\u0155' # 0xE0 -> LATIN SMALL LETTER R WITH ACUTE + u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH 
ACUTE + u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + u'\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE + u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS + u'\u013a' # 0xE5 -> LATIN SMALL LETTER L WITH ACUTE + u'\u0107' # 0xE6 -> LATIN SMALL LETTER C WITH ACUTE + u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA + u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON + u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE + u'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK + u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS + u'\u011b' # 0xEC -> LATIN SMALL LETTER E WITH CARON + u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE + u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX + u'\u010f' # 0xEF -> LATIN SMALL LETTER D WITH CARON + u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE + u'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE + u'\u0148' # 0xF2 -> LATIN SMALL LETTER N WITH CARON + u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE + u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + u'\u0151' # 0xF5 -> LATIN SMALL LETTER O WITH DOUBLE ACUTE + u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS + u'\xf7' # 0xF7 -> DIVISION SIGN + u'\u0159' # 0xF8 -> LATIN SMALL LETTER R WITH CARON + u'\u016f' # 0xF9 -> LATIN SMALL LETTER U WITH RING ABOVE + u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE + u'\u0171' # 0xFB -> LATIN SMALL LETTER U WITH DOUBLE ACUTE + u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS + u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE + u'\u0163' # 0xFE -> LATIN SMALL LETTER T WITH CEDILLA + u'\u02d9' # 0xFF -> DOT ABOVE +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) + +# -*- coding: utf-8 -*- +############################################################################## +# +# OpenERP, Open Source Management Solution +# Copyright (C) 2004-2010 Tiny SPRL (). +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . +# +############################################################################## + + +{ + 'name': 'MRP Byproducts', + 'version': '1.0', + 'category': 'Manufacturing', + 'description': """ +This module allows you to produce several products from one production order. +============================================================================= + +You can configure by-products in the bill of material. + +Without this module: +-------------------- + A + B + C -> D + +With this module: +----------------- + A + B + C -> D + E + """, + 'author': 'OpenERP SA', + 'website': 'https://www.odoo.com/page/manufacturing', + 'depends': ['base', 'mrp'], + 'data': [ + 'security/ir.model.access.csv', + 'mrp_byproduct_view.xml' + ], + 'demo': [], + 'test': ['test/mrp_byproduct.yml'], + 'installable': True, + 'auto_install': False, +} +# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: + +# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors +# License: GNU General Public License v3. 
See license.txt + +from __future__ import unicode_literals +import webnotes +from webnotes import _ +from webnotes.utils import fmt_money, formatdate, now_datetime, cstr, esc, \ + get_url_to_form, get_fullname +from webnotes.utils.dateutils import datetime_in_user_format +from datetime import timedelta +from dateutil.relativedelta import relativedelta +from webnotes.utils.email_lib import sendmail + +content_sequence = [ + ["Income / Expenses", ["income_year_to_date", "bank_balance", + "income", "expenses_booked"]], + ["Receivables / Payables", ["collections", "payments", + "invoiced_amount", "payables"]], + ["Buying", ["new_purchase_requests", "new_supplier_quotations", "new_purchase_orders"]], + ["Selling", ["new_leads", "new_enquiries", "new_quotations", "new_sales_orders"]], + ["Stock", ["new_delivery_notes", "new_purchase_receipts", "new_stock_entries"]], + ["Support", ["new_communications", "new_support_tickets", "open_tickets"]], + ["Projects", ["new_projects"]], + ["System", ["scheduler_errors"]], +] + +user_specific_content = ["calendar_events", "todo_list"] + +digest_template = """ +

<h2>%(name)s</h2>
<h4>%(company)s</h4>
<p>%(date)s</p>
<hr>
%(with_value)s
%(no_value)s
<hr>
<p>To change what you see here,
create more digests, go to Setup > Email Digest</p>
""" + +row_template = """

<p style="%(style)s">%(label)s:
<strong>%(currency)s%(value)s</strong></p>
""" + +from webnotes.model.controller import DocListController +class DocType(DocListController): + def __init__(self, doc, doclist=[]): + self.doc, self.doclist = doc, doclist + self.from_date, self.to_date = self.get_from_to_date() + self.future_from_date, self.future_to_date = self.get_future_from_to_date() + self.currency = webnotes.conn.get_value("Company", self.doc.company, + "default_currency") + + def get_profiles(self): + """get list of profiles""" + profile_list = webnotes.conn.sql(""" + select name, enabled from tabProfile + where docstatus=0 and name not in ('Administrator', 'Guest') + and user_type = "System User" + order by enabled desc, name asc""", as_dict=1) + + if self.doc.recipient_list: + recipient_list = self.doc.recipient_list.split("\n") + else: + recipient_list = [] + for p in profile_list: + p["checked"] = p["name"] in recipient_list and 1 or 0 + + webnotes.response['profile_list'] = profile_list + + def send(self): + # send email only to enabled users + valid_users = [p[0] for p in webnotes.conn.sql("""select name from `tabProfile` + where enabled=1""")] + recipients = filter(lambda r: r in valid_users, + self.doc.recipient_list.split("\n")) + + common_msg = self.get_common_content() + if recipients: + for user_id in recipients: + msg_for_this_receipient = self.get_msg_html(self.get_user_specific_content(user_id) + \ + common_msg) + if msg_for_this_receipient: + sendmail(recipients=user_id, + subject="[ERPNext] [{frequency} Digest] {name}".format( + frequency=self.doc.frequency, name=self.doc.name), + msg=msg_for_this_receipient) + + def get_digest_msg(self): + return self.get_msg_html(self.get_user_specific_content(webnotes.session.user) + \ + self.get_common_content(), send_only_if_updates=False) + + def get_common_content(self): + out = [] + for module, content in content_sequence: + module_out = [] + for ctype in content: + if self.doc.fields.get(ctype) and hasattr(self, "get_"+ctype): + module_out.append(getattr(self, "get_"+ctype)()) + if any([m[0] for m in module_out]): + out += [[1, "

" + _(module) + "

"]] + module_out + [[1, "
"]] + else: + out += module_out + + return out + + def get_user_specific_content(self, user_id): + original_session_user = webnotes.session.user + + # setting session user for role base event fetching + webnotes.session.user = user_id + + out = [] + for ctype in user_specific_content: + if self.doc.fields.get(ctype) and hasattr(self, "get_"+ctype): + out.append(getattr(self, "get_"+ctype)(user_id)) + + webnotes.session.user = original_session_user + + return out + + def get_msg_html(self, out, send_only_if_updates=True): + with_value = [o[1] for o in out if o[0]] + + if with_value: + has_updates = True + with_value = "\n".join(with_value) + else: + has_updates = False + with_value = "

<p>There were no updates in the items selected for this digest.</p>
" + + if not has_updates and send_only_if_updates: + return + + # separate out no value items + no_value = [o[1] for o in out if not o[0]] + if no_value: + no_value = """
<h4>No Updates For:</h4>
""" + "\n".join(no_value) + + date = self.doc.frequency == "Daily" and formatdate(self.from_date) or \ + "%s to %s" % (formatdate(self.from_date), formatdate(self.to_date)) + + msg = digest_template % { + "digest": self.doc.frequency + " Digest", + "date": date, + "company": self.doc.company, + "with_value": with_value, + "no_value": no_value or "", + "name": self.doc.name + } + + return msg + + def get_income_year_to_date(self): + return self.get_income(webnotes.conn.get_defaults("year_start_date"), + self.meta.get_label("income_year_to_date")) + + def get_bank_balance(self): + # account is of type "Bank or Cash" + accounts = dict([[a["name"], [a["account_name"], 0]] for a in self.get_accounts() + if a["account_type"]=="Bank or Cash"]) + ackeys = accounts.keys() + + for gle in self.get_gl_entries(None, self.to_date): + if gle["account"] in ackeys: + accounts[gle["account"]][1] += gle["debit"] - gle["credit"] + + # build html + out = self.get_html("Bank/Cash Balance", "", "") + for ac in ackeys: + if accounts[ac][1]: + out += "\n" + self.get_html(accounts[ac][0], self.currency, + fmt_money(accounts[ac][1]), style="margin-left: 17px") + return sum((accounts[ac][1] for ac in ackeys)), out + + def get_income(self, from_date=None, label=None): + # account is PL Account and Credit type account + accounts = [a["name"] for a in self.get_accounts() + if a["is_pl_account"]=="Yes" and a["debit_or_credit"]=="Credit"] + + income = 0 + for gle in self.get_gl_entries(from_date or self.from_date, self.to_date): + if gle["account"] in accounts: + income += gle["credit"] - gle["debit"] + + return income, self.get_html(label or self.meta.get_label("income"), self.currency, + fmt_money(income)) + + def get_expenses_booked(self): + # account is PL Account and Debit type account + accounts = [a["name"] for a in self.get_accounts() + if a["is_pl_account"]=="Yes" and a["debit_or_credit"]=="Debit"] + + expense = 0 + for gle in self.get_gl_entries(self.from_date, self.to_date): + if gle["account"] in accounts: + expense += gle["debit"] - gle["credit"] + + return expense, self.get_html(self.meta.get_label("expenses_booked"), self.currency, + fmt_money(expense)) + + def get_collections(self): + return self.get_party_total("Customer", "credit", self.meta.get_label("collections")) + + def get_payments(self): + return self.get_party_total("Supplier", "debit", self.meta.get_label("payments")) + + def get_party_total(self, party_type, gle_field, label): + import re + # account is of master_type Customer or Supplier + accounts = [a["name"] for a in self.get_accounts() + if a["master_type"]==party_type] + + # account is "Bank or Cash" + bc_accounts = [esc(a["name"], "()|") for a in self.get_accounts() + if a["account_type"]=="Bank or Cash"] + bc_regex = re.compile("""(%s)""" % "|".join(bc_accounts)) + + total = 0 + for gle in self.get_gl_entries(self.from_date, self.to_date): + # check that its made against a bank or cash account + if gle["account"] in accounts and gle["against"] and \ + bc_regex.findall(gle["against"]): + val = gle["debit"] - gle["credit"] + total += (gle_field=="debit" and 1 or -1) * val + + return total, self.get_html(label, self.currency, fmt_money(total)) + + def get_invoiced_amount(self): + # aka receivables + return self.get_booked_total("Customer", "debit", self.meta.get_label("invoiced_amount")) + + def get_payables(self): + return self.get_booked_total("Supplier", "credit", self.meta.get_label("payables")) + + def get_booked_total(self, party_type, gle_field, label): + # account is of master_type 
Customer or Supplier + accounts = [a["name"] for a in self.get_accounts() + if a["master_type"]==party_type] + + total = 0 + for gle in self.get_gl_entries(self.from_date, self.to_date): + if gle["account"] in accounts: + total += gle[gle_field] + + return total, self.get_html(label, self.currency, fmt_money(total)) + + def get_new_leads(self): + return self.get_new_count("Lead", self.meta.get_label("new_leads")) + + def get_new_enquiries(self): + return self.get_new_count("Opportunity", self.meta.get_label("new_enquiries"), docstatus=1) + + def get_new_quotations(self): + return self.get_new_sum("Quotation", self.meta.get_label("new_quotations"), "grand_total") + + def get_new_sales_orders(self): + return self.get_new_sum("Sales Order", self.meta.get_label("new_sales_orders"), "grand_total") + + def get_new_delivery_notes(self): + return self.get_new_sum("Delivery Note", self.meta.get_label("new_delivery_notes"), "grand_total") + + def get_new_purchase_requests(self): + return self.get_new_count("Material Request", + self.meta.get_label("new_purchase_requests"), docstatus=1) + + def get_new_supplier_quotations(self): + return self.get_new_sum("Supplier Quotation", self.meta.get_label("new_supplier_quotations"), + "grand_total") + + def get_new_purchase_orders(self): + return self.get_new_sum("Purchase Order", self.meta.get_label("new_purchase_orders"), + "grand_total") + + def get_new_purchase_receipts(self): + return self.get_new_sum("Purchase Receipt", self.meta.get_label("new_purchase_receipts"), + "grand_total") + + def get_new_stock_entries(self): + return self.get_new_sum("Stock Entry", self.meta.get_label("new_stock_entries"), "total_amount") + + def get_new_support_tickets(self): + return self.get_new_count("Support Ticket", self.meta.get_label("new_support_tickets"), + filter_by_company=False) + + def get_new_communications(self): + return self.get_new_count("Communication", self.meta.get_label("new_communications"), + filter_by_company=False) + + def get_new_projects(self): + return self.get_new_count("Project", self.meta.get_label("new_projects"), + filter_by_company=False) + + def get_calendar_events(self, user_id): + from core.doctype.event.event import get_events + events = get_events(self.future_from_date.strftime("%Y-%m-%d"), self.future_to_date.strftime("%Y-%m-%d")) + + html = "" + if events: + for i, e in enumerate(events): + if i>=10: + break + if e.all_day: + html += """
<li>%s [%s (%s)]</li>""" % \ + (e.subject, datetime_in_user_format(e.starts_on), _("All Day")) + else: + html += "<li>%s [%s - %s]</li>" % \ + (e.subject, datetime_in_user_format(e.starts_on), datetime_in_user_format(e.ends_on)) + + if html: + return 1, "<h4>Upcoming Calendar Events (max 10):</h4><ul>" + html + "</ul>" + else: + return 0, "<h4>Calendar Events</h4>
    " + + def get_todo_list(self, user_id): + from core.page.todo.todo import get + todo_list = get() + + html = "" + if todo_list: + for i, todo in enumerate([todo for todo in todo_list if not todo.checked]): + if i>= 10: + break + if not todo.description and todo.reference_type: + todo.description = "%s: %s - %s %s" % \ + (todo.reference_type, get_url_to_form(todo.reference_type, todo.reference_name), + _("assigned by"), get_fullname(todo.assigned_by)) + + html += "
<li>%s [%s]</li>" % (todo.description, todo.priority) + + if html: + return 1, "<h4>To Do (max 10):</h4><ul>" + html + "</ul>" + else: + return 0, "<h4>To Do</h4>
    " + + def get_new_count(self, doctype, label, docstatus=0, filter_by_company=True): + if filter_by_company: + company = """and company="%s" """ % self.doc.company + else: + company = "" + count = webnotes.conn.sql("""select count(*) from `tab%s` + where docstatus=%s %s and + date(creation)>=%s and date(creation)<=%s""" % + (doctype, docstatus, company, "%s", "%s"), (self.from_date, self.to_date)) + count = count and count[0][0] or 0 + + return count, self.get_html(label, None, count) + + def get_new_sum(self, doctype, label, sum_field): + count_sum = webnotes.conn.sql("""select count(*), sum(ifnull(`%s`, 0)) + from `tab%s` where docstatus=1 and company = %s and + date(creation)>=%s and date(creation)<=%s""" % (sum_field, doctype, "%s", + "%s", "%s"), (self.doc.company, self.from_date, self.to_date)) + count, total = count_sum and count_sum[0] or (0, 0) + + return count, self.get_html(label, self.currency, + "%s - (%s)" % (fmt_money(total), cstr(count))) + + def get_html(self, label, currency, value, style=None): + """get html output""" + return row_template % { + "style": style or "", + "label": label, + "currency": currency and (currency+" ") or "", + "value": value + } + + def get_gl_entries(self, from_date=None, to_date=None): + """get valid GL Entries filtered by company and posting date""" + if from_date==self.from_date and to_date==self.to_date and \ + hasattr(self, "gl_entries"): + return self.gl_entries + + gl_entries = webnotes.conn.sql("""select `account`, + ifnull(credit, 0) as credit, ifnull(debit, 0) as debit, `against` + from `tabGL Entry` + where company=%s + and posting_date <= %s %s""" % ("%s", "%s", + from_date and "and posting_date>='%s'" % from_date or ""), + (self.doc.company, to_date or self.to_date), as_dict=1) + + # cache if it is the normal cases + if from_date==self.from_date and to_date==self.to_date: + self.gl_entries = gl_entries + + return gl_entries + + def get_accounts(self): + if not hasattr(self, "accounts"): + self.accounts = webnotes.conn.sql("""select name, is_pl_account, + debit_or_credit, account_type, account_name, master_type + from `tabAccount` where company=%s and docstatus < 2 + and group_or_ledger = "Ledger" order by lft""", + (self.doc.company,), as_dict=1) + return self.accounts + + def get_from_to_date(self): + today = now_datetime().date() + + # decide from date based on email digest frequency + if self.doc.frequency == "Daily": + # from date, to_date is yesterday + from_date = to_date = today - timedelta(days=1) + elif self.doc.frequency == "Weekly": + # from date is the previous week's monday + from_date = today - timedelta(days=today.weekday(), weeks=1) + # to date is sunday i.e. 
the previous day + to_date = from_date + timedelta(days=6) + else: + # from date is the 1st day of the previous month + from_date = today - relativedelta(days=today.day-1, months=1) + # to date is the last day of the previous month + to_date = today - relativedelta(days=today.day) + + return from_date, to_date + + def get_future_from_to_date(self): + today = now_datetime().date() + + # decide from date based on email digest frequency + if self.doc.frequency == "Daily": + # from date, to_date is today + from_date = to_date = today + elif self.doc.frequency == "Weekly": + # from date is the current week's monday + from_date = today - timedelta(days=today.weekday()) + # to date is the current week's sunday + to_date = from_date + timedelta(days=6) + else: + # from date is the 1st day of the current month + from_date = today - relativedelta(days=today.day-1) + # to date is the last day of the current month + to_date = from_date + relativedelta(days=-1, months=1) + + return from_date, to_date + + def get_next_sending(self): + from_date, to_date = self.get_from_to_date() + + send_date = to_date + timedelta(days=1) + + if self.doc.frequency == "Daily": + next_send_date = send_date + timedelta(days=1) + elif self.doc.frequency == "Weekly": + next_send_date = send_date + timedelta(weeks=1) + else: + next_send_date = send_date + relativedelta(months=1) + self.doc.next_send = formatdate(next_send_date) + " at midnight" + + return send_date + + def get_open_tickets(self): + open_tickets = webnotes.conn.sql("""select name, subject, modified, raised_by + from `tabSupport Ticket` where status='Open' + order by modified desc limit 10""", as_dict=True) + + if open_tickets: + return 1, """

<h4>Latest Open Tickets (max 10):</h4>
%s""" % \ + "".join(["<p>%(name)s: %(subject)s<br>by %(raised_by)s on %(modified)s</p>
    " % \ + t for t in open_tickets]) + else: + return 0, "No Open Tickets!" + + def get_scheduler_errors(self): + import webnotes.utils.scheduler + return webnotes.utils.scheduler.get_error_report(self.from_date, self.to_date) + + def onload(self): + self.get_next_sending() + +def send(): + from webnotes.model.code import get_obj + from webnotes.utils import getdate + now_date = now_datetime().date() + + from webnotes import conf + if "expires_on" in conf and now_date > getdate(conf.expires_on): + # do not send email digests to expired accounts + return + + for ed in webnotes.conn.sql("""select name from `tabEmail Digest` + where enabled=1 and docstatus<2""", as_list=1): + ed_obj = get_obj('Email Digest', ed[0]) + if (now_date == ed_obj.get_next_sending()): + ed_obj.send() +from django.conf.urls import patterns +from django.contrib import messages +from django.core.urlresolvers import reverse +from django.http import HttpResponseRedirect, HttpResponse +from django.template import RequestContext, Template +from django.template.response import TemplateResponse +from django.views.decorators.cache import never_cache + +TEMPLATE = """{% if messages %} +
<ul class="messages">
    {% for message in messages %}
    <li{% if message.tags %} class="{{ message.tags }}"{% endif %}>
        {{ message }}
    </li>
    {% endfor %}
</ul>
    +{% endif %} +""" + +@never_cache +def add(request, message_type): + # don't default to False here, because we want to test that it defaults + # to False if unspecified + fail_silently = request.POST.get('fail_silently', None) + for msg in request.POST.getlist('messages'): + if fail_silently is not None: + getattr(messages, message_type)(request, msg, + fail_silently=fail_silently) + else: + getattr(messages, message_type)(request, msg) + + show_url = reverse('django.contrib.messages.tests.urls.show') + return HttpResponseRedirect(show_url) + +@never_cache +def add_template_response(request, message_type): + for msg in request.POST.getlist('messages'): + getattr(messages, message_type)(request, msg) + + show_url = reverse('django.contrib.messages.tests.urls.show_template_response') + return HttpResponseRedirect(show_url) + +@never_cache +def show(request): + t = Template(TEMPLATE) + return HttpResponse(t.render(RequestContext(request))) + +@never_cache +def show_template_response(request): + return TemplateResponse(request, Template(TEMPLATE)) + +urlpatterns = patterns('', + ('^add/(debug|info|success|warning|error)/$', add), + ('^show/$', show), + ('^template_response/add/(debug|info|success|warning|error)/$', add_template_response), + ('^template_response/show/$', show_template_response), +) + +"""Get useful information from live Python objects. + +This module encapsulates the interface provided by the internal special +attributes (co_*, im_*, tb_*, etc.) in a friendlier fashion. +It also provides some help for examining source code and class layout. + +Here are some of the useful functions provided by this module: + + ismodule(), isclass(), ismethod(), isfunction(), isgeneratorfunction(), + isgenerator(), istraceback(), isframe(), iscode(), isbuiltin(), + isroutine() - check object types + getmembers() - get members of an object that satisfy a given condition + + getfile(), getsourcefile(), getsource() - find an object's source code + getdoc(), getcomments() - get documentation on an object + getmodule() - determine the module that an object came from + getclasstree() - arrange classes so as to represent their hierarchy + + getargspec(), getargvalues(), getcallargs() - get info about function arguments + getfullargspec() - same, with support for Python-3000 features + formatargspec(), formatargvalues() - format an argument spec + getouterframes(), getinnerframes() - get info about frames + currentframe() - get the current stack frame + stack(), trace() - get info about frames on the stack or in a traceback + + signature() - get a Signature object for the callable +""" + +# This module is in the public domain. No warranties. 
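# Illustrative usage (a sketch, not part of the original module): the
# type-checking predicates below combine with getmembers(), e.g.
#
#   import inspect, os.path
#   names = [n for n, f in inspect.getmembers(os.path, inspect.isfunction)]
#
# yields the function names defined by the os.path module, sorted by name.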
+ +__author__ = ('Ka-Ping Yee ', + 'Yury Selivanov ') + +import imp +import importlib.machinery +import itertools +import linecache +import os +import re +import sys +import tokenize +import types +import warnings +import functools +import builtins +from operator import attrgetter +from collections import namedtuple, OrderedDict + +# Create constants for the compiler flags in Include/code.h +# We try to get them from dis to avoid duplication, but fall +# back to hardcoding so the dependency is optional +try: + from dis import COMPILER_FLAG_NAMES as _flag_names +except ImportError: + CO_OPTIMIZED, CO_NEWLOCALS = 0x1, 0x2 + CO_VARARGS, CO_VARKEYWORDS = 0x4, 0x8 + CO_NESTED, CO_GENERATOR, CO_NOFREE = 0x10, 0x20, 0x40 +else: + mod_dict = globals() + for k, v in _flag_names.items(): + mod_dict["CO_" + v] = k + +# See Include/object.h +TPFLAGS_IS_ABSTRACT = 1 << 20 + +# ----------------------------------------------------------- type-checking +def ismodule(object): + """Return true if the object is a module. + + Module objects provide these attributes: + __cached__ pathname to byte compiled file + __doc__ documentation string + __file__ filename (missing for built-in modules)""" + return isinstance(object, types.ModuleType) + +def isclass(object): + """Return true if the object is a class. + + Class objects provide these attributes: + __doc__ documentation string + __module__ name of module in which this class was defined""" + return isinstance(object, type) + +def ismethod(object): + """Return true if the object is an instance method. + + Instance method objects provide these attributes: + __doc__ documentation string + __name__ name with which this method was defined + __func__ function object containing implementation of method + __self__ instance to which this method is bound""" + return isinstance(object, types.MethodType) + +def ismethoddescriptor(object): + """Return true if the object is a method descriptor. + + But not if ismethod() or isclass() or isfunction() are true. + + This is new in Python 2.2, and, for example, is true of int.__add__. + An object passing this test has a __get__ attribute but not a __set__ + attribute, but beyond that the set of attributes varies. __name__ is + usually sensible, and __doc__ often is. + + Methods implemented via descriptors that also pass one of the other + tests return false from the ismethoddescriptor() test, simply because + the other tests promise more -- you can, e.g., count on having the + __func__ attribute (etc) when an object passes ismethod().""" + if isclass(object) or ismethod(object) or isfunction(object): + # mutual exclusion + return False + tp = type(object) + return hasattr(tp, "__get__") and not hasattr(tp, "__set__") + +def isdatadescriptor(object): + """Return true if the object is a data descriptor. + + Data descriptors have both a __get__ and a __set__ attribute. Examples are + properties (defined in Python) and getsets and members (defined in C). + Typically, data descriptors will also have __name__ and __doc__ attributes + (properties, getsets, and members have both of these attributes), but this + is not guaranteed.""" + if isclass(object) or ismethod(object) or isfunction(object): + # mutual exclusion + return False + tp = type(object) + return hasattr(tp, "__set__") and hasattr(tp, "__get__") + +if hasattr(types, 'MemberDescriptorType'): + # CPython and equivalent + def ismemberdescriptor(object): + """Return true if the object is a member descriptor. 
+ + Member descriptors are specialized descriptors defined in extension + modules.""" + return isinstance(object, types.MemberDescriptorType) +else: + # Other implementations + def ismemberdescriptor(object): + """Return true if the object is a member descriptor. + + Member descriptors are specialized descriptors defined in extension + modules.""" + return False + +if hasattr(types, 'GetSetDescriptorType'): + # CPython and equivalent + def isgetsetdescriptor(object): + """Return true if the object is a getset descriptor. + + getset descriptors are specialized descriptors defined in extension + modules.""" + return isinstance(object, types.GetSetDescriptorType) +else: + # Other implementations + def isgetsetdescriptor(object): + """Return true if the object is a getset descriptor. + + getset descriptors are specialized descriptors defined in extension + modules.""" + return False + +def isfunction(object): + """Return true if the object is a user-defined function. + + Function objects provide these attributes: + __doc__ documentation string + __name__ name with which this function was defined + __code__ code object containing compiled function bytecode + __defaults__ tuple of any default values for arguments + __globals__ global namespace in which this function was defined + __annotations__ dict of parameter annotations + __kwdefaults__ dict of keyword only parameters with defaults""" + return isinstance(object, types.FunctionType) + +def isgeneratorfunction(object): + """Return true if the object is a user-defined generator function. + + Generator function objects provide the same attributes as functions. + + See help(isfunction) for an attributes listing.""" + return bool((isfunction(object) or ismethod(object)) and + object.__code__.co_flags & CO_GENERATOR) + +def isgenerator(object): + """Return true if the object is a generator. + + Generator objects provide these attributes: + __iter__ defined to support iteration over container + close raises a new GeneratorExit exception inside the + generator to terminate the iteration + gi_code code object + gi_frame frame object or possibly None once the generator has + been exhausted + gi_running set to 1 when generator is executing, 0 otherwise + next return the next item from the container + send resumes the generator and "sends" a value that becomes + the result of the current yield-expression + throw used to raise an exception inside the generator""" + return isinstance(object, types.GeneratorType) + +def istraceback(object): + """Return true if the object is a traceback. + + Traceback objects provide these attributes: + tb_frame frame object at this level + tb_lasti index of last attempted instruction in bytecode + tb_lineno current line number in Python source code + tb_next next inner traceback object (called by this level)""" + return isinstance(object, types.TracebackType) + +def isframe(object): + """Return true if the object is a frame object. + + Frame objects provide these attributes: + f_back next outer frame object (this frame's caller) + f_builtins built-in namespace seen by this frame + f_code code object being executed in this frame + f_globals global namespace seen by this frame + f_lasti index of last attempted instruction in bytecode + f_lineno current line number in Python source code + f_locals local namespace seen by this frame + f_trace tracing function for this frame, or None""" + return isinstance(object, types.FrameType) + +def iscode(object): + """Return true if the object is a code object.
+ + Code objects provide these attributes: + co_argcount number of arguments (not including * or ** args) + co_code string of raw compiled bytecode + co_consts tuple of constants used in the bytecode + co_filename name of file in which this code object was created + co_firstlineno number of first line in Python source code + co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg + co_lnotab encoded mapping of line numbers to bytecode indices + co_name name with which this code object was defined + co_names tuple of names of local variables + co_nlocals number of local variables + co_stacksize virtual machine stack space required + co_varnames tuple of names of arguments and local variables""" + return isinstance(object, types.CodeType) + +def isbuiltin(object): + """Return true if the object is a built-in function or method. + + Built-in functions and methods provide these attributes: + __doc__ documentation string + __name__ original name of this function or method + __self__ instance to which a method is bound, or None""" + return isinstance(object, types.BuiltinFunctionType) + +def isroutine(object): + """Return true if the object is any kind of function or method.""" + return (isbuiltin(object) + or isfunction(object) + or ismethod(object) + or ismethoddescriptor(object)) + +def isabstract(object): + """Return true if the object is an abstract base class (ABC).""" + return bool(isinstance(object, type) and object.__flags__ & TPFLAGS_IS_ABSTRACT) + +def getmembers(object, predicate=None): + """Return all members of an object as (name, value) pairs sorted by name. + Optionally, only return members that satisfy a given predicate.""" + if isclass(object): + mro = (object,) + getmro(object) + else: + mro = () + results = [] + for key in dir(object): + # First try to get the value via __dict__. Some descriptors don't + # like calling their __get__ (see bug #1785). + for base in mro: + if key in base.__dict__: + value = base.__dict__[key] + break + else: + try: + value = getattr(object, key) + except AttributeError: + continue + if not predicate or predicate(value): + results.append((key, value)) + results.sort() + return results + +Attribute = namedtuple('Attribute', 'name kind defining_class object') + +def classify_class_attrs(cls): + """Return list of attribute-descriptor tuples. + + For each name in dir(cls), the return list contains a 4-tuple + with these elements: + + 0. The name (a string). + + 1. The kind of attribute this is, one of these strings: + 'class method' created via classmethod() + 'static method' created via staticmethod() + 'property' created via property() + 'method' any other flavor of method + 'data' not a method + + 2. The class which defined this attribute (a class). + + 3. The object as obtained directly from the defining class's + __dict__, not via getattr. This is especially important for + data attributes: C.data is just a data object, but + C.__dict__['data'] may be a data descriptor with additional + info, like a __doc__ string. + """ + + mro = getmro(cls) + names = dir(cls) + result = [] + for name in names: + # Get the object associated with the name, and where it was defined. + # Getting an obj from the __dict__ sometimes reveals more than + # using getattr. Static and class methods are dramatic examples. + # Furthermore, some objects may raise an Exception when fetched with + # getattr(). This is the case with some descriptors (bug #1785). + # Thus, we only use getattr() as a last resort. 
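# For example, given "class C: s = staticmethod(lambda: None)", the name 's'
# is found in C.__dict__ as the raw staticmethod object and is classified
# below as 'static method'; getattr(C, 's') would instead return the
# already-unwrapped plain function, losing that information.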
+ homecls = None + for base in (cls,) + mro: + if name in base.__dict__: + obj = base.__dict__[name] + homecls = base + break + else: + obj = getattr(cls, name) + homecls = getattr(obj, "__objclass__", homecls) + + # Classify the object. + if isinstance(obj, staticmethod): + kind = "static method" + elif isinstance(obj, classmethod): + kind = "class method" + elif isinstance(obj, property): + kind = "property" + elif ismethoddescriptor(obj): + kind = "method" + elif isdatadescriptor(obj): + kind = "data" + else: + obj_via_getattr = getattr(cls, name) + if (isfunction(obj_via_getattr) or + ismethoddescriptor(obj_via_getattr)): + kind = "method" + else: + kind = "data" + obj = obj_via_getattr + + result.append(Attribute(name, kind, homecls, obj)) + + return result + +# ----------------------------------------------------------- class helpers + +def getmro(cls): + "Return tuple of base classes (including cls) in method resolution order." + return cls.__mro__ + +# -------------------------------------------------- source code extraction +def indentsize(line): + """Return the indent size, in spaces, at the start of a line of text.""" + expline = line.expandtabs() + return len(expline) - len(expline.lstrip()) + +def getdoc(object): + """Get the documentation string for an object. + + All tabs are expanded to spaces. To clean up docstrings that are + indented to line up with blocks of code, any whitespace that can be + uniformly removed from the second line onwards is removed.""" + try: + doc = object.__doc__ + except AttributeError: + return None + if not isinstance(doc, str): + return None + return cleandoc(doc) + +def cleandoc(doc): + """Clean up indentation from docstrings. + + Any whitespace that can be uniformly removed from the second line + onwards is removed.""" + try: + lines = doc.expandtabs().split('\n') + except UnicodeError: + return None + else: + # Find minimum indentation of any non-blank lines after first line. + margin = sys.maxsize + for line in lines[1:]: + content = len(line.lstrip()) + if content: + indent = len(line) - content + margin = min(margin, indent) + # Remove indentation. + if lines: + lines[0] = lines[0].lstrip() + if margin < sys.maxsize: + for i in range(1, len(lines)): lines[i] = lines[i][margin:] + # Remove any trailing or leading blank lines.
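# An illustrative trace (assumed input, not from the original source):
# cleandoc('  Title\n\n    body\n') returns 'Title\n\nbody' -- the first
# line is lstripped above, the common 4-space margin is removed, and the
# trailing blank line is dropped by the loops below.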
+ while lines and not lines[-1]: + lines.pop() + while lines and not lines[0]: + lines.pop(0) + return '\n'.join(lines) + +def getfile(object): + """Work out which source or compiled file an object was defined in.""" + if ismodule(object): + if hasattr(object, '__file__'): + return object.__file__ + raise TypeError('{!r} is a built-in module'.format(object)) + if isclass(object): + object = sys.modules.get(object.__module__) + if hasattr(object, '__file__'): + return object.__file__ + raise TypeError('{!r} is a built-in class'.format(object)) + if ismethod(object): + object = object.__func__ + if isfunction(object): + object = object.__code__ + if istraceback(object): + object = object.tb_frame + if isframe(object): + object = object.f_code + if iscode(object): + return object.co_filename + raise TypeError('{!r} is not a module, class, method, ' + 'function, traceback, frame, or code object'.format(object)) + +ModuleInfo = namedtuple('ModuleInfo', 'name suffix mode module_type') + +def getmoduleinfo(path): + """Get the module name, suffix, mode, and module type for a given file.""" + warnings.warn('inspect.getmoduleinfo() is deprecated', DeprecationWarning, + 2) + filename = os.path.basename(path) + suffixes = [(-len(suffix), suffix, mode, mtype) + for suffix, mode, mtype in imp.get_suffixes()] + suffixes.sort() # try longest suffixes first, in case they overlap + for neglen, suffix, mode, mtype in suffixes: + if filename[neglen:] == suffix: + return ModuleInfo(filename[:neglen], suffix, mode, mtype) + +def getmodulename(path): + """Return the module name for a given file, or None.""" + fname = os.path.basename(path) + # Check for paths that look like an actual module file + suffixes = [(-len(suffix), suffix) + for suffix in importlib.machinery.all_suffixes()] + suffixes.sort() # try longest suffixes first, in case they overlap + for neglen, suffix in suffixes: + if fname.endswith(suffix): + return fname[:neglen] + return None + +def getsourcefile(object): + """Return the filename that can be used to locate an object's source. + Return None if no way can be identified to get the source. + """ + filename = getfile(object) + all_bytecode_suffixes = importlib.machinery.DEBUG_BYTECODE_SUFFIXES[:] + all_bytecode_suffixes += importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES[:] + if any(filename.endswith(s) for s in all_bytecode_suffixes): + filename = (os.path.splitext(filename)[0] + + importlib.machinery.SOURCE_SUFFIXES[0]) + elif any(filename.endswith(s) for s in + importlib.machinery.EXTENSION_SUFFIXES): + return None + if os.path.exists(filename): + return filename + # only return a non-existent filename if the module has a PEP 302 loader + if hasattr(getmodule(object, filename), '__loader__'): + return filename + # or it is in the linecache + if filename in linecache.cache: + return filename + +def getabsfile(object, _filename=None): + """Return an absolute path to the source or compiled file for an object. 
+ + The idea is for each object to have a unique origin, so this routine + normalizes the result as much as possible.""" + if _filename is None: + _filename = getsourcefile(object) or getfile(object) + return os.path.normcase(os.path.abspath(_filename)) + +modulesbyfile = {} +_filesbymodname = {} + +def getmodule(object, _filename=None): + """Return the module an object was defined in, or None if not found.""" + if ismodule(object): + return object + if hasattr(object, '__module__'): + return sys.modules.get(object.__module__) + # Try the filename to modulename cache + if _filename is not None and _filename in modulesbyfile: + return sys.modules.get(modulesbyfile[_filename]) + # Try the cache again with the absolute file name + try: + file = getabsfile(object, _filename) + except TypeError: + return None + if file in modulesbyfile: + return sys.modules.get(modulesbyfile[file]) + # Update the filename to module name cache and check yet again + # Copy sys.modules in order to cope with changes while iterating + for modname, module in list(sys.modules.items()): + if ismodule(module) and hasattr(module, '__file__'): + f = module.__file__ + if f == _filesbymodname.get(modname, None): + # Have already mapped this module, so skip it + continue + _filesbymodname[modname] = f + f = getabsfile(module) + # Always map to the name the module knows itself by + modulesbyfile[f] = modulesbyfile[ + os.path.realpath(f)] = module.__name__ + if file in modulesbyfile: + return sys.modules.get(modulesbyfile[file]) + # Check the main module + main = sys.modules['__main__'] + if not hasattr(object, '__name__'): + return None + if hasattr(main, object.__name__): + mainobject = getattr(main, object.__name__) + if mainobject is object: + return main + # Check builtins + builtin = sys.modules['builtins'] + if hasattr(builtin, object.__name__): + builtinobject = getattr(builtin, object.__name__) + if builtinobject is object: + return builtin + +def findsource(object): + """Return the entire source file and starting line number for an object. + + The argument may be a module, class, method, function, traceback, frame, + or code object. The source code is returned as a list of all the lines + in the file and the line number indexes a line in that list. An IOError + is raised if the source code cannot be retrieved.""" + + file = getfile(object) + sourcefile = getsourcefile(object) + if not sourcefile and file[:1] + file[-1:] != '<>': + raise IOError('source code not available') + file = sourcefile if sourcefile else file + + module = getmodule(object, file) + if module: + lines = linecache.getlines(file, module.__dict__) + else: + lines = linecache.getlines(file) + if not lines: + raise IOError('could not get source code') + + if ismodule(object): + return lines, 0 + + if isclass(object): + name = object.__name__ + pat = re.compile(r'^(\s*)class\s*' + name + r'\b') + # make some effort to find the best matching class definition: + # use the one with the least indentation, which is the one + # that's most probably not inside a function definition. 
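+ # (Illustration: a top-level 'class C:' has an empty indentation
+ # prefix, which sorts ahead of any indented match, so a module-level
+ # definition beats one nested inside a function.)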
+ candidates = []
+ for i in range(len(lines)):
+ match = pat.match(lines[i])
+ if match:
+ # if it's at toplevel, it's already the best one
+ if lines[i][0] == 'c':
+ return lines, i
+ # else add whitespace to candidate list
+ candidates.append((match.group(1), i))
+ if candidates:
+ # this will sort by whitespace, and by line number,
+ # less whitespace first
+ candidates.sort()
+ return lines, candidates[0][1]
+ else:
+ raise IOError('could not find class definition')
+
+ if ismethod(object):
+ object = object.__func__
+ if isfunction(object):
+ object = object.__code__
+ if istraceback(object):
+ object = object.tb_frame
+ if isframe(object):
+ object = object.f_code
+ if iscode(object):
+ if not hasattr(object, 'co_firstlineno'):
+ raise IOError('could not find function definition')
+ lnum = object.co_firstlineno - 1
+ pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
+ while lnum > 0:
+ if pat.match(lines[lnum]): break
+ lnum = lnum - 1
+ return lines, lnum
+ raise IOError('could not find code object')
+
+def getcomments(object):
+ """Get lines of comments immediately preceding an object's source code.
+
+ Returns None when source can't be found.
+ """
+ try:
+ lines, lnum = findsource(object)
+ except (IOError, TypeError):
+ return None
+
+ if ismodule(object):
+ # Look for a comment block at the top of the file.
+ start = 0
+ if lines and lines[0][:2] == '#!': start = 1
+ while start < len(lines) and lines[start].strip() in ('', '#'):
+ start = start + 1
+ if start < len(lines) and lines[start][:1] == '#':
+ comments = []
+ end = start
+ while end < len(lines) and lines[end][:1] == '#':
+ comments.append(lines[end].expandtabs())
+ end = end + 1
+ return ''.join(comments)
+
+ # Look for a preceding block of comments at the same indentation.
+ elif lnum > 0:
+ indent = indentsize(lines[lnum])
+ end = lnum - 1
+ if end >= 0 and lines[end].lstrip()[:1] == '#' and \
+ indentsize(lines[end]) == indent:
+ comments = [lines[end].expandtabs().lstrip()]
+ if end > 0:
+ end = end - 1
+ comment = lines[end].expandtabs().lstrip()
+ while comment[:1] == '#' and indentsize(lines[end]) == indent:
+ comments[:0] = [comment]
+ end = end - 1
+ if end < 0: break
+ comment = lines[end].expandtabs().lstrip()
+ while comments and comments[0].strip() == '#':
+ comments[:1] = []
+ while comments and comments[-1].strip() == '#':
+ comments[-1:] = []
+ return ''.join(comments)
+
+class EndOfBlock(Exception): pass
+
+class BlockFinder:
+ """Provide a tokeneater() method to detect the end of a code block."""
+ def __init__(self):
+ self.indent = 0
+ self.islambda = False
+ self.started = False
+ self.passline = False
+ self.last = 1
+
+ def tokeneater(self, type, token, srowcol, erowcol, line):
+ if not self.started:
+ # look for the first "def", "class" or "lambda"
+ if token in ("def", "class", "lambda"):
+ if token == "lambda":
+ self.islambda = True
+ self.started = True
+ self.passline = True # skip to the end of the line
+ elif type == tokenize.NEWLINE:
+ self.passline = False # stop skipping when a NEWLINE is seen
+ self.last = srowcol[0]
+ if self.islambda: # lambdas always end at the first NEWLINE
+ raise EndOfBlock
+ elif self.passline:
+ pass
+ elif type == tokenize.INDENT:
+ self.indent = self.indent + 1
+ self.passline = True
+ elif type == tokenize.DEDENT:
+ self.indent = self.indent - 1
+ # the end of matching indent/dedent pairs ends a block
+ # (note that this only works for "def"/"class" blocks,
+ # not e.g.
for "if: else:" or "try: finally:" blocks) + if self.indent <= 0: + raise EndOfBlock + elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL): + # any other token on the same indentation level end the previous + # block as well, except the pseudo-tokens COMMENT and NL. + raise EndOfBlock + +def getblock(lines): + """Extract the block of code at the top of the given list of lines.""" + blockfinder = BlockFinder() + try: + tokens = tokenize.generate_tokens(iter(lines).__next__) + for _token in tokens: + blockfinder.tokeneater(*_token) + except (EndOfBlock, IndentationError): + pass + return lines[:blockfinder.last] + +def getsourcelines(object): + """Return a list of source lines and starting line number for an object. + + The argument may be a module, class, method, function, traceback, frame, + or code object. The source code is returned as a list of the lines + corresponding to the object and the line number indicates where in the + original source file the first line of code was found. An IOError is + raised if the source code cannot be retrieved.""" + lines, lnum = findsource(object) + + if ismodule(object): return lines, 0 + else: return getblock(lines[lnum:]), lnum + 1 + +def getsource(object): + """Return the text of the source code for an object. + + The argument may be a module, class, method, function, traceback, frame, + or code object. The source code is returned as a single string. An + IOError is raised if the source code cannot be retrieved.""" + lines, lnum = getsourcelines(object) + return ''.join(lines) + +# --------------------------------------------------- class tree extraction +def walktree(classes, children, parent): + """Recursive helper function for getclasstree().""" + results = [] + classes.sort(key=attrgetter('__module__', '__name__')) + for c in classes: + results.append((c, c.__bases__)) + if c in children: + results.append(walktree(children[c], children, c)) + return results + +def getclasstree(classes, unique=False): + """Arrange the given list of classes into a hierarchy of nested lists. + + Where a nested list appears, it contains classes derived from the class + whose entry immediately precedes the list. Each entry is a 2-tuple + containing a class and a tuple of its base classes. If the 'unique' + argument is true, exactly one entry appears in the returned structure + for each class in the given list. Otherwise, classes using multiple + inheritance and their descendants will appear multiple times.""" + children = {} + roots = [] + for c in classes: + if c.__bases__: + for parent in c.__bases__: + if not parent in children: + children[parent] = [] + if c not in children[parent]: + children[parent].append(c) + if unique and parent in classes: break + elif c not in roots: + roots.append(c) + for parent in children: + if parent not in classes: + roots.append(parent) + return walktree(roots, children, None) + +# ------------------------------------------------ argument list extraction +Arguments = namedtuple('Arguments', 'args, varargs, varkw') + +def getargs(co): + """Get information about the arguments accepted by a code object. + + Three things are returned: (args, varargs, varkw), where + 'args' is the list of argument names. Keyword-only arguments are + appended. 'varargs' and 'varkw' are the names of the * and ** + arguments or None.""" + args, varargs, kwonlyargs, varkw = _getfullargs(co) + return Arguments(args + kwonlyargs, varargs, varkw) + +def _getfullargs(co): + """Get information about the arguments accepted by a code object. 
+
+ Four things are returned: (args, varargs, kwonlyargs, varkw), where
+ 'args' and 'kwonlyargs' are lists of argument names, and 'varargs'
+ and 'varkw' are the names of the * and ** arguments or None."""
+
+ if not iscode(co):
+ raise TypeError('{!r} is not a code object'.format(co))
+
+ nargs = co.co_argcount
+ names = co.co_varnames
+ nkwargs = co.co_kwonlyargcount
+ args = list(names[:nargs])
+ kwonlyargs = list(names[nargs:nargs+nkwargs])
+
+ nargs += nkwargs
+ varargs = None
+ if co.co_flags & CO_VARARGS:
+ varargs = co.co_varnames[nargs]
+ nargs = nargs + 1
+ varkw = None
+ if co.co_flags & CO_VARKEYWORDS:
+ varkw = co.co_varnames[nargs]
+ return args, varargs, kwonlyargs, varkw
+
+
+ArgSpec = namedtuple('ArgSpec', 'args varargs keywords defaults')
+
+def getargspec(func):
+ """Get the names and default values of a function's arguments.
+
+ A tuple of four things is returned: (args, varargs, varkw, defaults).
+ 'args' is a list of the argument names.
+ 'args' will include keyword-only argument names.
+ 'varargs' and 'varkw' are the names of the * and ** arguments or None.
+ 'defaults' is an n-tuple of the default values of the last n arguments.
+
+ Use the getfullargspec() API for Python 3 code, as annotations
+ and keyword arguments are supported. getargspec() will raise ValueError
+ if the func has either annotations or keyword arguments.
+ """
+
+ args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = \
+ getfullargspec(func)
+ if kwonlyargs or ann:
+ raise ValueError("Function has keyword-only arguments or annotations"
+ ", use getfullargspec() API which can support them")
+ return ArgSpec(args, varargs, varkw, defaults)
+
+FullArgSpec = namedtuple('FullArgSpec',
+ 'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations')
+
+def getfullargspec(func):
+ """Get the names and default values of a function's arguments.
+
+ A tuple of seven things is returned:
+ (args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations).
+ 'args' is a list of the argument names.
+ 'varargs' and 'varkw' are the names of the * and ** arguments or None.
+ 'defaults' is an n-tuple of the default values of the last n arguments.
+ 'kwonlyargs' is a list of keyword-only argument names.
+ 'kwonlydefaults' is a dictionary mapping names from kwonlyargs to defaults.
+ 'annotations' is a dictionary mapping argument names to annotations.
+
+ The first four items in the tuple correspond to getargspec().
+ """
+
+ if ismethod(func):
+ func = func.__func__
+ if not isfunction(func):
+ raise TypeError('{!r} is not a Python function'.format(func))
+ args, varargs, kwonlyargs, varkw = _getfullargs(func.__code__)
+ return FullArgSpec(args, varargs, varkw, func.__defaults__,
+ kwonlyargs, func.__kwdefaults__, func.__annotations__)
+
+ArgInfo = namedtuple('ArgInfo', 'args varargs keywords locals')
+
+def getargvalues(frame):
+ """Get information about arguments passed into a particular frame.
+
+ A tuple of four things is returned: (args, varargs, varkw, locals).
+ 'args' is a list of the argument names.
+ 'varargs' and 'varkw' are the names of the * and ** arguments or None.
+ 'locals' is the locals dictionary of the given frame.""" + args, varargs, varkw = getargs(frame.f_code) + return ArgInfo(args, varargs, varkw, frame.f_locals) + +def formatannotation(annotation, base_module=None): + if isinstance(annotation, type): + if annotation.__module__ in ('builtins', base_module): + return annotation.__name__ + return annotation.__module__+'.'+annotation.__name__ + return repr(annotation) + +def formatannotationrelativeto(object): + module = getattr(object, '__module__', None) + def _formatannotation(annotation): + return formatannotation(annotation, module) + return _formatannotation + +#brython fix me +def formatargspec(args, varargs=None, varkw=None, defaults=None, + kwonlyargs=(), kwonlydefaults={}, annotations={}, + formatarg=str, + formatvarargs=lambda name: '*' + name, + formatvarkw=lambda name: '**' + name, + formatvalue=lambda value: '=' + repr(value), + formatreturns=lambda text: ' -> ' + text, + formatannotation=formatannotation): + """Format an argument spec from the values returned by getargspec + or getfullargspec. + + The first seven arguments are (args, varargs, varkw, defaults, + kwonlyargs, kwonlydefaults, annotations). The other five arguments + are the corresponding optional formatting functions that are called to + turn names and values into strings. The last argument is an optional + function to format the sequence of arguments.""" + def formatargandannotation(arg): + result = formatarg(arg) + if arg in annotations: + result += ': ' + formatannotation(annotations[arg]) + return result + specs = [] + if defaults: + firstdefault = len(args) - len(defaults) + for i, arg in enumerate(args): + spec = formatargandannotation(arg) + if defaults and i >= firstdefault: + spec = spec + formatvalue(defaults[i - firstdefault]) + specs.append(spec) + if varargs is not None: + specs.append(formatvarargs(formatargandannotation(varargs))) + else: + if kwonlyargs: + specs.append('*') + if kwonlyargs: + for kwonlyarg in kwonlyargs: + spec = formatargandannotation(kwonlyarg) + if kwonlydefaults and kwonlyarg in kwonlydefaults: + spec += formatvalue(kwonlydefaults[kwonlyarg]) + specs.append(spec) + if varkw is not None: + specs.append(formatvarkw(formatargandannotation(varkw))) + result = '(' + ', '.join(specs) + ')' + if 'return' in annotations: + result += formatreturns(formatannotation(annotations['return'])) + return result + +#brython fix me +#def formatargvalues(args, varargs, varkw, locals, +# formatarg=str, +# formatvarargs=lambda name: '*' + name, +# formatvarkw=lambda name: '**' + name, +# formatvalue=lambda value: '=' + repr(value)): +# """Format an argument spec from the 4 values returned by getargvalues. + +# The first four arguments are (args, varargs, varkw, locals). The +# next four arguments are the corresponding optional formatting functions +# that are called to turn names and values into strings. 
The ninth
+# argument is an optional function to format the sequence of arguments."""
+# def convert(name, locals=locals,
+# formatarg=formatarg, formatvalue=formatvalue):
+# return formatarg(name) + formatvalue(locals[name])
+# specs = []
+# for i in range(len(args)):
+# specs.append(convert(args[i]))
+# if varargs:
+# specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
+# if varkw:
+# specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
+# return '(' + ', '.join(specs) + ')'
+
+def _missing_arguments(f_name, argnames, pos, values):
+ names = [repr(name) for name in argnames if name not in values]
+ missing = len(names)
+ if missing == 1:
+ s = names[0]
+ elif missing == 2:
+ s = "{} and {}".format(*names)
+ else:
+ tail = ", {} and {}".format(*names[-2:])
+ del names[-2:]
+ s = ", ".join(names) + tail
+ raise TypeError("%s() missing %i required %s argument%s: %s" %
+ (f_name, missing,
+ "positional" if pos else "keyword-only",
+ "" if missing == 1 else "s", s))
+
+def _too_many(f_name, args, kwonly, varargs, defcount, given, values):
+ atleast = len(args) - defcount
+ kwonly_given = len([arg for arg in kwonly if arg in values])
+ if varargs:
+ plural = atleast != 1
+ sig = "at least %d" % (atleast,)
+ elif defcount:
+ plural = True
+ sig = "from %d to %d" % (atleast, len(args))
+ else:
+ plural = len(args) != 1
+ sig = str(len(args))
+ kwonly_sig = ""
+ if kwonly_given:
+ msg = " positional argument%s (and %d keyword-only argument%s)"
+ kwonly_sig = (msg % ("s" if given != 1 else "", kwonly_given,
+ "s" if kwonly_given != 1 else ""))
+ raise TypeError("%s() takes %s positional argument%s but %d%s %s given" %
+ (f_name, sig, "s" if plural else "", given, kwonly_sig,
+ "was" if given == 1 and not kwonly_given else "were"))
+
+def getcallargs(func, *positional, **named):
+ """Get the mapping of arguments to values.
+
+ A dict is returned, with keys the function argument names (including the
+ names of the * and ** arguments, if any), and values the respective bound
+ values from 'positional' and 'named'."""
+ spec = getfullargspec(func)
+ args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = spec
+ f_name = func.__name__
+ arg2value = {}
+
+ if ismethod(func) and func.__self__ is not None:
+ # implicit 'self' (or 'cls' for classmethods) argument
+ positional = (func.__self__,) + positional
+ num_pos = len(positional)
+ num_args = len(args)
+ num_defaults = len(defaults) if defaults else 0
+
+ n = min(num_pos, num_args)
+ for i in range(n):
+ arg2value[args[i]] = positional[i]
+ if varargs:
+ arg2value[varargs] = tuple(positional[n:])
+ possible_kwargs = set(args + kwonlyargs)
+ if varkw:
+ arg2value[varkw] = {}
+ for kw, value in named.items():
+ if kw not in possible_kwargs:
+ if not varkw:
+ raise TypeError("%s() got an unexpected keyword argument %r" %
+ (f_name, kw))
+ arg2value[varkw][kw] = value
+ continue
+ if kw in arg2value:
+ raise TypeError("%s() got multiple values for argument %r" %
+ (f_name, kw))
+ arg2value[kw] = value
+ if num_pos > num_args and not varargs:
+ _too_many(f_name, args, kwonlyargs, varargs, num_defaults,
+ num_pos, arg2value)
+ if num_pos < num_args:
+ req = args[:num_args - num_defaults]
+ for arg in req:
+ if arg not in arg2value:
+ _missing_arguments(f_name, req, True, arg2value)
+ for i, arg in enumerate(args[num_args - num_defaults:]):
+ if arg not in arg2value:
+ arg2value[arg] = defaults[i]
+ missing = 0
+ for kwarg in kwonlyargs:
+ if kwarg not in arg2value:
+ if kwonlydefaults and kwarg in kwonlydefaults:
+ arg2value[kwarg] = kwonlydefaults[kwarg]
+ else:
+ missing += 1
+ if missing:
+ _missing_arguments(f_name, kwonlyargs, False, arg2value)
+ return arg2value
+
+ClosureVars = namedtuple('ClosureVars', 'nonlocals globals builtins unbound')
+
+def getclosurevars(func):
+ """
+ Get the mapping of free variables to their current values.
+
+ Returns a named tuple of dicts mapping the current nonlocal, global
+ and builtin references as seen by the body of the function. A final
+ set of unbound names that could not be resolved is also provided.
+ """
+
+ if ismethod(func):
+ func = func.__func__
+
+ if not isfunction(func):
+ raise TypeError("'{!r}' is not a Python function".format(func))
+
+ code = func.__code__
+ # Nonlocal references are named in co_freevars and resolved
+ # by looking them up in __closure__ by positional index
+ if func.__closure__ is None:
+ nonlocal_vars = {}
+ else:
+ nonlocal_vars = {
+ var : cell.cell_contents
+ for var, cell in zip(code.co_freevars, func.__closure__)
+ }
+
+ # Global and builtin references are named in co_names and resolved
+ # by looking them up in __globals__ or __builtins__
+ global_ns = func.__globals__
+ builtin_ns = global_ns.get("__builtins__", builtins.__dict__)
+ if ismodule(builtin_ns):
+ builtin_ns = builtin_ns.__dict__
+ global_vars = {}
+ builtin_vars = {}
+ unbound_names = set()
+ for name in code.co_names:
+ if name in ("None", "True", "False"):
+ # Because these used to be builtins instead of keywords, they
+ # may still show up as name references. We ignore them.
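+ # (Skipping them means a reference to, e.g., 'True' in co_names is
+ # never reported in the 'unbound' set of the result.)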
+ continue + try: + global_vars[name] = global_ns[name] + except KeyError: + try: + builtin_vars[name] = builtin_ns[name] + except KeyError: + unbound_names.add(name) + + return ClosureVars(nonlocal_vars, global_vars, + builtin_vars, unbound_names) + +# -------------------------------------------------- stack frame extraction + +Traceback = namedtuple('Traceback', 'filename lineno function code_context index') + +def getframeinfo(frame, context=1): + """Get information about a frame or traceback object. + + A tuple of five things is returned: the filename, the line number of + the current line, the function name, a list of lines of context from + the source code, and the index of the current line within that list. + The optional second argument specifies the number of lines of context + to return, which are centered around the current line.""" + if istraceback(frame): + lineno = frame.tb_lineno + frame = frame.tb_frame + else: + lineno = frame.f_lineno + if not isframe(frame): + raise TypeError('{!r} is not a frame or traceback object'.format(frame)) + + filename = getsourcefile(frame) or getfile(frame) + if context > 0: + start = lineno - 1 - context//2 + try: + lines, lnum = findsource(frame) + except IOError: + lines = index = None + else: + start = max(start, 1) + start = max(0, min(start, len(lines) - context)) + lines = lines[start:start+context] + index = lineno - 1 - start + else: + lines = index = None + + return Traceback(filename, lineno, frame.f_code.co_name, lines, index) + +def getlineno(frame): + """Get the line number from a frame object, allowing for optimization.""" + # FrameType.f_lineno is now a descriptor that grovels co_lnotab + return frame.f_lineno + +def getouterframes(frame, context=1): + """Get a list of records for a frame and all higher (calling) frames. + + Each record contains a frame object, filename, line number, function + name, a list of lines of context, and index within the context.""" + framelist = [] + while frame: + framelist.append((frame,) + getframeinfo(frame, context)) + frame = frame.f_back + return framelist + +def getinnerframes(tb, context=1): + """Get a list of records for a traceback's frame and all lower frames. 
+ + Each record contains a frame object, filename, line number, function + name, a list of lines of context, and index within the context.""" + framelist = [] + while tb: + framelist.append((tb.tb_frame,) + getframeinfo(tb, context)) + tb = tb.tb_next + return framelist + +def currentframe(): + """Return the frame of the caller or None if this is not possible.""" + return sys._getframe(1) if hasattr(sys, "_getframe") else None + +def stack(context=1): + """Return a list of records for the stack above the caller's frame.""" + return getouterframes(sys._getframe(1), context) + +def trace(context=1): + """Return a list of records for the stack below the current exception.""" + return getinnerframes(sys.exc_info()[2], context) + + +# ------------------------------------------------ static version of getattr + +_sentinel = object() + +def _static_getmro(klass): + return type.__dict__['__mro__'].__get__(klass) + +def _check_instance(obj, attr): + instance_dict = {} + try: + instance_dict = object.__getattribute__(obj, "__dict__") + except AttributeError: + pass + return dict.get(instance_dict, attr, _sentinel) + + +def _check_class(klass, attr): + for entry in _static_getmro(klass): + if _shadowed_dict(type(entry)) is _sentinel: + try: + return entry.__dict__[attr] + except KeyError: + pass + return _sentinel + +def _is_type(obj): + try: + _static_getmro(obj) + except TypeError: + return False + return True + +def _shadowed_dict(klass): + dict_attr = type.__dict__["__dict__"] + for entry in _static_getmro(klass): + try: + class_dict = dict_attr.__get__(entry)["__dict__"] + except KeyError: + pass + else: + if not (type(class_dict) is types.GetSetDescriptorType and + class_dict.__name__ == "__dict__" and + class_dict.__objclass__ is entry): + return class_dict + return _sentinel + +def getattr_static(obj, attr, default=_sentinel): + """Retrieve attributes without triggering dynamic lookup via the + descriptor protocol, __getattr__ or __getattribute__. + + Note: this function may not be able to retrieve all attributes + that getattr can fetch (like dynamically created attributes) + and may find attributes that getattr can't (like descriptors + that raise AttributeError). It can also return descriptor objects + instead of instance members in some cases. See the + documentation for details. 
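+
+ For example, if C.attr is a property, getattr_static(C(), 'attr')
+ returns the property object itself rather than the value its __get__
+ method would compute, whereas getattr(C(), 'attr') runs the getter.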
+ """ + instance_result = _sentinel + if not _is_type(obj): + klass = type(obj) + dict_attr = _shadowed_dict(klass) + if (dict_attr is _sentinel or + type(dict_attr) is types.MemberDescriptorType): + instance_result = _check_instance(obj, attr) + else: + klass = obj + + klass_result = _check_class(klass, attr) + + if instance_result is not _sentinel and klass_result is not _sentinel: + if (_check_class(type(klass_result), '__get__') is not _sentinel and + _check_class(type(klass_result), '__set__') is not _sentinel): + return klass_result + + if instance_result is not _sentinel: + return instance_result + if klass_result is not _sentinel: + return klass_result + + if obj is klass: + # for types we check the metaclass too + for entry in _static_getmro(type(klass)): + if _shadowed_dict(type(entry)) is _sentinel: + try: + return entry.__dict__[attr] + except KeyError: + pass + if default is not _sentinel: + return default + raise AttributeError(attr) + + +# ------------------------------------------------ generator introspection + +GEN_CREATED = 'GEN_CREATED' +GEN_RUNNING = 'GEN_RUNNING' +GEN_SUSPENDED = 'GEN_SUSPENDED' +GEN_CLOSED = 'GEN_CLOSED' + +def getgeneratorstate(generator): + """Get current state of a generator-iterator. + + Possible states are: + GEN_CREATED: Waiting to start execution. + GEN_RUNNING: Currently being executed by the interpreter. + GEN_SUSPENDED: Currently suspended at a yield expression. + GEN_CLOSED: Execution has completed. + """ + if generator.gi_running: + return GEN_RUNNING + if generator.gi_frame is None: + return GEN_CLOSED + if generator.gi_frame.f_lasti == -1: + return GEN_CREATED + return GEN_SUSPENDED + + +def getgeneratorlocals(generator): + """ + Get the mapping of generator local variables to their current values. + + A dict is returned, with the keys the local variable names and values the + bound values.""" + + if not isgenerator(generator): + raise TypeError("'{!r}' is not a Python generator".format(generator)) + + frame = getattr(generator, "gi_frame", None) + if frame is not None: + return generator.gi_frame.f_locals + else: + return {} + +############################################################################### +### Function Signature Object (PEP 362) +############################################################################### + + +_WrapperDescriptor = type(type.__call__) +_MethodWrapper = type(all.__call__) + +_NonUserDefinedCallables = (_WrapperDescriptor, + _MethodWrapper, + types.BuiltinFunctionType) + + +def _get_user_defined_method(cls, method_name): + try: + meth = getattr(cls, method_name) + except AttributeError: + return + else: + if not isinstance(meth, _NonUserDefinedCallables): + # Once '__signature__' will be added to 'C'-level + # callables, this check won't be necessary + return meth + + +def signature(obj): + '''Get a signature object for the passed callable.''' + + if not callable(obj): + raise TypeError('{!r} is not a callable object'.format(obj)) + + if isinstance(obj, types.MethodType): + # In this case we skip the first parameter of the underlying + # function (usually `self` or `cls`). + sig = signature(obj.__func__) + return sig.replace(parameters=tuple(sig.parameters.values())[1:]) + + try: + sig = obj.__signature__ + except AttributeError: + pass + else: + if sig is not None: + return sig + + try: + # Was this function wrapped by a decorator? 
+ wrapped = obj.__wrapped__ + except AttributeError: + pass + else: + return signature(wrapped) + + if isinstance(obj, types.FunctionType): + return Signature.from_function(obj) + + if isinstance(obj, functools.partial): + sig = signature(obj.func) + + new_params = OrderedDict(sig.parameters.items()) + + partial_args = obj.args or () + partial_keywords = obj.keywords or {} + try: + ba = sig.bind_partial(*partial_args, **partial_keywords) + except TypeError as ex: + msg = 'partial object {!r} has incorrect arguments'.format(obj) + raise ValueError(msg) from ex + + for arg_name, arg_value in ba.arguments.items(): + param = new_params[arg_name] + if arg_name in partial_keywords: + # We set a new default value, because the following code + # is correct: + # + # >>> def foo(a): print(a) + # >>> print(partial(partial(foo, a=10), a=20)()) + # 20 + # >>> print(partial(partial(foo, a=10), a=20)(a=30)) + # 30 + # + # So, with 'partial' objects, passing a keyword argument is + # like setting a new default value for the corresponding + # parameter + # + # We also mark this parameter with '_partial_kwarg' + # flag. Later, in '_bind', the 'default' value of this + # parameter will be added to 'kwargs', to simulate + # the 'functools.partial' real call. + new_params[arg_name] = param.replace(default=arg_value, + _partial_kwarg=True) + + elif (param.kind not in (_VAR_KEYWORD, _VAR_POSITIONAL) and + not param._partial_kwarg): + new_params.pop(arg_name) + + return sig.replace(parameters=new_params.values()) + + sig = None + if isinstance(obj, type): + # obj is a class or a metaclass + + # First, let's see if it has an overloaded __call__ defined + # in its metaclass + call = _get_user_defined_method(type(obj), '__call__') + if call is not None: + sig = signature(call) + else: + # Now we check if the 'obj' class has a '__new__' method + new = _get_user_defined_method(obj, '__new__') + if new is not None: + sig = signature(new) + else: + # Finally, we should have at least __init__ implemented + init = _get_user_defined_method(obj, '__init__') + if init is not None: + sig = signature(init) + elif not isinstance(obj, _NonUserDefinedCallables): + # An object with __call__ + # We also check that the 'obj' is not an instance of + # _WrapperDescriptor or _MethodWrapper to avoid + # infinite recursion (and even potential segfault) + call = _get_user_defined_method(type(obj), '__call__') + if call is not None: + sig = signature(call) + + if sig is not None: + # For classes and objects we skip the first parameter of their + # __call__, __new__, or __init__ methods + return sig.replace(parameters=tuple(sig.parameters.values())[1:]) + + if isinstance(obj, types.BuiltinFunctionType): + # Raise a nicer error message for builtins + msg = 'no signature found for builtin function {!r}'.format(obj) + raise ValueError(msg) + + raise ValueError('callable {!r} is not supported by signature'.format(obj)) + + +class _void: + '''A private marker - used in Parameter & Signature''' + + +class _empty: + pass + + +class _ParameterKind(int): + def __new__(self, *args, name): + obj = int.__new__(self, *args) + obj._name = name + return obj + + def __str__(self): + return self._name + + def __repr__(self): + return '<_ParameterKind: {!r}>'.format(self._name) + + +_POSITIONAL_ONLY = _ParameterKind(0, name='POSITIONAL_ONLY') +_POSITIONAL_OR_KEYWORD = _ParameterKind(1, name='POSITIONAL_OR_KEYWORD') +_VAR_POSITIONAL = _ParameterKind(2, name='VAR_POSITIONAL') +_KEYWORD_ONLY = _ParameterKind(3, name='KEYWORD_ONLY') +_VAR_KEYWORD = 
_ParameterKind(4, name='VAR_KEYWORD') + + +class Parameter: + '''Represents a parameter in a function signature. + + Has the following public attributes: + + * name : str + The name of the parameter as a string. + * default : object + The default value for the parameter if specified. If the + parameter has no default value, this attribute is not set. + * annotation + The annotation for the parameter if specified. If the + parameter has no annotation, this attribute is not set. + * kind : str + Describes how argument values are bound to the parameter. + Possible values: `Parameter.POSITIONAL_ONLY`, + `Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`, + `Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`. + ''' + + __slots__ = ('_name', '_kind', '_default', '_annotation', '_partial_kwarg') + + POSITIONAL_ONLY = _POSITIONAL_ONLY + POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD + VAR_POSITIONAL = _VAR_POSITIONAL + KEYWORD_ONLY = _KEYWORD_ONLY + VAR_KEYWORD = _VAR_KEYWORD + + empty = _empty + + def __init__(self, name, kind, *, default=_empty, annotation=_empty, + _partial_kwarg=False): + + if kind not in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD, + _VAR_POSITIONAL, _KEYWORD_ONLY, _VAR_KEYWORD): + raise ValueError("invalid value for 'Parameter.kind' attribute") + self._kind = kind + + if default is not _empty: + if kind in (_VAR_POSITIONAL, _VAR_KEYWORD): + msg = '{} parameters cannot have default values'.format(kind) + raise ValueError(msg) + self._default = default + self._annotation = annotation + + if name is None: + if kind != _POSITIONAL_ONLY: + raise ValueError("None is not a valid name for a " + "non-positional-only parameter") + self._name = name + else: + name = str(name) + if kind != _POSITIONAL_ONLY and not name.isidentifier(): + msg = '{!r} is not a valid parameter name'.format(name) + raise ValueError(msg) + self._name = name + + self._partial_kwarg = _partial_kwarg + + @property + def name(self): + return self._name + + @property + def default(self): + return self._default + + @property + def annotation(self): + return self._annotation + + @property + def kind(self): + return self._kind + + def replace(self, *, name=_void, kind=_void, annotation=_void, + default=_void, _partial_kwarg=_void): + '''Creates a customized copy of the Parameter.''' + + if name is _void: + name = self._name + + if kind is _void: + kind = self._kind + + if annotation is _void: + annotation = self._annotation + + if default is _void: + default = self._default + + if _partial_kwarg is _void: + _partial_kwarg = self._partial_kwarg + + return type(self)(name, kind, default=default, annotation=annotation, + _partial_kwarg=_partial_kwarg) + + def __str__(self): + kind = self.kind + + formatted = self._name + if kind == _POSITIONAL_ONLY: + if formatted is None: + formatted = '' + formatted = '<{}>'.format(formatted) + + # Add annotation and default value + if self._annotation is not _empty: + formatted = '{}:{}'.format(formatted, + formatannotation(self._annotation)) + + if self._default is not _empty: + formatted = '{}={}'.format(formatted, repr(self._default)) + + if kind == _VAR_POSITIONAL: + formatted = '*' + formatted + elif kind == _VAR_KEYWORD: + formatted = '**' + formatted + + return formatted + + def __repr__(self): + return '<{} at {:#x} {!r}>'.format(self.__class__.__name__, + id(self), self.name) + + def __eq__(self, other): + return (issubclass(other.__class__, Parameter) and + self._name == other._name and + self._kind == other._kind and + self._default == other._default and + 
self._annotation == other._annotation) + + def __ne__(self, other): + return not self.__eq__(other) + + +class BoundArguments: + '''Result of `Signature.bind` call. Holds the mapping of arguments + to the function's parameters. + + Has the following public attributes: + + * arguments : OrderedDict + An ordered mutable mapping of parameters' names to arguments' values. + Does not contain arguments' default values. + * signature : Signature + The Signature object that created this instance. + * args : tuple + Tuple of positional arguments values. + * kwargs : dict + Dict of keyword arguments values. + ''' + + def __init__(self, signature, arguments): + self.arguments = arguments + self._signature = signature + + @property + def signature(self): + return self._signature + + @property + def args(self): + args = [] + for param_name, param in self._signature.parameters.items(): + if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or + param._partial_kwarg): + # Keyword arguments mapped by 'functools.partial' + # (Parameter._partial_kwarg is True) are mapped + # in 'BoundArguments.kwargs', along with VAR_KEYWORD & + # KEYWORD_ONLY + break + + try: + arg = self.arguments[param_name] + except KeyError: + # We're done here. Other arguments + # will be mapped in 'BoundArguments.kwargs' + break + else: + if param.kind == _VAR_POSITIONAL: + # *args + args.extend(arg) + else: + # plain argument + args.append(arg) + + return tuple(args) + + @property + def kwargs(self): + kwargs = {} + kwargs_started = False + for param_name, param in self._signature.parameters.items(): + if not kwargs_started: + if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or + param._partial_kwarg): + kwargs_started = True + else: + if param_name not in self.arguments: + kwargs_started = True + continue + + if not kwargs_started: + continue + + try: + arg = self.arguments[param_name] + except KeyError: + pass + else: + if param.kind == _VAR_KEYWORD: + # **kwargs + kwargs.update(arg) + else: + # plain keyword argument + kwargs[param_name] = arg + + return kwargs + + def __eq__(self, other): + return (issubclass(other.__class__, BoundArguments) and + self.signature == other.signature and + self.arguments == other.arguments) + + def __ne__(self, other): + return not self.__eq__(other) + + +class Signature: + '''A Signature object represents the overall signature of a function. + It stores a Parameter object for each parameter accepted by the + function, as well as information specific to the function itself. + + A Signature object has the following public attributes and methods: + + * parameters : OrderedDict + An ordered mapping of parameters' names to the corresponding + Parameter objects (keyword-only arguments are in the same order + as listed in `code.co_varnames`). + * return_annotation : object + The annotation for the return type of the function if specified. + If the function has no annotation for its return type, this + attribute is not set. + * bind(*args, **kwargs) -> BoundArguments + Creates a mapping from positional and keyword arguments to + parameters. + * bind_partial(*args, **kwargs) -> BoundArguments + Creates a partial mapping from positional and keyword arguments + to parameters (simulating 'functools.partial' behavior.) 
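+
+ Example: Signature.from_function(lambda a, b=1: None).bind(2) gives a
+ BoundArguments whose 'arguments' mapping is {'a': 2}; the default for
+ 'b' is not recorded there.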
+ ''' + + __slots__ = ('_return_annotation', '_parameters') + + _parameter_cls = Parameter + _bound_arguments_cls = BoundArguments + + empty = _empty + + def __init__(self, parameters=None, *, return_annotation=_empty, + __validate_parameters__=True): + '''Constructs Signature from the given list of Parameter + objects and 'return_annotation'. All arguments are optional. + ''' + + if parameters is None: + params = OrderedDict() + else: + if __validate_parameters__: + params = OrderedDict() + top_kind = _POSITIONAL_ONLY + + for idx, param in enumerate(parameters): + kind = param.kind + if kind < top_kind: + msg = 'wrong parameter order: {} before {}' + msg = msg.format(top_kind, param.kind) + raise ValueError(msg) + else: + top_kind = kind + + name = param.name + if name is None: + name = str(idx) + param = param.replace(name=name) + + if name in params: + msg = 'duplicate parameter name: {!r}'.format(name) + raise ValueError(msg) + params[name] = param + else: + params = OrderedDict(((param.name, param) + for param in parameters)) + + self._parameters = types.MappingProxyType(params) + self._return_annotation = return_annotation + + @classmethod + def from_function(cls, func): + '''Constructs Signature for the given python function''' + + if not isinstance(func, types.FunctionType): + raise TypeError('{!r} is not a Python function'.format(func)) + + Parameter = cls._parameter_cls + + # Parameter information. + func_code = func.__code__ + pos_count = func_code.co_argcount + arg_names = func_code.co_varnames + positional = tuple(arg_names[:pos_count]) + keyword_only_count = func_code.co_kwonlyargcount + keyword_only = arg_names[pos_count:(pos_count + keyword_only_count)] + annotations = func.__annotations__ + defaults = func.__defaults__ + kwdefaults = func.__kwdefaults__ + + if defaults: + pos_default_count = len(defaults) + else: + pos_default_count = 0 + + parameters = [] + + # Non-keyword-only parameters w/o defaults. + non_default_count = pos_count - pos_default_count + for name in positional[:non_default_count]: + annotation = annotations.get(name, _empty) + parameters.append(Parameter(name, annotation=annotation, + kind=_POSITIONAL_OR_KEYWORD)) + + # ... w/ defaults. + for offset, name in enumerate(positional[non_default_count:]): + annotation = annotations.get(name, _empty) + parameters.append(Parameter(name, annotation=annotation, + kind=_POSITIONAL_OR_KEYWORD, + default=defaults[offset])) + + # *args + if func_code.co_flags & 0x04: + name = arg_names[pos_count + keyword_only_count] + annotation = annotations.get(name, _empty) + parameters.append(Parameter(name, annotation=annotation, + kind=_VAR_POSITIONAL)) + + # Keyword-only parameters. + for name in keyword_only: + default = _empty + if kwdefaults is not None: + default = kwdefaults.get(name, _empty) + + annotation = annotations.get(name, _empty) + parameters.append(Parameter(name, annotation=annotation,