id: string (lengths 30-32)
content: string (lengths 139-2.8k)
codereview_new_python_data_5676
def minimum_cut(flowG, _s, _t, capacity="capacity", flow_func=None, **kwargs): if not callable(flow_func): raise nx.NetworkXError("flow_func has to be callable.") - if kwargs.get("cutoff") is not None and flow_func in flow_funcs_without_cutoff: raise nx.NetworkXError("cutoff should not be specified.") R = flow_func(flowG, _s, _t, capacity=capacity, value_only=True, **kwargs) Related to the above comment: since we're down to one unsupported function, it might be more straightforward to check against that explicitly: ```suggestion if kwargs.get("cutoff") is not None and flow_func is preflow_push: ``` def minimum_cut(flowG, _s, _t, capacity="capacity", flow_func=None, **kwargs): if not callable(flow_func): raise nx.NetworkXError("flow_func has to be callable.") + if kwargs.get("cutoff") is not None and flow_func is preflow_push: raise nx.NetworkXError("cutoff should not be specified.") R = flow_func(flowG, _s, _t, capacity=capacity, value_only=True, **kwargs)
codereview_new_python_data_5677
def test_paley_graph(p): assert (v, u) in G.edges -@pytest.mark.parametrize("graph_type", (nx.Graph, nx.DiGraph)) def test_margulis_gabber_galil_graph_badinput(graph_type): with pytest.raises( nx.NetworkXError, match="`create_using` must be an undirected multigraph" ): nx.margulis_gabber_galil_graph(3, create_using=graph_type) -@pytest.mark.parametrize("graph_type", (nx.Graph, nx.DiGraph)) def test_chordal_cycle_graph_badinput(graph_type): with pytest.raises( nx.NetworkXError, match="`create_using` must be an undirected multigraph" This all looks good! Thanks @chimaobi-okite! could you also add `nx.MultiDiGraph` ? def test_paley_graph(p): assert (v, u) in G.edges +@pytest.mark.parametrize("graph_type", (nx.Graph, nx.DiGraph, nx.MultiDiGraph)) def test_margulis_gabber_galil_graph_badinput(graph_type): with pytest.raises( nx.NetworkXError, match="`create_using` must be an undirected multigraph" ): nx.margulis_gabber_galil_graph(3, create_using=graph_type) +@pytest.mark.parametrize("graph_type", (nx.Graph, nx.DiGraph, nx.MultiDiGraph)) def test_chordal_cycle_graph_badinput(graph_type): with pytest.raises( nx.NetworkXError, match="`create_using` must be an undirected multigraph"
codereview_new_python_data_5678
def test_specified_methods(self): assert ans == pytest.approx(4, abs=1e-7) def test_directed_not_strongly_connected(self): - g = nx.DiGraph() - g.add_nodes_from(range(2)) - g.add_edge(0, 1) - pytest.raises(nx.NetworkXError, nx.average_shortest_path_length, g) def test_undirected_not_connected(self): g = nx.Graph() Mostly cosmetic suggestion, not a blocker: ```suggestion def test_directed_not_strongly_connected(self): G = nx.DiGraph([(0, 1)]) with pytest.raises(nx.NetworkXError, match="Graph is not strongly connected"): nx.average_shortest_path_length(G) ``` def test_specified_methods(self): assert ans == pytest.approx(4, abs=1e-7) def test_directed_not_strongly_connected(self): + G = nx.DiGraph([(0, 1)]) + with pytest.raises(nx.NetworkXError, match="Graph is not strongly connected"): + nx.average_shortest_path_length(G) def test_undirected_not_connected(self): g = nx.Graph()
codereview_new_python_data_5679
def vf2pp_all_isomorphisms(G1, G2, node_label=None, default_label=-1): # Check that both graphs have the same number of nodes and degree sequence if G1.order() != G2.order(): return False - if sorted(d for _, d in G1_degree.items()) != sorted( - d for _, d in G2_degree.items() - ): return False # Initialize parameters and cache necessary information about degree and labels ```suggestion if sorted(G1_degree.values()) != sorted(G2_degree.values()): ``` Because `G1.degree` is replaced by `G1_degree` which is a dict we can use `values()` here. :) def vf2pp_all_isomorphisms(G1, G2, node_label=None, default_label=-1): # Check that both graphs have the same number of nodes and degree sequence if G1.order() != G2.order(): return False + if sorted(G1_degree.values()) != sorted(G2_degree.values()): return False # Initialize parameters and cache necessary information about degree and labels
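A standalone sketch of the point above: once the degrees are plain dicts, sorting `.values()` gives the same result as iterating `.items()` and discarding the keys. The degree values here are made up.

```python
# Stand-ins for the degree dicts in the row above; the numbers are arbitrary.
G1_degree = {"a": 2, "b": 1, "c": 3}
G2_degree = {"x": 3, "y": 2, "z": 1}

# Iterating .items() only to throw the key away ...
assert sorted(d for _, d in G1_degree.items()) == [1, 2, 3]
# ... is equivalent to, and noisier than, sorting .values() directly.
assert sorted(G1_degree.values()) == sorted(G2_degree.values())
```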
codereview_new_python_data_5680
def is_path(G, path): A NetworkX graph. path : list - A list of node which defines the path to traverse Returns ------- ```suggestion A list of nodes which defines the path to traverse ``` def is_path(G, path): A NetworkX graph. path : list + A list of nodes which defines the path to traverse Returns -------
codereview_new_python_data_5681
def topological_sort(G): def lexicographical_topological_sort(G, key=None): - """Generates a unique ordering of nodes by first sorting topologically (for which there are often multiple valid orderings) and then additionally by sorting lexicographically. A topological sort arranges the nodes of a directed graph so that the The first line should be short -- because it gets used in the docs as the text describing links to this doc_string. I'd suggest keeping this sentence as the first part of the main description. Something like: ```suggestion """Generate the nodes in the unique lexicographical topological sort order. Generates a unique ordering of nodes by first sorting topologically (for which there are often multiple valid orderings) and then additionally by sorting lexicographically. ``` def topological_sort(G): def lexicographical_topological_sort(G, key=None): + """Generate the nodes in the unique lexicographical topological sort order. + + Generates a unique ordering of nodes by first sorting topologically (for which there are often multiple valid orderings) and then additionally by sorting lexicographically. A topological sort arranges the nodes of a directed graph so that the
codereview_new_python_data_5682
def test_rescale_layout_dict(self): for k, v in expectation.items(): assert (s_vpos[k] == v).all() s_vpos = nx.rescale_layout_dict(vpos, scale=2) -<<<<<<< HEAD - expectation = { 0: np.array((-2, -2)), 1: np.array((2, 2)), I think this line is causing the test failures. Does it fail locally? def test_rescale_layout_dict(self): for k, v in expectation.items(): assert (s_vpos[k] == v).all() s_vpos = nx.rescale_layout_dict(vpos, scale=2) expectation = { 0: np.array((-2, -2)), 1: np.array((2, 2)),
codereview_new_python_data_5683
def test_ancestors_descendants_undirected(): def test_compute_v_structures_raise(): G = nx.Graph() - pytest.raises(nx.NetworkXError, nx.compute_v_structures, G) def test_compute_v_structures(): `nx.DiGraph()` inside the test? def test_ancestors_descendants_undirected(): def test_compute_v_structures_raise(): G = nx.Graph() + pytest.raises(nx.NetworkXNotImplemented, nx.compute_v_structures, G) def test_compute_v_structures():
codereview_new_python_data_5684
def _bfs_with_marks(G, start_node, check_set): # memoize where we visited so far visited[neighbr] = None - # mark the node in Z' and do not continue # along that path if neighbr in check_set: marked.add(neighbr) ```suggestion # mark the node in 'check_set' and do not continue ``` def _bfs_with_marks(G, start_node, check_set): # memoize where we visited so far visited[neighbr] = None + # mark the node in 'check_set' and do not continue # along that path if neighbr in check_set: marked.add(neighbr)
codereview_new_python_data_5685
def test_undirected_graphs_are_not_supported(): """ Test that undirected graphs are not supported. - d-separation and its related algorithms does not apply in the case of undirected graphs. """ g = nx.path_graph(3, nx.Graph) ```suggestion d-separation and its related algorithms do not apply in ``` def test_undirected_graphs_are_not_supported(): """ Test that undirected graphs are not supported. + d-separation and its related algorithms do not apply in the case of undirected graphs. """ g = nx.path_graph(3, nx.Graph)
codereview_new_python_data_5686
def strategy_saturation_largest_first(G, colors): ) # If 0 nodes have been colored, simply choose the node of highest degree. - if len(colors) == 0: node = max(G, key=G.degree) yield node # Add the color 0 to the distinct colors set for each I think PEP8 standards say `if len(colors) == 0:` should be written `if not colors:` def strategy_saturation_largest_first(G, colors): ) # If 0 nodes have been colored, simply choose the node of highest degree. + if not colors: node = max(G, key=G.degree) yield node # Add the color 0 to the distinct colors set for each
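The truthiness rule behind the suggestion, in isolation: empty containers are falsy, so `if not colors:` and `if len(colors) == 0:` agree, with the former being the idiomatic spelling.

```python
colors = {}

# PEP 8: rely on the fact that empty containers are falsy instead of
# comparing their length against zero.
if len(colors) == 0:  # works, but verbose
    pass
if not colors:        # idiomatic spelling of the same check
    pass

assert (len(colors) == 0) == (not colors)
```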
codereview_new_python_data_5687
def naive_all_pairs_lowest_common_ancestor(G, pairs=None): if pairs is None: pairs = combinations_with_replacement(G, 2) else: - # Convert iterator to iterable, if necessary, trim duplicates - pairs = set(pairs) # Verify that each of the nodes in the provided pairs is in G nodeset = set(G) for pair in pairs: Using a set here can/will break the order in which the pairs+lca are yielded. If order is important, `list` can be used instead. def naive_all_pairs_lowest_common_ancestor(G, pairs=None): if pairs is None: pairs = combinations_with_replacement(G, 2) else: + # Convert iterator to iterable, if necessary. Trim duplicates. + pairs = dict.fromkeys(pairs) # Verify that each of the nodes in the provided pairs is in G nodeset = set(G) for pair in pairs:
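Why the reviewer steers away from `set` and the fix lands on `dict.fromkeys`: both deduplicate, but only the dict keeps the first-seen order. A small sketch with made-up pairs.

```python
pairs = [(1, 2), (3, 4), (1, 2), (2, 1)]

# set() removes duplicates but makes no ordering promise.
deduped_unordered = set(pairs)
assert deduped_unordered == {(1, 2), (3, 4), (2, 1)}

# dict.fromkeys() also removes duplicates, and dicts preserve insertion order
# (guaranteed since Python 3.7), so the first-seen order survives.
deduped_ordered = dict.fromkeys(pairs)
assert list(deduped_ordered) == [(1, 2), (3, 4), (2, 1)]
```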
codereview_new_python_data_5688
def bfs_layers(G, sources): raise nx.NetworkXError(f"The node {source} is not in the graph.") # this is basically BFS, except that the current layer only stores the nodes at while current_layer: yield current_layer next_layer = list() Did this comment get cutoff? def bfs_layers(G, sources): raise nx.NetworkXError(f"The node {source} is not in the graph.") # this is basically BFS, except that the current layer only stores the nodes at + # the same distance from sources at each iteration while current_layer: yield current_layer next_layer = list()
codereview_new_python_data_5689
def bridges(G, root=None): chains = nx.chain_decomposition(H, root=root) chain_edges = set(chain.from_iterable(chains)) H_copy = H.copy() - if root: cc_root = nx.node_connected_component(H, root) for node in H.nodes(): if node not in cc_root: ```suggestion if root is None: ``` Using `root=0` will not work when the if clause is `if root:`. Here we are checking whether there was no input, so we want to look explicitly at whether the root is the None object. `if root is None:` def bridges(G, root=None): chains = nx.chain_decomposition(H, root=root) chain_edges = set(chain.from_iterable(chains)) H_copy = H.copy() + if root is None: cc_root = nx.node_connected_component(H, root) for node in H.nodes(): if node not in cc_root:
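The falsy-node pitfall the reviewer describes, reduced to a toy function (`component_size` is invented for the sketch): node `0` is falsy, so `if root:` misreads it as "no root supplied".

```python
def component_size(root=None):
    """Toy stand-in for an optional node argument like `root` above."""
    if root:  # BUG: node 0 is falsy, so it looks like "no root supplied"
        return "explicit root"
    return "no root given"


def component_size_fixed(root=None):
    if root is not None:  # only the sentinel None means "not supplied"
        return "explicit root"
    return "no root given"


assert component_size(0) == "no root given"        # wrong answer for node 0
assert component_size_fixed(0) == "explicit root"  # correct
```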
codereview_new_python_data_5690
def _build_chain(G, u, v, visited): u, v = v, G.nodes[v]["parent"] yield u, v - # Check if the root is in the graph G. If not, raise a NodeNotFound - # error. if root is not None and root not in G: raise nx.NodeNotFound("Root node %s is not in graph" % root) ```suggestion # Check if the root is in the graph G. If not, raise NodeNotFound ``` very minor change -- only for looks def _build_chain(G, u, v, visited): u, v = v, G.nodes[v]["parent"] yield u, v + # Check if the root is in the graph G. If not, raise NodeNotFound if root is not None and root not in G: raise nx.NodeNotFound("Root node %s is not in graph" % root)
codereview_new_python_data_5691
def _build_chain(G, u, v, visited): # Check if the root is in the graph G. If not, raise NodeNotFound if root is not None and root not in G: - raise nx.NodeNotFound("Root node %s is not in graph" % root) # Create a directed version of H that has the DFS edges directed # toward the root and the nontree edges directed away from the root ```suggestion raise nx.NodeNotFound(f"Root node {root} is not in graph") ``` Just a nit, but I think [f-strings](https://docs.python.org/3/tutorial/inputoutput.html#formatted-string-literals) are generally preferred over the printf-style string interpolation. def _build_chain(G, u, v, visited): # Check if the root is in the graph G. If not, raise NodeNotFound if root is not None and root not in G: + raise nx.NodeNotFound(f"Root node {root} is not in graph") # Create a directed version of H that has the DFS edges directed # toward the root and the nontree edges directed away from the root
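The two formatting styles side by side, for reference; both build the same message, and the f-string keeps the value inline with the text. The node value is a placeholder.

```python
root = 42  # placeholder value for the sketch

old_style = "Root node %s is not in graph" % root  # printf-style interpolation
new_style = f"Root node {root} is not in graph"    # f-string, Python 3.6+

assert old_style == new_style
```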
codereview_new_python_data_5692
def bridges(G, root=None): chains = nx.chain_decomposition(H, root=root) chain_edges = set(chain.from_iterable(chains)) H_copy = H.copy() - if root is None: H = H.subgraph(nx.node_connected_component(H, root)).copy() for u, v in H.edges(): if (u, v) not in chain_edges and (v, u) not in chain_edges: ```suggestion if root is not None: ``` def bridges(G, root=None): chains = nx.chain_decomposition(H, root=root) chain_edges = set(chain.from_iterable(chains)) H_copy = H.copy() + if root is not None: H = H.subgraph(nx.node_connected_component(H, root)).copy() for u, v in H.edges(): if (u, v) not in chain_edges and (v, u) not in chain_edges:
codereview_new_python_data_5693
def closeness_centrality(G, u=None, distance=None, wf_improved=True): distance : edge attribute key, optional (default=None) Use the specified edge attribute as the edge distance in shortest - path calculations. If `None` (the default), all edges are assumed to have - a distance of 1. No check is performed to ensure that edges have the - provided attribute. wf_improved : bool, optional (default=True) If True, scale by the fraction of nodes reachable. This gives the ```suggestion path calculations. If `None` (the default) all edges have a distance of 1. Absent edge attributes are assigned a distance of 1. Note that no check is performed to ensure that edges have the provided attribute. ``` def closeness_centrality(G, u=None, distance=None, wf_improved=True): distance : edge attribute key, optional (default=None) Use the specified edge attribute as the edge distance in shortest + path calculations. If `None` (the default) all edges have a distance of 1. + Absent edge attributes are assigned a distance of 1. Note that no check + is performed to ensure that edges have the provided attribute. wf_improved : bool, optional (default=True) If True, scale by the fraction of nodes reachable. This gives the
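A quick check of the behaviour the amended docstring describes: in networkx's weighted shortest-path code an edge without the named attribute falls back to a distance of 1. The tiny graph below is made up for the sketch.

```python
import networkx as nx

G = nx.Graph()
G.add_edge("a", "b", weight=5)  # has the attribute
G.add_edge("b", "c")            # attribute absent, so it counts as 1

# 5 (a-b) + 1 (b-c, defaulted) = 6
assert nx.shortest_path_length(G, "a", "c", weight="weight") == 6
```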
codereview_new_python_data_5694
def descendants(G, source): Examples -------- >>> DG = nx.path_graph(5, create_using=nx.DiGraph) - >>> descendants = nx.descendants(DG, 2) - >>> descendants - {3, 4} - A node is not returned as one of its own descendantsgit . If you prefer to include the node itself as well, you can perform a manual union operation as follows: - >>> descendants = descendants.union({2}) See also -------- We'll need to keep the `sorted` as sets are not ordered, so giving the set output directly might result in intermittent doctest failures. See [the doctest docs on this subject](https://docs.python.org/3/library/doctest.html#warnings) for more info. ```suggestion >>> descendants = nx.descendants(DG, 2) >>> descendants {3, 4} ``` ```suggestion >>> sorted(nx.descendants(DG, 2)) [3, 4] ``` def descendants(G, source): Examples -------- >>> DG = nx.path_graph(5, create_using=nx.DiGraph) + >>> sorted(nx.descendants(DG, 2)) + [3, 4] + The `source` node is not a descendant of itself, but can be included manually: + >>> sorted(nx.descendants(DG, 2) | {2}) + [2, 3, 4] See also --------
codereview_new_python_data_5695
def descendants(G, source): Examples -------- >>> DG = nx.path_graph(5, create_using=nx.DiGraph) - >>> descendants = nx.descendants(DG, 2) - >>> descendants - {3, 4} - A node is not returned as one of its own descendantsgit . If you prefer to include the node itself as well, you can perform a manual union operation as follows: - >>> descendants = descendants.union({2}) See also -------- It looks like `git` snuck itself in here :). A minor wording suggestion (feel free to ignore): ```suggestion The `source` node is not a descendant of itself, but can be included manually: ``` def descendants(G, source): Examples -------- >>> DG = nx.path_graph(5, create_using=nx.DiGraph) + >>> sorted(nx.descendants(DG, 2)) + [3, 4] + The `source` node is not a descendant of itself, but can be included manually: + >>> sorted(nx.descendants(DG, 2) | {2}) + [2, 3, 4] See also --------
codereview_new_python_data_5696
def descendants(G, source): Examples -------- >>> DG = nx.path_graph(5, create_using=nx.DiGraph) - >>> descendants = nx.descendants(DG, 2) - >>> descendants - {3, 4} - A node is not returned as one of its own descendantsgit . If you prefer to include the node itself as well, you can perform a manual union operation as follows: - >>> descendants = descendants.union({2}) See also -------- Same deal here with the `sorted`. I also personally like using the binary operators for sets, but if you prefer the explicit method names feel free to ignore! ```suggestion >>> descendants = descendants.union({2}) ``` ```suggestion >>> sorted(nx.descendants(DG, 2) | {2}) [2, 3, 4] ``` def descendants(G, source): Examples -------- >>> DG = nx.path_graph(5, create_using=nx.DiGraph) + >>> sorted(nx.descendants(DG, 2)) + [3, 4] + The `source` node is not a descendant of itself, but can be included manually: + >>> sorted(nx.descendants(DG, 2) | {2}) + [2, 3, 4] See also --------
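The reason `sorted()` keeps coming up in the three rows above: set repr order is an implementation detail, so a doctest that prints the set verbatim can fail intermittently, while sorting pins the output down. A runnable restatement of the fixed doctest:

```python
import networkx as nx

DG = nx.path_graph(5, create_using=nx.DiGraph)

# nx.descendants returns a set; sort it so the expected output is deterministic.
assert sorted(nx.descendants(DG, 2)) == [3, 4]
# The source node is not its own descendant, but can be unioned in manually.
assert sorted(nx.descendants(DG, 2) | {2}) == [2, 3, 4]
```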
codereview_new_python_data_5697
from networkx.algorithms.isomorphism import fast_could_be_isomorphic from networkx.algorithms.isomorphism import faster_could_be_isomorphic from networkx.algorithms.isomorphism import is_isomorphic -from networkx.algorithms.isomorphism.vf2pp import vf2pp_is_isomorphic -from networkx.algorithms.isomorphism.vf2pp import vf2pp_mapping from networkx.algorithms.tree.branchings import maximum_branching from networkx.algorithms.tree.branchings import maximum_spanning_arborescence from networkx.algorithms.tree.branchings import minimum_branching Should we also add the `all_mappings` generator to the `__init__` imports here? from networkx.algorithms.isomorphism import fast_could_be_isomorphic from networkx.algorithms.isomorphism import faster_could_be_isomorphic from networkx.algorithms.isomorphism import is_isomorphic +from networkx.algorithms.isomorphism.vf2pp import * from networkx.algorithms.tree.branchings import maximum_branching from networkx.algorithms.tree.branchings import maximum_spanning_arborescence from networkx.algorithms.tree.branchings import minimum_branching
codereview_new_python_data_5698
def _update_Tinout(new_node1, new_node2, graph_params, state_params): } # Add the uncovered neighbors of node1 and node2 in T1 and T2 respectively - T1.discard(new_node1) - T2.discard(new_node2) T1.update(uncovered_neighbors_G1) T2.update(uncovered_neighbors_G2) - T1_out.discard(new_node1) - T2_out.discard(new_node2) T1_out.difference_update(uncovered_neighbors_G1) T2_out.difference_update(uncovered_neighbors_G2) def _restore_Tinout(popped_node1, popped_node2, graph_params, state_params): I think this might get tripped up by a selfloop from `new_node1` to itself. We discard the `new_node1` and then in the update we add it again if there is a selfloop. If we update before we discard then we will certainly remove it from the set. Can you construct a test which breaks with the current code? Then I think the solution is to `discard` after all the updates are done. def _update_Tinout(new_node1, new_node2, graph_params, state_params): } # Add the uncovered neighbors of node1 and node2 in T1 and T2 respectively T1.update(uncovered_neighbors_G1) T2.update(uncovered_neighbors_G2) + T1.discard(new_node1) + T2.discard(new_node2) T1_out.difference_update(uncovered_neighbors_G1) T2_out.difference_update(uncovered_neighbors_G2) + T1_out.discard(new_node1) + T2_out.discard(new_node2) def _restore_Tinout(popped_node1, popped_node2, graph_params, state_params):
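The ordering bug described above, reduced to plain sets: with a self-loop, the new node appears among its own uncovered neighbours, so discarding it before the update silently puts it back. The names below are placeholders.

```python
new_node = "n"
T = {"a"}
uncovered_neighbors = {"b", "n"}  # "n" is here because of a self-loop n-n

# Buggy order: discard first, then the update re-inserts the node.
T_buggy = set(T)
T_buggy.discard(new_node)
T_buggy.update(uncovered_neighbors)
assert new_node in T_buggy          # oops

# Fixed order: update first, discard last.
T_fixed = set(T)
T_fixed.update(uncovered_neighbors)
T_fixed.discard(new_node)
assert new_node not in T_fixed
```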
codereview_new_python_data_5699
from networkx.algorithms.isomorphism import fast_could_be_isomorphic from networkx.algorithms.isomorphism import faster_could_be_isomorphic from networkx.algorithms.isomorphism import is_isomorphic -from networkx.algorithms.isomorphism.vf2pp import vf2pp_is_isomorphic -from networkx.algorithms.isomorphism.vf2pp import vf2pp_mapping from networkx.algorithms.tree.branchings import maximum_branching from networkx.algorithms.tree.branchings import maximum_spanning_arborescence from networkx.algorithms.tree.branchings import minimum_branching I think what we actually want here is: ```suggestion from networkx.algorithms.isomorphism.vf2pp import * ``` This will make all *3* functions (including `vf2pp_all_mappings`) publicly accessible from the top-level `nx` namespace, as well as the `nx.algorithms` namespace. In fact, I'd advocate for making the new functions *only* accessible from the top-level namespace (i.e. so users *must* call `nx.vf2pp_is_isomorphic` and not `nx.algorithms.vf2pp_is_isomorphic`), but that's a break from the pattern that is typically used and would probably require some discussion. I don't want to get hung up on this point too much, so the above suggestion should get us where we need to be for now! from networkx.algorithms.isomorphism import fast_could_be_isomorphic from networkx.algorithms.isomorphism import faster_could_be_isomorphic from networkx.algorithms.isomorphism import is_isomorphic +from networkx.algorithms.isomorphism.vf2pp import * from networkx.algorithms.tree.branchings import maximum_branching from networkx.algorithms.tree.branchings import maximum_spanning_arborescence from networkx.algorithms.tree.branchings import minimum_branching
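What the wildcard import buys in the two rows above, sketched with a throwaway module built on the fly (module and function names are stand-ins): `from ... import *` re-exports exactly the names listed in `__all__`, so newly added public functions are picked up without touching the `__init__` again.

```python
import sys
import types

# Throwaway module standing in for networkx.algorithms.isomorphism.vf2pp;
# every name in it is a placeholder for this sketch.
mod = types.ModuleType("fake_vf2pp")
exec(
    "__all__ = ['vf2pp_is_isomorphic', 'vf2pp_mapping', 'vf2pp_all_mappings']\n"
    "def vf2pp_is_isomorphic(): pass\n"
    "def vf2pp_mapping(): pass\n"
    "def vf2pp_all_mappings(): pass\n"
    "def _private_helper(): pass\n",
    mod.__dict__,
)
sys.modules["fake_vf2pp"] = mod

# What `from fake_vf2pp import *` inside an __init__.py would re-export:
namespace = {}
exec("from fake_vf2pp import *", namespace)

assert "vf2pp_all_mappings" in namespace   # new public name picked up automatically
assert "_private_helper" not in namespace  # names outside __all__ stay private
```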
codereview_new_python_data_5700
def test_different_degree_sequences2(self): (8, 3), ] ) - G2 = nx.Graph( - [ - (0, 1), - (1, 2), - (0, 2), - (2, 3), - (3, 4), - (4, 5), - (5, 6), - (6, 3), - (4, 7), - (7, 8), - (8, 3), - (8, 0), - ] - ) assert not vf2pp_is_isomorphic(G1, G2) G1.add_edge(6, 1) This becomes easier to read if the second graph is created in a way that highlights its differences from G1, e.g. ```suggestion G2 = G1.copy() G2.add_edge(8, 0) ``` def test_different_degree_sequences2(self): (8, 3), ] ) + G2 = G1.copy() + G2.add_edge(8, 0) assert not vf2pp_is_isomorphic(G1, G2) G1.add_edge(6, 1)
codereview_new_python_data_5701
def vf2pp_mapping(G1, G2, node_labels=None, default_label=None): Parameters ---------- - G1,G2: NetworkX Graph or MultiGraph instances. - The two graphs to check for isomorphism or monomorphism. node_labels: Label name The label name of all nodes A couple quick nits about the parameter formatting: - There needs to be a space around both sides of the `:` for this to parse correctly. - The docstring should reflect the current behavior, so I think the monomorphism should be removed for now ```suggestion G1, G2 : NetworkX Graph or MultiGraph instances. The two graphs to check for isomorphism. ``` def vf2pp_mapping(G1, G2, node_labels=None, default_label=None): Parameters ---------- + G1, G2 : NetworkX Graph or MultiGraph instances. + The two graphs to check for isomorphism. node_labels: Label name The label name of all nodes
codereview_new_python_data_5702
def vf2pp_mapping(G1, G2, node_labels=None, default_label=None): Parameters ---------- - G1,G2: NetworkX Graph or MultiGraph instances. - The two graphs to check for isomorphism or monomorphism. node_labels: Label name The label name of all nodes The description to the right of the colon should describe the *type* of the parameter, so in this case it should be something like: ```suggestion node_labels : str, optional The name of the node attribute to be used when comparing nodes. The default is `None`, meaning node attributes are not considered in the comparison. Any node that doesn't not have the `node_labels` attribute uses `default_label` instead. ``` def vf2pp_mapping(G1, G2, node_labels=None, default_label=None): Parameters ---------- + G1, G2 : NetworkX Graph or MultiGraph instances. + The two graphs to check for isomorphism. node_labels: Label name The label name of all nodes
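A hedged sketch of the numpydoc layout the reviewers are asking for in the two rows above: a short one-line summary, then `name : type` with spaces around the colon and the description indented beneath it. The signature matches the row; the docstring text is illustrative only.

```python
def vf2pp_mapping(G1, G2, node_labels=None, default_label=None):
    """Return an isomorphism mapping between G1 and G2, if one exists.

    Parameters
    ----------
    G1, G2 : NetworkX Graph or MultiGraph instances
        The two graphs to check for isomorphism.
    node_labels : str, optional
        Name of the node attribute used when comparing nodes.
        ``None`` (the default) means node attributes are ignored.
    default_label : object, optional
        Label used for nodes that lack the ``node_labels`` attribute.
    """
```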
codereview_new_python_data_5703
def _cut_PT(u, v, graph_params, state_params): for label, G1_nbh in u_labels_neighbors.items(): G2_nbh = v_labels_neighbors[label] - if isinstance(G1, nx.MultiGraph): # Check for every neighbor in the neighborhood, if u-nbr1 has same edges as v-nbr2 u_nbrs_edges = sorted(G1.number_of_edges(u, x) for x in G1_nbh) v_nbrs_edges = sorted(G2.number_of_edges(v, x) for x in G2_nbh) ```suggestion if G1.is_multigraph(): ``` def _cut_PT(u, v, graph_params, state_params): for label, G1_nbh in u_labels_neighbors.items(): G2_nbh = v_labels_neighbors[label] + if G1.is_multigraph(): # Check for every neighbor in the neighborhood, if u-nbr1 has same edges as v-nbr2 u_nbrs_edges = sorted(G1.number_of_edges(u, x) for x in G1_nbh) v_nbrs_edges = sorted(G2.number_of_edges(v, x) for x in G2_nbh)
codereview_new_python_data_5704
def _one_level(G, m, partition, resolution=1, is_directed=False, seed=None): nbrs = {} for u in G: nbrs[u] = defaultdict(float) - for _, n, wt in G.out_edges(u, data=True): - nbrs[u][n] += wt["weight"] - for n, _, wt in G.in_edges(u, data=True): - nbrs[u][n] += wt["weight"] else: degrees = dict(G.degree(weight="weight")) Stot = [deg for deg in degrees.values()] Just a nit, but you can pass the edge attr name directly to the `data` kwarg to make this a little cleaner: ```suggestion for _, n, wt in G.out_edges(u, data="weight"): nbrs[u][n] += wt for n, _, wt in G.in_edges(u, data="weight"): nbrs[u][n] += wt ``` def _one_level(G, m, partition, resolution=1, is_directed=False, seed=None): nbrs = {} for u in G: nbrs[u] = defaultdict(float) + for _, n, wt in G.out_edges(u, data="weight"): + nbrs[u][n] += wt + for n, _, wt in G.in_edges(u, data="weight"): + nbrs[u][n] += wt else: degrees = dict(G.degree(weight="weight")) Stot = [deg for deg in degrees.values()]
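What passing an attribute name to `data=` yields, next to `data=True`, on a throwaway two-edge graph:

```python
import networkx as nx

G = nx.DiGraph()
G.add_edge("a", "b", weight=2.0)
G.add_edge("b", "c", weight=0.5)

# data=True yields the whole attribute dict ...
assert list(G.out_edges("a", data=True)) == [("a", "b", {"weight": 2.0})]
# ... while data="weight" yields just that attribute's value.
assert list(G.out_edges("a", data="weight")) == [("a", "b", 2.0)]
```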
codereview_new_python_data_5705
def test_directed_partition(): """ G = nx.DiGraph() H = nx.DiGraph() H.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) G_edges = [ (0, 2), The original failure reported in #5175 was also sensitive to node insertion order, at least by my testing (see [this comment](https://github.com/networkx/networkx/issues/5175#issuecomment-965602312)) so I think we also need: ```suggestion H = nx.DiGraph() G.add_nodes_from(range(10)) ``` def test_directed_partition(): """ G = nx.DiGraph() H = nx.DiGraph() + G.add_nodes_from(range(10)) H.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) G_edges = [ (0, 2),
codereview_new_python_data_5706
def geometric_edges(G, radius, p=2): """ # Input validation - every node must have a "pos" attribute for n, pos in G.nodes(data="pos"): - if pos == None: raise nx.NetworkXError( f"All nodes in `G` must have a 'pos' attribute. Check node {n}" ) I guess this line could be `if pos is None:` by some style standards. (Maybe a small speed up, but not sure) def geometric_edges(G, radius, p=2): """ # Input validation - every node must have a "pos" attribute for n, pos in G.nodes(data="pos"): + if pos is None: raise nx.NetworkXError( f"All nodes in `G` must have a 'pos' attribute. Check node {n}" )
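Why `is None` is more than style: `==` is delegated to the operand's `__eq__`, and types such as NumPy arrays answer elementwise, so `pos == None` is not even a plain bool. The position array below is made up.

```python
import numpy as np

pos = np.array([0.5, 1.5])  # a node position as an array, for the sketch

# Identity against the None singleton is unambiguous.
assert (pos is None) is False

# Equality is delegated to __eq__; NumPy answers elementwise, so `pos == None`
# is an array, and putting it straight into an `if` would raise a ValueError.
elementwise = pos == None  # noqa: E711 (shown on purpose)
assert isinstance(elementwise, np.ndarray)
```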
codereview_new_python_data_5718
if 'EXPECTED_BAKE_TIME' in item_name: # pylint: disable=raise-missing-from raise ImportError(f'\n\nMISSING CONSTANT --> \nWe can not find or import the constant {item_name} in your' - " 'lasagna.py' file.\nDid you mis-name or forget to define it?") from None else: item_name = item_name[:-1] + "()'" # pylint: disable=raise-missing-from ```suggestion f' function named {item_name}. \nDid you misname or forget to define it?') from None ``` Not a contraction. “misname,” Merriam-Webster.com Dictionary, https://www.merriam-webster.com/dictionary/misname. Accessed 2/22/2023. if 'EXPECTED_BAKE_TIME' in item_name: # pylint: disable=raise-missing-from raise ImportError(f'\n\nMISSING CONSTANT --> \nWe can not find or import the constant {item_name} in your' + " 'lasagna.py' file.\nDid you misname or forget to define it?") from None else: item_name = item_name[:-1] + "()'" # pylint: disable=raise-missing-from
codereview_new_python_data_5719
def combinations(target, size, exclude): return [[target]] else: for index in range(len(possible), 0, -1): - for seq in itertools.combinations(possible, i): if sum(seq) == target and len(seq) == size: result.append(list(seq)) return result ```suggestion for seq in itertools.combinations(possible, index): ``` def combinations(target, size, exclude): return [[target]] else: for index in range(len(possible), 0, -1): + for seq in itertools.combinations(possible, index): if sum(seq) == target and len(seq) == size: result.append(list(seq)) return result
codereview_new_python_data_5720
def parse(input_string): root = None current = None stack = list(input_string) - if input_string == '()': raise ValueError('tree with no nodes') Is this an accidental addition? def parse(input_string): root = None current = None stack = list(input_string) if input_string == '()': raise ValueError('tree with no nodes')
codereview_new_python_data_5783
def BoringPipeline(): loader = fw_iterator(pipeline, reader_name="reader", auto_reset=auto_reset_op) for _ in range(2): - loader = iter(loader) - for i in range(len(loader)): - data = next(loader) for j, d in enumerate(extract_data(data[0])): assert d[0] == i * batch_size + j, f"{d[0]} { i * batch_size + j}" Does this mean that the old code is not valid? def BoringPipeline(): loader = fw_iterator(pipeline, reader_name="reader", auto_reset=auto_reset_op) for _ in range(2): + loader_iter = iter(loader) + for i in range(len(loader_iter)): + data = next(loader_iter) for j, d in enumerate(extract_data(data[0])): assert d[0] == i * batch_size + j, f"{d[0]} { i * batch_size + j}"
codereview_new_python_data_5784
def BoringPipeline(): loader = fw_iterator(pipeline, reader_name="reader", auto_reset=auto_reset_op) for _ in range(2): - loader = iter(loader) - for i in range(len(loader)): - data = next(loader) for j, d in enumerate(extract_data(data[0])): assert d[0] == i * batch_size + j, f"{d[0]} { i * batch_size + j}" Hmmm... at this point I'd suggest to store it into a separate variable, should we switch to returning something else than `self` in the future. def BoringPipeline(): loader = fw_iterator(pipeline, reader_name="reader", auto_reset=auto_reset_op) for _ in range(2): + loader_iter = iter(loader) + for i in range(len(loader_iter)): + data = next(loader_iter) for j, d in enumerate(extract_data(data[0])): assert d[0] == i * batch_size + j, f"{d[0]} { i * batch_size + j}"
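The hazard behind the two rows above, restated with a toy loader class (invented for the sketch): rebinding `loader` to its own iterator throws away the object that knows how to restart iteration and report `len()`, while a separate `loader_iter` name keeps it available for every pass.

```python
class Loader:
    """Toy stand-in for a framework data loader; __iter__ hands out a fresh iterator."""

    def __init__(self, n):
        self._n = n

    def __len__(self):
        return self._n

    def __iter__(self):
        return iter(range(1, self._n + 1))


loader = Loader(3)
# Rebinding `loader` to its own iterator discards the Loader object:
# from then on len(loader) raises TypeError and nothing can restart it.
loader = iter(loader)

# Keeping a separate name preserves the loader for every outer pass.
loader = Loader(3)
seen = []
for _ in range(2):
    loader_iter = iter(loader)
    seen.extend(loader_iter)
assert seen == [1, 2, 3, 1, 2, 3]
```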
codereview_new_python_data_5785
def test_external_source_with_iter_cupy_stream(): for attempt in range(10): pipe = Pipeline(1, 3, 0) - pipe.set_outputs(fn.external_source(lambda i: [cp.array([attempt * 100 + i * 10 + 1.5], - dtype=cp.float32)])) pipe.build() for i in range(10): I believe breaking this lambda in the middle looks confusing. At the first glance it looks like `dtype` is an arg to the `fn.external_source`, not `cp.array` ```suggestion pipe.set_outputs(fn.external_source( lambda i: [cp.array([attempt * 100 + i * 10 + 1.5], dtype=cp.float32)] )) ``` def test_external_source_with_iter_cupy_stream(): for attempt in range(10): pipe = Pipeline(1, 3, 0) + def get_data(i): + return [cp.array([attempt * 100 + i * 10 + 1.5], dtype=cp.float32)] + + pipe.set_outputs(fn.external_source(get_data)) pipe.build() for i in range(10):
codereview_new_python_data_5786
def check_transpose_vs_numpy(device, batch_size, dim, total_volume, permutation) print("permutation ", permutation) eii1 = RandomlyShapedDataIterator(batch_size, max_shape=max_shape) eii2 = RandomlyShapedDataIterator(batch_size, max_shape=max_shape) - compare_pipelines(TransposePipeline(device, batch_size, "", iter(eii1), - permutation=permutation), - PythonOpPipeline(lambda x: transpose_func(x, permutation), - batch_size, "", iter(eii2)), - batch_size=batch_size, N_iterations=3) def all_permutations(n): ```suggestion compare_pipelines( TransposePipeline(device, batch_size, "", iter(eii1), permutation=permutation), PythonOpPipeline(lambda x: transpose_func(x, permutation), batch_size, "", iter(eii2)), ``` def check_transpose_vs_numpy(device, batch_size, dim, total_volume, permutation) print("permutation ", permutation) eii1 = RandomlyShapedDataIterator(batch_size, max_shape=max_shape) eii2 = RandomlyShapedDataIterator(batch_size, max_shape=max_shape) + compare_pipelines( + TransposePipeline(device, batch_size, "", iter(eii1), permutation=permutation), + PythonOpPipeline(lambda x: transpose_func(x, permutation), batch_size, "", iter(eii2)), + batch_size=batch_size, N_iterations=3 + ) def all_permutations(n):
codereview_new_python_data_5787
def feed_ndarray(tensor_or_tl, arr, cuda_stream=None, non_blocking=False): else: dali_tensor = tensor_or_tl - assert dali_type == arr.dtype, ("The element type of DALI Tensor/TensorList" - " doesn't match the element type of the target PyTorch Tensor:" f"{dali_type} vs {arr.dtype}") assert dali_tensor.shape() == list(arr.size()), ( Nitpick: ```suggestion assert dali_type == arr.dtype, (f"The element type of DALI Tensor/TensorList " f"doesn't match the element type of the target PyTorch Tensor: " f"{dali_type} vs {arr.dtype}") ``` def feed_ndarray(tensor_or_tl, arr, cuda_stream=None, non_blocking=False): else: dali_tensor = tensor_or_tl + assert dali_type == arr.dtype, (f"The element type of DALI Tensor/TensorList " + f"doesn't match the element type of the target PyTorch Tensor: " f"{dali_type} vs {arr.dtype}") assert dali_tensor.shape() == list(arr.size()), (
codereview_new_python_data_5788
def define_graph(self): [pipe.build() for pipe in pipes] iters = pipes[0].epoch_size("Reader") - assert (all(pipe.epoch_size("Reader") == iters for pipe in pipes)) iters_tmp = iters iters = iters // BATCH_SIZE if iters_tmp != iters * BATCH_SIZE: ```suggestion assert all(pipe.epoch_size("Reader") == iters for pipe in pipes) ``` def define_graph(self): [pipe.build() for pipe in pipes] iters = pipes[0].epoch_size("Reader") + assert all(pipe.epoch_size("Reader") == iters for pipe in pipes) iters_tmp = iters iters = iters // BATCH_SIZE if iters_tmp != iters * BATCH_SIZE:
codereview_new_python_data_5789
def generate(rng, ndim: int, batch_size: int, in_dtype: np.dtype, out_dtype: np. max_size = 100000 // batch_size out = [rng.uniform(lo, hi, size=random_shape(rng, ndim, max_size)).astype(in_dtype) for _ in range(batch_size)] if np.issubdtype(in_dtype, np.floating) and np.issubdtype(out_dtype, np.integer): for x in out: # avoid exactly halfway numbers - rounding is different for CPU and GPU Why removing that `out = replace_with_empty_volumes(rng, out, empty_volume_policy)`? def generate(rng, ndim: int, batch_size: int, in_dtype: np.dtype, out_dtype: np. max_size = 100000 // batch_size out = [rng.uniform(lo, hi, size=random_shape(rng, ndim, max_size)).astype(in_dtype) for _ in range(batch_size)] + out = replace_with_empty_volumes(rng, out, empty_volume_policy) if np.issubdtype(in_dtype, np.floating) and np.issubdtype(out_dtype, np.integer): for x in out: # avoid exactly halfway numbers - rounding is different for CPU and GPU
codereview_new_python_data_5790
def ref_color_twist(img, H, S, brightness, contrast, out_dtype): s, c = math.sin(angle), math.cos(angle) # Rotate the color components by angle and scale by S. # The fun part is that it doesn't really matter that much which - hmat = np.array([[1, 0, 0], - [0, c * S, s * S], [0, -s * S, c * S]]) m = np.matmul(yiq2rgb, np.matmul(hmat, rgb2yiq)) Was that actually wrong (i.e. triggering the linter?). In my PR I am applying kind of opposite review remarks: it seems we are going to edit such lines of code back and forth: from aligned columns to single whitespace. def ref_color_twist(img, H, S, brightness, contrast, out_dtype): s, c = math.sin(angle), math.cos(angle) # Rotate the color components by angle and scale by S. # The fun part is that it doesn't really matter that much which + hmat = np.array([[1, 0, 0], + [0, c * S, s * S], [0, -s * S, c * S]]) m = np.matmul(yiq2rgb, np.matmul(hmat, rgb2yiq))
codereview_new_python_data_5791
def test_output_layout(): pipe.set_outputs(out1, out2, out3, out4, out5, out6) pipe.build() out1, out2, out3, out4, out5, out6 = pipe.run() - assert (out1.layout() == 'ABC') - assert (out2.layout() == 'DE') - assert (out3.layout() == 'FGH') - assert (out4.layout() == 'FGH') - assert (out5.layout() == 'IJK') - assert (out6.layout() == '') @raises(RuntimeError, "*length of*output_layouts*greater than*") You can get rid of parenthesis. def test_output_layout(): pipe.set_outputs(out1, out2, out3, out4, out5, out6) pipe.build() out1, out2, out3, out4, out5, out6 = pipe.run() + assert out1.layout() == 'ABC' + assert out2.layout() == 'DE' + assert out3.layout() == 'FGH' + assert out4.layout() == 'FGH' + assert out5.layout() == 'IJK' + assert out6.layout() == '' @raises(RuntimeError, "*length of*output_layouts*greater than*")
codereview_new_python_data_5792
def set_all_values_to_255_batch(out0, in0): out0[0][:] = 255 def setup_out_shape(out_shape, in_shape): - out_shape = in_shape # noqa: F841 check_single_input(numba_function, run_fn=set_all_values_to_255_batch, out_types=[types.UINT8], in_types=[types.UINT8], outs_ndim=[3], ins_ndim=[3], It may be out of scope, but this assignment does seem to do nothing indeed. I wonder if the intention was more like out_shape[i] = in_shape[i] def set_all_values_to_255_batch(out0, in0): out0[0][:] = 255 def setup_out_shape(out_shape, in_shape): + pass check_single_input(numba_function, run_fn=set_all_values_to_255_batch, out_types=[types.UINT8], in_types=[types.UINT8], outs_ndim=[3], ins_ndim=[3],
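The no-op the reviewers spot, in isolation: rebinding a parameter inside a function never reaches the caller; only in-place mutation of the passed object does. The shape lists are placeholders.

```python
def setup_out_shape_noop(out_shape, in_shape):
    out_shape = in_shape     # rebinds the local name only; the caller sees nothing


def setup_out_shape_mutating(out_shape, in_shape):
    out_shape[:] = in_shape  # mutates the object the caller passed in


out_shape, in_shape = [0, 0, 0], [480, 640, 3]

setup_out_shape_noop(out_shape, in_shape)
assert out_shape == [0, 0, 0]      # unchanged: the assignment did nothing outside

setup_out_shape_mutating(out_shape, in_shape)
assert out_shape == [480, 640, 3]
```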
codereview_new_python_data_5793
def _compare_to_cv_distortion(in_img, out_img, q, no): decoded_img = cv2.cvtColor(decoded_img_bgr, cv2.COLOR_BGR2RGB) diff = cv2.absdiff(out_img, decoded_img) - diff_in_range = np.average( - diff) < 5, f"Absolute difference with the reference is too big: {np.average(diff)}" if dump_images or (dump_broken and not diff_in_range): i, j = no Isn't this piece of code broken? `diff_in_range` is a pair, thus it is always true-ish and the assert never fails and does not actually enforce the `np.average(diff) < 5` condition. def _compare_to_cv_distortion(in_img, out_img, q, no): decoded_img = cv2.cvtColor(decoded_img_bgr, cv2.COLOR_BGR2RGB) diff = cv2.absdiff(out_img, decoded_img) + diff_in_range = np.average(diff) < 5 if dump_images or (dump_broken and not diff_in_range): i, j = no
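Why the original line is broken, as the reviewer says: the trailing `, "message"` makes the right-hand side a two-element tuple, and a non-empty tuple is always truthy, so the threshold can never fail. A standalone sketch with made-up numbers:

```python
import numpy as np

diff = np.full((4, 4), 200.0)  # a clearly too-large difference, for the sketch

# Broken: the trailing string makes this a (bool, str) tuple, which is always truthy.
diff_in_range = np.average(diff) < 5, "Absolute difference with the reference is too big"
assert isinstance(diff_in_range, tuple)
assert bool(diff_in_range)  # "passes" even though the check failed

# Fixed: keep the comparison by itself.
diff_in_range = np.average(diff) < 5
assert not diff_in_range
```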
codereview_new_python_data_5794
def _check_data_batch(data, batch_size, layout): raise RuntimeError("The layout '{}' cannot describe {}-dimensional data".format(layout, dim)) -def _prep_data_for_feed_input(data, batch_size, layout, device_id=-1): def to_numpy(x): if _types._is_mxnet_array(x): return x.asnumpy() AFAIK, `None` and `-1` have different meanings in the API. Are we sure, that this won't break existing CPU-only code? def _check_data_batch(data, batch_size, layout): raise RuntimeError("The layout '{}' cannot describe {}-dimensional data".format(layout, dim)) +def _prep_data_for_feed_input(data, batch_size, layout, device_id=None): def to_numpy(x): if _types._is_mxnet_array(x): return x.asnumpy()
codereview_new_python_data_5795
def process_index(idx, dim): def _check(maybe_node): if not isinstance(maybe_node, DataNode): - raise TypeError("Expected outputs of type compatible with \"DataNode\"." - " Received output type with name \"{}\" that does not match.".format( - type(maybe_node).__name__)) ```suggestion raise TypeError(f"Expected outputs of type compatible with \"DataNode\". " f"Received output type with name \"{type(maybe_node).__name__}\" " f"that does not match.") ``` def process_index(idx, dim): def _check(maybe_node): if not isinstance(maybe_node, DataNode): + raise TypeError(f"Expected outputs of type compatible with \"DataNode\". " + f"Received output type with name \"{type(maybe_node).__name__}\" " + f"that does not match.")
codereview_new_python_data_5796
def get_submodule(root, path): setattr(root, part, m) elif not isinstance(m, types.ModuleType): raise RuntimeError( - "The module {} already contains an attribute \"{}\", which is not a module, but {}". - format(root, part, m)) root = m return root ```suggestion raise RuntimeError( f"The module {root} already contains an attribute \"{part}\", " f"which is not a module, but {m}") ``` def get_submodule(root, path): setattr(root, part, m) elif not isinstance(m, types.ModuleType): raise RuntimeError( + f"The module {root} already contains an attribute \"{part}\", " + f"which is not a module, but {m}") root = m return root
codereview_new_python_data_5797
def cell_unpickle(): def cell_reducer(cell): - return (cell_unpickle, tuple(), { - 'cell_contents': cell.cell_contents - }, None, None, set_cell_state) class DaliCallbackPickler(pickle.Pickler): I think this reads a bit better... ```suggestion return (cell_unpickle, tuple(), {'cell_contents': cell.cell_contents}, None, None, set_cell_state) ``` def cell_unpickle(): def cell_reducer(cell): + return (cell_unpickle, + tuple(), + {'cell_contents': cell.cell_contents}, + None, + None, + set_cell_state) class DaliCallbackPickler(pickle.Pickler):
codereview_new_python_data_5798
def replace_with_empty_volumes(rng, input, empty_volume_policy): start = 0 end = rng.integers(1, len(input) // 3) elif empty_volume_policy == "right": - start = rng.integers(1, len(input) * 2 // 3) end = len(input) elif empty_volume_policy == "middle": start = rng.integers(1 + len(input) // 3, len(input) * 2 // 3) ```suggestion start = rng.integers(len(input) * 2 // 3, len(input) - 1) ``` def replace_with_empty_volumes(rng, input, empty_volume_policy): start = 0 end = rng.integers(1, len(input) // 3) elif empty_volume_policy == "right": + start = rng.integers(len(input) * 2 // 3, len(input) - 1) end = len(input) elif empty_volume_policy == "middle": start = rng.integers(1 + len(input) // 3, len(input) * 2 // 3)
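Context for the suggested bounds: `Generator.integers(low, high)` excludes `high` by default (pass `endpoint=True` to include it), which is what makes the exact arguments matter here. A quick check:

```python
import numpy as np

rng = np.random.default_rng(0)

draws = rng.integers(2, 5, size=1000)
# high is exclusive by default: only 2, 3 and 4 can appear.
assert draws.min() >= 2 and draws.max() <= 4

draws_inclusive = rng.integers(2, 5, size=1000, endpoint=True)
assert draws_inclusive.max() <= 5  # 5 is now a reachable value
```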
codereview_new_python_data_5799
from . import math # noqa: F401 -class enable_arithm_op: """Context-manager that enables arithmetic operators and slicing on TensorLists. Can also be used as a function. """ - def __init__(self): - self.prev = enable_arithm_op._arithm_op_enabled - enable_arithm_op._arithm_op_enabled = True def __enter__(self): pass def __exit__(self, type, value, traceback): - enable_arithm_op._arithm_op_enabled = False _arithm_op_enabled = False def is_arithm_op_enabled(): - return enable_arithm_op._arithm_op_enabled Shouldn't we replace it with self.prev? What about nesting? If someone enables arithm ops globally, there is no nice way of disabling them. from . import math # noqa: F401 +class set_arithm_op_enabled: """Context-manager that enables arithmetic operators and slicing on TensorLists. Can also be used as a function. """ + def __init__(self, mode=True): + self.prev = set_arithm_op_enabled._arithm_op_enabled + set_arithm_op_enabled._arithm_op_enabled = mode def __enter__(self): pass def __exit__(self, type, value, traceback): + set_arithm_op_enabled._arithm_op_enabled = self.prev _arithm_op_enabled = False def is_arithm_op_enabled(): + return set_arithm_op_enabled._arithm_op_enabled
codereview_new_python_data_5800
def eager_arithm_ops(data): def test_arithm_ops_cpu(): - eager.set_arithm_op_enabled() - pipe = pipeline_arithm_ops_cpu(get_data, batch_size=batch_size, num_threads=4, device_id=None) - compare_eager_with_pipeline(pipe, eager_op=eager_arithm_ops) How about a few tests on using it as a nested context manager (maybe mixed with a direct call)? def eager_arithm_ops(data): def test_arithm_ops_cpu(): + with eager.arithmetic(): + pipe = pipeline_arithm_ops_cpu(get_data, batch_size=batch_size, + num_threads=4, device_id=None) + compare_eager_with_pipeline(pipe, eager_op=eager_arithm_ops)
codereview_new_python_data_5801
def eager_arithm_ops(data): def test_arithm_ops_cpu(): - eager.set_arithm_op_enabled() - pipe = pipeline_arithm_ops_cpu(get_data, batch_size=batch_size, num_threads=4, device_id=None) - compare_eager_with_pipeline(pipe, eager_op=eager_arithm_ops) This kinda bugs me, that we enable the eager arithmetic ops and do not disable them. def eager_arithm_ops(data): def test_arithm_ops_cpu(): + with eager.arithmetic(): + pipe = pipeline_arithm_ops_cpu(get_data, batch_size=batch_size, + num_threads=4, device_id=None) + compare_eager_with_pipeline(pipe, eager_op=eager_arithm_ops)
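What the reviewers in the last three rows are converging on, sketched as a self-contained class (the name merely mirrors `eager.arithmetic`): remember the previous value on entry and restore it on exit instead of hard-coding `False`, so nesting and a globally enabled state both behave.

```python
class arithmetic:
    """Enable a module-level flag for the duration of a `with` block."""

    _enabled = False  # the global state being toggled

    def __init__(self, mode=True):
        self._mode = mode

    def __enter__(self):
        self._prev = arithmetic._enabled   # remember what was there before
        arithmetic._enabled = self._mode
        return self

    def __exit__(self, exc_type, exc, tb):
        arithmetic._enabled = self._prev   # restore, do not hard-code False


with arithmetic():
    assert arithmetic._enabled
    with arithmetic(False):               # nesting works
        assert not arithmetic._enabled
    assert arithmetic._enabled            # inner exit restored the outer value
assert not arithmetic._enabled            # back to the original state
```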
codereview_new_python_data_5802
def numpy_flip(data, h_dim, v_dim, d_dim, hor, ver, depth): def find_dims(layout): - return layout.find("W"), \ - layout.find("H"), \ - layout.find("D") class SynthPythonFlipPipeline(Pipeline): Can these three lines be a one-liner? Like ```python def find_dims(layout): return layout.find("W"), layout.find("H"), layout.find("D") ``` def numpy_flip(data, h_dim, v_dim, d_dim, hor, ver, depth): def find_dims(layout): + return layout.find("W"), layout.find("H"), layout.find("D") class SynthPythonFlipPipeline(Pipeline):
codereview_new_python_data_5803
def test_random_bbox_crop_no_labels(): test_box_shape = [200, 4] def get_boxes(): - out = [(np.random.randint(0, 255, size=test_box_shape, dtype=np.uint8) / - 255).astype(dtype=np.float32) for _ in range(batch_size)] return out boxes = fn.external_source(source=get_boxes) processed = fn.random_bbox_crop(boxes, The `/` sign at the end of this line might be confused with `\` as line break. If you feel, that it might be an issue, maybe you can consider breaking this line in some other manner? def test_random_bbox_crop_no_labels(): test_box_shape = [200, 4] def get_boxes(): + out = [ + (np.random.randint(0, 255, size=test_box_shape, dtype=np.uint8) / 255) + .astype(dtype=np.float32) for _ in range(batch_size)] return out boxes = fn.external_source(source=get_boxes) processed = fn.random_bbox_crop(boxes,
codereview_new_python_data_5804
def test_roi_random_crop(): [(20, 50, 10, 20, 30, 40), (20, 50, 100, 140, 30, 40), (0, 1, 10, 20, 80, 100)]: - yield check_roi_random_crop, ndim, batch_size, roi_start_min, roi_start_max, \ - roi_extent_min, roi_extent_max, crop_extent_min, crop_extent_max, in_shape_min, \ - in_shape_max, niter def check_roi_random_crop_error(shape_like_in=None, in_shape=None, crop_shape=None, roi_start=None, I think here you can actually yield a tuple and it will be just the same (but without backslashes): ```suggestion yield (check_roi_random_crop, ndim, batch_size, roi_start_min, roi_start_max, roi_extent_min, roi_extent_max, crop_extent_min, crop_extent_max, in_shape_min, in_shape_max, niter) ``` def test_roi_random_crop(): [(20, 50, 10, 20, 30, 40), (20, 50, 100, 140, 30, 40), (0, 1, 10, 20, 80, 100)]: + yield (check_roi_random_crop, ndim, batch_size, roi_start_min, roi_start_max, + roi_extent_min, roi_extent_max, crop_extent_min, crop_extent_max, in_shape_min, + in_shape_max, niter) def check_roi_random_crop_error(shape_like_in=None, in_shape=None, crop_shape=None, roi_start=None,
codereview_new_python_data_5805
def test_variable_batch_size_from_external_source(): @pipeline_def(batch_size=8, num_threads=3, device_id=0, seed=47, debug=True) def incorrect_variable_batch_size_from_es_pipeline(): src_data = np.zeros((1, 6, 64, 64, 3), dtype=np.uint8) images = fn.external_source(src_data) - return images, @raises(RuntimeError, glob=('Batch size must be uniform across an iteration.' One style of quotes in one file? ```suggestion @raises(RuntimeError, glob="Batch size must be uniform across an iteration. " "External Source operator returned batch size*") ``` def test_variable_batch_size_from_external_source(): @pipeline_def(batch_size=8, num_threads=3, device_id=0, seed=47, debug=True) def incorrect_variable_batch_size_from_es_pipeline(): + rng = fn.random.coin_flip(probability=0.5) src_data = np.zeros((1, 6, 64, 64, 3), dtype=np.uint8) images = fn.external_source(src_data) + return images, rng @raises(RuntimeError, glob=('Batch size must be uniform across an iteration.'
codereview_new_python_data_5806
def random_seed(): class CommonPipeline(Pipeline): def __init__(self, device): - super().__init__(BATCH_SIZE, NUM_WORKERS, DEVICE_ID, seed=SEED, exec_async=False, - exec_pipelined=False) self.input = ops.readers.File(file_root=images_dir) self.decode = ops.decoders.Image(device='mixed' if device == 'gpu' else 'cpu', output_type=types.RGB) Semantic grouping ```suggestion super().__init__(BATCH_SIZE, NUM_WORKERS, DEVICE_ID, seed=SEED, exec_async=False, exec_pipelined=False) ``` def random_seed(): class CommonPipeline(Pipeline): def __init__(self, device): + super().__init__(BATCH_SIZE, NUM_WORKERS, DEVICE_ID, seed=SEED, + exec_async=False, exec_pipelined=False) self.input = ops.readers.File(file_root=images_dir) self.decode = ops.decoders.Image(device='mixed' if device == 'gpu' else 'cpu', output_type=types.RGB)
codereview_new_python_data_5807
def test_api_fw_check1(iter_type, data_definition): pipe.run]: try: method() - assert (False) except RuntimeError: - assert (True) # disable check pipe.enable_api_check(False) for method in [pipe.schedule_run, pipe.share_outputs, pipe.release_outputs, pipe.outputs, pipe.run]: try: method() - assert (True) except RuntimeError: assert (False) yield check, iter_type WTF? ```suggestion pass ``` Better still - use `assert_raises`? def test_api_fw_check1(iter_type, data_definition): pipe.run]: try: method() + assert False except RuntimeError: + pass # disable check pipe.enable_api_check(False) for method in [pipe.schedule_run, pipe.share_outputs, pipe.release_outputs, pipe.outputs, pipe.run]: try: method() except RuntimeError: assert (False) yield check, iter_type
codereview_new_python_data_5808
def test_api_fw_check1(iter_type, data_definition): pipe.run]: try: method() - assert (False) except RuntimeError: - assert (True) # disable check pipe.enable_api_check(False) for method in [pipe.schedule_run, pipe.share_outputs, pipe.release_outputs, pipe.outputs, pipe.run]: try: method() - assert (True) except RuntimeError: assert (False) yield check, iter_type Why do we even need/want this check? Can't we just let the error be thrown and _see_ it in case of failure? def test_api_fw_check1(iter_type, data_definition): pipe.run]: try: method() + assert False except RuntimeError: + pass # disable check pipe.enable_api_check(False) for method in [pipe.schedule_run, pipe.share_outputs, pipe.release_outputs, pipe.outputs, pipe.run]: try: method() except RuntimeError: assert (False) yield check, iter_type
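The shape the reviewers in the two rows above prefer over try/except with bare asserts, using `pytest.raises` with a message match; the function under test is a stand-in.

```python
import pytest


def run_without_build():
    # Placeholder for a pipeline method that must fail before build().
    raise RuntimeError("Pipeline must be built first")


def test_run_requires_build():
    # Fails automatically if no RuntimeError (or the wrong message) is raised.
    with pytest.raises(RuntimeError, match="must be built"):
        run_without_build()
```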
codereview_new_python_data_5809
def shape_gen_f(): return random_shape(max_shape) def test_coin_flip(): batch_size = 8 for device in ['cpu', 'gpu']: - for max_shape, use_shape_like_in in [([100000], False), ([100000], True), (None, False)]: for probability in [None, 0.7, 0.5, 0.0, 1.0]: yield check_coin_flip, device, batch_size, max_shape, probability, use_shape_like_in Readability suffered. Please revert. def shape_gen_f(): return random_shape(max_shape) def test_coin_flip(): batch_size = 8 for device in ['cpu', 'gpu']: + for max_shape, use_shape_like_in in [ + ([100000], False), + ([100000], True), + (None, False) + ]: for probability in [None, 0.7, 0.5, 0.0, 1.0]: yield check_coin_flip, device, batch_size, max_shape, probability, use_shape_like_in
codereview_new_python_data_5810
def paths_index_paths_error(): def general_index_error( - index_file_contents, tar_file_path="db/webdataset/MNIST/devel-0.tar", ext="jpg" ): index_file = tempfile.NamedTemporaryFile() index_file.write(index_file_contents) ```suggestion def general_index_error(index_file_contents, tar_file_path="db/webdataset/MNIST/devel-0.tar", ext="jpg"): ``` def paths_index_paths_error(): def general_index_error( + index_file_contents, + tar_file_path="db/webdataset/MNIST/devel-0.tar", + ext="jpg" ): index_file = tempfile.NamedTemporaryFile() index_file.write(index_file_contents)
codereview_new_python_data_5811
def coco_pipe(coco_op, file_root, annotations_file, polygon_masks, pixelwise_mas def test_coco_reader_alias(): def check_coco_reader_alias(polygon_masks, pixelwise_masks): - new_pipe = coco_pipe(fn.readers.coco, file_root, train_annotations, polygon_masks, - pixelwise_masks) - legacy_pipe = coco_pipe(fn.coco_reader, file_root, train_annotations, polygon_masks, - pixelwise_masks) compare_pipelines(new_pipe, legacy_pipe, batch_size_alias_test, 5) file_root = os.path.join(test_data_root, 'db', 'coco_pixelwise', 'images') ```suggestion new_pipe = coco_pipe(fn.readers.coco, file_root, train_annotations, polygon_masks, pixelwise_masks) legacy_pipe = coco_pipe(fn.coco_reader, file_root, train_annotations, polygon_masks, pixelwise_masks) ``` def coco_pipe(coco_op, file_root, annotations_file, polygon_masks, pixelwise_mas def test_coco_reader_alias(): def check_coco_reader_alias(polygon_masks, pixelwise_masks): + new_pipe = coco_pipe(fn.readers.coco, file_root, train_annotations, + polygon_masks, pixelwise_masks) + legacy_pipe = coco_pipe(fn.coco_reader, file_root, train_annotations, + polygon_masks, pixelwise_masks) compare_pipelines(new_pipe, legacy_pipe, batch_size_alias_test, 5) file_root = os.path.join(test_data_root, 'db', 'coco_pixelwise', 'images')
codereview_new_python_data_5812
def next_power_of_two(x): class CropMirrorNormalizePipeline(Pipeline): def __init__(self, device, batch_size, num_threads=1, device_id=0, num_gpus=1, - dtype=types.FLOAT, output_layout="HWC", mirror_probability=0.0, mean=[0., 0., 0.], - std=[1., 1., 1.], scale=None, shift=None, pad_output=False): super().__init__(batch_size, num_threads, device_id, seed=7865) self.device = device self.input = ops.readers.Caffe(path=caffe_db_folder, shard_id=device_id, ```suggestion dtype=types.FLOAT, output_layout="HWC", mirror_probability=0.0, mean=[0., 0., 0.], std=[1., 1., 1.], scale=None, shift=None, pad_output=False): ``` def next_power_of_two(x): class CropMirrorNormalizePipeline(Pipeline): def __init__(self, device, batch_size, num_threads=1, device_id=0, num_gpus=1, + dtype=types.FLOAT, output_layout="HWC", mirror_probability=0.0, + mean=[0., 0., 0.], std=[1., 1., 1.], scale=None, shift=None, pad_output=False): super().__init__(batch_size, num_threads, device_id, seed=7865) self.device = device self.input = ops.readers.Caffe(path=caffe_db_folder, shard_id=device_id,
codereview_new_python_data_5932
def _update(self, force_update): # if self._extensions_goal_state.source == GoalStateSource.FastTrack: certs_uri = findtext(xml_doc, "Certificates") - if certs_uri is not None: - self._download_certificates(certs_uri) - self._check_certificates() - def _check_certificates(self): for extension in self.extensions_goal_state.extensions: for settings in extension.settings: if settings.protectedSettings is None: In case if we downloaded new certs then we are not updating the state of goal_state certs in self._certs. Next line check certificates is using self._certs which will have old certs. Or am I missing something? def _update(self, force_update): # if self._extensions_goal_state.source == GoalStateSource.FastTrack: certs_uri = findtext(xml_doc, "Certificates") + self._check_certificates(certs_uri) + def _check_certificates(self, certs_uri): + # Re-download certificates in case they have been removed from disk since last download + if certs_uri is not None: + self._download_certificates(certs_uri) + # Check that certificates needed by extensions are in goal state certs.summary for extension in self.extensions_goal_state.extensions: for settings in extension.settings: if settings.protectedSettings is None:
codereview_new_python_data_5933
def __setup_azure_slice(): if not os.path.exists(vmextensions_slice): files_to_create.append((vmextensions_slice, _VMEXTENSIONS_SLICE_CONTENTS)) - ## Log collector slice should be updated to remove MemoryLimit quota from previous GA versions slice_contents = _LOGCOLLECTOR_SLICE_CONTENTS_FMT.format(cpu_quota=_LOGCOLLECTOR_CPU_QUOTA) files_to_create.append((logcollector_slice, slice_contents)) I would rephrase this comment a little. In this specific instance we want to remove the limit set by 2.8, but in general what we want is to update the settings with the values used by the current version (which is what this code is doing) def __setup_azure_slice(): if not os.path.exists(vmextensions_slice): files_to_create.append((vmextensions_slice, _VMEXTENSIONS_SLICE_CONTENTS)) + # Update log collector slice contents slice_contents = _LOGCOLLECTOR_SLICE_CONTENTS_FMT.format(cpu_quota=_LOGCOLLECTOR_CPU_QUOTA) files_to_create.append((logcollector_slice, slice_contents))
codereview_new_python_data_5934
def _execute_test_suite(self, suite: TestSuiteDescription) -> bool: agent_test_logger.info("") except: # pylint: disable=bare-except self._report_test_result( suite_full_name, suite_name, Do we also need to add `failed = True` in this except block? Although it might not be necessary because the except blocks above handle this def _execute_test_suite(self, suite: TestSuiteDescription) -> bool: agent_test_logger.info("") except: # pylint: disable=bare-except + failed = True self._report_test_result( suite_full_name, suite_name,
codereview_new_python_data_5935
def __log_event(level, msg_, success_=True): def run(self, goal_state): try: - # Ignore new agents if update is disabled. The DCR flag will be removed after testing - if not self._autoupdate_enabled or not conf.get_enable_agent_update_in_dcr(): return self._gs_id = goal_state.extensions_goal_state.id do you mean you will remove the config flag once you are done with your tests? def __log_event(level, msg_, success_=True): def run(self, goal_state): try: + # Ignore new agents if update is disabled. The latter flag only used in e2e tests. + if not self._autoupdate_enabled or not conf.get_enable_ga_updates(): return self._gs_id = goal_state.extensions_goal_state.id
codereview_new_python_data_5936
def __log_event(level, msg_, success_=True): def run(self, goal_state): try: - # Ignore new agents if update is disabled. The DCR flag will be removed after testing - if not self._autoupdate_enabled or not conf.get_enable_agent_update_in_dcr(): return self._gs_id = goal_state.extensions_goal_state.id can we improve the name of the flag? (assuming it is not just a temporary flag) def __log_event(level, msg_, success_=True): def run(self, goal_state): try: + # Ignore new agents if update is disabled. The latter flag only used in e2e tests. + if not self._autoupdate_enabled or not conf.get_enable_ga_updates(): return self._gs_id = goal_state.extensions_goal_state.id
codereview_new_python_data_5937
console_handler = logging.StreamHandler() log.addHandler(console_handler) -log .setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s.%(msecs)03d [%(levelname)s] %(message)s', datefmt="%Y-%m-%dT%H:%M:%SZ") for handler in log.handlers: extra space here? `log .setLevel(logging.INFO)` console_handler = logging.StreamHandler() log.addHandler(console_handler) +log.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s.%(msecs)03d [%(levelname)s] %(message)s', datefmt="%Y-%m-%dT%H:%M:%SZ") for handler in log.handlers:
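The snippet above is essentially the standard library's logging setup; a self-contained version of the same wiring (the logger name and format string are illustrative) looks like this:
```python
import logging

log = logging.getLogger("e2e")   # illustrative logger name
console_handler = logging.StreamHandler()
log.addHandler(console_handler)
log.setLevel(logging.INFO)       # note: no space between "log" and ".setLevel"

formatter = logging.Formatter(
    '%(asctime)s.%(msecs)03d [%(levelname)s] %(message)s',
    datefmt="%Y-%m-%dT%H:%M:%SZ")
for handler in log.handlers:
    handler.setFormatter(formatter)

log.info("logging configured")
```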
codereview_new_python_data_5938
# BVT for RunCommand. # # Note that there are two incarnations of RunCommand (which are actually two different extensions): -# Microsoft.CPlat.Core.RunCommandHandlerLinux and Microsoft.OSTCExtensions.VMAccessForLinux. This -# test exercises both using the same strategy: execute the extension to create a file on the test -# VM, then fetch the contents of the file over SSH and compare against the known value. # import base64 import uuid Should we change this `Microsoft.OSTCExtensions.VMAccessForLinux` to `Microsoft.OSTCExtensions.RunCommand` # BVT for RunCommand. # # Note that there are two incarnations of RunCommand (which are actually two different extensions): +# Microsoft.CPlat.Core.RunCommandHandlerLinux and Microsoft.CPlat.Core.RunCommandLinux. This test +# exercises both using the same strategy: execute the extension to create a file on the test VM, +# then fetch the contents of the file over SSH and compare against the known value. # import base64 import uuid
codereview_new_python_data_5939
def _set_resource_usage_cgroups(cpu_cgroup_path, memory_cgroup_path):
    @staticmethod
    def _initialize_telemetry():
-        goalstate_properties = GoalStateProperties.RoleConfig | GoalStateProperties.HostingEnv
-        protocol = get_protocol_util().get_protocol(goalstate_properties=goalstate_properties)
-        protocol.client.update_goal_state(
-            goalstate_properties=goalstate_properties,
-            force_update=True)
        # Initialize the common parameters for telemetry events
        initialize_event_logger_vminfo_common_parameters(protocol)
To the naked eye it looks like either of those, but logically it's all of them :)
def _set_resource_usage_cgroups(cpu_cgroup_path, memory_cgroup_path):
    @staticmethod
    def _initialize_telemetry():
+        protocol = get_protocol_util().get_protocol()
+        protocol.client.reset_goal_state(goalstate_properties=GoalStateProperties.RoleConfig | GoalStateProperties.HostingEnv)
        # Initialize the common parameters for telemetry events
        initialize_event_logger_vminfo_common_parameters(protocol)
codereview_new_python_data_5940
def _set_resource_usage_cgroups(cpu_cgroup_path, memory_cgroup_path):
    @staticmethod
    def _initialize_telemetry():
-        protocol = get_protocol_util().get_protocol()
        protocol.client.reset_goal_state(goalstate_properties=GoalStateProperties.RoleConfig | GoalStateProperties.HostingEnv)
        # Initialize the common parameters for telemetry events
        initialize_event_logger_vminfo_common_parameters(protocol)
Looks like get_protocol() is still initializing the goal state with all properties?
def _set_resource_usage_cgroups(cpu_cgroup_path, memory_cgroup_path):
    @staticmethod
    def _initialize_telemetry():
+        protocol = get_protocol_util().get_protocol(init_goal_state=False)
        protocol.client.reset_goal_state(goalstate_properties=GoalStateProperties.RoleConfig | GoalStateProperties.HostingEnv)
        # Initialize the common parameters for telemetry events
        initialize_event_logger_vminfo_common_parameters(protocol)
codereview_new_python_data_5941
def update_goal_state(self, silent=False): Updates the goal state if the incarnation or etag changed """ try: - if not silent: - logger.info("Forcing an update of the goal state.") - - self._goal_state.update(silent=silent) except ProtocolError: raise we can remove this message since the force flag was removed def update_goal_state(self, silent=False): Updates the goal state if the incarnation or etag changed """ try: + if self._goal_state is None: + self._goal_state = GoalState(self, silent=silent) + else: + self._goal_state.update(silent=silent) except ProtocolError: raise
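The fix above replaces a forced refresh with a "create on first use, update afterwards" pattern. A generic sketch of that pattern — not the agent's actual GoalState class — is:
```python
class CachedState:
    """Create the cached object on first use; refresh it on later calls."""

    def __init__(self, factory, refresher):
        self._factory = factory
        self._refresher = refresher
        self._state = None

    def update(self):
        if self._state is None:
            self._state = self._factory()   # first call: build the state
        else:
            self._refresher(self._state)    # later calls: refresh in place
        return self._state


cache = CachedState(factory=dict, refresher=lambda d: d.update(refreshed=True))
print(cache.update())   # {}
print(cache.update())   # {'refreshed': True}
```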
codereview_new_python_data_5942
def report_ext_handler_status(self, vm_status, ext_handler, goal_state_changed):
            heartbeat = ext_handler_i.collect_heartbeat()
            if heartbeat is not None:
                handler_status.status = heartbeat.get('status')
-                handler_status.message = parse_formatted_message(heartbeat.get('formattedMessage'))
        except ExtensionError as e:
            ext_handler_i.set_handler_status(message=ustr(e), code=e.code)
You should check that the heartbeat actually has a 'formattedMessage'.
def report_ext_handler_status(self, vm_status, ext_handler, goal_state_changed):
            heartbeat = ext_handler_i.collect_heartbeat()
            if heartbeat is not None:
                handler_status.status = heartbeat.get('status')
+                if 'formattedMessage' in heartbeat:
+                    handler_status.message = parse_formatted_message(heartbeat.get('formattedMessage'))
        except ExtensionError as e:
            ext_handler_i.set_handler_status(message=ustr(e), code=e.code)
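The suggested guard boils down to checking for the key before parsing it. A tiny standalone sketch; the heartbeat payload shape here is simplified and assumed:
```python
from typing import Optional

def extract_message(heartbeat: dict) -> Optional[str]:
    """Return the formatted message only if the heartbeat actually carries one."""
    if 'formattedMessage' in heartbeat:
        formatted = heartbeat['formattedMessage'] or {}
        return formatted.get('message')
    return None

print(extract_message({'status': 'ready'}))                                         # None
print(extract_message({'status': 'ready', 'formattedMessage': {'message': 'ok'}}))  # ok
```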
codereview_new_python_data_5943
def update_goal_state(self, force_update=False, silent=False): Updates the goal state if the incarnation or etag changed or if 'force_update' is True """ try: - if force_update and not silent: logger.info("Forcing an update of the goal state.") if self._goal_state is None or force_update: nit: extra space def update_goal_state(self, force_update=False, silent=False): Updates the goal state if the incarnation or etag changed or if 'force_update' is True """ try: + if force_update and not silent: logger.info("Forcing an update of the goal state.") if self._goal_state is None or force_update:
codereview_new_python_data_5944
def _purge():
                if GoalStateHistory._purge_error_count > 0:
                    GoalStateHistory._purge_error_count = 0
                    logger.info("Successfully cleaned up the goal state history directory")
        except Exception as e:
I think it should be `>= 0`. In the happy-path scenario it will be 0, and we still want to log the "Successfully cleaned up" message.
def _purge():
                if GoalStateHistory._purge_error_count > 0:
                    GoalStateHistory._purge_error_count = 0
+                    # Log a success message when we are recovering from errors.
                    logger.info("Successfully cleaned up the goal state history directory")
        except Exception as e:
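The `> 0` vs `>= 0` question hinges on whether the message should be logged on every successful run or only when recovering from earlier failures. A minimal sketch of the recovery-only behaviour, using a made-up cleanup function:
```python
_purge_error_count = 0

def purge(cleanup):
    """Run cleanup; log a recovery message only when earlier attempts had failed."""
    global _purge_error_count
    try:
        cleanup()
        if _purge_error_count > 0:   # the check under discussion: recovery only
            _purge_error_count = 0
            print("Successfully cleaned up after earlier errors")
    except Exception as e:
        _purge_error_count += 1
        print(f"Cleanup failed ({_purge_error_count} time(s)): {e}")

def flaky():
    raise OSError("disk busy")

purge(flaky)            # Cleanup failed (1 time(s)): disk busy
purge(lambda: None)     # Successfully cleaned up after earlier errors
purge(lambda: None)     # quiet: nothing to recover from
```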
codereview_new_python_data_5945
def get_used_and_available_system_memory(): """ used_mem = available_mem = 0 free_cmd = ["free", "-b"] - try: - memory = shellutil.run_command(free_cmd) - for line in memory.split("\n"): - if ALL_MEMS_REGEX.match(line): - mems = line.split() - used_mem = int(mems[2]) - available_mem = int(mems[6]) # see "man free" for a description of these fields - except CommandError as e: - logger.warn("Cannot get the memory table. {0} failed: {1}", ustr(free_cmd), ustr(e)) return used_mem/(1024 ** 2), available_mem/(1024 ** 2) def get_nic_state(self, as_string=False): available would be 7 def get_used_and_available_system_memory(): """ used_mem = available_mem = 0 free_cmd = ["free", "-b"] + memory = shellutil.run_command(free_cmd) + for line in memory.split("\n"): + if ALL_MEMS_REGEX.match(line): + mems = line.split() + used_mem = int(mems[2]) + available_mem = int(mems[6]) # see "man free" for a description of these fields return used_mem/(1024 ** 2), available_mem/(1024 ** 2) def get_nic_state(self, as_string=False):
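For reference, the column positions can be checked against a captured `free -b` sample without shelling out; the output below is made up, but the field layout follows `man free` ("used" is the 3rd field, "available" the 7th):
```python
# Sample output of `free -b` (values are invented for illustration)
FREE_OUTPUT = """\
              total        used        free      shared  buff/cache   available
Mem:     8323002368  1234567890  5000000000    12345678  2088434478  6500000000
Swap:    2147479552           0  2147479552
"""

def parse_free(output: str):
    for line in output.splitlines():
        if line.startswith("Mem:"):
            fields = line.split()
            used = int(fields[2])        # 3rd column: "used"
            available = int(fields[6])   # 7th column: "available"
            return used / (1024 ** 2), available / (1024 ** 2)  # in MiB
    raise ValueError("no Mem: line found")

print(parse_free(FREE_OUTPUT))
```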
codereview_new_python_data_5946
def discovery(self): DiscoveryParser(sys.argv[2:]) def xml(self): - """Discovery server command handler.""" try: from xml_ci.parser import XMLParser XMLParser(sys.argv[2:]) Why is it done differently from the other CLI verbs? Maybe a comment can clarify this point to future developers looking at this script. def discovery(self): DiscoveryParser(sys.argv[2:]) def xml(self): + """ + XML validation command handler. + + New python dependency (XMLSchema) included in 2.10.X + Check it is installed, and report installation command if it is not. + """ try: from xml_ci.parser import XMLParser XMLParser(sys.argv[2:])
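A generic way to implement the suggested clarification is to guard the optional import and print an installation hint; the package name and message below are illustrative, not necessarily the project's exact wording:
```python
import sys

def xml_command(args):
    """Handle the XML validation verb, which needs an optional dependency."""
    try:
        import xmlschema  # only required for this verb
    except ImportError:
        print("The XML validation command requires the 'xmlschema' package.\n"
              "Install it with: python3 -m pip install xmlschema")
        sys.exit(1)
    # ... real validation would happen here ...
    print(f"xmlschema available, validating {args}")
```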
codereview_new_python_data_5956
-# Copyright 2020 Proyectos y Sistemas de Mantenimiento SL (eProsima). # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. ```suggestion # Copyright 2023 Proyectos y Sistemas de Mantenimiento SL (eProsima). ``` +# Copyright 2023 Proyectos y Sistemas de Mantenimiento SL (eProsima). # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
codereview_new_python_data_5957
-# Copyright 2020 Proyectos y Sistemas de Mantenimiento SL (eProsima). # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. ```suggestion # Copyright 2023 Proyectos y Sistemas de Mantenimiento SL (eProsima). ``` +# Copyright 2023 Proyectos y Sistemas de Mantenimiento SL (eProsima). # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
codereview_new_python_data_5959
def asan_line_splitter(
        line: str):
    return _common_line_splitter(
        line=line,
-        text_to_split_start='==ERROR: AddressSanitizer: ')
def tsan_line_splitter(
This leaves out some error kinds like `LeakSanitizer` (see [here](https://github.com/eProsima/Fast-DDS/actions/runs/4199782572)).
def asan_line_splitter(
        line: str):
    return _common_line_splitter(
        line=line,
+        text_to_split_start='==ERROR: ')
def tsan_line_splitter(
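The point of widening the marker to `==ERROR: ` is that LeakSanitizer reports use the same prefix as AddressSanitizer ones. A standalone sketch of prefix-based splitting, over a made-up log excerpt:
```python
def split_sanitizer_log(text: str, marker: str = "==ERROR: "):
    """Group a sanitizer log into one chunk per reported error (plus any preamble)."""
    chunks = [[]]
    for line in text.splitlines():
        if marker in line:
            chunks.append([])          # start a new chunk at every error header
        chunks[-1].append(line)
    return ["\n".join(c) for c in chunks if c]


LOG = """\
preamble line
==1234==ERROR: AddressSanitizer: heap-use-after-free on address 0x...
    #0 frame one
==1234==ERROR: LeakSanitizer: detected memory leaks
    Direct leak of 8 byte(s)
"""
for chunk in split_sanitizer_log(LOG):
    print("---")
    print(chunk)
```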
codereview_new_python_data_6010
def create_rest_apigw(apigateway_client):
    def _create_apigateway_function(*args, **kwargs):
        response = apigateway_client.create_rest_api(**kwargs)
-        rest_api_id = response.get("id")
-        rest_api_ids.append(rest_api_id)
-        resources = apigateway_client.get_resources(restApiId=rest_api_id)
        root_id = next(item for item in resources["items"] if item["path"] == "/")["id"]
-        return response.get("id"), response.get("name"), root_id
    yield _create_apigateway_function
Wouldn't suppressing the exception leave the resource existing in AWS if we hit the rate limit? I see the issue with moving `tests.integration.apigateway.test_apigateway_api.apigw_create_rest_api` into `fixtures.py`, as it does not return the same data and the new tests rely on it for snapshots. Not sure how we could proceed with that?
def create_rest_apigw(apigateway_client):
    def _create_apigateway_function(*args, **kwargs):
        response = apigateway_client.create_rest_api(**kwargs)
+        api_id = response.get("id")
+        rest_api_ids.append(api_id)
+        resources = apigateway_client.get_resources(restApiId=api_id)
        root_id = next(item for item in resources["items"] if item["path"] == "/")["id"]
+        return api_id, response.get("name"), root_id
    yield _create_apigateway_function
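The fixture above follows pytest's "factory fixture with teardown" pattern. A self-contained sketch of that pattern, using an in-memory registry instead of a real API Gateway client so it runs without AWS:
```python
import pytest

CREATED = {}  # stands in for the remote service

def create_api(name):
    api_id = f"id-{len(CREATED)}"
    CREATED[api_id] = name
    return api_id

def delete_api(api_id):
    CREATED.pop(api_id, None)

@pytest.fixture
def create_rest_api():
    created_ids = []

    def _factory(name):
        api_id = create_api(name)
        created_ids.append(api_id)   # track for teardown
        return api_id

    yield _factory
    for api_id in created_ids:       # clean up everything the test created
        delete_api(api_id)

def test_factory(create_rest_api):
    api_id = create_rest_api("demo")
    assert api_id in CREATED
```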
codereview_new_python_data_6011
def encode(self, obj: Any, file: IO[bytes]): :param obj: the object to encode :param file: the file to write the encoded data into - :return: the encoded object """ raise NotImplementedError nit: could be removed (as the `encode` method doesn't return a result, as per its type hints) def encode(self, obj: Any, file: IO[bytes]): :param obj: the object to encode :param file: the file to write the encoded data into """ raise NotImplementedError
codereview_new_python_data_6012
def template_to_json(template: str) -> str: # TODO: consider moving to transformers.py as well -def transform_template(template: dict, parameters: list) -> Dict: result = dict(template) # apply 'Fn::Transform' intrinsic functions (note: needs to be applied before global # transforms below, as some utils - incl samtransformer - expect them to be resolved already) - result = apply_transform_intrinsic_functions(result) # apply global transforms transformations = format_transforms(result.get("Transform", [])) That might turn out to be a bit of a problem. As far as I understand this is actually the opposite of what CloudFormation does. def template_to_json(template: str) -> str: # TODO: consider moving to transformers.py as well +def transform_template(template: dict, parameters: list, stack=None) -> Dict: result = dict(template) # apply 'Fn::Transform' intrinsic functions (note: needs to be applied before global # transforms below, as some utils - incl samtransformer - expect them to be resolved already) + result = apply_transform_intrinsic_functions(result, stack=stack) # apply global transforms transformations = format_transforms(result.get("Transform", []))
codereview_new_python_data_6013
def _visit(obj, **_): transformer_class = transformers.get(transform_name) if transformer_class: transformer = transformer_class() - properties = transform.get("Parameters") or {} - return transformer.transform(properties) return obj return recurse_object(template, _visit) ```suggestion parameters = transform.get("Parameters") or {} return transformer.transform(parameters) ``` small nit for consistency def _visit(obj, **_): transformer_class = transformers.get(transform_name) if transformer_class: transformer = transformer_class() + parameters = transform.get("Parameters") or {} + return transformer.transform(parameters) return obj return recurse_object(template, _visit)
codereview_new_python_data_6014
def create_event_source_mapping( raise InvalidParameterValueException("Unrecognized event source.", Type="User") service = extract_service_from_arn(request.get("EventSourceArn")) - if "StartingPosition" not in request and service in ["dynamodb", "kinesis", "kafka"]: raise InvalidParameterValueException( "1 validation error detected: Value null at 'startingPosition' failed to satisfy constraint: Member must not be null.", Type="User", ```suggestion if service in ["dynamodb", "kinesis", "kafka"] and "StartingPosition" not in request: ``` nit: IMO, it's a bit more natural to read this way :thinking: Also, I think this should also be evaluated in update_event_source_mapping. We'll need to extend these validations, especially for event source mappings at some point even further, since there's a lot of differences in which sets of values are valid depending on the source service. def create_event_source_mapping( raise InvalidParameterValueException("Unrecognized event source.", Type="User") service = extract_service_from_arn(request.get("EventSourceArn")) + if service in ["dynamodb", "kinesis", "kafka"] and "StartingPosition" not in request: raise InvalidParameterValueException( "1 validation error detected: Value null at 'startingPosition' failed to satisfy constraint: Member must not be null.", Type="User",
codereview_new_python_data_6015
def cleanup( ) logs_client.delete_log_group(logGroupName=log_group_name) def test_put_target_id_validation(self, sqs_create_queue, events_client, sqs_client, snapshot): rule_name = f"rule-{short_uid()}" queue_url = sqs_create_queue() ```suggestion @pytest.mark.aws_validated def test_put_target_id_validation(self, sqs_create_queue, events_client, sqs_client, snapshot): ``` I guess this was executed against AWS since it has a snapshot right? def cleanup( ) logs_client.delete_log_group(logGroupName=log_group_name) + @pytest.mark.aws_validated def test_put_target_id_validation(self, sqs_create_queue, events_client, sqs_client, snapshot): rule_name = f"rule-{short_uid()}" queue_url = sqs_create_queue()
codereview_new_python_data_6016
def _proxy(*args, **kwargs) -> WerkzeugResponse: return _proxy return _decorate - - -# b'<?xml version="1.0" encoding="UTF-8"?>\n<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">eu-west-2</LocationConstraint>' -# b'<?xml version="1.0" encoding="UTF-8"?><LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">eu-west-2</LocationConstraint>' I will remove those, leftover from debugging 😅 sorry def _proxy(*args, **kwargs) -> WerkzeugResponse: return _proxy return _decorate
codereview_new_python_data_6017
def shorthand_constructor(loader: yaml.Loader, tag_suffix: str, node: yaml.Node) parts = node.value.partition(".") if len(parts) != 3: raise ValueError(f"Node value contains unexpected format for !GetAtt: {parts}") - return {fn_name: [parts[0], *parts[2:]]} if isinstance(node, yaml.ScalarNode): return {fn_name: node.value} nit: I guess this would be equivalent to: ``` return {fn_name: [parts[0], parts[2]]} ``` (more for my understanding.. but feel free to leave as-is 👍 ) def shorthand_constructor(loader: yaml.Loader, tag_suffix: str, node: yaml.Node) parts = node.value.partition(".") if len(parts) != 3: raise ValueError(f"Node value contains unexpected format for !GetAtt: {parts}") + return {fn_name: [parts[0], parts[2]]} if isinstance(node, yaml.ScalarNode): return {fn_name: node.value}
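The reviewer's question is really about `str.partition` semantics: it always returns a 3-tuple, so `[parts[0], *parts[2:]]` and `[parts[0], parts[2]]` are equivalent. A quick check (the attribute string is hypothetical):
```python
value = "MyResource.Outputs.Endpoint"   # hypothetical !GetAtt shorthand value

parts = value.partition(".")
print(parts)                  # ('MyResource', '.', 'Outputs.Endpoint')
print("NoDot".partition("."))  # ('NoDot', '', '') -- still a 3-tuple

assert [parts[0], *parts[2:]] == [parts[0], parts[2]]
print({"Fn::GetAtt": [parts[0], parts[2]]})
# {'Fn::GetAtt': ['MyResource', 'Outputs.Endpoint']}
```
The same property also means the `len(parts) != 3` guard can never trigger for a string input.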
codereview_new_python_data_6018
def register_ses_api_resource(): from localstack.services.edge import ROUTER - ROUTER.add(Resource("/_aws/ses/", ses_service_api_resource)) _EMAILS_ENDPOINT_REGISTERED = True Maybe use `EMAILS_ENDPOINT` (not sure if trailing slash is required) ```suggestion ROUTER.add(Resource(f"{EMAILS_ENDPOINT}/", ses_service_api_resource)) ``` def register_ses_api_resource(): from localstack.services.edge import ROUTER + ROUTER.add(Resource(EMAILS_ENDPOINT, ses_service_api_resource)) _EMAILS_ENDPOINT_REGISTERED = True
codereview_new_python_data_6019
def test_events_resource_types(deploy_cfn_template, cfn_client, snapshot): stack = deploy_cfn_template(template_path=template_path, max_wait=500) events = cfn_client.describe_stack_events(StackName=stack.stack_name)["StackEvents"] - resource_types = set([event["ResourceType"] for event in events]) - snapshot.match("resource_types", dict.fromkeys(resource_types, 0)) Any reason to not just use the set or a sorted list of the types? :thinking: def test_events_resource_types(deploy_cfn_template, cfn_client, snapshot): stack = deploy_cfn_template(template_path=template_path, max_wait=500) events = cfn_client.describe_stack_events(StackName=stack.stack_name)["StackEvents"] + resource_types = list(set([event["ResourceType"] for event in events])) + resource_types.sort() + snapshot.match("resource_types", resource_types)
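Using a sorted list of the unique types, as suggested, gives the snapshot a deterministic, directly comparable value. A tiny sketch with made-up events:
```python
events = [
    {"ResourceType": "AWS::CloudFormation::Stack"},
    {"ResourceType": "AWS::SNS::Topic"},
    {"ResourceType": "AWS::SNS::Topic"},
    {"ResourceType": "AWS::SQS::Queue"},
]

# set comprehension removes duplicates, sorted() fixes the order
resource_types = sorted({event["ResourceType"] for event in events})
print(resource_types)
# ['AWS::CloudFormation::Stack', 'AWS::SNS::Topic', 'AWS::SQS::Queue']
```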
codereview_new_python_data_6020
def create_launch_template( status_code=400, ) - result = call_moto(context) - return result @handler("ModifyLaunchTemplate", expand=False) def modify_launch_template( ```suggestion return call_moto(context) ``` def create_launch_template( status_code=400, ) + return call_moto(context) @handler("ModifyLaunchTemplate", expand=False) def modify_launch_template(
codereview_new_python_data_6021
def create(template_name): yield create for id in template_ids: - ec2_client.delete_launch_template(LaunchTemplateId=id) class TestEc2Integrations: You can make sure all templates are deleted despite exceptions with: ```suggestion with contextlib.suppress(ClientError): ec2_client.delete_launch_template(LaunchTemplateId=id) ``` def create(template_name): yield create for id in template_ids: + with contextlib.suppress(ClientError): + ec2_client.delete_launch_template(LaunchTemplateId=id) class TestEc2Integrations:
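`contextlib.suppress` keeps the teardown loop going even if one deletion fails. A runnable sketch with a stand-in delete function and `ValueError` instead of botocore's `ClientError`:
```python
import contextlib

def delete_template(template_id):
    if template_id == "already-gone":
        raise ValueError(f"template {template_id} not found")
    print(f"deleted {template_id}")

template_ids = ["lt-1", "already-gone", "lt-2"]
for template_id in template_ids:
    with contextlib.suppress(ValueError):   # best effort: keep deleting the rest
        delete_template(template_id)
```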
codereview_new_python_data_6022
class RequestParametersResolver: def resolve(self, context: ApiInvocationContext) -> IntegrationParameters: """ - Resolve integration request parameters from the integration request parameters :return: IntegrationParameters """ method_request_params: Dict[str, Any] = self.method_request_dict(context) nit: this docstring seems a bit redundant, maybe we could concretize it a bit more (no need to change now, could be done in one of the next iterations.. 👍 ) class RequestParametersResolver: def resolve(self, context: ApiInvocationContext) -> IntegrationParameters: """ + Resolve method request parameters into integration request parameters. + Integration request parameters, in the form of path variables, query strings + or headers, can be mapped from any defined method request parameters + and the payload. + :return: IntegrationParameters """ method_request_params: Dict[str, Any] = self.method_request_dict(context)
codereview_new_python_data_6023
def is_enabled(self):
def for_provider(
    provider: ServiceProvider,
    dispatch_table_factory: Callable[[ServiceProvider], DispatchTable] = None,
-    service_lifecycle_hook=None,
) -> "Service":
    """
    Factory method for creating services for providers. This method hides a bunch of legacy code and
Probably very much a nitpick, but I would make optional types explicit. If we ever introduce `mypy`, this would be one of the default checks, I believe.
def is_enabled(self):
def for_provider(
    provider: ServiceProvider,
    dispatch_table_factory: Callable[[ServiceProvider], DispatchTable] = None,
+    service_lifecycle_hook: ServiceLifecycleHook = None,
) -> "Service":
    """
    Factory method for creating services for providers. This method hides a bunch of legacy code and
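The explicit spelling the reviewer has in mind is `Optional[...]`. A minimal illustration (the `Hook` class is a placeholder):
```python
from typing import Optional

class Hook: ...

# Implicit: the default is None but the annotation claims a plain Hook
def for_provider_implicit(hook: Hook = None): ...

# Explicit: the annotation matches the default, which stricter type checkers expect
def for_provider_explicit(hook: Optional[Hook] = None): ...
```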
codereview_new_python_data_6024
def run_as_os_user(target: Callable, uid: str | int, gid: str | int = None): """ - Run the given callable under a different OS user ID and (optionally) group ID, in a forked subprocess """ def _wrapper(): Note: currently only accepting a `Callable` here for now - could be extended with `*args, **kwargs` in the future, as needed. 👍 def run_as_os_user(target: Callable, uid: str | int, gid: str | int = None): """ + Run the given callable under a different OS user and (optionally) group, in a forked subprocess. + :param target: the function to call in the subprocess + :param uid: either the user name (string) or numeric user ID + :param gid: optionally, either the group name (string) or numeric group ID """ def _wrapper():
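One possible shape for such a helper — a sketch only, not the project's implementation — forks a child, drops to the requested user/group with `os.setgid`/`os.setuid`, runs the callable, and waits. It needs a Unix system and root privileges to actually switch users:
```python
import os
import pwd
import grp

def run_as_user(target, uid, gid=None):
    """Fork, switch to the given user/group in the child, run target, and wait."""
    numeric_uid = pwd.getpwnam(uid).pw_uid if isinstance(uid, str) else uid
    numeric_gid = None
    if gid is not None:
        numeric_gid = grp.getgrnam(gid).gr_gid if isinstance(gid, str) else gid

    pid = os.fork()
    if pid == 0:                              # child process
        exit_code = 0
        try:
            if numeric_gid is not None:
                os.setgid(numeric_gid)        # drop the group first, while still privileged
            os.setuid(numeric_uid)
            target()
        except Exception as e:
            print(f"child failed: {e}")
            exit_code = 1
        finally:
            os._exit(exit_code)               # never return into the parent's code path
    os.waitpid(pid, 0)                        # parent: wait for the child

# Example (needs root): run_as_user(lambda: print(os.getuid()), "nobody")
```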
codereview_new_python_data_6025
def run_as_os_user(target: Callable, uid: str | int, gid: str | int = None):
    """
-    Run the given callable under a different OS user ID and (optionally) group ID, in a forked subprocess
    """
    def _wrapper():
Nit: it would be nice if the method docstring contained more information about the accepted formats of uid/gid (name as string and ID as int, as far as I can see).
def run_as_os_user(target: Callable, uid: str | int, gid: str | int = None):
    """
+    Run the given callable under a different OS user and (optionally) group, in a forked subprocess.
+    :param target: the function to call in the subprocess
+    :param uid: either the user name (string) or numeric user ID
+    :param gid: optionally, either the group name (string) or numeric group ID
    """
    def _wrapper():
codereview_new_python_data_6026
def test_macro_deployment( create_lambda_function( func_name=func_name, handler_file=macro_function_path, - runtime=Runtime.python3_8, client=lambda_client, - timeout=1, ) stack_with_macro = deploy_cfn_template( is there a reason for the fairly short timeout here? def test_macro_deployment( create_lambda_function( func_name=func_name, handler_file=macro_function_path, + runtime=Runtime.python3_9, client=lambda_client, ) stack_with_macro = deploy_cfn_template(
codereview_new_python_data_6027
TRACKED_ENV_VAR = [ "PROVIDER_OVERRIDE_S3", - "LAMBDA_RUNTIME_EXECUTOR", "DEBUG", "DISABLE_CORS_CHECK", "DISABLE_CORS_HEADERS", ```suggestion ``` seems redundant since the loop below filters all envs starting with `PROVIDER_OVERRIDE_` anyway TRACKED_ENV_VAR = [ "PROVIDER_OVERRIDE_S3", "DEBUG", "DISABLE_CORS_CHECK", "DISABLE_CORS_HEADERS",
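The reason the explicit entry is redundant: a prefix scan over `os.environ` already picks up every `PROVIDER_OVERRIDE_*` variable. A small sketch (variable names are illustrative):
```python
import os

TRACKED_ENV_VARS = ["DEBUG", "DISABLE_CORS_CHECK"]  # explicit, non-prefixed variables

def tracked_env():
    tracked = {name: os.environ[name] for name in TRACKED_ENV_VARS if name in os.environ}
    # any PROVIDER_OVERRIDE_* variable is picked up by the prefix scan,
    # so individual entries like PROVIDER_OVERRIDE_S3 need not be listed above
    for name, value in os.environ.items():
        if name.startswith("PROVIDER_OVERRIDE_"):
            tracked[name] = value
    return tracked

os.environ["PROVIDER_OVERRIDE_S3"] = "v2"
print(tracked_env())
```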