Columns:
  hash     stringlengths (40 to 40)
  repo     stringlengths (9 to 36)
  date     stringclasses (0 values)
  license  stringclasses (0 values)
  message  stringlengths (74 to 349)
  mods     listlengths (1 to 16)
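Below is a hedged sketch (not part of the dataset) of how rows with this schema could be read and inspected. The field names (hash, repo, date, license, message, mods) and the per-mod keys (change_type, diff, old_path, new_path) come from the rows shown here; the "commits.jsonl" file name and the JSON Lines layout are assumptions about how such a dump might be stored.

```python
import json

def iter_commits(path="commits.jsonl"):
    # Assumed layout: one JSON object per line, with the columns listed above.
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            yield json.loads(line)

for row in iter_commits():
    # Short hash plus the first line of the commit message as a summary.
    print(row["repo"], row["hash"][:8], row["message"].splitlines()[0])
    for mod in row["mods"]:
        # Each mod describes one file-level change and carries its unified diff.
        print("   ", mod["change_type"], mod["old_path"], "->", mod["new_path"])
```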
c27fd80c76b03bf187fa98029e2edb151644903a
burnysc2/python-sc2
null
null
Add different functions to print debug text. Add: debug box/line/sphere functions. Add: all debug requests are collected so the user can call send_debug() to send them as one batched debug draw request.
[ { "change_type": "MODIFY", "diff": "@@ -29,6 +29,10 @@ class Client(Protocol):\n super().__init__(ws)\n self._player_id = None\n self._game_result = None\n+ self._debug_texts = list()\n+ self._debug_lines = list()\n+ self._debug_boxes = list()\n+ self._debug_spheres = list()\n \n @property\n def in_game(self):\n@@ -212,7 +216,50 @@ class Client(Protocol):\n else:\n await self.debug_text([texts], [positions], color)\n \n- async def debug_create_unit(self, unit_type, amount_of_units, position, owner_id):\n+ def debug_text_simple(self, text, color=None):\n+ self._debug_texts.append(to_debug_message(text, color))\n+\n+ def debug_text_2d(self, text, pos, color=None, size=8):\n+ self._debug_texts.append(to_debug_message(text, color, pos, False, size))\n+\n+ def debug_text_3d(self, text, pos, color=None, size=8):\n+ self._debug_texts.append(to_debug_message(text, color, pos, True, size))\n+\n+ def debug_line_out(self, p0, p1, color=None):\n+ self._debug_lines.append(debug_pb.DebugLine(\n+ line=debug_pb.Line(p0=to_debug_point(p0), p1=to_debug_point(p1)),\n+ color=to_debug_color(color)))\n+\n+ def debug_box_out(self, p_min, p_max, color=None):\n+ self._debug_boxes.append(debug_pb.DebugBox(\n+ min=to_debug_point(p_min),\n+ max=to_debug_point(p_max),\n+ color=to_debug_color(color)\n+ ))\n+\t\t\n+ def debug_sphere_out(self, p, r, color=None):\n+ self._debug_spheres.append(debug_pb.DebugSphere(\n+ p=to_debug_point(p),\n+ r=r,\n+ color=to_debug_color(color)\n+ ))\n+\n+ async def send_debug(self):\n+\t\tawait self._execute(debug=sc_pb.RequestDebug(\n+ debug=[debug_pb.DebugCommand(draw=debug_pb.DebugDraw(\n+ text=self._debug_texts if len(self._debug_texts) > 0 else None,\n+ lines=self._debug_lines if len(self._debug_lines) > 0 else None,\n+ boxes=self._debug_boxes if len(self._debug_boxes) > 0 else None,\n+ spheres=self._debug_spheres if len(self._debug_spheres) > 0 else None\n+ ))]\n+\n+ ))\n+ self._debug_texts.clear()\n+ self._debug_lines.clear()\n+ self._debug_boxes.clear()\n+ self._debug_spheres.clear()\n+\t\t\n+\tasync def debug_create_unit(self, unit_type, amount_of_units, position, owner_id):\n # example:\n # await self._client.debug_create_unit(MARINE, 1, self._game_info.map_center, 1)\n assert isinstance(unit_type, UnitTypeId)\n@@ -228,14 +275,44 @@ class Client(Protocol):\n quantity=(amount_of_units)\n ))]\n ))\n- async def debug_text_simple(self, texts):\n- if not isinstance(texts, list):\n- texts = [texts]\n- await self._execute(debug=sc_pb.RequestDebug(\n- debug=[debug_pb.DebugCommand(draw=debug_pb.DebugDraw(\n- text=[debug_pb.DebugText(\n- text=text,\n- color=debug_pb.Color(r=1, g=1, b=1),\n- ) for text in texts]\n- ))]\n- ))\n+\t\t\n+ \n+\n+def to_debug_color(color):\n+ if color is None:\n+ return debug_pb.Color(r=255, g=255, b=255)\n+ else:\n+ r = getattr(color, \"r\", getattr(color, \"x\", 255))\n+ g = getattr(color, \"g\", getattr(color, \"y\", 255))\n+ b = getattr(color, \"b\", getattr(color, \"z\", 255))\n+ if r + g + b <= 3:\n+ r *= 255\n+ g *= 255\n+ b *= 255\n+\n+ return debug_pb.Color(r=int(r), g=int(g), b=int(b))\n+\n+\n+def to_debug_point(point):\n+ return common_pb.Point(x=point.x, y=point.y, z=getattr(point, \"z\", 0))\n+\n+\n+def to_debug_message(text, color=None, pos=None, is3d=False, size=8):\n+ text = text\n+ color = to_debug_color(color)\n+ size = size\n+ pt3d = None\n+ virtual_pos = None\n+\n+ if pos is not None:\n+ if is3d:\n+ pt3d = to_debug_point(pos)\n+ else:\n+ virtual_pos = to_debug_point(pos)\n+ return debug_pb.DebugText(\n+ color=color,\n+ text=text,\n+ 
virtual_pos=virtual_pos,\n+ world_pos=pt3d,\n+ size=size\n+ )\n", "new_path": "sc2/client.py", "old_path": "sc2/client.py" } ]
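For reference, a minimal usage sketch of the batched debug-draw API this commit introduces. The queueing methods (debug_text_simple, debug_line_out, debug_box_out, debug_sphere_out) and send_debug() come from the diff above; the Point3 helper and the draw_debug wrapper are illustrative assumptions, not part of the commit.

```python
from collections import namedtuple

# Any object exposing x, y (and optionally z) works for to_debug_point.
Point3 = namedtuple("Point3", ["x", "y", "z"])

async def draw_debug(client):
    # Each call only appends to the client's internal debug lists...
    client.debug_text_simple("hello from the bot")
    client.debug_line_out(Point3(10, 10, 8), Point3(20, 20, 8))
    client.debug_box_out(Point3(10, 10, 0), Point3(12, 12, 2))
    client.debug_sphere_out(Point3(15, 15, 8), 2)
    # ...and send_debug() flushes everything as a single batched request.
    await client.send_debug()
```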
7b2e711c738b864e4e0b7c35257225fb23cfa15d
rqlite/rqlite
null
null
Remove constraint check. It's too clever and is causing test practicality issues.
[ { "change_type": "MODIFY", "diff": "@@ -136,7 +136,7 @@ class Node(object):\n r.raise_for_status()\n return r.json()\n \n- def is_leader(self, constraint_check=True):\n+ def is_leader(self):\n '''\n is_leader returns whether this node is the cluster leader\n It also performs a check, to ensure the node nevers gives out\n@@ -144,28 +144,16 @@ class Node(object):\n '''\n \n try:\n- isLeaderRaft = self.status()['store']['raft']['state'] == 'Leader'\n- isLeaderNodes = self.nodes()[self.node_id]['leader'] is True\n+ return self.status()['store']['raft']['state'] == 'Leader'\n except requests.exceptions.ConnectionError:\n return False\n \n- if (isLeaderRaft != isLeaderNodes) and constraint_check:\n- raise AssertionError(\"conflicting states reported for leadership (raft: %s, nodes: %s)\"\n- % (isLeaderRaft, isLeaderNodes))\n- return isLeaderNodes\n-\n def is_follower(self):\n try:\n- isFollowerRaft = self.status()['store']['raft']['state'] == 'Follower'\n- isFollowersNodes = self.nodes()[self.node_id]['leader'] is False\n+ return self.status()['store']['raft']['state'] == 'Follower'\n except requests.exceptions.ConnectionError:\n return False\n \n- if isFollowerRaft != isFollowersNodes:\n- raise AssertionError(\"conflicting states reported for followership (raft: %s, nodes: %s)\"\n- % (isFollowerRaft, isFollowersNodes))\n- return isFollowersNodes\n-\n def wait_for_leader(self, timeout=TIMEOUT):\n lr = None\n t = 0\n@@ -289,6 +277,7 @@ class Node(object):\n \n def redirect_addr(self):\n r = requests.post(self._execute_url(redirect=True), data=json.dumps(['nonsense']), allow_redirects=False)\n+ r.raise_for_status()\n if r.status_code == 301:\n return \"%s://%s\" % (urlparse(r.headers['Location']).scheme, urlparse(r.headers['Location']).netloc)\n \n@@ -333,7 +322,7 @@ def deprovision_node(node):\n class Cluster(object):\n def __init__(self, nodes):\n self.nodes = nodes\n- def wait_for_leader(self, node_exc=None, timeout=TIMEOUT, constraint_check=True):\n+ def wait_for_leader(self, node_exc=None, timeout=TIMEOUT):\n t = 0\n while True:\n if t > timeout:\n@@ -341,7 +330,7 @@ class Cluster(object):\n for n in self.nodes:\n if node_exc is not None and n == node_exc:\n continue\n- if n.is_leader(constraint_check):\n+ if n.is_leader():\n return n\n time.sleep(1)\n t+=1\n@@ -682,10 +671,9 @@ class TestEndToEndNonVoterFollowsLeader(unittest.TestCase):\n j = n.query('SELECT * FROM foo')\n self.assertEqual(str(j), \"{u'results': [{u'values': [[1, u'fiona']], u'types': [u'integer', u'text'], u'columns': [u'id', u'name']}]}\")\n \n- # Kill leader, and then make more changes. Don't perform leader-constraint checks\n- # since the cluster is changing right now.\n- n0 = self.cluster.wait_for_leader(constraint_check=False).stop()\n- n1 = self.cluster.wait_for_leader(node_exc=n0, constraint_check=False)\n+ # Kill leader, and then make more changes.\n+ n0 = self.cluster.wait_for_leader().stop()\n+ n1 = self.cluster.wait_for_leader(node_exc=n0)\n n1.wait_for_all_applied()\n j = n1.query('SELECT * FROM foo')\n self.assertEqual(str(j), \"{u'results': [{u'values': [[1, u'fiona']], u'types': [u'integer', u'text'], u'columns': [u'id', u'name']}]}\")\n", "new_path": "system_test/full_system_test.py", "old_path": "system_test/full_system_test.py" } ]
dad51485282b6e05c4993b0733bd54aa3c0bacef
cupy/cupy
null
null
Use "import numpy as np" in the array_api submodule. This avoids importing everything inside the individual functions, and is still preferred over importing the used functions explicitly, as most of them clash with the wrapper function names.
[ { "change_type": "MODIFY", "diff": "@@ -1,76 +1,67 @@\n+import numpy as np\n+\n def arange(start, /, *, stop=None, step=1, dtype=None, device=None):\n- from .. import arange\n if device is not None:\n # Note: Device support is not yet implemented on ndarray\n raise NotImplementedError(\"Device support is not yet implemented\")\n- return arange(start, stop=stop, step=step, dtype=dtype)\n+ return np.arange(start, stop=stop, step=step, dtype=dtype)\n \n def empty(shape, /, *, dtype=None, device=None):\n- from .. import empty\n if device is not None:\n # Note: Device support is not yet implemented on ndarray\n raise NotImplementedError(\"Device support is not yet implemented\")\n- return empty(shape, dtype=dtype)\n+ return np.empty(shape, dtype=dtype)\n \n def empty_like(x, /, *, dtype=None, device=None):\n- from .. import empty_like\n if device is not None:\n # Note: Device support is not yet implemented on ndarray\n raise NotImplementedError(\"Device support is not yet implemented\")\n- return empty_like(x, dtype=dtype)\n+ return np.empty_like(x, dtype=dtype)\n \n def eye(N, /, *, M=None, k=0, dtype=None, device=None):\n- from .. import eye\n if device is not None:\n # Note: Device support is not yet implemented on ndarray\n raise NotImplementedError(\"Device support is not yet implemented\")\n- return eye(N, M=M, k=k, dtype=dtype)\n+ return np.eye(N, M=M, k=k, dtype=dtype)\n \n def full(shape, fill_value, /, *, dtype=None, device=None):\n- from .. import full\n if device is not None:\n # Note: Device support is not yet implemented on ndarray\n raise NotImplementedError(\"Device support is not yet implemented\")\n- return full(shape, fill_value, dtype=dtype)\n+ return np.full(shape, fill_value, dtype=dtype)\n \n def full_like(x, fill_value, /, *, dtype=None, device=None):\n- from .. import full_like\n if device is not None:\n # Note: Device support is not yet implemented on ndarray\n raise NotImplementedError(\"Device support is not yet implemented\")\n- return full_like(x, fill_value, dtype=dtype)\n+ return np.full_like(x, fill_value, dtype=dtype)\n \n def linspace(start, stop, num, /, *, dtype=None, device=None, endpoint=True):\n- from .. import linspace\n if device is not None:\n # Note: Device support is not yet implemented on ndarray\n raise NotImplementedError(\"Device support is not yet implemented\")\n- return linspace(start, stop, num, dtype=dtype, endpoint=endpoint)\n+ return np.linspace(start, stop, num, dtype=dtype, endpoint=endpoint)\n \n def ones(shape, /, *, dtype=None, device=None):\n- from .. import ones\n if device is not None:\n # Note: Device support is not yet implemented on ndarray\n raise NotImplementedError(\"Device support is not yet implemented\")\n- return ones(shape, dtype=dtype)\n+ return np.ones(shape, dtype=dtype)\n \n def ones_like(x, /, *, dtype=None, device=None):\n- from .. import ones_like\n if device is not None:\n # Note: Device support is not yet implemented on ndarray\n raise NotImplementedError(\"Device support is not yet implemented\")\n- return ones_like(x, dtype=dtype)\n+ return np.ones_like(x, dtype=dtype)\n \n def zeros(shape, /, *, dtype=None, device=None):\n- from .. import zeros\n if device is not None:\n # Note: Device support is not yet implemented on ndarray\n raise NotImplementedError(\"Device support is not yet implemented\")\n- return zeros(shape, dtype=dtype)\n+ return np.zeros(shape, dtype=dtype)\n \n def zeros_like(x, /, *, dtype=None, device=None):\n- from .. 
import zeros_like\n if device is not None:\n # Note: Device support is not yet implemented on ndarray\n raise NotImplementedError(\"Device support is not yet implemented\")\n- return zeros_like(x, dtype=dtype)\n+ return np.zeros_like(x, dtype=dtype)\n", "new_path": "numpy/_array_api/_creation_functions.py", "old_path": "numpy/_array_api/_creation_functions.py" }, { "change_type": "MODIFY", "diff": "@@ -1,230 +1,177 @@\n+import numpy as np\n+\n def abs(x, /):\n- from .. import abs\n- return abs(x)\n+ return np.abs(x)\n \n def acos(x, /):\n # Note: the function name is different here\n- from .. import arccos\n- return arccos(x)\n+ return np.arccos(x)\n \n def acosh(x, /):\n # Note: the function name is different here\n- from .. import arccosh\n- return arccosh(x)\n+ return np.arccosh(x)\n \n def add(x1, x2, /):\n- from .. import add\n- return add(x1, x2)\n+ return np.add(x1, x2)\n \n def asin(x, /):\n # Note: the function name is different here\n- from .. import arcsin\n- return arcsin(x)\n+ return np.arcsin(x)\n \n def asinh(x, /):\n # Note: the function name is different here\n- from .. import arcsinh\n- return arcsinh(x)\n+ return np.arcsinh(x)\n \n def atan(x, /):\n # Note: the function name is different here\n- from .. import arctan\n- return arctan(x)\n+ return np.arctan(x)\n \n def atan2(x1, x2, /):\n # Note: the function name is different here\n- from .. import arctan2\n- return arctan2(x1, x2)\n+ return np.arctan2(x1, x2)\n \n def atanh(x, /):\n # Note: the function name is different here\n- from .. import arctanh\n- return arctanh(x)\n+ return np.arctanh(x)\n \n def bitwise_and(x1, x2, /):\n- from .. import bitwise_and\n- return bitwise_and(x1, x2)\n+ return np.bitwise_and(x1, x2)\n \n def bitwise_left_shift(x1, x2, /):\n # Note: the function name is different here\n- from .. import left_shift\n- return left_shift(x1, x2)\n+ return np.left_shift(x1, x2)\n \n def bitwise_invert(x, /):\n # Note: the function name is different here\n- from .. import invert\n- return invert(x)\n+ return np.invert(x)\n \n def bitwise_or(x1, x2, /):\n- from .. import bitwise_or\n- return bitwise_or(x1, x2)\n+ return np.bitwise_or(x1, x2)\n \n def bitwise_right_shift(x1, x2, /):\n # Note: the function name is different here\n- from .. import right_shift\n- return right_shift(x1, x2)\n+ return np.right_shift(x1, x2)\n \n def bitwise_xor(x1, x2, /):\n- from .. import bitwise_xor\n- return bitwise_xor(x1, x2)\n+ return np.bitwise_xor(x1, x2)\n \n def ceil(x, /):\n- from .. import ceil\n- return ceil(x)\n+ return np.ceil(x)\n \n def cos(x, /):\n- from .. import cos\n- return cos(x)\n+ return np.cos(x)\n \n def cosh(x, /):\n- from .. import cosh\n- return cosh(x)\n+ return np.cosh(x)\n \n def divide(x1, x2, /):\n- from .. import divide\n- return divide(x1, x2)\n+ return np.divide(x1, x2)\n \n def equal(x1, x2, /):\n- from .. import equal\n- return equal(x1, x2)\n+ return np.equal(x1, x2)\n \n def exp(x, /):\n- from .. import exp\n- return exp(x)\n+ return np.exp(x)\n \n def expm1(x, /):\n- from .. import expm1\n- return expm1(x)\n+ return np.expm1(x)\n \n def floor(x, /):\n- from .. import floor\n- return floor(x)\n+ return np.floor(x)\n \n def floor_divide(x1, x2, /):\n- from .. import floor_divide\n- return floor_divide(x1, x2)\n+ return np.floor_divide(x1, x2)\n \n def greater(x1, x2, /):\n- from .. import greater\n- return greater(x1, x2)\n+ return np.greater(x1, x2)\n \n def greater_equal(x1, x2, /):\n- from .. 
import greater_equal\n- return greater_equal(x1, x2)\n+ return np.greater_equal(x1, x2)\n \n def isfinite(x, /):\n- from .. import isfinite\n- return isfinite(x)\n+ return np.isfinite(x)\n \n def isinf(x, /):\n- from .. import isinf\n- return isinf(x)\n+ return np.isinf(x)\n \n def isnan(x, /):\n- from .. import isnan\n- return isnan(x)\n+ return np.isnan(x)\n \n def less(x1, x2, /):\n- from .. import less\n- return less(x1, x2)\n+ return np.less(x1, x2)\n \n def less_equal(x1, x2, /):\n- from .. import less_equal\n- return less_equal(x1, x2)\n+ return np.less_equal(x1, x2)\n \n def log(x, /):\n- from .. import log\n- return log(x)\n+ return np.log(x)\n \n def log1p(x, /):\n- from .. import log1p\n- return log1p(x)\n+ return np.log1p(x)\n \n def log2(x, /):\n- from .. import log2\n- return log2(x)\n+ return np.log2(x)\n \n def log10(x, /):\n- from .. import log10\n- return log10(x)\n+ return np.log10(x)\n \n def logical_and(x1, x2, /):\n- from .. import logical_and\n- return logical_and(x1, x2)\n+ return np.logical_and(x1, x2)\n \n def logical_not(x, /):\n- from .. import logical_not\n- return logical_not(x)\n+ return np.logical_not(x)\n \n def logical_or(x1, x2, /):\n- from .. import logical_or\n- return logical_or(x1, x2)\n+ return np.logical_or(x1, x2)\n \n def logical_xor(x1, x2, /):\n- from .. import logical_xor\n- return logical_xor(x1, x2)\n+ return np.logical_xor(x1, x2)\n \n def multiply(x1, x2, /):\n- from .. import multiply\n- return multiply(x1, x2)\n+ return np.multiply(x1, x2)\n \n def negative(x, /):\n- from .. import negative\n- return negative(x)\n+ return np.negative(x)\n \n def not_equal(x1, x2, /):\n- from .. import not_equal\n- return not_equal(x1, x2)\n+ return np.not_equal(x1, x2)\n \n def positive(x, /):\n- from .. import positive\n- return positive(x)\n+ return np.positive(x)\n \n def pow(x1, x2, /):\n # Note: the function name is different here\n- from .. import power\n- return power(x1, x2)\n+ return np.power(x1, x2)\n \n def remainder(x1, x2, /):\n- from .. import remainder\n- return remainder(x1, x2)\n+ return np.remainder(x1, x2)\n \n def round(x, /):\n- from .. import round\n- return round(x)\n+ return np.round(x)\n \n def sign(x, /):\n- from .. import sign\n- return sign(x)\n+ return np.sign(x)\n \n def sin(x, /):\n- from .. import sin\n- return sin(x)\n+ return np.sin(x)\n \n def sinh(x, /):\n- from .. import sinh\n- return sinh(x)\n+ return np.sinh(x)\n \n def square(x, /):\n- from .. import square\n- return square(x)\n+ return np.square(x)\n \n def sqrt(x, /):\n- from .. import sqrt\n- return sqrt(x)\n+ return np.sqrt(x)\n \n def subtract(x1, x2, /):\n- from .. import subtract\n- return subtract(x1, x2)\n+ return np.subtract(x1, x2)\n \n def tan(x, /):\n- from .. import tan\n- return tan(x)\n+ return np.tan(x)\n \n def tanh(x, /):\n- from .. import tanh\n- return tanh(x)\n+ return np.tanh(x)\n \n def trunc(x, /):\n- from .. import trunc\n- return trunc(x)\n+ return np.trunc(x)\n", "new_path": "numpy/_array_api/_elementwise_functions.py", "old_path": "numpy/_array_api/_elementwise_functions.py" }, { "change_type": "MODIFY", "diff": "@@ -1,93 +1,73 @@\n+import numpy as np\n+\n # def cholesky():\n-# from .. import cholesky\n-# return cholesky()\n+# return np.cholesky()\n \n def cross(x1, x2, /, *, axis=-1):\n- from .. 
import cross\n- return cross(x1, x2, axis=axis)\n+ return np.cross(x1, x2, axis=axis)\n \n def det(x, /):\n # Note: this function is being imported from a nondefault namespace\n- from ..linalg import det\n- return det(x)\n+ return np.det(x)\n \n def diagonal(x, /, *, axis1=0, axis2=1, offset=0):\n- from .. import diagonal\n- return diagonal(x, axis1=axis1, axis2=axis2, offset=offset)\n+ return np.diagonal(x, axis1=axis1, axis2=axis2, offset=offset)\n \n # def dot():\n-# from .. import dot\n-# return dot()\n+# return np.dot()\n #\n # def eig():\n-# from .. import eig\n-# return eig()\n+# return np.eig()\n #\n # def eigvalsh():\n-# from .. import eigvalsh\n-# return eigvalsh()\n+# return np.eigvalsh()\n #\n # def einsum():\n-# from .. import einsum\n-# return einsum()\n+# return np.einsum()\n \n def inv(x):\n # Note: this function is being imported from a nondefault namespace\n- from ..linalg import inv\n- return inv(x)\n+ return np.inv(x)\n \n # def lstsq():\n-# from .. import lstsq\n-# return lstsq()\n+# return np.lstsq()\n #\n # def matmul():\n-# from .. import matmul\n-# return matmul()\n+# return np.matmul()\n #\n # def matrix_power():\n-# from .. import matrix_power\n-# return matrix_power()\n+# return np.matrix_power()\n #\n # def matrix_rank():\n-# from .. import matrix_rank\n-# return matrix_rank()\n+# return np.matrix_rank()\n \n def norm(x, /, *, axis=None, keepdims=False, ord=None):\n # Note: this function is being imported from a nondefault namespace\n- from ..linalg import norm\n # Note: this is different from the default behavior\n if axis == None and x.ndim > 2:\n x = x.flatten()\n- return norm(x, axis=axis, keepdims=keepdims, ord=ord)\n+ return np.norm(x, axis=axis, keepdims=keepdims, ord=ord)\n \n def outer(x1, x2, /):\n- from .. import outer\n- return outer(x1, x2)\n+ return np.outer(x1, x2)\n \n # def pinv():\n-# from .. import pinv\n-# return pinv()\n+# return np.pinv()\n #\n # def qr():\n-# from .. import qr\n-# return qr()\n+# return np.qr()\n #\n # def slogdet():\n-# from .. import slogdet\n-# return slogdet()\n+# return np.slogdet()\n #\n # def solve():\n-# from .. import solve\n-# return solve()\n+# return np.solve()\n #\n # def svd():\n-# from .. import svd\n-# return svd()\n+# return np.svd()\n \n def trace(x, /, *, axis1=0, axis2=1, offset=0):\n- from .. import trace\n- return trace(x, axis1=axis1, axis2=axis2, offset=offset)\n+ return np.trace(x, axis1=axis1, axis2=axis2, offset=offset)\n \n def transpose(x, /, *, axes=None):\n- from .. import transpose\n- return transpose(x, axes=axes)\n+ return np.transpose(x, axes=axes)\n", "new_path": "numpy/_array_api/_linear_algebra_functions.py", "old_path": "numpy/_array_api/_linear_algebra_functions.py" }, { "change_type": "MODIFY", "diff": "@@ -1,28 +1,23 @@\n+import numpy as np\n+\n def concat(arrays, /, *, axis=0):\n # Note: the function name is different here\n- from .. import concatenate\n- return concatenate(arrays, axis=axis)\n+ return np.concatenate(arrays, axis=axis)\n \n def expand_dims(x, axis, /):\n- from .. import expand_dims\n- return expand_dims(x, axis)\n+ return np.expand_dims(x, axis)\n \n def flip(x, /, *, axis=None):\n- from .. import flip\n- return flip(x, axis=axis)\n+ return np.flip(x, axis=axis)\n \n def reshape(x, shape, /):\n- from .. import reshape\n- return reshape(x, shape)\n+ return np.reshape(x, shape)\n \n def roll(x, shift, /, *, axis=None):\n- from .. import roll\n- return roll(x, shift, axis=axis)\n+ return np.roll(x, shift, axis=axis)\n \n def squeeze(x, /, *, axis=None):\n- from .. 
import squeeze\n- return squeeze(x, axis=axis)\n+ return np.squeeze(x, axis=axis)\n \n def stack(arrays, /, *, axis=0):\n- from .. import stack\n- return stack(arrays, axis=axis)\n+ return np.stack(arrays, axis=axis)\n", "new_path": "numpy/_array_api/_manipulation_functions.py", "old_path": "numpy/_array_api/_manipulation_functions.py" }, { "change_type": "MODIFY", "diff": "@@ -1,15 +1,13 @@\n+import numpy as np\n+\n def argmax(x, /, *, axis=None, keepdims=False):\n- from .. import argmax\n- return argmax(x, axis=axis, keepdims=keepdims)\n+ return np.argmax(x, axis=axis, keepdims=keepdims)\n \n def argmin(x, /, *, axis=None, keepdims=False):\n- from .. import argmin\n- return argmin(x, axis=axis, keepdims=keepdims)\n+ return np.argmin(x, axis=axis, keepdims=keepdims)\n \n def nonzero(x, /):\n- from .. import nonzero\n- return nonzero(x)\n+ return np.nonzero(x)\n \n def where(condition, x1, x2, /):\n- from .. import where\n- return where(condition, x1, x2)\n+ return np.where(condition, x1, x2)\n", "new_path": "numpy/_array_api/_searching_functions.py", "old_path": "numpy/_array_api/_searching_functions.py" }, { "change_type": "MODIFY", "diff": "@@ -1,3 +1,4 @@\n+import numpy as np\n+\n def unique(x, /, *, return_counts=False, return_index=False, return_inverse=False, sorted=True):\n- from .. import unique\n- return unique(x, return_counts=return_counts, return_index=return_index, return_inverse=return_inverse, sorted=sorted)\n+ return np.unique(x, return_counts=return_counts, return_index=return_index, return_inverse=return_inverse, sorted=sorted)\n", "new_path": "numpy/_array_api/_set_functions.py", "old_path": "numpy/_array_api/_set_functions.py" }, { "change_type": "MODIFY", "diff": "@@ -1,19 +1,17 @@\n+import numpy as np\n+\n def argsort(x, /, *, axis=-1, descending=False, stable=True):\n- from .. import argsort\n- from .. import flip\n # Note: this keyword argument is different, and the default is different.\n kind = 'stable' if stable else 'quicksort'\n- res = argsort(x, axis=axis, kind=kind)\n+ res = np.argsort(x, axis=axis, kind=kind)\n if descending:\n- res = flip(res, axis=axis)\n+ res = np.flip(res, axis=axis)\n return res\n \n def sort(x, /, *, axis=-1, descending=False, stable=True):\n- from .. import sort\n- from .. import flip\n # Note: this keyword argument is different, and the default is different.\n kind = 'stable' if stable else 'quicksort'\n- res = sort(x, axis=axis, kind=kind)\n+ res = np.sort(x, axis=axis, kind=kind)\n if descending:\n- res = flip(res, axis=axis)\n+ res = np.flip(res, axis=axis)\n return res\n", "new_path": "numpy/_array_api/_sorting_functions.py", "old_path": "numpy/_array_api/_sorting_functions.py" }, { "change_type": "MODIFY", "diff": "@@ -1,29 +1,24 @@\n+import numpy as np\n+\n def max(x, /, *, axis=None, keepdims=False):\n- from .. import max\n- return max(x, axis=axis, keepdims=keepdims)\n+ return np.max(x, axis=axis, keepdims=keepdims)\n \n def mean(x, /, *, axis=None, keepdims=False):\n- from .. import mean\n- return mean(x, axis=axis, keepdims=keepdims)\n+ return np.mean(x, axis=axis, keepdims=keepdims)\n \n def min(x, /, *, axis=None, keepdims=False):\n- from .. import min\n- return min(x, axis=axis, keepdims=keepdims)\n+ return np.min(x, axis=axis, keepdims=keepdims)\n \n def prod(x, /, *, axis=None, keepdims=False):\n- from .. import prod\n- return prod(x, axis=axis, keepdims=keepdims)\n+ return np.prod(x, axis=axis, keepdims=keepdims)\n \n def std(x, /, *, axis=None, correction=0.0, keepdims=False):\n- from .. 
import std\n # Note: the keyword argument correction is different here\n- return std(x, axis=axis, ddof=correction, keepdims=keepdims)\n+ return np.std(x, axis=axis, ddof=correction, keepdims=keepdims)\n \n def sum(x, /, *, axis=None, keepdims=False):\n- from .. import sum\n- return sum(x, axis=axis, keepdims=keepdims)\n+ return np.sum(x, axis=axis, keepdims=keepdims)\n \n def var(x, /, *, axis=None, correction=0.0, keepdims=False):\n- from .. import var\n # Note: the keyword argument correction is different here\n- return var(x, axis=axis, ddof=correction, keepdims=keepdims)\n+ return np.var(x, axis=axis, ddof=correction, keepdims=keepdims)\n", "new_path": "numpy/_array_api/_statistical_functions.py", "old_path": "numpy/_array_api/_statistical_functions.py" }, { "change_type": "MODIFY", "diff": "@@ -1,7 +1,7 @@\n+import numpy as np\n+\n def all(x, /, *, axis=None, keepdims=False):\n- from .. import all\n- return all(x, axis=axis, keepdims=keepdims)\n+ return np.all(x, axis=axis, keepdims=keepdims)\n \n def any(x, /, *, axis=None, keepdims=False):\n- from .. import any\n- return any(x, axis=axis, keepdims=keepdims)\n+ return np.any(x, axis=axis, keepdims=keepdims)\n", "new_path": "numpy/_array_api/_utility_functions.py", "old_path": "numpy/_array_api/_utility_functions.py" } ]
0da4b09082036d2c62a638d751b660a75e543bc9
cupy/cupy
null
null
Add an explanatory docstring to _array_api/__init__.py. This is mostly aimed at any potential reviewers of the module for now.
[ { "change_type": "MODIFY", "diff": "@@ -1,3 +1,69 @@\n+\"\"\"\n+A NumPy sub-namespace that conforms to the Python array API standard.\n+\n+This is a proof-of-concept namespace that wraps the corresponding NumPy\n+functions to give a conforming implementation of the Python array API standard\n+(https://data-apis.github.io/array-api/latest/). The standard is currently in\n+an RFC phase and comments on it are both welcome and encouraged. Comments\n+should be made either at https://github.com/data-apis/array-api or at\n+https://github.com/data-apis/consortium-feedback/discussions.\n+\n+This submodule will be accompanied with a NEP (not yet written) proposing its\n+inclusion in NumPy.\n+\n+NumPy already follows the proposed spec for the most part, so this module\n+serves mostly as a thin wrapper around it. However, NumPy also implements a\n+lot of behavior that is not included in the spec, so this serves as a\n+restricted subset of the API. Only those functions that are part of the spec\n+are included in this namespace, and all functions are given with the exact\n+signature given in the spec, including the use of position-only arguments, and\n+omitting any extra keyword arguments implemented by NumPy but not part of the\n+spec. Note that the array object itself is unchanged, as implementing a\n+restricted subclass of ndarray seems unnecessarily complex for the purposes of\n+this namespace, so the API of array methods and other behaviors of the array\n+object will include things that are not part of the spec.\n+\n+The spec is designed as a \"minimal API subset\" and explicitly allows libraries\n+to include behaviors not specified by it. But users of this module that intend\n+to write portable code should be aware that only those behaviors that are\n+listed in the spec are guaranteed to be implemented across libraries.\n+\n+A few notes about the current state of this submodule:\n+\n+- There is a test suite that tests modules against the array API standard at\n+ https://github.com/data-apis/array-api-tests. The test suite is still a work\n+ in progress, but the existing tests pass on this module, with a few\n+ exceptions:\n+\n+ - Device support is not yet implemented in NumPy\n+ (https://data-apis.github.io/array-api/latest/design_topics/device_support.html).\n+ As a result, the `device` attribute of the array object is missing, and\n+ array creation functions that take the `device` keyword argument will fail\n+ with NotImplementedError.\n+\n+ - DLPack support (see https://github.com/data-apis/array-api/pull/106) is\n+ not included here, as it requires a full implementation in NumPy proper\n+ first.\n+\n+ - np.argmin and np.argmax do not implement the keepdims keyword argument.\n+\n+ - Some linear algebra functions in the spec are still a work in progress (to\n+ be added soon). These will be updated once the spec is.\n+\n+ - Some tests in the test suite are still not fully correct in that they test\n+ all datatypes whereas certain functions are only defined for a subset of\n+ datatypes.\n+\n+ The test suite is yet complete, and even the tests that exist are not\n+ guaranteed to give a comprehensive coverage of the spec. Therefore, those\n+ reviewing this submodule should refer to the standard documents themselves.\n+\n+- All places where the implementations in this submodule are known to deviate\n+ from their corresponding functions in NumPy are marked with \"# Note\"\n+ comments. 
Reviewers should make note of these comments.\n+\n+\"\"\"\n+\n __all__ = []\n \n from ._constants import e, inf, nan, pi\n", "new_path": "numpy/_array_api/__init__.py", "old_path": "numpy/_array_api/__init__.py" } ]
76eb888612183768d9e1b0c818fcf5416c5f28c7
cupy/cupy
null
null
Use _implementation on all functions that have it in the array API submodule. That way they only work on actual ndarray inputs, not array-likes, which is more in line with the spec.
[ { "change_type": "MODIFY", "diff": "@@ -35,7 +35,7 @@ def empty_like(x: array, /, *, dtype: Optional[dtype] = None, device: Optional[d\n if device is not None:\n # Note: Device support is not yet implemented on ndarray\n raise NotImplementedError(\"Device support is not yet implemented\")\n- return np.empty_like(x, dtype=dtype)\n+ return np.empty_like._implementation(x, dtype=dtype)\n \n def eye(N: int, /, *, M: Optional[int] = None, k: Optional[int] = 0, dtype: Optional[dtype] = None, device: Optional[device] = None) -> array:\n \"\"\"\n@@ -68,7 +68,7 @@ def full_like(x: array, fill_value: Union[int, float], /, *, dtype: Optional[dty\n if device is not None:\n # Note: Device support is not yet implemented on ndarray\n raise NotImplementedError(\"Device support is not yet implemented\")\n- return np.full_like(x, fill_value, dtype=dtype)\n+ return np.full_like._implementation(x, fill_value, dtype=dtype)\n \n def linspace(start: Union[int, float], stop: Union[int, float], num: int, /, *, dtype: Optional[dtype] = None, device: Optional[device] = None, endpoint: bool = True) -> array:\n \"\"\"\n@@ -101,7 +101,7 @@ def ones_like(x: array, /, *, dtype: Optional[dtype] = None, device: Optional[de\n if device is not None:\n # Note: Device support is not yet implemented on ndarray\n raise NotImplementedError(\"Device support is not yet implemented\")\n- return np.ones_like(x, dtype=dtype)\n+ return np.ones_like._implementation(x, dtype=dtype)\n \n def zeros(shape: Union[int, Tuple[int, ...]], /, *, dtype: Optional[dtype] = None, device: Optional[device] = None) -> array:\n \"\"\"\n@@ -123,4 +123,4 @@ def zeros_like(x: array, /, *, dtype: Optional[dtype] = None, device: Optional[d\n if device is not None:\n # Note: Device support is not yet implemented on ndarray\n raise NotImplementedError(\"Device support is not yet implemented\")\n- return np.zeros_like(x, dtype=dtype)\n+ return np.zeros_like._implementation(x, dtype=dtype)\n", "new_path": "numpy/_array_api/_creation_functions.py", "old_path": "numpy/_array_api/_creation_functions.py" }, { "change_type": "MODIFY", "diff": "@@ -381,7 +381,7 @@ def round(x: array, /) -> array:\n \n See its docstring for more information.\n \"\"\"\n- return np.round(x)\n+ return np.round._implementation(x)\n \n def sign(x: array, /) -> array:\n \"\"\"\n", "new_path": "numpy/_array_api/_elementwise_functions.py", "old_path": "numpy/_array_api/_elementwise_functions.py" }, { "change_type": "MODIFY", "diff": "@@ -18,7 +18,7 @@ def cross(x1: array, x2: array, /, *, axis: int = -1) -> array:\n \n See its docstring for more information.\n \"\"\"\n- return np.cross(x1, x2, axis=axis)\n+ return np.cross._implementation(x1, x2, axis=axis)\n \n def det(x: array, /) -> array:\n \"\"\"\n@@ -35,7 +35,7 @@ def diagonal(x: array, /, *, axis1: int = 0, axis2: int = 1, offset: int = 0) ->\n \n See its docstring for more information.\n \"\"\"\n- return np.diagonal(x, axis1=axis1, axis2=axis2, offset=offset)\n+ return np.diagonal._implementation(x, axis1=axis1, axis2=axis2, offset=offset)\n \n # def dot():\n # \"\"\"\n@@ -128,7 +128,7 @@ def outer(x1: array, x2: array, /) -> array:\n \n See its docstring for more information.\n \"\"\"\n- return np.outer(x1, x2)\n+ return np.outer._implementation(x1, x2)\n \n # def pinv():\n # \"\"\"\n@@ -176,7 +176,7 @@ def trace(x: array, /, *, axis1: int = 0, axis2: int = 1, offset: int = 0) -> ar\n \n See its docstring for more information.\n \"\"\"\n- return np.asarray(np.trace(x, axis1=axis1, axis2=axis2, offset=offset))\n+ return 
np.asarray(np.trace._implementation(x, axis1=axis1, axis2=axis2, offset=offset))\n \n def transpose(x: array, /, *, axes: Optional[Tuple[int, ...]] = None) -> array:\n \"\"\"\n@@ -184,4 +184,4 @@ def transpose(x: array, /, *, axes: Optional[Tuple[int, ...]] = None) -> array:\n \n See its docstring for more information.\n \"\"\"\n- return np.transpose(x, axes=axes)\n+ return np.transpose._implementation(x, axes=axes)\n", "new_path": "numpy/_array_api/_linear_algebra_functions.py", "old_path": "numpy/_array_api/_linear_algebra_functions.py" }, { "change_type": "MODIFY", "diff": "@@ -19,7 +19,7 @@ def expand_dims(x: array, axis: int, /) -> array:\n \n See its docstring for more information.\n \"\"\"\n- return np.expand_dims(x, axis)\n+ return np.expand_dims._implementation(x, axis)\n \n def flip(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None) -> array:\n \"\"\"\n@@ -27,7 +27,7 @@ def flip(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None) ->\n \n See its docstring for more information.\n \"\"\"\n- return np.flip(x, axis=axis)\n+ return np.flip._implementation(x, axis=axis)\n \n def reshape(x: array, shape: Tuple[int, ...], /) -> array:\n \"\"\"\n@@ -35,7 +35,7 @@ def reshape(x: array, shape: Tuple[int, ...], /) -> array:\n \n See its docstring for more information.\n \"\"\"\n- return np.reshape(x, shape)\n+ return np.reshape._implementation(x, shape)\n \n def roll(x: array, shift: Union[int, Tuple[int, ...]], /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None) -> array:\n \"\"\"\n@@ -43,7 +43,7 @@ def roll(x: array, shift: Union[int, Tuple[int, ...]], /, *, axis: Optional[Unio\n \n See its docstring for more information.\n \"\"\"\n- return np.roll(x, shift, axis=axis)\n+ return np.roll._implementation(x, shift, axis=axis)\n \n def squeeze(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None) -> array:\n \"\"\"\n@@ -51,7 +51,7 @@ def squeeze(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None)\n \n See its docstring for more information.\n \"\"\"\n- return np.squeeze(x, axis=axis)\n+ return np.squeeze._implementation(x, axis=axis)\n \n def stack(arrays: Tuple[array], /, *, axis: int = 0) -> array:\n \"\"\"\n@@ -59,4 +59,4 @@ def stack(arrays: Tuple[array], /, *, axis: int = 0) -> array:\n \n See its docstring for more information.\n \"\"\"\n- return np.stack(arrays, axis=axis)\n+ return np.stack._implementation(arrays, axis=axis)\n", "new_path": "numpy/_array_api/_manipulation_functions.py", "old_path": "numpy/_array_api/_manipulation_functions.py" }, { "change_type": "MODIFY", "diff": "@@ -11,7 +11,7 @@ def argmax(x: array, /, *, axis: int = None, keepdims: bool = False) -> array:\n See its docstring for more information.\n \"\"\"\n # Note: this currently fails as np.argmax does not implement keepdims\n- return np.asarray(np.argmax(x, axis=axis, keepdims=keepdims))\n+ return np.asarray(np.argmax._implementation(x, axis=axis, keepdims=keepdims))\n \n def argmin(x: array, /, *, axis: int = None, keepdims: bool = False) -> array:\n \"\"\"\n@@ -20,7 +20,7 @@ def argmin(x: array, /, *, axis: int = None, keepdims: bool = False) -> array:\n See its docstring for more information.\n \"\"\"\n # Note: this currently fails as np.argmin does not implement keepdims\n- return np.asarray(np.argmin(x, axis=axis, keepdims=keepdims))\n+ return np.asarray(np.argmin._implementation(x, axis=axis, keepdims=keepdims))\n \n def nonzero(x: array, /) -> Tuple[array, ...]:\n \"\"\"\n@@ -28,7 +28,7 @@ def nonzero(x: array, /) -> Tuple[array, ...]:\n 
\n See its docstring for more information.\n \"\"\"\n- return np.nonzero(x)\n+ return np.nonzero._implementation(x)\n \n def where(condition: array, x1: array, x2: array, /) -> array:\n \"\"\"\n@@ -36,4 +36,4 @@ def where(condition: array, x1: array, x2: array, /) -> array:\n \n See its docstring for more information.\n \"\"\"\n- return np.where(condition, x1, x2)\n+ return np.where._implementation(condition, x1, x2)\n", "new_path": "numpy/_array_api/_searching_functions.py", "old_path": "numpy/_array_api/_searching_functions.py" }, { "change_type": "MODIFY", "diff": "@@ -10,4 +10,4 @@ def unique(x: array, /, *, return_counts: bool = False, return_index: bool = Fal\n \n See its docstring for more information.\n \"\"\"\n- return np.unique(x, return_counts=return_counts, return_index=return_index, return_inverse=return_inverse, sorted=sorted)\n+ return np.unique._implementation(x, return_counts=return_counts, return_index=return_index, return_inverse=return_inverse, sorted=sorted)\n", "new_path": "numpy/_array_api/_set_functions.py", "old_path": "numpy/_array_api/_set_functions.py" }, { "change_type": "MODIFY", "diff": "@@ -12,7 +12,7 @@ def argsort(x: array, /, *, axis: int = -1, descending: bool = False, stable: bo\n \"\"\"\n # Note: this keyword argument is different, and the default is different.\n kind = 'stable' if stable else 'quicksort'\n- res = np.argsort(x, axis=axis, kind=kind)\n+ res = np.argsort._implementation(x, axis=axis, kind=kind)\n if descending:\n res = np.flip(res, axis=axis)\n return res\n@@ -25,7 +25,7 @@ def sort(x: array, /, *, axis: int = -1, descending: bool = False, stable: bool\n \"\"\"\n # Note: this keyword argument is different, and the default is different.\n kind = 'stable' if stable else 'quicksort'\n- res = np.sort(x, axis=axis, kind=kind)\n+ res = np.sort._implementation(x, axis=axis, kind=kind)\n if descending:\n res = np.flip(res, axis=axis)\n return res\n", "new_path": "numpy/_array_api/_sorting_functions.py", "old_path": "numpy/_array_api/_sorting_functions.py" }, { "change_type": "MODIFY", "diff": "@@ -5,24 +5,24 @@ from ._types import Optional, Tuple, Union, array\n import numpy as np\n \n def max(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array:\n- return np.max(x, axis=axis, keepdims=keepdims)\n+ return np.max._implementation(x, axis=axis, keepdims=keepdims)\n \n def mean(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array:\n- return np.asarray(np.mean(x, axis=axis, keepdims=keepdims))\n+ return np.asarray(np.mean._implementation(x, axis=axis, keepdims=keepdims))\n \n def min(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array:\n- return np.min(x, axis=axis, keepdims=keepdims)\n+ return np.min._implementation(x, axis=axis, keepdims=keepdims)\n \n def prod(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array:\n- return np.asarray(np.prod(x, axis=axis, keepdims=keepdims))\n+ return np.asarray(np.prod._implementation(x, axis=axis, keepdims=keepdims))\n \n def std(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, correction: Union[int, float] = 0.0, keepdims: bool = False) -> array:\n # Note: the keyword argument correction is different here\n- return np.asarray(np.std(x, axis=axis, ddof=correction, keepdims=keepdims))\n+ return np.asarray(np.std._implementation(x, axis=axis, ddof=correction, keepdims=keepdims))\n \n def sum(x: array, /, 
*, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array:\n- return np.asarray(np.sum(x, axis=axis, keepdims=keepdims))\n+ return np.asarray(np.sum._implementation(x, axis=axis, keepdims=keepdims))\n \n def var(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, correction: Union[int, float] = 0.0, keepdims: bool = False) -> array:\n # Note: the keyword argument correction is different here\n- return np.asarray(np.var(x, axis=axis, ddof=correction, keepdims=keepdims))\n+ return np.asarray(np.var._implementation(x, axis=axis, ddof=correction, keepdims=keepdims))\n", "new_path": "numpy/_array_api/_statistical_functions.py", "old_path": "numpy/_array_api/_statistical_functions.py" }, { "change_type": "MODIFY", "diff": "@@ -10,7 +10,7 @@ def all(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keep\n \n See its docstring for more information.\n \"\"\"\n- return np.asarray(np.all(x, axis=axis, keepdims=keepdims))\n+ return np.asarray(np.all._implementation(x, axis=axis, keepdims=keepdims))\n \n def any(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array:\n \"\"\"\n@@ -18,4 +18,4 @@ def any(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keep\n \n See its docstring for more information.\n \"\"\"\n- return np.asarray(np.any(x, axis=axis, keepdims=keepdims))\n+ return np.asarray(np.any._implementation(x, axis=axis, keepdims=keepdims))\n", "new_path": "numpy/_array_api/_utility_functions.py", "old_path": "numpy/_array_api/_utility_functions.py" } ]
994ce07595026d5de54f52ef5748b578f9fae1bc
cupy/cupy
null
null
Use better type signatures in the array API module. This includes returning custom dataclasses for finfo and iinfo that contain only the properties required by the array API specification.
[ { "change_type": "MODIFY", "diff": "@@ -396,7 +396,8 @@ class Array:\n res = self._array.__le__(other._array)\n return self.__class__._new(res)\n \n- def __len__(self, /):\n+ # Note: __len__ may end up being removed from the array API spec.\n+ def __len__(self, /) -> int:\n \"\"\"\n Performs the operation __len__.\n \"\"\"\n@@ -843,7 +844,7 @@ class Array:\n return self.__class__._new(res)\n \n @property\n- def dtype(self):\n+ def dtype(self) -> Dtype:\n \"\"\"\n Array API compatible wrapper for :py:meth:`np.ndaray.dtype <numpy.ndarray.dtype>`.\n \n@@ -852,7 +853,7 @@ class Array:\n return self._array.dtype\n \n @property\n- def device(self):\n+ def device(self) -> Device:\n \"\"\"\n Array API compatible wrapper for :py:meth:`np.ndaray.device <numpy.ndarray.device>`.\n \n@@ -862,7 +863,7 @@ class Array:\n raise NotImplementedError(\"The device attribute is not yet implemented\")\n \n @property\n- def ndim(self):\n+ def ndim(self) -> int:\n \"\"\"\n Array API compatible wrapper for :py:meth:`np.ndaray.ndim <numpy.ndarray.ndim>`.\n \n@@ -871,7 +872,7 @@ class Array:\n return self._array.ndim\n \n @property\n- def shape(self):\n+ def shape(self) -> Tuple[int, ...]:\n \"\"\"\n Array API compatible wrapper for :py:meth:`np.ndaray.shape <numpy.ndarray.shape>`.\n \n@@ -880,7 +881,7 @@ class Array:\n return self._array.shape\n \n @property\n- def size(self):\n+ def size(self) -> int:\n \"\"\"\n Array API compatible wrapper for :py:meth:`np.ndaray.size <numpy.ndarray.size>`.\n \n@@ -889,7 +890,7 @@ class Array:\n return self._array.size\n \n @property\n- def T(self):\n+ def T(self) -> Array:\n \"\"\"\n Array API compatible wrapper for :py:meth:`np.ndaray.T <numpy.ndarray.T>`.\n \n", "new_path": "numpy/_array_api/_array_object.py", "old_path": "numpy/_array_api/_array_object.py" }, { "change_type": "MODIFY", "diff": "@@ -10,7 +10,7 @@ from ._dtypes import _all_dtypes\n \n import numpy as np\n \n-def asarray(obj: Union[float, NestedSequence[bool|int|float], SupportsDLPack, SupportsBufferProtocol], /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None, copy: Optional[bool] = None) -> Array:\n+def asarray(obj: Union[Array, float, NestedSequence[bool|int|float], SupportsDLPack, SupportsBufferProtocol], /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None, copy: Optional[bool] = None) -> Array:\n \"\"\"\n Array API compatible wrapper for :py:func:`np.asarray <numpy.asarray>`.\n \n", "new_path": "numpy/_array_api/_creation_functions.py", "old_path": "numpy/_array_api/_creation_functions.py" }, { "change_type": "MODIFY", "diff": "@@ -2,6 +2,7 @@ from __future__ import annotations\n \n from ._array_object import Array\n \n+from dataclasses import dataclass\n from typing import TYPE_CHECKING\n if TYPE_CHECKING:\n from ._types import List, Tuple, Union, Dtype\n@@ -38,13 +39,44 @@ def can_cast(from_: Union[Dtype, Array], to: Dtype, /) -> bool:\n from_ = from_._array\n return np.can_cast(from_, to)\n \n+# These are internal objects for the return types of finfo and iinfo, since\n+# the NumPy versions contain extra data that isn't part of the spec.\n+@dataclass\n+class finfo_object:\n+ bits: int\n+ # Note: The types of the float data here are float, whereas in NumPy they\n+ # are scalars of the corresponding float dtype.\n+ eps: float\n+ max: float\n+ min: float\n+ # Note: smallest_normal is part of the array API spec, but cannot be used\n+ # until https://github.com/numpy/numpy/pull/18536 is merged.\n+\n+ # smallest_normal: float\n+\n+@dataclass\n+class iinfo_object:\n+ bits: 
int\n+ max: int\n+ min: int\n+\n def finfo(type: Union[Dtype, Array], /) -> finfo_object:\n \"\"\"\n Array API compatible wrapper for :py:func:`np.finfo <numpy.finfo>`.\n \n See its docstring for more information.\n \"\"\"\n- return np.finfo(type)\n+ fi = np.finfo(type)\n+ # Note: The types of the float data here are float, whereas in NumPy they\n+ # are scalars of the corresponding float dtype.\n+ return finfo_object(\n+ fi.bits,\n+ float(fi.eps),\n+ float(fi.max),\n+ float(fi.min),\n+ # TODO: Uncomment this when #18536 is merged.\n+ # float(fi.smallest_normal),\n+ )\n \n def iinfo(type: Union[Dtype, Array], /) -> iinfo_object:\n \"\"\"\n@@ -52,7 +84,8 @@ def iinfo(type: Union[Dtype, Array], /) -> iinfo_object:\n \n See its docstring for more information.\n \"\"\"\n- return np.iinfo(type)\n+ ii = np.iinfo(type)\n+ return iinfo_object(ii.bits, ii.max, ii.min)\n \n def result_type(*arrays_and_dtypes: Sequence[Union[Array, Dtype]]) -> Dtype:\n \"\"\"\n", "new_path": "numpy/_array_api/_data_type_functions.py", "old_path": "numpy/_array_api/_data_type_functions.py" }, { "change_type": "MODIFY", "diff": "@@ -7,7 +7,7 @@ from typing import List, Optional, Tuple, Union\n import numpy as np\n \n # Note: the function name is different here\n-def concat(arrays: Tuple[Array, ...], /, *, axis: Optional[int] = 0) -> Array:\n+def concat(arrays: Union[Tuple[Array, ...], List[Array]], /, *, axis: Optional[int] = 0) -> Array:\n \"\"\"\n Array API compatible wrapper for :py:func:`np.concatenate <numpy.concatenate>`.\n \n@@ -56,7 +56,7 @@ def squeeze(x: Array, /, axis: Optional[Union[int, Tuple[int, ...]]] = None) ->\n \"\"\"\n return Array._new(np.squeeze(x._array, axis=axis))\n \n-def stack(arrays: Tuple[Array, ...], /, *, axis: int = 0) -> Array:\n+def stack(arrays: Union[Tuple[Array, ...], List[Array]], /, *, axis: int = 0) -> Array:\n \"\"\"\n Array API compatible wrapper for :py:func:`np.stack <numpy.stack>`.\n \n", "new_path": "numpy/_array_api/_manipulation_functions.py", "old_path": "numpy/_array_api/_manipulation_functions.py" } ]
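As a quick illustration of why the commit wraps finfo and iinfo: plain NumPy returns NumPy scalar types for the float attributes, while the new dataclass stores builtin Python floats. This snippet is not from the commit; it only demonstrates the behavior the wrapper works around.

```python
import numpy as np

fi = np.finfo(np.float64)
print(type(fi.eps))   # numpy.float64 in plain NumPy
print(float(fi.eps))  # the wrapper above stores these as builtin Python floats
```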
4877478d275959f746dab4f7b91bfe68956f26f1
netflix/security_monkey
null
null
Fix for orphaned items that may develop from a failed watcher event. Also added optional (but on by default) silencing of verbose and useless botocore logs.
[ { "change_type": "MODIFY", "diff": "@@ -95,7 +95,6 @@ def create_item(item, technology, account):\n )\n \n \n-\n def detect_change(item, account, technology, complete_hash, durable_hash):\n \"\"\"\n Checks the database to see if the latest revision of the specified\n", "new_path": "security_monkey/datastore_utils.py", "old_path": "security_monkey/datastore_utils.py" }, { "change_type": "MODIFY", "diff": "@@ -12,7 +12,7 @@ import traceback\n \n from security_monkey import app, db, jirasync, sentry\n from security_monkey.alerter import Alerter\n-from security_monkey.datastore import store_exception, clear_old_exceptions\n+from security_monkey.datastore import store_exception, clear_old_exceptions, Technology, Account, Item, ItemRevision\n from security_monkey.monitors import get_monitors, get_monitors_and_dependencies\n from security_monkey.reporter import Reporter\n from security_monkey.task_scheduler.util import CELERY, setup\n@@ -70,9 +70,57 @@ def clear_expired_exceptions():\n app.logger.info(\"[-] Completed clearing out exceptions that have an expired TTL.\")\n \n \n+def fix_orphaned_deletions(account_name, technology_name):\n+ \"\"\"\n+ Possible issue with orphaned items. This will check if there are any, and will assume that the item\n+ was deleted. This will create a deletion change record to it.\n+\n+ :param account_name:\n+ :param technology_name:\n+ :return:\n+ \"\"\"\n+ # If technology doesn't exist, then create it:\n+ technology = Technology.query.filter(Technology.name == technology_name).first()\n+ if not technology:\n+ technology = Technology(name=technology_name)\n+ db.session.add(technology)\n+ db.session.commit()\n+ app.logger.info(\"Technology: {} did not exist... created it...\".format(technology_name))\n+\n+ account = Account.query.filter(Account.name == account_name).one()\n+\n+ # Query for orphaned items of the given technology/account pair:\n+ orphaned_items = Item.query.filter(Item.account_id == account.id, Item.tech_id == technology.id,\n+ Item.latest_revision_id == None).all() # noqa\n+\n+ if not orphaned_items:\n+ app.logger.info(\"[@] No orphaned items have been found. (This is good)\")\n+ return\n+\n+ # Fix the orphaned items:\n+ for oi in orphaned_items:\n+ app.logger.error(\"[?] Found an orphaned item: {}. Creating a deletion record for it\".format(oi.name))\n+ revision = ItemRevision(active=False, config={})\n+ oi.revisions.append(revision)\n+ db.session.add(revision)\n+ db.session.add(oi)\n+ db.session.commit()\n+\n+ # Update the latest revision id:\n+ db.session.refresh(revision)\n+ oi.latest_revision_id = revision.id\n+ db.session.add(oi)\n+\n+ db.session.commit()\n+ app.logger.info(\"[-] Created deletion record for item: {}.\".format(oi.name))\n+\n+\n def reporter_logic(account_name, technology_name):\n \"\"\"Logic for the run change reporter\"\"\"\n try:\n+ # Before doing anything... Look for orphaned items for this given technology. If they exist, then delete them:\n+ fix_orphaned_deletions(account_name, technology_name)\n+\n # Watch and Audit:\n monitors = find_changes(account_name, technology_name)\n \n@@ -140,6 +188,9 @@ def find_changes(account_name, monitor_name, debug=True):\n Runs the watcher and stores the result, re-audits all types to account\n for downstream dependencies.\n \"\"\"\n+ # Before doing anything... Look for orphaned items for this given technology. 
If they exist, then delete them:\n+ fix_orphaned_deletions(account_name, monitor_name)\n+\n monitors = get_monitors(account_name, [monitor_name], debug)\n for mon in monitors:\n cw = mon.watcher\n", "new_path": "security_monkey/task_scheduler/tasks.py", "old_path": "security_monkey/task_scheduler/tasks.py" }, { "change_type": "MODIFY", "diff": "@@ -84,7 +84,8 @@ class CelerySchedulerTestCase(SecurityMonkeyTestCase):\n \n db.session.commit()\n \n- def test_find_batch_changes(self):\n+ @patch(\"security_monkey.task_scheduler.tasks.fix_orphaned_deletions\")\n+ def test_find_batch_changes(self, mock_fix_orphaned):\n \"\"\"\n Runs through a full find job via the IAMRole watcher, as that supports batching.\n \n@@ -92,7 +93,7 @@ class CelerySchedulerTestCase(SecurityMonkeyTestCase):\n not going to do any boto work and that will instead be mocked out.\n :return:\n \"\"\"\n- from security_monkey.task_scheduler.tasks import manual_run_change_finder, setup\n+ from security_monkey.task_scheduler.tasks import manual_run_change_finder\n from security_monkey.monitors import Monitor\n from security_monkey.watchers.iam.iam_role import IAMRole\n from security_monkey.auditors.iam.iam_role import IAMRoleAuditor\n@@ -142,6 +143,7 @@ class CelerySchedulerTestCase(SecurityMonkeyTestCase):\n watcher.slurp = mock_slurp\n \n manual_run_change_finder([test_account.name], [watcher.index])\n+ assert mock_fix_orphaned.called\n \n # Check that all items were added to the DB:\n assert len(Item.query.all()) == 11\n@@ -271,8 +273,9 @@ class CelerySchedulerTestCase(SecurityMonkeyTestCase):\n client.put_role_policy(RoleName=\"roleNumber{}\".format(x), PolicyName=\"testpolicy\",\n PolicyDocument=json.dumps(OPEN_POLICY, indent=4))\n \n- def test_report_batch_changes(self):\n- from security_monkey.task_scheduler.tasks import manual_run_change_reporter, setup\n+ @patch(\"security_monkey.task_scheduler.tasks.fix_orphaned_deletions\")\n+ def test_report_batch_changes(self, mock_fix_orphaned):\n+ from security_monkey.task_scheduler.tasks import manual_run_change_reporter\n from security_monkey.datastore import Item, ItemRevision, ItemAudit\n from security_monkey.monitors import Monitor\n from security_monkey.watchers.iam.iam_role import IAMRole\n@@ -327,6 +330,8 @@ class CelerySchedulerTestCase(SecurityMonkeyTestCase):\n \n manual_run_change_reporter([test_account.name])\n \n+ assert mock_fix_orphaned.called\n+\n # Check that all items were added to the DB:\n assert len(Item.query.all()) == 11\n \n@@ -348,6 +353,32 @@ class CelerySchedulerTestCase(SecurityMonkeyTestCase):\n purge_it()\n assert mock.control.purge.called\n \n+ def test_fix_orphaned_deletions(self):\n+ test_account = Account.query.filter(Account.name == \"TEST_ACCOUNT1\").one()\n+ technology = Technology(name=\"orphaned\")\n+\n+ db.session.add(technology)\n+ db.session.commit()\n+\n+ orphaned_item = Item(name=\"orphaned\", region=\"us-east-1\", tech_id=technology.id, account_id=test_account.id)\n+ db.session.add(orphaned_item)\n+ db.session.commit()\n+\n+ assert not orphaned_item.latest_revision_id\n+ assert not orphaned_item.revisions.count()\n+ assert len(Item.query.filter(Item.account_id == test_account.id, Item.tech_id == technology.id,\n+ Item.latest_revision_id == None).all()) == 1 # noqa\n+\n+ from security_monkey.task_scheduler.tasks import fix_orphaned_deletions\n+ fix_orphaned_deletions(test_account.name, technology.name)\n+\n+ assert not Item.query.filter(Item.account_id == test_account.id, Item.tech_id == technology.id,\n+ Item.latest_revision_id == 
None).all() # noqa\n+\n+ assert orphaned_item.latest_revision_id\n+ assert orphaned_item.revisions.count() == 1\n+ assert orphaned_item.latest_config == {}\n+\n @patch(\"security_monkey.task_scheduler.beat.setup\")\n @patch(\"security_monkey.task_scheduler.beat.purge_it\")\n @patch(\"security_monkey.task_scheduler.tasks.task_account_tech\")\n", "new_path": "security_monkey/tests/scheduling/test_celery_scheduler.py", "old_path": "security_monkey/tests/scheduling/test_celery_scheduler.py" }, { "change_type": "MODIFY", "diff": "@@ -26,10 +26,17 @@ from copy import deepcopy\n import dpath.util\n from dpath.exceptions import PathNotFound\n \n+import logging\n+\n watcher_registry = {}\n abstract_classes = set(['Watcher', 'CloudAuxWatcher', 'CloudAuxBatchedWatcher'])\n \n \n+if not app.config.get(\"DONT_IGNORE_BOTO_VERBOSE_LOGGERS\"):\n+ logging.getLogger('botocore.vendored.requests.packages.urllib3').setLevel(logging.WARNING)\n+ logging.getLogger('botocore.credentials').setLevel(logging.WARNING)\n+\n+\n class WatcherType(type):\n def __init__(cls, name, bases, attrs):\n super(WatcherType, cls).__init__(name, bases, attrs)\n", "new_path": "security_monkey/watcher.py", "old_path": "security_monkey/watcher.py" }, { "change_type": "MODIFY", "diff": "@@ -67,10 +67,15 @@ class SQS(CloudAuxBatchedWatcher):\n \n # Offset by the existing items in the list (from other regions)\n offset = len(self.corresponding_items)\n+ queue_count = -1\n \n- for i in range(0, len(queues)):\n- items.append({\"Url\": queues[i], \"Region\": kwargs[\"region\"]})\n- self.corresponding_items[queues[i]] = i + offset\n+ for item_count in range(0, len(queues)):\n+ if self.corresponding_items.get(queues[item_count]):\n+ app.logger.error(\"[?] Received a duplicate item in the SQS list: {}. Skipping it.\".format(queues[item_count]))\n+ continue\n+ queue_count += 1\n+ items.append({\"Url\": queues[item_count], \"Region\": kwargs[\"region\"]})\n+ self.corresponding_items[queues[item_count]] = queue_count + offset\n \n return items\n \n", "new_path": "security_monkey/watchers/sqs.py", "old_path": "security_monkey/watchers/sqs.py" } ]
84fd14194ddaa5b890e4479def071ce53a93b9d4
netflix/security_monkey
null
null
Add options to post metrics to queue. This commit adds an option to SM to post metrics to CloudWatch. Metric data will be posted whenever scan queue items are added or removed.
[ { "change_type": "MODIFY", "diff": "@@ -5,6 +5,7 @@ This document outlines how to configure Security Monkey to:\n \n 1. Automatically run the API\n 1. Automatically scan for changes in your environment.\n+1. Configure Security Monkey to send scanning performance metrics\n \n Each section is important, please read them thoroughly.\n \n@@ -180,6 +181,11 @@ Supervisor will run the Celery `worker` command, which is:\n so keep the supervisor configurations on these instances separate.\n \n \n+Configure Security Monkey to send scanning performance metrics\n+--------------------------------------------------------------\n+Security Monkey can be configured to send metrics when objects are added or removed from the scanning queue. This allows operators to check Security Monkey performance and ensure that items are being processed from the queue in a timely manner. To do so set `METRICS_ENABLED` to `True`. You will need `cloudwatch:PutMetricData` permission. Metrics will be posted with the namespace `securitymonkey` unless configured using the variable `METRICS_NAMESPACE`. You will also want to set `METRICS_POST_REGION` with the region you want to post CloudWatch Metrics to (default: `us-east-1`).\n+\n+\n Deployment Strategies\n --------------------\n A typical deployment strategy is:\n", "new_path": "docs/autostarting.md", "old_path": "docs/autostarting.md" }, { "change_type": "MODIFY", "diff": "@@ -26,6 +26,7 @@ from security_monkey.datastore import store_exception, clear_old_exceptions, Tec\n from security_monkey.monitors import get_monitors, get_monitors_and_dependencies\n from security_monkey.reporter import Reporter\n from security_monkey.task_scheduler.util import CELERY, setup\n+import boto3\n from sqlalchemy.exc import OperationalError, InvalidRequestError, StatementError\n \n \n@@ -216,6 +217,8 @@ def find_changes(account_name, monitor_name, debug=True):\n fix_orphaned_deletions(account_name, monitor_name)\n \n monitors = get_monitors(account_name, [monitor_name], debug)\n+\n+ items = []\n for mon in monitors:\n cw = mon.watcher\n app.logger.info(\"[-->] Looking for changes in account: {}, technology: {}\".format(account_name, cw.index))\n@@ -224,17 +227,26 @@ def find_changes(account_name, monitor_name, debug=True):\n else:\n # Just fetch normally...\n (items, exception_map) = cw.slurp()\n+\n+ _post_metric(\n+ 'queue_items_added',\n+ len(items),\n+ account_name=account_name,\n+ tech=cw.i_am_singular\n+ )\n+\n cw.find_changes(current=items, exception_map=exception_map)\n+\n cw.save()\n \n # Batched monitors have already been monitored, and they will be skipped over.\n- audit_changes([account_name], [monitor_name], False, debug)\n+ audit_changes([account_name], [monitor_name], False, debug, items_count=len(items))\n db.session.close()\n \n return monitors\n \n \n-def audit_changes(accounts, monitor_names, send_report, debug=True, skip_batch=True):\n+def audit_changes(accounts, monitor_names, send_report, debug=True, skip_batch=True, items_count=None):\n \"\"\"\n Audits changes in the accounts\n :param accounts:\n@@ -254,6 +266,13 @@ def audit_changes(accounts, monitor_names, send_report, debug=True, skip_batch=T\n app.logger.debug(\"[-->] Auditing account: {}, technology: {}\".format(account, monitor.watcher.index))\n _audit_changes(account, monitor.auditors, send_report, debug)\n \n+ _post_metric(\n+ 'queue_items_completed',\n+ items_count,\n+ account_name=account,\n+ tech=monitor.watcher.i_am_singular\n+ )\n+\n \n def batch_logic(monitor, current_watcher, account_name, debug):\n 
\"\"\"\n@@ -293,9 +312,23 @@ def batch_logic(monitor, current_watcher, account_name, debug):\n ))\n (items, exception_map) = current_watcher.slurp()\n \n+ _post_metric(\n+ 'queue_items_added',\n+ len(items),\n+ account_name=account_name,\n+ tech=current_watcher.i_am_singular\n+ )\n+\n audit_items = current_watcher.find_changes(current=items, exception_map=exception_map)\n _audit_specific_changes(monitor, audit_items, False, debug)\n \n+ _post_metric(\n+ 'queue_items_completed',\n+ len(items),\n+ account_name=account_name,\n+ tech=current_watcher.i_am_singular\n+ )\n+\n # Delete the items that no longer exist:\n app.logger.debug(\"[-->] Deleting all items for {technology}/{account} that no longer exist.\".format(\n technology=current_watcher.i_am_plural, account=account_name\n@@ -349,3 +382,31 @@ def _audit_specific_changes(monitor, audit_items, send_report, debug=True):\n monitor.watcher.accounts[0])\n db.session.remove()\n store_exception(\"scheduler-audit-changes\", None, e)\n+\n+\n+def _post_metric(event_type, amount, account_name=None, tech=None):\n+ if not app.config.get('METRICS_ENABLED', False):\n+ return\n+\n+ cw_client = boto3.client('cloudwatch', region_name=app.config.get('METRICS_POST_REGION', 'us-east-1'))\n+ cw_client.put_metric_data(\n+ Namespace=app.config.get('METRICS_NAMESPACE', 'securitymonkey'),\n+ MetricData=[\n+ {\n+ 'MetricName': event_type,\n+ 'Timestamp': int(time.time()),\n+ 'Value': amount,\n+ 'Unit': 'Count',\n+ 'Dimensions': [\n+ {\n+ 'Name': 'tech',\n+ 'Value': tech\n+ },\n+ {\n+ 'Name': 'account_number',\n+ 'Value': Account.query.filter(Account.name == account_name).first().identifier\n+ }\n+ ]\n+ }\n+ ]\n+ )\n", "new_path": "security_monkey/task_scheduler/tasks.py", "old_path": "security_monkey/task_scheduler/tasks.py" } ]
9ef1a06814c1aedb30dd4a932eb15cfb4a9a5e06
geopy/geopy
null
null
Drop Python 3.5 support + remove async_generator. async_generator is not compatible with the latest pytest.
[ { "change_type": "MODIFY", "diff": "@@ -40,7 +40,7 @@ jobs:\n strategy:\n fail-fast: false\n matrix: # &test-matrix\n- python-version: [3.5, 3.6, 3.7, 3.8, 3.9, 'pypy3']\n+ python-version: [3.6, 3.7, 3.8, 3.9, 'pypy3']\n experimental: [false]\n include:\n - python-version: '3.10-dev'\n@@ -64,7 +64,7 @@ jobs:\n strategy:\n fail-fast: false\n matrix: # *test-matrix https://github.community/t/support-for-yaml-anchors/16128\n- python-version: [3.5, 3.6, 3.7, 3.8, 3.9, 'pypy3']\n+ python-version: [3.6, 3.7, 3.8, 3.9, 'pypy3']\n experimental: [false]\n include:\n - python-version: '3.10-dev'\n", "new_path": ".github/workflows/ci.yml", "old_path": ".github/workflows/ci.yml" }, { "change_type": "MODIFY", "diff": "@@ -5,7 +5,7 @@ geopy makes it easy for Python developers to locate the coordinates of\n addresses, cities, countries, and landmarks across the globe using third-party\n geocoders and other data sources.\n \n-geopy is tested against CPython (versions 3.5, 3.6, 3.7, 3.8, 3.9)\n+geopy is tested against CPython (versions 3.6, 3.7, 3.8, 3.9)\n and PyPy3. geopy 1.x line also supported CPython 2.7, 3.4 and PyPy2.\n \"\"\"\n \n", "new_path": "geopy/__init__.py", "old_path": "geopy/__init__.py" }, { "change_type": "MODIFY", "diff": "@@ -1,29 +1,14 @@\n #!/usr/bin/env python\n-\"\"\"\n-geopy\n-\"\"\"\n-\n-import sys\n \n from setuptools import find_packages, setup\n \n-if sys.version_info < (3, 5):\n- raise RuntimeError(\n- \"geopy 2 supports Python 3.5 and above. \"\n- \"Use geopy 1.x if you need Python 2.7 or 3.4 support.\"\n- )\n-\n-# This import must be below the above `sys.version_info` check,\n-# because the code being imported here is not compatible with the older\n-# versions of Python.\n-from geopy import __version__ as version # noqa # isort:skip\n+from geopy import __version__ as version\n \n INSTALL_REQUIRES = [\n 'geographiclib<3,>=1.49',\n ]\n \n EXTRAS_DEV_TESTFILES_COMMON = [\n- \"async_generator\",\n ]\n \n EXTRAS_DEV_LINT = [\n@@ -85,7 +70,7 @@ setup(\n },\n license='MIT',\n keywords='geocode geocoding gis geographical maps earth distance',\n- python_requires=\">=3.5\",\n+ python_requires=\">=3.6\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n@@ -97,7 +82,6 @@ setup(\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3\",\n- \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n", "new_path": "setup.py", "old_path": "setup.py" }, { "change_type": "MODIFY", "diff": "@@ -1,3 +1,4 @@\n+import contextlib\n import os\n import ssl\n from unittest.mock import patch\n@@ -5,7 +6,6 @@ from urllib.parse import urljoin\n from urllib.request import getproxies, urlopen\n \n import pytest\n-from async_generator import async_generator, asynccontextmanager, yield_\n \n import geopy.geocoders\n from geopy.adapters import (\n@@ -134,14 +134,13 @@ def adapter_factory(request):\n yield adapter_factory\n \n \n-@asynccontextmanager\n-@async_generator\n+@contextlib.asynccontextmanager\n async def make_dummy_async_geocoder(**kwargs):\n geocoder = DummyGeocoder(**kwargs)\n run_async = isinstance(geocoder.adapter, BaseAsyncAdapter)\n if run_async:\n async with geocoder:\n- await yield_(geocoder)\n+ yield geocoder\n else:\n orig_geocode = geocoder.geocode\n \n@@ -149,7 +148,7 @@ async def 
make_dummy_async_geocoder(**kwargs):\n return orig_geocode(*args, **kwargs)\n \n geocoder.geocode = geocode\n- await yield_(geocoder)\n+ yield geocoder\n \n \n @pytest.mark.parametrize(\"adapter_cls\", NOT_AVAILABLE_ADAPTERS)\n", "new_path": "test/adapters/each_adapter.py", "old_path": "test/adapters/each_adapter.py" }, { "change_type": "MODIFY", "diff": "@@ -2,7 +2,6 @@ import asyncio\n import atexit\n import contextlib\n import importlib\n-import inspect\n import os\n import types\n from collections import defaultdict\n@@ -19,35 +18,6 @@ import geopy.geocoders\n from geopy.adapters import AdapterHTTPError, BaseAsyncAdapter, BaseSyncAdapter\n from geopy.geocoders.base import _DEFAULT_ADAPTER_CLASS\n \n-# pytest-aiohttp calls `inspect.isasyncgenfunction` to detect\n-# async generators in fixtures.\n-# To support Python 3.5 we use `async_generator` library.\n-# However:\n-# - Since Python 3.6 there is a native implementation of\n-# `inspect.isasyncgenfunction`, but it returns False\n-# for `async_generator`'s functions.\n-# - The stock `async_generator.isasyncgenfunction` doesn't detect\n-# generators wrapped in `@pytest.fixture`.\n-#\n-# Thus we resort to monkey-patching it (for now).\n-if getattr(inspect, \"isasyncgenfunction\", None) is not None:\n- # >=py36\n- original_isasyncgenfunction = inspect.isasyncgenfunction\n-else:\n- # ==py35\n- original_isasyncgenfunction = lambda func: False # noqa\n-\n-\n-def isasyncgenfunction(obj):\n- if original_isasyncgenfunction(obj):\n- return True\n- # Detect async_generator function, possibly wrapped in `@pytest.fixture`:\n- # See https://github.com/python-trio/async_generator/blob/v1.10/async_generator/_impl.py#L451-L455 # noqa\n- return bool(getattr(obj, \"_async_gen_function\", None))\n-\n-\n-inspect.isasyncgenfunction = isasyncgenfunction\n-\n \n def load_adapter_cls(adapter_ref):\n actual_adapter_class = _DEFAULT_ADAPTER_CLASS\n", "new_path": "test/conftest.py", "old_path": "test/conftest.py" }, { "change_type": "MODIFY", "diff": "@@ -1,5 +1,4 @@\n import pytest\n-from async_generator import async_generator, yield_\n \n from geopy.exc import ConfigurationError, GeocoderQueryError\n from geopy.geocoders import IGNFrance\n@@ -254,14 +253,13 @@ class TestIGNFranceUsernameAuthProxy(BaseTestGeocoder):\n )\n \n @pytest.fixture(scope='class', autouse=True)\n- @async_generator\n async def start_proxy(_, request, class_geocoder):\n cls = request.cls\n cls.proxy_server = ProxyServerThread(timeout=cls.proxy_timeout)\n cls.proxy_server.start()\n cls.proxy_url = cls.proxy_server.get_proxy_url()\n async with cls.inject_geocoder(cls.make_geocoder(proxies=cls.proxy_url)):\n- await yield_()\n+ yield\n cls.proxy_server.stop()\n cls.proxy_server.join()\n \n", "new_path": "test/geocoders/ignfrance.py", "old_path": "test/geocoders/ignfrance.py" }, { "change_type": "MODIFY", "diff": "@@ -1,10 +1,10 @@\n+import contextlib\n import json\n import os\n from abc import ABC, abstractmethod\n from unittest.mock import ANY, patch\n \n import pytest\n-from async_generator import async_generator, asynccontextmanager, yield_\n \n from geopy import exc\n from geopy.adapters import BaseAsyncAdapter\n@@ -47,7 +47,6 @@ class BaseTestGeocoder(ABC):\n delta = 0.5\n \n @pytest.fixture(scope='class', autouse=True)\n- @async_generator\n async def class_geocoder(_, request, patch_adapter, is_internet_access_allowed):\n \"\"\"Prepare a class-level Geocoder instance.\"\"\"\n cls = request.cls\n@@ -59,13 +58,12 @@ class BaseTestGeocoder(ABC):\n run_async = isinstance(geocoder.adapter, 
BaseAsyncAdapter)\n if run_async:\n async with geocoder:\n- await yield_(geocoder)\n+ yield geocoder\n else:\n- await yield_(geocoder)\n+ yield geocoder\n \n @classmethod\n- @asynccontextmanager\n- @async_generator\n+ @contextlib.asynccontextmanager\n async def inject_geocoder(cls, geocoder):\n \"\"\"An async context manager allowing to inject a custom\n geocoder instance in a single test method which will\n@@ -75,9 +73,9 @@ class BaseTestGeocoder(ABC):\n run_async = isinstance(geocoder.adapter, BaseAsyncAdapter)\n if run_async:\n async with geocoder:\n- await yield_(geocoder)\n+ yield geocoder\n else:\n- await yield_(geocoder)\n+ yield geocoder\n \n @pytest.fixture(autouse=True)\n def ensure_no_geocoder_assignment(self):\n", "new_path": "test/geocoders/util.py", "old_path": "test/geocoders/util.py" }, { "change_type": "MODIFY", "diff": "@@ -1,7 +1,7 @@\n [tox]\n envlist=\n- py{35,36,37,38,39,310,py3},\n- py35{-async,-noextras},\n+ py36{-async,-noextras},\n+ py{37,38,39,310,py3},\n lint,\n rst,\n \n@@ -15,19 +15,18 @@ passenv = *\n whitelist_externals = make\n commands = make {env:GEOPY_TOX_TARGET:test}\n \n-[testenv:py35-async]\n+[testenv:py36-async]\n # Run a single job with asyncio adapter:\n # (not the whole matrix, to avoid spending extra quota)\n setenv = GEOPY_TEST_ADAPTER=geopy.adapters.AioHTTPAdapter\n \n-[testenv:py35-noextras]\n+[testenv:py36-noextras]\n # Ensure `pip install geopy` without any non-test extras doesn't break.\n extras =\n dev-test\n \n [gh-actions]\n python =\n- 3.5: py35\n 3.6: py36\n 3.7: py37\n 3.8: py38\n", "new_path": "tox.ini", "old_path": "tox.ini" } ]
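The commit above swaps the async_generator backports for language-native equivalents. A self-contained sketch of the two replacements (contextlib.asynccontextmanager is available from Python 3.7; the resource dict here is made up):

    import asyncio
    import contextlib

    # Native async generators replace async_generator's yield_():
    async def countdown(n):
        while n > 0:
            yield n          # plain `yield` is allowed inside `async def`
            n -= 1

    # contextlib.asynccontextmanager replaces async_generator.asynccontextmanager:
    @contextlib.asynccontextmanager
    async def open_resource():
        resource = {'ready': True}      # hypothetical setup
        try:
            yield resource
        finally:
            resource['ready'] = False   # teardown runs when the block exits

    async def main():
        async for i in countdown(3):
            print(i)                    # 3, 2, 1
        async with open_resource() as r:
            print(r['ready'])           # True

    asyncio.run(main())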
0b2146c8f794d5642a0a4feb9152916b49fd4be8
mesonbuild/meson
null
null
Use named fields for command_template when generating the ninja command. The command template becomes easier to read with named fields.
[ { "change_type": "MODIFY", "diff": "@@ -1232,15 +1232,16 @@ int dummy;\n return\n rule = 'rule STATIC%s_LINKER\\n' % crstr\n if mesonlib.is_windows():\n- command_templ = ''' command = %s @$out.rsp\n+ command_template = ''' command = {executable} @$out.rsp\n rspfile = $out.rsp\n- rspfile_content = $LINK_ARGS %s $in\n+ rspfile_content = $LINK_ARGS {output_args} $in\n '''\n else:\n- command_templ = ' command = %s $LINK_ARGS %s $in\\n'\n- command = command_templ % (\n- ' '.join(static_linker.get_exelist()),\n- ' '.join(static_linker.get_output_args('$out')))\n+ command_template = ' command = {executable} $LINK_ARGS {output_args} $in\\n'\n+ command = command_template.format(\n+ executable=' '.join(static_linker.get_exelist()),\n+ output_args=' '.join(static_linker.get_output_args('$out'))\n+ )\n description = ' description = Static linking library $out\\n\\n'\n outfile.write(rule)\n outfile.write(command)\n@@ -1273,16 +1274,17 @@ int dummy;\n pass\n rule = 'rule %s%s_LINKER\\n' % (langname, crstr)\n if mesonlib.is_windows():\n- command_template = ''' command = %s @$out.rsp\n+ command_template = ''' command = {executable} @$out.rsp\n rspfile = $out.rsp\n- rspfile_content = %s $ARGS %s $in $LINK_ARGS $aliasing\n+ rspfile_content = {cross_args} $ARGS {output_args} $in $LINK_ARGS $aliasing\n '''\n else:\n- command_template = ' command = %s %s $ARGS %s $in $LINK_ARGS $aliasing\\n'\n- command = command_template % (\n- ' '.join(compiler.get_linker_exelist()),\n- ' '.join(cross_args),\n- ' '.join(compiler.get_linker_output_args('$out')))\n+ command_template = ' command = {executable} {cross_args} $ARGS {output_args} $in $LINK_ARGS $aliasing\\n'\n+ command = command_template.format(\n+ executable=' '.join(compiler.get_linker_exelist()),\n+ cross_args=' '.join(cross_args),\n+ output_args=' '.join(compiler.get_linker_output_args('$out'))\n+ )\n description = ' description = Linking target $out'\n outfile.write(rule)\n outfile.write(command)\n@@ -1386,17 +1388,18 @@ rule FORTRAN_DEP_HACK\n if getattr(self, 'created_llvm_ir_rule', False):\n return\n rule = 'rule llvm_ir{}_COMPILER\\n'.format('_CROSS' if is_cross else '')\n- args = [' '.join([ninja_quote(i) for i in compiler.get_exelist()]),\n- ' '.join(self.get_cross_info_lang_args(compiler.language, is_cross)),\n- ' '.join(compiler.get_output_args('$out')),\n- ' '.join(compiler.get_compile_only_args())]\n if mesonlib.is_windows():\n- command_template = ' command = {} @$out.rsp\\n' \\\n+ command_template = ' command = {executable} @$out.rsp\\n' \\\n ' rspfile = $out.rsp\\n' \\\n- ' rspfile_content = {} $ARGS {} {} $in\\n'\n+ ' rspfile_content = {cross_args} $ARGS {output_args} {compile_only_args} $in\\n'\n else:\n- command_template = ' command = {} {} $ARGS {} {} $in\\n'\n- command = command_template.format(*args)\n+ command_template = ' command = {executable} {cross_args} $ARGS {output_args} {compile_only_args} $in\\n'\n+ command = command_template.format(\n+ executable=' '.join([ninja_quote(i) for i in compiler.get_exelist()]),\n+ cross_args=' '.join(self.get_cross_info_lang_args(compiler.language, is_cross)),\n+ output_args=' '.join(compiler.get_output_args('$out')),\n+ compile_only_args=' '.join(compiler.get_compile_only_args())\n+ )\n description = ' description = Compiling LLVM IR object $in.\\n'\n outfile.write(rule)\n outfile.write(command)\n@@ -1448,18 +1451,19 @@ rule FORTRAN_DEP_HACK\n quoted_depargs.append(d)\n cross_args = self.get_cross_info_lang_args(langname, is_cross)\n if mesonlib.is_windows():\n- command_template = ''' command = %s 
@$out.rsp\n+ command_template = ''' command = {executable} @$out.rsp\n rspfile = $out.rsp\n- rspfile_content = %s $ARGS %s %s %s $in\n+ rspfile_content = {cross_args} $ARGS {dep_args} {output_args} {compile_only_args} $in\n '''\n else:\n- command_template = ' command = %s %s $ARGS %s %s %s $in\\n'\n- command = command_template % (\n- ' '.join([ninja_quote(i) for i in compiler.get_exelist()]),\n- ' '.join(cross_args),\n- ' '.join(quoted_depargs),\n- ' '.join(compiler.get_output_args('$out')),\n- ' '.join(compiler.get_compile_only_args()))\n+ command_template = ' command = {executable} {cross_args} $ARGS {dep_args} {output_args} {compile_only_args} $in\\n'\n+ command = command_template.format(\n+ executable=' '.join([ninja_quote(i) for i in compiler.get_exelist()]),\n+ cross_args=' '.join(cross_args),\n+ dep_args=' '.join(quoted_depargs),\n+ output_args=' '.join(compiler.get_output_args('$out')),\n+ compile_only_args=' '.join(compiler.get_compile_only_args())\n+ )\n description = ' description = Compiling %s object $out\\n' % langname\n if compiler.get_id() == 'msvc':\n deps = ' deps = msvc\\n'\n@@ -1497,12 +1501,13 @@ rule FORTRAN_DEP_HACK\n output = ''\n else:\n output = ' '.join(compiler.get_output_args('$out'))\n- command = \" command = %s %s $ARGS %s %s %s $in\\n\" % (\n- ' '.join(compiler.get_exelist()),\n- ' '.join(cross_args),\n- ' '.join(quoted_depargs),\n- output,\n- ' '.join(compiler.get_compile_only_args()))\n+ command = \" command = {executable} {cross_args} $ARGS {dep_args} {output_args} {compile_only_args} $in\\n\".format(\n+ executable=' '.join(compiler.get_exelist()),\n+ cross_args=' '.join(cross_args),\n+ dep_args=' '.join(quoted_depargs),\n+ output_args=output,\n+ compile_only_args=' '.join(compiler.get_compile_only_args())\n+ )\n description = ' description = Precompiling header %s\\n' % '$in'\n if compiler.get_id() == 'msvc':\n deps = ' deps = msvc\\n'\n", "new_path": "mesonbuild/backend/ninjabackend.py", "old_path": "mesonbuild/backend/ninjabackend.py" } ]
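A toy illustration of the readability change in the commit above: named format fields versus positional %-substitution. The values are made up; only the template shape mirrors the ninja rule.

    executable = 'ar'
    output_args = 'csr $out'

    # Before: positional %-formatting, where the reader must count arguments.
    old = ' command = %s $LINK_ARGS %s $in\n' % (executable, output_args)

    # After: named fields say what each substitution is.
    new = ' command = {executable} $LINK_ARGS {output_args} $in\n'.format(
        executable=executable,
        output_args=output_args,
    )

    assert old == new
    print(new)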
003e0a0610582020d1b213e0c8d16fe63bc6eabe
mesonbuild/meson
null
null
Use the same function for detection of C and C++ compilers. The mechanism is identical, which means there's a high likelihood of unintended divergence. In fact, a slight divergence was already there.
[ { "change_type": "MODIFY", "diff": "@@ -400,9 +400,9 @@ class Environment:\n errmsg += '\\nRunning \"{0}\" gave \"{1}\"'.format(c, e)\n raise EnvironmentException(errmsg)\n \n- def detect_c_compiler(self, want_cross):\n+ def _detect_c_or_cpp_compiler(self, lang, evar, want_cross):\n popen_exceptions = {}\n- compilers, ccache, is_cross, exe_wrap = self._get_compilers('c', 'CC', want_cross)\n+ compilers, ccache, is_cross, exe_wrap = self._get_compilers(lang, evar, want_cross)\n for compiler in compilers:\n if isinstance(compiler, str):\n compiler = [compiler]\n@@ -424,24 +424,34 @@ class Environment:\n continue\n gtype = self.get_gnu_compiler_type(defines)\n version = self.get_gnu_version_from_defines(defines)\n- return GnuCCompiler(ccache + compiler, version, gtype, is_cross, exe_wrap, defines)\n+ cls = GnuCCompiler if lang == 'c' else GnuCPPCompiler\n+ return cls(ccache + compiler, version, gtype, is_cross, exe_wrap, defines)\n if 'clang' in out:\n if 'Apple' in out or for_darwin(want_cross, self):\n cltype = CLANG_OSX\n else:\n cltype = CLANG_STANDARD\n- return ClangCCompiler(ccache + compiler, version, cltype, is_cross, exe_wrap)\n+ cls = ClangCCompiler if lang == 'c' else ClangCPPCompiler\n+ return cls(ccache + compiler, version, cltype, is_cross, exe_wrap)\n if 'Microsoft' in out or 'Microsoft' in err:\n # Visual Studio prints version number to stderr but\n # everything else to stdout. Why? Lord only knows.\n version = search_version(err)\n- return VisualStudioCCompiler(compiler, version, is_cross, exe_wrap)\n+ cls = VisualStudioCCompiler if lang == 'c' else VisualStudioCPPCompiler\n+ return cls(compiler, version, is_cross, exe_wrap)\n if '(ICC)' in out:\n # TODO: add microsoft add check OSX\n inteltype = ICC_STANDARD\n- return IntelCCompiler(ccache + compiler, version, inteltype, is_cross, exe_wrap)\n+ cls = IntelCCompiler if lang == 'c' else IntelCPPCompiler\n+ return cls(ccache + compiler, version, inteltype, is_cross, exe_wrap)\n self._handle_compiler_exceptions(popen_exceptions, compilers)\n \n+ def detect_c_compiler(self, want_cross):\n+ return self._detect_c_or_cpp_compiler('c', 'CC', want_cross)\n+\n+ def detect_cpp_compiler(self, want_cross):\n+ return self._detect_c_or_cpp_compiler('cpp', 'CXX', want_cross)\n+\n def detect_fortran_compiler(self, want_cross):\n popen_exceptions = {}\n compilers, ccache, is_cross, exe_wrap = self._get_compilers('fortran', 'FC', want_cross)\n@@ -496,46 +506,6 @@ class Environment:\n path = os.path.split(__file__)[0]\n return os.path.join(path, 'depfixer.py')\n \n- def detect_cpp_compiler(self, want_cross):\n- popen_exceptions = {}\n- compilers, ccache, is_cross, exe_wrap = self._get_compilers('cpp', 'CXX', want_cross)\n- for compiler in compilers:\n- if isinstance(compiler, str):\n- compiler = [compiler]\n- basename = os.path.basename(compiler[-1]).lower()\n- if basename == 'cl' or basename == 'cl.exe':\n- arg = '/?'\n- else:\n- arg = '--version'\n- try:\n- p, out, err = Popen_safe(compiler + [arg])\n- except OSError as e:\n- popen_exceptions[' '.join(compiler + [arg])] = e\n- continue\n- version = search_version(out)\n- if 'Free Software Foundation' in out:\n- defines = self.get_gnu_compiler_defines(compiler)\n- if not defines:\n- popen_exceptions[compiler] = 'no pre-processor defines'\n- continue\n- gtype = self.get_gnu_compiler_type(defines)\n- version = self.get_gnu_version_from_defines(defines)\n- return GnuCPPCompiler(ccache + compiler, version, gtype, is_cross, exe_wrap, defines)\n- if 'clang' in out:\n- if 'Apple' in out:\n- cltype = 
CLANG_OSX\n- else:\n- cltype = CLANG_STANDARD\n- return ClangCPPCompiler(ccache + compiler, version, cltype, is_cross, exe_wrap)\n- if 'Microsoft' in out or 'Microsoft' in err:\n- version = search_version(err)\n- return VisualStudioCPPCompiler(compiler, version, is_cross, exe_wrap)\n- if '(ICC)' in out:\n- # TODO: add microsoft add check OSX\n- inteltype = ICC_STANDARD\n- return IntelCPPCompiler(ccache + compiler, version, inteltype, is_cross, exe_wrap)\n- self._handle_compiler_exceptions(popen_exceptions, compilers)\n-\n def detect_objc_compiler(self, want_cross):\n popen_exceptions = {}\n compilers, ccache, is_cross, exe_wrap = self._get_compilers('objc', 'OBJC', want_cross)\n", "new_path": "mesonbuild/environment.py", "old_path": "mesonbuild/environment.py" } ]
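The shape of the refactor above, reduced to a toy: one detection routine, with only the compiler class varying per language. The classes are bare stand-ins, not Meson's real ones.

    class GnuCCompiler:
        pass

    class GnuCPPCompiler:
        pass

    def _detect_c_or_cpp_compiler(lang):
        # Shared logic lives here once; only the class choice depends on
        # the language, so C and C++ detection cannot silently diverge.
        cls = GnuCCompiler if lang == 'c' else GnuCPPCompiler
        return cls()

    def detect_c_compiler():
        return _detect_c_or_cpp_compiler('c')

    def detect_cpp_compiler():
        return _detect_c_or_cpp_compiler('cpp')

    print(type(detect_c_compiler()).__name__)    # GnuCCompiler
    print(type(detect_cpp_compiler()).__name__)  # GnuCPPCompiler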
1fbf6300c5d38b12a4347a9327e54a9a315ef8de
mesonbuild/meson
null
null
Use an enum instead of strings for method names. If a non-string value is passed as a method, reject this explicitly with a clear error message rather than trying to match with it and failing.
[ { "change_type": "MODIFY", "diff": "@@ -24,6 +24,7 @@ import sys\n import os, stat, glob, shutil\n import subprocess\n import sysconfig\n+from enum import Enum\n from collections import OrderedDict\n from . mesonlib import MesonException, version_compare, version_compare_many, Popen_safe\n from . import mlog\n@@ -33,21 +34,35 @@ from .environment import detect_cpu_family, for_windows\n class DependencyException(MesonException):\n '''Exceptions raised while trying to find dependencies'''\n \n+class DependencyMethods(Enum):\n+ # Auto means to use whatever dependency checking mechanisms in whatever order meson thinks is best.\n+ AUTO = 'auto'\n+ PKGCONFIG = 'pkg-config'\n+ QMAKE = 'qmake'\n+ # Just specify the standard link arguments, assuming the operating system provides the library.\n+ SYSTEM = 'system'\n+ # Detect using sdl2-config\n+ SDLCONFIG = 'sdlconfig'\n+ # This is only supported on OSX - search the frameworks directory by name.\n+ EXTRAFRAMEWORK = 'extraframework'\n+ # Detect using the sysconfig module.\n+ SYSCONFIG = 'sysconfig'\n+\n class Dependency:\n def __init__(self, type_name, kwargs):\n self.name = \"null\"\n self.is_found = False\n self.type_name = type_name\n- method = kwargs.get('method', 'auto')\n+ method = DependencyMethods(kwargs.get('method', 'auto'))\n \n # Set the detection method. If the method is set to auto, use any available method.\n # If method is set to a specific string, allow only that detection method.\n- if method == \"auto\":\n+ if method == DependencyMethods.AUTO:\n self.methods = self.get_methods()\n elif method in self.get_methods():\n self.methods = [method]\n else:\n- raise MesonException('Unsupported detection method: {}, allowed methods are {}'.format(method, mlog.format_list([\"auto\"] + self.get_methods())))\n+ raise MesonException('Unsupported detection method: {}, allowed methods are {}'.format(method.value, mlog.format_list(map(lambda x: x.value, [DependencyMethods.AUTO] + self.get_methods()))))\n \n def __repr__(self):\n s = '<{0} {1}: {2}>'\n@@ -68,7 +83,7 @@ class Dependency:\n return []\n \n def get_methods(self):\n- return ['auto']\n+ return [DependencyMethods.AUTO]\n \n def get_name(self):\n return self.name\n@@ -268,7 +283,7 @@ class PkgConfigDependency(Dependency):\n return self.libs\n \n def get_methods(self):\n- return ['pkg-config']\n+ return [DependencyMethods.PKGCONFIG]\n \n def check_pkgconfig(self):\n evar = 'PKG_CONFIG'\n@@ -985,10 +1000,10 @@ class QtBaseDependency(Dependency):\n # Keep track of the detection methods used, for logging purposes.\n methods = []\n # Prefer pkg-config, then fallback to `qmake -query`\n- if 'pkg-config' in self.methods:\n+ if DependencyMethods.PKGCONFIG in self.methods:\n self._pkgconfig_detect(mods, env, kwargs)\n methods.append('pkgconfig')\n- if not self.is_found and 'qmake' in self.methods:\n+ if not self.is_found and DependencyMethods.QMAKE in self.methods:\n from_text = self._qmake_detect(mods, env, kwargs)\n methods.append('qmake-' + self.name)\n methods.append('qmake')\n@@ -1137,7 +1152,7 @@ class QtBaseDependency(Dependency):\n return self.largs\n \n def get_methods(self):\n- return ['pkg-config', 'qmake']\n+ return [DependencyMethods.PKGCONFIG, DependencyMethods.QMAKE]\n \n def found(self):\n return self.is_found\n@@ -1301,7 +1316,7 @@ class GLDependency(Dependency):\n self.is_found = False\n self.cargs = []\n self.linkargs = []\n- if 'pkg-config' in self.methods:\n+ if DependencyMethods.PKGCONFIG in self.methods:\n try:\n pcdep = PkgConfigDependency('gl', environment, kwargs)\n if 
pcdep.found():\n@@ -1313,7 +1328,7 @@ class GLDependency(Dependency):\n return\n except Exception:\n pass\n- if 'system' in self.methods:\n+ if DependencyMethods.SYSTEM in self.methods:\n if mesonlib.is_osx():\n self.is_found = True\n self.linkargs = ['-framework', 'OpenGL']\n@@ -1333,9 +1348,9 @@ class GLDependency(Dependency):\n \n def get_methods(self):\n if mesonlib.is_osx() or mesonlib.is_windows():\n- return ['pkg-config', 'system']\n+ return [DependencyMethods.PKGCONFIG, DependencyMethods.SYSTEM]\n else:\n- return ['pkg-config']\n+ return [DependencyMethods.PKGCONFIG]\n \n # There are three different ways of depending on SDL2:\n # sdl2-config, pkg-config and OSX framework\n@@ -1345,7 +1360,7 @@ class SDL2Dependency(Dependency):\n self.is_found = False\n self.cargs = []\n self.linkargs = []\n- if 'pkg-config' in self.methods:\n+ if DependencyMethods.PKGCONFIG in self.methods:\n try:\n pcdep = PkgConfigDependency('sdl2', environment, kwargs)\n if pcdep.found():\n@@ -1358,7 +1373,7 @@ class SDL2Dependency(Dependency):\n except Exception as e:\n mlog.debug('SDL 2 not found via pkgconfig. Trying next, error was:', str(e))\n pass\n- if 'sdlconfig' in self.methods:\n+ if DependencyMethods.SDLCONFIG in self.methods:\n sdlconf = shutil.which('sdl2-config')\n if sdlconf:\n stdo = Popen_safe(['sdl2-config', '--cflags'])[1]\n@@ -1372,7 +1387,7 @@ class SDL2Dependency(Dependency):\n self.version, '(%s)' % sdlconf)\n return\n mlog.debug('Could not find sdl2-config binary, trying next.')\n- if 'extraframework' in self.methods:\n+ if DependencyMethods.EXTRAFRAMEWORK in self.methods:\n if mesonlib.is_osx():\n fwdep = ExtraFrameworkDependency('sdl2', kwargs.get('required', True), None, kwargs)\n if fwdep.found():\n@@ -1397,9 +1412,9 @@ class SDL2Dependency(Dependency):\n \n def get_methods(self):\n if mesonlib.is_osx():\n- return ['pkg-config', 'sdlconfig', 'extraframework']\n+ return [DependencyMethods.PKGCONFIG, DependencyMethods.SDLCONFIG, DependencyMethods.EXTRAFRAMEWORK]\n else:\n- return ['pkg-config', 'sdlconfig']\n+ return [DependencyMethods.PKGCONFIG, DependencyMethods.SDLCONFIG]\n \n class ExtraFrameworkDependency(Dependency):\n def __init__(self, name, required, path, kwargs):\n@@ -1465,7 +1480,7 @@ class Python3Dependency(Dependency):\n self.is_found = False\n # We can only be sure that it is Python 3 at this point\n self.version = '3'\n- if 'pkg-config' in self.methods:\n+ if DependencyMethods.PKGCONFIG in self.methods:\n try:\n pkgdep = PkgConfigDependency('python3', environment, kwargs)\n if pkgdep.found():\n@@ -1477,9 +1492,9 @@ class Python3Dependency(Dependency):\n except Exception:\n pass\n if not self.is_found:\n- if mesonlib.is_windows() and 'sysconfig' in self.methods:\n+ if mesonlib.is_windows() and DependencyMethods.SYSCONFIG in self.methods:\n self._find_libpy3_windows(environment)\n- elif mesonlib.is_osx() and 'extraframework' in self.methods:\n+ elif mesonlib.is_osx() and DependencyMethods.EXTRAFRAMEWORK in self.methods:\n # In OSX the Python 3 framework does not have a version\n # number in its name.\n fw = ExtraFrameworkDependency('python', False, None, kwargs)\n@@ -1536,11 +1551,11 @@ class Python3Dependency(Dependency):\n \n def get_methods(self):\n if mesonlib.is_windows():\n- return ['pkg-config', 'sysconfig']\n+ return [DependencyMethods.PKGCONFIG, DependencyMethods.SYSCONFIG]\n elif mesonlib.is_osx():\n- return ['pkg-config', 'extraframework']\n+ return [DependencyMethods.PKGCONFIG, DependencyMethods.EXTRAFRAMEWORK]\n else:\n- return ['pkg-config']\n+ return 
[DependencyMethods.PKGCONFIG]\n \n def get_version(self):\n return self.version\n@@ -1574,6 +1589,8 @@ def find_external_dependency(name, environment, kwargs):\n required = kwargs.get('required', True)\n if not isinstance(required, bool):\n raise DependencyException('Keyword \"required\" must be a boolean.')\n+ if not isinstance(kwargs.get('method', ''), str):\n+ raise DependencyException('Keyword \"method\" must be a string.')\n lname = name.lower()\n if lname in packages:\n dep = packages[lname](environment, kwargs)\n", "new_path": "mesonbuild/dependencies.py", "old_path": "mesonbuild/dependencies.py" } ]
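A small sketch of the lookup-by-value behaviour the commit above leans on, with the enum trimmed to two members. The pick_method helper is illustrative, not part of Meson.

    from enum import Enum

    class DependencyMethods(Enum):
        AUTO = 'auto'
        PKGCONFIG = 'pkg-config'

    def pick_method(kwargs):
        if not isinstance(kwargs.get('method', ''), str):
            raise TypeError('Keyword "method" must be a string.')
        # Enum(value) resolves a member by its value, so existing string
        # inputs keep working and unknown strings raise ValueError.
        return DependencyMethods(kwargs.get('method', 'auto'))

    print(pick_method({}))                        # DependencyMethods.AUTO
    print(pick_method({'method': 'pkg-config'}))  # DependencyMethods.PKGCONFIG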
30645ed54b4e08611ae4883137d774a4c02b0278
mesonbuild/meson
null
null
Remove extra casts on InterpreterBase.evaluate_statement. The result of this method is always a "native" object, and code coverage (plus a manual inspection) shows that this conversion is never done.
[ { "change_type": "MODIFY", "diff": "@@ -2625,11 +2625,10 @@ different subdirectory.\n raise InterpreterException('Tried to add non-existing source file %s.' % s)\n \n def format_string(self, templ, args):\n- templ = self.to_native(templ)\n if isinstance(args, mparser.ArgumentNode):\n args = args.arguments\n for (i, arg) in enumerate(args):\n- arg = self.to_native(self.evaluate_statement(arg))\n+ arg = self.evaluate_statement(arg)\n if isinstance(arg, bool): # Python boolean is upper case.\n arg = str(arg).lower()\n templ = templ.replace('@{}@'.format(i), str(arg))\n", "new_path": "mesonbuild/interpreter.py", "old_path": "mesonbuild/interpreter.py" }, { "change_type": "MODIFY", "diff": "@@ -198,8 +198,6 @@ class InterpreterBase:\n \n def evaluate_notstatement(self, cur):\n v = self.evaluate_statement(cur.value)\n- if isinstance(v, mparser.BooleanNode):\n- v = v.value\n if not isinstance(v, bool):\n raise InterpreterException('Argument to \"not\" is not a boolean.')\n return not v\n@@ -217,16 +215,8 @@ class InterpreterBase:\n self.evaluate_codeblock(node.elseblock)\n \n def evaluate_comparison(self, node):\n- v1 = self.evaluate_statement(node.left)\n- v2 = self.evaluate_statement(node.right)\n- if self.is_elementary_type(v1):\n- val1 = v1\n- else:\n- val1 = v1.value\n- if self.is_elementary_type(v2):\n- val2 = v2\n- else:\n- val2 = v2.value\n+ val1 = self.evaluate_statement(node.left)\n+ val2 = self.evaluate_statement(node.right)\n if node.ctype == '==':\n return val1 == val2\n elif node.ctype == '!=':\n@@ -244,45 +234,35 @@ class InterpreterBase:\n \n def evaluate_andstatement(self, cur):\n l = self.evaluate_statement(cur.left)\n- if isinstance(l, mparser.BooleanNode):\n- l = l.value\n if not isinstance(l, bool):\n raise InterpreterException('First argument to \"and\" is not a boolean.')\n if not l:\n return False\n r = self.evaluate_statement(cur.right)\n- if isinstance(r, mparser.BooleanNode):\n- r = r.value\n if not isinstance(r, bool):\n raise InterpreterException('Second argument to \"and\" is not a boolean.')\n return r\n \n def evaluate_orstatement(self, cur):\n l = self.evaluate_statement(cur.left)\n- if isinstance(l, mparser.BooleanNode):\n- l = l.get_value()\n if not isinstance(l, bool):\n raise InterpreterException('First argument to \"or\" is not a boolean.')\n if l:\n return True\n r = self.evaluate_statement(cur.right)\n- if isinstance(r, mparser.BooleanNode):\n- r = r.get_value()\n if not isinstance(r, bool):\n raise InterpreterException('Second argument to \"or\" is not a boolean.')\n return r\n \n def evaluate_uminusstatement(self, cur):\n v = self.evaluate_statement(cur.value)\n- if isinstance(v, mparser.NumberNode):\n- v = v.value\n if not isinstance(v, int):\n raise InterpreterException('Argument to negation is not an integer.')\n return -v\n \n def evaluate_arithmeticstatement(self, cur):\n- l = self.to_native(self.evaluate_statement(cur.left))\n- r = self.to_native(self.evaluate_statement(cur.right))\n+ l = self.evaluate_statement(cur.left)\n+ r = self.evaluate_statement(cur.right)\n \n if cur.operation == 'add':\n try:\n@@ -382,8 +362,6 @@ class InterpreterBase:\n obj = self.evaluate_statement(invokable)\n method_name = node.name\n args = node.args\n- if isinstance(obj, mparser.StringNode):\n- obj = obj.get_value()\n if isinstance(obj, str):\n return self.string_method_call(obj, method_name, args)\n if isinstance(obj, bool):\n@@ -402,7 +380,6 @@ class InterpreterBase:\n return obj.method_call(method_name, self.flatten(args), kwargs)\n \n def bool_method_call(self, 
obj, method_name, args):\n- obj = self.to_native(obj)\n (posargs, _) = self.reduce_arguments(args)\n if method_name == 'to_string':\n if not posargs:\n@@ -426,7 +403,6 @@ class InterpreterBase:\n raise InterpreterException('Unknown method \"%s\" for a boolean.' % method_name)\n \n def int_method_call(self, obj, method_name, args):\n- obj = self.to_native(obj)\n (posargs, _) = self.reduce_arguments(args)\n if method_name == 'is_even':\n if not posargs:\n@@ -442,7 +418,6 @@ class InterpreterBase:\n raise InterpreterException('Unknown method \"%s\" for an integer.' % method_name)\n \n def string_method_call(self, obj, method_name, args):\n- obj = self.to_native(obj)\n (posargs, _) = self.reduce_arguments(args)\n if method_name == 'strip':\n return obj.strip()\n@@ -534,8 +509,6 @@ class InterpreterBase:\n raise InvalidArguments('Keyword argument name is not a string.')\n a = args.kwargs[key]\n reduced_kw[key] = self.evaluate_statement(a)\n- if not isinstance(reduced_pos, list):\n- reduced_pos = [reduced_pos]\n self.argument_depth -= 1\n return reduced_pos, reduced_kw\n \n@@ -564,7 +537,6 @@ To specify a keyword argument, use : instead of =.''')\n if not isinstance(var_name, str):\n raise InvalidArguments('Tried to assign value to a non-variable.')\n value = self.evaluate_statement(node.value)\n- value = self.to_native(value)\n if not self.is_assignable(value):\n raise InvalidCode('Tried to assign an invalid value to variable.')\n # For mutable objects we need to make a copy on assignment\n@@ -593,12 +565,6 @@ To specify a keyword argument, use : instead of =.''')\n return self.variables[varname]\n raise InvalidCode('Unknown variable \"%s\".' % varname)\n \n- def to_native(self, arg):\n- if isinstance(arg, (mparser.StringNode, mparser.NumberNode,\n- mparser.BooleanNode)):\n- return arg.value\n- return arg\n-\n def is_assignable(self, value):\n return isinstance(value, (InterpreterObject, dependencies.Dependency,\n str, int, list, mesonlib.File))\n@@ -624,7 +590,7 @@ To specify a keyword argument, use : instead of =.''')\n if len(args) != 2:\n raise InvalidCode('Set_variable takes two arguments.')\n varname = args[0]\n- value = self.to_native(args[1])\n+ value = args[1]\n self.set_variable(varname, value)\n \n # @noKwargs\n", "new_path": "mesonbuild/interpreterbase.py", "old_path": "mesonbuild/interpreterbase.py" } ]
a681348b057e3ef8df3d0e35fd146ec75a880b4a
mesonbuild/meson
null
null
Add some colour to test output. Bold the section names and colourize errors & skips.
[ { "change_type": "MODIFY", "diff": "@@ -250,6 +250,23 @@ def log_text_file(logfile, testdir, stdo, stde):\n executor.shutdown()\n raise StopException()\n \n+\n+def bold(text):\n+ return mlog.bold(text).get_text(mlog.colorize_console)\n+\n+\n+def green(text):\n+ return mlog.green(text).get_text(mlog.colorize_console)\n+\n+\n+def red(text):\n+ return mlog.red(text).get_text(mlog.colorize_console)\n+\n+\n+def yellow(text):\n+ return mlog.yellow(text).get_text(mlog.colorize_console)\n+\n+\n def run_test_inprocess(testdir):\n old_stdout = sys.stdout\n sys.stdout = mystdout = StringIO()\n@@ -475,10 +492,12 @@ def run_tests(all_tests, log_name_base, extra_args):\n \n for name, test_cases, skipped in all_tests:\n current_suite = ET.SubElement(junit_root, 'testsuite', {'name': name, 'tests': str(len(test_cases))})\n+ print()\n if skipped:\n- print('\\nNot running %s tests.\\n' % name)\n+ print(bold('Not running %s tests.' % name))\n else:\n- print('\\nRunning %s tests.\\n' % name)\n+ print(bold('Running %s tests.' % name))\n+ print()\n futures = []\n for t in test_cases:\n # Jenkins screws us over by automatically sorting test cases by name\n@@ -494,7 +513,7 @@ def run_tests(all_tests, log_name_base, extra_args):\n sys.stdout.flush()\n result = result.result()\n if result is None or 'MESON_SKIP_TEST' in result.stdo:\n- print('Skipping:', t)\n+ print(yellow('Skipping:'), t)\n current_test = ET.SubElement(current_suite, 'testcase', {'name': testname,\n 'classname': name})\n ET.SubElement(current_test, 'skipped', {})\n@@ -502,7 +521,7 @@ def run_tests(all_tests, log_name_base, extra_args):\n else:\n without_install = \"\" if len(install_commands) > 0 else \" (without install)\"\n if result.msg != '':\n- print('Failed test{} during {}: {!r}'.format(without_install, result.step.name, t))\n+ print(red('Failed test{} during {}: {!r}'.format(without_install, result.step.name, t)))\n print('Reason:', result.msg)\n failing_tests += 1\n if result.step == BuildStep.configure and result.mlog != no_meson_log_msg:\n@@ -648,9 +667,9 @@ if __name__ == '__main__':\n pass\n for f in pbfiles:\n os.unlink(f)\n- print('\\nTotal passed tests:', passing_tests)\n- print('Total failed tests:', failing_tests)\n- print('Total skipped tests:', skipped_tests)\n+ print('\\nTotal passed tests:', green(str(passing_tests)))\n+ print('Total failed tests:', red(str(failing_tests)))\n+ print('Total skipped tests:', yellow(str(skipped_tests)))\n if failing_tests > 0:\n print('\\nMesonlogs of failing tests\\n')\n for l in failing_logs:\n", "new_path": "run_project_tests.py", "old_path": "run_project_tests.py" }, { "change_type": "MODIFY", "diff": "@@ -23,6 +23,7 @@ import tempfile\n import platform\n from mesonbuild import mesonlib\n from mesonbuild import mesonmain\n+from mesonbuild import mlog\n from mesonbuild.environment import detect_ninja\n from io import StringIO\n from enum import Enum\n@@ -177,7 +178,8 @@ if __name__ == '__main__':\n if 'APPVEYOR' in os.environ and os.environ['arch'] == 'x86':\n os.environ.pop('platform')\n # Run tests\n- print('Running unittests.\\n')\n+ print(mlog.bold('Running unittests.').get_text(mlog.colorize_console))\n+ print()\n units = ['InternalTests', 'AllPlatformTests', 'FailureTests']\n if mesonlib.is_linux():\n units += ['LinuxlikeTests']\n@@ -200,7 +202,8 @@ if __name__ == '__main__':\n returncode += subprocess.call([sys.executable, 'run_unittests.py', '-v'] + units, env=env)\n # Ubuntu packages do not have a binary without -6 suffix.\n if should_run_linux_cross_tests():\n- print('Running 
cross compilation tests.\\n')\n+ print(mlog.bold('Running cross compilation tests.').get_text(mlog.colorize_console))\n+ print()\n returncode += subprocess.call([sys.executable, 'run_cross_test.py', 'cross/ubuntu-armhf.txt'], env=env)\n returncode += subprocess.call([sys.executable, 'run_project_tests.py'] + sys.argv[1:], env=env)\n sys.exit(returncode)\n", "new_path": "run_tests.py", "old_path": "run_tests.py" } ]
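Roughly what the bold/green/red/yellow helpers in the commit above reduce to on a colour-capable terminal; mlog's real helpers also handle consoles without colour support, which this sketch ignores. The sample strings are made up.

    def _ansi(code, text):
        return '\033[{}m{}\033[0m'.format(code, text)

    def bold(text):
        return _ansi('1', text)

    def green(text):
        return _ansi('32', text)

    def red(text):
        return _ansi('31', text)

    def yellow(text):
        return _ansi('33', text)

    print(bold('Running common tests.'))
    print(yellow('Skipping:'), 'frameworks/7 gnome')
    print(red('Failed test during build:'), 'common/12 data')
    print('Total passed tests:', green('431'))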
fab5634916191816ddecf1a2a958fa7ed2eac1ec
mesonbuild/meson
null
null
Add 'Compiler.get_display_language'. Use this when we print language-related information to the console and via the Ninja backend.
[ { "change_type": "MODIFY", "diff": "@@ -1606,7 +1606,7 @@ rule FORTRAN_DEP_HACK\n output_args=' '.join(compiler.get_output_args('$out')),\n compile_only_args=' '.join(compiler.get_compile_only_args())\n )\n- description = ' description = Compiling %s object $out.\\n' % langname.title()\n+ description = ' description = Compiling %s object $out.\\n' % compiler.get_display_language()\n if compiler.get_id() == 'msvc':\n deps = ' deps = msvc\\n'\n else:\n", "new_path": "mesonbuild/backend/ninjabackend.py", "old_path": "mesonbuild/backend/ninjabackend.py" }, { "change_type": "MODIFY", "diff": "@@ -179,7 +179,7 @@ class CCompiler(Compiler):\n return ['-Wl,--out-implib=' + implibname]\n \n def sanity_check_impl(self, work_dir, environment, sname, code):\n- mlog.debug('Sanity testing ' + self.language + ' compiler:', ' '.join(self.exelist))\n+ mlog.debug('Sanity testing ' + self.get_display_language() + ' compiler:', ' '.join(self.exelist))\n mlog.debug('Is cross compiler: %s.' % str(self.is_cross))\n \n extra_flags = []\n", "new_path": "mesonbuild/compilers/c.py", "old_path": "mesonbuild/compilers/c.py" }, { "change_type": "MODIFY", "diff": "@@ -584,6 +584,9 @@ class Compiler:\n def get_language(self):\n return self.language\n \n+ def get_display_language(self):\n+ return self.language.capitalize()\n+\n def get_default_suffix(self):\n return self.default_suffix\n \n", "new_path": "mesonbuild/compilers/compilers.py", "old_path": "mesonbuild/compilers/compilers.py" }, { "change_type": "MODIFY", "diff": "@@ -32,6 +32,9 @@ class CPPCompiler(CCompiler):\n self.language = 'cpp'\n CCompiler.__init__(self, exelist, version, is_cross, exe_wrap)\n \n+ def get_display_language(self):\n+ return 'C++'\n+\n def get_no_stdinc_args(self):\n return ['-nostdinc++']\n \n", "new_path": "mesonbuild/compilers/cpp.py", "old_path": "mesonbuild/compilers/cpp.py" }, { "change_type": "MODIFY", "diff": "@@ -25,6 +25,9 @@ class MonoCompiler(Compiler):\n self.id = 'mono'\n self.monorunner = 'mono'\n \n+ def get_display_language(self):\n+ return 'C#'\n+\n def get_output_args(self, fname):\n return ['-out:' + fname]\n \n", "new_path": "mesonbuild/compilers/cs.py", "old_path": "mesonbuild/compilers/cs.py" }, { "change_type": "MODIFY", "diff": "@@ -24,6 +24,9 @@ class ObjCCompiler(CCompiler):\n self.language = 'objc'\n CCompiler.__init__(self, exelist, version, is_cross, exe_wrap)\n \n+ def get_display_language(self):\n+ return 'Objective-C'\n+\n def sanity_check(self, work_dir, environment):\n # TODO try to use sanity_check_impl instead of duplicated code\n source_name = os.path.join(work_dir, 'sanitycheckobjc.m')\n", "new_path": "mesonbuild/compilers/objc.py", "old_path": "mesonbuild/compilers/objc.py" }, { "change_type": "MODIFY", "diff": "@@ -24,6 +24,9 @@ class ObjCPPCompiler(CPPCompiler):\n self.language = 'objcpp'\n CPPCompiler.__init__(self, exelist, version, is_cross, exe_wrap)\n \n+ def get_display_language(self):\n+ return 'Objective-C++'\n+\n def sanity_check(self, work_dir, environment):\n # TODO try to use sanity_check_impl instead of duplicated code\n source_name = os.path.join(work_dir, 'sanitycheckobjcpp.mm')\n", "new_path": "mesonbuild/compilers/objcpp.py", "old_path": "mesonbuild/compilers/objcpp.py" }, { "change_type": "MODIFY", "diff": "@@ -741,7 +741,7 @@ class CompilerHolder(InterpreterObject):\n def unittest_args_method(self, args, kwargs):\n # At time, only D compilers have this feature.\n if not hasattr(self.compiler, 'get_unittest_args'):\n- raise InterpreterException('This {} compiler has no unittest 
arguments.'.format(self.compiler.language))\n+ raise InterpreterException('This {} compiler has no unittest arguments.'.format(self.compiler.get_display_language()))\n return self.compiler.get_unittest_args()\n \n def has_member_method(self, args, kwargs):\n@@ -971,8 +971,7 @@ class CompilerHolder(InterpreterObject):\n raise InvalidCode('Search directory %s is not an absolute path.' % i)\n linkargs = self.compiler.find_library(libname, self.environment, search_dirs)\n if required and not linkargs:\n- l = self.compiler.language.capitalize()\n- raise InterpreterException('{} library {!r} not found'.format(l, libname))\n+ raise InterpreterException('{} library {!r} not found'.format(self.compiler.get_display_language(), libname))\n lib = dependencies.ExternalLibrary(libname, linkargs, self.environment,\n self.compiler.language)\n return ExternalLibraryHolder(lib)\n@@ -986,7 +985,7 @@ class CompilerHolder(InterpreterObject):\n h = mlog.green('YES')\n else:\n h = mlog.red('NO')\n- mlog.log('Compiler for {} supports argument {}:'.format(self.compiler.language, args[0]), h)\n+ mlog.log('Compiler for {} supports argument {}:'.format(self.compiler.get_display_language(), args[0]), h)\n return result\n \n def has_multi_arguments_method(self, args, kwargs):\n@@ -998,7 +997,7 @@ class CompilerHolder(InterpreterObject):\n h = mlog.red('NO')\n mlog.log(\n 'Compiler for {} supports arguments {}:'.format(\n- self.compiler.language, ' '.join(args)),\n+ self.compiler.get_display_language(), ' '.join(args)),\n h)\n return result\n \n@@ -1794,7 +1793,7 @@ class Interpreter(InterpreterBase):\n continue\n else:\n raise\n- mlog.log('Native %s compiler: ' % lang, mlog.bold(' '.join(comp.get_exelist())), ' (%s %s)' % (comp.id, comp.version), sep='')\n+ mlog.log('Native %s compiler: ' % comp.get_display_language(), mlog.bold(' '.join(comp.get_exelist())), ' (%s %s)' % (comp.id, comp.version), sep='')\n if not comp.get_language() in self.coredata.external_args:\n (preproc_args, compile_args, link_args) = environment.get_args_from_envvars(comp)\n self.coredata.external_preprocess_args[comp.get_language()] = preproc_args\n@@ -1802,7 +1801,7 @@ class Interpreter(InterpreterBase):\n self.coredata.external_link_args[comp.get_language()] = link_args\n self.build.add_compiler(comp)\n if need_cross_compiler:\n- mlog.log('Cross %s compiler: ' % lang, mlog.bold(' '.join(cross_comp.get_exelist())), ' (%s %s)' % (cross_comp.id, cross_comp.version), sep='')\n+ mlog.log('Cross %s compiler: ' % cross_comp.get_display_language(), mlog.bold(' '.join(cross_comp.get_exelist())), ' (%s %s)' % (cross_comp.id, cross_comp.version), sep='')\n self.build.add_cross_compiler(cross_comp)\n if self.environment.is_cross_build() and not need_cross_compiler:\n self.build.add_cross_compiler(comp)\n", "new_path": "mesonbuild/interpreter.py", "old_path": "mesonbuild/interpreter.py" } ]
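The override pattern from the commit above in isolation: a base-class default derived from the internal language id, overridden wherever plain capitalisation reads wrong. Toy classes only, not the real compiler hierarchy.

    class Compiler:
        language = 'fortran'

        def get_display_language(self):
            # Default: capitalise the internal language id.
            return self.language.capitalize()

    class CSharpCompiler(Compiler):
        language = 'cs'

        def get_display_language(self):
            # 'Cs' would look wrong in console output, so override it.
            return 'C#'

    print(Compiler().get_display_language())        # Fortran
    print(CSharpCompiler().get_display_language())  # C#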
bb0e18b73885de374f8461c0e4f3c911fded1e46
mesonbuild/meson
null
null
Use listify and extract_as_list everywhere. They now flatten by default and unhold objects if required. Includes unit tests.
[ { "change_type": "MODIFY", "diff": "@@ -20,7 +20,7 @@ from . import environment\n from . import dependencies\n from . import mlog\n from .mesonlib import File, MesonException, listify, extract_as_list\n-from .mesonlib import flatten, typeslistify, stringlistify, classify_unity_sources\n+from .mesonlib import typeslistify, stringlistify, classify_unity_sources\n from .mesonlib import get_filenames_templates_dict, substitute_values\n from .environment import for_windows, for_darwin, for_cygwin\n from .compilers import is_object, clike_langs, sort_clike, lang_suffixes\n@@ -682,7 +682,7 @@ class BuildTarget(Target):\n if 'd' in self.compilers:\n self.add_compiler_args('d', self.compilers['d'].get_feature_args(dfeatures))\n \n- self.link_args = flatten(kwargs.get('link_args', []))\n+ self.link_args = extract_as_list(kwargs, 'link_args')\n for i in self.link_args:\n if not isinstance(i, str):\n raise InvalidArguments('Link_args arguments must be strings.')\n@@ -856,9 +856,7 @@ You probably should put it in link_with instead.''')\n return self.external_deps\n \n def link(self, target):\n- for t in flatten(target):\n- if hasattr(t, 'held_object'):\n- t = t.held_object\n+ for t in listify(target, unholder=True):\n if not t.is_linkable_target():\n raise InvalidArguments('Link target {!r} is not linkable.'.format(t))\n if isinstance(self, SharedLibrary) and isinstance(t, StaticLibrary) and not t.pic:\n@@ -870,9 +868,7 @@ You probably should put it in link_with instead.''')\n self.link_targets.append(t)\n \n def link_whole(self, target):\n- for t in flatten(target):\n- if hasattr(t, 'held_object'):\n- t = t.held_object\n+ for t in listify(target, unholder=True):\n if not isinstance(t, StaticLibrary):\n raise InvalidArguments('{!r} is not a static library.'.format(t))\n if isinstance(self, SharedLibrary) and not t.pic:\n@@ -915,7 +911,7 @@ You probably should put it in link_with instead.''')\n self.include_dirs += ids\n \n def add_compiler_args(self, language, args):\n- args = flatten(args)\n+ args = listify(args)\n for a in args:\n if not isinstance(a, (str, File)):\n raise InvalidArguments('A non-string passed to compiler args.')\n@@ -1546,11 +1542,9 @@ class CustomTarget(Target):\n return deps\n \n def flatten_command(self, cmd):\n- cmd = listify(cmd)\n+ cmd = listify(cmd, unholder=True)\n final_cmd = []\n for c in cmd:\n- if hasattr(c, 'held_object'):\n- c = c.held_object\n if isinstance(c, str):\n final_cmd.append(c)\n elif isinstance(c, File):\n@@ -1573,12 +1567,7 @@ class CustomTarget(Target):\n \n def process_kwargs(self, kwargs):\n super().process_kwargs(kwargs)\n- sources = flatten(kwargs.get('input', []))\n- self.sources = []\n- for s in sources:\n- if hasattr(s, 'held_object'):\n- s = s.held_object\n- self.sources.append(s)\n+ self.sources = extract_as_list(kwargs, 'input', unholder=True)\n if 'output' not in kwargs:\n raise InvalidArguments('Missing keyword argument \"output\".')\n self.outputs = listify(kwargs['output'])\n", "new_path": "mesonbuild/build.py", "old_path": "mesonbuild/build.py" }, { "change_type": "MODIFY", "diff": "@@ -23,7 +23,7 @@ from enum import Enum\n \n from .. import mlog\n from .. 
import mesonlib\n-from ..mesonlib import MesonException, Popen_safe, flatten, version_compare_many, listify\n+from ..mesonlib import MesonException, Popen_safe, version_compare_many, listify\n \n \n # These must be defined in this file to avoid cyclical references.\n@@ -586,7 +586,7 @@ class ExtraFrameworkDependency(ExternalDependency):\n \n def get_dep_identifier(name, kwargs, want_cross):\n # Need immutable objects since the identifier will be used as a dict key\n- version_reqs = flatten(kwargs.get('version', []))\n+ version_reqs = listify(kwargs.get('version', []))\n if isinstance(version_reqs, list):\n version_reqs = frozenset(version_reqs)\n identifier = (name, version_reqs, want_cross)\n@@ -599,7 +599,7 @@ def get_dep_identifier(name, kwargs, want_cross):\n continue\n # All keyword arguments are strings, ints, or lists (or lists of lists)\n if isinstance(value, list):\n- value = frozenset(flatten(value))\n+ value = frozenset(listify(value))\n identifier += (key, value)\n return identifier\n \n", "new_path": "mesonbuild/dependencies/base.py", "old_path": "mesonbuild/dependencies/base.py" }, { "change_type": "MODIFY", "diff": "@@ -21,7 +21,7 @@ import shutil\n \n from .. import mlog\n from .. import mesonlib\n-from ..mesonlib import version_compare, Popen_safe\n+from ..mesonlib import version_compare, Popen_safe, stringlistify, extract_as_list\n from .base import DependencyException, ExternalDependency, PkgConfigDependency\n \n class GTestDependency(ExternalDependency):\n@@ -185,7 +185,7 @@ class LLVMDependency(ExternalDependency):\n raise DependencyException('Could not generate modules for LLVM.')\n self.modules = shlex.split(out)\n \n- modules = mesonlib.stringlistify(mesonlib.flatten(kwargs.get('modules', [])))\n+ modules = stringlistify(extract_as_list(kwargs, 'modules'))\n for mod in sorted(set(modules)):\n if mod not in self.modules:\n mlog.log('LLVM module', mod, 'found:', mlog.red('NO'))\n", "new_path": "mesonbuild/dependencies/dev.py", "old_path": "mesonbuild/dependencies/dev.py" }, { "change_type": "MODIFY", "diff": "@@ -1560,12 +1560,11 @@ class Interpreter(InterpreterBase):\n version = kwargs.get('version', self.project_version)\n if not isinstance(version, str):\n raise InterpreterException('Version must be a string.')\n- incs = extract_as_list(kwargs, 'include_directories')\n- libs = extract_as_list(kwargs, 'link_with')\n+ incs = extract_as_list(kwargs, 'include_directories', unholder=True)\n+ libs = extract_as_list(kwargs, 'link_with', unholder=True)\n sources = extract_as_list(kwargs, 'sources')\n- sources = self.source_strings_to_files(self.flatten(sources))\n- deps = self.flatten(kwargs.get('dependencies', []))\n- deps = listify(deps)\n+ sources = listify(self.source_strings_to_files(sources), unholder=True)\n+ deps = extract_as_list(kwargs, 'dependencies', unholder=True)\n compile_args = mesonlib.stringlistify(kwargs.get('compile_args', []))\n link_args = mesonlib.stringlistify(kwargs.get('link_args', []))\n final_deps = []\n@@ -1577,13 +1576,8 @@ class Interpreter(InterpreterBase):\n if not isinstance(d, (dependencies.Dependency, dependencies.ExternalLibrary, dependencies.InternalDependency)):\n raise InterpreterException('Dependencies must be external deps')\n final_deps.append(d)\n- dep = dependencies.InternalDependency(version,\n- mesonlib.unholder_array(incs),\n- compile_args,\n- link_args,\n- mesonlib.unholder_array(libs),\n- mesonlib.unholder_array(sources),\n- final_deps)\n+ dep = dependencies.InternalDependency(version, incs, compile_args,\n+ link_args, 
libs, sources, final_deps)\n return DependencyHolder(dep)\n \n @noKwargs\n@@ -1638,7 +1632,7 @@ class Interpreter(InterpreterBase):\n 'or not executable'.format(cmd))\n cmd = prog\n expanded_args = []\n- for a in mesonlib.flatten(cargs):\n+ for a in listify(cargs):\n if isinstance(a, str):\n expanded_args.append(a)\n elif isinstance(a, mesonlib.File):\n@@ -2308,11 +2302,7 @@ to directly access options of other subprojects.''')\n raise InterpreterException('Run_target needs at least one positional argument.')\n \n cleaned_args = []\n- for i in mesonlib.flatten(all_args):\n- try:\n- i = i.held_object\n- except AttributeError:\n- pass\n+ for i in listify(all_args, unholder=True):\n if not isinstance(i, (str, build.BuildTarget, build.CustomTarget, dependencies.ExternalProgram, mesonlib.File)):\n mlog.debug('Wrong type:', str(i))\n raise InterpreterException('Invalid argument to run_target.')\n@@ -2383,11 +2373,10 @@ to directly access options of other subprojects.''')\n par = kwargs.get('is_parallel', True)\n if not isinstance(par, bool):\n raise InterpreterException('Keyword argument is_parallel must be a boolean.')\n- cmd_args = extract_as_list(kwargs, 'args')\n+ cmd_args = extract_as_list(kwargs, 'args', unholder=True)\n for i in cmd_args:\n- if not isinstance(i, (str, mesonlib.File, TargetHolder)):\n+ if not isinstance(i, (str, mesonlib.File, build.Target)):\n raise InterpreterException('Command line arguments must be strings, files or targets.')\n- cmd_args = mesonlib.unholder_array(cmd_args)\n env = self.unpack_env_kwarg(kwargs)\n should_fail = kwargs.get('should_fail', False)\n if not isinstance(should_fail, bool):\n@@ -2805,7 +2794,8 @@ different subdirectory.\n elif isinstance(s, str):\n s = mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, s)\n else:\n- raise InterpreterException(\"Source item is not string or File-type object.\")\n+ raise InterpreterException('Source item is {!r} instead of '\n+ 'string or File-type object'.format(s))\n results.append(s)\n return results\n \n@@ -2831,7 +2821,7 @@ different subdirectory.\n if not args:\n raise InterpreterException('Target does not have a name.')\n name = args[0]\n- sources = args[1:]\n+ sources = listify(args[1:])\n if self.environment.is_cross_build():\n if kwargs.get('native', False):\n is_cross = False\n@@ -2839,19 +2829,14 @@ different subdirectory.\n is_cross = True\n else:\n is_cross = False\n- try:\n- kw_src = self.flatten(kwargs['sources'])\n- kw_src = listify(kw_src)\n- except KeyError:\n- kw_src = []\n- sources += kw_src\n+ if 'sources' in kwargs:\n+ sources += listify(kwargs['sources'])\n sources = self.source_strings_to_files(sources)\n- objs = self.flatten(kwargs.get('objects', []))\n- kwargs['dependencies'] = self.flatten(kwargs.get('dependencies', []))\n+ objs = extract_as_list(kwargs, 'objects')\n+ kwargs['dependencies'] = extract_as_list(kwargs, 'dependencies')\n if 'extra_files' in kwargs:\n ef = extract_as_list(kwargs, 'extra_files')\n kwargs['extra_files'] = self.source_strings_to_files(ef)\n- objs = listify(objs)\n self.check_sources_exist(os.path.join(self.source_root, self.subdir), sources)\n if targetholder is ExecutableHolder:\n targetclass = build.Executable\n", "new_path": "mesonbuild/interpreter.py", "old_path": "mesonbuild/interpreter.py" }, { "change_type": "MODIFY", "diff": "@@ -199,9 +199,6 @@ def classify_unity_sources(compilers, sources):\n compsrclist[comp].append(src)\n return compsrclist\n \n-def flatten(item):\n- return listify(item, flatten=True)\n-\n def is_osx():\n 
return platform.system().lower() == 'darwin'\n \n@@ -466,34 +463,45 @@ def replace_if_different(dst, dst_tmp):\n else:\n os.unlink(dst_tmp)\n \n-\n-def listify(item, flatten=True):\n+def listify(item, flatten=True, unholder=False):\n '''\n- Returns a list with all args embedded in a list if they are not of type list.\n+ Returns a list with all args embedded in a list if they are not a list.\n This function preserves order.\n+ @flatten: Convert lists of lists to a flat list\n+ @unholder: Replace each item with the object it holds, if required\n+\n+ Note: unholding only works recursively when flattening\n '''\n if not isinstance(item, list):\n+ if unholder and hasattr(item, 'held_object'):\n+ item = item.held_object\n return [item]\n result = []\n- if flatten:\n- for i in item:\n- if isinstance(i, list):\n- result += listify(i, flatten=True)\n- else:\n- result.append(i)\n- else:\n- for i in item:\n+ for i in item:\n+ if unholder and hasattr(i, 'held_object'):\n+ i = i.held_object\n+ if flatten and isinstance(i, list):\n+ result += listify(i, flatten=True, unholder=unholder)\n+ else:\n result.append(i)\n return result\n \n \n-def extract_as_list(dict_object, *keys, pop=False):\n+def extract_as_list(dict_object, *keys, pop=False, **kwargs):\n '''\n Extracts all values from given dict_object and listifies them.\n '''\n+ result = []\n+ fetch = dict_object.get\n if pop:\n- return flatten([dict_object.pop(key, []) for key in keys])\n- return flatten([dict_object.get(key, []) for key in keys])\n+ fetch = dict_object.pop\n+ # If there's only one key, we don't return a list with one element\n+ if len(keys) == 1:\n+ return listify(fetch(keys[0], []), **kwargs)\n+ # Return a list of values corresponding to *keys\n+ for key in keys:\n+ result.append(listify(fetch(key, []), **kwargs))\n+ return result\n \n \n def typeslistify(item, types):\n@@ -752,15 +760,6 @@ def windows_proof_rmtree(f):\n # Try one last time and throw if it fails.\n shutil.rmtree(f)\n \n-def unholder_array(entries):\n- result = []\n- entries = flatten(entries)\n- for e in entries:\n- if hasattr(e, 'held_object'):\n- e = e.held_object\n- result.append(e)\n- return result\n-\n class OrderedSet(collections.MutableSet):\n \"\"\"A set that preserves the order in which items are added, by first\n insertion.\n", "new_path": "mesonbuild/mesonlib.py", "old_path": "mesonbuild/mesonlib.py" }, { "change_type": "MODIFY", "diff": "@@ -20,7 +20,7 @@ import os\n import copy\n import subprocess\n from . import ModuleReturnValue\n-from ..mesonlib import MesonException, OrderedSet, unholder_array, Popen_safe\n+from ..mesonlib import MesonException, OrderedSet, Popen_safe, extract_as_list\n from ..dependencies import Dependency, PkgConfigDependency, InternalDependency\n from .. import mlog\n from .. 
import mesonlib\n@@ -323,7 +323,7 @@ class GnomeModule(ExtensionModule):\n cflags = OrderedSet()\n ldflags = OrderedSet()\n gi_includes = OrderedSet()\n- deps = unholder_array(deps)\n+ deps = mesonlib.listify(deps, unholder=True)\n \n for dep in deps:\n if isinstance(dep, InternalDependency):\n@@ -415,7 +415,7 @@ class GnomeModule(ExtensionModule):\n raise MesonException('gobject-introspection dependency was not found, gir cannot be generated.')\n ns = kwargs.pop('namespace')\n nsversion = kwargs.pop('nsversion')\n- libsources = mesonlib.flatten(kwargs.pop('sources'))\n+ libsources = mesonlib.extract_as_list(kwargs, 'sources', pop=True)\n girfile = '%s-%s.gir' % (ns, nsversion)\n srcdir = os.path.join(state.environment.get_source_dir(), state.subdir)\n builddir = os.path.join(state.environment.get_build_dir(), state.subdir)\n@@ -524,7 +524,7 @@ class GnomeModule(ExtensionModule):\n raise MesonException('Gir export packages must be str or list')\n \n deps = (girtarget.get_all_link_deps() + girtarget.get_external_deps() +\n- unholder_array(kwargs.pop('dependencies', [])))\n+ extract_as_list(kwargs, 'dependencies', pop=True, unholder=True))\n # Need to recursively add deps on GirTarget sources from our\n # dependencies and also find the include directories needed for the\n # typelib generation custom target below.\n@@ -791,7 +791,7 @@ This will become a hard error in the future.''')\n \n def _get_build_args(self, kwargs, state):\n args = []\n- deps = unholder_array(kwargs.get('dependencies', []))\n+ deps = extract_as_list(kwargs, 'dependencies', unholder=True)\n cflags, ldflags, gi_includes = self._get_dependencies_flags(deps, state, include_rpath=True)\n inc_dirs = mesonlib.extract_as_list(kwargs, 'include_directories')\n for incd in inc_dirs:\n", "new_path": "mesonbuild/modules/gnome.py", "old_path": "mesonbuild/modules/gnome.py" }, { "change_type": "MODIFY", "diff": "@@ -31,6 +31,7 @@ import mesonbuild.compilers\n import mesonbuild.environment\n import mesonbuild.mesonlib\n import mesonbuild.coredata\n+from mesonbuild.interpreter import ObjectHolder\n from mesonbuild.mesonlib import is_linux, is_windows, is_osx, is_cygwin, windows_proof_rmtree\n from mesonbuild.environment import Environment\n from mesonbuild.dependencies import DependencyException\n@@ -62,7 +63,6 @@ def get_soname(fname):\n def get_rpath(fname):\n return get_dynamic_section_entry(fname, r'(?:rpath|runpath)')\n \n-\n class InternalTests(unittest.TestCase):\n \n def test_version_number(self):\n@@ -398,6 +398,49 @@ class InternalTests(unittest.TestCase):\n \n self.assertEqual(forced_value, desired_value)\n \n+ def test_listify(self):\n+ listify = mesonbuild.mesonlib.listify\n+ # Test sanity\n+ self.assertEqual([1], listify(1))\n+ self.assertEqual([], listify([]))\n+ self.assertEqual([1], listify([1]))\n+ # Test flattening\n+ self.assertEqual([1, 2, 3], listify([1, [2, 3]]))\n+ self.assertEqual([1, 2, 3], listify([1, [2, [3]]]))\n+ self.assertEqual([1, [2, [3]]], listify([1, [2, [3]]], flatten=False))\n+ # Test flattening and unholdering\n+ holder1 = ObjectHolder(1)\n+ holder3 = ObjectHolder(3)\n+ self.assertEqual([holder1], listify(holder1))\n+ self.assertEqual([holder1], listify([holder1]))\n+ self.assertEqual([holder1, 2], listify([holder1, 2]))\n+ self.assertEqual([holder1, 2, 3], listify([holder1, 2, [3]]))\n+ self.assertEqual([1], listify(holder1, unholder=True))\n+ self.assertEqual([1], listify([holder1], unholder=True))\n+ self.assertEqual([1, 2], listify([holder1, 2], unholder=True))\n+ self.assertEqual([1, 2, 3], 
listify([holder1, 2, [holder3]], unholder=True))\n+ # Unholding doesn't work recursively when not flattening\n+ self.assertEqual([1, [2], [holder3]], listify([holder1, [2], [holder3]], unholder=True, flatten=False))\n+\n+ def test_extract_as_list(self):\n+ extract = mesonbuild.mesonlib.extract_as_list\n+ # Test sanity\n+ kwargs = {'sources': [1, 2, 3]}\n+ self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))\n+ self.assertEqual(kwargs, {'sources': [1, 2, 3]})\n+ self.assertEqual([1, 2, 3], extract(kwargs, 'sources', pop=True))\n+ self.assertEqual(kwargs, {})\n+ # Test unholding\n+ holder3 = ObjectHolder(3)\n+ kwargs = {'sources': [1, 2, holder3]}\n+ self.assertEqual([1, 2, 3], extract(kwargs, 'sources', unholder=True))\n+ self.assertEqual(kwargs, {'sources': [1, 2, holder3]})\n+ self.assertEqual([1, 2, 3], extract(kwargs, 'sources', unholder=True, pop=True))\n+ self.assertEqual(kwargs, {})\n+ # Test listification\n+ kwargs = {'sources': [1, 2, 3], 'pch_sources': [4, 5, 6]}\n+ self.assertEqual([[1, 2, 3], [4, 5, 6]], extract(kwargs, 'sources', 'pch_sources'))\n+\n \n class BasePlatformTests(unittest.TestCase):\n def setUp(self):\n", "new_path": "run_unittests.py", "old_path": "run_unittests.py" } ]
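The unit tests in the record above pin down the new flatten/unholder semantics of listify(); as a standalone illustration (with a minimal ObjectHolder stand-in rather than Meson's real interpreter holders), the behaviour boils down to:

```python
# Minimal, self-contained sketch of the listify() semantics added in this commit.
# ObjectHolder here is a stand-in for Meson's interpreter holder objects.
class ObjectHolder:
    def __init__(self, obj):
        self.held_object = obj

def listify(item, flatten=True, unholder=False):
    """Wrap non-lists in a list, optionally flattening nested lists and
    replacing holder objects with the object they hold."""
    if not isinstance(item, list):
        if unholder and hasattr(item, 'held_object'):
            item = item.held_object
        return [item]
    result = []
    for i in item:
        if unholder and hasattr(i, 'held_object'):
            i = i.held_object
        if flatten and isinstance(i, list):
            result += listify(i, flatten=True, unholder=unholder)
        else:
            result.append(i)
    return result

# Nested lists are flattened and holders unwrapped in one pass:
assert listify([1, [2, [3]]]) == [1, 2, 3]
assert listify([ObjectHolder(1), 2, [ObjectHolder(3)]], unholder=True) == [1, 2, 3]
# Unholding is not applied inside nested lists when flatten=False:
assert listify([ObjectHolder(1), [2]], unholder=True, flatten=False) == [1, [2]]
```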
cda0e33650341f0a82c7d4164607fd74805e670f
mesonbuild/meson
null
null
Add ConfigToolDependency class This class is meant to abstract away some of the tedium of writing a config-tool wrapper dependency, and to allow these instances to share some basic code that they all need.
[ { "change_type": "MODIFY", "diff": "@@ -24,7 +24,9 @@ from enum import Enum\n \n from .. import mlog\n from .. import mesonlib\n-from ..mesonlib import MesonException, Popen_safe, version_compare_many, listify\n+from ..mesonlib import (\n+ MesonException, Popen_safe, version_compare_many, version_compare, listify\n+)\n \n \n # These must be defined in this file to avoid cyclical references.\n@@ -55,6 +57,8 @@ class DependencyMethods(Enum):\n EXTRAFRAMEWORK = 'extraframework'\n # Detect using the sysconfig module.\n SYSCONFIG = 'sysconfig'\n+ # Specify using a \"program\"-config style tool\n+ CONFIG_TOOL = 'config-tool'\n \n \n class Dependency:\n@@ -167,6 +171,94 @@ class ExternalDependency(Dependency):\n return self.compiler\n \n \n+class ConfigToolDependency(ExternalDependency):\n+\n+ \"\"\"Class representing dependencies found using a config tool.\"\"\"\n+\n+ tools = None\n+ tool_name = None\n+\n+ def __init__(self, name, environment, language, kwargs):\n+ super().__init__('config-tool', environment, language, kwargs)\n+ self.name = name\n+ self.tools = listify(kwargs.get('tools', self.tools))\n+\n+ req_version = kwargs.get('version', None)\n+ tool, version = self.find_config(req_version)\n+ self.config = tool\n+ self.is_found = self.report_config(version, req_version)\n+ if not self.is_found:\n+ self.config = None\n+ return\n+ self.version = version\n+\n+ def find_config(self, versions=None):\n+ \"\"\"Helper method that searchs for config tool binaries in PATH and\n+ returns the one that best matches the given version requirements.\n+ \"\"\"\n+ if not isinstance(versions, list) and versions is not None:\n+ versions = listify(versions)\n+\n+ best_match = (None, None)\n+ for tool in self.tools:\n+ try:\n+ p, out = Popen_safe([tool, '--version'])[:2]\n+ except (FileNotFoundError, PermissionError):\n+ continue\n+ if p.returncode != 0:\n+ continue\n+\n+ out = out.strip()\n+ # Some tools, like pcap-config don't supply a version, but also\n+ # dont fail with --version, in that case just assume that there is\n+ # only one verison and return it.\n+ if not out:\n+ return (tool, 'none')\n+ if versions:\n+ is_found = version_compare_many(out, versions)[0]\n+ # This allows returning a found version without a config tool,\n+ # which is useful to inform the user that you found version x,\n+ # but y was required.\n+ if not is_found:\n+ tool = None\n+ if best_match[1]:\n+ if version_compare(out, '> {}'.format(best_match[1])):\n+ best_match = (tool, out)\n+ else:\n+ best_match = (tool, out)\n+\n+ return best_match\n+\n+ def report_config(self, version, req_version):\n+ \"\"\"Helper method to print messages about the tool.\"\"\"\n+ if self.config is None:\n+ if version is not None:\n+ mlog.log('found {} {!r} but need:'.format(self.tool_name, version),\n+ req_version)\n+ else:\n+ mlog.log(\"No {} found; can't detect dependency\".format(self.tool_name))\n+ mlog.log('Dependency {} found:'.format(self.name), mlog.red('NO'))\n+ if self.required:\n+ raise DependencyException('Dependency {} not found'.format(self.name))\n+ return False\n+ mlog.log('Found {}:'.format(self.tool_name), mlog.bold(shutil.which(self.config)),\n+ '({})'.format(version))\n+ mlog.log('Dependency {} found:'.format(self.name), mlog.green('YES'))\n+ return True\n+\n+ def get_config_value(self, args, stage):\n+ p, out, _ = Popen_safe([self.config] + args)\n+ if p.returncode != 0:\n+ if self.required:\n+ raise DependencyException('Could not generate {} for {}'.format(\n+ stage, self.name))\n+ return []\n+ return shlex.split(out)\n+\n+ def 
get_methods(self):\n+ return [DependencyMethods.AUTO, DependencyMethods.CONFIG_TOOL]\n+\n+\n class PkgConfigDependency(ExternalDependency):\n # The class's copy of the pkg-config path. Avoids having to search for it\n # multiple times in the same Meson invocation.\n", "new_path": "mesonbuild/dependencies/base.py", "old_path": "mesonbuild/dependencies/base.py" } ]
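A hedged sketch of how a concrete wrapper dependency might build on the new ConfigToolDependency class: subclass it, list the config binaries to try in `tools`, and use get_config_value() to turn the tool's output into argument lists. The PcapDependency name and the exact flags below are illustrative assumptions, not part of this commit.

```python
from mesonbuild.dependencies.base import ConfigToolDependency

class PcapDependency(ConfigToolDependency):
    # Binaries that find_config() will try, in order, and the name used in log lines.
    tools = ['pcap-config']
    tool_name = 'pcap-config'

    def __init__(self, environment, kwargs):
        super().__init__('pcap', environment, None, kwargs)
        if not self.is_found:
            return
        # get_config_value() runs `pcap-config <args>` and shlex-splits the output.
        self.compile_args = self.get_config_value(['--cflags'], 'compile_args')
        self.link_args = self.get_config_value(['--libs'], 'link_args')
```

Version matching, found/not-found logging and the required/optional error handling are inherited from the base class via find_config() and report_config().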
c59ec8749661f242c6a15634cdb32fab65eda7c8
mesonbuild/meson
null
null
Create GL dependency objects via a factory function This lets them be the correct type of object (e.g. a PkgConfigDependency when found by pkg-config). Factor out the 'method' kwarg processing so it can be used by the factory before the dependency object is constructed.
[ { "change_type": "MODIFY", "diff": "@@ -61,15 +61,8 @@ class DependencyMethods(Enum):\n \n \n class Dependency:\n- def __init__(self, type_name, kwargs):\n- self.name = \"null\"\n- self.version = 'none'\n- self.language = None # None means C-like\n- self.is_found = False\n- self.type_name = type_name\n- self.compile_args = []\n- self.link_args = []\n- self.sources = []\n+ @classmethod\n+ def _process_method_kw(cls, kwargs):\n method = kwargs.get('method', 'auto')\n if method not in [e.value for e in DependencyMethods]:\n raise DependencyException('method {!r} is invalid'.format(method))\n@@ -88,14 +81,27 @@ class Dependency:\n # Set the detection method. If the method is set to auto, use any available method.\n # If method is set to a specific string, allow only that detection method.\n if method == DependencyMethods.AUTO:\n- self.methods = self.get_methods()\n- elif method in self.get_methods():\n- self.methods = [method]\n+ methods = cls.get_methods()\n+ elif method in cls.get_methods():\n+ methods = [method]\n else:\n raise DependencyException(\n 'Unsupported detection method: {}, allowed methods are {}'.format(\n method.value,\n- mlog.format_list([x.value for x in [DependencyMethods.AUTO] + self.get_methods()])))\n+ mlog.format_list([x.value for x in [DependencyMethods.AUTO] + cls.get_methods()])))\n+\n+ return methods\n+\n+ def __init__(self, type_name, kwargs):\n+ self.name = \"null\"\n+ self.version = 'none'\n+ self.language = None # None means C-like\n+ self.is_found = False\n+ self.type_name = type_name\n+ self.compile_args = []\n+ self.link_args = []\n+ self.sources = []\n+ self.methods = self._process_method_kw(kwargs)\n \n def __repr__(self):\n s = '<{0} {1}: {2}>'\n@@ -890,7 +896,12 @@ def find_external_dependency(name, env, kwargs):\n if lname in packages:\n if lname not in _packages_accept_language and 'language' in kwargs:\n raise DependencyException('%s dependency does not accept \"language\" keyword argument' % (lname, ))\n- dep = packages[lname](env, kwargs)\n+ # Create the dependency object using a factory class method, if one\n+ # exists, otherwise it is just constructed directly.\n+ if getattr(packages[lname], '_factory', None):\n+ dep = packages[lname]._factory(env, kwargs)\n+ else:\n+ dep = packages[lname](env, kwargs)\n if required and not dep.found():\n raise DependencyException('Dependency \"%s\" not found' % name)\n return dep\n", "new_path": "mesonbuild/dependencies/base.py", "old_path": "mesonbuild/dependencies/base.py" }, { "change_type": "MODIFY", "diff": "@@ -38,19 +38,6 @@ from .base import ConfigToolDependency\n class GLDependency(ExternalDependency):\n def __init__(self, environment, kwargs):\n super().__init__('gl', environment, None, kwargs)\n- if DependencyMethods.PKGCONFIG in self.methods:\n- try:\n- pcdep = PkgConfigDependency('gl', environment, kwargs)\n- if pcdep.found():\n- self.type_name = 'pkgconfig'\n- self.is_found = True\n- self.compile_args = pcdep.get_compile_args()\n- self.link_args = pcdep.get_link_args()\n- self.version = pcdep.get_version()\n- self.pcdep = pcdep\n- return\n- except Exception:\n- pass\n if DependencyMethods.SYSTEM in self.methods:\n if mesonlib.is_osx():\n self.is_found = True\n@@ -67,6 +54,17 @@ class GLDependency(ExternalDependency):\n self.version = '1'\n return\n \n+ @classmethod\n+ def _factory(cls, environment, kwargs):\n+ if DependencyMethods.PKGCONFIG in cls._process_method_kw(kwargs):\n+ try:\n+ pcdep = PkgConfigDependency('gl', environment, kwargs)\n+ if pcdep.found():\n+ return pcdep\n+ except Exception:\n+ 
pass\n+ return GLDependency(environment, kwargs)\n+\n @staticmethod\n def get_methods():\n if mesonlib.is_osx() or mesonlib.is_windows():\n", "new_path": "mesonbuild/dependencies/ui.py", "old_path": "mesonbuild/dependencies/ui.py" } ]
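The find_external_dependency() change amounts to "call the class's _factory if it defines one"; a toy, self-contained version of that dispatch (the classes below are stand-ins, not Meson's real dependency types) looks like this:

```python
# Toy illustration of the _factory dispatch added in this commit.
class PkgConfigDep:
    def __init__(self, name):
        self.name = name
    def found(self):
        return True  # pretend pkg-config found the package

class GLDep:
    def __init__(self, env, kwargs):
        self.name = 'gl'

    @classmethod
    def _factory(cls, env, kwargs):
        # Prefer the pkg-config variant when it works, fall back to the
        # platform-specific detection otherwise.
        try:
            pcdep = PkgConfigDep('gl')
            if pcdep.found():
                return pcdep
        except Exception:
            pass
        return cls(env, kwargs)

def find_external_dependency(cls, env, kwargs):
    # Use a factory classmethod if the dependency class provides one,
    # otherwise construct the class directly.
    if getattr(cls, '_factory', None):
        return cls._factory(env, kwargs)
    return cls(env, kwargs)

dep = find_external_dependency(GLDep, env=None, kwargs={})
print(type(dep).__name__)  # PkgConfigDep
```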
cf98f5e3705603ae21bef9b0a577bcd001a8c92e
mesonbuild/meson
null
null
Enable searching system crossfile locations on more platforms There's no reason not to also look in these places on Cygwin or OSX. Don't do this on Windows, as these paths aren't meaningful there. Move test_cross_file_system_paths from LinuxlikeTests to AllPlatformTests.
[ { "change_type": "MODIFY", "diff": "@@ -222,17 +222,17 @@ class CoreData:\n (after resolving variables and ~), return that absolute path. Next,\n check if the file is relative to the current source dir. If the path\n still isn't resolved do the following:\n- Linux + BSD:\n+ Windows:\n+ - Error\n+ *:\n - $XDG_DATA_HOME/meson/cross (or ~/.local/share/meson/cross if\n undefined)\n - $XDG_DATA_DIRS/meson/cross (or\n /usr/local/share/meson/cross:/usr/share/meson/cross if undefined)\n - Error\n- *:\n- - Error\n- BSD follows the Linux path and will honor XDG_* if set. This simplifies\n- the implementation somewhat, especially since most BSD users wont set\n- those environment variables.\n+\n+ Non-Windows follows the Linux path and will honor XDG_* if set. This\n+ simplifies the implementation somewhat.\n \"\"\"\n if filename is None:\n return None\n@@ -242,7 +242,7 @@ class CoreData:\n path_to_try = os.path.abspath(filename)\n if os.path.exists(path_to_try):\n return path_to_try\n- if sys.platform == 'linux' or 'bsd' in sys.platform.lower():\n+ if sys.platform != 'win32':\n paths = [\n os.environ.get('XDG_DATA_HOME', os.path.expanduser('~/.local/share')),\n ] + os.environ.get('XDG_DATA_DIRS', '/usr/local/share:/usr/share').split(':')\n", "new_path": "mesonbuild/coredata.py", "old_path": "mesonbuild/coredata.py" }, { "change_type": "MODIFY", "diff": "@@ -1749,6 +1749,53 @@ int main(int argc, char **argv) {\n self._run(ninja,\n workdir=os.path.join(tmpdir, 'builddir'))\n \n+ def test_cross_file_system_paths(self):\n+ if is_windows():\n+ raise unittest.SkipTest('system crossfile paths not defined for Windows (yet)')\n+\n+ testdir = os.path.join(self.common_test_dir, '1 trivial')\n+ cross_content = textwrap.dedent(\"\"\"\\\n+ [binaries]\n+ c = '/usr/bin/cc'\n+ ar = '/usr/bin/ar'\n+ strip = '/usr/bin/ar'\n+\n+ [properties]\n+\n+ [host_machine]\n+ system = 'linux'\n+ cpu_family = 'x86'\n+ cpu = 'i686'\n+ endian = 'little'\n+ \"\"\")\n+\n+ with tempfile.TemporaryDirectory() as d:\n+ dir_ = os.path.join(d, 'meson', 'cross')\n+ os.makedirs(dir_)\n+ with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:\n+ f.write(cross_content)\n+ name = os.path.basename(f.name)\n+\n+ with mock.patch.dict(os.environ, {'XDG_DATA_HOME': d}):\n+ self.init(testdir, ['--cross-file=' + name], inprocess=True)\n+ self.wipe()\n+\n+ with mock.patch.dict(os.environ, {'XDG_DATA_DIRS': d}):\n+ os.environ.pop('XDG_DATA_HOME', None)\n+ self.init(testdir, ['--cross-file=' + name], inprocess=True)\n+ self.wipe()\n+\n+ with tempfile.TemporaryDirectory() as d:\n+ dir_ = os.path.join(d, '.local', 'share', 'meson', 'cross')\n+ os.makedirs(dir_)\n+ with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:\n+ f.write(cross_content)\n+ name = os.path.basename(f.name)\n+\n+ with mock.patch('mesonbuild.coredata.os.path.expanduser', lambda x: x.replace('~', d)):\n+ self.init(testdir, ['--cross-file=' + name], inprocess=True)\n+ self.wipe()\n+\n \n class FailureTests(BasePlatformTests):\n '''\n@@ -2546,50 +2593,6 @@ endian = 'little'\n self.init(testdir, ['-Db_lto=true'], default_args=False)\n self.build('reconfigure')\n \n- def test_cross_file_system_paths(self):\n- testdir = os.path.join(self.common_test_dir, '1 trivial')\n- cross_content = textwrap.dedent(\"\"\"\\\n- [binaries]\n- c = '/usr/bin/cc'\n- ar = '/usr/bin/ar'\n- strip = '/usr/bin/ar'\n-\n- [properties]\n-\n- [host_machine]\n- system = 'linux'\n- cpu_family = 'x86'\n- cpu = 'i686'\n- endian = 'little'\n- \"\"\")\n-\n- with tempfile.TemporaryDirectory() as d:\n- 
dir_ = os.path.join(d, 'meson', 'cross')\n- os.makedirs(dir_)\n- with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:\n- f.write(cross_content)\n- name = os.path.basename(f.name)\n-\n- with mock.patch.dict(os.environ, {'XDG_DATA_HOME': d}):\n- self.init(testdir, ['--cross-file=' + name], inprocess=True)\n- self.wipe()\n-\n- with mock.patch.dict(os.environ, {'XDG_DATA_DIRS': d}):\n- os.environ.pop('XDG_DATA_HOME', None)\n- self.init(testdir, ['--cross-file=' + name], inprocess=True)\n- self.wipe()\n-\n- with tempfile.TemporaryDirectory() as d:\n- dir_ = os.path.join(d, '.local', 'share', 'meson', 'cross')\n- os.makedirs(dir_)\n- with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:\n- f.write(cross_content)\n- name = os.path.basename(f.name)\n-\n- with mock.patch('mesonbuild.coredata.os.path.expanduser', lambda x: x.replace('~', d)):\n- self.init(testdir, ['--cross-file=' + name], inprocess=True)\n- self.wipe()\n-\n def test_vala_generated_source_buildir_inside_source_tree(self):\n '''\n Test that valac outputs generated C files in the expected location when\n", "new_path": "run_unittests.py", "old_path": "run_unittests.py" } ]
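The search order being tested can be written down in a few lines; this sketch mirrors the non-Windows lookup described in the coredata docstring (XDG_DATA_HOME, then each entry of XDG_DATA_DIRS, each suffixed with meson/cross). The 'armhf' file name is only an example.

```python
import os

def candidate_cross_files(filename):
    """Locations Meson searches for a system cross file on non-Windows
    platforms, in the order used by coredata."""
    data_home = os.environ.get('XDG_DATA_HOME',
                               os.path.expanduser('~/.local/share'))
    data_dirs = os.environ.get('XDG_DATA_DIRS',
                               '/usr/local/share:/usr/share').split(':')
    for base in [data_home] + data_dirs:
        yield os.path.join(base, 'meson', 'cross', filename)

print(list(candidate_cross_files('armhf')))
```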
d012b5b997e917a971bca1236a065453493c780d
mesonbuild/meson
null
null
Create a helper for checking if a string has a path component This is used in a number of places, and in some places it is incomplete. Use a helper to ensure it's used properly.
[ { "change_type": "MODIFY", "diff": "@@ -27,7 +27,7 @@ from .. import compilers\n from ..compilers import CompilerArgs\n from ..linkers import ArLinker\n from ..mesonlib import File, MesonException, OrderedSet\n-from ..mesonlib import get_compiler_for_source\n+from ..mesonlib import get_compiler_for_source, has_path_sep\n from .backends import CleanTrees, InstallData\n from ..build import InvalidArguments\n \n@@ -1335,7 +1335,7 @@ int dummy;\n \n # Set runtime-paths so we can run executables without needing to set\n # LD_LIBRARY_PATH, etc in the environment. Doesn't work on Windows.\n- if '/' in target.name or '\\\\' in target.name:\n+ if has_path_sep(target.name):\n # Target names really should not have slashes in them, but\n # unfortunately we did not check for that and some downstream projects\n # now have them. Once slashes are forbidden, remove this bit.\n@@ -2324,7 +2324,7 @@ rule FORTRAN_DEP_HACK\n # FIXME FIXME: The usage of this is a terrible and unreliable hack\n if isinstance(fname, File):\n return fname.subdir != ''\n- return '/' in fname or '\\\\' in fname\n+ return has_path_sep(fname)\n \n # Fortran is a bit weird (again). When you link against a library, just compiling a source file\n # requires the mod files that are output when single files are built. To do this right we would need to\n@@ -2370,7 +2370,7 @@ rule FORTRAN_DEP_HACK\n pch = target.get_pch(lang)\n if not pch:\n continue\n- if '/' not in pch[0] or '/' not in pch[-1]:\n+ if not has_path_sep(pch[0]) or not has_path_sep(pch[-1]):\n msg = 'Precompiled header of {!r} must not be in the same ' \\\n 'directory as source, please put it in a subdirectory.' \\\n ''.format(target.get_basename())\n@@ -2547,7 +2547,7 @@ rule FORTRAN_DEP_HACK\n commands += linker.get_option_link_args(self.environment.coredata.compiler_options)\n # Set runtime-paths so we can run executables without needing to set\n # LD_LIBRARY_PATH, etc in the environment. Doesn't work on Windows.\n- if '/' in target.name or '\\\\' in target.name:\n+ if has_path_sep(target.name):\n # Target names really should not have slashes in them, but\n # unfortunately we did not check for that and some downstream projects\n # now have them. Once slashes are forbidden, remove this bit.\n", "new_path": "mesonbuild/backend/ninjabackend.py", "old_path": "mesonbuild/backend/ninjabackend.py" }, { "change_type": "MODIFY", "diff": "@@ -23,7 +23,7 @@ from . 
import mlog\n from .mesonlib import File, MesonException, listify, extract_as_list\n from .mesonlib import typeslistify, stringlistify, classify_unity_sources\n from .mesonlib import get_filenames_templates_dict, substitute_values\n-from .mesonlib import for_windows, for_darwin, for_cygwin, for_android\n+from .mesonlib import for_windows, for_darwin, for_cygwin, for_android, has_path_sep\n from .compilers import is_object, clike_langs, sort_clike, lang_suffixes\n \n known_basic_kwargs = {'install': True,\n@@ -286,7 +286,7 @@ class EnvironmentVariables:\n \n class Target:\n def __init__(self, name, subdir, subproject, build_by_default):\n- if '/' in name or '\\\\' in name:\n+ if has_path_sep(name):\n # Fix failing test 53 when this becomes an error.\n mlog.warning('''Target \"%s\" has a path separator in its name.\n This is not supported, it can cause unexpected failures and will become\n@@ -1067,7 +1067,7 @@ class Generator:\n raise InvalidArguments('\"output\" may only contain strings.')\n if '@BASENAME@' not in rule and '@PLAINNAME@' not in rule:\n raise InvalidArguments('Every element of \"output\" must contain @BASENAME@ or @PLAINNAME@.')\n- if '/' in rule or '\\\\' in rule:\n+ if has_path_sep(rule):\n raise InvalidArguments('\"outputs\" must not contain a directory separator.')\n if len(outputs) > 1:\n for o in outputs:\n@@ -1666,7 +1666,7 @@ class CustomTarget(Target):\n raise InvalidArguments('Output must not be empty.')\n if i.strip() == '':\n raise InvalidArguments('Output must not consist only of whitespace.')\n- if '/' in i:\n+ if has_path_sep(i):\n raise InvalidArguments('Output must not contain a path segment.')\n if '@INPUT@' in i or '@INPUT0@' in i:\n m = 'Output cannot contain @INPUT@ or @INPUT0@, did you ' \\\n", "new_path": "mesonbuild/build.py", "old_path": "mesonbuild/build.py" }, { "change_type": "MODIFY", "diff": "@@ -21,7 +21,7 @@ from . import optinterpreter\n from . import compilers\n from .wrap import wrap, WrapMode\n from . import mesonlib\n-from .mesonlib import FileMode, Popen_safe, listify, extract_as_list\n+from .mesonlib import FileMode, Popen_safe, listify, extract_as_list, has_path_sep\n from .dependencies import ExternalProgram\n from .dependencies import InternalDependency, Dependency, DependencyException\n from .interpreterbase import InterpreterBase\n@@ -1863,7 +1863,7 @@ external dependencies (including libraries) must go to \"dependencies\".''')\n raise InterpreterException('Subproject name must not contain a \"..\" path segment.')\n if os.path.isabs(dirname):\n raise InterpreterException('Subproject name must not be an absolute path.')\n- if '\\\\' in dirname or '/' in dirname:\n+ if has_path_sep(dirname):\n mlog.warning('Subproject name has a path separator. 
This may cause unexpected behaviour.')\n if dirname in self.subproject_stack:\n fullstack = self.subproject_stack + [dirname]\n", "new_path": "mesonbuild/interpreter.py", "old_path": "mesonbuild/interpreter.py" }, { "change_type": "MODIFY", "diff": "@@ -519,6 +519,12 @@ def get_library_dirs():\n unixdirs += glob('/lib/' + plat + '*')\n return unixdirs\n \n+def has_path_sep(name, sep='/\\\\'):\n+ 'Checks if any of the specified @sep path separators are in @name'\n+ for each in sep:\n+ if each in name:\n+ return True\n+ return False\n \n def do_replacement(regex, line, confdata):\n missing_variables = set()\n", "new_path": "mesonbuild/mesonlib.py", "old_path": "mesonbuild/mesonlib.py" }, { "change_type": "MODIFY", "diff": "@@ -17,6 +17,7 @@ import subprocess\n import shutil\n import argparse\n from .. import mlog\n+from ..mesonlib import has_path_sep\n from . import destdir_join\n from .gettext import read_linguas\n \n@@ -79,7 +80,7 @@ def install_help(srcdir, blddir, sources, media, langs, install_dir, destdir, pr\n elif symlinks:\n srcfile = os.path.join(c_install_dir, m)\n mlog.log('Symlinking %s to %s.' % (outfile, srcfile))\n- if '/' in m or '\\\\' in m:\n+ if has_path_sep(m):\n os.makedirs(os.path.dirname(outfile), exist_ok=True)\n try:\n try:\n@@ -94,7 +95,7 @@ def install_help(srcdir, blddir, sources, media, langs, install_dir, destdir, pr\n # Lang doesn't have media file so copy it over 'C' one\n infile = os.path.join(srcdir, 'C', m)\n mlog.log('Installing %s to %s' % (infile, outfile))\n- if '/' in m or '\\\\' in m:\n+ if has_path_sep(m):\n os.makedirs(os.path.dirname(outfile), exist_ok=True)\n shutil.copyfile(infile, outfile)\n shutil.copystat(infile, outfile)\n", "new_path": "mesonbuild/scripts/yelphelper.py", "old_path": "mesonbuild/scripts/yelphelper.py" } ]
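The new helper is small enough to reproduce in full, together with the cases it is meant to catch (this is the same code as added to mesonlib, shown with example inputs):

```python
def has_path_sep(name, sep='/\\'):
    'Checks if any of the specified @sep path separators are in @name'
    for each in sep:
        if each in name:
            return True
    return False

assert has_path_sep('foo/bar')       # POSIX separator
assert has_path_sep('foo\\bar')      # Windows separator
assert not has_path_sep('foobar')    # plain target name is fine
```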
2febb99eee8ed71c9122db88ca58dd33be0b9550
mesonbuild/meson
null
null
Fix b_ndebug=if-release option Provide get_{type}_options_for_target() methods that unite {type} and builtin options.
[ { "change_type": "MODIFY", "diff": "@@ -89,12 +89,17 @@ class OptionProxy:\n class OptionOverrideProxy:\n '''Mimic an option list but transparently override\n selected option values.'''\n- def __init__(self, overrides, options):\n+ def __init__(self, overrides, *options):\n self.overrides = overrides\n self.options = options\n \n def __getitem__(self, option_name):\n- base_opt = self.options[option_name]\n+ for opts in self.options:\n+ if option_name in opts:\n+ return self._get_override(option_name, opts[option_name])\n+ raise KeyError('Option not found', option_name)\n+\n+ def _get_override(self, option_name, base_opt):\n if option_name in self.overrides:\n return OptionProxy(base_opt.name, base_opt.validate_value(self.overrides[option_name]))\n return base_opt\n@@ -123,6 +128,20 @@ class Backend:\n def get_target_filename_abs(self, target):\n return os.path.join(self.environment.get_build_dir(), self.get_target_filename(target))\n \n+ def get_builtin_options_for_target(self, target):\n+ return OptionOverrideProxy(target.option_overrides,\n+ self.environment.coredata.builtins)\n+\n+ def get_base_options_for_target(self, target):\n+ return OptionOverrideProxy(target.option_overrides,\n+ self.environment.coredata.builtins,\n+ self.environment.coredata.base_options)\n+\n+ def get_compiler_options_for_target(self, target):\n+ return OptionOverrideProxy(target.option_overrides,\n+ # no code depends on builtins for now\n+ self.environment.coredata.compiler_options)\n+\n def get_option_for_target(self, option_name, target):\n if option_name in target.option_overrides:\n override = target.option_overrides[option_name]\n@@ -444,7 +463,7 @@ class Backend:\n # starting from hard-coded defaults followed by build options and so on.\n commands = CompilerArgs(compiler)\n \n- copt_proxy = OptionOverrideProxy(target.option_overrides, self.environment.coredata.compiler_options)\n+ copt_proxy = self.get_compiler_options_for_target(target)\n # First, the trivial ones that are impossible to override.\n #\n # Add -nostdinc/-nostdinc++ if needed; can't be overridden\n", "new_path": "mesonbuild/backend/backends.py", "old_path": "mesonbuild/backend/backends.py" }, { "change_type": "MODIFY", "diff": "@@ -2137,8 +2137,7 @@ rule FORTRAN_DEP_HACK\n return incs\n \n def _generate_single_compile(self, target, compiler, is_generated=False):\n- base_proxy = backends.OptionOverrideProxy(target.option_overrides,\n- self.environment.coredata.base_options)\n+ base_proxy = self.get_base_options_for_target(target)\n # Create an empty commands list, and start adding arguments from\n # various sources in the order in which they must override each other\n commands = CompilerArgs(compiler)\n", "new_path": "mesonbuild/backend/ninjabackend.py", "old_path": "mesonbuild/backend/ninjabackend.py" }, { "change_type": "MODIFY", "diff": "@@ -315,7 +315,9 @@ def get_base_compile_args(options, compiler):\n except KeyError:\n pass\n try:\n- if options['b_ndebug'].value == 'true' or (options['b_ndebug'].value == 'if-release' and options['buildtype'] == 'release'):\n+ if (options['b_ndebug'].value == 'true' or\n+ (options['b_ndebug'].value == 'if-release' and\n+ options['buildtype'].value == 'release')):\n args += ['-DNDEBUG']\n except KeyError:\n pass\n", "new_path": "mesonbuild/compilers/compilers.py", "old_path": "mesonbuild/compilers/compilers.py" } ]
05c43cdcd19db98d53d5c9f1b50028d881471c2f
mesonbuild/meson
null
null
Add 'install_mode' to all installable targets This makes it possible to customize permissions of all installable targets, such as executable(), libraries, man pages, header files and custom or generated targets. This is useful, for instance, to install setuid/setgid binaries, which was hard to accomplish without access to this attribute.
[ { "change_type": "MODIFY", "diff": "@@ -724,6 +724,7 @@ int dummy;\n \"Pass 'false' for outputs that should not be installed and 'true' for\\n\" \\\n 'using the default installation directory for an output.'\n raise MesonException(m.format(t.name, num_out, t.get_outputs(), num_outdirs))\n+ install_mode = t.get_custom_install_mode()\n # Install the target output(s)\n if isinstance(t, build.BuildTarget):\n should_strip = self.get_option_for_target('strip', t)\n@@ -731,7 +732,7 @@ int dummy;\n # Done separately because of strip/aliases/rpath\n if outdirs[0] is not False:\n i = [self.get_target_filename(t), outdirs[0],\n- t.get_aliases(), should_strip, t.install_rpath]\n+ t.get_aliases(), should_strip, t.install_rpath, install_mode]\n d.targets.append(i)\n # On toolchains/platforms that use an import library for\n # linking (separate from the shared library with all the\n@@ -749,7 +750,7 @@ int dummy;\n implib_install_dir,\n # It has no aliases, should not be stripped, and\n # doesn't have an install_rpath\n- {}, False, '']\n+ {}, False, '', install_mode]\n d.targets.append(i)\n # Install secondary outputs. Only used for Vala right now.\n if num_outdirs > 1:\n@@ -758,7 +759,7 @@ int dummy;\n if outdir is False:\n continue\n f = os.path.join(self.get_target_dir(t), output)\n- d.targets.append([f, outdir, {}, False, None])\n+ d.targets.append([f, outdir, {}, False, None, install_mode])\n elif isinstance(t, build.CustomTarget):\n # If only one install_dir is specified, assume that all\n # outputs will be installed into it. This is for\n@@ -770,14 +771,14 @@ int dummy;\n if num_outdirs == 1 and num_out > 1:\n for output in t.get_outputs():\n f = os.path.join(self.get_target_dir(t), output)\n- d.targets.append([f, outdirs[0], {}, False, None])\n+ d.targets.append([f, outdirs[0], {}, False, None, install_mode])\n else:\n for output, outdir in zip(t.get_outputs(), outdirs):\n # User requested that we not install this output\n if outdir is False:\n continue\n f = os.path.join(self.get_target_dir(t), output)\n- d.targets.append([f, outdir, {}, False, None])\n+ d.targets.append([f, outdir, {}, False, None, install_mode])\n \n def generate_custom_install_script(self, d):\n result = []\n@@ -809,7 +810,7 @@ int dummy;\n msg = 'Invalid header type {!r} can\\'t be installed'\n raise MesonException(msg.format(f))\n abspath = f.absolute_path(srcdir, builddir)\n- i = [abspath, outdir]\n+ i = [abspath, outdir, h.get_custom_install_mode()]\n d.headers.append(i)\n \n def generate_man_install(self, d):\n@@ -823,7 +824,7 @@ int dummy;\n subdir = os.path.join(manroot, 'man' + num)\n srcabs = f.absolute_path(self.environment.get_source_dir(), self.environment.get_build_dir())\n dstabs = os.path.join(subdir, os.path.basename(f.fname) + '.gz')\n- i = [srcabs, dstabs]\n+ i = [srcabs, dstabs, m.get_custom_install_mode()]\n d.man.append(i)\n \n def generate_data_install(self, d):\n", "new_path": "mesonbuild/backend/ninjabackend.py", "old_path": "mesonbuild/backend/ninjabackend.py" }, { "change_type": "MODIFY", "diff": "@@ -64,6 +64,7 @@ buildtarget_kwargs = set([\n 'install',\n 'install_rpath',\n 'install_dir',\n+ 'install_mode',\n 'name_prefix',\n 'name_suffix',\n 'native',\n@@ -668,6 +669,9 @@ class BuildTarget(Target):\n def get_custom_install_dir(self):\n return self.install_dir\n \n+ def get_custom_install_mode(self):\n+ return self.install_mode\n+\n def process_kwargs(self, kwargs, environment):\n super().process_kwargs(kwargs)\n self.copy_kwargs(kwargs)\n@@ -745,6 +749,7 @@ This will become a hard error in a 
future Meson release.''')\n # the list index of that item will not be installed\n self.install_dir = typeslistify(kwargs.get('install_dir', [None]),\n (str, bool))\n+ self.install_mode = kwargs.get('install_mode', None)\n main_class = kwargs.get('main_class', '')\n if not isinstance(main_class, str):\n raise InvalidArguments('Main class must be a string')\n@@ -1626,6 +1631,7 @@ class CustomTarget(Target):\n 'capture',\n 'install',\n 'install_dir',\n+ 'install_mode',\n 'build_always',\n 'depends',\n 'depend_files',\n@@ -1774,9 +1780,11 @@ class CustomTarget(Target):\n # If an item in this list is False, the output corresponding to\n # the list index of that item will not be installed\n self.install_dir = typeslistify(kwargs['install_dir'], (str, bool))\n+ self.install_mode = kwargs.get('install_mode', None)\n else:\n self.install = False\n self.install_dir = [None]\n+ self.install_mode = None\n self.build_always = kwargs.get('build_always', False)\n if not isinstance(self.build_always, bool):\n raise InvalidArguments('Argument build_always must be a boolean.')\n@@ -1803,6 +1811,9 @@ class CustomTarget(Target):\n def get_custom_install_dir(self):\n return self.install_dir\n \n+ def get_custom_install_mode(self):\n+ return self.install_mode\n+\n def get_outputs(self):\n return self.outputs\n \n", "new_path": "mesonbuild/build.py", "old_path": "mesonbuild/build.py" }, { "change_type": "MODIFY", "diff": "@@ -577,6 +577,7 @@ class Headers(InterpreterObject):\n self.sources = sources\n self.install_subdir = kwargs.get('subdir', '')\n self.custom_install_dir = kwargs.get('install_dir', None)\n+ self.custom_install_mode = kwargs.get('install_mode', None)\n if self.custom_install_dir is not None:\n if not isinstance(self.custom_install_dir, str):\n raise InterpreterException('Custom_install_dir must be a string.')\n@@ -593,6 +594,9 @@ class Headers(InterpreterObject):\n def get_custom_install_dir(self):\n return self.custom_install_dir\n \n+ def get_custom_install_mode(self):\n+ return self.custom_install_mode\n+\n class DataHolder(InterpreterObject, ObjectHolder):\n def __init__(self, data):\n InterpreterObject.__init__(self)\n@@ -624,6 +628,7 @@ class Man(InterpreterObject):\n self.sources = sources\n self.validate_sources()\n self.custom_install_dir = kwargs.get('install_dir', None)\n+ self.custom_install_mode = kwargs.get('install_mode', None)\n if self.custom_install_dir is not None and not isinstance(self.custom_install_dir, str):\n raise InterpreterException('Custom_install_dir must be a string.')\n \n@@ -639,6 +644,9 @@ class Man(InterpreterObject):\n def get_custom_install_dir(self):\n return self.custom_install_dir\n \n+ def get_custom_install_mode(self):\n+ return self.custom_install_mode\n+\n def get_sources(self):\n return self.sources\n \n@@ -1716,8 +1724,8 @@ permitted_kwargs = {'add_global_arguments': {'language'},\n 'add_test_setup': {'exe_wrapper', 'gdb', 'timeout_multiplier', 'env'},\n 'benchmark': {'args', 'env', 'should_fail', 'timeout', 'workdir', 'suite'},\n 'build_target': known_build_target_kwargs,\n- 'configure_file': {'input', 'output', 'configuration', 'command', 'copy', 'install_dir', 'capture', 'install', 'format', 'output_format'},\n- 'custom_target': {'input', 'output', 'command', 'install', 'install_dir', 'build_always', 'capture', 'depends', 'depend_files', 'depfile', 'build_by_default'},\n+ 'configure_file': {'input', 'output', 'configuration', 'command', 'copy', 'install_dir', 'install_mode', 'capture', 'install', 'format', 'output_format'},\n+ 'custom_target': 
{'input', 'output', 'command', 'install', 'install_dir', 'install_mode', 'build_always', 'capture', 'depends', 'depend_files', 'depfile', 'build_by_default'},\n 'dependency': {'default_options', 'fallback', 'language', 'main', 'method', 'modules', 'optional_modules', 'native', 'required', 'static', 'version', 'private_headers'},\n 'declare_dependency': {'include_directories', 'link_with', 'sources', 'dependencies', 'compile_args', 'link_args', 'link_whole', 'version'},\n 'executable': build.known_exe_kwargs,\n@@ -1725,8 +1733,8 @@ permitted_kwargs = {'add_global_arguments': {'language'},\n 'generator': {'arguments', 'output', 'depfile', 'capture', 'preserve_path_from'},\n 'include_directories': {'is_system'},\n 'install_data': {'install_dir', 'install_mode', 'rename', 'sources'},\n- 'install_headers': {'install_dir', 'subdir'},\n- 'install_man': {'install_dir'},\n+ 'install_headers': {'install_dir', 'install_mode', 'subdir'},\n+ 'install_man': {'install_dir', 'install_mode'},\n 'install_subdir': {'exclude_files', 'exclude_directories', 'install_dir', 'install_mode', 'strip_directory'},\n 'jar': build.known_jar_kwargs,\n 'project': {'version', 'meson_version', 'default_options', 'license', 'subproject_dir'},\n@@ -2932,6 +2940,7 @@ root and issuing %s.\n if len(args) != 1:\n raise InterpreterException('custom_target: Only one positional argument is allowed, and it must be a string name')\n name = args[0]\n+ kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs)\n tg = CustomTargetHolder(build.CustomTarget(name, self.subdir, self.subproject, kwargs), self)\n self.add_target(name, tg.held_object)\n return tg\n@@ -3058,6 +3067,7 @@ root and issuing %s.\n @permittedKwargs(permitted_kwargs['install_headers'])\n def func_install_headers(self, node, args, kwargs):\n source_files = self.source_strings_to_files(args)\n+ kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs)\n h = Headers(source_files, kwargs)\n self.build.headers.append(h)\n return h\n@@ -3065,6 +3075,7 @@ root and issuing %s.\n @permittedKwargs(permitted_kwargs['install_man'])\n def func_install_man(self, node, args, kwargs):\n fargs = self.source_strings_to_files(args)\n+ kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs)\n m = Man(fargs, kwargs)\n self.build.man.append(m)\n return m\n@@ -3115,7 +3126,7 @@ root and issuing %s.\n self.subdir = prev_subdir\n \n def _get_kwarg_install_mode(self, kwargs):\n- if 'install_mode' not in kwargs:\n+ if kwargs.get('install_mode', None) is None:\n return None\n install_mode = []\n mode = mesonlib.typeslistify(kwargs.get('install_mode', []), (str, int))\n@@ -3358,7 +3369,8 @@ root and issuing %s.\n idir = kwargs.get('install_dir', None)\n if isinstance(idir, str) and idir:\n cfile = mesonlib.File.from_built_file(ofile_path, ofile_fname)\n- self.build.data.append(build.Data([cfile], idir))\n+ install_mode = self._get_kwarg_install_mode(kwargs)\n+ self.build.data.append(build.Data([cfile], idir, install_mode))\n return mesonlib.File.from_built_file(self.subdir, output)\n \n @permittedKwargs(permitted_kwargs['include_directories'])\n@@ -3642,6 +3654,7 @@ different subdirectory.\n sources = self.source_strings_to_files(sources)\n objs = extract_as_list(kwargs, 'objects')\n kwargs['dependencies'] = extract_as_list(kwargs, 'dependencies')\n+ kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs)\n if 'extra_files' in kwargs:\n ef = extract_as_list(kwargs, 'extra_files')\n kwargs['extra_files'] = self.source_strings_to_files(ef)\n", "new_path": 
"mesonbuild/interpreter.py", "old_path": "mesonbuild/interpreter.py" }, { "change_type": "MODIFY", "diff": "@@ -148,7 +148,7 @@ def do_copyfile(from_file, to_file):\n selinux_updates.append(to_file)\n append_to_log(to_file)\n \n-def do_copydir(data, src_dir, dst_dir, exclude):\n+def do_copydir(data, src_dir, dst_dir, exclude, install_mode):\n '''\n Copies the contents of directory @src_dir into @dst_dir.\n \n@@ -158,7 +158,7 @@ def do_copydir(data, src_dir, dst_dir, exclude):\n excluded\n foobar\n file\n- do_copydir(..., '/foo', '/dst/dir', {'bar/excluded'}) creates\n+ do_copydir(..., '/foo', '/dst/dir', {'bar/excluded'}, None) creates\n /dst/\n dir/\n bar/\n@@ -170,6 +170,7 @@ def do_copydir(data, src_dir, dst_dir, exclude):\n dst_dir: str, absolute path to the destination directory\n exclude: (set(str), set(str)), tuple of (exclude_files, exclude_dirs),\n each element of the set is a path relative to src_dir.\n+ install_mode: FileMode object, or None to use defaults.\n '''\n if not os.path.isabs(src_dir):\n raise ValueError('src_dir must be absolute, got %s' % src_dir)\n@@ -212,7 +213,7 @@ def do_copydir(data, src_dir, dst_dir, exclude):\n os.mkdir(parent_dir)\n shutil.copystat(os.path.dirname(abs_src), parent_dir)\n shutil.copy2(abs_src, abs_dst, follow_symlinks=False)\n- sanitize_permissions(abs_dst, data.install_umask)\n+ set_mode(abs_dst, install_mode, data.install_umask)\n append_to_log(abs_dst)\n \n def get_destdir_path(d, path):\n@@ -263,8 +264,7 @@ def install_subdirs(d):\n full_dst_dir = get_destdir_path(d, dst_dir)\n print('Installing subdir %s to %s' % (src_dir, full_dst_dir))\n d.dirmaker.makedirs(full_dst_dir, exist_ok=True)\n- do_copydir(d, src_dir, full_dst_dir, exclude)\n- set_mode(full_dst_dir, mode, d.install_umask)\n+ do_copydir(d, src_dir, full_dst_dir, exclude, mode)\n \n def install_data(d):\n for i in d.data:\n@@ -283,6 +283,7 @@ def install_man(d):\n outfilename = get_destdir_path(d, m[1])\n outdir = os.path.dirname(outfilename)\n d.dirmaker.makedirs(outdir, exist_ok=True)\n+ install_mode = m[2]\n print('Installing %s to %s' % (full_source_filename, outdir))\n if outfilename.endswith('.gz') and not full_source_filename.endswith('.gz'):\n with open(outfilename, 'wb') as of:\n@@ -294,7 +295,7 @@ def install_man(d):\n append_to_log(outfilename)\n else:\n do_copyfile(full_source_filename, outfilename)\n- sanitize_permissions(outfilename, d.install_umask)\n+ set_mode(outfilename, install_mode, d.install_umask)\n \n def install_headers(d):\n for t in d.headers:\n@@ -302,10 +303,11 @@ def install_headers(d):\n fname = os.path.basename(fullfilename)\n outdir = get_destdir_path(d, t[1])\n outfilename = os.path.join(outdir, fname)\n+ install_mode = t[2]\n print('Installing %s to %s' % (fname, outdir))\n d.dirmaker.makedirs(outdir, exist_ok=True)\n do_copyfile(fullfilename, outfilename)\n- sanitize_permissions(outfilename, d.install_umask)\n+ set_mode(outfilename, install_mode, d.install_umask)\n \n def run_install_script(d):\n env = {'MESON_SOURCE_ROOT': d.source_dir,\n@@ -364,13 +366,14 @@ def install_targets(d):\n aliases = t[2]\n should_strip = t[3]\n install_rpath = t[4]\n+ install_mode = t[5]\n print('Installing %s to %s' % (fname, outname))\n d.dirmaker.makedirs(outdir, exist_ok=True)\n if not os.path.exists(fname):\n raise RuntimeError('File {!r} could not be found'.format(fname))\n elif os.path.isfile(fname):\n do_copyfile(fname, outname)\n- sanitize_permissions(outname, d.install_umask)\n+ set_mode(outname, install_mode, d.install_umask)\n if should_strip and 
d.strip_bin is not None:\n if fname.endswith('.jar'):\n print('Not stripping jar target:', os.path.basename(fname))\n@@ -387,12 +390,11 @@ def install_targets(d):\n pdb_outname = os.path.splitext(outname)[0] + '.pdb'\n print('Installing pdb file %s to %s' % (pdb_filename, pdb_outname))\n do_copyfile(pdb_filename, pdb_outname)\n- sanitize_permissions(pdb_outname, d.install_umask)\n+ set_mode(pdb_outname, install_mode, d.install_umask)\n elif os.path.isdir(fname):\n fname = os.path.join(d.build_dir, fname.rstrip('/'))\n outname = os.path.join(outdir, os.path.basename(fname))\n- do_copydir(d, fname, outname, None)\n- sanitize_permissions(outname, d.install_umask)\n+ do_copydir(d, fname, outname, None, install_mode)\n else:\n raise RuntimeError('Unknown file type for {!r}'.format(fname))\n printed_symlink_error = False\n", "new_path": "mesonbuild/scripts/meson_install.py", "old_path": "mesonbuild/scripts/meson_install.py" } ]
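As a rough, illustrative aside on what an install_mode such as 'rwsr-sr-x' (a setuid/setgid binary) means numerically: the sketch below translates a 9-character symbolic mode into the bits an installer would hand to os.chmod(). It is generic Python, not Meson's own FileMode code.

```python
import stat

def symbolic_to_mode(perms):
    """Translate a 9-char symbolic permission string into a numeric mode."""
    assert len(perms) == 9
    bits = 0
    flags = [stat.S_IRUSR, stat.S_IWUSR, stat.S_IXUSR,
             stat.S_IRGRP, stat.S_IWGRP, stat.S_IXGRP,
             stat.S_IROTH, stat.S_IWOTH, stat.S_IXOTH]
    for ch, flag in zip(perms, flags):
        # '-' clears the bit; uppercase S/T mean set-id/sticky without execute.
        if ch not in '-ST':
            bits |= flag
    if perms[2] in 'sS':
        bits |= stat.S_ISUID
    if perms[5] in 'sS':
        bits |= stat.S_ISGID
    if perms[8] in 'tT':
        bits |= stat.S_ISVTX
    return bits

print(oct(symbolic_to_mode('rwsr-sr-x')))  # 0o6755
```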
8fb72510c3b7a7a5f08b4b20b7e1480f5868052a
mesonbuild/meson
null
null
Move to coredata some methods handling options Those methods only use the coredata object, so it is better to move them there as coredata methods.
[ { "change_type": "MODIFY", "diff": "@@ -351,6 +351,37 @@ class CoreData:\n def get_external_preprocess_args(self, lang):\n return self.external_preprocess_args[lang]\n \n+ def merge_user_options(self, options):\n+ for (name, value) in options.items():\n+ if name not in self.user_options:\n+ self.user_options[name] = value\n+ else:\n+ oldval = self.user_options[name]\n+ if type(oldval) != type(value):\n+ self.user_options[name] = value\n+\n+ def set_options(self, options):\n+ for o in options:\n+ if '=' not in o:\n+ raise MesonException('Value \"%s\" not of type \"a=b\".' % o)\n+ (k, v) = o.split('=', 1)\n+ if is_builtin_option(k):\n+ self.set_builtin_option(k, v)\n+ elif k in self.backend_options:\n+ tgt = self.backend_options[k]\n+ tgt.set_value(v)\n+ elif k in self.user_options:\n+ tgt = self.user_options[k]\n+ tgt.set_value(v)\n+ elif k in self.compiler_options:\n+ tgt = self.compiler_options[k]\n+ tgt.set_value(v)\n+ elif k in self.base_options:\n+ tgt = self.base_options[k]\n+ tgt.set_value(v)\n+ else:\n+ raise MesonException('Unknown option %s.' % k)\n+\n def load(build_dir):\n filename = os.path.join(build_dir, 'meson-private', 'coredata.dat')\n load_fail_msg = 'Coredata file {!r} is corrupted. Try with a fresh build tree.'.format(filename)\n", "new_path": "mesonbuild/coredata.py", "old_path": "mesonbuild/coredata.py" }, { "change_type": "MODIFY", "diff": "@@ -386,15 +386,6 @@ class Environment:\n previous_is_plaind = i == '-D'\n return False\n \n- def merge_options(self, options):\n- for (name, value) in options.items():\n- if name not in self.coredata.user_options:\n- self.coredata.user_options[name] = value\n- else:\n- oldval = self.coredata.user_options[name]\n- if type(oldval) != type(value):\n- self.coredata.user_options[name] = value\n-\n @staticmethod\n def get_gnu_compiler_defines(compiler):\n \"\"\"\n", "new_path": "mesonbuild/environment.py", "old_path": "mesonbuild/environment.py" }, { "change_type": "MODIFY", "diff": "@@ -2306,7 +2306,7 @@ to directly access options of other subprojects.''')\n self.build.environment.cmd_line_options.projectoptions,\n )\n oi.process(self.option_file)\n- self.build.environment.merge_options(oi.options)\n+ self.coredata.merge_user_options(oi.options)\n self.set_backend()\n self.active_projectname = proj_name\n self.project_version = kwargs.get('version', 'undefined')\n", "new_path": "mesonbuild/interpreter.py", "old_path": "mesonbuild/interpreter.py" }, { "change_type": "MODIFY", "diff": "@@ -42,6 +42,9 @@ class Conf:\n def clear_cache(self):\n self.coredata.deps = {}\n \n+ def set_options(self, options):\n+ self.coredata.set_options(options)\n+\n def save(self):\n # Only called if something has changed so overwrite unconditionally.\n coredata.save(self.coredata, self.build_dir)\n@@ -94,28 +97,6 @@ class Conf:\n else:\n print(' {0:{width[0]}} {1:{width[1]}} {3:{width[3]}}'.format(*line, width=col_widths))\n \n- def set_options(self, options):\n- for o in options:\n- if '=' not in o:\n- raise ConfException('Value \"%s\" not of type \"a=b\".' 
% o)\n- (k, v) = o.split('=', 1)\n- if coredata.is_builtin_option(k):\n- self.coredata.set_builtin_option(k, v)\n- elif k in self.coredata.backend_options:\n- tgt = self.coredata.backend_options[k]\n- tgt.set_value(v)\n- elif k in self.coredata.user_options:\n- tgt = self.coredata.user_options[k]\n- tgt.set_value(v)\n- elif k in self.coredata.compiler_options:\n- tgt = self.coredata.compiler_options[k]\n- tgt.set_value(v)\n- elif k in self.coredata.base_options:\n- tgt = self.coredata.base_options[k]\n- tgt.set_value(v)\n- else:\n- raise ConfException('Unknown option %s.' % k)\n-\n def print_conf(self):\n print('Core properties:')\n print(' Source dir', self.build.environment.source_dir)\n", "new_path": "mesonbuild/mconf.py", "old_path": "mesonbuild/mconf.py" } ]
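A reduced, self-contained version of what the relocated set_options() does with its "key=value" strings (toy option store and exception; the real method dispatches across the builtin, backend, user, compiler and base option tables):

```python
# Toy sketch of the option handling that moved into coredata.set_options().
class MesonException(Exception):
    pass

def set_options(store, options):
    for o in options:
        if '=' not in o:
            raise MesonException('Value "%s" not of type "a=b".' % o)
        k, v = o.split('=', 1)
        if k not in store:
            raise MesonException('Unknown option %s.' % k)
        store[k] = v

opts = {'buildtype': 'debug', 'strip': 'false'}
set_options(opts, ['buildtype=release', 'strip=true'])
assert opts == {'buildtype': 'release', 'strip': 'true'}
```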
f4d60acaa924b4735fc71b7a9c716fbea824c877
mesonbuild/meson
null
null
Remove had_argument_for(); it is not used anymore This also means we no longer need to keep the original command-line arguments.
[ { "change_type": "MODIFY", "diff": "@@ -266,7 +266,7 @@ class Environment:\n private_dir = 'meson-private'\n log_dir = 'meson-logs'\n \n- def __init__(self, source_dir, build_dir, options, original_cmd_line_args):\n+ def __init__(self, source_dir, build_dir, options):\n self.source_dir = source_dir\n self.build_dir = build_dir\n self.scratch_dir = os.path.join(build_dir, Environment.private_dir)\n@@ -289,7 +289,6 @@ class Environment:\n else:\n self.cross_info = None\n self.cmd_line_options = options\n- self.original_cmd_line_args = original_cmd_line_args\n \n # List of potential compilers.\n if mesonlib.is_windows():\n@@ -374,18 +373,6 @@ class Environment:\n def is_library(self, fname):\n return is_library(fname)\n \n- def had_argument_for(self, option):\n- trial1 = coredata.get_builtin_option_cmdline_name(option)\n- trial2 = '-D' + option\n- previous_is_plaind = False\n- for i in self.original_cmd_line_args:\n- if i.startswith(trial1) or i.startswith(trial2):\n- return True\n- if previous_is_plaind and i.startswith(option):\n- return True\n- previous_is_plaind = i == '-D'\n- return False\n-\n @staticmethod\n def get_gnu_compiler_defines(compiler):\n \"\"\"\n", "new_path": "mesonbuild/environment.py", "old_path": "mesonbuild/environment.py" }, { "change_type": "MODIFY", "diff": "@@ -54,10 +54,9 @@ def wrapmodetype(string):\n \n class MesonApp:\n \n- def __init__(self, dir1, dir2, handshake, options, original_cmd_line_args):\n+ def __init__(self, dir1, dir2, handshake, options):\n (self.source_dir, self.build_dir) = self.validate_dirs(dir1, dir2, handshake)\n self.options = options\n- self.original_cmd_line_args = original_cmd_line_args\n \n def has_build_file(self, dirname):\n fname = os.path.join(dirname, environment.build_filename)\n@@ -109,7 +108,7 @@ class MesonApp:\n env.coredata.pkgconf_envvar = curvar\n \n def generate(self):\n- env = environment.Environment(self.source_dir, self.build_dir, self.options, self.original_cmd_line_args)\n+ env = environment.Environment(self.source_dir, self.build_dir, self.options)\n mlog.initialize(env.get_log_dir())\n with mesonlib.BuildDirLock(self.build_dir):\n self._generate(env)\n@@ -334,7 +333,7 @@ def run(original_args, mainfile):\n else:\n dir2 = '.'\n try:\n- app = MesonApp(dir1, dir2, handshake, options, original_args)\n+ app = MesonApp(dir1, dir2, handshake, options)\n except Exception as e:\n # Log directory does not exist, so just print\n # to stdout.\n", "new_path": "mesonbuild/mesonmain.py", "old_path": "mesonbuild/mesonmain.py" }, { "change_type": "MODIFY", "diff": "@@ -412,7 +412,7 @@ def have_d_compiler():\n \n def have_objc_compiler():\n with AutoDeletedDir(tempfile.mkdtemp(prefix='b ', dir='.')) as build_dir:\n- env = environment.Environment(None, build_dir, get_fake_options('/'), [])\n+ env = environment.Environment(None, build_dir, get_fake_options('/'))\n try:\n objc_comp = env.detect_objc_compiler(False)\n except mesonlib.MesonException:\n@@ -427,7 +427,7 @@ def have_objc_compiler():\n \n def have_objcpp_compiler():\n with AutoDeletedDir(tempfile.mkdtemp(prefix='b ', dir='.')) as build_dir:\n- env = environment.Environment(None, build_dir, get_fake_options('/'), [])\n+ env = environment.Environment(None, build_dir, get_fake_options('/'))\n try:\n objcpp_comp = env.detect_objcpp_compiler(False)\n except mesonlib.MesonException:\n", "new_path": "run_project_tests.py", "old_path": "run_project_tests.py" }, { "change_type": "MODIFY", "diff": "@@ -900,7 +900,7 @@ class AllPlatformTests(BasePlatformTests):\n 
https://github.com/mesonbuild/meson/issues/1355\n '''\n testdir = os.path.join(self.common_test_dir, '3 static')\n- env = Environment(testdir, self.builddir, get_fake_options(self.prefix), [])\n+ env = Environment(testdir, self.builddir, get_fake_options(self.prefix))\n cc = env.detect_c_compiler(False)\n static_linker = env.detect_static_linker(cc)\n if is_windows():\n@@ -1181,7 +1181,7 @@ class AllPlatformTests(BasePlatformTests):\n if not is_windows():\n langs += [('objc', 'OBJC'), ('objcpp', 'OBJCXX')]\n testdir = os.path.join(self.unit_test_dir, '5 compiler detection')\n- env = Environment(testdir, self.builddir, get_fake_options(self.prefix), [])\n+ env = Environment(testdir, self.builddir, get_fake_options(self.prefix))\n for lang, evar in langs:\n # Detect with evar and do sanity checks on that\n if evar in os.environ:\n@@ -1283,7 +1283,7 @@ class AllPlatformTests(BasePlatformTests):\n def test_always_prefer_c_compiler_for_asm(self):\n testdir = os.path.join(self.common_test_dir, '141 c cpp and asm')\n # Skip if building with MSVC\n- env = Environment(testdir, self.builddir, get_fake_options(self.prefix), [])\n+ env = Environment(testdir, self.builddir, get_fake_options(self.prefix))\n if env.detect_c_compiler(False).get_id() == 'msvc':\n raise unittest.SkipTest('MSVC can\\'t compile assembly')\n self.init(testdir)\n@@ -1541,7 +1541,7 @@ int main(int argc, char **argv) {\n self.assertPathExists(os.path.join(testdir, i))\n \n def detect_prebuild_env(self):\n- env = Environment('', self.builddir, get_fake_options(self.prefix), [])\n+ env = Environment('', self.builddir, get_fake_options(self.prefix))\n cc = env.detect_c_compiler(False)\n stlinker = env.detect_static_linker(cc)\n if mesonbuild.mesonlib.is_windows():\n@@ -1709,7 +1709,7 @@ int main(int argc, char **argv) {\n '--libdir=' + libdir])\n # Find foo dependency\n os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir\n- env = Environment(testdir, self.builddir, get_fake_options(self.prefix), [])\n+ env = Environment(testdir, self.builddir, get_fake_options(self.prefix))\n kwargs = {'required': True, 'silent': True}\n foo_dep = PkgConfigDependency('libfoo', env, kwargs)\n # Ensure link_args are properly quoted\n@@ -1976,7 +1976,7 @@ recommended as it can lead to undefined behaviour on some platforms''')\n testdirbase = os.path.join(self.unit_test_dir, '26 guessed linker dependencies')\n testdirlib = os.path.join(testdirbase, 'lib')\n extra_args = None\n- env = Environment(testdirlib, self.builddir, get_fake_options(self.prefix), [])\n+ env = Environment(testdirlib, self.builddir, get_fake_options(self.prefix))\n if env.detect_c_compiler(False).get_id() != 'msvc':\n # static libraries are not linkable with -l with msvc because meson installs them\n # as .a files which unix_args_to_native will not know as it expects libraries to use\n@@ -2123,7 +2123,7 @@ recommended as it can lead to undefined behaviour on some platforms''')\n with open('docs/markdown/Builtin-options.md') as f:\n md = f.read()\n self.assertIsNotNone(md)\n- env = Environment('.', self.builddir, get_fake_options(self.prefix), [])\n+ env = Environment('.', self.builddir, get_fake_options(self.prefix))\n # FIXME: Support other compilers\n cc = env.detect_c_compiler(False)\n cpp = env.detect_cpp_compiler(False)\n@@ -2293,7 +2293,7 @@ class FailureTests(BasePlatformTests):\n '''\n Test that when we can't detect objc or objcpp, we fail gracefully.\n '''\n- env = Environment('', self.builddir, get_fake_options(self.prefix), [])\n+ env = Environment('', self.builddir, 
get_fake_options(self.prefix))\n try:\n env.detect_objc_compiler(False)\n env.detect_objcpp_compiler(False)\n@@ -2403,7 +2403,7 @@ class WindowsTests(BasePlatformTests):\n ExternalLibraryHolder from build files.\n '''\n testdir = os.path.join(self.platform_test_dir, '1 basic')\n- env = Environment(testdir, self.builddir, get_fake_options(self.prefix), [])\n+ env = Environment(testdir, self.builddir, get_fake_options(self.prefix))\n cc = env.detect_c_compiler(False)\n if cc.id != 'msvc':\n raise unittest.SkipTest('Not using MSVC')\n@@ -2416,7 +2416,7 @@ class WindowsTests(BasePlatformTests):\n testdir = os.path.join(self.platform_test_dir, '5 resources')\n \n # resource compiler depfile generation is not yet implemented for msvc\n- env = Environment(testdir, self.builddir, get_fake_options(self.prefix), [])\n+ env = Environment(testdir, self.builddir, get_fake_options(self.prefix))\n depfile_works = env.detect_c_compiler(False).get_id() != 'msvc'\n \n self.init(testdir)\n@@ -2505,7 +2505,7 @@ class LinuxlikeTests(BasePlatformTests):\n '''\n testdir = os.path.join(self.common_test_dir, '51 pkgconfig-gen')\n self.init(testdir)\n- env = Environment(testdir, self.builddir, get_fake_options(self.prefix), [])\n+ env = Environment(testdir, self.builddir, get_fake_options(self.prefix))\n kwargs = {'required': True, 'silent': True}\n os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir\n foo_dep = PkgConfigDependency('libfoo', env, kwargs)\n@@ -2755,7 +2755,7 @@ class LinuxlikeTests(BasePlatformTests):\n an ordinary test because it requires passing options to meson.\n '''\n testdir = os.path.join(self.common_test_dir, '1 trivial')\n- env = Environment(testdir, self.builddir, get_fake_options(self.prefix), [])\n+ env = Environment(testdir, self.builddir, get_fake_options(self.prefix))\n cc = env.detect_c_compiler(False)\n self._test_stds_impl(testdir, cc, 'c')\n \n@@ -2765,7 +2765,7 @@ class LinuxlikeTests(BasePlatformTests):\n be an ordinary test because it requires passing options to meson.\n '''\n testdir = os.path.join(self.common_test_dir, '2 cpp')\n- env = Environment(testdir, self.builddir, get_fake_options(self.prefix), [])\n+ env = Environment(testdir, self.builddir, get_fake_options(self.prefix))\n cpp = env.detect_cpp_compiler(False)\n self._test_stds_impl(testdir, cpp, 'cpp')\n \n", "new_path": "run_unittests.py", "old_path": "run_unittests.py" } ]
c151eb49678be24a75451a327812dd5fa569af73
mesonbuild/meson
null
null
Improve logging when dependency is anonymous Various pieces of logging assume the dependency has a name, and aren't grammatical if the dependency is '', so say it is anonymous instead.
[ { "change_type": "MODIFY", "diff": "@@ -2805,10 +2805,11 @@ external dependencies (including libraries) must go to \"dependencies\".''')\n def func_dependency(self, node, args, kwargs):\n self.validate_arguments(args, 1, [str])\n name = args[0]\n+ display_name = name if name else '(anonymous)'\n \n disabled, required, feature = extract_required_kwarg(kwargs)\n if disabled:\n- mlog.log('Dependency', mlog.bold(name), 'skipped: feature', mlog.bold(feature), 'disabled')\n+ mlog.log('Dependency', mlog.bold(display_name), 'skipped: feature', mlog.bold(feature), 'disabled')\n return DependencyHolder(NotFoundDependency(self.environment))\n \n # writing just \"dependency('')\" is an error, because it can only fail\n@@ -2823,7 +2824,7 @@ external dependencies (including libraries) must go to \"dependencies\".''')\n if cached_dep:\n if required and not cached_dep.found():\n m = 'Dependency {!r} was already checked and was not found'\n- raise DependencyException(m.format(name))\n+ raise DependencyException(m.format(display_name))\n dep = cached_dep\n else:\n # If the dependency has already been configured, possibly by\n@@ -2905,17 +2906,18 @@ root and issuing %s.\n return fbinfo\n \n def dependency_fallback(self, name, kwargs):\n+ display_name = name if name else '(anonymous)'\n if self.coredata.wrap_mode in (WrapMode.nofallback, WrapMode.nodownload):\n mlog.log('Not looking for a fallback subproject for the dependency',\n- mlog.bold(name), 'because:\\nUse of fallback'\n+ mlog.bold(display_name), 'because:\\nUse of fallback'\n 'dependencies is disabled.')\n return None\n elif self.coredata.wrap_mode == WrapMode.forcefallback:\n mlog.log('Looking for a fallback subproject for the dependency',\n- mlog.bold(name), 'because:\\nUse of fallback dependencies is forced.')\n+ mlog.bold(display_name), 'because:\\nUse of fallback dependencies is forced.')\n else:\n mlog.log('Looking for a fallback subproject for the dependency',\n- mlog.bold(name))\n+ mlog.bold(display_name))\n dirname, varname = self.get_subproject_infos(kwargs)\n # Try to execute the subproject\n try:\n@@ -2933,7 +2935,7 @@ root and issuing %s.\n except Exception as e:\n mlog.log('Couldn\\'t use fallback subproject in',\n mlog.bold(os.path.join(self.subproject_dir, dirname)),\n- 'for the dependency', mlog.bold(name), '\\nReason:', str(e))\n+ 'for the dependency', mlog.bold(display_name), '\\nReason:', str(e))\n return None\n dep = self.get_subproject_dep(name, dirname, varname, kwargs.get('required', True))\n if not dep:\n@@ -2945,10 +2947,10 @@ root and issuing %s.\n found = dep.version_method([], {})\n if not self.check_subproject_version(wanted, found):\n mlog.log('Subproject', mlog.bold(subproj_path), 'dependency',\n- mlog.bold(varname), 'version is', mlog.bold(found),\n+ mlog.bold(display_name), 'version is', mlog.bold(found),\n 'but', mlog.bold(wanted), 'is required.')\n return None\n- mlog.log('Dependency', mlog.bold(name), 'from subproject',\n+ mlog.log('Dependency', mlog.bold(display_name), 'from subproject',\n mlog.bold(subproj_path), 'found:', mlog.green('YES'))\n return dep\n \n", "new_path": "mesonbuild/interpreter.py", "old_path": "mesonbuild/interpreter.py" } ]
4fb00ee1d8c79184e3a77f9266e90d42ff595f41
mesonbuild/meson
null
null
Add new method: mlog.deprecation() Instead of constructing it manually, use a helper.
[ { "change_type": "MODIFY", "diff": "@@ -1792,7 +1792,7 @@ class CustomTarget(Target):\n if 'build_always' in kwargs and 'build_always_stale' in kwargs:\n raise InvalidArguments('build_always and build_always_stale are mutually exclusive. Combine build_by_default and build_always_stale.')\n elif 'build_always' in kwargs:\n- mlog.warning('build_always is deprecated. Combine build_by_default and build_always_stale instead.')\n+ mlog.deprecation('build_always is deprecated. Combine build_by_default and build_always_stale instead.')\n if 'build_by_default' not in kwargs:\n self.build_by_default = kwargs['build_always']\n self.build_always_stale = kwargs['build_always']\n", "new_path": "mesonbuild/build.py", "old_path": "mesonbuild/build.py" }, { "change_type": "MODIFY", "diff": "@@ -169,7 +169,7 @@ class UserArrayOption(UserOption):\n if len(set(newvalue)) != len(newvalue):\n msg = 'Duplicated values in array option \"%s\" is deprecated. ' \\\n 'This will become a hard error in the future.' % (self.name)\n- mlog.log(mlog.red('DEPRECATION:'), msg)\n+ mlog.deprecation(msg)\n for i in newvalue:\n if not isinstance(i, str):\n raise MesonException('String array element \"{0}\" is not a string.'.format(str(newvalue)))\n", "new_path": "mesonbuild/coredata.py", "old_path": "mesonbuild/coredata.py" }, { "change_type": "MODIFY", "diff": "@@ -276,8 +276,7 @@ class ConfigurationDataHolder(MutableInterpreterObject, ObjectHolder):\n \n def validate_args(self, args, kwargs):\n if len(args) == 1 and isinstance(args[0], list) and len(args[0]) == 2:\n- mlog.log(mlog.red('DEPRECATION:'),\n- '''Passing a list as the single argument to configuration_data.set is deprecated.\n+ mlog.deprecation('''Passing a list as the single argument to configuration_data.set is deprecated.\n This will become a hard error in the future''')\n args = args[0]\n \n", "new_path": "mesonbuild/interpreter.py", "old_path": "mesonbuild/interpreter.py" }, { "change_type": "MODIFY", "diff": "@@ -243,8 +243,9 @@ class FeatureDeprecated(FeatureCheckBase):\n return 'Deprecated features used:'\n \n def log_usage_warning(self, tv):\n- mlog.warning('Project targetting \\'{}\\' but tried to use feature deprecated '\n- 'since \\'{}\\': {}'.format(tv, self.feature_version, self.feature_name))\n+ mlog.deprecation('Project targetting \\'{}\\' but tried to use feature '\n+ 'deprecated since \\'{}\\': {}'\n+ ''.format(tv, self.feature_version, self.feature_name))\n \n \n class FeatureCheckKwargsBase:\n", "new_path": "mesonbuild/interpreterbase.py", "old_path": "mesonbuild/interpreterbase.py" }, { "change_type": "MODIFY", "diff": "@@ -145,6 +145,8 @@ def _log_error(severity, *args, **kwargs):\n args = (yellow('WARNING:'),) + args\n elif severity == 'error':\n args = (red('ERROR:'),) + args\n+ elif severity == 'deprecation':\n+ args = (red('DEPRECATION:'),) + args\n else:\n assert False, 'Invalid severity ' + severity\n \n@@ -163,6 +165,9 @@ def error(*args, **kwargs):\n def warning(*args, **kwargs):\n return _log_error('warning', *args, **kwargs)\n \n+def deprecation(*args, **kwargs):\n+ return _log_error('deprecation', *args, **kwargs)\n+\n def exception(e):\n log()\n if hasattr(e, 'file') and hasattr(e, 'lineno') and hasattr(e, 'colno'):\n", "new_path": "mesonbuild/mlog.py", "old_path": "mesonbuild/mlog.py" }, { "change_type": "MODIFY", "diff": "@@ -683,7 +683,7 @@ class GnomeModule(ExtensionModule):\n \n langs = mesonlib.stringlistify(kwargs.pop('languages', []))\n if langs:\n- mlog.log(mlog.red('DEPRECATION:'), '''The \"languages\" argument of 
gnome.yelp() is deprecated.\n+ mlog.deprecation('''The \"languages\" argument of gnome.yelp() is deprecated.\n Use a LINGUAS file in the sources directory instead.\n This will become a hard error in the future.''')\n \n", "new_path": "mesonbuild/modules/gnome.py", "old_path": "mesonbuild/modules/gnome.py" } ]
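A simplified sketch of the severity-prefixing helper pattern that mlog.deprecation() builds on (standalone and reduced; the real mlog module also handles colour output and writing to the log file):

    def _log_error(severity, *args):
        prefixes = {'warning': 'WARNING:', 'error': 'ERROR:', 'deprecation': 'DEPRECATION:'}
        assert severity in prefixes, 'Invalid severity ' + severity
        print(prefixes[severity], *args)

    def deprecation(*args):
        # Call sites use deprecation(msg) instead of hand-assembling
        # log(red('DEPRECATION:'), msg) at every warning site.
        return _log_error('deprecation', *args)

    deprecation('build_always is deprecated. Combine build_by_default and build_always_stale instead.')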
28c1f31d7e2b46a8473d8ebe8f029fb7602fde09
mesonbuild/meson
null
null
Make `-std=` fallback remapping more robust * The current version matching logic is brittle with respect to Clang. LLVM and Apple Clang use slightly different but nowadays overlapping version ranges. Instead, we now just check whether the compiler supports the given `-std=` variant and try its respective fallback instead of testing version ranges.
[ { "change_type": "MODIFY", "diff": "@@ -12,10 +12,12 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import functools\n import os.path\n \n from .. import coredata\n-from ..mesonlib import version_compare\n+from .. import mlog\n+from ..mesonlib import MesonException, version_compare\n \n from .c import CCompiler, VisualStudioCCompiler\n from .compilers import (\n@@ -67,6 +69,55 @@ class CPPCompiler(CCompiler):\n int main () {{ return 0; }}'''\n return self.compiles(t.format(**fargs), env, extra_args, dependencies)\n \n+ def _test_cpp_std_arg(self, cpp_std_value):\n+ # Test whether the compiler understands a -std=XY argument\n+ assert(cpp_std_value.startswith('-std='))\n+\n+ # This test does not use has_multi_arguments() for two reasons:\n+ # 1. has_multi_arguments() requires an env argument, which the compiler\n+ # object does not have at this point.\n+ # 2. even if it did have an env object, that might contain another more\n+ # recent -std= argument, which might lead to a cascaded failure.\n+ CPP_TEST = 'int i = static_cast<int>(0);'\n+ with self.compile(code=CPP_TEST, extra_args=[cpp_std_value], mode='compile') as p:\n+ if p.returncode == 0:\n+ mlog.debug('Compiler accepts {}:'.format(cpp_std_value), 'YES')\n+ return True\n+ else:\n+ mlog.debug('Compiler accepts {}:'.format(cpp_std_value), 'NO')\n+ return False\n+\n+ @functools.lru_cache()\n+ def _find_best_cpp_std(self, cpp_std):\n+ # The initial version mapping approach to make falling back\n+ # from '-std=c++14' to '-std=c++1y' was too brittle. For instance,\n+ # Apple's Clang uses a different versioning scheme to upstream LLVM,\n+ # making the whole detection logic awfully brittle. Instead, let's\n+ # just see if feeding GCC or Clang our '-std=' setting works, and\n+ # if not, try the fallback argument.\n+ CPP_FALLBACKS = {\n+ 'c++11': 'c++0x',\n+ 'gnu++11': 'gnu++0x',\n+ 'c++14': 'c++1y',\n+ 'gnu++14': 'gnu++1y',\n+ 'c++17': 'c++1z',\n+ 'gnu++17': 'gnu++1z'\n+ }\n+\n+ # Currently, remapping is only supported for Clang and GCC\n+ assert(self.id in frozenset(['clang', 'gcc']))\n+\n+ if cpp_std not in CPP_FALLBACKS:\n+ # 'c++03' and 'c++98' don't have fallback types\n+ return '-std=' + cpp_std\n+\n+ for i in (cpp_std, CPP_FALLBACKS[cpp_std]):\n+ cpp_std_value = '-std=' + i\n+ if self._test_cpp_std_arg(cpp_std_value):\n+ return cpp_std_value\n+\n+ raise MesonException('C++ Compiler does not support -std={}'.format(cpp_std))\n+\n \n class ClangCPPCompiler(ClangCompiler, CPPCompiler):\n def __init__(self, exelist, version, cltype, is_cross, exe_wrapper=None, **kwargs):\n@@ -89,11 +140,7 @@ class ClangCPPCompiler(ClangCompiler, CPPCompiler):\n args = []\n std = options['cpp_std']\n if std.value != 'none':\n- cpp_std_value = std.value\n- # Clang 3.2, 3.3, 3.4 only understand -std={c,gnu}++1y and not -std={c,gnu}++14\n- if version_compare(self.version, '>=3.2') and version_compare(self.version, '<3.5'):\n- cpp_std_value = cpp_std_value.replace('++14', '++1y')\n- args.append('-std=' + cpp_std_value)\n+ args.append(self._find_best_cpp_std(std.value))\n return args\n \n def get_option_link_args(self, options):\n@@ -159,11 +206,7 @@ class GnuCPPCompiler(GnuCompiler, CPPCompiler):\n args = []\n std = options['cpp_std']\n if std.value != 'none':\n- cpp_std_value = std.value\n- # GCC 4.8 only understands -std={c,gnu}++1y and not -std={c,gnu}++14\n- if version_compare(self.version, '>=4.8') and version_compare(self.version, '<4.9'):\n- cpp_std_value = cpp_std_value.replace('++14', 
'++1y')\n- args.append('-std=' + cpp_std_value)\n+ args.append(self._find_best_cpp_std(std.value))\n if options['cpp_debugstl'].value:\n args.append('-D_GLIBCXX_DEBUG=1')\n return args\n", "new_path": "mesonbuild/compilers/cpp.py", "old_path": "mesonbuild/compilers/cpp.py" } ]
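A standalone, simplified sketch of the probe-and-fall-back approach the commit describes: instead of matching compiler version ranges, compile a trivial translation unit with the requested -std= value and, if that fails, retry with the pre-standard spelling. It shells out to the compiler directly rather than going through meson's compile() wrapper, and the compiler path in the usage note is illustrative:

    import os
    import subprocess
    import tempfile

    CPP_FALLBACKS = {'c++14': 'c++1y', 'gnu++14': 'gnu++1y',
                     'c++17': 'c++1z', 'gnu++17': 'gnu++1z'}

    def accepts_std(compiler, std_arg):
        # Compile a trivial C++ snippet with the candidate -std= argument.
        with tempfile.TemporaryDirectory() as d:
            src = os.path.join(d, 'probe.cpp')
            with open(src, 'w') as f:
                f.write('int i = static_cast<int>(0);\n')
            r = subprocess.run([compiler, std_arg, '-c', src, '-o', os.path.join(d, 'probe.o')],
                               stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            return r.returncode == 0

    def best_std_arg(compiler, cpp_std):
        for candidate in (cpp_std, CPP_FALLBACKS.get(cpp_std)):
            if candidate and accepts_std(compiler, '-std=' + candidate):
                return '-std=' + candidate
        raise RuntimeError('compiler does not support -std=' + cpp_std)

    # e.g. best_std_arg('g++', 'c++14') returns '-std=c++14' on modern GCC
    # and falls back to '-std=c++1y' on GCC 4.8.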
69ec001b0672094ab92c07f5e561c9c0525aef7b
mesonbuild/meson
null
null
Use enum instead of `int` for compiler variants * Enums are strongly typed and make the whole `gcc_type`/`clang_type`/`icc_type` distinction redundant. * Enums also allow extending via member functions, which makes the code more generalisable.
[ { "change_type": "MODIFY", "diff": "@@ -14,16 +14,7 @@\n \n # Public symbols for compilers sub-package when using 'from . import compilers'\n __all__ = [\n- 'CLANG_OSX',\n- 'CLANG_STANDARD',\n- 'CLANG_WIN',\n- 'GCC_CYGWIN',\n- 'GCC_MINGW',\n- 'GCC_OSX',\n- 'GCC_STANDARD',\n- 'ICC_OSX',\n- 'ICC_STANDARD',\n- 'ICC_WIN',\n+ 'CompilerType',\n \n 'all_languages',\n 'base_options',\n@@ -94,16 +85,7 @@ __all__ = [\n \n # Bring symbols from each module into compilers sub-package namespace\n from .compilers import (\n- GCC_OSX,\n- GCC_MINGW,\n- GCC_CYGWIN,\n- GCC_STANDARD,\n- CLANG_OSX,\n- CLANG_WIN,\n- CLANG_STANDARD,\n- ICC_OSX,\n- ICC_WIN,\n- ICC_STANDARD,\n+ CompilerType,\n all_languages,\n base_options,\n clib_langs,\n", "new_path": "mesonbuild/compilers/__init__.py", "old_path": "mesonbuild/compilers/__init__.py" }, { "change_type": "MODIFY", "diff": "@@ -30,7 +30,7 @@ from ..mesonlib import (\n from .c_function_attributes import C_FUNC_ATTRIBUTES\n \n from .compilers import (\n- GCC_MINGW,\n+ CompilerType,\n get_largefile_args,\n gnu_winlibs,\n msvc_winlibs,\n@@ -121,7 +121,7 @@ class CCompiler(Compiler):\n # The default behavior is this, override in MSVC\n @functools.lru_cache(maxsize=None)\n def build_rpath_args(self, build_dir, from_dir, rpath_paths, build_rpath, install_rpath):\n- if self.id == 'clang' and self.clang_type == compilers.CLANG_OSX:\n+ if self.id == 'clang' and self.compiler_type == CompilerType.CLANG_OSX:\n return self.build_osx_rpath_args(build_dir, rpath_paths, build_rpath)\n return self.build_unix_rpath_args(build_dir, from_dir, rpath_paths, build_rpath, install_rpath)\n \n@@ -160,15 +160,8 @@ class CCompiler(Compiler):\n '''\n Get args for allowing undefined symbols when linking to a shared library\n '''\n- if self.id == 'clang':\n- if self.clang_type == compilers.CLANG_OSX:\n- # Apple ld\n- return ['-Wl,-undefined,dynamic_lookup']\n- else:\n- # GNU ld and LLVM lld\n- return ['-Wl,--allow-shlib-undefined']\n- elif self.id == 'gcc':\n- if self.gcc_type == compilers.GCC_OSX:\n+ if self.id in ('clang', 'gcc'):\n+ if self.compiler_type.is_osx_compiler:\n # Apple ld\n return ['-Wl,-undefined,dynamic_lookup']\n else:\n@@ -1064,9 +1057,9 @@ class CCompiler(Compiler):\n \n \n class ClangCCompiler(ClangCompiler, CCompiler):\n- def __init__(self, exelist, version, clang_type, is_cross, exe_wrapper=None, **kwargs):\n+ def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None, **kwargs):\n CCompiler.__init__(self, exelist, version, is_cross, exe_wrapper, **kwargs)\n- ClangCompiler.__init__(self, clang_type)\n+ ClangCompiler.__init__(self, compiler_type)\n default_warn_args = ['-Wall', '-Winvalid-pch']\n self.warn_args = {'1': default_warn_args,\n '2': default_warn_args + ['-Wextra'],\n@@ -1092,7 +1085,7 @@ class ClangCCompiler(ClangCompiler, CCompiler):\n \n def get_linker_always_args(self):\n basic = super().get_linker_always_args()\n- if self.clang_type == compilers.CLANG_OSX:\n+ if self.compiler_type.is_osx_compiler:\n return basic + ['-Wl,-headerpad_max_install_names']\n return basic\n \n@@ -1126,9 +1119,9 @@ class ArmclangCCompiler(ArmclangCompiler, CCompiler):\n \n \n class GnuCCompiler(GnuCompiler, CCompiler):\n- def __init__(self, exelist, version, gcc_type, is_cross, exe_wrapper=None, defines=None, **kwargs):\n+ def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None, defines=None, **kwargs):\n CCompiler.__init__(self, exelist, version, is_cross, exe_wrapper, **kwargs)\n- GnuCompiler.__init__(self, gcc_type, defines)\n+ 
GnuCompiler.__init__(self, compiler_type, defines)\n default_warn_args = ['-Wall', '-Winvalid-pch']\n self.warn_args = {'1': default_warn_args,\n '2': default_warn_args + ['-Wextra'],\n@@ -1140,7 +1133,7 @@ class GnuCCompiler(GnuCompiler, CCompiler):\n ['none', 'c89', 'c99', 'c11',\n 'gnu89', 'gnu99', 'gnu11'],\n 'none')})\n- if self.gcc_type == GCC_MINGW:\n+ if self.compiler_type == CompilerType.GCC_MINGW:\n opts.update({\n 'c_winlibs': coredata.UserArrayOption('c_winlibs', 'Standard Win libraries to link against',\n gnu_winlibs), })\n@@ -1154,7 +1147,7 @@ class GnuCCompiler(GnuCompiler, CCompiler):\n return args\n \n def get_option_link_args(self, options):\n- if self.gcc_type == GCC_MINGW:\n+ if self.compiler_type == CompilerType.GCC_MINGW:\n return options['c_winlibs'].value[:]\n return []\n \n@@ -1166,9 +1159,9 @@ class GnuCCompiler(GnuCompiler, CCompiler):\n \n \n class ElbrusCCompiler(GnuCCompiler, ElbrusCompiler):\n- def __init__(self, exelist, version, gcc_type, is_cross, exe_wrapper=None, defines=None, **kwargs):\n- GnuCCompiler.__init__(self, exelist, version, gcc_type, is_cross, exe_wrapper, defines, **kwargs)\n- ElbrusCompiler.__init__(self, gcc_type, defines)\n+ def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None, defines=None, **kwargs):\n+ GnuCCompiler.__init__(self, exelist, version, compiler_type, is_cross, exe_wrapper, defines, **kwargs)\n+ ElbrusCompiler.__init__(self, compiler_type, defines)\n \n # It does support some various ISO standards and c/gnu 90, 9x, 1x in addition to those which GNU CC supports.\n def get_options(self):\n@@ -1190,9 +1183,9 @@ class ElbrusCCompiler(GnuCCompiler, ElbrusCompiler):\n \n \n class IntelCCompiler(IntelCompiler, CCompiler):\n- def __init__(self, exelist, version, icc_type, is_cross, exe_wrapper=None, **kwargs):\n+ def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None, **kwargs):\n CCompiler.__init__(self, exelist, version, is_cross, exe_wrapper, **kwargs)\n- IntelCompiler.__init__(self, icc_type)\n+ IntelCompiler.__init__(self, compiler_type)\n self.lang_header = 'c-header'\n default_warn_args = ['-Wall', '-w3', '-diag-disable:remark', '-Wpch-messages']\n self.warn_args = {'1': default_warn_args,\n", "new_path": "mesonbuild/compilers/c.py", "old_path": "mesonbuild/compilers/c.py" }, { "change_type": "MODIFY", "diff": "@@ -12,7 +12,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-import contextlib, os.path, re, tempfile, shlex\n+import contextlib, enum, os.path, re, tempfile, shlex\n import subprocess\n \n from ..linkers import StaticLinker\n@@ -1141,19 +1141,35 @@ class Compiler:\n raise EnvironmentException(\n 'Language {} does not support function attributes.'.format(self.get_display_language()))\n \n-GCC_STANDARD = 0\n-GCC_OSX = 1\n-GCC_MINGW = 2\n-GCC_CYGWIN = 3\n \n-CLANG_STANDARD = 0\n-CLANG_OSX = 1\n-CLANG_WIN = 2\n-# Possibly clang-cl?\n+@enum.unique\n+class CompilerType(enum.Enum):\n+ GCC_STANDARD = 0\n+ GCC_OSX = 1\n+ GCC_MINGW = 2\n+ GCC_CYGWIN = 3\n+\n+ CLANG_STANDARD = 10\n+ CLANG_OSX = 11\n+ CLANG_MINGW = 12\n+ # Possibly clang-cl?\n+\n+ ICC_STANDARD = 20\n+ ICC_OSX = 21\n+ ICC_WIN = 22\n+\n+ @property\n+ def is_standard_compiler(self):\n+ return self.name in ('GCC_STANDARD', 'CLANG_STANDARD', 'ICC_STANDARD')\n+\n+ @property\n+ def is_osx_compiler(self):\n+ return self.name in ('GCC_OSX', 'CLANG_OSX', 'ICC_OSX')\n+\n+ @property\n+ def is_windows_compiler(self):\n+ return self.name in ('GCC_MINGW', 
'GCC_CYGWIN', 'CLANG_MINGW', 'ICC_WIN')\n \n-ICC_STANDARD = 0\n-ICC_OSX = 1\n-ICC_WIN = 2\n \n # GNU ld cannot be installed on macOS\n # https://github.com/Homebrew/homebrew-core/issues/17794#issuecomment-328174395\n@@ -1169,14 +1185,14 @@ def get_macos_dylib_install_name(prefix, shlib_name, suffix, soversion):\n install_name += '.dylib'\n return '@rpath/' + install_name\n \n-def get_gcc_soname_args(gcc_type, prefix, shlib_name, suffix, soversion, darwin_versions, is_shared_module):\n- if gcc_type == GCC_STANDARD:\n+def get_gcc_soname_args(compiler_type, prefix, shlib_name, suffix, soversion, darwin_versions, is_shared_module):\n+ if compiler_type.is_standard_compiler:\n sostr = '' if soversion is None else '.' + soversion\n return ['-Wl,-soname,%s%s.%s%s' % (prefix, shlib_name, suffix, sostr)]\n- elif gcc_type in (GCC_MINGW, GCC_CYGWIN):\n+ elif compiler_type.is_windows_compiler:\n # For PE/COFF the soname argument has no effect with GNU LD\n return []\n- elif gcc_type == GCC_OSX:\n+ elif compiler_type.is_osx_compiler:\n if is_shared_module:\n return []\n name = get_macos_dylib_install_name(prefix, shlib_name, suffix, soversion)\n@@ -1188,20 +1204,21 @@ def get_gcc_soname_args(gcc_type, prefix, shlib_name, suffix, soversion, darwin_\n raise RuntimeError('Not implemented yet.')\n \n def get_compiler_is_linuxlike(compiler):\n- if (getattr(compiler, 'gcc_type', None) == GCC_STANDARD) or \\\n- (getattr(compiler, 'clang_type', None) == CLANG_STANDARD) or \\\n- (getattr(compiler, 'icc_type', None) == ICC_STANDARD):\n- return True\n- return False\n+ compiler_type = getattr(compiler, 'compiler_type', None)\n+ return compiler_type and compiler_type.is_standard_compiler\n \n def get_compiler_uses_gnuld(c):\n # FIXME: Perhaps we should detect the linker in the environment?\n # FIXME: Assumes that *BSD use GNU ld, but they might start using lld soon\n- if (getattr(c, 'gcc_type', None) in (GCC_STANDARD, GCC_MINGW, GCC_CYGWIN)) or \\\n- (getattr(c, 'clang_type', None) in (CLANG_STANDARD, CLANG_WIN)) or \\\n- (getattr(c, 'icc_type', None) in (ICC_STANDARD, ICC_WIN)):\n- return True\n- return False\n+ compiler_type = getattr(c, 'compiler_type', None)\n+ return compiler_type in (\n+ CompilerType.GCC_STANDARD,\n+ CompilerType.GCC_MINGW,\n+ CompilerType.GCC_CYGWIN,\n+ CompilerType.CLANG_STANDARD,\n+ CompilerType.CLANG_MINGW,\n+ CompilerType.ICC_STANDARD,\n+ CompilerType.ICC_WIN)\n \n def get_largefile_args(compiler):\n '''\n@@ -1262,13 +1279,13 @@ def gnulike_default_include_dirs(compiler, lang):\n \n class GnuCompiler:\n # Functionality that is common to all GNU family compilers.\n- def __init__(self, gcc_type, defines):\n+ def __init__(self, compiler_type, defines):\n self.id = 'gcc'\n- self.gcc_type = gcc_type\n+ self.compiler_type = compiler_type\n self.defines = defines or {}\n self.base_options = ['b_pch', 'b_lto', 'b_pgo', 'b_sanitize', 'b_coverage',\n 'b_colorout', 'b_ndebug', 'b_staticpic']\n- if self.gcc_type == GCC_OSX:\n+ if self.compiler_type.is_osx_compiler:\n self.base_options.append('b_bitcode')\n else:\n self.base_options.append('b_lundef')\n@@ -1279,7 +1296,7 @@ class GnuCompiler:\n # TODO: centralise this policy more globally, instead\n # of fragmenting it into GnuCompiler and ClangCompiler\n def get_asneeded_args(self):\n- if self.gcc_type == GCC_OSX:\n+ if self.compiler_type.is_osx_compiler:\n return APPLE_LD_AS_NEEDED\n else:\n return GNU_LD_AS_NEEDED\n@@ -1305,7 +1322,7 @@ class GnuCompiler:\n return self.defines[define]\n \n def get_pic_args(self):\n- if self.gcc_type in (GCC_CYGWIN, 
GCC_MINGW, GCC_OSX):\n+ if self.compiler_type in (CompilerType.GCC_CYGWIN, CompilerType.GCC_MINGW, CompilerType.GCC_OSX):\n return [] # On Window and OS X, pic is always on.\n return ['-fPIC']\n \n@@ -1319,7 +1336,7 @@ class GnuCompiler:\n return clike_debug_args[is_debug]\n \n def get_buildtype_linker_args(self, buildtype):\n- if self.gcc_type == GCC_OSX:\n+ if self.compiler_type.is_osx_compiler:\n return apple_buildtype_linker_args[buildtype]\n return gnulike_buildtype_linker_args[buildtype]\n \n@@ -1330,7 +1347,7 @@ class GnuCompiler:\n return os.path.dirname(fname), fname\n \n def get_soname_args(self, *args):\n- return get_gcc_soname_args(self.gcc_type, *args)\n+ return get_gcc_soname_args(self.compiler_type, *args)\n \n def get_std_shared_lib_link_args(self):\n return ['-shared']\n@@ -1343,13 +1360,13 @@ class GnuCompiler:\n raise RuntimeError('Module definitions file should be str')\n # On Windows targets, .def files may be specified on the linker command\n # line like an object file.\n- if self.gcc_type in (GCC_CYGWIN, GCC_MINGW):\n+ if self.compiler_type in (CompilerType.GCC_CYGWIN, CompilerType.GCC_MINGW):\n return [defsfile]\n # For other targets, discard the .def file.\n return []\n \n def get_gui_app_args(self, value):\n- if self.gcc_type in (GCC_CYGWIN, GCC_MINGW) and value:\n+ if self.compiler_type in (CompilerType.GCC_CYGWIN, CompilerType.GCC_MINGW) and value:\n return ['-mwindows']\n return []\n \n@@ -1368,8 +1385,8 @@ class GnuCompiler:\n class ElbrusCompiler(GnuCompiler):\n # Elbrus compiler is nearly like GCC, but does not support\n # PCH, LTO, sanitizers and color output as of version 1.21.x.\n- def __init__(self, gcc_type, defines):\n- GnuCompiler.__init__(self, gcc_type, defines)\n+ def __init__(self, compiler_type, defines):\n+ GnuCompiler.__init__(self, compiler_type, defines)\n self.id = 'lcc'\n self.base_options = ['b_pgo', 'b_coverage',\n 'b_ndebug', 'b_staticpic',\n@@ -1404,12 +1421,12 @@ class ElbrusCompiler(GnuCompiler):\n return paths\n \n class ClangCompiler:\n- def __init__(self, clang_type):\n+ def __init__(self, compiler_type):\n self.id = 'clang'\n- self.clang_type = clang_type\n+ self.compiler_type = compiler_type\n self.base_options = ['b_pch', 'b_lto', 'b_pgo', 'b_sanitize', 'b_coverage',\n 'b_ndebug', 'b_staticpic', 'b_colorout']\n- if self.clang_type == CLANG_OSX:\n+ if self.compiler_type.is_osx_compiler:\n self.base_options.append('b_bitcode')\n else:\n self.base_options.append('b_lundef')\n@@ -1420,13 +1437,13 @@ class ClangCompiler:\n # TODO: centralise this policy more globally, instead\n # of fragmenting it into GnuCompiler and ClangCompiler\n def get_asneeded_args(self):\n- if self.clang_type == CLANG_OSX:\n+ if self.compiler_type.is_osx_compiler:\n return APPLE_LD_AS_NEEDED\n else:\n return GNU_LD_AS_NEEDED\n \n def get_pic_args(self):\n- if self.clang_type in (CLANG_WIN, CLANG_OSX):\n+ if self.compiler_type in (CompilerType.CLANG_MINGW, CompilerType.CLANG_OSX):\n return [] # On Window and OS X, pic is always on.\n return ['-fPIC']\n \n@@ -1437,7 +1454,7 @@ class ClangCompiler:\n return gnulike_buildtype_args[buildtype]\n \n def get_buildtype_linker_args(self, buildtype):\n- if self.clang_type == CLANG_OSX:\n+ if self.compiler_type.is_osx_compiler:\n return apple_buildtype_linker_args[buildtype]\n return gnulike_buildtype_linker_args[buildtype]\n \n@@ -1457,15 +1474,7 @@ class ClangCompiler:\n return ['-include-pch', os.path.join(pch_dir, self.get_pch_name(header))]\n \n def get_soname_args(self, *args):\n- if self.clang_type == 
CLANG_STANDARD:\n- gcc_type = GCC_STANDARD\n- elif self.clang_type == CLANG_OSX:\n- gcc_type = GCC_OSX\n- elif self.clang_type == CLANG_WIN:\n- gcc_type = GCC_MINGW\n- else:\n- raise MesonException('Unreachable code when converting clang type to gcc type.')\n- return get_gcc_soname_args(gcc_type, *args)\n+ return get_gcc_soname_args(self.compiler_type, *args)\n \n def has_multi_arguments(self, args, env):\n myargs = ['-Werror=unknown-warning-option', '-Werror=unused-command-line-argument']\n@@ -1482,17 +1491,17 @@ class ClangCompiler:\n # visibility to obey OS X and iOS minimum version targets with\n # -mmacosx-version-min, -miphoneos-version-min, etc.\n # https://github.com/Homebrew/homebrew-core/issues/3727\n- if self.clang_type == CLANG_OSX and version_compare(self.version, '>=8.0'):\n+ if self.compiler_type.is_osx_compiler and version_compare(self.version, '>=8.0'):\n extra_args.append('-Wl,-no_weak_imports')\n return super().has_function(funcname, prefix, env, extra_args, dependencies)\n \n def get_std_shared_module_link_args(self, options):\n- if self.clang_type == CLANG_OSX:\n+ if self.compiler_type.is_osx_compiler:\n return ['-bundle', '-Wl,-undefined,dynamic_lookup']\n return ['-shared']\n \n def get_link_whole_for(self, args):\n- if self.clang_type == CLANG_OSX:\n+ if self.compiler_type.is_osx_compiler:\n result = []\n for a in args:\n result += ['-Wl,-force_load', a]\n@@ -1593,9 +1602,9 @@ class ArmclangCompiler:\n \n # Tested on linux for ICC 14.0.3, 15.0.6, 16.0.4, 17.0.1\n class IntelCompiler:\n- def __init__(self, icc_type):\n+ def __init__(self, compiler_type):\n self.id = 'intel'\n- self.icc_type = icc_type\n+ self.compiler_type = compiler_type\n self.lang_header = 'none'\n self.base_options = ['b_pch', 'b_lto', 'b_pgo', 'b_sanitize', 'b_coverage',\n 'b_colorout', 'b_ndebug', 'b_staticpic', 'b_lundef', 'b_asneeded']\n@@ -1625,27 +1634,19 @@ class IntelCompiler:\n return os.path.dirname(fname), fname\n \n def get_soname_args(self, *args):\n- if self.icc_type == ICC_STANDARD:\n- gcc_type = GCC_STANDARD\n- elif self.icc_type == ICC_OSX:\n- gcc_type = GCC_OSX\n- elif self.icc_type == ICC_WIN:\n- gcc_type = GCC_MINGW\n- else:\n- raise MesonException('Unreachable code when converting icc type to gcc type.')\n- return get_gcc_soname_args(gcc_type, *args)\n+ return get_gcc_soname_args(self.compiler_type, *args)\n \n # TODO: centralise this policy more globally, instead\n # of fragmenting it into GnuCompiler and ClangCompiler\n def get_asneeded_args(self):\n- if self.icc_type == CLANG_OSX:\n+ if self.compiler_type.is_osx_compiler:\n return APPLE_LD_AS_NEEDED\n else:\n return GNU_LD_AS_NEEDED\n \n def get_std_shared_lib_link_args(self):\n # FIXME: Don't know how icc works on OSX\n- # if self.icc_type == ICC_OSX:\n+ # if self.compiler_type.is_osx_compiler:\n # return ['-bundle']\n return ['-shared']\n \n", "new_path": "mesonbuild/compilers/compilers.py", "old_path": "mesonbuild/compilers/compilers.py" }, { "change_type": "MODIFY", "diff": "@@ -21,7 +21,7 @@ from ..mesonlib import MesonException, version_compare\n \n from .c import CCompiler, VisualStudioCCompiler\n from .compilers import (\n- GCC_MINGW,\n+ CompilerType,\n gnu_winlibs,\n msvc_winlibs,\n ClangCompiler,\n@@ -126,9 +126,9 @@ class CPPCompiler(CCompiler):\n \n \n class ClangCPPCompiler(ClangCompiler, CPPCompiler):\n- def __init__(self, exelist, version, cltype, is_cross, exe_wrapper=None, **kwargs):\n+ def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None, **kwargs):\n CPPCompiler.__init__(self, 
exelist, version, is_cross, exe_wrapper, **kwargs)\n- ClangCompiler.__init__(self, cltype)\n+ ClangCompiler.__init__(self, compiler_type)\n default_warn_args = ['-Wall', '-Winvalid-pch', '-Wnon-virtual-dtor']\n self.warn_args = {'1': default_warn_args,\n '2': default_warn_args + ['-Wextra'],\n@@ -185,9 +185,9 @@ class ArmclangCPPCompiler(ArmclangCompiler, CPPCompiler):\n \n \n class GnuCPPCompiler(GnuCompiler, CPPCompiler):\n- def __init__(self, exelist, version, gcc_type, is_cross, exe_wrap, defines, **kwargs):\n+ def __init__(self, exelist, version, compiler_type, is_cross, exe_wrap, defines, **kwargs):\n CPPCompiler.__init__(self, exelist, version, is_cross, exe_wrap, **kwargs)\n- GnuCompiler.__init__(self, gcc_type, defines)\n+ GnuCompiler.__init__(self, compiler_type, defines)\n default_warn_args = ['-Wall', '-Winvalid-pch', '-Wnon-virtual-dtor']\n self.warn_args = {'1': default_warn_args,\n '2': default_warn_args + ['-Wextra'],\n@@ -202,7 +202,7 @@ class GnuCPPCompiler(GnuCompiler, CPPCompiler):\n 'cpp_debugstl': coredata.UserBooleanOption('cpp_debugstl',\n 'STL debug mode',\n False)})\n- if self.gcc_type == GCC_MINGW:\n+ if self.compiler_type == CompilerType.GCC_MINGW:\n opts.update({\n 'cpp_winlibs': coredata.UserArrayOption('cpp_winlibs', 'Standard Win libraries to link against',\n gnu_winlibs), })\n@@ -218,7 +218,7 @@ class GnuCPPCompiler(GnuCompiler, CPPCompiler):\n return args\n \n def get_option_link_args(self, options):\n- if self.gcc_type == GCC_MINGW:\n+ if self.compiler_type == CompilerType.GCC_MINGW:\n return options['cpp_winlibs'].value[:]\n return []\n \n@@ -230,9 +230,9 @@ class GnuCPPCompiler(GnuCompiler, CPPCompiler):\n \n \n class ElbrusCPPCompiler(GnuCPPCompiler, ElbrusCompiler):\n- def __init__(self, exelist, version, gcc_type, is_cross, exe_wrapper=None, defines=None, **kwargs):\n- GnuCPPCompiler.__init__(self, exelist, version, gcc_type, is_cross, exe_wrapper, defines, **kwargs)\n- ElbrusCompiler.__init__(self, gcc_type, defines)\n+ def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None, defines=None, **kwargs):\n+ GnuCPPCompiler.__init__(self, exelist, version, compiler_type, is_cross, exe_wrapper, defines, **kwargs)\n+ ElbrusCompiler.__init__(self, compiler_type, defines)\n \n # It does not support c++/gnu++ 17 and 1z, but still does support 0x, 1y, and gnu++98.\n def get_options(self):\n@@ -253,9 +253,9 @@ class ElbrusCPPCompiler(GnuCPPCompiler, ElbrusCompiler):\n \n \n class IntelCPPCompiler(IntelCompiler, CPPCompiler):\n- def __init__(self, exelist, version, icc_type, is_cross, exe_wrap, **kwargs):\n+ def __init__(self, exelist, version, compiler_type, is_cross, exe_wrap, **kwargs):\n CPPCompiler.__init__(self, exelist, version, is_cross, exe_wrap, **kwargs)\n- IntelCompiler.__init__(self, icc_type)\n+ IntelCompiler.__init__(self, compiler_type)\n self.lang_header = 'c++-header'\n default_warn_args = ['-Wall', '-w3', '-diag-disable:remark',\n '-Wpch-messages', '-Wnon-virtual-dtor']\n", "new_path": "mesonbuild/compilers/cpp.py", "old_path": "mesonbuild/compilers/cpp.py" }, { "change_type": "MODIFY", "diff": "@@ -17,8 +17,7 @@ import os.path, subprocess\n from ..mesonlib import EnvironmentException, version_compare, is_windows, is_osx\n \n from .compilers import (\n- GCC_STANDARD,\n- GCC_OSX,\n+ CompilerType,\n d_dmd_buildtype_args,\n d_gdc_buildtype_args,\n d_ldc_buildtype_args,\n@@ -152,12 +151,12 @@ class DCompiler(Compiler):\n if is_windows():\n return []\n elif is_osx():\n- soname_args = get_gcc_soname_args(GCC_OSX, *args)\n+ 
soname_args = get_gcc_soname_args(CompilerType.GCC_OSX, *args)\n if soname_args:\n return ['-Wl,' + ','.join(soname_args)]\n return []\n \n- return get_gcc_soname_args(GCC_STANDARD, *args)\n+ return get_gcc_soname_args(CompilerType.GCC_STANDARD, *args)\n \n def get_feature_args(self, kwargs, build_to_src):\n res = []\n", "new_path": "mesonbuild/compilers/d.py", "old_path": "mesonbuild/compilers/d.py" }, { "change_type": "MODIFY", "diff": "@@ -14,7 +14,7 @@\n \n from .c import CCompiler\n from .compilers import (\n- ICC_STANDARD,\n+ CompilerType,\n apple_buildtype_linker_args,\n gnulike_buildtype_args,\n gnulike_buildtype_linker_args,\n@@ -257,9 +257,9 @@ end program prog\n \n \n class GnuFortranCompiler(GnuCompiler, FortranCompiler):\n- def __init__(self, exelist, version, gcc_type, is_cross, exe_wrapper=None, defines=None, **kwargs):\n+ def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None, defines=None, **kwargs):\n FortranCompiler.__init__(self, exelist, version, is_cross, exe_wrapper, **kwargs)\n- GnuCompiler.__init__(self, gcc_type, defines)\n+ GnuCompiler.__init__(self, compiler_type, defines)\n default_warn_args = ['-Wall']\n self.warn_args = {'1': default_warn_args,\n '2': default_warn_args + ['-Wextra'],\n@@ -279,9 +279,9 @@ class GnuFortranCompiler(GnuCompiler, FortranCompiler):\n \n \n class ElbrusFortranCompiler(GnuFortranCompiler, ElbrusCompiler):\n- def __init__(self, exelist, version, gcc_type, is_cross, exe_wrapper=None, defines=None, **kwargs):\n- GnuFortranCompiler.__init__(self, exelist, version, gcc_type, is_cross, exe_wrapper, defines, **kwargs)\n- ElbrusCompiler.__init__(self, gcc_type, defines)\n+ def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None, defines=None, **kwargs):\n+ GnuFortranCompiler.__init__(self, exelist, version, compiler_type, is_cross, exe_wrapper, defines, **kwargs)\n+ ElbrusCompiler.__init__(self, compiler_type, defines)\n \n class G95FortranCompiler(FortranCompiler):\n def __init__(self, exelist, version, is_cross, exe_wrapper=None, **kwags):\n@@ -330,7 +330,7 @@ class IntelFortranCompiler(IntelCompiler, FortranCompiler):\n FortranCompiler.__init__(self, exelist, version, is_cross, exe_wrapper, **kwags)\n # FIXME: Add support for OS X and Windows in detect_fortran_compiler so\n # we are sent the type of compiler\n- IntelCompiler.__init__(self, ICC_STANDARD)\n+ IntelCompiler.__init__(self, CompilerType.ICC_STANDARD)\n self.id = 'intel'\n default_warn_args = ['-warn', 'general', '-warn', 'truncated_source']\n self.warn_args = {'1': default_warn_args,\n", "new_path": "mesonbuild/compilers/fortran.py", "old_path": "mesonbuild/compilers/fortran.py" }, { "change_type": "MODIFY", "diff": "@@ -51,17 +51,21 @@ class ObjCCompiler(CCompiler):\n \n \n class GnuObjCCompiler(GnuCompiler, ObjCCompiler):\n- def __init__(self, exelist, version, gcc_type, is_cross, exe_wrapper=None, defines=None):\n+ def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None, defines=None):\n ObjCCompiler.__init__(self, exelist, version, is_cross, exe_wrapper)\n- GnuCompiler.__init__(self, gcc_type, defines)\n+ GnuCompiler.__init__(self, compiler_type, defines)\n default_warn_args = ['-Wall', '-Winvalid-pch']\n self.warn_args = {'1': default_warn_args,\n '2': default_warn_args + ['-Wextra'],\n '3': default_warn_args + ['-Wextra', '-Wpedantic']}\n \n \n-class ClangObjCCompiler(ClangCompiler, GnuObjCCompiler):\n- def __init__(self, exelist, version, cltype, is_cross, exe_wrapper=None):\n- 
GnuObjCCompiler.__init__(self, exelist, version, cltype, is_cross, exe_wrapper)\n- ClangCompiler.__init__(self, cltype)\n+class ClangObjCCompiler(ClangCompiler, ObjCCompiler):\n+ def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None):\n+ ObjCCompiler.__init__(self, exelist, version, is_cross, exe_wrapper)\n+ ClangCompiler.__init__(self, compiler_type)\n+ default_warn_args = ['-Wall', '-Winvalid-pch']\n+ self.warn_args = {'1': default_warn_args,\n+ '2': default_warn_args + ['-Wextra'],\n+ '3': default_warn_args + ['-Wextra', '-Wpedantic']}\n self.base_options = ['b_pch', 'b_lto', 'b_pgo', 'b_sanitize', 'b_coverage']\n", "new_path": "mesonbuild/compilers/objc.py", "old_path": "mesonbuild/compilers/objc.py" }, { "change_type": "MODIFY", "diff": "@@ -52,17 +52,21 @@ class ObjCPPCompiler(CPPCompiler):\n \n \n class GnuObjCPPCompiler(GnuCompiler, ObjCPPCompiler):\n- def __init__(self, exelist, version, gcc_type, is_cross, exe_wrapper=None, defines=None):\n+ def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None, defines=None):\n ObjCPPCompiler.__init__(self, exelist, version, is_cross, exe_wrapper)\n- GnuCompiler.__init__(self, gcc_type, defines)\n+ GnuCompiler.__init__(self, compiler_type, defines)\n default_warn_args = ['-Wall', '-Winvalid-pch', '-Wnon-virtual-dtor']\n self.warn_args = {'1': default_warn_args,\n '2': default_warn_args + ['-Wextra'],\n '3': default_warn_args + ['-Wextra', '-Wpedantic']}\n \n \n-class ClangObjCPPCompiler(ClangCompiler, GnuObjCPPCompiler):\n- def __init__(self, exelist, version, cltype, is_cross, exe_wrapper=None):\n- GnuObjCPPCompiler.__init__(self, exelist, version, cltype, is_cross, exe_wrapper)\n- ClangCompiler.__init__(self, cltype)\n+class ClangObjCPPCompiler(ClangCompiler, ObjCPPCompiler):\n+ def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None):\n+ ObjCPPCompiler.__init__(self, exelist, version, is_cross, exe_wrapper)\n+ ClangCompiler.__init__(self, compiler_type)\n+ default_warn_args = ['-Wall', '-Winvalid-pch', '-Wnon-virtual-dtor']\n+ self.warn_args = {'1': default_warn_args,\n+ '2': default_warn_args + ['-Wextra'],\n+ '3': default_warn_args + ['-Wextra', '-Wpedantic']}\n self.base_options = ['b_pch', 'b_lto', 'b_pgo', 'b_sanitize', 'b_coverage']\n", "new_path": "mesonbuild/compilers/objcpp.py", "old_path": "mesonbuild/compilers/objcpp.py" }, { "change_type": "MODIFY", "diff": "@@ -22,14 +22,7 @@ from . import mlog\n \n from . 
import compilers\n from .compilers import (\n- CLANG_OSX,\n- CLANG_STANDARD,\n- CLANG_WIN,\n- GCC_CYGWIN,\n- GCC_MINGW,\n- GCC_OSX,\n- GCC_STANDARD,\n- ICC_STANDARD,\n+ CompilerType,\n is_assembly,\n is_header,\n is_library,\n@@ -451,12 +444,12 @@ class Environment:\n def get_gnu_compiler_type(defines):\n # Detect GCC type (Apple, MinGW, Cygwin, Unix)\n if '__APPLE__' in defines:\n- return GCC_OSX\n+ return CompilerType.GCC_OSX\n elif '__MINGW32__' in defines or '__MINGW64__' in defines:\n- return GCC_MINGW\n+ return CompilerType.GCC_MINGW\n elif '__CYGWIN__' in defines:\n- return GCC_CYGWIN\n- return GCC_STANDARD\n+ return CompilerType.GCC_CYGWIN\n+ return CompilerType.GCC_STANDARD\n \n def warn_about_lang_pointing_to_cross(self, compiler_exe, evar):\n evar_str = os.environ.get(evar, 'WHO_WOULD_CALL_THEIR_COMPILER_WITH_THIS_NAME')\n@@ -560,14 +553,14 @@ This is probably wrong, it should always point to the native compiler.''' % evar\n if not defines:\n popen_exceptions[' '.join(compiler)] = 'no pre-processor defines'\n continue\n- gtype = self.get_gnu_compiler_type(defines)\n+ compiler_type = self.get_gnu_compiler_type(defines)\n if guess_gcc_or_lcc == 'lcc':\n version = self.get_lcc_version_from_defines(defines)\n cls = ElbrusCCompiler if lang == 'c' else ElbrusCPPCompiler\n else:\n version = self.get_gnu_version_from_defines(defines)\n cls = GnuCCompiler if lang == 'c' else GnuCPPCompiler\n- return cls(ccache + compiler, version, gtype, is_cross, exe_wrap, defines, full_version=full_version)\n+ return cls(ccache + compiler, version, compiler_type, is_cross, exe_wrap, defines, full_version=full_version)\n \n if 'armclang' in out:\n # The compiler version is not present in the first line of output,\n@@ -587,13 +580,13 @@ This is probably wrong, it should always point to the native compiler.''' % evar\n return cls(ccache + compiler, version, is_cross, exe_wrap, full_version=full_version)\n if 'clang' in out:\n if 'Apple' in out or mesonlib.for_darwin(want_cross, self):\n- cltype = CLANG_OSX\n+ compiler_type = CompilerType.CLANG_OSX\n elif 'windows' in out or mesonlib.for_windows(want_cross, self):\n- cltype = CLANG_WIN\n+ compiler_type = CompilerType.CLANG_MINGW\n else:\n- cltype = CLANG_STANDARD\n+ compiler_type = CompilerType.CLANG_STANDARD\n cls = ClangCCompiler if lang == 'c' else ClangCPPCompiler\n- return cls(ccache + compiler, version, cltype, is_cross, exe_wrap, full_version=full_version)\n+ return cls(ccache + compiler, version, compiler_type, is_cross, exe_wrap, full_version=full_version)\n if 'Microsoft' in out or 'Microsoft' in err:\n # Latest versions of Visual Studio print version\n # number to stderr but earlier ones print version\n@@ -610,9 +603,9 @@ This is probably wrong, it should always point to the native compiler.''' % evar\n return cls(compiler, version, is_cross, exe_wrap, is_64)\n if '(ICC)' in out:\n # TODO: add microsoft add check OSX\n- inteltype = ICC_STANDARD\n+ compiler_type = CompilerType.ICC_STANDARD\n cls = IntelCCompiler if lang == 'c' else IntelCPPCompiler\n- return cls(ccache + compiler, version, inteltype, is_cross, exe_wrap, full_version=full_version)\n+ return cls(ccache + compiler, version, compiler_type, is_cross, exe_wrap, full_version=full_version)\n if 'ARM' in out:\n cls = ArmCCompiler if lang == 'c' else ArmCPPCompiler\n return cls(ccache + compiler, version, is_cross, exe_wrap, full_version=full_version)\n@@ -651,14 +644,14 @@ This is probably wrong, it should always point to the native compiler.''' % evar\n if not defines:\n 
popen_exceptions[' '.join(compiler)] = 'no pre-processor defines'\n continue\n- gtype = self.get_gnu_compiler_type(defines)\n+ compiler_type = self.get_gnu_compiler_type(defines)\n if guess_gcc_or_lcc == 'lcc':\n version = self.get_lcc_version_from_defines(defines)\n cls = ElbrusFortranCompiler\n else:\n version = self.get_gnu_version_from_defines(defines)\n cls = GnuFortranCompiler\n- return cls(compiler, version, gtype, is_cross, exe_wrap, defines, full_version=full_version)\n+ return cls(compiler, version, compiler_type, is_cross, exe_wrap, defines, full_version=full_version)\n \n if 'G95' in out:\n return G95FortranCompiler(compiler, version, is_cross, exe_wrap, full_version=full_version)\n@@ -704,13 +697,13 @@ This is probably wrong, it should always point to the native compiler.''' % evar\n if not defines:\n popen_exceptions[' '.join(compiler)] = 'no pre-processor defines'\n continue\n- gtype = self.get_gnu_compiler_type(defines)\n+ compiler_type = self.get_gnu_compiler_type(defines)\n version = self.get_gnu_version_from_defines(defines)\n- return GnuObjCCompiler(ccache + compiler, version, gtype, is_cross, exe_wrap, defines)\n+ return GnuObjCCompiler(ccache + compiler, version, compiler_type, is_cross, exe_wrap, defines)\n if out.startswith('Apple LLVM'):\n- return ClangObjCCompiler(ccache + compiler, version, CLANG_OSX, is_cross, exe_wrap)\n+ return ClangObjCCompiler(ccache + compiler, version, CompilerType.CLANG_OSX, is_cross, exe_wrap)\n if out.startswith('clang'):\n- return ClangObjCCompiler(ccache + compiler, version, CLANG_STANDARD, is_cross, exe_wrap)\n+ return ClangObjCCompiler(ccache + compiler, version, CompilerType.CLANG_STANDARD, is_cross, exe_wrap)\n self._handle_exceptions(popen_exceptions, compilers)\n \n def detect_objcpp_compiler(self, want_cross):\n@@ -731,13 +724,13 @@ This is probably wrong, it should always point to the native compiler.''' % evar\n if not defines:\n popen_exceptions[' '.join(compiler)] = 'no pre-processor defines'\n continue\n- gtype = self.get_gnu_compiler_type(defines)\n+ compiler_type = self.get_gnu_compiler_type(defines)\n version = self.get_gnu_version_from_defines(defines)\n- return GnuObjCPPCompiler(ccache + compiler, version, gtype, is_cross, exe_wrap, defines)\n+ return GnuObjCPPCompiler(ccache + compiler, version, compiler_type, is_cross, exe_wrap, defines)\n if out.startswith('Apple LLVM'):\n- return ClangObjCPPCompiler(ccache + compiler, version, CLANG_OSX, is_cross, exe_wrap)\n+ return ClangObjCPPCompiler(ccache + compiler, version, CompilerType.CLANG_OSX, is_cross, exe_wrap)\n if out.startswith('clang'):\n- return ClangObjCPPCompiler(ccache + compiler, version, CLANG_STANDARD, is_cross, exe_wrap)\n+ return ClangObjCPPCompiler(ccache + compiler, version, CompilerType.CLANG_STANDARD, is_cross, exe_wrap)\n self._handle_exceptions(popen_exceptions, compilers)\n \n def detect_java_compiler(self):\n", "new_path": "mesonbuild/environment.py", "old_path": "mesonbuild/environment.py" }, { "change_type": "MODIFY", "diff": "@@ -265,7 +265,7 @@ class InternalTests(unittest.TestCase):\n def test_compiler_args_class_gnuld(self):\n cargsfunc = mesonbuild.compilers.CompilerArgs\n ## Test --start/end-group\n- gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', 0, False)\n+ gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', mesonbuild.compilers.CompilerType.GCC_STANDARD, False)\n ## Test that 'direct' append and extend works\n l = cargsfunc(gcc, ['-Lfoodir', '-lfoo'])\n self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', 
'-lfoo', '-Wl,--end-group'])\n@@ -1642,30 +1642,30 @@ class AllPlatformTests(BasePlatformTests):\n if isinstance(cc, gnu):\n self.assertIsInstance(linker, ar)\n if is_osx():\n- self.assertEqual(cc.gcc_type, mesonbuild.compilers.GCC_OSX)\n+ self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.GCC_OSX)\n elif is_windows():\n- self.assertEqual(cc.gcc_type, mesonbuild.compilers.GCC_MINGW)\n+ self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.GCC_MINGW)\n elif is_cygwin():\n- self.assertEqual(cc.gcc_type, mesonbuild.compilers.GCC_CYGWIN)\n+ self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.GCC_CYGWIN)\n else:\n- self.assertEqual(cc.gcc_type, mesonbuild.compilers.GCC_STANDARD)\n+ self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.GCC_STANDARD)\n if isinstance(cc, clang):\n self.assertIsInstance(linker, ar)\n if is_osx():\n- self.assertEqual(cc.clang_type, mesonbuild.compilers.CLANG_OSX)\n+ self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.CLANG_OSX)\n elif is_windows():\n # Not implemented yet\n- self.assertEqual(cc.clang_type, mesonbuild.compilers.CLANG_WIN)\n+ self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.CLANG_MINGW)\n else:\n- self.assertEqual(cc.clang_type, mesonbuild.compilers.CLANG_STANDARD)\n+ self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.CLANG_STANDARD)\n if isinstance(cc, intel):\n self.assertIsInstance(linker, ar)\n if is_osx():\n- self.assertEqual(cc.icc_type, mesonbuild.compilers.ICC_OSX)\n+ self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.ICC_OSX)\n elif is_windows():\n- self.assertEqual(cc.icc_type, mesonbuild.compilers.ICC_WIN)\n+ self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.ICC_WIN)\n else:\n- self.assertEqual(cc.icc_type, mesonbuild.compilers.ICC_STANDARD)\n+ self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.ICC_STANDARD)\n if isinstance(cc, msvc):\n self.assertTrue(is_windows())\n self.assertIsInstance(linker, lib)\n@@ -3457,11 +3457,11 @@ class LinuxlikeTests(BasePlatformTests):\n for v in compiler.get_options()[lang_std].choices:\n if (compiler.get_id() == 'clang' and '17' in v and\n (version_compare(compiler.version, '<5.0.0') or\n- (compiler.clang_type == mesonbuild.compilers.CLANG_OSX and version_compare(compiler.version, '<9.1')))):\n+ (compiler.compiler_type == mesonbuild.compilers.CompilerType.CLANG_OSX and version_compare(compiler.version, '<9.1')))):\n continue\n if (compiler.get_id() == 'clang' and '2a' in v and\n (version_compare(compiler.version, '<6.0.0') or\n- (compiler.clang_type == mesonbuild.compilers.CLANG_OSX and version_compare(compiler.version, '<9.1')))):\n+ (compiler.compiler_type == mesonbuild.compilers.CompilerType.CLANG_OSX and version_compare(compiler.version, '<9.1')))):\n continue\n if (compiler.get_id() == 'gcc' and '2a' in v and version_compare(compiler.version, '<8.0.0')):\n continue\n", "new_path": "run_unittests.py", "old_path": "run_unittests.py" } ]
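A condensed sketch of the enum-with-predicates pattern described above — a single CompilerType enum whose member properties replace the scattered gcc_type/clang_type/icc_type integer checks (only a few members are shown; values and property names follow the diff):

    import enum

    @enum.unique
    class CompilerType(enum.Enum):
        GCC_STANDARD = 0
        GCC_OSX = 1
        GCC_MINGW = 2
        CLANG_OSX = 11
        ICC_WIN = 22

        @property
        def is_osx_compiler(self):
            # One predicate replaces per-family "is this the Apple variant?" checks.
            return self.name in ('GCC_OSX', 'CLANG_OSX')

        @property
        def is_windows_compiler(self):
            return self.name in ('GCC_MINGW', 'ICC_WIN')

    assert CompilerType.CLANG_OSX.is_osx_compiler
    assert not CompilerType.GCC_STANDARD.is_windows_compiler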
06646fe772304631126a0baf3c057e68285aa766
mesonbuild/meson
null
null
Drop unnecessary periods from help strings The ones for "Compiler options" have no dots, so this was inconsistent. Also dropping the dots makes the text fit better on narrow terminals.
[ { "change_type": "MODIFY", "diff": "@@ -590,31 +590,31 @@ def parse_cmd_line_options(args):\n delattr(args, name)\n \n builtin_options = {\n- 'buildtype': [UserComboOption, 'Build type to use.', ['plain', 'debug', 'debugoptimized', 'release', 'minsize', 'custom'], 'debug'],\n- 'strip': [UserBooleanOption, 'Strip targets on install.', False],\n- 'unity': [UserComboOption, 'Unity build.', ['on', 'off', 'subprojects'], 'off'],\n- 'prefix': [UserStringOption, 'Installation prefix.', default_prefix()],\n- 'libdir': [UserStringOption, 'Library directory.', default_libdir()],\n- 'libexecdir': [UserStringOption, 'Library executable directory.', default_libexecdir()],\n- 'bindir': [UserStringOption, 'Executable directory.', 'bin'],\n- 'sbindir': [UserStringOption, 'System executable directory.', 'sbin'],\n- 'includedir': [UserStringOption, 'Header file directory.', 'include'],\n- 'datadir': [UserStringOption, 'Data file directory.', 'share'],\n- 'mandir': [UserStringOption, 'Manual page directory.', 'share/man'],\n- 'infodir': [UserStringOption, 'Info page directory.', 'share/info'],\n- 'localedir': [UserStringOption, 'Locale data directory.', 'share/locale'],\n- 'sysconfdir': [UserStringOption, 'Sysconf data directory.', 'etc'],\n- 'localstatedir': [UserStringOption, 'Localstate data directory.', 'var'],\n- 'sharedstatedir': [UserStringOption, 'Architecture-independent data directory.', 'com'],\n- 'werror': [UserBooleanOption, 'Treat warnings as errors.', False],\n- 'warning_level': [UserComboOption, 'Compiler warning level to use.', ['1', '2', '3'], '1'],\n- 'layout': [UserComboOption, 'Build directory layout.', ['mirror', 'flat'], 'mirror'],\n- 'default_library': [UserComboOption, 'Default library type.', ['shared', 'static', 'both'], 'shared'],\n- 'backend': [UserComboOption, 'Backend to use.', backendlist, 'ninja'],\n- 'stdsplit': [UserBooleanOption, 'Split stdout and stderr in test logs.', True],\n- 'errorlogs': [UserBooleanOption, \"Whether to print the logs from failing tests.\", True],\n- 'install_umask': [UserUmaskOption, 'Default umask to apply on permissions of installed files.', '022'],\n- 'auto_features': [UserFeatureOption, \"Override value of all 'auto' features.\", 'auto'],\n+ 'buildtype': [UserComboOption, 'Build type to use', ['plain', 'debug', 'debugoptimized', 'release', 'minsize', 'custom'], 'debug'],\n+ 'strip': [UserBooleanOption, 'Strip targets on install', False],\n+ 'unity': [UserComboOption, 'Unity build', ['on', 'off', 'subprojects'], 'off'],\n+ 'prefix': [UserStringOption, 'Installation prefix', default_prefix()],\n+ 'libdir': [UserStringOption, 'Library directory', default_libdir()],\n+ 'libexecdir': [UserStringOption, 'Library executable directory', default_libexecdir()],\n+ 'bindir': [UserStringOption, 'Executable directory', 'bin'],\n+ 'sbindir': [UserStringOption, 'System executable directory', 'sbin'],\n+ 'includedir': [UserStringOption, 'Header file directory', 'include'],\n+ 'datadir': [UserStringOption, 'Data file directory', 'share'],\n+ 'mandir': [UserStringOption, 'Manual page directory', 'share/man'],\n+ 'infodir': [UserStringOption, 'Info page directory', 'share/info'],\n+ 'localedir': [UserStringOption, 'Locale data directory', 'share/locale'],\n+ 'sysconfdir': [UserStringOption, 'Sysconf data directory', 'etc'],\n+ 'localstatedir': [UserStringOption, 'Localstate data directory', 'var'],\n+ 'sharedstatedir': [UserStringOption, 'Architecture-independent data directory', 'com'],\n+ 'werror': [UserBooleanOption, 'Treat warnings as errors', False],\n+ 
'warning_level': [UserComboOption, 'Compiler warning level to use', ['1', '2', '3'], '1'],\n+ 'layout': [UserComboOption, 'Build directory layout', ['mirror', 'flat'], 'mirror'],\n+ 'default_library': [UserComboOption, 'Default library type', ['shared', 'static', 'both'], 'shared'],\n+ 'backend': [UserComboOption, 'Backend to use', backendlist, 'ninja'],\n+ 'stdsplit': [UserBooleanOption, 'Split stdout and stderr in test logs', True],\n+ 'errorlogs': [UserBooleanOption, \"Whether to print the logs from failing tests\", True],\n+ 'install_umask': [UserUmaskOption, 'Default umask to apply on permissions of installed files', '022'],\n+ 'auto_features': [UserFeatureOption, \"Override value of all 'auto' features\", 'auto'],\n 'optimization': [UserComboOption, 'Optimization level', ['0', 'g', '1', '2', '3', 's'], '0'],\n 'debug': [UserBooleanOption, 'Debug', True]\n }\n", "new_path": "mesonbuild/coredata.py", "old_path": "mesonbuild/coredata.py" } ]
d451a4bd97f827bb492fd0c0e357cb20b6056ed9
mesonbuild/meson
null
null
Remove get_cross_extra_flags This is no longer needed; we just remove the conditionals around it.
[ { "change_type": "MODIFY", "diff": "@@ -350,11 +350,6 @@ int dummy;\n if isinstance(parameters, CompilerArgs):\n parameters = parameters.to_native(copy=True)\n parameters = comp.compute_parameters_with_absolute_paths(parameters, self.build_dir)\n- if target.is_cross:\n- extra_parameters = comp.get_cross_extra_flags(self.environment, False)\n- if isinstance(parameters, CompilerArgs):\n- extra_parameters = extra_parameters.to_native(copy=True)\n- parameters = extra_parameters + parameters\n # The new entry\n src_block = {\n 'language': lang,\n@@ -1597,12 +1592,11 @@ rule FORTRAN_DEP_HACK%s\n if compiler.can_linker_accept_rsp():\n command_template = ' command = {executable} @$out.rsp\\n' \\\n ' rspfile = $out.rsp\\n' \\\n- ' rspfile_content = $ARGS {cross_args} {output_args} {compile_only_args} $in\\n'\n+ ' rspfile_content = $ARGS {output_args} {compile_only_args} $in\\n'\n else:\n- command_template = ' command = {executable} $ARGS {cross_args} {output_args} {compile_only_args} $in\\n'\n+ command_template = ' command = {executable} $ARGS {output_args} {compile_only_args} $in\\n'\n command = command_template.format(\n executable=' '.join([ninja_quote(i) for i in compiler.get_exelist()]),\n- cross_args=' '.join([quote_func(i) for i in compiler.get_cross_extra_flags(self.environment, False)]) if is_cross else '',\n output_args=' '.join(compiler.get_output_args('$out')),\n compile_only_args=' '.join(compiler.get_compile_only_args())\n )\n@@ -1647,20 +1641,15 @@ rule FORTRAN_DEP_HACK%s\n d = quote_func(d)\n quoted_depargs.append(d)\n \n- if is_cross:\n- cross_args = compiler.get_cross_extra_flags(self.environment, False)\n- else:\n- cross_args = ''\n if compiler.can_linker_accept_rsp():\n command_template = ''' command = {executable} @$out.rsp\n rspfile = $out.rsp\n- rspfile_content = $ARGS {cross_args} {dep_args} {output_args} {compile_only_args} $in\n+ rspfile_content = $ARGS {dep_args} {output_args} {compile_only_args} $in\n '''\n else:\n- command_template = ' command = {executable} $ARGS {cross_args} {dep_args} {output_args} {compile_only_args} $in\\n'\n+ command_template = ' command = {executable} $ARGS {dep_args} {output_args} {compile_only_args} $in\\n'\n command = command_template.format(\n executable=' '.join([ninja_quote(i) for i in compiler.get_exelist()]),\n- cross_args=' '.join([quote_func(i) for i in cross_args]),\n dep_args=' '.join(quoted_depargs),\n output_args=' '.join(compiler.get_output_args('$out')),\n compile_only_args=' '.join(compiler.get_compile_only_args())\n@@ -1687,11 +1676,6 @@ rule FORTRAN_DEP_HACK%s\n rule = 'rule %s%s_PCH\\n' % (langname, crstr)\n depargs = compiler.get_dependency_gen_args('$out', '$DEPFILE')\n cross_args = []\n- if is_cross:\n- try:\n- cross_args = compiler.get_cross_extra_flags(self.environment, False)\n- except KeyError:\n- pass\n \n quoted_depargs = []\n for d in depargs:\n", "new_path": "mesonbuild/backend/ninjabackend.py", "old_path": "mesonbuild/backend/ninjabackend.py" }, { "change_type": "MODIFY", "diff": "@@ -321,10 +321,7 @@ class CCompiler(Compiler):\n # on OSX the compiler binary is the same but you need\n # a ton of compiler flags to differentiate between\n # arm and x86_64. 
So just compile.\n- extra_flags += self.get_cross_extra_flags(environment, link=False)\n extra_flags += self.get_compile_only_args()\n- else:\n- extra_flags += self.get_cross_extra_flags(environment, link=True)\n # Is a valid executable output for all toolchains and platforms\n binname += '.exe'\n # Write binary check source\n@@ -424,28 +421,25 @@ class CCompiler(Compiler):\n # Select a CRT if needed since we're linking\n if mode == 'link':\n args += self.get_linker_debug_crt_args()\n- # Read c_args/cpp_args/etc from the cross-info file (if needed)\n- args += self.get_cross_extra_flags(env, link=(mode == 'link'))\n- if not self.is_cross:\n- if env.is_cross_build() and not self.is_cross:\n- for_machine = MachineChoice.BUILD\n- else:\n- for_machine = MachineChoice.HOST\n- if mode == 'preprocess':\n- # Add CPPFLAGS from the env.\n- args += env.coredata.get_external_preprocess_args(for_machine, self.language)\n- elif mode == 'compile':\n- # Add CFLAGS/CXXFLAGS/OBJCFLAGS/OBJCXXFLAGS from the env\n- sys_args = env.coredata.get_external_args(for_machine, self.language)\n- # Apparently it is a thing to inject linker flags both\n- # via CFLAGS _and_ LDFLAGS, even though the former are\n- # also used during linking. These flags can break\n- # argument checks. Thanks, Autotools.\n- cleaned_sys_args = self.remove_linkerlike_args(sys_args)\n- args += cleaned_sys_args\n- elif mode == 'link':\n- # Add LDFLAGS from the env\n- args += env.coredata.get_external_link_args(for_machine, self.language)\n+ if env.is_cross_build() and not self.is_cross:\n+ for_machine = MachineChoice.BUILD\n+ else:\n+ for_machine = MachineChoice.HOST\n+ if mode == 'preprocess':\n+ # Add CPPFLAGS from the env.\n+ args += env.coredata.get_external_preprocess_args(for_machine, self.language)\n+ elif mode == 'compile':\n+ # Add CFLAGS/CXXFLAGS/OBJCFLAGS/OBJCXXFLAGS from the env\n+ sys_args = env.coredata.get_external_args(for_machine, self.language)\n+ # Apparently it is a thing to inject linker flags both\n+ # via CFLAGS _and_ LDFLAGS, even though the former are\n+ # also used during linking. These flags can break\n+ # argument checks. 
Thanks, Autotools.\n+ cleaned_sys_args = self.remove_linkerlike_args(sys_args)\n+ args += cleaned_sys_args\n+ elif mode == 'link':\n+ # Add LDFLAGS from the env\n+ args += env.coredata.get_external_link_args(for_machine, self.language)\n args += self.get_compiler_check_args()\n # extra_args must override all other arguments, so we add them last\n args += extra_args\n@@ -875,8 +869,7 @@ class CCompiler(Compiler):\n }\n #endif\n '''\n- args = self.get_cross_extra_flags(env, link=False)\n- args += self.get_compiler_check_args()\n+ args = self.get_compiler_check_args()\n n = 'symbols_have_underscore_prefix'\n with self.compile(code, args, 'compile', want_output=True) as p:\n if p.returncode != 0:\n", "new_path": "mesonbuild/compilers/c.py", "old_path": "mesonbuild/compilers/c.py" }, { "change_type": "MODIFY", "diff": "@@ -1084,14 +1084,6 @@ class Compiler:\n 'Language {} does not support has_multi_link_arguments.'.format(\n self.get_display_language()))\n \n- def get_cross_extra_flags(self, environment, link):\n- extra_flags = []\n- if self.is_cross and environment:\n- extra_flags += environment.coredata.get_external_args(MachineChoice.HOST, self.language)\n- if link:\n- extra_flags += environment.coredata.get_external_link_args(MachineChoice.HOST, self.language)\n- return extra_flags\n-\n def _get_compile_output(self, dirname, mode):\n # In pre-processor mode, the output is sent to stdout and discarded\n if mode == 'preprocess':\n", "new_path": "mesonbuild/compilers/compilers.py", "old_path": "mesonbuild/compilers/compilers.py" }, { "change_type": "MODIFY", "diff": "@@ -78,8 +78,7 @@ class FortranCompiler(Compiler):\n binary_name = os.path.join(work_dir, 'sanitycheckf')\n with open(source_name, 'w') as ofile:\n ofile.write('print *, \"Fortran compilation is working.\"; end')\n- extra_flags = self.get_cross_extra_flags(environment, link=True)\n- pc = subprocess.Popen(self.exelist + extra_flags + [source_name, '-o', binary_name])\n+ pc = subprocess.Popen(self.exelist + [source_name, '-o', binary_name])\n pc.wait()\n if pc.returncode != 0:\n raise EnvironmentException('Compiler %s can not compile programs.' 
% self.name_string())\n", "new_path": "mesonbuild/compilers/fortran.py", "old_path": "mesonbuild/compilers/fortran.py" }, { "change_type": "MODIFY", "diff": "@@ -31,7 +31,7 @@ class ObjCCompiler(CCompiler):\n # TODO try to use sanity_check_impl instead of duplicated code\n source_name = os.path.join(work_dir, 'sanitycheckobjc.m')\n binary_name = os.path.join(work_dir, 'sanitycheckobjc')\n- extra_flags = self.get_cross_extra_flags(environment, link=False)\n+ extra_flags = []\n if self.is_cross:\n extra_flags += self.get_compile_only_args()\n with open(source_name, 'w') as ofile:\n", "new_path": "mesonbuild/compilers/objc.py", "old_path": "mesonbuild/compilers/objc.py" }, { "change_type": "MODIFY", "diff": "@@ -31,14 +31,11 @@ class ObjCPPCompiler(CPPCompiler):\n # TODO try to use sanity_check_impl instead of duplicated code\n source_name = os.path.join(work_dir, 'sanitycheckobjcpp.mm')\n binary_name = os.path.join(work_dir, 'sanitycheckobjcpp')\n- extra_flags = self.get_cross_extra_flags(environment, link=False)\n- if self.is_cross:\n- extra_flags += self.get_compile_only_args()\n with open(source_name, 'w') as ofile:\n ofile.write('#import<stdio.h>\\n'\n 'class MyClass;'\n 'int main(int argc, char **argv) { return 0; }\\n')\n- pc = subprocess.Popen(self.exelist + extra_flags + [source_name, '-o', binary_name])\n+ pc = subprocess.Popen(self.exelist + [source_name, '-o', binary_name])\n pc.wait()\n if pc.returncode != 0:\n raise EnvironmentException('ObjC++ compiler %s can not compile programs.' % self.name_string())\n", "new_path": "mesonbuild/compilers/objcpp.py", "old_path": "mesonbuild/compilers/objcpp.py" }, { "change_type": "MODIFY", "diff": "@@ -105,8 +105,7 @@ class SwiftCompiler(Compiler):\n with open(source_name, 'w') as ofile:\n ofile.write('''print(\"Swift compilation is working.\")\n ''')\n- extra_flags = self.get_cross_extra_flags(environment, link=True)\n- pc = subprocess.Popen(self.exelist + extra_flags + ['-emit-executable', '-o', output_name, src], cwd=work_dir)\n+ pc = subprocess.Popen(self.exelist + ['-emit-executable', '-o', output_name, src], cwd=work_dir)\n pc.wait()\n if pc.returncode != 0:\n raise EnvironmentException('Swift compiler %s can not compile programs.' % self.name_string())\n", "new_path": "mesonbuild/compilers/swift.py", "old_path": "mesonbuild/compilers/swift.py" }, { "change_type": "MODIFY", "diff": "@@ -87,8 +87,7 @@ class ValaCompiler(Compiler):\n \n def sanity_check(self, work_dir, environment):\n code = 'class MesonSanityCheck : Object { }'\n- args = self.get_cross_extra_flags(environment, link=False)\n- with self.compile(code, args, 'compile') as p:\n+ with self.compile(code, [], 'compile') as p:\n if p.returncode != 0:\n msg = 'Vala compiler {!r} can not compile programs' \\\n ''.format(self.name_string())\n@@ -107,9 +106,7 @@ class ValaCompiler(Compiler):\n if not extra_dirs:\n code = 'class MesonFindLibrary : Object { }'\n vapi_args = ['--pkg', libname]\n- args = self.get_cross_extra_flags(env, link=False)\n- args += vapi_args\n- with self.compile(code, args, 'compile') as p:\n+ with self.compile(code, vapi_args, 'compile') as p:\n if p.returncode == 0:\n return vapi_args\n # Not found? Try to find the vapi file itself.\n", "new_path": "mesonbuild/compilers/vala.py", "old_path": "mesonbuild/compilers/vala.py" } ]
08ce1fb541374fb1ddce1d7318ceb92459942e9e
mesonbuild/meson
null
null
Move the list of LLVM version suffixes to a common place. Both scan-build and llvm-config need the same list of LLVM version suffixes. It is better to keep the list in a common place instead of having several copies in different files, which are likely to become out of sync when the list is updated.
[ { "change_type": "MODIFY", "diff": "@@ -22,6 +22,7 @@ import re\n \n from .. import mesonlib, mlog\n from ..mesonlib import version_compare, stringlistify, extract_as_list, MachineChoice\n+from ..environment import get_llvm_tool_names\n from .base import (\n DependencyException, DependencyMethods, ExternalDependency, PkgConfigDependency,\n strip_system_libdirs, ConfigToolDependency, CMakeDependency, HasNativeKwarg\n@@ -208,25 +209,7 @@ class LLVMDependencyConfigTool(ConfigToolDependency):\n # before `super().__init__` is called.\n HasNativeKwarg.__init__(self, kwargs)\n \n- # Ordered list of llvm-config binaries to try. Start with base, then try\n- # newest back to oldest (3.5 is arbitrary), and finally the devel version.\n- # Please note that llvm-config-6.0 is a development snapshot and it should\n- # not be moved to the beginning of the list.\n- self.tools = [\n- 'llvm-config', # base\n- 'llvm-config-8', 'llvm-config80',\n- 'llvm-config-7', 'llvm-config70',\n- 'llvm-config-6.0', 'llvm-config60',\n- 'llvm-config-5.0', 'llvm-config50',\n- 'llvm-config-4.0', 'llvm-config40',\n- 'llvm-config-3.9', 'llvm-config39',\n- 'llvm-config-3.8', 'llvm-config38',\n- 'llvm-config-3.7', 'llvm-config37',\n- 'llvm-config-3.6', 'llvm-config36',\n- 'llvm-config-3.5', 'llvm-config35',\n- 'llvm-config-9', # Debian development snapshot\n- 'llvm-config-devel', # FreeBSD development snapshot\n- ]\n+ self.tools = get_llvm_tool_names('llvm-config')\n \n # Fedora starting with Fedora 30 adds a suffix of the number\n # of bits in the isa that llvm targets, for example, on x86_64\n", "new_path": "mesonbuild/dependencies/dev.py", "old_path": "mesonbuild/dependencies/dev.py" }, { "change_type": "MODIFY", "diff": "@@ -163,6 +163,32 @@ def detect_ninja(version: str = '1.5', log: bool = False) -> str:\n mlog.log('Found {}-{} at {}'.format(name, found, quote_arg(n)))\n return n\n \n+def get_llvm_tool_names(tool: str) -> typing.List[str]:\n+ # Ordered list of possible suffixes of LLVM executables to try. Start with\n+ # base, then try newest back to oldest (3.5 is arbitrary), and finally the\n+ # devel version. Please note that the development snapshot in Debian does\n+ # not have a distinct name. 
Do not move it to the beginning of the list\n+ # unless it becomes a stable release.\n+ suffixes = [\n+ '', # base (no suffix)\n+ '-8', '80',\n+ '-7', '70',\n+ '-6.0', '60',\n+ '-5.0', '50',\n+ '-4.0', '40',\n+ '-3.9', '39',\n+ '-3.8', '38',\n+ '-3.7', '37',\n+ '-3.6', '36',\n+ '-3.5', '35',\n+ '-9', # Debian development snapshot\n+ '-devel', # FreeBSD development snapshot\n+ ]\n+ names = []\n+ for suffix in suffixes:\n+ names.append(tool + suffix)\n+ return names\n+\n def detect_scanbuild():\n \"\"\" Look for scan-build binary on build platform\n \n@@ -182,20 +208,7 @@ def detect_scanbuild():\n exelist = split_args(os.environ['SCANBUILD'])\n \n else:\n- tools = [\n- 'scan-build', # base\n- 'scan-build-8', 'scan-build80',\n- 'scan-build-7', 'scan-build70',\n- 'scan-build-6.0', 'scan-build60',\n- 'scan-build-5.0', 'scan-build50',\n- 'scan-build-4.0', 'scan-build40',\n- 'scan-build-3.9', 'scan-build39',\n- 'scan-build-3.8', 'scan-build38',\n- 'scan-build-3.7', 'scan-build37',\n- 'scan-build-3.6', 'scan-build36',\n- 'scan-build-3.5', 'scan-build35',\n- 'scan-build-9', 'scan-build-devel', # development snapshot\n- ]\n+ tools = get_llvm_tool_names('scan-build')\n for tool in tools:\n if shutil.which(tool) is not None:\n exelist = [shutil.which(tool)]\n", "new_path": "mesonbuild/environment.py", "old_path": "mesonbuild/environment.py" } ]
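A minimal usage sketch of the shared helper added in this commit (output values follow the suffix list in the diff above; the truncation to three entries is only for illustration):

# Illustrative use of the helper from mesonbuild/environment.py.
from mesonbuild.environment import get_llvm_tool_names

# The suffix list is applied to whatever base name the caller passes,
# so llvm-config and scan-build can no longer drift out of sync.
print(get_llvm_tool_names('llvm-config')[:3])  # ['llvm-config', 'llvm-config-8', 'llvm-config80']
print(get_llvm_tool_names('scan-build')[:3])   # ['scan-build', 'scan-build-8', 'scan-build80']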
3d6d908f7c41623c26289e09ed6608cae14d4948
mesonbuild/meson
null
null
Refactor TestResult object initialization. Attributes of the TestResult object are now added or altered in place, rather than through many invocations of the constructor with very similar arguments. Define helper methods for TestResult to add the results of a step and set the failure reason.
[ { "change_type": "MODIFY", "diff": "@@ -65,17 +65,30 @@ class BuildStep(Enum):\n \n \n class TestResult:\n- def __init__(self, msg, step, stdo, stde, mlog, cicmds, conftime=0, buildtime=0, testtime=0):\n- self.msg = msg\n- self.step = step\n- self.stdo = stdo\n- self.stde = stde\n- self.mlog = mlog\n+ def __init__(self, cicmds):\n+ self.msg = '' # empty msg indicates test success\n+ self.stdo = ''\n+ self.stde = ''\n+ self.mlog = ''\n self.cicmds = cicmds\n- self.conftime = conftime\n- self.buildtime = buildtime\n- self.testtime = testtime\n+ self.conftime = 0\n+ self.buildtime = 0\n+ self.testtime = 0\n \n+ def add_step(self, step, stdo, stde, mlog='', time=0):\n+ self.step = step\n+ self.stdo += stdo\n+ self.stde += stde\n+ self.mlog += mlog\n+ if step == BuildStep.configure:\n+ self.conftime = time\n+ elif step == BuildStep.build:\n+ self.buildtime = time\n+ elif step == BuildStep.test:\n+ self.testtime = time\n+\n+ def fail(self, msg):\n+ self.msg = msg\n \n @functools.total_ordering\n class TestDef:\n@@ -434,16 +447,20 @@ def _run_test(testdir, test_build_dir, install_dir, extra_args, compiler, backen\n except Exception:\n mesonlog = no_meson_log_msg\n cicmds = run_ci_commands(mesonlog)\n- gen_time = time.time() - gen_start\n+ testresult = TestResult(cicmds)\n+ testresult.add_step(BuildStep.configure, stdo, stde, mesonlog, time.time() - gen_start)\n if should_fail == 'meson':\n if returncode == 1:\n- return TestResult('', BuildStep.configure, stdo, stde, mesonlog, cicmds, gen_time)\n+ return testresult\n elif returncode != 0:\n- return TestResult('Test exited with unexpected status {}'.format(returncode), BuildStep.configure, stdo, stde, mesonlog, cicmds, gen_time)\n+ testresult.fail('Test exited with unexpected status {}.'.format(returncode))\n+ return testresult\n else:\n- return TestResult('Test that should have failed succeeded', BuildStep.configure, stdo, stde, mesonlog, cicmds, gen_time)\n+ testresult.fail('Test that should have failed succeeded.')\n+ return testresult\n if returncode != 0:\n- return TestResult('Generating the build system failed.', BuildStep.configure, stdo, stde, mesonlog, cicmds, gen_time)\n+ testresult.fail('Generating the build system failed.')\n+ return testresult\n builddata = build.load(test_build_dir)\n # Touch the meson.build file to force a regenerate so we can test that\n # regeneration works before a build is run.\n@@ -453,15 +470,15 @@ def _run_test(testdir, test_build_dir, install_dir, extra_args, compiler, backen\n dir_args = get_backend_args_for_dir(backend, test_build_dir)\n build_start = time.time()\n pc, o, e = Popen_safe(compile_commands + dir_args, cwd=test_build_dir)\n- build_time = time.time() - build_start\n- stdo += o\n- stde += e\n+ testresult.add_step(BuildStep.build, o, e, '', time.time() - build_start)\n if should_fail == 'build':\n if pc.returncode != 0:\n- return TestResult('', BuildStep.build, stdo, stde, mesonlog, cicmds, gen_time)\n- return TestResult('Test that should have failed to build succeeded', BuildStep.build, stdo, stde, mesonlog, cicmds, gen_time)\n+ return testresult\n+ testresult.fail('Test that should have failed to build succeeded.')\n+ return testresult\n if pc.returncode != 0:\n- return TestResult('Compiling source code failed.', BuildStep.build, stdo, stde, mesonlog, cicmds, gen_time, build_time)\n+ testresult.fail('Compiling source code failed.')\n+ return testresult\n # Touch the meson.build file to force a regenerate so we can test that\n # regeneration works after a build is complete.\n 
ensure_backend_detects_changes(backend)\n@@ -469,37 +486,44 @@ def _run_test(testdir, test_build_dir, install_dir, extra_args, compiler, backen\n test_start = time.time()\n # Test in-process\n (returncode, tstdo, tstde, test_log) = run_test_inprocess(test_build_dir)\n- test_time = time.time() - test_start\n- stdo += tstdo\n- stde += tstde\n- mesonlog += test_log\n+ testresult.add_step(BuildStep.test, tstdo, tstde, test_log, time.time() - test_start)\n if should_fail == 'test':\n if returncode != 0:\n- return TestResult('', BuildStep.test, stdo, stde, mesonlog, cicmds, gen_time)\n- return TestResult('Test that should have failed to run unit tests succeeded', BuildStep.test, stdo, stde, mesonlog, cicmds, gen_time)\n+ return testresult\n+ testresult.fail('Test that should have failed to run unit tests succeeded.')\n+ return testresult\n if returncode != 0:\n- return TestResult('Running unit tests failed.', BuildStep.test, stdo, stde, mesonlog, cicmds, gen_time, build_time, test_time)\n+ testresult.fail('Running unit tests failed.')\n+ return testresult\n # Do installation, if the backend supports it\n if install_commands:\n env = os.environ.copy()\n env['DESTDIR'] = install_dir\n # Install with subprocess\n pi, o, e = Popen_safe(install_commands, cwd=test_build_dir, env=env)\n- stdo += o\n- stde += e\n+ testresult.add_step(BuildStep.install, o, e)\n if pi.returncode != 0:\n- return TestResult('Running install failed.', BuildStep.install, stdo, stde, mesonlog, cicmds, gen_time, build_time, test_time)\n+ testresult.fail('Running install failed.')\n+ return testresult\n+\n # Clean with subprocess\n env = os.environ.copy()\n pi, o, e = Popen_safe(clean_commands + dir_args, cwd=test_build_dir, env=env)\n- stdo += o\n- stde += e\n+ testresult.add_step(BuildStep.clean, o, e)\n if pi.returncode != 0:\n- return TestResult('Running clean failed.', BuildStep.clean, stdo, stde, mesonlog, cicmds, gen_time, build_time, test_time)\n+ testresult.fail('Running clean failed.')\n+ return testresult\n+\n+ # Validate installed files\n+ testresult.add_step(BuildStep.install, '', '')\n if not install_commands:\n- return TestResult('', BuildStep.install, '', '', mesonlog, cicmds, gen_time, build_time, test_time)\n- return TestResult(validate_install(testdir, install_dir, compiler, builddata.environment),\n- BuildStep.validate, stdo, stde, mesonlog, cicmds, gen_time, build_time, test_time)\n+ return testresult\n+ install_msg = validate_install(testdir, install_dir, compiler, builddata.environment)\n+ if install_msg:\n+ testresult.fail(install_msg)\n+ return testresult\n+\n+ return testresult\n \n def gather_tests(testdir: Path) -> T.Iterator[TestDef]:\n tests = [t.name for t in testdir.glob('*') if t.is_dir()]\n", "new_path": "run_project_tests.py", "old_path": "run_project_tests.py" } ]
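A sketch of the incremental call pattern this refactor enables in run_project_tests.py (the step outputs and the failure condition here are placeholders, not real test output):

# TestResult and BuildStep come from run_project_tests.py in this repo.
from run_project_tests import TestResult, BuildStep

testresult = TestResult(cicmds=[])           # empty msg means "still passing"
testresult.add_step(BuildStep.configure, 'configure stdout', '', mlog='', time=1.2)
testresult.add_step(BuildStep.build, 'build stdout', 'build stderr', time=3.4)

compile_failed = True                        # placeholder condition
if compile_failed:
    testresult.fail('Compiling source code failed.')   # records the failure reason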
28e3ce67ae49494d57372f27b6f91580656f77a7
mesonbuild/meson
null
null
Convert test protocol into an enum. This gives us better type safety, and will be important as we add more test methods.
[ { "change_type": "MODIFY", "diff": "@@ -14,6 +14,7 @@\n \n from collections import OrderedDict\n from functools import lru_cache\n+import enum\n import json\n import os\n import pickle\n@@ -28,12 +29,33 @@ from .. import dependencies\n from .. import mesonlib\n from .. import mlog\n from ..compilers import CompilerArgs, VisualStudioLikeCompiler\n-from ..interpreter import Interpreter\n from ..mesonlib import (\n File, MachineChoice, MesonException, OrderedSet, OptionOverrideProxy,\n classify_unity_sources, unholder\n )\n \n+if T.TYPE_CHECKING:\n+ from ..interpreter import Interpreter\n+\n+\n+class TestProtocol(enum.Enum):\n+\n+ EXITCODE = 0\n+ TAP = 1\n+\n+ @classmethod\n+ def from_str(cls, string: str) -> 'TestProtocol':\n+ if string == 'exitcode':\n+ return cls.EXITCODE\n+ elif string == 'tap':\n+ return cls.TAP\n+ raise MesonException('unknown test format {}'.format(string))\n+\n+ def __str__(self) -> str:\n+ if self is self.EXITCODE:\n+ return 'exitcode'\n+ return 'tap'\n+\n \n class CleanTrees:\n '''\n@@ -91,7 +113,7 @@ class TestSerialisation:\n needs_exe_wrapper: bool, is_parallel: bool, cmd_args: T.List[str],\n env: build.EnvironmentVariables, should_fail: bool,\n timeout: T.Optional[int], workdir: T.Optional[str],\n- extra_paths: T.List[str], protocol: str, priority: int):\n+ extra_paths: T.List[str], protocol: TestProtocol, priority: int):\n self.name = name\n self.project_name = project\n self.suite = suite\n@@ -111,7 +133,7 @@ class TestSerialisation:\n self.priority = priority\n self.needs_exe_wrapper = needs_exe_wrapper\n \n-def get_backend_from_name(backend: str, build: T.Optional[build.Build] = None, interpreter: T.Optional[Interpreter] = None) -> T.Optional['Backend']:\n+def get_backend_from_name(backend: str, build: T.Optional[build.Build] = None, interpreter: T.Optional['Interpreter'] = None) -> T.Optional['Backend']:\n if backend == 'ninja':\n from . 
import ninjabackend\n return ninjabackend.NinjaBackend(build, interpreter)\n@@ -138,7 +160,7 @@ def get_backend_from_name(backend: str, build: T.Optional[build.Build] = None, i\n # This class contains the basic functionality that is needed by all backends.\n # Feel free to move stuff in and out of it as you see fit.\n class Backend:\n- def __init__(self, build: T.Optional[build.Build], interpreter: T.Optional[Interpreter]):\n+ def __init__(self, build: T.Optional[build.Build], interpreter: T.Optional['Interpreter']):\n # Make it possible to construct a dummy backend\n # This is used for introspection without a build directory\n if build is None:\n", "new_path": "mesonbuild/backend/backends.py", "old_path": "mesonbuild/backend/backends.py" }, { "change_type": "MODIFY", "diff": "@@ -33,6 +33,7 @@ from .interpreterbase import FeatureNew, FeatureDeprecated, FeatureNewKwargs\n from .interpreterbase import ObjectHolder\n from .modules import ModuleReturnValue\n from .cmake import CMakeInterpreter\n+from .backend.backends import TestProtocol\n \n from pathlib import Path, PurePath\n import os\n@@ -979,7 +980,7 @@ class Test(InterpreterObject):\n self.should_fail = should_fail\n self.timeout = timeout\n self.workdir = workdir\n- self.protocol = protocol\n+ self.protocol = TestProtocol.from_str(protocol)\n self.priority = priority\n \n def get_exe(self):\n", "new_path": "mesonbuild/interpreter.py", "old_path": "mesonbuild/interpreter.py" }, { "change_type": "MODIFY", "diff": "@@ -328,7 +328,7 @@ def get_test_list(testdata) -> T.List[T.Dict[str, T.Union[str, int, T.List[str],\n to['suite'] = t.suite\n to['is_parallel'] = t.is_parallel\n to['priority'] = t.priority\n- to['protocol'] = t.protocol\n+ to['protocol'] = str(t.protocol)\n result.append(to)\n return result\n \n", "new_path": "mesonbuild/mintro.py", "old_path": "mesonbuild/mintro.py" }, { "change_type": "MODIFY", "diff": "@@ -43,6 +43,7 @@ from . import environment\n from . import mlog\n from .dependencies import ExternalProgram\n from .mesonlib import MesonException, get_wine_shortpath, split_args\n+from .backend.backends import TestProtocol\n \n if T.TYPE_CHECKING:\n from .backend.backends import TestSerialisation\n@@ -631,7 +632,7 @@ class SingleTestRunner:\n if not self.options.verbose:\n stdout = tempfile.TemporaryFile(\"wb+\")\n stderr = tempfile.TemporaryFile(\"wb+\") if self.options.split else stdout\n- if self.test.protocol == 'tap' and stderr is stdout:\n+ if self.test.protocol is TestProtocol.TAP and stderr is stdout:\n stdout = tempfile.TemporaryFile(\"wb+\")\n \n # Let gdb handle ^C instead of us\n@@ -741,7 +742,7 @@ class SingleTestRunner:\n if timed_out:\n return TestRun(self.test, self.test_env, TestResult.TIMEOUT, [], p.returncode, starttime, duration, stdo, stde, cmd)\n else:\n- if self.test.protocol == 'exitcode':\n+ if self.test.protocol is TestProtocol.EXITCODE:\n return TestRun.make_exitcode(self.test, self.test_env, p.returncode, starttime, duration, stdo, stde, cmd)\n else:\n if self.options.verbose:\n", "new_path": "mesonbuild/mtest.py", "old_path": "mesonbuild/mtest.py" } ]
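A short sketch of the round trip the new enum provides, with strings at the serialisation/introspection boundary and enum members internally (the unknown-format string is an invented example):

from mesonbuild.backend.backends import TestProtocol

proto = TestProtocol.from_str('tap')   # parse the string from the test() kwargs
assert proto is TestProtocol.TAP       # identity check replaces string comparison in mtest
assert str(proto) == 'tap'             # mintro still serialises the plain string
# TestProtocol.from_str('xunit') would raise MesonException('unknown test format xunit')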
90310116ab683f8b7869836b5ae9b6504f87bcf4
mesonbuild/meson
null
null
Replace backend.get_option_for_target() with target.get_option(). That method had nothing specific to the backend; it is purely a Target method. This allows caching the OptionOverrideProxy object on the Target instance instead of creating a new one for each option lookup.
[ { "change_type": "MODIFY", "diff": "@@ -32,7 +32,7 @@ from .. import mesonlib\n from .. import mlog\n from ..compilers import LANGUAGES_USING_LDFLAGS, detect\n from ..mesonlib import (\n- File, MachineChoice, MesonException, OrderedSet, OptionOverrideProxy,\n+ File, MachineChoice, MesonException, OrderedSet,\n classify_unity_sources, OptionKey, join_args\n )\n \n@@ -310,19 +310,6 @@ class Backend:\n def get_target_filename_abs(self, target: T.Union[build.Target, build.CustomTargetIndex]) -> str:\n return os.path.join(self.environment.get_build_dir(), self.get_target_filename(target))\n \n- def get_options_for_target(self, target: build.BuildTarget) -> OptionOverrideProxy:\n- return OptionOverrideProxy(target.option_overrides,\n- self.environment.coredata.options,\n- target.subproject)\n-\n- def get_option_for_target(self, key: 'OptionKey', target: build.BuildTarget) -> T.Union[str, int, bool, 'WrapMode']:\n- options = self.get_options_for_target(target)\n- # We don't actually have wrapmode here to do an assert, so just do a\n- # cast, we know what's in coredata anyway.\n- # TODO: if it's possible to annotate get_option or validate_option_value\n- # in the future we might be able to remove the cast here\n- return T.cast('T.Union[str, int, bool, WrapMode]', options[key].value)\n-\n def get_source_dir_include_args(self, target: build.BuildTarget, compiler: 'Compiler', *, absolute_path: bool = False) -> T.List[str]:\n curdir = target.get_subdir()\n if absolute_path:\n@@ -420,7 +407,7 @@ class Backend:\n abs_files: T.List[str] = []\n result: T.List[mesonlib.File] = []\n compsrcs = classify_unity_sources(target.compilers.values(), unity_src)\n- unity_size = self.get_option_for_target(OptionKey('unity_size'), target)\n+ unity_size = target.get_option(OptionKey('unity_size'))\n assert isinstance(unity_size, int), 'for mypy'\n \n def init_language_file(suffix: str, unity_file_number: int) -> T.TextIO:\n@@ -850,7 +837,7 @@ class Backend:\n if self.is_unity(extobj.target):\n compsrcs = classify_unity_sources(extobj.target.compilers.values(), sources)\n sources = []\n- unity_size = self.get_option_for_target(OptionKey('unity_size'), extobj.target)\n+ unity_size = extobj.target.get_option(OptionKey('unity_size'))\n assert isinstance(unity_size, int), 'for mypy'\n \n for comp, srcs in compsrcs.items():\n@@ -917,7 +904,7 @@ class Backend:\n # starting from hard-coded defaults followed by build options and so on.\n commands = compiler.compiler_args()\n \n- copt_proxy = self.get_options_for_target(target)\n+ copt_proxy = target.get_options()\n # First, the trivial ones that are impossible to override.\n #\n # Add -nostdinc/-nostdinc++ if needed; can't be overridden\n@@ -930,27 +917,27 @@ class Backend:\n commands += compiler.get_no_warn_args()\n else:\n # warning_level is a string, but mypy can't determine that\n- commands += compiler.get_warn_args(T.cast('str', self.get_option_for_target(OptionKey('warning_level'), target)))\n+ commands += compiler.get_warn_args(T.cast('str', target.get_option(OptionKey('warning_level'))))\n # Add -Werror if werror=true is set in the build options set on the\n # command-line or default_options inside project(). 
This only sets the\n # action to be done for warnings if/when they are emitted, so it's ok\n # to set it after get_no_warn_args() or get_warn_args().\n- if self.get_option_for_target(OptionKey('werror'), target):\n+ if target.get_option(OptionKey('werror')):\n commands += compiler.get_werror_args()\n # Add compile args for c_* or cpp_* build options set on the\n # command-line or default_options inside project().\n commands += compiler.get_option_compile_args(copt_proxy)\n \n # Add buildtype args: optimization level, debugging, etc.\n- buildtype = self.get_option_for_target(OptionKey('buildtype'), target)\n+ buildtype = target.get_option(OptionKey('buildtype'))\n assert isinstance(buildtype, str), 'for mypy'\n commands += compiler.get_buildtype_args(buildtype)\n \n- optimization = self.get_option_for_target(OptionKey('optimization'), target)\n+ optimization = target.get_option(OptionKey('optimization'))\n assert isinstance(optimization, str), 'for mypy'\n commands += compiler.get_optimization_args(optimization)\n \n- debug = self.get_option_for_target(OptionKey('debug'), target)\n+ debug = target.get_option(OptionKey('debug'))\n assert isinstance(debug, bool), 'for mypy'\n commands += compiler.get_debug_args(debug)\n \n@@ -1290,7 +1277,7 @@ class Backend:\n return libs\n \n def is_unity(self, target: build.BuildTarget) -> bool:\n- optval = self.get_option_for_target(OptionKey('unity'), target)\n+ optval = target.get_option(OptionKey('unity'))\n return optval == 'on' or (optval == 'subprojects' and target.subproject != '')\n \n def get_custom_target_sources(self, target: build.CustomTarget) -> T.List[str]:\n@@ -1568,7 +1555,7 @@ class Backend:\n # TODO: Create GNUStrip/AppleStrip/etc. hierarchy for more\n # fine-grained stripping of static archives.\n can_strip = not isinstance(t, build.StaticLibrary)\n- should_strip = can_strip and self.get_option_for_target(OptionKey('strip'), t)\n+ should_strip = can_strip and t.get_option(OptionKey('strip'))\n assert isinstance(should_strip, bool), 'for mypy'\n # Install primary build output (library/executable/jar, etc)\n # Done separately because of strip/aliases/rpath\n", "new_path": "mesonbuild/backend/backends.py", "old_path": "mesonbuild/backend/backends.py" }, { "change_type": "MODIFY", "diff": "@@ -1311,7 +1311,7 @@ class NinjaBackend(backends.Backend):\n return args, deps\n \n def generate_cs_target(self, target: build.BuildTarget):\n- buildtype = self.get_option_for_target(OptionKey('buildtype'), target)\n+ buildtype = target.get_option(OptionKey('buildtype'))\n fname = target.get_filename()\n outname_rel = os.path.join(self.get_target_dir(target), fname)\n src_list = target.get_sources()\n@@ -1320,8 +1320,8 @@ class NinjaBackend(backends.Backend):\n deps = []\n commands = compiler.compiler_args(target.extra_args.get('cs', []))\n commands += compiler.get_buildtype_args(buildtype)\n- commands += compiler.get_optimization_args(self.get_option_for_target(OptionKey('optimization'), target))\n- commands += compiler.get_debug_args(self.get_option_for_target(OptionKey('debug'), target))\n+ commands += compiler.get_optimization_args(target.get_option(OptionKey('optimization')))\n+ commands += compiler.get_debug_args(target.get_option(OptionKey('debug')))\n if isinstance(target, build.Executable):\n commands.append('-target:exe')\n elif isinstance(target, build.SharedLibrary):\n@@ -1362,7 +1362,7 @@ class NinjaBackend(backends.Backend):\n \n def determine_single_java_compile_args(self, target, compiler):\n args = []\n- args += 
compiler.get_buildtype_args(self.get_option_for_target(OptionKey('buildtype'), target))\n+ args += compiler.get_buildtype_args(target.get_option(OptionKey('buildtype')))\n args += self.build.get_global_args(compiler, target.for_machine)\n args += self.build.get_project_args(compiler, target.subproject, target.for_machine)\n args += target.get_java_args()\n@@ -1605,19 +1605,17 @@ class NinjaBackend(backends.Backend):\n \n cython = target.compilers['cython']\n \n- opt_proxy = self.get_options_for_target(target)\n-\n args: T.List[str] = []\n args += cython.get_always_args()\n- args += cython.get_buildtype_args(self.get_option_for_target(OptionKey('buildtype'), target))\n- args += cython.get_debug_args(self.get_option_for_target(OptionKey('debug'), target))\n- args += cython.get_optimization_args(self.get_option_for_target(OptionKey('optimization'), target))\n- args += cython.get_option_compile_args(opt_proxy)\n+ args += cython.get_buildtype_args(target.get_option(OptionKey('buildtype')))\n+ args += cython.get_debug_args(target.get_option(OptionKey('debug')))\n+ args += cython.get_optimization_args(target.get_option(OptionKey('optimization')))\n+ args += cython.get_option_compile_args(target.get_options())\n args += self.build.get_global_args(cython, target.for_machine)\n args += self.build.get_project_args(cython, target.subproject, target.for_machine)\n args += target.get_extra_args('cython')\n \n- ext = opt_proxy[OptionKey('language', machine=target.for_machine, lang='cython')].value\n+ ext = target.get_option(OptionKey('language', machine=target.for_machine, lang='cython'))\n \n for src in target.get_sources():\n if src.endswith('.pyx'):\n@@ -1693,7 +1691,7 @@ class NinjaBackend(backends.Backend):\n # Rust compiler takes only the main file as input and\n # figures out what other files are needed via import\n # statements and magic.\n- base_proxy = self.get_options_for_target(target)\n+ base_proxy = target.get_options()\n args = rustc.compiler_args()\n # Compiler args for compiling this target\n args += compilers.get_base_compile_args(base_proxy, rustc)\n@@ -1935,8 +1933,8 @@ class NinjaBackend(backends.Backend):\n raise InvalidArguments(f'Swift target {target.get_basename()} contains a non-swift source file.')\n os.makedirs(self.get_target_private_dir_abs(target), exist_ok=True)\n compile_args = swiftc.get_compile_only_args()\n- compile_args += swiftc.get_optimization_args(self.get_option_for_target(OptionKey('optimization'), target))\n- compile_args += swiftc.get_debug_args(self.get_option_for_target(OptionKey('debug'), target))\n+ compile_args += swiftc.get_optimization_args(target.get_option(OptionKey('optimization')))\n+ compile_args += swiftc.get_debug_args(target.get_option(OptionKey('debug')))\n compile_args += swiftc.get_module_args(module_name)\n compile_args += self.build.get_project_args(swiftc, target.subproject, target.for_machine)\n compile_args += self.build.get_global_args(swiftc, target.for_machine)\n@@ -2464,7 +2462,7 @@ https://gcc.gnu.org/bugzilla/show_bug.cgi?id=47485'''))\n return linker.get_link_debugfile_args(outname)\n \n def generate_llvm_ir_compile(self, target, src):\n- base_proxy = self.get_options_for_target(target)\n+ base_proxy = target.get_options()\n compiler = get_compiler_for_source(target.compilers.values(), src)\n commands = compiler.compiler_args()\n # Compiler args for compiling this target\n@@ -2524,7 +2522,7 @@ https://gcc.gnu.org/bugzilla/show_bug.cgi?id=47485'''))\n return commands\n \n def _generate_single_compile_base_args(self, target: 
build.BuildTarget, compiler: 'Compiler') -> 'CompilerArgs':\n- base_proxy = self.get_options_for_target(target)\n+ base_proxy = target.get_options()\n # Create an empty commands list, and start adding arguments from\n # various sources in the order in which they must override each other\n commands = compiler.compiler_args()\n@@ -3026,9 +3024,9 @@ https://gcc.gnu.org/bugzilla/show_bug.cgi?id=47485'''))\n # options passed on the command-line, in default_options, etc.\n # These have the lowest priority.\n if isinstance(target, build.StaticLibrary):\n- commands += linker.get_base_link_args(self.get_options_for_target(target))\n+ commands += linker.get_base_link_args(target.get_options())\n else:\n- commands += compilers.get_base_link_args(self.get_options_for_target(target),\n+ commands += compilers.get_base_link_args(target.get_options(),\n linker,\n isinstance(target, build.SharedModule))\n # Add -nostdlib if needed; can't be overridden\n@@ -3036,9 +3034,9 @@ https://gcc.gnu.org/bugzilla/show_bug.cgi?id=47485'''))\n # Add things like /NOLOGO; usually can't be overridden\n commands += linker.get_linker_always_args()\n # Add buildtype linker args: optimization level, etc.\n- commands += linker.get_buildtype_linker_args(self.get_option_for_target(OptionKey('buildtype'), target))\n+ commands += linker.get_buildtype_linker_args(target.get_option(OptionKey('buildtype')))\n # Add /DEBUG and the pdb filename when using MSVC\n- if self.get_option_for_target(OptionKey('debug'), target):\n+ if target.get_option(OptionKey('debug')):\n commands += self.get_link_debugfile_args(linker, target, outname)\n debugfile = self.get_link_debugfile_name(linker, target, outname)\n if debugfile is not None:\n", "new_path": "mesonbuild/backend/ninjabackend.py", "old_path": "mesonbuild/backend/ninjabackend.py" }, { "change_type": "MODIFY", "diff": "@@ -990,7 +990,7 @@ class Vs2010Backend(backends.Backend):\n for l, comp in target.compilers.items():\n if l in file_args:\n file_args[l] += compilers.get_base_compile_args(\n- self.get_options_for_target(target), comp)\n+ target.get_options(), comp)\n file_args[l] += comp.get_option_compile_args(\n self.environment.coredata.options)\n \n@@ -1114,9 +1114,9 @@ class Vs2010Backend(backends.Backend):\n ET.SubElement(clconf, 'PreprocessorDefinitions').text = ';'.join(target_defines)\n ET.SubElement(clconf, 'FunctionLevelLinking').text = 'true'\n # Warning level\n- warning_level = self.get_option_for_target(OptionKey('warning_level'), target)\n+ warning_level = target.get_option(OptionKey('warning_level'))\n ET.SubElement(clconf, 'WarningLevel').text = 'Level' + str(1 + int(warning_level))\n- if self.get_option_for_target(OptionKey('werror'), target):\n+ if target.get_option(OptionKey('werror')):\n ET.SubElement(clconf, 'TreatWarningAsError').text = 'true'\n # Optimization flags\n o_flags = split_o_flags_args(build_args)\n", "new_path": "mesonbuild/backend/vs2010backend.py", "old_path": "mesonbuild/backend/vs2010backend.py" }, { "change_type": "MODIFY", "diff": "@@ -1503,8 +1503,8 @@ class XCodeBackend(backends.Backend):\n if compiler is None:\n continue\n # Start with warning args\n- warn_args = compiler.get_warn_args(self.get_option_for_target(OptionKey('warning_level'), target))\n- copt_proxy = self.get_options_for_target(target)\n+ warn_args = compiler.get_warn_args(target.get_option(OptionKey('warning_level')))\n+ copt_proxy = target.get_options()\n std_args = compiler.get_option_compile_args(copt_proxy)\n # Add compile args added using add_project_arguments()\n pargs = 
self.build.projects_args[target.for_machine].get(target.subproject, {}).get(lang, [])\n@@ -1556,9 +1556,9 @@ class XCodeBackend(backends.Backend):\n if target.suffix:\n suffix = '.' + target.suffix\n settings_dict.add_item('EXECUTABLE_SUFFIX', suffix)\n- settings_dict.add_item('GCC_GENERATE_DEBUGGING_SYMBOLS', BOOL2XCODEBOOL[self.get_option_for_target(OptionKey('debug'), target)])\n+ settings_dict.add_item('GCC_GENERATE_DEBUGGING_SYMBOLS', BOOL2XCODEBOOL[target.get_option(OptionKey('debug'))])\n settings_dict.add_item('GCC_INLINES_ARE_PRIVATE_EXTERN', 'NO')\n- settings_dict.add_item('GCC_OPTIMIZATION_LEVEL', OPT2XCODEOPT[self.get_option_for_target(OptionKey('optimization'), target)])\n+ settings_dict.add_item('GCC_OPTIMIZATION_LEVEL', OPT2XCODEOPT[target.get_option(OptionKey('optimization'))])\n if target.has_pch:\n # Xcode uses GCC_PREFIX_HEADER which only allows one file per target/executable. Precompiling various header files and\n # applying a particular pch to each source file will require custom scripts (as a build phase) and build flags per each\n", "new_path": "mesonbuild/backend/xcodebackend.py", "old_path": "mesonbuild/backend/xcodebackend.py" }, { "change_type": "MODIFY", "diff": "@@ -35,7 +35,7 @@ from .mesonlib import (\n File, MesonException, MachineChoice, PerMachine, OrderedSet, listify,\n extract_as_list, typeslistify, stringlistify, classify_unity_sources,\n get_filenames_templates_dict, substitute_values, has_path_sep,\n- OptionKey, PerMachineDefaultable,\n+ OptionKey, PerMachineDefaultable, OptionOverrideProxy,\n MesonBugException\n )\n from .compilers import (\n@@ -578,7 +578,7 @@ class Target(HoldableObject):\n '''))\n self.install = False\n self.build_always_stale = False\n- self.option_overrides: T.Dict[OptionKey, str] = {}\n+ self.options = OptionOverrideProxy({}, self.environment.coredata.options, self.subproject)\n self.extra_files = [] # type: T.List[File]\n if not hasattr(self, 'typename'):\n raise RuntimeError(f'Target type is not set for target class \"{type(self).__name__}\". 
This is a bug')\n@@ -682,13 +682,25 @@ class Target(HoldableObject):\n # set, use the value of 'install' if it's enabled.\n self.build_by_default = True\n \n- option_overrides = self.parse_overrides(kwargs)\n+ self.set_option_overrides(self.parse_overrides(kwargs))\n \n+ def set_option_overrides(self, option_overrides: T.Dict[OptionKey, str]) -> None:\n+ self.options.overrides = {}\n for k, v in option_overrides.items():\n if k.lang:\n- self.option_overrides[k.evolve(machine=self.for_machine)] = v\n- continue\n- self.option_overrides[k] = v\n+ self.options.overrides[k.evolve(machine=self.for_machine)] = v\n+ else:\n+ self.options.overrides[k] = v\n+\n+ def get_options(self) -> OptionOverrideProxy:\n+ return self.options\n+\n+ def get_option(self, key: 'OptionKey') -> T.Union[str, int, bool, 'WrapMode']:\n+ # We don't actually have wrapmode here to do an assert, so just do a\n+ # cast, we know what's in coredata anyway.\n+ # TODO: if it's possible to annotate get_option or validate_option_value\n+ # in the future we might be able to remove the cast here\n+ return T.cast('T.Union[str, int, bool, WrapMode]', self.options[key].value)\n \n @staticmethod\n def parse_overrides(kwargs: T.Dict[str, T.Any]) -> T.Dict[OptionKey, str]:\n@@ -959,10 +971,8 @@ class BuildTarget(Target):\n self.compilers['c'] = self.all_compilers['c']\n if 'cython' in self.compilers:\n key = OptionKey('language', machine=self.for_machine, lang='cython')\n- if key in self.option_overrides:\n- value = self.option_overrides[key]\n- else:\n- value = self.environment.coredata.options[key].value\n+ value = self.get_option(key)\n+\n try:\n self.compilers[value] = self.all_compilers[value]\n except KeyError:\n@@ -2450,12 +2460,7 @@ class CustomTarget(Target, CommandBase):\n self.install_tag = _install_tag\n self.name = name if name else self.outputs[0]\n \n- if override_options:\n- for k, v in override_options.items():\n- if k.lang:\n- self.option_overrides_compiler[k.evolve(machine=self.for_machine)] = v\n- else:\n- self.option_overrides_base[k] = v\n+ self.set_option_overrides(override_options or {})\n \n # Whether to use absolute paths for all files on the commandline\n self.absolute_paths = absolute_paths\n", "new_path": "mesonbuild/build.py", "old_path": "mesonbuild/build.py" } ]
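A schematic sketch of the lookup after this change; it assumes `target` is an existing mesonbuild.build.Target instance, so the functions below only illustrate the call shape:

from mesonbuild.mesonlib import OptionKey

def configured_buildtype(target):
    # The target caches its own OptionOverrideProxy, so no backend helper
    # (and no per-call proxy construction) is needed any more.
    return target.get_option(OptionKey('buildtype'))

def base_compile_opts(target):
    # Full proxy, e.g. for compilers.get_base_compile_args().
    return target.get_options()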
32bc64e63210cee6df7364a39005d89cdfdc6b71
mesonbuild/meson
null
null
Fix native compilation on ARM64 Windows. Move `detect_native_windows_arch()` to `mesonlib/universal.py` and rename it to `windows_detect_native_arch()`. Use `IsWow64Process2()` to detect the native architecture if available. Use the native `vcvarsarm64.bat` to initialize vsenv if available.
[ { "change_type": "MODIFY", "diff": "@@ -20,7 +20,7 @@ import collections\n from . import coredata\n from . import mesonlib\n from .mesonlib import (\n- MesonException, EnvironmentException, MachineChoice, Popen_safe, PerMachine,\n+ MesonException, MachineChoice, Popen_safe, PerMachine,\n PerMachineDefaultable, PerThreeMachineDefaultable, split_args, quote_arg, OptionKey,\n search_version, MesonBugException\n )\n@@ -228,22 +228,6 @@ def detect_clangformat() -> T.List[str]:\n return [path]\n return []\n \n-def detect_native_windows_arch():\n- \"\"\"\n- The architecture of Windows itself: x86, amd64 or arm64\n- \"\"\"\n- # These env variables are always available. See:\n- # https://msdn.microsoft.com/en-us/library/aa384274(VS.85).aspx\n- # https://blogs.msdn.microsoft.com/david.wang/2006/03/27/howto-detect-process-bitness/\n- arch = os.environ.get('PROCESSOR_ARCHITEW6432', '').lower()\n- if not arch:\n- try:\n- # If this doesn't exist, something is messing with the environment\n- arch = os.environ['PROCESSOR_ARCHITECTURE'].lower()\n- except KeyError:\n- raise EnvironmentException('Unable to detect native OS architecture')\n- return arch\n-\n def detect_windows_arch(compilers: CompilersDict) -> str:\n \"\"\"\n Detecting the 'native' architecture of Windows is not a trivial task. We\n@@ -268,7 +252,7 @@ def detect_windows_arch(compilers: CompilersDict) -> str:\n 3. Otherwise, use the actual Windows architecture\n \n \"\"\"\n- os_arch = detect_native_windows_arch()\n+ os_arch = mesonlib.windows_detect_native_arch()\n if os_arch == 'x86':\n return os_arch\n # If we're on 64-bit Windows, 32-bit apps can be compiled without\n@@ -375,7 +359,7 @@ def detect_cpu(compilers: CompilersDict) -> str:\n # Same check as above for cpu_family\n if any_compiler_has_define(compilers, '__i386__'):\n trial = 'i686' # All 64 bit cpus have at least this level of x86 support.\n- elif trial.startswith('aarch64'):\n+ elif trial.startswith('aarch64') or trial.startswith('arm64'):\n # Same check as above for cpu_family\n if any_compiler_has_define(compilers, '__arm__'):\n trial = 'arm'\n", "new_path": "mesonbuild/environment.py", "old_path": "mesonbuild/environment.py" }, { "change_type": "MODIFY", "diff": "@@ -17,6 +17,7 @@\n from __future__ import annotations\n from pathlib import Path\n import argparse\n+import ctypes\n import enum\n import sys\n import stat\n@@ -141,6 +142,7 @@ __all__ = [\n 'version_compare_condition_with_min',\n 'version_compare_many',\n 'search_version',\n+ 'windows_detect_native_arch',\n 'windows_proof_rm',\n 'windows_proof_rmtree',\n ]\n@@ -689,6 +691,41 @@ def darwin_get_object_archs(objpath: str) -> 'ImmutableListProtocol[str]':\n stdo += ' arm'\n return stdo.split()\n \n+def windows_detect_native_arch() -> str:\n+ \"\"\"\n+ The architecture of Windows itself: x86, amd64 or arm64\n+ \"\"\"\n+ if sys.platform != 'win32':\n+ return ''\n+ try:\n+ process_arch = ctypes.c_ushort()\n+ native_arch = ctypes.c_ushort()\n+ kernel32 = ctypes.windll.kernel32\n+ process = ctypes.c_void_p(kernel32.GetCurrentProcess())\n+ # This is the only reliable way to detect an arm system if we are an x86/x64 process being emulated\n+ if kernel32.IsWow64Process2(process, ctypes.byref(process_arch), ctypes.byref(native_arch)):\n+ # https://docs.microsoft.com/en-us/windows/win32/sysinfo/image-file-machine-constants\n+ if native_arch.value == 0x8664:\n+ return 'amd64'\n+ elif native_arch.value == 0x014C:\n+ return 'x86'\n+ elif native_arch.value == 0xAA64:\n+ return 'arm64'\n+ elif native_arch.value == 0x01C4:\n+ return 
'arm'\n+ except (OSError, AttributeError):\n+ pass\n+ # These env variables are always available. See:\n+ # https://msdn.microsoft.com/en-us/library/aa384274(VS.85).aspx\n+ # https://blogs.msdn.microsoft.com/david.wang/2006/03/27/howto-detect-process-bitness/\n+ arch = os.environ.get('PROCESSOR_ARCHITEW6432', '').lower()\n+ if not arch:\n+ try:\n+ # If this doesn't exist, something is messing with the environment\n+ arch = os.environ['PROCESSOR_ARCHITECTURE'].lower()\n+ except KeyError:\n+ raise EnvironmentException('Unable to detect native OS architecture')\n+ return arch\n \n def detect_vcs(source_dir: T.Union[str, Path]) -> T.Optional[T.Dict[str, str]]:\n vcs_systems = [\n", "new_path": "mesonbuild/utils/universal.py", "old_path": "mesonbuild/utils/universal.py" }, { "change_type": "MODIFY", "diff": "@@ -2,12 +2,11 @@ import os\n import subprocess\n import json\n import pathlib\n-import platform\n import shutil\n import tempfile\n \n from .. import mlog\n-from .universal import MesonException, is_windows\n+from .universal import MesonException, is_windows, windows_detect_native_arch\n \n \n __all__ = [\n@@ -72,8 +71,10 @@ def _setup_vsenv(force: bool) -> bool:\n # VS installer instelled but not VS itself maybe?\n raise MesonException('Could not parse vswhere.exe output')\n bat_root = pathlib.Path(bat_info[0]['installationPath'])\n- if platform.machine() == 'ARM64':\n- bat_path = bat_root / 'VC/Auxiliary/Build/vcvarsx86_arm64.bat'\n+ if windows_detect_native_arch() == 'arm64':\n+ bat_path = bat_root / 'VC/Auxiliary/Build/vcvarsarm64.bat'\n+ if not bat_path.exists():\n+ bat_path = bat_root / 'VC/Auxiliary/Build/vcvarsx86_arm64.bat'\n else:\n bat_path = bat_root / 'VC/Auxiliary/Build/vcvars64.bat'\n # if VS is not found try VS Express\n", "new_path": "mesonbuild/utils/vsenv.py", "old_path": "mesonbuild/utils/vsenv.py" } ]
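A consumer-side sketch; the detection logic itself is in the diff above, and the import path is taken from the file paths shown there:

from mesonbuild.utils.universal import windows_detect_native_arch

arch = windows_detect_native_arch()    # '' off Windows; 'x86', 'amd64', 'arm64' or 'arm' on it
if arch == 'arm64':
    print('use vcvarsarm64.bat, falling back to vcvarsx86_arm64.bat')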
e9036760713718bbfd3d7db9f3dbc03576924e52
lmcinnes/umap
null
null
Add support for saving embeddings at intermediate epochs. Returns a list of intermediate embeddings in UMAP.embedding_list.
[ { "change_type": "MODIFY", "diff": "@@ -252,8 +252,12 @@ def optimize_layout_euclidean(\n The indices of the heads of 1-simplices with non-zero membership.\n tail: array of shape (n_1_simplices)\n The indices of the tails of 1-simplices with non-zero membership.\n- n_epochs: int\n- The number of training epochs to use in optimization.\n+ n_epochs: int, or list of int\n+ The number of training epochs to use in optimization, or a list of\n+ epochs at which to save the embedding. In case of a list, the optimization\n+ will use the maximum number of epochs in the list, and will return a list\n+ of embedding in the order of increasing epoch, regardless of the order in\n+ the epoch list.\n n_vertices: int\n The number of vertices (0-simplices) in the dataset.\n epochs_per_samples: array of shape (n_1_simplices)\n@@ -322,6 +326,12 @@ def optimize_layout_euclidean(\n dens_phi_sum = np.zeros(1, dtype=np.float32)\n dens_re_sum = np.zeros(1, dtype=np.float32)\n \n+ epochs_list = None\n+ embedding_list = []\n+ if isinstance(n_epochs, list):\n+ epochs_list = n_epochs\n+ n_epochs = max(epochs_list)\n+\n for n in range(n_epochs):\n \n densmap_flag = (\n@@ -385,7 +395,14 @@ def optimize_layout_euclidean(\n if verbose and n % int(n_epochs / 10) == 0:\n print(\"\\tcompleted \", n, \" / \", n_epochs, \"epochs\")\n \n- return head_embedding\n+ if epochs_list is not None and n in epochs_list:\n+ embedding_list.append(head_embedding.copy())\n+\n+ # Add the last embedding to the list as well\n+ if epochs_list is not None:\n+ embedding_list.append(head_embedding.copy())\n+\n+ return head_embedding if epochs_list is None else embedding_list\n \n \n @numba.njit(fastmath=True)\n", "new_path": "umap/layouts.py", "old_path": "umap/layouts.py" }, { "change_type": "MODIFY", "diff": "@@ -978,11 +978,14 @@ def simplicial_set_embedding(\n in greater repulsive force being applied, greater optimization\n cost, but slightly more accuracy.\n \n- n_epochs: int (optional, default 0)\n+ n_epochs: int (optional, default 0), or list of int\n The number of training epochs to be used in optimizing the\n low dimensional embedding. Larger values result in more accurate\n embeddings. If 0 is specified a value will be selected based on\n the size of the input dataset (200 for large datasets, 500 for small).\n+ If a list of int is specified, then the intermediate embeddings at the\n+ different epochs specified in that list are returned in\n+ ``aux_data[\"embedding_list\"]``.\n \n init: string\n How to initialize the low dimensional embedding. 
Options are:\n@@ -1054,11 +1057,14 @@ def simplicial_set_embedding(\n if densmap:\n default_epochs += 200\n \n- if n_epochs is None:\n- n_epochs = default_epochs\n+ # Get the maximum epoch to reach\n+ n_epochs_max = max(n_epochs) if isinstance(n_epochs, list) else n_epochs\n \n- if n_epochs > 10:\n- graph.data[graph.data < (graph.data.max() / float(n_epochs))] = 0.0\n+ if n_epochs_max is None:\n+ n_epochs_max = default_epochs\n+\n+ if n_epochs_max > 10:\n+ graph.data[graph.data < (graph.data.max() / float(n_epochs_max))] = 0.0\n else:\n graph.data[graph.data < (graph.data.max() / float(default_epochs))] = 0.0\n \n@@ -1099,7 +1105,7 @@ def simplicial_set_embedding(\n else:\n embedding = init_data\n \n- epochs_per_sample = make_epochs_per_sample(graph.data, n_epochs)\n+ epochs_per_sample = make_epochs_per_sample(graph.data, n_epochs_max)\n \n head = graph.row\n tail = graph.col\n@@ -1188,6 +1194,11 @@ def simplicial_set_embedding(\n verbose=verbose,\n move_other=True,\n )\n+\n+ if isinstance(embedding, list):\n+ aux_data[\"embedding_list\"] = embedding\n+ embedding = embedding[-1].copy()\n+\n if output_dens:\n if verbose:\n print(ts() + \" Computing embedding densities\")\n@@ -1707,10 +1718,16 @@ class UMAP(BaseEstimator):\n raise ValueError(\"n_components must be an int\")\n if self.n_components < 1:\n raise ValueError(\"n_components must be greater than 0\")\n+ self.n_epochs_list = None\n+ if isinstance(self.n_epochs, list):\n+ if not all(isinstance(n, int) and n >= 0 for n in self.n_epochs):\n+ raise ValueError(\"n_epochs must be a nonnegative integer or a list of nonnegative integers\")\n+ self.n_epochs_list = self.n_epochs\n+ self.n_epochs = max(self.n_epochs_list)\n if self.n_epochs is not None and (\n- self.n_epochs < 0 or not isinstance(self.n_epochs, int)\n- ):\n- raise ValueError(\"n_epochs must be a nonnegative integer\")\n+ self.n_epochs < 0 or not isinstance(self.n_epochs, int)\n+ ):\n+ raise ValueError(\"n_epochs must be a nonnegative integer or a list of nonnegative integers\")\n if self.metric_kwds is None:\n self._metric_kwds = {}\n else:\n@@ -2577,12 +2594,21 @@ class UMAP(BaseEstimator):\n print(ts(), \"Construct embedding\")\n \n if self.transform_mode == \"embedding\":\n+ epochs = self.n_epochs_list if self.n_epochs_list is not None else self.n_epochs\n self.embedding_, aux_data = self._fit_embed_data(\n self._raw_data[index],\n- self.n_epochs,\n+ epochs,\n init,\n random_state, # JH why raw data?\n )\n+\n+ if self.n_epochs_list is not None:\n+ if not \"embedding_list\" in aux_data:\n+ raise KeyError(\"No list of embedding were found in 'aux_data'. It is likely the\"\n+ \"layout optimization function doesn't support the list of int for 'n_epochs'.\")\n+ else:\n+ self.embedding_list = aux_data[\"embedding_list\"]\n+\n # Assign any points that are fully disconnected from our manifold(s) to have embedding\n # coordinates of np.nan. These will be filtered by our plotting functions automatically.\n # They also prevent users from being deceived a distance query to one of these points.\n", "new_path": "umap/umap_.py", "old_path": "umap/umap_.py" } ]
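A usage sketch for the new list-valued n_epochs (the data and epoch values are placeholders):

import numpy as np
import umap

X = np.random.rand(500, 10)

# Optimization runs to max(n_epochs); the layout is snapshotted at the listed
# epochs, with the final embedding appended as the last entry.
mapper = umap.UMAP(n_epochs=[50, 100, 200]).fit(X)

snapshots = mapper.embedding_list   # list of (n_samples, n_components) arrays
final = mapper.embedding_           # identical to the last snapshot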
5c20bf11a02c24e8caebf955706e21f278544bc7
dguenms/dawn-of-civilization
null
null
Adjust UHV goals to new wonders. The second French goal now requires the Louvre and the Metropolitain instead of the Statue of Liberty; the second Mughal goal now requires Shalimar Gardens instead of Harmandir Sahib; the second American goal now also requires Brooklyn Bridge and Golden Gate Bridge.
[ { "change_type": "MODIFY", "diff": "@@ -213,11 +213,11 @@ dWonderGoals = {\n \tiMaya: (1, [iTempleOfKukulkan], True),\r\n \tiMoors: (1, [iMezquita], False),\r\n \tiKhmer: (0, [iWatPreahPisnulok], False),\r\n-\tiFrance: (2, [iNotreDame, iVersailles, iStatueOfLiberty, iEiffelTower], True),\r\n+\tiFrance: (2, [iNotreDame, iVersailles, iLouvre, iEiffelTower, iMetropolitain], True),\r\n \tiMali: (1, [iUniversityOfSankore], False),\r\n \tiItaly: (0, [iSanMarcoBasilica, iSistineChapel, iSantaMariaDelFiore], True),\r\n-\tiMughals: (1, [iTajMahal, iRedFort, iHarmandirSahib], True),\r\n-\tiAmerica: (1, [iStatueOfLiberty, iEmpireStateBuilding, iPentagon, iUnitedNations], True),\r\n+\tiMughals: (1, [iTajMahal, iRedFort, iShalimarGardens], True),\r\n+\tiAmerica: (1, [iStatueOfLiberty, iBrooklynBridge, iEmpireStateBuilding, iGoldenGateBridge, iPentagon, iUnitedNations], True),\r\n \tiBrazil: (1, [iWembley, iCristoRedentor, iItaipuDam], True),\r\n }\r\n \r\n@@ -241,7 +241,8 @@ def setup():\n \t\t\n \t\t# French goal needs to be winnable\n \t\tdata.setWonderBuilder(iNotreDame, iFrance)\n-\t\tdata.setWonderBuilder(iVersailles, iFrance)\n+\t\tdata.setWonderBuilder(iVersailles, iFrance)\r\n+\t\tdata.setWonderBuilder(iLouvre, iFrance)\n \t\t\n \t\t# help Congo\n \t\tdata.iCongoSlaveCounter += 500\n@@ -930,7 +931,7 @@ def checkTurn(iGameTurn, iPlayer):\n \t\t\telse:\r\n \t\t\t\tlose(iFrance, 1)\r\n \t\t\t\t\r\n-\t\t# third goal: build Notre Dame, Versailles, the Statue of Liberty and the Eiffel Tower by 1900 AD\r\n+\t\t# third goal: build Notre Dame, Versailles, the Louvre, the Eiffel Tower and the Metropolitain by 1900 AD\r\n \t\tif iGameTurn == getTurnForYear(1900):\r\n \t\t\texpire(iFrance, 2)\r\n \t\t\t\r\n@@ -1177,7 +1178,7 @@ def checkTurn(iGameTurn, iPlayer):\n \t\tif iGameTurn == getTurnForYear(1500):\r\n \t\t\texpire(iMughals, 0)\r\n \t\t\t\r\n-\t\t# second goal: build the Red Fort, Harmandir Sahib and the Taj Mahal by 1660 AD\r\n+\t\t# second goal: build the Red Fort, Shalimar Gardens and the Taj Mahal by 1660 AD\r\n \t\tif iGameTurn == getTurnForYear(1660):\r\n \t\t\texpire(iMughals, 1)\r\n \t\t\t\r\n@@ -1360,7 +1361,7 @@ def checkTurn(iGameTurn, iPlayer):\n \t\t\telse:\r\n \t\t\t\tlose(iAmerica, 0)\r\n \t\t\t\t\r\n-\t\t# second goal: build the Statue of Liberty, the Empire State Building, the Pentagon and the United Nations by 1950 AD\r\n+\t\t# second goal: build the Statue of Liberty, the Brooklyn Bridge, the Empire State Building, the Golden Gate Bridge, the Pentagon and the United Nations by 1950 AD\r\n \t\tif iGameTurn == getTurnForYear(1950):\r\n \t\t\texpire(iAmerica, 1)\r\n \t\t\t\r\n@@ -3730,9 +3731,11 @@ def getUHVHelp(iPlayer, iGoal):\n \t\telif iGoal == 2:\r\n \t\t\tbNotreDame = data.getWonderBuilder(iNotreDame) == iFrance\r\n \t\t\tbVersailles = data.getWonderBuilder(iVersailles) == iFrance\r\n-\t\t\tbStatueOfLiberty = data.getWonderBuilder(iStatueOfLiberty) == iFrance\r\n+\t\t\tbLouvre = data.getWonderBuilder(iLouvre) == iFrance\r\n \t\t\tbEiffelTower = data.getWonderBuilder(iEiffelTower) == iFrance\r\n-\t\t\taHelp.append(getIcon(bNotreDame) + localText.getText(\"TXT_KEY_BUILDING_NOTRE_DAME\", ()) + ' ' + getIcon(bVersailles) + localText.getText(\"TXT_KEY_BUILDING_VERSAILLES\", ()) + ' ' + getIcon(bStatueOfLiberty) + localText.getText(\"TXT_KEY_BUILDING_STATUE_OF_LIBERTY\", ()) + ' ' + getIcon(bEiffelTower) + localText.getText(\"TXT_KEY_BUILDING_EIFFEL_TOWER\", ()))\r\n+\t\t\tbMetropolitain = data.getWonderBuilder(iMetropolitain) == iFrance\r\n+\t\t\taHelp.append(getIcon(bNotreDame) 
+ localText.getText(\"TXT_KEY_BUILDING_NOTRE_DAME\", ()) + ' ' + getIcon(bVersailles) + localText.getText(\"TXT_KEY_BUILDING_VERSAILLES\", ()) + ' ' + getIcon(bLouvre) + localText.getText(\"TXT_KEY_BUILDING_LOUVRE\", ()))\r\n+\t\t\taHelp.append(getIcon(bEiffelTower) + localText.getText(\"TXT_KEY_BUILDING_EIFFEL_TOWER\", ()) + ' ' + getIcon(bMetropolitain) + localText.getText(\"TXT_KEY_BUILDING_METROPOLITAIN\", ()))\r\n \r\n \telif iPlayer == iKhmer:\r\n \t\tif iGoal == 0:\r\n@@ -3881,9 +3884,9 @@ def getUHVHelp(iPlayer, iGoal):\n \t\t\taHelp.append(getIcon(iNumMosques >= 3) + localText.getText(\"TXT_KEY_VICTORY_MOSQUES_BUILT\", (iNumMosques, 3)))\r\n \t\telif iGoal == 1:\r\n \t\t\tbRedFort = data.getWonderBuilder(iRedFort) == iMughals\r\n-\t\t\tbHarmandirSahib = data.getWonderBuilder(iHarmandirSahib) == iMughals\r\n+\t\t\tbShalimarGardens = data.getWonderBuilder(iShalimarGardens) == iMughals\r\n \t\t\tbTajMahal = data.getWonderBuilder(iTajMahal) == iMughals\r\n-\t\t\taHelp.append(getIcon(bRedFort) + localText.getText(\"TXT_KEY_BUILDING_RED_FORT\", ()) + ' ' + getIcon(bHarmandirSahib) + localText.getText(\"TXT_KEY_BUILDING_HARMANDIR_SAHIB\", ()) + ' ' + getIcon(bTajMahal) + localText.getText(\"TXT_KEY_BUILDING_TAJ_MAHAL\", ()))\r\n+\t\t\taHelp.append(getIcon(bRedFort) + localText.getText(\"TXT_KEY_BUILDING_RED_FORT\", ()) + ' ' + getIcon(bShalimarGardens) + localText.getText(\"TXT_KEY_BUILDING_SHALIMAR_GARDENS\", ()) + ' ' + getIcon(bTajMahal) + localText.getText(\"TXT_KEY_BUILDING_TAJ_MAHAL\", ()))\r\n \t\telif iGoal == 2:\r\n \t\t\tiCulture = pMughals.countTotalCulture()\r\n \t\t\taHelp.append(getIcon(iCulture >= utils.getTurns(50000)) + localText.getText(\"TXT_KEY_VICTORY_TOTAL_CULTURE\", (iCulture, utils.getTurns(50000))))\r\n@@ -3996,10 +3999,13 @@ def getUHVHelp(iPlayer, iGoal):\n \t\t\taHelp.append(getIcon(bAmericas) + localText.getText(\"TXT_KEY_VICTORY_NO_NORTH_AMERICAN_COLONIES\", ()) + ' ' + getIcon(bMexico) + localText.getText(\"TXT_KEY_CIV_MEXICO_SHORT_DESC\", ()))\r\n \t\telif iGoal == 1:\r\n \t\t\tbUnitedNations = data.getWonderBuilder(iUnitedNations) == iAmerica\r\n+\t\t\tbBrooklynBridge = data.getWonderBuilder(iBrooklynBridge) == iAmerica\r\n \t\t\tbStatueOfLiberty = data.getWonderBuilder(iStatueOfLiberty) == iAmerica\r\n+\t\t\tbGoldenGateBridge = data.getWonderBuilder(iGoldenGateBridge) == iAmerica\r\n \t\t\tbPentagon = data.getWonderBuilder(iPentagon) == iAmerica\r\n \t\t\tbEmpireState = data.getWonderBuilder(iEmpireStateBuilding) == iAmerica\r\n-\t\t\taHelp.append(getIcon(bStatueOfLiberty) + localText.getText(\"TXT_KEY_BUILDING_STATUE_OF_LIBERTY\", ()) + ' ' + getIcon(bEmpireState) + localText.getText(\"TXT_KEY_BUILDING_EMPIRE_STATE_BUILDING\", ()) + ' ' + getIcon(bPentagon) + localText.getText(\"TXT_KEY_BUILDING_PENTAGON\", ()) + ' ' + getIcon(bUnitedNations) + localText.getText(\"TXT_KEY_BUILDING_UNITED_NATIONS\", ()))\r\n+\t\t\taHelp.append(getIcon(bStatueOfLiberty) + localText.getText(\"TXT_KEY_BUILDING_STATUE_OF_LIBERTY\", ()) + ' ' + getIcon(bBrooklynBridge) + localText.getText(\"TXT_KEY_BUILDING_BROOKLYN_BRIDGE\", ()) + ' ' + getIcon(bEmpireState) + localText.getText(\"TXT_KEY_BUILDING_EMPIRE_STATE_BUILDING\", ()))\r\n+\t\t\taHelp.append(getIcon(bGoldenGateBridge) + localText.getText(\"TXT_KEY_BUILDING_GOLDEN_GATE_BRIDGE\", ()) + ' ' + getIcon(bPentagon) + localText.getText(\"TXT_KEY_BUILDING_PENTAGON\", ()) + ' ' + getIcon(bUnitedNations) + localText.getText(\"TXT_KEY_BUILDING_UNITED_NATIONS\", ()))\r\n \t\telif iGoal == 2:\r\n \t\t\tiCounter = 
countResources(iAmerica, iOil)\r\n \t\t\taHelp.append(getIcon(iCounter >= 10) + localText.getText(\"TXT_KEY_VICTORY_OIL_SECURED\", (iCounter, 10)))\r\n", "new_path": "Assets/Python/Victory.py", "old_path": "Assets/Python/Victory.py" }, { "change_type": "MODIFY", "diff": "@@ -51,11 +51,11 @@\n \t</TEXT>\n \t<TEXT>\n \t\t<Tag>TXT_KEY_UHV_AME2</Tag>\n-\t\t<English>Build the Statue of Liberty, the Empire State Building, the Pentagon and the United Nations by 1950 AD</English>\n-\t\t<French>Construire la Statue de la Libert&#233;, l'Empire State Building, le Pentagone et les Nations Unies avant 1950 ap. J.-C.</French>\n-\t\t<German>Errichten Sie bis zum Jahr 1950 n. Chr. die Freiheitsstatue, das Empire State Building, das Pentagon und the Vereinten Nationen</German>\n-\t\t<Italian>Build the Statue of Liberty, the Empire State Building, the Pentagon and the United Nations by 1950 AD</Italian>\n-\t\t<Spanish>Build the Statue of Liberty, the Empire State Building, the Pentagon and the United Nations by 1950 AD</Spanish>\n+\t\t<English>Build the Statue of Liberty, the Brooklyn Bridge, the Empire State Building, the Golden Gate Bridge, the Pentagon and the United Nations by 1950 AD</English>\n+\t\t<French>Build the Statue of Liberty, the Brooklyn Bridge, the Empire State Building, the Golden Gate Bridge, the Pentagon and the United Nations by 1950 AD</French>\n+\t\t<German>Errichten Sie bis zum Jahr 1950 n. Chr. die Freiheitsstatue, die Brooklyn Bridge, das Empire State Building, die Golden Gate Bridge, das Pentagon und the Vereinten Nationen</German>\n+\t\t<Italian>Build the Statue of Liberty, the Brooklyn Bridge, the Empire State Building, the Golden Gate Bridge, the Pentagon and the United Nations by 1950 AD</Italian>\n+\t\t<Spanish>Build the Statue of Liberty, the Brooklyn Bridge, the Empire State Building, the Golden Gate Bridge, the Pentagon and the United Nations by 1950 AD</Spanish>\n \t</TEXT>\n \t<TEXT>\n \t\t<Tag>TXT_KEY_UHV_AME3_TITLE</Tag>\n@@ -963,9 +963,9 @@\n \t</TEXT>\n \t<TEXT>\n \t\t<Tag>TXT_KEY_UHV_FRA3</Tag>\n-\t\t<English>Build Notre Dame, Versailles, the Statue of Liberty and the Eiffel Tower by 1900 AD</English>\n-\t\t<French>Construire Notre-Dame, Versailles, la Statue de la Libert&#233; et la Tour Eiffel avant 1900 ap. J.-C.</French>\n-\t\t<German>Bauen Sie bis zum Jahr 1900 n. Chr. Notre Dame, Versailles, die Freiheitsstatue und den Eiffelturm</German>\n+\t\t<English>Build Notre Dame, Versailles, the Louvre, the Eiffel Tower and the Metropolitain by 1900 AD</English>\n+\t\t<French>Build Notre Dame, Versailles, the Louvre, the Eiffel Tower and the Metropolitain by 1900 AD</French>\n+\t\t<German>Bauen Sie bis zum Jahr 1900 n. Chr. Notre Dame, Versailles, den Louvre, den Eiffelturm und die Metropolitain</German>\n \t\t<Italian>Build Notre Dame, Versailles, the Statue of Liberty and the Eiffel Tower by 1900 AD</Italian>\n \t\t<Spanish>Build Notre Dame, Versailles, the Statue of Liberty and the Eiffel Tower by 1900 AD</Spanish>\n \t</TEXT>\n@@ -2099,11 +2099,11 @@\n \t</TEXT>\n \t<TEXT>\n \t\t<Tag>TXT_KEY_UHV_MUG2</Tag>\n-\t\t<English>Build the Red Fort, Harmandir Sahib and the Taj Mahal by 1660 AD</English>\n-\t\t<French>Construire le Fort rouge, le Harmandir Sahib et le Taj Mahal avant 1660 ap. J.-C.</French>\n-\t\t<German>Vollenden Sie bis zum Jahr 1660 n. Chr. 
das Rote Fort, Harmandir Sahib und den Taj Mahal.</German>\n-\t\t<Italian>Build the Red Fort, Harmandir Sahib and the Taj Mahal by 1660 AD</Italian>\n-\t\t<Spanish>Build the Red Fort, Harmandir Sahib and the Taj Mahal by 1660 AD</Spanish>\n+\t\t<English>Build the Red Fort, Shalimar Gardens and the Taj Mahal by 1660 AD</English>\n+\t\t<French>Build the Red Fort, Shalimar Gardens and the Taj Mahal by 1660 AD</French>\n+\t\t<German>Vollenden Sie bis zum Jahr 1660 n. Chr. das Rote Fort, die Shalimar-G&#228;rten und den Taj Mahal.</German>\n+\t\t<Italian>Build the Red Fort, Shalimar Gardens and the Taj Mahal by 1660 AD</Italian>\n+\t\t<Spanish>Build the Red Fort, Shalimar Gardens and the Taj Mahal by 1660 AD</Spanish>\n \t</TEXT>\n \t<TEXT>\n \t\t<Tag>TXT_KEY_UHV_MUG3_TITLE</Tag>\n", "new_path": "Assets/XML/Text/Victory.xml", "old_path": "Assets/XML/Text/Victory.xml" } ]
a496b39ce3c6b34ba679be04dd07cd2614ef88a3
apache/libcloud
null
null
Add new "ex_get_volumes_for_node" and "ex_get_ips_for_node" method to the Gridscale driver and support for destroying / deleting associated resources when destroying a node.
[ { "change_type": "MODIFY", "diff": "@@ -39,12 +39,13 @@ class GridscaleIp(object):\n :type create_time: ``str``\n \"\"\"\n \n- def __init__(self, id, family, prefix, create_time, address):\n+ def __init__(self, id, family, prefix, create_time, address, extra=None):\n self.id = id\n self.family = family\n self.prefix = prefix\n self.create_time = create_time\n self.ip_address = address\n+ self.extra = extra or {}\n \n def __repr__(self):\n return ('Ip: id={}, family={}, prefix={}, create_time={},Ip_address={}'\n@@ -438,19 +439,37 @@ class GridscaleNodeDriver(GridscaleBaseDriver, NodeDriver):\n 'templates', self.connection.poll_response_initial.object[\n 'object_uuid']))\n \n- def destroy_node(self, node):\n+ def destroy_node(self, node, ex_destroy_associated_resources=False):\n \"\"\"\n Destroy node.\n \n :param node: Node object.\n :type node: :class:`.Node`\n \n+ :param ex_destroy_associated_resources: True to destroy associated\n+ resources such as storage volumes and IPs.\n+ :type ex_destroy_associated_resources: ``bool``\n+\n :return: True if the destroy was successful, otherwise False.\n :rtype: ``bool``\n \"\"\"\n+ if ex_destroy_associated_resources:\n+ associated_volumes = self.ex_get_volumes_for_node(node=node)\n+ associated_ips = self.ex_get_ips_for_node(node=node)\n+\n+ # 1. Delete the server itself\n result = self._sync_request(endpoint='objects/servers/{}'\n .format(node.id),\n method='DELETE')\n+\n+ # 2. Destroy associated resouces (if requested)\n+ if ex_destroy_associated_resources:\n+ for volume in associated_volumes:\n+ self.destroy_volume(volume=volume)\n+\n+ for ip in associated_ips:\n+ self.ex_destroy_ip(ip=ip)\n+\n return result.status == 204\n \n def destroy_volume(self, volume):\n@@ -837,6 +856,44 @@ class GridscaleNodeDriver(GridscaleBaseDriver, NodeDriver):\n method='PATCH')\n return result\n \n+ def ex_get_volumes_for_node(self, node):\n+ \"\"\"\n+ Return a list of associated volumes for the provided node.\n+\n+ :rtype: ``list`` of :class:`StorageVolume`\n+ \"\"\"\n+ volumes = self.list_volumes()\n+\n+ result = []\n+ for volume in volumes:\n+ related_servers = volume.extra.get('relations', {}) \\\n+ .get('servers', [])\n+ for server in related_servers:\n+ if server['object_uuid'] == node.id:\n+ result.append(volume)\n+\n+ return result\n+\n+ def ex_get_ips_for_node(self, node):\n+ \"\"\"\n+ Return a list of associated IPs for the provided node.\n+\n+ :rype: ``list`` of :class:`GridscaleIp`\n+ \"\"\"\n+ ips = self.ex_list_ips()\n+\n+ result = []\n+ for ip in ips:\n+ related_servers = ip.extra.get('relations', {}) \\\n+ .get('servers', [])\n+ for server in related_servers:\n+ # TODO: This is not consistent with volumes where key is\n+ # called \"object_uuid\"\n+ if server['server_uuid'] == node.id:\n+ result.append(ip)\n+\n+ return result\n+\n def _to_node(self, data):\n extra_keys = ['cores', 'power', 'memory', 'current_price', 'relations']\n \n@@ -862,7 +919,6 @@ class GridscaleNodeDriver(GridscaleBaseDriver, NodeDriver):\n return node\n \n def _to_volume(self, data):\n-\n extra_keys = ['create_time', 'current_price', 'storage_type',\n 'relations']\n \n@@ -898,10 +954,15 @@ class GridscaleNodeDriver(GridscaleBaseDriver, NodeDriver):\n return location\n \n def _to_ip(self, data):\n+ extra_keys = ['create_time', 'current_price', 'name',\n+ 'relations', 'reverse_dns', 'status']\n+ extra = self._extract_values_to_dict(data=data, keys=extra_keys)\n+\n ip = GridscaleIp(id=data['object_uuid'], family=data['family'],\n prefix=data['prefix'],\n 
create_time=data['create_time'],\n- address=data['ip'])\n+ address=data['ip'],\n+ extra=extra)\n \n return ip\n \n", "new_path": "libcloud/compute/drivers/gridscale.py", "old_path": "libcloud/compute/drivers/gridscale.py" } ]
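A minimal usage sketch of the new driver methods described by this commit message; the credentials below and the assumption that at least one node exists are placeholders, not real values.

```python
# Hedged sketch: the Gridscale credentials and the node are placeholders.
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver

cls = get_driver(Provider.GRIDSCALE)
driver = cls('user-uuid', 'api-token')  # placeholder credentials

node = driver.list_nodes()[0]

# Inspect the resources that would be removed together with the node
volumes = driver.ex_get_volumes_for_node(node=node)
ips = driver.ex_get_ips_for_node(node=node)
print(volumes, ips)

# Destroy the node and its associated storage volumes and IPs in one call
driver.destroy_node(node, ex_destroy_associated_resources=True)
```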
86edc251a6f1252bff2a34af34451e231ad87218
apache/libcloud
null
null
Update S3 storage driver so it supports a "region" constructor argument. This way the user can pass this constructor argument instead of using a different driver class per region. Also update the code to return a more user-friendly error message if a "moved permanently" error is returned by the API.
[ { "change_type": "MODIFY", "diff": "@@ -71,6 +71,28 @@ S3_SA_EAST_HOST = 's3-sa-east-1.amazonaws.com'\n S3_SA_SOUTHEAST2_HOST = 's3-sa-east-2.amazonaws.com'\n S3_CA_CENTRAL_HOST = 's3-ca-central-1.amazonaws.com'\n \n+# Maps AWS region name to connection hostname\n+REGION_TO_HOST_MAP = {\n+ 'us-east-1': S3_US_STANDARD_HOST,\n+ 'us-east-2': S3_US_EAST2_HOST,\n+ 'us-west-1': S3_US_WEST_HOST,\n+ 'us-west-2': S3_US_WEST_OREGON_HOST,\n+ 'us-gov-west-1': S3_US_GOV_WEST_HOST,\n+ 'cn-north-1': S3_CN_NORTH_HOST,\n+ 'cn-northwest-1': S3_CN_NORTHWEST_HOST,\n+ 'eu-west-1': S3_EU_WEST_HOST,\n+ 'eu-west-2': S3_EU_WEST2_HOST,\n+ 'eu-central-1': S3_EU_CENTRAL_HOST,\n+ 'ap-south-1': S3_AP_SOUTH_HOST,\n+ 'ap-southeast-1': S3_AP_SOUTHEAST_HOST,\n+ 'ap-southeast-2': S3_AP_SOUTHEAST2_HOST,\n+ 'ap-northeast-1': S3_AP_NORTHEAST1_HOST,\n+ 'ap-northeast-2': S3_AP_NORTHEAST2_HOST,\n+ 'sa-east-1': S3_SA_EAST_HOST,\n+ 'sa-east-2': S3_SA_SOUTHEAST2_HOST,\n+ 'ca-central-1': S3_CA_CENTRAL_HOST\n+}\n+\n API_VERSION = '2006-03-01'\n NAMESPACE = 'http://s3.amazonaws.com/doc/%s/' % (API_VERSION)\n \n@@ -95,8 +117,12 @@ class S3Response(AWSBaseResponse):\n if self.status in [httplib.UNAUTHORIZED, httplib.FORBIDDEN]:\n raise InvalidCredsError(self.body)\n elif self.status == httplib.MOVED_PERMANENTLY:\n- raise LibcloudError('This bucket is located in a different ' +\n- 'region. Please use the correct driver.',\n+ bucket_region = self.headers.get('x-amz-bucket-region', None)\n+ used_region = self.connection.driver.region\n+ raise LibcloudError('This bucket is located in a different '\n+ 'region. Please use the correct driver.'\n+ 'Bucket region \"%s\", used region \"%s\"' %\n+ (bucket_region, used_region),\n driver=S3StorageDriver)\n raise LibcloudError('Unknown error. Status code: %d' % (self.status),\n driver=S3StorageDriver)\n@@ -1001,10 +1027,34 @@ class BaseS3StorageDriver(StorageDriver):\n \n \n class S3StorageDriver(AWSDriver, BaseS3StorageDriver):\n- name = 'Amazon S3 (us-east-1)'\n+ name = 'Amazon S3'\n connectionCls = S3SignatureV4Connection\n region_name = 'us-east-1'\n \n+ def __init__(self, key, secret=None, secure=True, host=None, port=None,\n+ region=None, token=None, **kwargs):\n+ # Here for backward compatibility for old and deprecated driver class per region\n+ # approach\n+ if hasattr(self, 'region_name') and not region:\n+ region = self.region_name # pylint: disable=no-member\n+\n+ self.region_name = region\n+\n+ if region and region not in REGION_TO_HOST_MAP.keys():\n+ raise ValueError('Invalid or unsupported region: %s' % (region))\n+\n+ self.name = 'Amazon S3 (%s)' % (region)\n+\n+ host = REGION_TO_HOST_MAP[region]\n+ super(S3StorageDriver, self).__init__(key=key, secret=secret,\n+ secure=secure, host=host,\n+ port=port,\n+ region=region, token=token,**kwargs)\n+\n+ @classmethod\n+ def list_regions(self):\n+ return REGION_TO_HOST_MAP.keys()\n+\n \n class S3USEast2Connection(S3SignatureV4Connection):\n host = S3_US_EAST2_HOST\n", "new_path": "libcloud/storage/drivers/s3.py", "old_path": "libcloud/storage/drivers/s3.py" } ]
21a78a17929f0633817c337208ab2a21dc0639f9
apache/libcloud
null
null
Update setup.py so it doesn't rely on any functions from libcloud packages. This way we avoid having setup.py depend on typing, requests and other modules libcloud depends on.
[ { "change_type": "MODIFY", "diff": "@@ -12,12 +12,14 @@\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n+\n import os\n import sys\n+import re\n+import fnmatch\n \n from setuptools import setup\n from distutils.core import Command\n-from os.path import join as pjoin\n \n try:\n import epydoc # NOQA\n@@ -25,11 +27,127 @@ try:\n except ImportError:\n has_epydoc = False\n \n+# NOTE: Those functions are intentionally moved in-line to prevent setup.py dependening on any\n+# Libcloud code which depends on libraries such as typing, enum, requests, etc.\n+# START: Taken From Twisted Python which licensed under MIT license\n+# https://github.com/powdahound/twisted/blob/master/twisted/python/dist.py\n+# https://github.com/powdahound/twisted/blob/master/LICENSE\n+\n+# Names that are excluded from globbing results:\n+EXCLUDE_NAMES = ['{arch}', 'CVS', '.cvsignore', '_darcs',\n+ 'RCS', 'SCCS', '.svn']\n+EXCLUDE_PATTERNS = ['*.py[cdo]', '*.s[ol]', '.#*', '*~', '*.py']\n+\n+\n+def _filter_names(names):\n+ \"\"\"\n+ Given a list of file names, return those names that should be copied.\n+ \"\"\"\n+ names = [n for n in names\n+ if n not in EXCLUDE_NAMES]\n+ # This is needed when building a distro from a working\n+ # copy (likely a checkout) rather than a pristine export:\n+ for pattern in EXCLUDE_PATTERNS:\n+ names = [n for n in names\n+ if not fnmatch.fnmatch(n, pattern) and not n.endswith('.py')]\n+ return names\n+\n+\n+def relative_to(base, relativee):\n+ \"\"\"\n+ Gets 'relativee' relative to 'basepath'.\n+\n+ i.e.,\n+\n+ >>> relative_to('/home/', '/home/radix/')\n+ 'radix'\n+ >>> relative_to('.', '/home/radix/Projects/Twisted')\n+ 'Projects/Twisted'\n+\n+ The 'relativee' must be a child of 'basepath'.\n+ \"\"\"\n+ basepath = os.path.abspath(base)\n+ relativee = os.path.abspath(relativee)\n+ if relativee.startswith(basepath):\n+ relative = relativee[len(basepath):]\n+ if relative.startswith(os.sep):\n+ relative = relative[1:]\n+ return os.path.join(base, relative)\n+ raise ValueError(\"%s is not a subpath of %s\" % (relativee, basepath))\n+\n+\n+def get_packages(dname, pkgname=None, results=None, ignore=None, parent=None):\n+ \"\"\"\n+ Get all packages which are under dname. This is necessary for\n+ Python 2.2's distutils. Pretty similar arguments to getDataFiles,\n+ including 'parent'.\n+ \"\"\"\n+ parent = parent or \"\"\n+ prefix = []\n+ if parent:\n+ prefix = [parent]\n+ bname = os.path.basename(dname)\n+ ignore = ignore or []\n+ if bname in ignore:\n+ return []\n+ if results is None:\n+ results = []\n+ if pkgname is None:\n+ pkgname = []\n+ subfiles = os.listdir(dname)\n+ abssubfiles = [os.path.join(dname, x) for x in subfiles]\n \n-import libcloud.utils # NOQA\n-from libcloud.utils.dist import get_packages, get_data_files # NOQA\n+ if '__init__.py' in subfiles:\n+ results.append(prefix + pkgname + [bname])\n+ for subdir in filter(os.path.isdir, abssubfiles):\n+ get_packages(subdir, pkgname=pkgname + [bname],\n+ results=results, ignore=ignore,\n+ parent=parent)\n+ res = ['.'.join(result) for result in results]\n+ return res\n+\n+\n+def get_data_files(dname, ignore=None, parent=None):\n+ \"\"\"\n+ Get all the data files that should be included in this distutils Project.\n+\n+ 'dname' should be the path to the package that you're distributing.\n+\n+ 'ignore' is a list of sub-packages to ignore. This facilitates\n+ disparate package hierarchies. 
That's a fancy way of saying that\n+ the 'twisted' package doesn't want to include the 'twisted.conch'\n+ package, so it will pass ['conch'] as the value.\n+\n+ 'parent' is necessary if you're distributing a subpackage like\n+ twisted.conch. 'dname' should point to 'twisted/conch' and 'parent'\n+ should point to 'twisted'. This ensures that your data_files are\n+ generated correctly, only using relative paths for the first element\n+ of the tuple ('twisted/conch/*').\n+ The default 'parent' is the current working directory.\n+ \"\"\"\n+ parent = parent or \".\"\n+ ignore = ignore or []\n+ result = []\n+ for directory, subdirectories, filenames in os.walk(dname):\n+ resultfiles = []\n+ for exname in EXCLUDE_NAMES:\n+ if exname in subdirectories:\n+ subdirectories.remove(exname)\n+ for ig in ignore:\n+ if ig in subdirectories:\n+ subdirectories.remove(ig)\n+ for filename in _filter_names(filenames):\n+ resultfiles.append(filename)\n+ if resultfiles:\n+ for filename in resultfiles:\n+ file_path = os.path.join(directory, filename)\n+ if parent:\n+ file_path = file_path.replace(parent + os.sep, '')\n+ result.append(file_path)\n+\n+ return result\n+# END: Taken from Twisted\n \n-libcloud.utils.SHOW_DEPRECATION_WARNING = False\n \n # Different versions of python have different requirements. We can't use\n # libcloud.utils.py3 here because it relies on backports dependency being\n@@ -76,11 +194,20 @@ if PY2_pre_27 or PY3_pre_34:\n \n def read_version_string():\n version = None\n- sys.path.insert(0, pjoin(os.getcwd()))\n- from libcloud import __version__\n- version = __version__\n- sys.path.pop(0)\n- return version\n+ cwd = os.path.dirname(os.path.abspath(__file__))\n+ version_file = os.path.join(cwd, 'libcloud/__init__.py')\n+\n+ with open(version_file) as fp:\n+ content = fp.read()\n+\n+ match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n+ content, re.M)\n+\n+ if match:\n+ version = match.group(1)\n+ return version\n+\n+ raise Exception('Cannot find version in libcloud/__init__.py')\n \n \n def forbid_publish():\n", "new_path": "setup.py", "old_path": "setup.py" }, { "change_type": "MODIFY", "diff": "@@ -30,12 +30,18 @@ whitelist_externals = cp\n scripts/*.sh\n [testenv:py2.7-dist]\n # Verify library installs without any dependencies\n-skipdist=False\n+skipdist = True\n+# NOTE: We intentionally set empty deps to ensure it works on a clean\n+# environment without any dependencies\n+deps =\n commands = python setup.py install\n \n [testenv:py3.7-dist]\n # Verify library installs without any dependencies\n-skipdist=False\n+skipdist = True\n+# NOTE: We intentionally set empty deps to ensure it works on a clean\n+# environment without any dependencies\n+deps =\n commands = python setup.py install\n \n [testenv:docs]\n", "new_path": "tox.ini", "old_path": "tox.ini" } ]
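A simplified, standalone sketch of the dependency-free version lookup this commit moves into setup.py; the module path is an assumption for illustration.

```python
# Hedged sketch: read __version__ using only the standard library.
import re

def read_version_string(version_file='libcloud/__init__.py'):
    with open(version_file) as fp:
        content = fp.read()

    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", content, re.M)
    if match:
        return match.group(1)

    raise Exception('Cannot find version in %s' % (version_file))

print(read_version_string())
```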
a846c6d8e5b42e3b559cb0f7c58b9bbc7188f36f
apache/libcloud
null
null
Update create_node() method in the Linode driver to explicitly declare supported keyword arguments instead of using **kwargs. That's now possible due to the improvements in the "deploy_node()" method.
[ { "change_type": "MODIFY", "diff": "@@ -173,7 +173,10 @@ class LinodeNodeDriver(NodeDriver):\n self.connection.request(API_ROOT, params=params)\n return True\n \n- def create_node(self, **kwargs):\n+ def create_node(self, name, image, size, auth, location=None, ex_swap=None,\n+ ex_rsize=None, ex_kernel=None, ex_payment=None,\n+ ex_comment=None, ex_private=False, lconfig=None, lroot=None,\n+ lswap=None):\n \"\"\"Create a new Linode, deploy a Linux distribution, and boot\n \n This call abstracts much of the functionality of provisioning a Linode\n@@ -228,14 +231,11 @@ class LinodeNodeDriver(NodeDriver):\n :return: Node representing the newly-created Linode\n :rtype: :class:`Node`\n \"\"\"\n- name = kwargs[\"name\"]\n- image = kwargs[\"image\"]\n- size = kwargs[\"size\"]\n- auth = self._get_and_check_auth(kwargs[\"auth\"])\n+ auth = self._get_and_check_auth(auth)\n \n # Pick a location (resolves LIBCLOUD-41 in JIRA)\n- if \"location\" in kwargs:\n- chosen = kwargs[\"location\"].id\n+ if location:\n+ chosen = location.id\n elif self.datacenter:\n chosen = self.datacenter\n else:\n@@ -251,8 +251,7 @@ class LinodeNodeDriver(NodeDriver):\n raise LinodeException(0xFB, \"Invalid plan ID -- avail.plans\")\n \n # Payment schedule\n- payment = \"1\" if \"ex_payment\" not in kwargs else \\\n- str(kwargs[\"ex_payment\"])\n+ payment = \"1\" if not ex_payment else str(ex_payment)\n if payment not in [\"1\", \"12\", \"24\"]:\n raise LinodeException(0xFB, \"Invalid subscription (1, 12, 24)\")\n \n@@ -271,13 +270,13 @@ class LinodeNodeDriver(NodeDriver):\n \n # Swap size\n try:\n- swap = 128 if \"ex_swap\" not in kwargs else int(kwargs[\"ex_swap\"])\n+ swap = 128 if not ex_swap else int(kwargs[\"ex_swap\"])\n except Exception:\n raise LinodeException(0xFB, \"Need an integer swap size\")\n \n # Root partition size\n- imagesize = (size.disk - swap) if \"ex_rsize\" not in kwargs else\\\n- int(kwargs[\"ex_rsize\"])\n+ imagesize = (size.disk - swap) if not ex_rsize else\\\n+ int(ex_rsize)\n if (imagesize + swap) > size.disk:\n raise LinodeException(0xFB, \"Total disk images are too big\")\n \n@@ -288,8 +287,8 @@ class LinodeNodeDriver(NodeDriver):\n \"Invalid distro -- avail.distributions\")\n \n # Kernel\n- if \"ex_kernel\" in kwargs:\n- kernel = kwargs[\"ex_kernel\"]\n+ if ex_kernel:\n+ kernel = ex_kernel\n else:\n if image.extra['64bit']:\n # For a list of available kernel ids, see\n@@ -303,8 +302,8 @@ class LinodeNodeDriver(NodeDriver):\n raise LinodeException(0xFB, \"Invalid kernel -- avail.kernels\")\n \n # Comments\n- comments = \"Created by Apache libcloud <http://www.libcloud.org>\" if\\\n- \"ex_comment\" not in kwargs else kwargs[\"ex_comment\"]\n+ comments = \"Created by Apache libcloud <https://www.libcloud.org>\" if\\\n+ not ex_comment else ex_comment\n \n # Step 1: linode.create\n params = {\n@@ -325,7 +324,7 @@ class LinodeNodeDriver(NodeDriver):\n self.connection.request(API_ROOT, params=params)\n \n # Step 1c. 
linode.ip.addprivate if it was requested\n- if \"ex_private\" in kwargs and kwargs[\"ex_private\"]:\n+ if ex_private:\n params = {\n \"api_action\": \"linode.ip.addprivate\",\n \"LinodeID\": linode[\"id\"]\n@@ -340,9 +339,15 @@ class LinodeNodeDriver(NodeDriver):\n \"lroot\": \"[%s] %s Disk Image\" % (linode[\"id\"], image.name),\n \"lswap\": \"[%s] Swap Space\" % linode[\"id\"]\n }\n- for what in [\"lconfig\", \"lroot\", \"lswap\"]:\n- if what in kwargs:\n- label[what] = kwargs[what]\n+\n+ if lconfig:\n+ label['lconfig'] = lconfig\n+\n+ if lroot:\n+ label['lroot'] = lroot\n+\n+ if lswap:\n+ label['lswap'] = lswap\n \n # Step 2: linode.disk.createfromdistribution\n if not root:\n@@ -382,7 +387,7 @@ class LinodeNodeDriver(NodeDriver):\n \"Comments\": comments,\n \"DiskList\": disks\n }\n- if \"ex_private\" in kwargs and kwargs[\"ex_private\"]:\n+ if ex_private:\n params['helper_network'] = True\n params['helper_distro'] = True\n \n", "new_path": "libcloud/compute/drivers/linode.py", "old_path": "libcloud/compute/drivers/linode.py" } ]
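A hedged usage sketch of the explicit create_node() signature; the API key and the image/size/location picked below are placeholders for illustration.

```python
# Hedged sketch: the Linode API key below is a placeholder.
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.compute.base import NodeAuthPassword

cls = get_driver(Provider.LINODE)
driver = cls('api-key')

image = driver.list_images()[0]
size = driver.list_sizes()[0]
location = driver.list_locations()[0]

# Supported options are now explicit keyword arguments instead of **kwargs
node = driver.create_node(name='example-node', image=image, size=size,
                          auth=NodeAuthPassword('example-password'),
                          location=location, ex_private=True)
```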
155c0f8bebbd9162ef847ffbbd831c979f3f659e
apache/libcloud
null
null
Simplify deploy_node and get rid of **kwargs. This will eventually allow us to simplify the method further and also get rid of the compatibility layer which passes all the deploy_node kwargs to create_node.
[ { "change_type": "MODIFY", "diff": "@@ -927,7 +927,10 @@ class NodeDriver(BaseDriver):\n raise NotImplementedError(\n 'create_node not implemented for this driver')\n \n- def deploy_node(self, **kwargs):\n+ def deploy_node(self, deploy, ssh_username='root', ssh_alternate_usernames=None,\n+ ssh_port=22, ssh_timeout=10, ssh_key=None, auth=None,\n+ timeout=SSH_CONNECT_TIMEOUT, max_tries=3, ssh_interface='public_ips',\n+ **create_node_kwargs):\n # type: (...) -> Node\n \"\"\"\n Create a new node, and start deployment.\n@@ -1018,13 +1021,12 @@ class NodeDriver(BaseDriver):\n raise RuntimeError('paramiko is not installed. You can install ' +\n 'it using pip: pip install paramiko')\n \n- if 'auth' in kwargs:\n- auth = kwargs['auth']\n+ if auth:\n if not isinstance(auth, (NodeAuthSSHKey, NodeAuthPassword)):\n raise NotImplementedError(\n 'If providing auth, only NodeAuthSSHKey or'\n 'NodeAuthPassword is supported')\n- elif 'ssh_key' in kwargs:\n+ elif ssh_key:\n # If an ssh_key is provided we can try deploy_node\n pass\n elif 'create_node' in self.features:\n@@ -1046,59 +1048,59 @@ class NodeDriver(BaseDriver):\n # NOTE 2: Some drivers which use password based SSH authentication\n # rely on password being stored on the \"auth\" argument and that's why\n # we also propagate that argument to \"create_node()\" method.\n- create_node_kwargs = dict([(key, value) for key, value in\n- kwargs.items() if key\n- not in DEPLOY_NODE_KWARGS])\n-\n try:\n- node = self.create_node(**create_node_kwargs)\n+ node = self.create_node(auth=auth, **create_node_kwargs)\n except TypeError as e:\n msg_1_re = (r'create_node\\(\\) missing \\d+ required '\n 'positional arguments.*')\n msg_2_re = r'create_node\\(\\) takes at least \\d+ arguments.*'\n if re.match(msg_1_re, str(e)) or re.match(msg_2_re, str(e)):\n- node = self.create_node(**kwargs)\n+ node = self.create_node(deploy=deploy,\n+ ssh_username=ssh_username,\n+ ssh_alternate_usernames=ssh_alternate_usernames,\n+ ssh_port=ssh_port,\n+ ssh_timeout=ssh_timeout,\n+ ssh_key=ssh_key,\n+ auth=auth,\n+ timeout=timeout,\n+ max_tries=max_tries,\n+ ssh_interface=ssh_interface,\n+ **create_node_kwargs)\n else:\n raise e\n \n- max_tries = kwargs.get('max_tries', 3)\n-\n password = None\n- if 'auth' in kwargs:\n- if isinstance(kwargs['auth'], NodeAuthPassword):\n- password = kwargs['auth'].password\n+ if auth:\n+ if isinstance(auth, NodeAuthPassword):\n+ password = auth.password\n elif 'password' in node.extra:\n password = node.extra['password']\n \n- ssh_interface = kwargs.get('ssh_interface', 'public_ips')\n+ wait_timeout = timeout or NODE_ONLINE_WAIT_TIMEOUT\n \n # Wait until node is up and running and has IP assigned\n try:\n node, ip_addresses = self.wait_until_running(\n nodes=[node],\n wait_period=3,\n- timeout=float(kwargs.get('timeout', NODE_ONLINE_WAIT_TIMEOUT)),\n+ timeout=wait_timeout,\n ssh_interface=ssh_interface)[0]\n except Exception as e:\n raise DeploymentError(node=node, original_exception=e, driver=self)\n \n- ssh_username = kwargs.get('ssh_username', 'root')\n- ssh_alternate_usernames = kwargs.get('ssh_alternate_usernames', [])\n- ssh_port = kwargs.get('ssh_port', 22)\n- ssh_timeout = kwargs.get('ssh_timeout', 10)\n- ssh_key_file = kwargs.get('ssh_key', None)\n- timeout = kwargs.get('timeout', SSH_CONNECT_TIMEOUT)\n+ ssh_alternate_usernames = ssh_alternate_usernames or []\n+ deploy_timeout = timeout or SSH_CONNECT_TIMEOUT\n \n deploy_error = None\n \n for username in ([ssh_username] + ssh_alternate_usernames):\n try:\n 
self._connect_and_run_deployment_script(\n- task=kwargs['deploy'], node=node,\n+ task=deploy, node=node,\n ssh_hostname=ip_addresses[0], ssh_port=ssh_port,\n ssh_username=username, ssh_password=password,\n- ssh_key_file=ssh_key_file, ssh_timeout=ssh_timeout,\n- timeout=timeout, max_tries=max_tries)\n+ ssh_key_file=ssh_key, ssh_timeout=ssh_timeout,\n+ timeout=deploy_timeout, max_tries=max_tries)\n except Exception as e:\n # Try alternate username\n # Todo: Need to fix paramiko so we can catch a more specific\n", "new_path": "libcloud/compute/base.py", "old_path": "libcloud/compute/base.py" } ]
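A hedged sketch of calling the reworked deploy_node(): deploy- and SSH-related options are now explicit keyword arguments, and anything else is forwarded to create_node(). The EC2 provider, credentials, AMI id and key path are assumptions for illustration only.

```python
# Hedged sketch: credentials, AMI id and key path are placeholders.
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.compute.deployment import ScriptDeployment

cls = get_driver(Provider.EC2)
driver = cls('access key id', 'secret key', region='us-east-1')

image = driver.list_images(ex_image_ids=['ami-0123456789abcdef0'])[0]
size = driver.list_sizes()[0]
step = ScriptDeployment('echo deployed > /tmp/deployed')

node = driver.deploy_node(name='example-node', image=image, size=size,
                          deploy=step, ssh_username='ubuntu',
                          ssh_key='~/.ssh/id_rsa', max_tries=3)
```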
b9747bc011e9e9830ab147327d7aeaa8447ad2d7
apache/libcloud
null
null
Add new storage API methods for downloading part of an object (range download) and implement them for the S3 and local storage drivers.
[ { "change_type": "MODIFY", "diff": "@@ -443,6 +443,68 @@ class StorageDriver(BaseDriver):\n raise NotImplementedError(\n 'download_object_as_stream not implemented for this driver')\n \n+ def download_object_range(self, obj, destination_path, start_bytes,\n+ end_bytes=None, overwrite_existing=False,\n+ delete_on_failure=True):\n+ # type: (Object, str, int, Optional[int], bool, bool) -> bool\n+ \"\"\"\n+ Download part of an object.\n+\n+ :param obj: Object instance.\n+ :type obj: :class:`libcloud.storage.base.Object`\n+\n+ :param destination_path: Full path to a file or a directory where the\n+ incoming file will be saved.\n+ :type destination_path: ``str``\n+\n+ :param start_bytes: Start byte offset for the range download.\n+ :type start_bytes: ``int``\n+\n+ :param end_bytes: End byte offset for the range download. If not\n+ provided, it will assume end of the file.\n+ :type end_bytes: ``int``\n+\n+ :param overwrite_existing: True to overwrite an existing file,\n+ defaults to False.\n+ :type overwrite_existing: ``bool``\n+\n+ :param delete_on_failure: True to delete a partially downloaded file if\n+ the download was not successful (hash\n+ mismatch / file size).\n+ :type delete_on_failure: ``bool``\n+\n+ :return: True if an object has been successfully downloaded, False\n+ otherwise.\n+ :rtype: ``bool``\n+\n+ \"\"\"\n+ raise NotImplementedError(\n+ 'download_object_range not implemented for this driver')\n+\n+ def download_object_range_as_stream(self, obj, start_bytes, end_bytes=None,\n+ chunk_size=None):\n+ # type: (Object, int, Optional[int], Optional[int]) -> Iterator[bytes]\n+ \"\"\"\n+ Return a iterator which yields range / part of the object data.\n+\n+ :param obj: Object instance\n+ :type obj: :class:`libcloud.storage.base.Object`\n+\n+ :param start_bytes: Start byte offset for the range download.\n+ :type start_bytes: ``int``\n+\n+ :param end_bytes: End byte offset for the range download. 
If not\n+ provided, it will assume end of the file.\n+ :type end_bytes: ``int``\n+\n+ :param chunk_size: Optional chunk size (in bytes).\n+ :type chunk_size: ``int``\n+\n+ :rtype: ``iterator`` of ``bytes``\n+ \"\"\"\n+ raise NotImplementedError(\n+ 'download_object_range_as_stream not implemented for this driver')\n+\n def upload_object(self, file_path, container, object_name, extra=None,\n verify_hash=True, headers=None):\n # type: (str, Container, str, Optional[dict], bool, Optional[Dict[str, str]]) -> Object # noqa: E501\n@@ -602,7 +664,7 @@ class StorageDriver(BaseDriver):\n \n def _save_object(self, response, obj, destination_path,\n overwrite_existing=False, delete_on_failure=True,\n- chunk_size=None):\n+ chunk_size=None, partial_download=False):\n \"\"\"\n Save object to the provided path.\n \n@@ -627,6 +689,10 @@ class StorageDriver(BaseDriver):\n (defaults to ``libcloud.storage.base.CHUNK_SIZE``, 8kb)\n :type chunk_size: ``int``\n \n+ :param partial_download: True if this is a range (partial) save,\n+ False otherwise.\n+ :type partial_download: ``bool``\n+\n :return: ``True`` on success, ``False`` otherwise.\n :rtype: ``bool``\n \"\"\"\n@@ -658,8 +724,10 @@ class StorageDriver(BaseDriver):\n file_handle.write(b(chunk))\n bytes_transferred += len(chunk)\n \n- if int(obj.size) != int(bytes_transferred):\n+ if not partial_download and int(obj.size) != int(bytes_transferred):\n # Transfer failed, support retry?\n+ # NOTE: We only perform this check if this is a regular and not a\n+ # partial / range download\n if delete_on_failure:\n try:\n os.unlink(file_path)\n", "new_path": "libcloud/storage/base.py", "old_path": "libcloud/storage/base.py" }, { "change_type": "MODIFY", "diff": "@@ -31,6 +31,7 @@ except ImportError:\n 'using pip: pip install lockfile')\n \n from libcloud.utils.files import read_in_chunks\n+from libcloud.utils.files import exhaust_iterator\n from libcloud.utils.py3 import relpath\n from libcloud.utils.py3 import u\n from libcloud.common.base import Connection\n@@ -416,6 +417,52 @@ class LocalStorageDriver(StorageDriver):\n for data in read_in_chunks(obj_file, chunk_size=chunk_size):\n yield data\n \n+ def download_object_range(self, obj, destination_path, start_bytes,\n+ end_bytes=None, overwrite_existing=False,\n+ delete_on_failure=True):\n+ base_name = os.path.basename(destination_path)\n+\n+ if not base_name and not os.path.exists(destination_path):\n+ raise LibcloudError(\n+ value='Path %s does not exist' % (destination_path),\n+ driver=self)\n+\n+ if not base_name:\n+ file_path = os.path.join(destination_path, obj.name)\n+ else:\n+ file_path = destination_path\n+\n+ if os.path.exists(file_path) and not overwrite_existing:\n+ raise LibcloudError(\n+ value='File %s already exists, but ' % (file_path) +\n+ 'overwrite_existing=False',\n+ driver=self)\n+\n+ iterator = self.download_object_range_as_stream(\n+ obj=obj,\n+ start_bytes=start_bytes,\n+ end_bytes=end_bytes)\n+\n+ with open(file_path, 'wb') as fp:\n+ fp.write(exhaust_iterator(iterator))\n+\n+ return True\n+\n+ def download_object_range_as_stream(self, obj, start_bytes, end_bytes=None,\n+ chunk_size=None):\n+ path = self.get_object_cdn_url(obj)\n+ with open(path, 'rb') as obj_file:\n+ file_size = len(obj_file.read())\n+\n+ if not end_bytes:\n+ read_bytes = file_size\n+ else:\n+ read_bytes = (file_size - end_bytes - start_bytes) - 1\n+\n+ obj_file.seek(start_bytes)\n+ data = obj_file.read(read_bytes)\n+ yield data\n+\n def upload_object(self, file_path, container, object_name, extra=None,\n verify_hash=True, 
headers=None):\n \"\"\"\n", "new_path": "libcloud/storage/drivers/local.py", "old_path": "libcloud/storage/drivers/local.py" }, { "change_type": "MODIFY", "diff": "@@ -112,7 +112,7 @@ RESPONSES_PER_REQUEST = 100\n class S3Response(AWSBaseResponse):\n namespace = None\n valid_response_codes = [httplib.NOT_FOUND, httplib.CONFLICT,\n- httplib.BAD_REQUEST]\n+ httplib.BAD_REQUEST, httplib.PARTIAL_CONTENT]\n \n def success(self):\n i = int(self.status)\n@@ -469,6 +469,54 @@ class BaseS3StorageDriver(StorageDriver):\n 'chunk_size': chunk_size},\n success_status_code=httplib.OK)\n \n+ def download_object_range(self, obj, destination_path, start_bytes,\n+ end_bytes=None, overwrite_existing=False,\n+ delete_on_failure=True):\n+ obj_path = self._get_object_path(obj.container, obj.name)\n+\n+ range_str = 'bytes=%s-' % (start_bytes)\n+\n+ if end_bytes:\n+ range_str += str(end_bytes)\n+\n+ headers = {'Range': range_str}\n+\n+ response = self.connection.request(obj_path, method='GET',\n+ headers=headers, raw=True)\n+\n+ return self._get_object(obj=obj, callback=self._save_object,\n+ response=response,\n+ callback_kwargs={\n+ 'obj': obj,\n+ 'response': response.response,\n+ 'destination_path': destination_path,\n+ 'overwrite_existing': overwrite_existing,\n+ 'delete_on_failure': delete_on_failure,\n+ 'partial_download': True},\n+ success_status_code=httplib.PARTIAL_CONTENT)\n+\n+ def download_object_range_as_stream(self, obj, start_bytes, end_bytes=None,\n+ chunk_size=None):\n+ obj_path = self._get_object_path(obj.container, obj.name)\n+\n+ range_str = 'bytes=%s-' % (start_bytes)\n+\n+ if end_bytes:\n+ range_str += str(end_bytes)\n+\n+ headers = {'Range': range_str}\n+\n+ response = self.connection.request(obj_path, method='GET',\n+ headers=headers,\n+ stream=True, raw=True)\n+\n+ return self._get_object(\n+ obj=obj, callback=read_in_chunks,\n+ response=response,\n+ callback_kwargs={'iterator': response.iter_content(CHUNK_SIZE),\n+ 'chunk_size': chunk_size},\n+ success_status_code=httplib.PARTIAL_CONTENT)\n+\n def upload_object(self, file_path, container, object_name, extra=None,\n verify_hash=True, headers=None, ex_storage_class=None):\n \"\"\"\n", "new_path": "libcloud/storage/drivers/s3.py", "old_path": "libcloud/storage/drivers/s3.py" } ]
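A short, hedged usage sketch of the new range download API; the credentials, container, object name and destination path are placeholders.

```python
# Hedged sketch: credentials, container and object names are placeholders.
from libcloud.storage.types import Provider
from libcloud.storage.providers import get_driver

cls = get_driver(Provider.S3)
driver = cls('access key id', 'secret key')

obj = driver.get_object(container_name='backups', object_name='dump.sql')

# Download only the selected byte range to a local file
driver.download_object_range(obj=obj, destination_path='/tmp/dump.part',
                             start_bytes=0, end_bytes=1024,
                             overwrite_existing=True)

# Or iterate over the same range as a stream of chunks
stream = driver.download_object_range_as_stream(obj=obj, start_bytes=0,
                                                end_bytes=1024)
for chunk in stream:
    print(len(chunk))
```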
9b7c220bb6f1a91aae9295964c4a40574fa28b08
apache/libcloud
null
null
Make start and end byte offsets zero-index based and make them behave in the same manner as Python indexing. Also update the affected driver code.
[ { "change_type": "MODIFY", "diff": "@@ -457,11 +457,13 @@ class StorageDriver(BaseDriver):\n incoming file will be saved.\n :type destination_path: ``str``\n \n- :param start_bytes: Start byte offset for the range download.\n+ :param start_bytes: Start byte offset (inclusive) for the range\n+ download. Offset is 0 index based so the first\n+ byte in file file is \"0\".\n :type start_bytes: ``int``\n \n- :param end_bytes: End byte offset for the range download. If not\n- provided, it will assume end of the file.\n+ :param end_bytes: End byte offset (non-inclusive) for the range download.\n+ If not provided, it will default to the end of the file.\n :type end_bytes: ``int``\n \n :param overwrite_existing: True to overwrite an existing file,\n@@ -490,11 +492,13 @@ class StorageDriver(BaseDriver):\n :param obj: Object instance\n :type obj: :class:`libcloud.storage.base.Object`\n \n- :param start_bytes: Start byte offset for the range download.\n+ :param start_bytes: Start byte offset (inclusive) for the range\n+ download. Offset is 0 index based so the first\n+ byte in file file is \"0\".\n :type start_bytes: ``int``\n \n- :param end_bytes: End byte offset for the range download. If not\n- provided, it will assume end of the file.\n+ :param end_bytes: End byte offset (non-inclusive) for the range download.\n+ If not provided, it will default to the end of the file.\n :type end_bytes: ``int``\n \n :param chunk_size: Optional chunk size (in bytes).\n", "new_path": "libcloud/storage/base.py", "old_path": "libcloud/storage/base.py" }, { "change_type": "MODIFY", "diff": "@@ -463,9 +463,9 @@ class LocalStorageDriver(StorageDriver):\n if not end_bytes:\n read_bytes = (file_size - start_bytes) + 1\n else:\n- read_bytes = (end_bytes - start_bytes) + 1\n+ read_bytes = (end_bytes - start_bytes)\n \n- obj_file.seek(start_bytes - 1)\n+ obj_file.seek(start_bytes)\n data = obj_file.read(read_bytes)\n yield data\n \n", "new_path": "libcloud/storage/drivers/local.py", "old_path": "libcloud/storage/drivers/local.py" }, { "change_type": "MODIFY", "diff": "@@ -325,7 +325,7 @@ class LocalTests(unittest.TestCase):\n self.remove_tmp_file(tmppath)\n \n def test_download_object_range_success(self):\n- content = b'foo bar baz'\n+ content = b'0123456789123456789'\n tmppath = self.make_tmp_file(content=content)\n container = self.driver.create_container('test6')\n obj = container.upload_object(tmppath, 'test')\n@@ -335,7 +335,7 @@ class LocalTests(unittest.TestCase):\n # 1. Only start_bytes provided\n result = self.driver.download_object_range(obj=obj,\n destination_path=destination_path,\n- start_bytes=5,\n+ start_bytes=4,\n overwrite_existing=True,\n delete_on_failure=True)\n self.assertTrue(result)\n@@ -343,14 +343,14 @@ class LocalTests(unittest.TestCase):\n with open(destination_path, 'rb') as fp:\n written_content = fp.read()\n \n- self.assertEqual(written_content, b'bar baz')\n- self.assertEqual(written_content, content[5 - 1:])\n+ self.assertEqual(written_content, b'456789123456789')\n+ self.assertEqual(written_content, content[4:])\n \n # 2. 
start_bytes and end_bytes is provided\n result = self.driver.download_object_range(obj=obj,\n destination_path=destination_path,\n- start_bytes=5,\n- end_bytes=7,\n+ start_bytes=4,\n+ end_bytes=6,\n overwrite_existing=True,\n delete_on_failure=True)\n self.assertTrue(result)\n@@ -358,12 +358,12 @@ class LocalTests(unittest.TestCase):\n with open(destination_path, 'rb') as fp:\n written_content = fp.read()\n \n- self.assertEqual(written_content, b'bar')\n- self.assertEqual(written_content, content[5 - 1:7])\n+ self.assertEqual(written_content, b'45')\n+ self.assertEqual(written_content, content[4:6])\n \n result = self.driver.download_object_range(obj=obj,\n destination_path=destination_path,\n- start_bytes=1,\n+ start_bytes=0,\n end_bytes=1,\n overwrite_existing=True,\n delete_on_failure=True)\n@@ -372,13 +372,13 @@ class LocalTests(unittest.TestCase):\n with open(destination_path, 'rb') as fp:\n written_content = fp.read()\n \n- self.assertEqual(written_content, b'f')\n- self.assertEqual(written_content, content[1 - 1:1])\n+ self.assertEqual(written_content, b'0')\n+ self.assertEqual(written_content, content[0:1])\n \n result = self.driver.download_object_range(obj=obj,\n destination_path=destination_path,\n- start_bytes=1,\n- end_bytes=3,\n+ start_bytes=0,\n+ end_bytes=2,\n overwrite_existing=True,\n delete_on_failure=True)\n self.assertTrue(result)\n@@ -386,8 +386,22 @@ class LocalTests(unittest.TestCase):\n with open(destination_path, 'rb') as fp:\n written_content = fp.read()\n \n- self.assertEqual(written_content, b'foo')\n- self.assertEqual(written_content, content[1 - 1:3])\n+ self.assertEqual(written_content, b'01')\n+ self.assertEqual(written_content, content[0:2])\n+\n+ result = self.driver.download_object_range(obj=obj,\n+ destination_path=destination_path,\n+ start_bytes=0,\n+ end_bytes=len(content),\n+ overwrite_existing=True,\n+ delete_on_failure=True)\n+ self.assertTrue(result)\n+\n+ with open(destination_path, 'rb') as fp:\n+ written_content = fp.read()\n+\n+ self.assertEqual(written_content, b'0123456789123456789')\n+ self.assertEqual(written_content, content[0:len(content)])\n \n obj.delete()\n container.delete()\n@@ -395,38 +409,38 @@ class LocalTests(unittest.TestCase):\n os.unlink(destination_path)\n \n def test_download_object_range_as_stream_success(self):\n- content = b'foo bar baz'\n+ content = b'0123456789123456789'\n tmppath = self.make_tmp_file(content=content)\n container = self.driver.create_container('test6')\n obj = container.upload_object(tmppath, 'test')\n \n # 1. Only start_bytes provided\n stream = self.driver.download_object_range_as_stream(obj=obj,\n- start_bytes=5,\n+ start_bytes=4,\n chunk_size=1024)\n written_content = b''.join(stream)\n \n- self.assertEqual(written_content, b'bar baz')\n- self.assertEqual(written_content, content[5 - 1:])\n+ self.assertEqual(written_content, b'456789123456789')\n+ self.assertEqual(written_content, content[4:])\n \n # 2. 
start_bytes and end_bytes is provided\n stream = self.driver.download_object_range_as_stream(obj=obj,\n- start_bytes=5,\n- end_bytes=7,\n+ start_bytes=4,\n+ end_bytes=6,\n chunk_size=1024)\n written_content = b''.join(stream)\n \n- self.assertEqual(written_content, b'bar')\n- self.assertEqual(written_content, content[5 - 1:7])\n+ self.assertEqual(written_content, b'45')\n+ self.assertEqual(written_content, content[4:6])\n \n stream = self.driver.download_object_range_as_stream(obj=obj,\n- start_bytes=1,\n+ start_bytes=0,\n end_bytes=1,\n chunk_size=1024)\n written_content = b''.join(stream)\n \n- self.assertEqual(written_content, b'f')\n- self.assertEqual(written_content, content[1 - 1:1])\n+ self.assertEqual(written_content, b'0')\n+ self.assertEqual(written_content, content[0:1])\n \n stream = self.driver.download_object_range_as_stream(obj=obj,\n start_bytes=1,\n@@ -434,8 +448,17 @@ class LocalTests(unittest.TestCase):\n chunk_size=1024)\n written_content = b''.join(stream)\n \n- self.assertEqual(written_content, b'foo')\n- self.assertEqual(written_content, content[1 - 1:3])\n+ self.assertEqual(written_content, b'12')\n+ self.assertEqual(written_content, content[1:3])\n+\n+ stream = self.driver.download_object_range_as_stream(obj=obj,\n+ start_bytes=0,\n+ end_bytes=len(content),\n+ chunk_size=1024)\n+ written_content = b''.join(stream)\n+\n+ self.assertEqual(written_content, b'0123456789123456789')\n+ self.assertEqual(written_content, content[0:len(content)])\n \n obj.delete()\n container.delete()\n", "new_path": "libcloud/test/storage/test_local.py", "old_path": "libcloud/test/storage/test_local.py" } ]
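A tiny illustration of the offset semantics this change settles on: the downloaded range is meant to line up with Python's slicing of the raw content, as exercised by the updated tests above.

```python
# Illustration only: offsets behave like Python slice indices.
content = b'0123456789123456789'

start_bytes, end_bytes = 4, 6
assert content[start_bytes:end_bytes] == b'45'

start_bytes = 4  # an open-ended range reads to the end of the content
assert content[start_bytes:] == b'456789123456789'
```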
943b60476e872d75457e84ea31e6de9d16cc6e8e
apache/libcloud
null
null
Add unit test methods for the new range download methods on the S3 driver.
[ { "change_type": "MODIFY", "diff": "@@ -13,6 +13,8 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+from typing import Optional\n+\n import base64\n import hmac\n import time\n@@ -474,13 +476,7 @@ class BaseS3StorageDriver(StorageDriver):\n delete_on_failure=True):\n obj_path = self._get_object_path(obj.container, obj.name)\n \n- range_str = 'bytes=%s-' % (start_bytes)\n-\n- if end_bytes:\n- range_str += str(end_bytes)\n-\n- headers = {'Range': range_str}\n-\n+ headers = {'Range': self._get_range_str(start_bytes, end_bytes)}\n response = self.connection.request(obj_path, method='GET',\n headers=headers, raw=True)\n \n@@ -499,13 +495,7 @@ class BaseS3StorageDriver(StorageDriver):\n chunk_size=None):\n obj_path = self._get_object_path(obj.container, obj.name)\n \n- range_str = 'bytes=%s-' % (start_bytes)\n-\n- if end_bytes:\n- range_str += str(end_bytes)\n-\n- headers = {'Range': range_str}\n-\n+ headers = {'Range': self._get_range_str(start_bytes, end_bytes)}\n response = self.connection.request(obj_path, method='GET',\n headers=headers,\n stream=True, raw=True)\n@@ -850,6 +840,19 @@ class BaseS3StorageDriver(StorageDriver):\n delimiter=None):\n self._abort_multipart(container, upload.key, upload.id)\n \n+ def _get_range_str(self, start_bytes, end_bytes=None):\n+ # type: (int, Optional[int]) -> str\n+ \"\"\"\n+ Return range string which is used as a Range header value for range\n+ requests.\n+ \"\"\"\n+ range_str = 'bytes=%s-' % (start_bytes)\n+\n+ if end_bytes:\n+ range_str += str(end_bytes)\n+\n+ return range_str\n+\n def _clean_object_name(self, name):\n name = urlquote(name)\n return name\n", "new_path": "libcloud/storage/drivers/s3.py", "old_path": "libcloud/storage/drivers/s3.py" }, { "change_type": "MODIFY", "diff": "@@ -54,7 +54,7 @@ from libcloud.test.file_fixtures import StorageFileFixtures # pylint: disable-m\n from libcloud.test.secrets import STORAGE_S3_PARAMS\n \n \n-class S3MockHttp(MockHttp):\n+class S3MockHttp(MockHttp, unittest.TestCase):\n \n fixtures = StorageFileFixtures('s3')\n base_headers = {}\n@@ -327,6 +327,34 @@ class S3MockHttp(MockHttp):\n headers,\n httplib.responses[httplib.OK])\n \n+ def _foo_bar_container_foo_bar_object_range(self, method, url, body, headers):\n+ # test_download_object_range_success\n+ body = '0123456789123456789'\n+\n+ self.assertTrue('Range' in headers)\n+ self.assertEqual(headers['Range'], 'bytes=5-7')\n+\n+ start_bytes, end_bytes = self._get_start_and_end_bytes_from_range_str(headers['Range'], body)\n+\n+ return (httplib.PARTIAL_CONTENT,\n+ body[start_bytes:end_bytes],\n+ headers,\n+ httplib.responses[httplib.PARTIAL_CONTENT])\n+\n+ def _foo_bar_container_foo_bar_object_range_stream(self, method, url, body, headers):\n+ # test_download_object_range_as_stream_success\n+ body = '0123456789123456789'\n+\n+ self.assertTrue('Range' in headers)\n+ self.assertEqual(headers['Range'], 'bytes=4-7')\n+\n+ start_bytes, end_bytes = self._get_start_and_end_bytes_from_range_str(headers['Range'], body)\n+\n+ return (httplib.PARTIAL_CONTENT,\n+ body[start_bytes:end_bytes],\n+ headers,\n+ httplib.responses[httplib.PARTIAL_CONTENT])\n+\n def _foo_bar_container_foo_bar_object_NO_BUFFER(self, method, url, body, headers):\n # test_download_object_data_is_not_buffered_in_memory\n body = generate_random_data(1000)\n@@ -353,6 +381,19 @@ class S3MockHttp(MockHttp):\n headers,\n httplib.responses[httplib.OK])\n \n+ def _get_start_and_end_bytes_from_range_str(self, range_str, body):\n+ # type: 
(str, str) -> Tuple[int, int]\n+ range_str = range_str.split('bytes=')[1]\n+ range_str = range_str.split('-')\n+ start_bytes = int(range_str[0])\n+\n+ if len(range_str) == 2:\n+ end_bytes = int(range_str[1])\n+ else:\n+ end_bytes = len(body)\n+\n+ return start_bytes, end_bytes\n+\n \n class S3Tests(unittest.TestCase):\n driver_type = S3StorageDriver\n@@ -611,6 +652,38 @@ class S3Tests(unittest.TestCase):\n delete_on_failure=True)\n self.assertTrue(result)\n \n+ def test_download_object_range_success(self):\n+ container = Container(name='foo_bar_container', extra={},\n+ driver=self.driver)\n+ obj = Object(name='foo_bar_object_range', size=19, hash=None, extra={},\n+ container=container, meta_data=None,\n+ driver=self.driver_type)\n+ destination_path = self._file_path\n+ result = self.driver.download_object_range(obj=obj,\n+ destination_path=destination_path,\n+ start_bytes=5,\n+ end_bytes=7,\n+ overwrite_existing=True,\n+ delete_on_failure=True)\n+ self.assertTrue(result)\n+\n+ with open(self._file_path, 'r') as fp:\n+ content = fp.read()\n+\n+ self.assertEqual(content, '56')\n+\n+ def test_download_object_range_as_stream_success(self):\n+ container = Container(name='foo_bar_container', extra={},\n+ driver=self.driver)\n+ obj = Object(name='foo_bar_object_range_stream', size=19, hash=None, extra={},\n+ container=container, meta_data=None,\n+ driver=self.driver_type)\n+ iterator = self.driver.download_object_range_as_stream(obj=obj,\n+ start_bytes=4,\n+ end_bytes=7)\n+ content = exhaust_iterator(iterator)\n+ self.assertEqual(content, b'456')\n+\n def test_download_object_data_is_not_buffered_in_memory(self):\n # Test case which verifies that response.body attribute is not accessed\n # and as such, whole body response is not buffered into RAM\n", "new_path": "libcloud/test/storage/test_s3.py", "old_path": "libcloud/test/storage/test_s3.py" } ]
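A small standalone version of the Range-header parsing helper used by the S3 mock in the tests above; the function name is illustrative and the open-ended case is handled explicitly here.

```python
# Hedged sketch: parse an HTTP Range header value such as 'bytes=5-7'.
def parse_range_header(range_str, body):
    spec = range_str.split('bytes=')[1]
    parts = spec.split('-')
    start_bytes = int(parts[0])
    if len(parts) == 2 and parts[1]:
        end_bytes = int(parts[1])
    else:
        end_bytes = len(body)
    return start_bytes, end_bytes

assert parse_range_header('bytes=5-7', '0123456789') == (5, 7)
assert parse_range_header('bytes=5-', '0123456789') == (5, 10)
```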
3ecc0c1e075d86ed9a0cf2e49caf4d0f66c1c13e
apache/libcloud
null
null
Make the end_bytes offset non-inclusive so it's consistent with Python indexing behavior and update the affected code.
[ { "change_type": "MODIFY", "diff": "@@ -506,7 +506,7 @@ class StorageDriver(BaseDriver):\n byte in file file is \"0\".\n :type start_bytes: ``int``\n \n- :param end_bytes: End byte offset (inclusive) for the range\n+ :param end_bytes: End byte offset (non-inclusive) for the range\n download. If not provided, it will default to the\n end of the file.\n :type end_bytes: ``int``\n@@ -915,13 +915,18 @@ class StorageDriver(BaseDriver):\n if start_bytes < 0:\n raise ValueError('start_bytes must be greater than 0')\n \n- if end_bytes and start_bytes > end_bytes:\n- raise ValueError('start_bytes must be smaller than end_bytes')\n+ if end_bytes:\n+ if start_bytes > end_bytes:\n+ raise ValueError('start_bytes must be smaller than end_bytes')\n+ elif start_bytes == end_bytes:\n+ raise ValueError('start_bytes and end_bytes can\\'t be the '\n+ 'same. end_bytes is non-inclusive')\n \n return True\n \n- def _get_standard_range_str(self, start_bytes, end_bytes=None):\n- # type: (int, Optional[int]) -> str\n+ def _get_standard_range_str(self, start_bytes, end_bytes=None,\n+ end_bytes_inclusive=False):\n+ # type: (int, Optional[int], bool) -> str\n \"\"\"\n Return range string which is used as a Range header value for range\n requests for drivers which follow standard Range header notation\n@@ -935,10 +940,17 @@ class StorageDriver(BaseDriver):\n bytes=0-2\n bytes=5-\n bytes=100-5000\n+\n+ :param end_bytes_inclusive: True if \"end_bytes\" offset should be\n+ inclusive (aka opposite from the Python indexing behavior where the end\n+ index is not inclusive).\n \"\"\"\n range_str = 'bytes=%s-' % (start_bytes)\n \n if end_bytes is not None:\n- range_str += str(end_bytes)\n+ if end_bytes_inclusive:\n+ range_str += str(end_bytes)\n+ else:\n+ range_str += str(end_bytes - 1)\n \n return range_str\n", "new_path": "libcloud/storage/base.py", "old_path": "libcloud/storage/base.py" }, { "change_type": "MODIFY", "diff": "@@ -440,7 +440,7 @@ class LocalStorageDriver(StorageDriver):\n if end_bytes is None:\n read_bytes = (file_size - start_bytes) + 1\n else:\n- read_bytes = (end_bytes - start_bytes) + 1\n+ read_bytes = (end_bytes - start_bytes)\n \n obj_file.seek(start_bytes)\n data = obj_file.read(read_bytes)\n", "new_path": "libcloud/storage/drivers/local.py", "old_path": "libcloud/storage/drivers/local.py" }, { "change_type": "MODIFY", "diff": "@@ -355,7 +355,7 @@ class AzureBlobsMockHttp(BaseRangeDownloadMockHttp, unittest.TestCase):\n body = '0123456789123456789'\n \n self.assertTrue('Range' in headers)\n- self.assertEqual(headers['Range'], 'bytes=5-8')\n+ self.assertEqual(headers['Range'], 'bytes=5-6')\n \n start_bytes, end_bytes = self._get_start_and_end_bytes_from_range_str(headers['Range'], body)\n \n@@ -710,12 +710,17 @@ class AzureBlobsTests(unittest.TestCase):\n destination_path = os.path.abspath(__file__) + '.temp'\n result = self.driver.download_object_range(obj=obj,\n start_bytes=5,\n- end_bytes=8,\n+ end_bytes=7,\n destination_path=destination_path,\n overwrite_existing=False,\n delete_on_failure=True)\n self.assertTrue(result)\n \n+ with open(destination_path, 'r') as fp:\n+ content = fp.read()\n+\n+ self.assertEqual(content, '56')\n+\n def test_download_object_range_as_stream_success(self):\n container = Container(name='foo_bar_container', extra={},\n driver=self.driver)\n@@ -726,7 +731,7 @@ class AzureBlobsTests(unittest.TestCase):\n \n stream = self.driver.download_object_range_as_stream(obj=obj,\n start_bytes=4,\n- end_bytes=5,\n+ end_bytes=6,\n chunk_size=None)\n \n consumed_stream = 
''.join(chunk.decode('utf-8') for chunk in stream)\n", "new_path": "libcloud/test/storage/test_azure_blobs.py", "old_path": "libcloud/test/storage/test_azure_blobs.py" }, { "change_type": "MODIFY", "diff": "@@ -254,11 +254,11 @@ class BaseStorageTests(unittest.TestCase):\n \n def test_get_standard_range_str(self):\n result = self.driver1._get_standard_range_str(0, 5)\n- self.assertEqual(result, 'bytes=0-5')\n+ self.assertEqual(result, 'bytes=0-4')\n \n result = self.driver1._get_standard_range_str(0)\n self.assertEqual(result, 'bytes=0-')\n- result = self.driver1._get_standard_range_str(0, 0)\n+ result = self.driver1._get_standard_range_str(0, 1)\n \n self.assertEqual(result, 'bytes=0-0')\n \n@@ -266,9 +266,12 @@ class BaseStorageTests(unittest.TestCase):\n self.assertEqual(result, 'bytes=200-')\n \n result = self.driver1._get_standard_range_str(10, 200)\n- self.assertEqual(result, 'bytes=10-200')\n+ self.assertEqual(result, 'bytes=10-199')\n \n result = self.driver1._get_standard_range_str(10, 11)\n+ self.assertEqual(result, 'bytes=10-10')\n+\n+ result = self.driver1._get_standard_range_str(10, 11, True)\n self.assertEqual(result, 'bytes=10-11')\n \n \n", "new_path": "libcloud/test/storage/test_base.py", "old_path": "libcloud/test/storage/test_base.py" }, { "change_type": "MODIFY", "diff": "@@ -352,7 +352,7 @@ class LocalTests(unittest.TestCase):\n result = self.driver.download_object_range(obj=obj,\n destination_path=destination_path,\n start_bytes=4,\n- end_bytes=5,\n+ end_bytes=6,\n overwrite_existing=True,\n delete_on_failure=True)\n self.assertTrue(result)\n@@ -361,12 +361,12 @@ class LocalTests(unittest.TestCase):\n written_content = fp.read()\n \n self.assertEqual(written_content, b'45')\n- self.assertEqual(written_content, content[4:5 + 1])\n+ self.assertEqual(written_content, content[4:6])\n \n result = self.driver.download_object_range(obj=obj,\n destination_path=destination_path,\n start_bytes=0,\n- end_bytes=0,\n+ end_bytes=1,\n overwrite_existing=True,\n delete_on_failure=True)\n self.assertTrue(result)\n@@ -375,12 +375,12 @@ class LocalTests(unittest.TestCase):\n written_content = fp.read()\n \n self.assertEqual(written_content, b'0')\n- self.assertEqual(written_content, content[0:0 + 1])\n+ self.assertEqual(written_content, content[0:1])\n \n result = self.driver.download_object_range(obj=obj,\n destination_path=destination_path,\n start_bytes=0,\n- end_bytes=1,\n+ end_bytes=2,\n overwrite_existing=True,\n delete_on_failure=True)\n self.assertTrue(result)\n@@ -389,7 +389,7 @@ class LocalTests(unittest.TestCase):\n written_content = fp.read()\n \n self.assertEqual(written_content, b'01')\n- self.assertEqual(written_content, content[0:1 + 1])\n+ self.assertEqual(written_content, content[0:2])\n \n result = self.driver.download_object_range(obj=obj,\n destination_path=destination_path,\n@@ -428,30 +428,30 @@ class LocalTests(unittest.TestCase):\n # 2. 
start_bytes and end_bytes is provided\n stream = self.driver.download_object_range_as_stream(obj=obj,\n start_bytes=4,\n- end_bytes=6,\n+ end_bytes=7,\n chunk_size=1024)\n written_content = b''.join(stream)\n \n self.assertEqual(written_content, b'456')\n- self.assertEqual(written_content, content[4:6 + 1])\n+ self.assertEqual(written_content, content[4:7])\n \n stream = self.driver.download_object_range_as_stream(obj=obj,\n start_bytes=0,\n- end_bytes=0,\n+ end_bytes=1,\n chunk_size=1024)\n written_content = b''.join(stream)\n \n self.assertEqual(written_content, b'0')\n- self.assertEqual(written_content, content[0:0 + 1])\n+ self.assertEqual(written_content, content[0:1])\n \n stream = self.driver.download_object_range_as_stream(obj=obj,\n start_bytes=1,\n- end_bytes=2,\n+ end_bytes=3,\n chunk_size=1024)\n written_content = b''.join(stream)\n \n self.assertEqual(written_content, b'12')\n- self.assertEqual(written_content, content[1:2 + 1])\n+ self.assertEqual(written_content, content[1:3])\n \n stream = self.driver.download_object_range_as_stream(obj=obj,\n start_bytes=0,\n@@ -483,6 +483,13 @@ class LocalTests(unittest.TestCase):\n start_bytes=5,\n end_bytes=4)\n \n+ expected_msg = 'start_bytes and end_bytes can\\'t be the same'\n+ self.assertRaisesRegex(ValueError, expected_msg,\n+ self.driver.download_object_range, obj=obj,\n+ destination_path=tmppath,\n+ start_bytes=5,\n+ end_bytes=5)\n+\n def test_download_object_range_as_stream_invalid_values(self):\n content = b'0123456789123456789'\n tmppath = self.make_tmp_file(content=content)\n@@ -514,6 +521,13 @@ class LocalTests(unittest.TestCase):\n end_bytes=len(content) + 1,\n chunk_size=1024)\n \n+ expected_msg = 'start_bytes and end_bytes can\\'t be the same'\n+ stream = self.driver.download_object_range_as_stream(\n+ obj=obj,\n+ start_bytes=5,\n+ end_bytes=5,\n+ chunk_size=1024)\n+\n obj.delete()\n container.delete()\n self.remove_tmp_file(tmppath)\n", "new_path": "libcloud/test/storage/test_local.py", "old_path": "libcloud/test/storage/test_local.py" }, { "change_type": "MODIFY", "diff": "@@ -333,7 +333,7 @@ class S3MockHttp(BaseRangeDownloadMockHttp, unittest.TestCase):\n body = '0123456789123456789'\n \n self.assertTrue('Range' in headers)\n- self.assertEqual(headers['Range'], 'bytes=5-7')\n+ self.assertEqual(headers['Range'], 'bytes=5-6')\n \n start_bytes, end_bytes = self._get_start_and_end_bytes_from_range_str(headers['Range'], body)\n \n@@ -347,7 +347,7 @@ class S3MockHttp(BaseRangeDownloadMockHttp, unittest.TestCase):\n body = '0123456789123456789'\n \n self.assertTrue('Range' in headers)\n- self.assertEqual(headers['Range'], 'bytes=4-7')\n+ self.assertEqual(headers['Range'], 'bytes=4-6')\n \n start_bytes, end_bytes = self._get_start_and_end_bytes_from_range_str(headers['Range'], body)\n \n@@ -658,7 +658,7 @@ class S3Tests(unittest.TestCase):\n with open(self._file_path, 'r') as fp:\n content = fp.read()\n \n- self.assertEqual(content, '567')\n+ self.assertEqual(content, '56')\n \n def test_download_object_range_as_stream_success(self):\n container = Container(name='foo_bar_container', extra={},\n@@ -670,7 +670,7 @@ class S3Tests(unittest.TestCase):\n start_bytes=4,\n end_bytes=7)\n content = exhaust_iterator(iterator)\n- self.assertEqual(content, b'4567')\n+ self.assertEqual(content, b'456')\n \n def test_download_object_data_is_not_buffered_in_memory(self):\n # Test case which verifies that response.body attribute is not accessed\n", "new_path": "libcloud/test/storage/test_s3.py", "old_path": "libcloud/test/storage/test_s3.py" 
} ]
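The diff above switches libcloud's range downloads to an exclusive `end_bytes`, matching Python slicing, and adjusts the standard HTTP Range header helper accordingly. A small sketch of the resulting behaviour, assuming a local storage driver rooted at a hypothetical path and an object whose content is the bytes `0123456789`; the container and object names are placeholders:

```python
# Sketch only: storage root, container and object names are made up.
from libcloud.storage.types import Provider
from libcloud.storage.providers import get_driver

cls = get_driver(Provider.LOCAL)
driver = cls('/tmp/storage')  # hypothetical local storage root

# Object assumed to contain b'0123456789'
obj = driver.get_object('example-container', 'example-object')

# end_bytes is now exclusive (Python-slice style): bytes 4 and 5 are returned.
stream = driver.download_object_range_as_stream(obj=obj, start_bytes=4, end_bytes=6)
assert b''.join(stream) == b'45'

# For drivers using standard Range headers the helper subtracts one,
# e.g. _get_standard_range_str(4, 6) now yields 'bytes=4-5' instead of 'bytes=4-6'.
```

Note that `start_bytes == end_bytes` now raises a ValueError, since an empty range is meaningless with an exclusive end offset.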
d110a45eb874bec8e3cf0b1371b515265293d6d9
apache/libcloud
null
null
Replace deprecated lockfile library used by local storage drivers with fasteners library.
[ { "change_type": "MODIFY", "diff": "@@ -57,6 +57,13 @@ Compute\n (GITHUB-1495)\n [Miguel Caballer - @micafer]\n \n+Storage\n+~~~~~~~\n+\n+- Deprecated ``lockfile`` library which is used by the Local Storage driver has\n+ been replaced with ``fasteners`` library.\n+ [Tomaz Muraus - @Kami]\n+\n DNS\n ~~~\n \n", "new_path": "CHANGES.rst", "old_path": "CHANGES.rst" }, { "change_type": "MODIFY", "diff": "@@ -18,7 +18,7 @@ dependencies installed:\n * ``tox`` (``pip install tox``) - you only need this library if you want to\n use tox to run the tests with all the supported Python versions\n * ``mock`` (``pip install mock``)\n-* ``lockfile`` (``pip install lockfile``) - only used in the local storage\n+* ``fasteners`` (``pip install fasteners``) - only used in the local storage\n driver\n * ``coverage`` (``pip install coverage``) - you only need this library if you\n want to generate a test coverage report\n", "new_path": "docs/testing.rst", "old_path": "docs/testing.rst" }, { "change_type": "MODIFY", "diff": "@@ -21,14 +21,17 @@ from __future__ import with_statement\n \n import errno\n import os\n+import time\n import shutil\n+import tempfile\n+import threading\n+from hashlib import sha256\n \n try:\n- import lockfile\n- from lockfile import LockTimeout, mkdirlockfile\n+ import fasteners\n except ImportError:\n- raise ImportError('Missing lockfile dependency, you can install it '\n- 'using pip: pip install lockfile')\n+ raise ImportError('Missing fasteners dependency, you can install it '\n+ 'using pip: pip install fasteners')\n \n from libcloud.utils.files import read_in_chunks\n from libcloud.utils.files import exhaust_iterator\n@@ -51,19 +54,47 @@ class LockLocalStorage(object):\n \"\"\"\n A class to help in locking a local path before being updated\n \"\"\"\n+\n def __init__(self, path):\n self.path = path\n- self.lock = mkdirlockfile.MkdirLockFile(self.path, threaded=True)\n+\n+ self.ipc_lock_path = os.path.join(tempfile.gettempdir(), \"%s.lock\" % (\n+ sha256(path.encode(\"utf-8\")).hexdigest()))\n+\n+ # NOTE: fasteners.InterProcess lock has no guarantees regards usage by\n+ # multiple threads in a single process which means we also need to\n+ # use threading.lock for that purpose\n+ self.thread_lock = threading.Lock()\n+ self.ipc_lock = fasteners.InterProcessLock(self.ipc_lock_path)\n \n def __enter__(self):\n- try:\n- self.lock.acquire(timeout=0.1)\n- except LockTimeout:\n- raise LibcloudError('Lock timeout')\n+ lock_acquire_timeout = 5\n+ start_time = int(time.time())\n+ end_time = start_time + lock_acquire_timeout\n+\n+ while int(time.time()) < end_time:\n+ success = self.thread_lock.acquire(blocking=True)\n+\n+ if success:\n+ break\n+\n+ if not success:\n+ raise LibcloudError(\"Failed to acquire thread lock for path %s \"\n+ \"in 5 seconds\" % (self.path))\n+\n+ success = self.ipc_lock.acquire(blocking=True, timeout=5)\n+\n+ if not success:\n+ raise LibcloudError(\"Failed to acquire IPC lock (%s) for path %s \"\n+ \"in 5 seconds\" %\n+ (self.ipc_lock_path, self.path))\n \n def __exit__(self, type, value, traceback):\n- if self.lock.is_locked():\n- self.lock.release()\n+ if self.thread_lock.locked():\n+ self.thread_lock.release()\n+\n+ if self.ipc_lock.exists():\n+ self.ipc_lock.release()\n \n if value is not None:\n raise value\n@@ -314,7 +345,6 @@ class LocalStorageDriver(StorageDriver):\n \"\"\"\n \n path = self.get_container_cdn_url(container)\n- lockfile.MkdirFileLock(path, threaded=True)\n \n with LockLocalStorage(path):\n self._make_path(path)\n", "new_path": 
"libcloud/storage/drivers/local.py", "old_path": "libcloud/storage/drivers/local.py" }, { "change_type": "MODIFY", "diff": "@@ -36,11 +36,10 @@ from libcloud.utils.files import exhaust_iterator\n try:\n from libcloud.storage.drivers.local import LocalStorageDriver\n from libcloud.storage.drivers.local import LockLocalStorage\n- from lockfile import LockTimeout\n+ import fasteners\n except ImportError:\n- print('lockfile library is not available, skipping local_storage tests...')\n+ print('fasteners library is not available, skipping local_storage tests...')\n LocalStorageDriver = None\n- LockTimeout = None\n \n \n class LocalTests(unittest.TestCase):\n@@ -532,14 +531,6 @@ class LocalTests(unittest.TestCase):\n container.delete()\n self.remove_tmp_file(tmppath)\n \n- @mock.patch(\"lockfile.mkdirlockfile.MkdirLockFile.acquire\",\n- mock.MagicMock(side_effect=LockTimeout))\n- def test_proper_lockfile_imports(self):\n- # LockLocalStorage was previously using an un-imported exception\n- # in its __enter__ method, so the following would raise a NameError.\n- lls = LockLocalStorage(\"blah\")\n- self.assertRaises(LibcloudError, lls.__enter__)\n-\n \n if not LocalStorageDriver:\n class LocalTests(unittest.TestCase): # NOQA\n", "new_path": "libcloud/test/storage/test_local.py", "old_path": "libcloud/test/storage/test_local.py" }, { "change_type": "MODIFY", "diff": "@@ -6,7 +6,7 @@ skipsdist = true\n passenv = TRAVIS TRAVIS_JOB_ID TRAVIS_BRANCH\n deps =\n -r{toxinidir}/requirements-tests.txt\n- lockfile\n+ fasteners\n libvirt-python==5.9.0\n setuptools==42.0.2\n basepython =\n@@ -25,7 +25,7 @@ whitelist_externals = cp\n [testenv:py3.8-windows]\n deps =\n -r{toxinidir}/requirements-tests.txt\n- lockfile\n+ fasteners\n setuptools==42.0.2\n \n [testenv:py3.7-dist]\n@@ -73,7 +73,7 @@ commands = bash -c \"pip show requests && exit 1 || exit 0\"\n \n [testenv:docs]\n deps = pyopenssl\n- lockfile\n+ fasteners\n rstcheck\n changedir = docs\n commands = pip install sphinx~=1.6.0\n@@ -87,7 +87,7 @@ commands = pip install sphinx~=1.6.0\n # Note: We don't build API docs on Travis since it causes build failures because\n # those API docs files are not included anywhere.\n deps = pyopenssl\n- lockfile\n+ fasteners\n rstcheck\n changedir = docs\n commands = pip install sphinx~=1.6.0\n@@ -102,7 +102,7 @@ commands = pip install sphinx~=1.6.0\n basepython: python3.7\n deps = typing\n pyopenssl\n- lockfile\n+ fasteners\n rstcheck\n requests\n commands = python ./contrib/generate_provider_feature_matrix_table.py\n@@ -159,7 +159,7 @@ commands =\n [testenv:pylint]\n deps = -r{toxinidir}/requirements-tests.txt\n bottle\n- lockfile\n+ fasteners\n paramiko==2.7.1\n pysphere\n setenv =\n@@ -177,7 +177,7 @@ commands = pylint -E --load-plugins=pylint_plugins.driver_class --rcfile=./.pyli\n \n [testenv:lint]\n deps = -r{toxinidir}/requirements-tests.txt\n- lockfile\n+ fasteners\n rstcheck\n \n commands = flake8 libcloud/\n@@ -210,7 +210,7 @@ deps =\n pyopenssl\n python-dateutil\n libvirt-python==5.9.0\n- lockfile\n+ fasteners\n set-env =\n commands = cp libcloud/test/secrets.py-dist libcloud/test/secrets.py\n coverage run --source=libcloud setup.py test\n@@ -222,7 +222,7 @@ deps =\n paramiko==2.7.1\n pyopenssl\n libvirt-python==5.9.0\n- lockfile\n+ fasteners\n set-env =\n commands = cp libcloud/test/secrets.py-dist libcloud/test/secrets.py\n coverage run --source=libcloud setup.py test\n", "new_path": "tox.ini", "old_path": "tox.ini" } ]
b76ed0db81b3123ede5dc5e5f1bddf36336f3722
apache/libcloud
null
null
Add tests which verify that the OpenStack driver can be instantiated with all the supported auth versions. NOTE: Those tests will fail right now due to regressions introduced recently which break auth for some versions.
[ { "change_type": "MODIFY", "diff": "@@ -39,6 +39,7 @@ from libcloud.utils.py3 import u\n from libcloud.common.base import LibcloudConnection\n from libcloud.common.exceptions import BaseHTTPError\n from libcloud.common.openstack_identity import OpenStackAuthenticationCache\n+from libcloud.common.openstack_identity import AUTH_VERSIONS_WITH_EXPIRES\n from libcloud.common.types import (\n InvalidCredsError,\n MalformedResponseError,\n@@ -3955,6 +3956,54 @@ class OpenStack_2_0_MockHttp(OpenStack_1_1_MockHttp):\n return (httplib.UNAUTHORIZED, \"\", {}, httplib.responses[httplib.UNAUTHORIZED])\n \n \n+class OpenStack_AllAuthVersions_MockHttp(MockHttp):\n+ def __init__(self, *args, **kwargs):\n+ super(OpenStack_AllAuthVersions_MockHttp, self).__init__(*args, **kwargs)\n+\n+ # Lazy import to avoid cyclic depedency issue\n+ from libcloud.test.common.test_openstack_identity import OpenStackIdentity_2_0_MockHttp\n+ from libcloud.test.common.test_openstack_identity import OpenStackIdentity_3_0_MockHttp\n+\n+ self.mock_http = OpenStackMockHttp(*args, **kwargs)\n+ self.mock_http_1_1 = OpenStack_1_1_MockHttp(*args, **kwargs)\n+ self.mock_http_2_0 = OpenStack_2_0_MockHttp(*args, **kwargs)\n+ self.mock_http_2_0_identity = OpenStackIdentity_2_0_MockHttp(*args, **kwargs)\n+ self.mock_http_3_0_identity = OpenStackIdentity_3_0_MockHttp(*args, **kwargs)\n+\n+ def _v1_0_slug_servers_detail(self, method, url, body, headers):\n+ return self.mock_http_1_1._v1_1_slug_servers_detail(method=method, url=url, body=body, headers=headers)\n+ return res\n+\n+ def _v1_1_auth(self, method, url, body, headers):\n+ return self.mock_http._v1_1_auth(method=method, url=url, body=body, headers=headers)\n+\n+ def _v2_0_tokens(self, method, url, body, headers):\n+ return self.mock_http_2_0._v2_0_tokens(method=method, url=url, body=body, headers=headers)\n+\n+ def _v2_1337_servers_detail(self, method, url, body, headers):\n+ return self.mock_http_2_0._v2_1337_servers_detail(method=method, url=url, body=body, headers=headers)\n+\n+ def _v2_0_tenants(self, method, url, body, headers):\n+ return self.mock_http_2_0_identity._v2_0_tenants(method=method, url=url, body=body, headers=headers)\n+\n+ def _v2_9c4693dce56b493b9b83197d900f7fba_servers_detail(self, method, url, body, headers):\n+ return self.mock_http_1_1._v1_1_slug_servers_detail(method=method, url=url, body=body, headers=headers)\n+\n+ def _v3_OS_FEDERATION_identity_providers_user_name_protocols_tenant_name_auth(\n+ self, method, url, body, headers\n+ ):\n+ return self.mock_http_3_0_identity._v3_OS_FEDERATION_identity_providers_test_user_id_protocols_test_tenant_auth(method=method, url=url, body=body, headers=headers)\n+\n+ def _v3_auth_tokens(self, method, url, body, headers):\n+ return self.mock_http_2_0._v3_auth_tokens(method=method, url=url, body=body, headers=headers)\n+\n+ def _v3_0_auth_tokens(self, method, url, body, headers):\n+ return self.mock_http_3_0_identity._v3_0_auth_tokens(method=method, url=url, body=body, headers=headers)\n+\n+ def _v3_auth_projects(self, method, url, body, headers):\n+ return self.mock_http_3_0_identity._v3_auth_projects(method=method, url=url, body=body, headers=headers)\n+\n+\n class OpenStack_1_1_Auth_2_0_Tests(OpenStack_1_1_Tests):\n driver_args = OPENSTACK_PARAMS + (\"1.1\",)\n driver_kwargs = {\"ex_force_auth_version\": \"2.0\"}\n@@ -3989,6 +4038,52 @@ class OpenStack_1_1_Auth_2_0_Tests(OpenStack_1_1_Tests):\n )\n \n \n+class OpenStack_AuthVersions_Tests(unittest.TestCase):\n+\n+ def setUp(self):\n+ # monkeypatch get_endpoint because 
the base openstack driver doesn't actually\n+ # work with old devstack but this class/tests are still used by the rackspace\n+ # driver\n+ def get_endpoint(*args, **kwargs):\n+ return \"https://servers.api.rackspacecloud.com/v1.0/slug\"\n+\n+ OpenStack_1_1_NodeDriver.connectionCls.get_endpoint = get_endpoint\n+\n+ def test_ex_force_auth_version_all_possible_values(self):\n+ \"\"\"\n+ Test case which verifies that the driver can be correctly instantiated using all the\n+ supported API versions.\n+ \"\"\"\n+ OpenStack_1_1_NodeDriver.connectionCls.conn_class = OpenStack_AllAuthVersions_MockHttp\n+ OpenStackMockHttp.type = None\n+ OpenStack_1_1_MockHttp.type = None\n+ OpenStack_2_0_MockHttp.type = None\n+\n+ cls = get_driver(Provider.OPENSTACK)\n+\n+ for auth_version in AUTH_VERSIONS_WITH_EXPIRES:\n+ driver_kwargs = {}\n+\n+ if auth_version == \"1.1\":\n+ # 1.1 is old and deprecated so we skip it\n+ pass\n+\n+ user_id = OPENSTACK_PARAMS[0]\n+ key = OPENSTACK_PARAMS[1]\n+\n+ if auth_version.startswith(\"3.x\"):\n+ driver_kwargs[\"ex_domina_name\"] = \"domain-name\"\n+ driver_kwargs[\"ex_force_service_region\"] = \"regionOne\"\n+ driver_kwargs[\"ex_tenant_name\"] = \"tenant-name\"\n+\n+ if auth_version == \"3.x_oidc_access_token\":\n+ key = \"test_key\"\n+\n+ driver = cls(user_id, key, ex_force_auth_url=\"http://x.y.z.y:5000\", ex_force_auth_version=auth_version, **driver_kwargs)\n+ nodes = driver.list_nodes()\n+ self.assertTrue(len(nodes) >= 1)\n+\n+\n class OpenStackMockAuthCache(OpenStackAuthenticationCache):\n def __init__(self):\n self.reset()\n", "new_path": "libcloud/test/compute/test_openstack.py", "old_path": "libcloud/test/compute/test_openstack.py" } ]
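Stripped of the mock HTTP plumbing, the new test effectively checks that the driver can be constructed for every entry in `AUTH_VERSIONS_WITH_EXPIRES`. A hedged sketch of the same loop against a live Keystone endpoint; credentials, URL and region are placeholders, and some auth versions need extra keyword arguments such as a tenant or domain name:

```python
# Sketch only: requires a reachable Keystone endpoint; values below are placeholders.
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.openstack_identity import AUTH_VERSIONS_WITH_EXPIRES

cls = get_driver(Provider.OPENSTACK)

for auth_version in AUTH_VERSIONS_WITH_EXPIRES:
    if auth_version == "1.1":
        continue  # old and deprecated, skipped in the test as well

    driver = cls(
        'user', 'secret',                            # placeholder credentials
        ex_force_auth_url='http://192.0.2.10:5000',  # placeholder Keystone URL
        ex_force_auth_version=auth_version,
        ex_tenant_name='tenant-name',
        ex_force_service_region='regionOne',
    )
    print(auth_version, len(driver.list_nodes()))
```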
a1b7520cfca410f773da868fcddffe971fedd6af
gammapy/gammapy
null
null
Add processing options for SkyModel and SkyDiffuseCube. Add options to choose whether or not psf and edisp are applied to SkyModel and SkyDiffuseCube.
[ { "change_type": "MODIFY", "diff": "@@ -250,7 +250,9 @@ class MapDataset(Dataset):\n evaluator = self._evaluators.get(model.name)\n \n if evaluator is None:\n- evaluator = MapEvaluator(model=model, evaluation_mode=self.evaluation_mode)\n+ evaluator = MapEvaluator(\n+ model=model, evaluation_mode=self.evaluation_mode\n+ )\n self._evaluators[model.name] = evaluator\n \n # if the model component drifts out of its support the evaluator has\n@@ -928,9 +930,7 @@ class MapDataset(Dataset):\n kwargs[\"exposure\"] = self.exposure.cutout(**cutout_kwargs)\n \n if self.background_model is not None:\n- kwargs[\"models\"] = self.background_model.cutout(\n- **cutout_kwargs, name=name\n- )\n+ kwargs[\"models\"] = self.background_model.cutout(**cutout_kwargs, name=name)\n \n if self.edisp is not None:\n kwargs[\"edisp\"] = self.edisp.cutout(**cutout_kwargs)\n@@ -1646,9 +1646,9 @@ class MapEvaluator:\n \"\"\"\n flux = self.compute_flux()\n npred = self.apply_exposure(flux)\n- if self.psf is not None:\n+ if self.psf is not None and self.model.processing[\"psf\"] == True:\n npred = self.apply_psf(npred)\n- if self.edisp is not None:\n+ if self.edisp is not None and self.model.processing[\"edisp\"] == True:\n npred = self.apply_edisp(npred)\n \n return npred\n", "new_path": "gammapy/cube/fit.py", "old_path": "gammapy/cube/fit.py" }, { "change_type": "MODIFY", "diff": "@@ -14,6 +14,8 @@ from .core import Model, Models\n class SkyModelBase(Model):\n \"\"\"Sky model base class\"\"\"\n \n+ processing = {\"psf\": 1, \"edisp\": 1}\n+\n def __add__(self, other):\n if isinstance(other, (Models, list)):\n return Models([self, *other])\n@@ -32,6 +34,13 @@ class SkyModelBase(Model):\n coords = geom.get_coord(frame=self.frame)\n return self(coords.lon, coords.lat, coords[\"energy\"])\n \n+ def _update_processing(self, processing):\n+ if processing in [None, \"None\"]:\n+ self.processing = {\"psf\": 0, \"edisp\": 0}\n+ else:\n+ for key in processing:\n+ self.processing[key] = processing[key]\n+\n \n class SkyModel(SkyModelBase):\n \"\"\"Sky model component.\n@@ -55,7 +64,12 @@ class SkyModel(SkyModelBase):\n tag = \"SkyModel\"\n \n def __init__(\n- self, spectral_model, spatial_model=None, temporal_model=None, name=None\n+ self,\n+ spectral_model,\n+ spatial_model=None,\n+ temporal_model=None,\n+ name=None,\n+ processing={},\n ):\n self.spatial_model = spatial_model\n self.spectral_model = spectral_model\n@@ -65,6 +79,7 @@ class SkyModel(SkyModelBase):\n self.__dict__.pop(\"_parameters\")\n \n self._name = make_name(name)\n+ self._update_processing(processing)\n \n @property\n def name(self):\n@@ -212,6 +227,9 @@ class SkyModel(SkyModelBase):\n if self.temporal_model is not None:\n data[\"temporal\"] = self.temporal_model.to_dict()\n \n+ if self.processing != {\"psf\": 1, \"edisp\": 1}:\n+ data[\"processing\"] = self.processing\n+\n return data\n \n @classmethod\n@@ -245,6 +263,7 @@ class SkyModel(SkyModelBase):\n spatial_model=spatial_model,\n spectral_model=spectral_model,\n temporal_model=temporal_model,\n+ processing=data.get(\"processing\", {}),\n )\n \n def __str__(self):\n@@ -312,6 +331,7 @@ class SkyDiffuseCube(SkyModelBase):\n interp_kwargs=None,\n name=None,\n filename=None,\n+ processing={},\n ):\n \n self._name = make_name(name)\n@@ -334,6 +354,7 @@ class SkyDiffuseCube(SkyModelBase):\n # remove this again\n self._cached_value = None\n self._cached_coordinates = (None, None, None)\n+ self._update_processing(processing)\n \n super().__init__(norm=norm, tilt=tilt, reference=reference)\n \n@@ -417,6 +438,8 @@ 
class SkyDiffuseCube(SkyModelBase):\n def from_dict(cls, data):\n model = cls.read(data[\"filename\"])\n model._update_from_dict(data)\n+ processing = data.get(\"processing\", {})\n+ model._update_processing(processing)\n return model\n \n def to_dict(self):\n@@ -427,6 +450,9 @@ class SkyDiffuseCube(SkyModelBase):\n \n # Move parameters at the end\n data[\"parameters\"] = data.pop(\"parameters\")\n+ if self.processing != {\"psf\": 1, \"edisp\": 1}:\n+ data[\"processing\"] = self.processing\n+\n return data\n \n def __str__(self):\n", "new_path": "gammapy/modeling/models/cube.py", "old_path": "gammapy/modeling/models/cube.py" }, { "change_type": "MODIFY", "diff": "@@ -103,9 +103,15 @@ class CountsSpectrum:\n region = None\n wcs = None\n if hdu3 in hdulist:\n- region, wcs =cls.read_region_table(hdulist[hdu3])\n+ region, wcs = cls.read_region_table(hdulist[hdu3])\n \n- return cls(data=counts, energy_lo=ebounds[:-1], energy_hi=ebounds[1:], region=region, wcs=wcs)\n+ return cls(\n+ data=counts,\n+ energy_lo=ebounds[:-1],\n+ energy_hi=ebounds[1:],\n+ region=region,\n+ wcs=wcs,\n+ )\n \n @classmethod\n def read(cls, filename, hdu1=\"COUNTS\", hdu2=\"EBOUNDS\", hdu3=\"REGION\"):\n@@ -124,8 +130,6 @@ class CountsSpectrum:\n names = [\"CHANNEL\", \"COUNTS\"]\n meta = {\"name\": \"COUNTS\"}\n \n-\n-\n return Table([channel, counts], names=names, meta=meta)\n \n def _to_region_table(self):\n@@ -156,7 +160,7 @@ class CountsSpectrum:\n ebounds = energy_axis_to_ebounds(energy)\n \n region_table = self._to_region_table()\n- region_hdu = fits.BinTableHDU(region_table, name='REGION')\n+ region_hdu = fits.BinTableHDU(region_table, name=\"REGION\")\n return fits.HDUList([fits.PrimaryHDU(), hdu, ebounds, region_hdu])\n \n def write(self, filename, use_sherpa=False, **kwargs):\n@@ -439,7 +443,7 @@ class SpectrumEvaluator:\n def apply_edisp(self, true_counts):\n from . import CountsSpectrum\n \n- if self.edisp is not None:\n+ if self.edisp is not None and self.model.processing[\"edisp\"] == True:\n cts = self.edisp.apply(true_counts)\n e_reco = self.edisp.e_reco.edges\n else:\n", "new_path": "gammapy/spectrum/core.py", "old_path": "gammapy/spectrum/core.py" } ]
36d511791b9b9dd64c09844a09865e73dac650ba
gammapy/gammapy
null
null
Add shorter tags for models. Redefine most model tags as a list including shorter aliases to be used with model.create(tag) and in YAML serialization. By default the tag returned in model.to_dict() is the 0th tag, which is also the class name.
[ { "change_type": "MODIFY", "diff": "@@ -184,7 +184,7 @@ class SourceCatalogObjectFermiBase(SourceCatalogObject, abc.ABC):\n lat_err = semi_major / scale_1sigma\n lon_err = semi_minor / scale_1sigma / np.cos(d[\"DEJ2000\"])\n \n- if model.tag != \"TemplateSpatialModel\":\n+ if \"TemplateSpatialModel\" not in model.tag:\n model.parameters[\"lon_0\"].error = lon_err\n model.parameters[\"lat_0\"].error = lat_err\n model.phi_0 = phi_0\n", "new_path": "gammapy/catalog/fermi.py", "old_path": "gammapy/catalog/fermi.py" }, { "change_type": "MODIFY", "diff": "@@ -165,7 +165,7 @@ class TestFermi4FGLObject:\n \n def test_spatial_model(self):\n model = self.cat[\"4FGL J0000.3-7355\"].spatial_model()\n- assert model.tag == \"PointSpatialModel\"\n+ assert \"PointSpatialModel\" in model.tag\n assert model.frame == \"icrs\"\n p = model.parameters\n assert_allclose(p[\"lon_0\"].value, 0.0983)\n@@ -178,7 +178,7 @@ class TestFermi4FGLObject:\n assert_allclose(model.position.dec.value, pos_err.center.dec.value)\n \n model = self.cat[\"4FGL J1409.1-6121e\"].spatial_model()\n- assert model.tag == \"DiskSpatialModel\"\n+ assert \"DiskSpatialModel\" in model.tag\n assert model.frame == \"icrs\"\n p = model.parameters\n assert_allclose(p[\"lon_0\"].value, 212.294006)\n@@ -186,7 +186,7 @@ class TestFermi4FGLObject:\n assert_allclose(p[\"r_0\"].value, 0.7331369519233704)\n \n model = self.cat[\"4FGL J0617.2+2234e\"].spatial_model()\n- assert model.tag == \"GaussianSpatialModel\"\n+ assert \"GaussianSpatialModel\" in model.tag\n assert model.frame == \"icrs\"\n p = model.parameters\n assert_allclose(p[\"lon_0\"].value, 94.309998)\n@@ -194,7 +194,7 @@ class TestFermi4FGLObject:\n assert_allclose(p[\"sigma\"].value, 0.27)\n \n model = self.cat[\"4FGL J1443.0-6227e\"].spatial_model()\n- assert model.tag == \"TemplateSpatialModel\"\n+ assert \"TemplateSpatialModel\" in model.tag\n assert model.frame == \"fk5\"\n assert model.normalize is True\n \n@@ -305,14 +305,14 @@ class TestFermi3FGLObject:\n \n def test_spatial_model(self):\n model = self.cat[0].spatial_model()\n- assert model.tag == \"PointSpatialModel\"\n+ assert \"PointSpatialModel\" in model.tag\n assert model.frame == \"icrs\"\n p = model.parameters\n assert_allclose(p[\"lon_0\"].value, 0.0377)\n assert_allclose(p[\"lat_0\"].value, 65.751701)\n \n model = self.cat[122].spatial_model()\n- assert model.tag == \"GaussianSpatialModel\"\n+ assert \"GaussianSpatialModel\" in model.tag\n assert model.frame == \"icrs\"\n p = model.parameters\n assert_allclose(p[\"lon_0\"].value, 14.75)\n@@ -320,7 +320,7 @@ class TestFermi3FGLObject:\n assert_allclose(p[\"sigma\"].value, 1.35)\n \n model = self.cat[955].spatial_model()\n- assert model.tag == \"DiskSpatialModel\"\n+ assert \"DiskSpatialModel\" in model.tag\n assert model.frame == \"icrs\"\n p = model.parameters\n assert_allclose(p[\"lon_0\"].value, 128.287201)\n@@ -328,7 +328,7 @@ class TestFermi3FGLObject:\n assert_allclose(p[\"r_0\"].value, 0.91)\n \n model = self.cat[602].spatial_model()\n- assert model.tag == \"TemplateSpatialModel\"\n+ assert \"TemplateSpatialModel\" in model.tag\n assert model.frame == \"fk5\"\n assert model.normalize is True\n \n@@ -442,7 +442,7 @@ class TestFermi2FHLObject:\n \n def test_spatial_model(self):\n model = self.cat[221].spatial_model()\n- assert model.tag == \"PointSpatialModel\"\n+ assert \"PointSpatialModel\" in model.tag\n assert model.frame == \"icrs\"\n p = model.parameters\n assert_allclose(p[\"lon_0\"].value, 221.281998, rtol=1e-5)\n@@ -459,7 +459,7 @@ class 
TestFermi2FHLObject:\n assert_allclose(model.position.dec.value, pos_err.center.dec.value)\n \n model = self.cat[97].spatial_model()\n- assert model.tag == \"GaussianSpatialModel\"\n+ assert \"GaussianSpatialModel\" in model.tag\n assert model.frame == \"icrs\"\n p = model.parameters\n assert_allclose(p[\"lon_0\"].value, 94.309998, rtol=1e-5)\n@@ -467,7 +467,7 @@ class TestFermi2FHLObject:\n assert_allclose(p[\"sigma\"].value, 0.27)\n \n model = self.cat[134].spatial_model()\n- assert model.tag == \"DiskSpatialModel\"\n+ assert \"DiskSpatialModel\" in model.tag\n assert model.frame == \"icrs\"\n p = model.parameters\n assert_allclose(p[\"lon_0\"].value, 125.660004, rtol=1e-5)\n@@ -475,7 +475,7 @@ class TestFermi2FHLObject:\n assert_allclose(p[\"r_0\"].value, 0.37)\n \n model = self.cat[256].spatial_model()\n- assert model.tag == \"TemplateSpatialModel\"\n+ assert \"TemplateSpatialModel\" in model.tag\n assert model.frame == \"fk5\"\n assert model.normalize is True\n # TODO: have to check the extended template used for RX J1713,\n", "new_path": "gammapy/catalog/tests/test_fermi.py", "old_path": "gammapy/catalog/tests/test_fermi.py" }, { "change_type": "MODIFY", "diff": "@@ -106,7 +106,8 @@ class Model:\n \n def to_dict(self):\n \"\"\"Create dict for YAML serialisation\"\"\"\n- return {\"type\": self.tag, \"parameters\": self.parameters.to_dict()}\n+ tag = self.tag[0] if isinstance(self.tag, list) else self.tag\n+ return {\"type\": tag, \"parameters\": self.parameters.to_dict()}\n \n @classmethod\n def from_dict(cls, data):\n", "new_path": "gammapy/modeling/models/core.py", "old_path": "gammapy/modeling/models/core.py" }, { "change_type": "MODIFY", "diff": "@@ -210,7 +210,7 @@ class PointSpatialModel(SpatialModel):\n Center position coordinate frame\n \"\"\"\n \n- tag = \"PointSpatialModel\"\n+ tag = [\"PointSpatialModel\", \"PS\"]\n lon_0 = Parameter(\"lon_0\", \"0 deg\")\n lat_0 = Parameter(\"lat_0\", \"0 deg\", min=-90, max=90)\n \n@@ -281,7 +281,7 @@ class GaussianSpatialModel(SpatialModel):\n Center position coordinate frame\n \"\"\"\n \n- tag = \"GaussianSpatialModel\"\n+ tag = [\"GaussianSpatialModel\", \"GaussianSpatial\"]\n \n lon_0 = Parameter(\"lon_0\", \"0 deg\")\n lat_0 = Parameter(\"lat_0\", \"0 deg\", min=-90, max=90)\n@@ -350,7 +350,7 @@ class DiskSpatialModel(SpatialModel):\n Center position coordinate frame\n \"\"\"\n \n- tag = \"DiskSpatialModel\"\n+ tag = [\"DiskSpatialModel\", \"disk\"]\n lon_0 = Parameter(\"lon_0\", \"0 deg\")\n lat_0 = Parameter(\"lat_0\", \"0 deg\", min=-90, max=90)\n r_0 = Parameter(\"r_0\", \"1 deg\", min=0)\n@@ -436,7 +436,7 @@ class ShellSpatialModel(SpatialModel):\n Center position coordinate frame\n \"\"\"\n \n- tag = \"ShellSpatialModel\"\n+ tag = [\"ShellSpatialModel\", \"shell\"]\n lon_0 = Parameter(\"lon_0\", \"0 deg\")\n lat_0 = Parameter(\"lat_0\", \"0 deg\", min=-90, max=90)\n radius = Parameter(\"radius\", \"1 deg\")\n@@ -489,7 +489,7 @@ class ConstantSpatialModel(SpatialModel):\n Value\n \"\"\"\n \n- tag = \"ConstantSpatialModel\"\n+ tag = [\"ConstantSpatialModel\", \"ConstantSpatial\"]\n value = Parameter(\"value\", \"1 sr-1\", frozen=True)\n \n frame = \"icrs\"\n@@ -546,7 +546,7 @@ class TemplateSpatialModel(SpatialModel):\n Default arguments are {'interp': 'linear', 'fill_value': 0}.\n \"\"\"\n \n- tag = \"TemplateSpatialModel\"\n+ tag = [\"TemplateSpatialModel\", \"TemplateSpatial\"]\n norm = Parameter(\"norm\", 1)\n \n def __init__(\n", "new_path": "gammapy/modeling/models/spatial.py", "old_path": 
"gammapy/modeling/models/spatial.py" }, { "change_type": "MODIFY", "diff": "@@ -389,7 +389,7 @@ class ConstantSpectralModel(SpectralModel):\n :math:`k`\n \"\"\"\n \n- tag = \"ConstantSpectralModel\"\n+ tag = [\"ConstantSpectralModel\", \"ConstantSpectral\"]\n const = Parameter(\"const\", \"1e-12 cm-2 s-1 TeV-1\")\n \n @staticmethod\n@@ -404,7 +404,7 @@ class CompoundSpectralModel(SpectralModel):\n For more information see :ref:`compound-spectral-model`.\n \"\"\"\n \n- tag = \"CompoundSpectralModel\"\n+ tag = [\"CompoundSpectralModel\", \"CompoundSpectral\"]\n \n def __init__(self, model1, model2, operator):\n self.model1 = model1\n@@ -452,7 +452,7 @@ class PowerLawSpectralModel(SpectralModel):\n :math:`E_0`\n \"\"\"\n \n- tag = \"PowerLawSpectralModel\"\n+ tag = [\"PowerLawSpectralModel\", \"PL\"]\n index = Parameter(\"index\", 2.0)\n amplitude = Parameter(\"amplitude\", \"1e-12 cm-2 s-1 TeV-1\")\n reference = Parameter(\"reference\", \"1 TeV\", frozen=True)\n@@ -561,7 +561,7 @@ class PowerLaw2SpectralModel(SpectralModel):\n Upper energy limit :math:`E_{0, max}`.\n \"\"\"\n \n- tag = \"PowerLaw2SpectralModel\"\n+ tag = [\"PowerLaw2SpectralModel\", \"PL2\"]\n \n amplitude = Parameter(\"amplitude\", \"1e-12 cm-2 s-1\")\n index = Parameter(\"index\", 2)\n@@ -675,7 +675,7 @@ class SmoothBrokenPowerLawSpectralModel(SpectralModel):\n :math:`\\beta`\n \"\"\"\n \n- tag = \"SmoothBrokenPowerLawSpectralModel\"\n+ tag = [\"SmoothBrokenPowerLawSpectralModel\", \"SBPL\"]\n index1 = Parameter(\"index1\", 2.0)\n index2 = Parameter(\"index2\", 2.0)\n amplitude = Parameter(\"amplitude\", \"1e-12 cm-2 s-1 TeV-1\")\n@@ -711,7 +711,7 @@ class ExpCutoffPowerLawSpectralModel(SpectralModel):\n :math:`\\alpha`\n \"\"\"\n \n- tag = \"ExpCutoffPowerLawSpectralModel\"\n+ tag = [\"ExpCutoffPowerLawSpectralModel\", \"ECPL\"]\n \n index = Parameter(\"index\", 1.5)\n amplitude = Parameter(\"amplitude\", \"1e-12 cm-2 s-1 TeV-1\")\n@@ -764,7 +764,7 @@ class ExpCutoffPowerLaw3FGLSpectralModel(SpectralModel):\n :math:`E_{C}`\n \"\"\"\n \n- tag = \"ExpCutoffPowerLaw3FGLSpectralModel\"\n+ tag = [\"ExpCutoffPowerLaw3FGLSpectralModel\", \"ECPL3FGL\"]\n index = Parameter(\"index\", 1.5)\n amplitude = Parameter(\"amplitude\", \"1e-12 cm-2 s-1 TeV-1\")\n reference = Parameter(\"reference\", \"1 TeV\", frozen=True)\n@@ -803,7 +803,7 @@ class SuperExpCutoffPowerLaw3FGLSpectralModel(SpectralModel):\n :math:`E_{C}`\n \"\"\"\n \n- tag = \"SuperExpCutoffPowerLaw3FGLSpectralModel\"\n+ tag = [\"SuperExpCutoffPowerLaw3FGLSpectralModel\", \"SECPL3FGL\"]\n amplitude = Parameter(\"amplitude\", \"1e-12 cm-2 s-1 TeV-1\")\n reference = Parameter(\"reference\", \"1 TeV\", frozen=True)\n ecut = Parameter(\"ecut\", \"10 TeV\")\n@@ -838,7 +838,7 @@ class SuperExpCutoffPowerLaw4FGLSpectralModel(SpectralModel):\n internally assumes unit of :math:`[E_0]` power :math:`-\\Gamma_2`\n \"\"\"\n \n- tag = \"SuperExpCutoffPowerLaw4FGLSpectralModel\"\n+ tag = [\"SuperExpCutoffPowerLaw4FGLSpectralModel\", \"SECPL4FGL\"]\n amplitude = Parameter(\"amplitude\", \"1e-12 cm-2 s-1 TeV-1\")\n reference = Parameter(\"reference\", \"1 TeV\", frozen=True)\n expfactor = Parameter(\"expfactor\", \"1e-2\")\n@@ -874,7 +874,7 @@ class LogParabolaSpectralModel(SpectralModel):\n :math:`\\beta`\n \"\"\"\n \n- tag = \"LogParabolaSpectralModel\"\n+ tag = [\"LogParabolaSpectralModel\", \"LP\", \"logpar\"]\n amplitude = Parameter(\"amplitude\", \"1e-12 cm-2 s-1 TeV-1\")\n reference = Parameter(\"reference\", \"10 TeV\", frozen=True)\n alpha = Parameter(\"alpha\", 2)\n@@ -931,7 
+931,7 @@ class TemplateSpectralModel(SpectralModel):\n Meta information, meta['filename'] will be used for serialization\n \"\"\"\n \n- tag = \"TemplateSpectralModel\"\n+ tag = [\"TemplateSpectralModel\", \"TemplateSpectral\"]\n norm = Parameter(\"norm\", 1, unit=\"\")\n tilt = Parameter(\"tilt\", 0, unit=\"\", frozen=True)\n reference = Parameter(\"reference\", \"1 TeV\", frozen=True)\n@@ -1017,7 +1017,7 @@ class TemplateSpectralModel(SpectralModel):\n \n def to_dict(self):\n return {\n- \"type\": self.tag,\n+ \"type\": self.tag[0],\n \"parameters\": self.parameters.to_dict(),\n \"energy\": {\n \"data\": self.energy.data.tolist(),\n@@ -1048,7 +1048,7 @@ class ScaleSpectralModel(SpectralModel):\n Multiplicative norm factor for the model value.\n \"\"\"\n \n- tag = \"ScaleSpectralModel\"\n+ tag = [\"ScaleSpectralModel\", \"ScaleSpectral\"]\n norm = Parameter(\"norm\", 1, unit=\"\")\n \n def __init__(self, model, norm=norm.quantity):\n@@ -1351,7 +1351,7 @@ class NaimaSpectralModel(SpectralModel):\n for now this is used only for synchrotron self-compton model\n \"\"\"\n \n- tag = \"NaimaSpectralModel\"\n+ tag = [\"NaimaSpectralModel\", \"NaimaSpectral\"]\n \n def __init__(\n self, radiative_model, distance=1.0 * u.kpc, seed=None, nested_models=None\n@@ -1495,7 +1495,7 @@ class GaussianSpectralModel(SpectralModel):\n :math:`\\sigma`\n \"\"\"\n \n- tag = \"GaussianSpectralModel\"\n+ tag = [\"GaussianSpectralModel\", \"GaussianSpectral\"]\n norm = Parameter(\"norm\", 1e-12 * u.Unit(\"cm-2 s-1\"))\n mean = Parameter(\"mean\", 1 * u.TeV)\n sigma = Parameter(\"sigma\", 2 * u.TeV)\n", "new_path": "gammapy/modeling/models/spectral.py", "old_path": "gammapy/modeling/models/spectral.py" }, { "change_type": "MODIFY", "diff": "@@ -141,7 +141,7 @@ def test_model_create():\n spectral_model = Model.create(\n \"PowerLaw2SpectralModel\", amplitude=\"1e-10 cm-2 s-1\", index=3\n )\n- assert spectral_model.tag == \"PowerLaw2SpectralModel\"\n+ assert \"PowerLaw2SpectralModel\" in spectral_model.tag\n assert_allclose(spectral_model.index.value, 3)\n \n \n", "new_path": "gammapy/modeling/models/tests/test_core.py", "old_path": "gammapy/modeling/models/tests/test_core.py" }, { "change_type": "MODIFY", "diff": "@@ -31,8 +31,8 @@ def test_dict_to_skymodels():\n assert model0.name == \"background_irf\"\n \n model0 = models[1]\n- assert model0.spectral_model.tag == \"ExpCutoffPowerLawSpectralModel\"\n- assert model0.spatial_model.tag == \"PointSpatialModel\"\n+ assert \"ExpCutoffPowerLawSpectralModel\" in model0.spectral_model.tag\n+ assert \"PointSpatialModel\" in model0.spatial_model.tag\n \n pars0 = model0.parameters\n assert pars0[\"index\"].value == 2.1\n@@ -59,9 +59,11 @@ def test_dict_to_skymodels():\n assert np.isnan(pars0[\"lambda_\"].max)\n \n model1 = models[2]\n- assert model1.spectral_model.tag == \"PowerLawSpectralModel\"\n- assert model1.spatial_model.tag == \"DiskSpatialModel\"\n- assert model1.temporal_model.tag == \"LightCurveTemplateTemporalModel\"\n+ assert \"PL\" in model1.spectral_model.tag\n+ assert \"PowerLawSpectralModel\" in model1.spectral_model.tag\n+ assert \"DiskSpatialModel\" in model1.spatial_model.tag\n+ assert \"disk\" in model1.spatial_model.tag\n+ assert \"LightCurveTemplateTemporalModel\" in model1.temporal_model.tag\n \n pars1 = model1.parameters\n assert pars1[\"index\"].value == 2.2\n@@ -82,8 +84,8 @@ def test_dict_to_skymodels():\n )\n assert model2.spectral_model.values.unit == \"1 / (cm2 MeV s sr)\"\n \n- assert model2.spectral_model.tag == \"TemplateSpectralModel\"\n- assert 
model2.spatial_model.tag == \"TemplateSpatialModel\"\n+ assert \"TemplateSpectralModel\" in model2.spectral_model.tag\n+ assert \"TemplateSpatialModel\" in model2.spatial_model.tag\n \n assert model2.spatial_model.parameters[\"norm\"].value == 1.0\n assert not model2.spatial_model.normalize\n@@ -129,7 +131,7 @@ def test_absorption_io(tmp_path):\n assert new_model.redshift.value == 0.5\n assert new_model.alpha_norm.name == \"alpha_norm\"\n assert new_model.alpha_norm.value == 1\n- assert new_model.spectral_model.tag == \"PowerLawSpectralModel\"\n+ assert \"PowerLawSpectralModel\" in new_model.spectral_model.tag\n assert_allclose(new_model.absorption.energy, dominguez.energy)\n assert_allclose(new_model.absorption.param, dominguez.param)\n assert len(new_model.parameters) == 5\n@@ -202,12 +204,16 @@ def make_all_models():\n \n @pytest.mark.parametrize(\"model_class\", MODEL_REGISTRY)\n def test_all_model_classes(model_class):\n- assert model_class.tag == model_class.__name__\n+ if isinstance(model_class.tag, list):\n+ assert model_class.tag[0] == model_class.__name__\n+ else:\n+ assert model_class.tag == model_class.__name__\n \n \n @pytest.mark.parametrize(\"model\", make_all_models())\n def test_all_model_instances(model):\n- assert model.tag == model.__class__.__name__\n+ tag = model.tag[0] if isinstance(model.tag, list) else model.tag\n+ assert tag == model.__class__.__name__\n \n \n @requires_data()\n", "new_path": "gammapy/modeling/models/tests/test_io.py", "old_path": "gammapy/modeling/models/tests/test_io.py" }, { "change_type": "MODIFY", "diff": "@@ -1,5 +1,4 @@\n # Licensed under a 3-clause BSD style license - see LICENSE.rst\n-\n __all__ = [\"Registry\"]\n \n \n@@ -8,15 +7,15 @@ class Registry(list):\n \n def get_cls(self, tag):\n for cls in self:\n- if hasattr(cls, \"tag\") and cls.tag == tag:\n+ if hasattr(cls, \"tag\") and tag in cls.tag:\n return cls\n raise KeyError(f\"No model found with tag: {tag!r}\")\n \n def __str__(self):\n info = \"Registry\\n\"\n info += \"--------\\n\\n\"\n-\n- len_max = max([len(_.tag) for _ in self])\n+ tags = [_.tag[0] if isinstance(_.tag, list) else _.tag for _ in self]\n+ len_max = max([len(tag) for tag in tags])\n \n for item in self:\n info += f\"\\t{item.tag:{len_max}s}: {item.__name__}\\n\"\n", "new_path": "gammapy/utils/registry.py", "old_path": "gammapy/utils/registry.py" } ]
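A small usage sketch of the alias mechanism introduced above (parameter values are arbitrary): the registry now matches a tag if it appears anywhere in the class's tag list, while serialisation keeps using the first entry.

```python
from gammapy.modeling.models import Model

# The short alias and the full class name resolve to the same model class.
pl_short = Model.create('PL', index=2.3, amplitude='1e-12 cm-2 s-1 TeV-1')
pl_full = Model.create('PowerLawSpectralModel', index=2.3)

print(pl_short.tag)                # ['PowerLawSpectralModel', 'PL']
print(pl_short.to_dict()['type'])  # 'PowerLawSpectralModel' (tag[0] is written to YAML)
```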
cbd77d0db9c0b2ff720d1fb2d0cd2ad19ee1a369
gammapy/gammapy
null
null
Add option to remove default output in models YAML file. Add option full_output (True by default) to model.to_yaml() and .to_dict(). Switching it to False removes the entries for min, max, frozen and error if they are the same as the class defaults or NaN.
[ { "change_type": "MODIFY", "diff": "@@ -105,10 +105,20 @@ class Model:\n \"\"\"A deep copy.\"\"\"\n return copy.deepcopy(self)\n \n- def to_dict(self):\n+ def to_dict(self, full_output=True):\n \"\"\"Create dict for YAML serialisation\"\"\"\n tag = self.tag[0] if isinstance(self.tag, list) else self.tag\n- return {\"type\": tag, \"parameters\": self.parameters.to_dict()}\n+ params = self.parameters.to_dict()\n+\n+ if full_output is False:\n+ base = self.__class__\n+ names = self.parameters.names\n+ for k, name in enumerate(names):\n+ init = base.__dict__[name].to_dict()\n+ for item in [\"min\", \"max\", \"frozen\", \"error\"]:\n+ if params[k][item] == init[item] or np.isnan(init[item]):\n+ del params[k][item]\n+ return {\"type\": tag, \"parameters\": params}\n \n @classmethod\n def from_dict(cls, data):\n@@ -281,7 +291,7 @@ class Models(collections.abc.MutableSequence):\n shared_register = _set_link(shared_register, model)\n return models\n \n- def write(self, path, overwrite=False, write_covariance=True):\n+ def write(self, path, overwrite=False, full_output=True, write_covariance=True):\n \"\"\"Write to YAML file.\n \n Parameters\n@@ -315,14 +325,14 @@ class Models(collections.abc.MutableSequence):\n \n path.write_text(self.to_yaml())\n \n- def to_yaml(self):\n+ def to_yaml(self, full_output=True):\n \"\"\"Convert to YAML string.\"\"\"\n- data = self.to_dict()\n+ data = self.to_dict(full_output)\n return yaml.dump(\n data, sort_keys=False, indent=4, width=80, default_flow_style=False\n )\n \n- def to_dict(self):\n+ def to_dict(self, full_output=True):\n \"\"\"Convert to dict.\"\"\"\n # update linked parameters labels\n params_list = []\n@@ -338,7 +348,7 @@ class Models(collections.abc.MutableSequence):\n \n models_data = []\n for model in self._models:\n- model_data = model.to_dict()\n+ model_data = model.to_dict(full_output)\n models_data.append(model_data)\n if self._covar_file is not None:\n return {\n", "new_path": "gammapy/modeling/models/core.py", "old_path": "gammapy/modeling/models/core.py" }, { "change_type": "MODIFY", "diff": "@@ -314,18 +314,18 @@ class SkyModel(SkyModelBase):\n \n return self.__class__(**kwargs)\n \n- def to_dict(self):\n+ def to_dict(self, full_output=True):\n \"\"\"Create dict for YAML serilisation\"\"\"\n data = {}\n data[\"name\"] = self.name\n data[\"type\"] = self.tag\n- data[\"spectral\"] = self.spectral_model.to_dict()\n+ data[\"spectral\"] = self.spectral_model.to_dict(full_output)\n \n if self.spatial_model is not None:\n- data[\"spatial\"] = self.spatial_model.to_dict()\n+ data[\"spatial\"] = self.spatial_model.to_dict(full_output)\n \n if self.temporal_model is not None:\n- data[\"temporal\"] = self.temporal_model.to_dict()\n+ data[\"temporal\"] = self.temporal_model.to_dict(full_output)\n \n if self.apply_irf != self._apply_irf_default:\n data[\"apply_irf\"] = self.apply_irf\n@@ -488,11 +488,11 @@ class BackgroundModel(Model):\n back_values = self.map.data * value\n return self.map.copy(data=back_values)\n \n- def to_dict(self):\n+ def to_dict(self, full_output=True):\n data = {}\n data[\"name\"] = self.name\n data[\"type\"] = self.tag\n- data[\"spectral\"] = self.spectral_model.to_dict()\n+ data[\"spectral\"] = self.spectral_model.to_dict(full_output)\n \n if self.filename is not None:\n data[\"filename\"] = self.filename\n", "new_path": "gammapy/modeling/models/cube.py", "old_path": "gammapy/modeling/models/cube.py" }, { "change_type": "MODIFY", "diff": "@@ -38,6 +38,7 @@ def compute_sigma_eff(lon_0, lat_0, lon, lat, phi, major_axis, e):\n \n 
class SpatialModel(Model):\n \"\"\"Spatial model base class.\"\"\"\n+\n _type = \"spatial\"\n \n def __init__(self, **kwargs):\n@@ -142,9 +143,9 @@ class SpatialModel(Model):\n data = values * geom.solid_angle()\n return Map.from_geom(geom=geom, data=data.value, unit=data.unit)\n \n- def to_dict(self):\n+ def to_dict(self, full_output=True):\n \"\"\"Create dict for YAML serilisation\"\"\"\n- data = super().to_dict()\n+ data = super().to_dict(full_output)\n data[\"frame\"] = self.frame\n data[\"parameters\"] = data.pop(\"parameters\")\n return data\n@@ -605,10 +606,10 @@ class ConstantSpatialModel(SpatialModel):\n evaluation_radius = None\n position = None\n \n- def to_dict(self):\n+ def to_dict(self, full_output=True):\n \"\"\"Create dict for YAML serilisation\"\"\"\n # redefined to ignore frame attribute from parent class\n- data = super().to_dict()\n+ data = super().to_dict(full_output)\n data.pop(\"frame\")\n data[\"parameters\"] = data.pop(\"parameters\")\n return data\n@@ -643,10 +644,10 @@ class ConstantFluxSpatialModel(SpatialModel):\n evaluation_radius = None\n position = None\n \n- def to_dict(self):\n+ def to_dict(self, full_output=True):\n \"\"\"Create dict for YAML serilisation\"\"\"\n # redefined to ignore frame attribute from parent class\n- data = super().to_dict()\n+ data = super().to_dict(full_output)\n data.pop(\"frame\")\n return data\n \n@@ -785,9 +786,9 @@ class TemplateSpatialModel(SpatialModel):\n m = Map.read(filename)\n return cls(m, normalize=normalize, filename=filename)\n \n- def to_dict(self):\n+ def to_dict(self, full_output=True):\n \"\"\"Create dict for YAML serilisation\"\"\"\n- data = super().to_dict()\n+ data = super().to_dict(full_output)\n data[\"filename\"] = self.filename\n data[\"normalize\"] = self.normalize\n data[\"unit\"] = str(self.map.unit)\n", "new_path": "gammapy/modeling/models/spatial.py", "old_path": "gammapy/modeling/models/spatial.py" }, { "change_type": "MODIFY", "diff": "@@ -57,6 +57,7 @@ def integrate_spectrum(func, emin, emax, ndecade=100, intervals=False):\n \n class SpectralModel(Model):\n \"\"\"Spectral model base class.\"\"\"\n+\n _type = \"spectral\"\n \n def __call__(self, energy):\n@@ -471,11 +472,11 @@ class CompoundSpectralModel(SpectralModel):\n val2 = self.model2(energy)\n return self.operator(val1, val2)\n \n- def to_dict(self):\n+ def to_dict(self, full_output=True):\n return {\n \"type\": self.tag[0],\n- \"model1\": self.model1.to_dict(),\n- \"model2\": self.model2.to_dict(),\n+ \"model1\": self.model1.to_dict(full_output),\n+ \"model2\": self.model2.to_dict(full_output),\n \"operator\": self.operator.__name__,\n }\n \n@@ -1249,7 +1250,7 @@ class TemplateSpectralModel(SpectralModel):\n \"\"\"Evaluate the model (static function).\"\"\"\n return self._evaluate((energy,), clip=True)\n \n- def to_dict(self):\n+ def to_dict(self, full_output=True):\n return {\n \"type\": self.tag[0],\n \"energy\": {\n@@ -1334,7 +1335,7 @@ class Absorption:\n points=(self.param, self.energy), values=self.data, **interp_kwargs\n )\n \n- def to_dict(self):\n+ def to_dict(self, full_output=True):\n if self.filename is None:\n return {\n \"type\": self.tag,\n@@ -1539,11 +1540,11 @@ class AbsorbedSpectralModel(SpectralModel):\n absorption = np.power(absorption, alpha_norm)\n return dnde * absorption\n \n- def to_dict(self):\n+ def to_dict(self, full_output=True):\n return {\n \"type\": self.tag,\n- \"base_model\": self.spectral_model.to_dict(),\n- \"absorption\": self.absorption.to_dict(),\n+ \"base_model\": 
self.spectral_model.to_dict(full_output),\n+ \"absorption\": self.absorption.to_dict(full_output),\n \"absorption_parameter\": {\"name\": \"redshift\", \"value\": self.redshift.value,},\n \"parameters\": Parameters([self.redshift, self.alpha_norm]).to_dict(),\n }\n", "new_path": "gammapy/modeling/models/spectral.py", "old_path": "gammapy/modeling/models/spectral.py" }, { "change_type": "MODIFY", "diff": "@@ -17,6 +17,7 @@ from .core import Model\n class TemporalModel(Model):\n \"\"\"Temporal model base class.\n evaluates on astropy.time.Time objects\"\"\"\n+\n _type = \"temporal\"\n \n def __call__(self, time):\n@@ -72,18 +73,18 @@ class TemporalModel(Model):\n axis\n \"\"\"\n \n-\n import matplotlib.pyplot as plt\n \n ax = plt.gca() if ax is None else ax\n t_min, t_max = time_range\n n_value = 100\n- delta = (t_max - t_min)\n+ delta = t_max - t_min\n times = t_min + delta * np.linspace(0, 1, n_value)\n val = self(times)\n ax.plot(times.mjd, val)\n return ax\n \n+\n class ConstantTemporalModel(TemporalModel):\n \"\"\"Constant temporal model.\"\"\"\n \n@@ -191,8 +192,6 @@ class ExpDecayTemporalModel(TemporalModel):\n return -t0 * value / self.time_sum(t_min, t_max)\n \n \n-\n-\n class GaussianTemporalModel(TemporalModel):\n r\"\"\"A Gaussian temporal profile\n \n@@ -434,6 +433,6 @@ class LightCurveTemplateTemporalModel(TemporalModel):\n def from_dict(cls, data):\n return cls.read(data[\"filename\"])\n \n- def to_dict(self, overwrite=False):\n+ def to_dict(self, full_output=True, overwrite=False):\n \"\"\"Create dict for YAML serilisation\"\"\"\n return {\"type\": self.tag[0], \"filename\": self.filename}\n", "new_path": "gammapy/modeling/models/temporal.py", "old_path": "gammapy/modeling/models/temporal.py" }, { "change_type": "MODIFY", "diff": "@@ -9,12 +9,14 @@ from gammapy.utils.testing import requires_data\n \n class MyModel(Model):\n \"\"\"Simple model example\"\"\"\n+\n x = Parameter(\"x\", 1, \"cm\")\n y = Parameter(\"y\", 2)\n \n \n class CoModel(Model):\n \"\"\"Compound model example\"\"\"\n+\n norm = Parameter(\"norm\", 42, \"cm\")\n \n def __init__(self, m1, m2, norm=norm.quantity):\n", "new_path": "gammapy/modeling/models/tests/test_core.py", "old_path": "gammapy/modeling/models/tests/test_core.py" }, { "change_type": "MODIFY", "diff": "@@ -59,10 +59,7 @@ def diffuse_model():\n )\n m.data += 42\n spatial_model = TemplateSpatialModel(m, normalize=False)\n- return SkyModel(\n- PowerLawNormSpectralModel(),\n- spatial_model\n- )\n+ return SkyModel(PowerLawNormSpectralModel(), spatial_model)\n \n \n @pytest.fixture(scope=\"session\")\n@@ -560,4 +557,4 @@ def test_fermi_isotropic():\n \n assert_allclose(flux.value, 1.463e-13, rtol=1e-3)\n assert flux.unit == \"MeV-1 cm-2 s-1 sr-1\"\n- assert isinstance(model.spectral_model, CompoundSpectralModel)\n\\ No newline at end of file\n+ assert isinstance(model.spectral_model, CompoundSpectralModel)\n", "new_path": "gammapy/modeling/models/tests/test_cube.py", "old_path": "gammapy/modeling/models/tests/test_cube.py" }, { "change_type": "MODIFY", "diff": "@@ -8,6 +8,7 @@ from astropy.utils.data import get_pkg_data_filename\n from gammapy.maps import Map, MapAxis\n from gammapy.modeling.models import (\n MODEL_REGISTRY,\n+ PowerLawSpectralModel,\n AbsorbedSpectralModel,\n Absorption,\n BackgroundModel,\n@@ -233,5 +234,15 @@ def test_missing_parameters():\n assert len(models[\"source1\"].spatial_model.parameters) == 6\n \n \n+def test_simplified_output():\n+ model = PowerLawSpectralModel()\n+ full = model.to_dict()\n+ simplified = 
model.to_dict(full_output=False)\n+ for k, name in enumerate(model.parameters.names):\n+ for item in [\"min\", \"max\", \"frozen\", \"error\"]:\n+ assert item in full[\"parameters\"][k]\n+ assert item not in simplified[\"parameters\"][k]\n+\n+\n def test_registries_print():\n print(MODEL_REGISTRY)\n", "new_path": "gammapy/modeling/models/tests/test_io.py", "old_path": "gammapy/modeling/models/tests/test_io.py" }, { "change_type": "MODIFY", "diff": "@@ -776,26 +776,26 @@ def test_integral_error_PowerLaw():\n emax = energy[1:]\n \n powerlaw = PowerLawSpectralModel()\n- powerlaw.parameters['index'].error = 0.4\n- powerlaw.parameters['amplitude'].error = 1e-13\n+ powerlaw.parameters[\"index\"].error = 0.4\n+ powerlaw.parameters[\"amplitude\"].error = 1e-13\n \n- flux, flux_error = powerlaw.integral_error(emin,emax)\n+ flux, flux_error = powerlaw.integral_error(emin, emax)\n \n- assert_allclose(flux.value[0]/1e-13, 5.0, rtol=0.1)\n- assert_allclose(flux_error.value[0]/1e-14, 8.546615432273905, rtol=0.01)\n+ assert_allclose(flux.value[0] / 1e-13, 5.0, rtol=0.1)\n+ assert_allclose(flux_error.value[0] / 1e-14, 8.546615432273905, rtol=0.01)\n \n \n def test_integral_error_ExpCutOffPowerLaw():\n energy = np.linspace(1 * u.TeV, 10 * u.TeV, 10)\n emin = energy[:-1]\n emax = energy[1:]\n- \n+\n exppowerlaw = ExpCutoffPowerLawSpectralModel()\n- exppowerlaw.parameters['index'].error = 0.4\n- exppowerlaw.parameters['amplitude'].error = 1e-13\n- exppowerlaw.parameters['lambda_'].error = 0.03\n- \n+ exppowerlaw.parameters[\"index\"].error = 0.4\n+ exppowerlaw.parameters[\"amplitude\"].error = 1e-13\n+ exppowerlaw.parameters[\"lambda_\"].error = 0.03\n+\n flux, flux_error = exppowerlaw.integral_error(emin, emax)\n- \n- assert_allclose(flux.value[0]/1e-13, 5.05855622, rtol=0.01)\n- assert_allclose(flux_error.value[0]/1e-14, 8.90907063, rtol=0.01)\n+\n+ assert_allclose(flux.value[0] / 1e-13, 5.05855622, rtol=0.01)\n+ assert_allclose(flux_error.value[0] / 1e-14, 8.90907063, rtol=0.01)\n", "new_path": "gammapy/modeling/models/tests/test_spectral.py", "old_path": "gammapy/modeling/models/tests/test_spectral.py" }, { "change_type": "MODIFY", "diff": "@@ -217,4 +217,3 @@ def test_plot_constant_model():\n constant_model = ConstantTemporalModel(const=1)\n with mpl_plot_check():\n constant_model.plot(time_range)\n-\n", "new_path": "gammapy/modeling/models/tests/test_temporal.py", "old_path": "gammapy/modeling/models/tests/test_temporal.py" } ]
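A quick sketch of the new switch, mirroring the test added in the diff (the model choice is arbitrary): with `full_output=False`, parameter entries whose `min`, `max`, `frozen` or `error` equal the class defaults, or are NaN, are dropped from the serialised dict.

```python
from gammapy.modeling.models import PowerLawSpectralModel

model = PowerLawSpectralModel()

full = model.to_dict()                   # every parameter keeps min/max/frozen/error
slim = model.to_dict(full_output=False)  # default-valued entries are stripped

print(sorted(full['parameters'][0]))
print(sorted(slim['parameters'][0]))     # typically only name/value/unit remain
```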
9a88b71d4eb5ac6d1041e8eba5feb64c6a5fa8d9
elbayadm/attn2d
null
null
Support pretrained embeddings for Transformer. Also show a nicer error message.
[ { "change_type": "MODIFY", "diff": "@@ -11,6 +11,8 @@ import torch\n import torch.nn as nn\n import torch.nn.functional as F\n \n+from fairseq import utils\n+\n from fairseq.modules import (\n LearnedPositionalEmbedding, MultiheadAttention,\n SinusoidalPositionalEmbedding,\n@@ -36,6 +38,8 @@ class TransformerModel(FairseqModel):\n help='dropout probability for attention weights')\n parser.add_argument('--relu-dropout', type=float, metavar='D',\n help='dropout probability after ReLU in FFN')\n+ parser.add_argument('--encoder-embed-path', type=str, metavar='STR',\n+ help='path to pre-trained encoder embedding')\n parser.add_argument('--encoder-embed-dim', type=int, metavar='N',\n help='encoder embedding dimension')\n parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',\n@@ -48,6 +52,8 @@ class TransformerModel(FairseqModel):\n help='apply layernorm before each encoder block')\n parser.add_argument('--encoder-learned-pos', default=False, action='store_true',\n help='use learned positional embeddings in the encoder')\n+ parser.add_argument('--decoder-embed-path', type=str, metavar='STR',\n+ help='path to pre-trained decoder embedding')\n parser.add_argument('--decoder-embed-dim', type=int, metavar='N',\n help='decoder embedding dimension')\n parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',\n@@ -69,12 +75,20 @@ class TransformerModel(FairseqModel):\n @classmethod\n def build_model(cls, args, task):\n \"\"\"Build a new model instance.\"\"\"\n+ # make sure that all args are properly defaulted (in case there are any new ones)\n+ base_architecture(args)\n+\n src_dict, tgt_dict = task.source_dictionary, task.target_dictionary\n \n- def build_embedding(dictionary, embed_dim):\n+ def build_embedding(dictionary, embed_dim, path=None):\n num_embeddings = len(dictionary)\n padding_idx = dictionary.pad()\n- return Embedding(num_embeddings, embed_dim, padding_idx)\n+ emb = Embedding(num_embeddings, embed_dim, padding_idx)\n+ # if provided, load from preloaded dictionaries\n+ if path:\n+ embed_dict = utils.parse_embedding(path)\n+ utils.load_embedding(embed_dict, dictionary, emb)\n+ return emb\n \n if args.share_all_embeddings:\n if src_dict != tgt_dict:\n@@ -82,12 +96,21 @@ class TransformerModel(FairseqModel):\n if args.encoder_embed_dim != args.decoder_embed_dim:\n raise RuntimeError(\n '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')\n- encoder_embed_tokens = build_embedding(src_dict, args.encoder_embed_dim)\n+ if args.decoder_embed_path and (\n+ args.decoder_embed_path != args.encoder_embed_path):\n+ raise RuntimeError('--share-all-embeddings not compatible with --decoder-embed-path')\n+ encoder_embed_tokens = build_embedding(\n+ src_dict, args.encoder_embed_dim, args.encoder_embed_path\n+ )\n decoder_embed_tokens = encoder_embed_tokens\n args.share_decoder_input_output_embed = True\n else:\n- encoder_embed_tokens = build_embedding(src_dict, args.encoder_embed_dim)\n- decoder_embed_tokens = build_embedding(tgt_dict, args.decoder_embed_dim)\n+ encoder_embed_tokens = build_embedding(\n+ src_dict, args.encoder_embed_dim, args.encoder_embed_path\n+ )\n+ decoder_embed_tokens = build_embedding(\n+ tgt_dict, args.decoder_embed_dim, args.decoder_embed_path\n+ )\n \n encoder = TransformerEncoder(args, src_dict, encoder_embed_tokens)\n decoder = TransformerDecoder(args, tgt_dict, decoder_embed_tokens)\n@@ -391,10 +414,12 @@ def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx, left_pad, le\n \n 
@register_model_architecture('transformer', 'transformer')\n def base_architecture(args):\n+ args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048)\n args.encoder_layers = getattr(args, 'encoder_layers', 6)\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)\n+ args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim)\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)\n args.decoder_layers = getattr(args, 'decoder_layers', 6)\n", "new_path": "fairseq/models/transformer.py", "old_path": "fairseq/models/transformer.py" }, { "change_type": "MODIFY", "diff": "@@ -24,7 +24,7 @@ class MultiheadAttention(nn.Module):\n self.num_heads = num_heads\n self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n- assert self.head_dim * num_heads == self.embed_dim\n+ assert self.head_dim * num_heads == self.embed_dim, \"embed_dim must be divisible by num_heads\"\n self.scaling = self.head_dim**-0.5\n self._mask = None\n \n", "new_path": "fairseq/modules/multihead_attention.py", "old_path": "fairseq/modules/multihead_attention.py" } ]
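The new `--encoder-embed-path` / `--decoder-embed-path` options load a word2vec-style text file and copy matching rows into the token embedding matrix. A rough sketch of that loading step in isolation; the path and sizes are placeholders, and in the real code the dictionary and dimensions come from the task and args:

```python
import torch.nn as nn
from fairseq import utils

embed_dim = 512          # would be args.encoder_embed_dim
num_embeddings = 10000   # would be len(dictionary)
padding_idx = 1          # would be dictionary.pad()

emb = nn.Embedding(num_embeddings, embed_dim, padding_idx=padding_idx)

# Each line of the file: "<token> <v1> <v2> ... <v512>"
embed_dict = utils.parse_embedding('embeddings/encoder.vec')  # hypothetical path
# utils.load_embedding(embed_dict, dictionary, emb)           # needs a fairseq Dictionary

# Training would then pass, e.g.:
#   --arch transformer --encoder-embed-path embeddings/encoder.vec
```

With `--share-all-embeddings`, the decoder path must be omitted or identical to the encoder path, which is what the new error message enforces.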
c1d3342fc666ddd8d742caf8c6b024f3e4291f2e
mycroftai/mycroft-core
null
null
Add playback timing reports in playback thread. This covers the default case with mimic; more TTS backends will follow as they are moved over to using the playback thread.
[ { "change_type": "MODIFY", "diff": "@@ -75,7 +75,7 @@ def handle_speak(event):\n utterance)\n for chunk in chunks:\n try:\n- mute_and_speak(chunk)\n+ mute_and_speak(chunk, ident)\n except KeyboardInterrupt:\n raise\n except Exception:\n@@ -84,7 +84,7 @@ def handle_speak(event):\n check_for_signal('buttonPress')):\n break\n else:\n- mute_and_speak(utterance)\n+ mute_and_speak(utterance, ident)\n \n stopwatch.stop()\n report_metric('timing',\n@@ -95,12 +95,13 @@ def handle_speak(event):\n 'time': stopwatch.time})\n \n \n-def mute_and_speak(utterance):\n+def mute_and_speak(utterance, ident):\n \"\"\"\n Mute mic and start speaking the utterance using selected tts backend.\n \n Args:\n- utterance: The sentence to be spoken\n+ utterance: The sentence to be spoken\n+ ident: Ident tying the utterance to the source query\n \"\"\"\n global tts_hash\n \n@@ -116,7 +117,7 @@ def mute_and_speak(utterance):\n tts_hash = hash(str(config.get('tts', '')))\n \n LOG.info(\"Speak: \" + utterance)\n- tts.execute(utterance)\n+ tts.execute(utterance, ident)\n \n \n def handle_stop(event):\n", "new_path": "mycroft/audio/speech.py", "old_path": "mycroft/audio/speech.py" }, { "change_type": "MODIFY", "diff": "@@ -31,6 +31,7 @@ from mycroft.util import (\n play_wav, play_mp3, check_for_signal, create_signal, resolve_resource_file\n )\n from mycroft.util.log import LOG\n+from mycroft.metrics import report_metric, Stopwatch\n import sys\n if sys.version_info[0] < 3:\n from Queue import Queue, Empty\n@@ -38,6 +39,22 @@ else:\n from queue import Queue, Empty\n \n \n+def send_playback_metric(stopwatch, ident):\n+ \"\"\"\n+ Send playback metrics in a background thread\n+ \"\"\"\n+ def do_send(stopwatch, ident):\n+ report_metric('timing',\n+ {'id': ident,\n+ 'system': 'speech_playback',\n+ 'start_time': stopwatch.timestamp,\n+ 'time': stopwatch.time})\n+\n+ t = Thread(target=do_send, args=(stopwatch, ident))\n+ t.daemon = True\n+ t.start()\n+\n+\n class PlaybackThread(Thread):\n \"\"\"\n Thread class for playing back tts audio and sending\n@@ -72,23 +89,26 @@ class PlaybackThread(Thread):\n \"\"\"\n while not self._terminated:\n try:\n- snd_type, data, visimes = self.queue.get(timeout=2)\n+ snd_type, data, visimes, ident = self.queue.get(timeout=2)\n self.blink(0.5)\n if not self._processing_queue:\n self._processing_queue = True\n self.tts.begin_audio()\n \n- if snd_type == 'wav':\n- self.p = play_wav(data)\n- elif snd_type == 'mp3':\n- self.p = play_mp3(data)\n-\n- if visimes:\n- if self.show_visimes(visimes):\n- self.clear_queue()\n- else:\n- self.p.communicate()\n- self.p.wait()\n+ stopwatch = Stopwatch()\n+ with stopwatch:\n+ if snd_type == 'wav':\n+ self.p = play_wav(data)\n+ elif snd_type == 'mp3':\n+ self.p = play_mp3(data)\n+\n+ if visimes:\n+ if self.show_visimes(visimes):\n+ self.clear_queue()\n+ else:\n+ self.p.communicate()\n+ self.p.wait()\n+ send_playback_metric(stopwatch, ident)\n \n if self.queue.empty():\n self.tts.end_audio()\n@@ -219,7 +239,7 @@ class TTS(object):\n \"\"\"\n pass\n \n- def execute(self, sentence):\n+ def execute(self, sentence, ident=None):\n \"\"\"\n Convert sentence to speech.\n \n@@ -228,6 +248,7 @@ class TTS(object):\n \n Args:\n sentence: Sentence to be spoken\n+ ident: Id reference to current interaction\n \"\"\"\n create_signal(\"isSpeaking\")\n if self.phonetic_spelling:\n@@ -247,7 +268,7 @@ class TTS(object):\n if phonemes:\n self.save_phonemes(key, phonemes)\n \n- self.queue.put((self.type, wav_file, self.visime(phonemes)))\n+ self.queue.put((self.type, wav_file, 
self.visime(phonemes), ident))\n \n def visime(self, phonemes):\n \"\"\"\n", "new_path": "mycroft/tts/__init__.py", "old_path": "mycroft/tts/__init__.py" }, { "change_type": "MODIFY", "diff": "@@ -21,7 +21,7 @@ class ESpeak(TTS):\n def __init__(self, lang, voice):\n super(ESpeak, self).__init__(lang, voice, ESpeakValidator(self))\n \n- def execute(self, sentence):\n+ def execute(self, sentence, ident=None):\n self.begin_audio()\n subprocess.call(\n ['espeak', '-v', self.lang + '+' + self.voice, sentence])\n", "new_path": "mycroft/tts/espeak_tts.py", "old_path": "mycroft/tts/espeak_tts.py" }, { "change_type": "MODIFY", "diff": "@@ -36,7 +36,7 @@ class RemoteTTS(TTS):\n self.url = remove_last_slash(url)\n self.session = FuturesSession()\n \n- def execute(self, sentence):\n+ def execute(self, sentence, ident=None):\n phrases = self.__get_phrases(sentence)\n \n if len(phrases) > 0:\n", "new_path": "mycroft/tts/remote_tts.py", "old_path": "mycroft/tts/remote_tts.py" }, { "change_type": "MODIFY", "diff": "@@ -21,7 +21,7 @@ class SpdSay(TTS):\n def __init__(self, lang, voice):\n super(SpdSay, self).__init__(lang, voice, SpdSayValidator(self))\n \n- def execute(self, sentence):\n+ def execute(self, sentence, ident=None):\n self.begin_audio()\n subprocess.call(\n ['spd-say', '-l', self.lang, '-t', self.voice, sentence])\n", "new_path": "mycroft/tts/spdsay_tts.py", "old_path": "mycroft/tts/spdsay_tts.py" } ]
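The playback-timing commit above wraps each audio playback in a Stopwatch and reports the measurement from a short-lived background thread, so a slow metrics upload never blocks the playback queue. A rough sketch of that pattern follows; the Stopwatch and report_metric below are simplified stand-ins for the real mycroft.metrics helpers.

import time
from threading import Thread


class Stopwatch:
    """Minimal context-manager stopwatch (simplified stand-in)."""
    def __enter__(self):
        self.timestamp = time.time()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.time = time.time() - self.timestamp


def report_metric(name, data):
    print(name, data)  # stand-in for the real metrics upload


def send_playback_metric(stopwatch, ident):
    """Send the timing report without blocking the caller."""
    def do_send():
        report_metric('timing', {'id': ident,
                                 'system': 'speech_playback',
                                 'start_time': stopwatch.timestamp,
                                 'time': stopwatch.time})
    Thread(target=do_send, daemon=True).start()


with Stopwatch() as stopwatch:
    time.sleep(0.1)  # pretend to play an audio clip here
send_playback_metric(stopwatch, ident='example-utterance-1')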
07ce2d98d0c069e2d4d04d1f9e5bc21e0e520fee
mycroftai/mycroft-core
null
null
Use function attributes for intent decorators. This avoids the shared module-level list, which misbehaves when multiple skills initialize at once.
[ { "change_type": "MODIFY", "diff": "@@ -18,7 +18,6 @@ import sys\n import time\n import csv\n import inspect\n-from functools import wraps\n from inspect import getargspec\n from datetime import datetime, timedelta\n \n@@ -126,7 +125,7 @@ def load_skill(skill_descriptor, emitter, skill_id, BLACKLISTED_SKILLS=None):\n # The very first time a skill is run, speak the intro\n first_run = skill.settings.get(\"__mycroft_skill_firstrun\", True)\n if first_run:\n- LOG.info(\"First run of \"+skill_descriptor[\"name\"])\n+ LOG.info(\"First run of \" + skill_descriptor[\"name\"])\n skill.settings[\"__mycroft_skill_firstrun\"] = False\n skill.settings.store()\n intro = skill.get_intro_message()\n@@ -166,21 +165,16 @@ def get_handler_name(handler):\n return name\n \n \n-# Lists used when adding skill handlers using decorators\n-_intent_list = []\n-_intent_file_list = []\n-\n-\n def intent_handler(intent_parser):\n \"\"\" Decorator for adding a method as an intent handler. \"\"\"\n \n def real_decorator(func):\n- @wraps(func)\n- def handler_method(*args, **kwargs):\n- return func(*args, **kwargs)\n-\n- _intent_list.append((intent_parser, func))\n- return handler_method\n+ # Store the intent_parser inside the function\n+ # This will be used later to call register_intent\n+ if not hasattr(func, 'intents'):\n+ func.intents = []\n+ func.intents.append(intent_parser)\n+ return func\n \n return real_decorator\n \n@@ -189,12 +183,12 @@ def intent_file_handler(intent_file):\n \"\"\" Decorator for adding a method as an intent file handler. \"\"\"\n \n def real_decorator(func):\n- @wraps(func)\n- def handler_method(*args, **kwargs):\n- return func(*args, **kwargs)\n-\n- _intent_file_list.append((intent_file, func))\n- return handler_method\n+ # Store the intent_file inside the function\n+ # This will be used later to call register_intent_file\n+ if not hasattr(func, 'intent_files'):\n+ func.intent_files = []\n+ func.intent_files.append(intent_file)\n+ return func\n \n return real_decorator\n \n@@ -455,14 +449,20 @@ class MycroftSkill(object):\n def _register_decorated(self):\n \"\"\"\n Register all intent handlers that have been decorated with an intent.\n+\n+ Looks for all functions that have been marked by a decorator\n+ and read the intent data from them\n \"\"\"\n- global _intent_list, _intent_file_list\n- for intent_parser, handler in _intent_list:\n- self.register_intent(intent_parser, handler, need_self=True)\n- for intent_file, handler in _intent_file_list:\n- self.register_intent_file(intent_file, handler, need_self=True)\n- _intent_list = []\n- _intent_file_list = []\n+ for attr_name in dir(self):\n+ method = getattr(self, attr_name)\n+\n+ if hasattr(method, 'intents'):\n+ for intent in getattr(method, 'intents'):\n+ self.register_intent(intent, method)\n+\n+ if hasattr(method, 'intent_files'):\n+ for intent_file in getattr(method, 'intent_files'):\n+ self.register_intent_file(intent_file, method)\n \n def translate(self, text, data=None):\n \"\"\"\n@@ -572,9 +572,8 @@ class MycroftSkill(object):\n Args:\n name: IntentParser name\n handler: method to call\n- need_self: optional parameter, when called from a\n- decorated intent handler the function will\n- need the self variable passed as well.\n+ need_self: optional parameter, pass if giving a local\n+ function or lambda (not defined in the class)\n once: optional parameter, Event handler will be\n removed after it has been run once.\n handler_info: base message when reporting skill event handler\n@@ -679,7 +678,7 @@ class MycroftSkill(object):\n 
removed = True\n return removed\n \n- def register_intent(self, intent_parser, handler, need_self=False):\n+ def register_intent(self, intent_parser, handler):\n \"\"\"\n Register an Intent with the intent service.\n \n@@ -687,9 +686,6 @@ class MycroftSkill(object):\n intent_parser: Intent or IntentBuilder object to parse\n utterance for the handler.\n handler: function to register with intent\n- need_self: optional parameter, when called from a decorated\n- intent handler the function will need the self\n- variable passed as well.\n \"\"\"\n if type(intent_parser) == IntentBuilder:\n intent_parser = intent_parser.build()\n@@ -701,10 +697,10 @@ class MycroftSkill(object):\n munge_intent_parser(intent_parser, name, self.skill_id)\n self.emitter.emit(Message(\"register_intent\", intent_parser.__dict__))\n self.registered_intents.append((name, intent_parser))\n- self.add_event(intent_parser.name, handler, need_self,\n+ self.add_event(intent_parser.name, handler, False,\n 'mycroft.skill.handler')\n \n- def register_intent_file(self, intent_file, handler, need_self=False):\n+ def register_intent_file(self, intent_file, handler):\n \"\"\"\n Register an Intent file with the intent service.\n For example:\n@@ -729,14 +725,13 @@ class MycroftSkill(object):\n intent_file: name of file that contains example queries\n that should activate the intent\n handler: function to register with intent\n- need_self: use for decorator. See <register_intent>\n \"\"\"\n name = str(self.skill_id) + ':' + intent_file\n self.emitter.emit(Message(\"padatious:register_intent\", {\n \"file_name\": join(self.vocab_dir, intent_file),\n \"name\": name\n }))\n- self.add_event(name, handler, need_self, 'mycroft.skill.handler')\n+ self.add_event(name, handler, False, 'mycroft.skill.handler')\n \n def register_entity_file(self, entity_file):\n \"\"\"\n@@ -1115,6 +1110,7 @@ class FallbackSkill(MycroftSkill):\n ident = message.context['ident']\n report_timing(ident, 'fallback_handler', stopwatch,\n {'handler': handler_name})\n+\n return handler\n \n @classmethod\n@@ -1137,11 +1133,13 @@ class FallbackSkill(MycroftSkill):\n register a fallback with the list of fallback handlers\n and with the list of handlers registered by this instance\n \"\"\"\n+\n def wrapper(*args, **kwargs):\n if handler(*args, **kwargs):\n self.make_active()\n return True\n return False\n+\n self.instance_fallback_handlers.append(wrapper)\n self._register_fallback(handler, priority)\n \n", "new_path": "mycroft/skills/core.py", "old_path": "mycroft/skills/core.py" } ]
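The decorator change above stores the intent parser on the decorated function itself instead of in a module-level list, so two skills loading at the same time can no longer see each other's pending handlers. The sketch below shows the bare pattern outside of mycroft: the decorator only attaches data, and registration happens later by scanning the instance's methods (the Skill class and intent name are made up for illustration).

def intent_handler(intent_parser):
    """Mark a method as an intent handler by storing the parser on the function."""
    def real_decorator(func):
        if not hasattr(func, 'intents'):
            func.intents = []
        func.intents.append(intent_parser)
        return func
    return real_decorator


class Skill:
    @intent_handler('HelloIntent')
    def handle_hello(self):
        return 'hello!'

    def register_intent(self, intent, handler):
        print('registering', intent, '->', handler.__name__)

    def _register_decorated(self):
        """Find every method marked by the decorator and register it."""
        for attr_name in dir(self):
            method = getattr(self, attr_name)
            for intent in getattr(method, 'intents', []):
                self.register_intent(intent, method)


Skill()._register_decorated()  # prints: registering HelloIntent -> handle_hello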
fc8424c9ee9c83f4962e171699f13c39407637cc
mycroftai/mycroft-core
null
null
Make skill ids use the skill folder. This is necessary because in Python 3, hash(x) changes on every start of the application; using the skill folder keeps the id consistent. In addition, the folder name makes it easier to debug parts of the application than something like an md5sum.
[ { "change_type": "MODIFY", "diff": "@@ -37,7 +37,7 @@ from mycroft.filesystem import FileSystemAccess\n from mycroft.messagebus.message import Message\n from mycroft.metrics import report_metric, report_timing, Stopwatch\n from mycroft.skills.settings import SkillSettings\n-from mycroft.skills.skill_data import (load_vocabulary, load_regex, to_letters,\n+from mycroft.skills.skill_data import (load_vocabulary, load_regex, to_alnum,\n munge_regex, munge_intent_parser)\n from mycroft.util import resolve_resource_file\n from mycroft.util.log import LOG\n@@ -63,13 +63,13 @@ def unmunge_message(message, skill_id):\n \n Args:\n message (Message): Intent result message\n- skill_id (int): skill identifier\n+ skill_id (str): skill identifier\n \n Returns:\n Message without clear keywords\n \"\"\"\n if isinstance(message, Message) and isinstance(message.data, dict):\n- skill_id = to_letters(skill_id)\n+ skill_id = to_alnum(skill_id)\n for key in message.data:\n if key[:len(skill_id)] == skill_id:\n new_key = key[len(skill_id):]\n@@ -807,7 +807,7 @@ class MycroftSkill(object):\n raise ValueError('context should be a string')\n if not isinstance(word, str):\n raise ValueError('word should be a string')\n- context = to_letters(self.skill_id) + context\n+ context = to_alnum(self.skill_id) + context\n self.emitter.emit(Message('add_context',\n {'context': context, 'word': word}))\n \n@@ -827,7 +827,7 @@ class MycroftSkill(object):\n entity_type: Intent handler entity to tie the word to\n \"\"\"\n self.emitter.emit(Message('register_vocab', {\n- 'start': entity, 'end': to_letters(self.skill_id) + entity_type\n+ 'start': entity, 'end': to_alnum(self.skill_id) + entity_type\n }))\n \n def register_regex(self, regex_str):\n", "new_path": "mycroft/skills/core.py", "old_path": "mycroft/skills/core.py" }, { "change_type": "MODIFY", "diff": "@@ -172,7 +172,7 @@ class IntentService(object):\n Returns:\n (str) Skill name or the skill id if the skill wasn't found\n \"\"\"\n- return self.skill_names.get(int(skill_id), skill_id)\n+ return self.skill_names.get(skill_id, skill_id)\n \n def reset_converse(self, message):\n \"\"\"Let skills know there was a problem with speech recognition\"\"\"\n@@ -357,7 +357,7 @@ class IntentService(object):\n if best_intent and best_intent.get('confidence', 0.0) > 0.0:\n self.update_context(best_intent)\n # update active skills\n- skill_id = int(best_intent['intent_type'].split(\":\")[0])\n+ skill_id = best_intent['intent_type'].split(\":\")[0]\n self.add_active_skill(skill_id)\n return best_intent\n \n", "new_path": "mycroft/skills/intent_service.py", "old_path": "mycroft/skills/intent_service.py" }, { "change_type": "MODIFY", "diff": "@@ -311,7 +311,7 @@ class SkillManager(Thread):\n skill_path = skill_path.rstrip('/')\n skill = self.loaded_skills.setdefault(skill_path, {})\n skill.update({\n- \"id\": hash(skill_path),\n+ \"id\": basename(skill_path),\n \"path\": skill_path\n })\n \n@@ -452,7 +452,7 @@ class SkillManager(Thread):\n If supported, the conversation is invoked.\n \"\"\"\n \n- skill_id = int(message.data[\"skill_id\"])\n+ skill_id = message.data[\"skill_id\"]\n utterances = message.data[\"utterances\"]\n lang = message.data[\"lang\"]\n \n", "new_path": "mycroft/skills/main.py", "old_path": "mycroft/skills/main.py" }, { "change_type": "MODIFY", "diff": "@@ -80,7 +80,7 @@ def load_vocabulary(basedir, emitter, skill_id):\n \"\"\"\n for vocab_file in listdir(basedir):\n if vocab_file.endswith(\".voc\"):\n- vocab_type = to_letters(skill_id) + splitext(vocab_file)[0]\n+ 
vocab_type = to_alnum(skill_id) + splitext(vocab_file)[0]\n load_vocab_from_file(\n join(basedir, vocab_file), vocab_type, emitter)\n \n@@ -92,28 +92,24 @@ def load_regex(basedir, emitter, skill_id):\n basedir (str): path of directory to load from\n emitter (messagebus emitter): websocket used to send the vocab to\n the intent service\n- skill_id (int): skill identifier\n+ skill_id (str): skill identifier\n \"\"\"\n for regex_type in listdir(basedir):\n if regex_type.endswith(\".rx\"):\n- load_regex_from_file(\n- join(basedir, regex_type), emitter, skill_id)\n+ load_regex_from_file(join(basedir, regex_type), emitter, skill_id)\n \n \n-def to_letters(number):\n- \"\"\"Convert number to string of letters.\n+def to_alnum(skill_id):\n+ \"\"\"Convert a skill id to only alphanumeric characters\n \n- 0 -> A, 1 -> B, etc.\n+ Non alpha-numeric characters are converted to \"_\"\n \n Args:\n- number (int): number to be converted\n+ skill_id (str): identifier to be converted\n Returns:\n (str) String of letters\n \"\"\"\n- ret = ''\n- for n in str(number).strip('-'):\n- ret += chr(65 + int(n))\n- return ret\n+ return ''.join(c if c.isalnum() else '_' for c in str(skill_id))\n \n \n def munge_regex(regex, skill_id):\n@@ -121,11 +117,11 @@ def munge_regex(regex, skill_id):\n \n Args:\n regex (str): regex string\n- skill_id (int): skill identifier\n+ skill_id (str): skill identifier\n Returns:\n (str) munged regex\n \"\"\"\n- base = '(?P<' + to_letters(skill_id)\n+ base = '(?P<' + to_alnum(skill_id)\n return base.join(regex.split('(?P<'))\n \n \n@@ -150,7 +146,7 @@ def munge_intent_parser(intent_parser, name, skill_id):\n intent_parser.name = name\n \n # Munge keywords\n- skill_id = to_letters(skill_id)\n+ skill_id = to_alnum(skill_id)\n # Munge required keyword\n reqs = []\n for i in intent_parser.requires:\n", "new_path": "mycroft/skills/skill_data.py", "old_path": "mycroft/skills/skill_data.py" }, { "change_type": "MODIFY", "diff": "@@ -73,17 +73,18 @@ class MycroftSkillTest(unittest.TestCase):\n \n def check_regex_from_file(self, filename, result_list=None):\n result_list = result_list or []\n- load_regex_from_file(join(self.regex_path, filename), self.emitter, 0)\n+ regex_file = join(self.regex_path, filename)\n+ load_regex_from_file(regex_file, self.emitter, 'A')\n self.check_emitter(result_list)\n \n def check_vocab(self, path, result_list=None):\n result_list = result_list or []\n- load_vocabulary(path, self.emitter, 0)\n+ load_vocabulary(path, self.emitter, 'A')\n self.check_emitter(result_list)\n \n def check_regex(self, path, result_list=None):\n result_list = result_list or []\n- load_regex(path, self.emitter, 0)\n+ load_regex(path, self.emitter, 'A')\n self.check_emitter(result_list)\n \n def check_emitter(self, result_list):\n@@ -231,7 +232,7 @@ class MycroftSkillTest(unittest.TestCase):\n s.bind(self.emitter)\n s.initialize()\n expected = [{'at_least_one': [],\n- 'name': '0:a',\n+ 'name': 'A:a',\n 'optional': [],\n 'requires': [('AKeyword', 'AKeyword')]}]\n self.check_register_intent(expected)\n@@ -241,7 +242,7 @@ class MycroftSkillTest(unittest.TestCase):\n s.bind(self.emitter)\n s.initialize()\n expected = [{'at_least_one': [],\n- 'name': '0:a',\n+ 'name': 'A:a',\n 'optional': [],\n 'requires': [('AKeyword', 'AKeyword')]}]\n \n@@ -260,7 +261,7 @@ class MycroftSkillTest(unittest.TestCase):\n s.bind(self.emitter)\n s.initialize()\n expected = [{'at_least_one': [],\n- 'name': '0:a',\n+ 'name': 'A:a',\n 'optional': [],\n 'requires': [('AKeyword', 'AKeyword')]}]\n 
self.check_register_intent(expected)\n@@ -334,12 +335,13 @@ class MycroftSkillTest(unittest.TestCase):\n sys.path.append(abspath(dirname(__file__)))\n SimpleSkill5 = __import__('decorator_test_skill').TestSkill\n s = SimpleSkill5()\n+ s.skill_id = 'A'\n s.vocab_dir = join(dirname(__file__), 'intent_file')\n s.bind(self.emitter)\n s.initialize()\n s._register_decorated()\n expected = [{'at_least_one': [],\n- 'name': '0:a',\n+ 'name': 'A:a',\n 'optional': [],\n 'requires': [('AKeyword', 'AKeyword')]},\n {\n@@ -447,8 +449,8 @@ class MycroftSkillTest(unittest.TestCase):\n s.bind(emitter)\n s.schedule_event(s.handler, datetime.now(), name='sched_handler1')\n # Check that the handler was registered with the emitter\n- self.assertEqual(emitter.once.call_args[0][0], '0:sched_handler1')\n- self.assertTrue('0:sched_handler1' in [e[0] for e in s.events])\n+ self.assertEqual(emitter.once.call_args[0][0], 'A:sched_handler1')\n+ self.assertTrue('A:sched_handler1' in [e[0] for e in s.events])\n \n @mock.patch.dict(Configuration._Configuration__config, BASE_CONF)\n def test_remove_scheduled_event(self):\n@@ -457,11 +459,11 @@ class MycroftSkillTest(unittest.TestCase):\n s.bind(emitter)\n s.schedule_event(s.handler, datetime.now(), name='sched_handler1')\n # Check that the handler was registered with the emitter\n- self.assertTrue('0:sched_handler1' in [e[0] for e in s.events])\n+ self.assertTrue('A:sched_handler1' in [e[0] for e in s.events])\n s.cancel_scheduled_event('sched_handler1')\n # Check that the handler was removed\n- self.assertEqual(emitter.remove.call_args[0][0], '0:sched_handler1')\n- self.assertTrue('0:sched_handler1' not in [e[0] for e in s.events])\n+ self.assertEqual(emitter.remove.call_args[0][0], 'A:sched_handler1')\n+ self.assertTrue('A:sched_handler1' not in [e[0] for e in s.events])\n \n @mock.patch.dict(Configuration._Configuration__config, BASE_CONF)\n def test_run_scheduled_event(self):\n@@ -477,10 +479,16 @@ class MycroftSkillTest(unittest.TestCase):\n self.assertTrue(s.handler_run)\n # Check that the handler was removed from the list of registred\n # handler\n- self.assertTrue('0:sched_handler1' not in [e[0] for e in s.events])\n+ self.assertTrue('A:sched_handler1' not in [e[0] for e in s.events])\n \n \n-class SimpleSkill1(MycroftSkill):\n+class _TestSkill(MycroftSkill):\n+ def __init__(self):\n+ super().__init__()\n+ self.skill_id = 'A'\n+\n+\n+class SimpleSkill1(_TestSkill):\n def __init__(self):\n super(SimpleSkill1, self).__init__()\n self.handler_run = False\n@@ -497,8 +505,10 @@ class SimpleSkill1(MycroftSkill):\n pass\n \n \n-class SimpleSkill2(MycroftSkill):\n+class SimpleSkill2(_TestSkill):\n \"\"\" Test skill for intent builder without .build() \"\"\"\n+ skill_id = 'A'\n+\n def initialize(self):\n i = IntentBuilder('a').require('Keyword')\n self.register_intent(i, self.handler)\n@@ -510,8 +520,10 @@ class SimpleSkill2(MycroftSkill):\n pass\n \n \n-class SimpleSkill3(MycroftSkill):\n+class SimpleSkill3(_TestSkill):\n \"\"\" Test skill for invalid Intent for register_intent \"\"\"\n+ skill_id = 'A'\n+\n def initialize(self):\n self.register_intent('string', self.handler)\n \n@@ -522,8 +534,10 @@ class SimpleSkill3(MycroftSkill):\n pass\n \n \n-class SimpleSkill4(MycroftSkill):\n+class SimpleSkill4(_TestSkill):\n \"\"\" Test skill for padatious intent \"\"\"\n+ skill_id = 'A'\n+\n def initialize(self):\n self.register_intent_file('test.intent', self.handler)\n self.register_entity_file('test_ent.entity')\n", "new_path": "test/unittests/skills/core.py", "old_path": 
"test/unittests/skills/core.py" } ]
f6347ae47c872b40339d9565a9cb29da5bca8716
mycroftai/mycroft-core
null
null
Replace hashed meta with skill_gid as identifier. This also removes the notion of an owner skill, so all skills may update settings on the server.
[ { "change_type": "MODIFY", "diff": "@@ -216,34 +216,12 @@ class SkillSettings(dict):\n except RequestException:\n return\n \n- hashed_meta = self._get_meta_hash(settings_meta)\n- skill_settings = self._request_other_settings(hashed_meta)\n- # if hash is new then there is a diff version of settingsmeta\n- if self._is_new_hash(hashed_meta):\n- # first look at all other devices on user account to see\n- # if the settings exist. if it does then sync with device\n- if skill_settings:\n- # not_owner flags that this settings is loaded from\n- # another device. If a skill settings doesn't have\n- # not_owner, then the skill is created from that device\n- self['not_owner'] = True\n- self.save_skill_settings(skill_settings)\n- else: # upload skill settings if\n- uuid = self._load_uuid()\n- if uuid is not None:\n- self._delete_metadata(uuid)\n- self._upload_meta(settings_meta, hashed_meta)\n- else: # hash is not new\n- if skill_settings is not None:\n- self['not_owner'] = True\n- self.save_skill_settings(skill_settings)\n- else:\n- settings = self._request_my_settings(hashed_meta)\n- if settings is None:\n- # metadata got deleted from Home, send up\n- self._upload_meta(settings_meta, hashed_meta)\n- else:\n- self.save_skill_settings(settings)\n+ settings = self._request_my_settings(self.skill_gid)\n+ if settings is None:\n+ # metadata got deleted from Home, send up\n+ self._upload_meta(settings_meta, self.skill_gid)\n+ else:\n+ self.save_skill_settings(settings)\n self._complete_intialization = True\n \n @property\n@@ -323,15 +301,15 @@ class SkillSettings(dict):\n Args:\n skill_settings (dict): skill\n \"\"\"\n- if self._is_new_hash(skill_settings['identifier']):\n- self._save_uuid(skill_settings['uuid'])\n- self._save_hash(skill_settings['identifier'])\n if 'skillMetadata' in skill_settings:\n sections = skill_settings['skillMetadata']['sections']\n for section in sections:\n for field in section[\"fields\"]:\n if \"name\" in field and \"value\" in field:\n- self[field['name']] = field['value']\n+ # Bypass the change lock to allow server to update\n+ # during skill init\n+ super(SkillSettings, self).__setitem__(field['name'],\n+ field['value'])\n self.store()\n \n def _load_uuid(self):\n@@ -392,90 +370,33 @@ class SkillSettings(dict):\n meta['skillMetadata']['sections'] = sections\n return meta\n \n- def _upload_meta(self, settings_meta, hashed_meta):\n+ def _upload_meta(self, settings_meta, identifier):\n \"\"\" uploads the new meta data to settings with settings migration\n \n Args:\n- settings_meta (dict): from settingsmeta.json or settingsmeta.yaml\n- hashed_meta (str): {skill-folder}-settinsmeta.json\n+ settings_meta (dict): settingsmeta.json or settingsmeta.yaml\n+ identifier (str): identifier for skills meta data\n \"\"\"\n meta = self._migrate_settings(settings_meta)\n- meta['identifier'] = str(hashed_meta)\n+ meta['identifier'] = identifier\n response = self._send_settings_meta(meta)\n- if response and 'uuid' in response:\n- self._save_uuid(response['uuid'])\n- if 'not_owner' in self:\n- del self['not_owner']\n- self._save_hash(hashed_meta)\n \n def hash(self, string):\n \"\"\" md5 hasher for consistency across cpu architectures \"\"\"\n return hashlib.md5(bytes(string, 'utf-8')).hexdigest()\n \n- def _get_meta_hash(self, settings_meta):\n- \"\"\" Gets the hash of skill\n-\n- Args:\n- settings_meta (dict): settingsmeta object\n- Returns:\n- _hash (str): hashed to identify skills\n- \"\"\"\n- _hash = self.hash(json.dumps(settings_meta, sort_keys=True) +\n- self._user_identity)\n- return 
\"{}--{}\".format(self.name, _hash)\n-\n- def _save_hash(self, hashed_meta):\n- \"\"\" Saves hashed_meta to settings directory.\n-\n- Args:\n- hashed_meta (str): hash of new settingsmeta\n- \"\"\"\n- directory = self.config.get(\"skills\")[\"directory\"]\n- directory = join(directory, self.name)\n- directory = expanduser(directory)\n- hash_file = join(directory, 'hash')\n- os.makedirs(directory, exist_ok=True)\n- with open(hash_file, 'w') as f:\n- f.write(hashed_meta)\n-\n- def _is_new_hash(self, hashed_meta):\n- \"\"\" Check if stored hash is the same as current.\n-\n- If the hashed file does not exist, usually in the\n- case of first load, then the create it and return True\n-\n- Args:\n- hashed_meta (str): hash of metadata and uuid of device\n- Returns:\n- bool: True if hash is new, otherwise False\n- \"\"\"\n- directory = self.config.get(\"skills\")[\"directory\"]\n- directory = join(directory, self.name)\n- directory = expanduser(directory)\n- hash_file = join(directory, 'hash')\n- if isfile(hash_file):\n- with open(hash_file, 'r') as f:\n- current_hash = f.read()\n- return False if current_hash == str(hashed_meta) else True\n- return True\n-\n def update_remote(self):\n \"\"\" update settings state from server \"\"\"\n- skills_settings = None\n settings_meta = self._load_settings_meta()\n if settings_meta is None:\n return\n- hashed_meta = self._get_meta_hash(settings_meta)\n- if self.get('not_owner'):\n- skills_settings = self._request_other_settings(hashed_meta)\n- if not skills_settings:\n- skills_settings = self._request_my_settings(hashed_meta)\n+ skills_settings = self._request_my_settings(self.skill_gid)\n if skills_settings is not None:\n self.save_skill_settings(skills_settings)\n self.store()\n else:\n settings_meta = self._load_settings_meta()\n- self._upload_meta(settings_meta, hashed_meta)\n+ self._upload_meta(settings_meta, self.skill_gid)\n \n def _init_blank_meta(self):\n \"\"\" Send blank settingsmeta to remote. 
\"\"\"\n@@ -599,10 +520,11 @@ class SkillSettings(dict):\n with the identifier\n \n Args:\n- identifier (str): a hashed_meta\n+ identifier (str): identifier (skill_gid)\n Returns:\n skill_settings (dict or None): returns a dict if matches\n \"\"\"\n+ print(\"GETTING SETTINGS FOR {}\".format(self.name))\n settings = self._request_settings()\n if settings:\n # this loads the settings into memory for use in self.store\n@@ -612,7 +534,8 @@ class SkillSettings(dict):\n self._type_cast(skill_settings, to_platform='core')\n self._remote_settings = skill_settings\n return skill_settings\n- return None\n+ else:\n+ return None\n \n def _request_settings(self):\n \"\"\" Get all skill settings for this device from server.\n@@ -631,27 +554,6 @@ class SkillSettings(dict):\n settings = [skills for skills in settings if skills is not None]\n return settings\n \n- def _request_other_settings(self, identifier):\n- \"\"\" Retrieve skill settings from other devices by identifier\n-\n- Args:\n- identifier (str): identifier for this skill\n- Returns:\n- settings (dict or None): the retrieved settings or None\n- \"\"\"\n- path = \\\n- \"/\" + self._device_identity + \"/userSkill?identifier=\" + identifier\n- try:\n- user_skill = self.api.request({\"method\": \"GET\", \"path\": path})\n- except RequestException:\n- # Some kind of Timeout, connection HTTPError, etc.\n- user_skill = None\n- if not user_skill:\n- return None\n- else:\n- settings = self._type_cast(user_skill[0], to_platform='core')\n- return settings\n-\n def _put_metadata(self, settings_meta):\n \"\"\" PUT settingsmeta to backend to be configured in server.\n used in place of POST and PATCH.\n@@ -717,12 +619,7 @@ class SkillSettings(dict):\n \n if self._should_upload_from_change:\n settings_meta = self._load_settings_meta()\n- hashed_meta = self._get_meta_hash(settings_meta)\n- uuid = self._load_uuid()\n- if uuid is not None:\n- self._delete_metadata(uuid)\n- self._upload_meta(settings_meta, hashed_meta)\n-\n+ self._upload_meta(settings_meta, self.skill_gid)\n \n def _get_meta_path(base_directory):\n json_path = join(base_directory, 'settingsmeta.json')\n", "new_path": "mycroft/skills/settings.py", "old_path": "mycroft/skills/settings.py" } ]
69231c5ed13dc6ad437392fe83cb06074b250dd2
mycroftai/mycroft-core
null
null
Make execute_test less complex. Split the long function into several smaller ones, retaining only the main logic in execute_test.
[ { "change_type": "MODIFY", "diff": "@@ -281,6 +281,17 @@ class MockSkillsLoader(object):\n unload_skills(self.skills)\n \n \n+def load_test_case_file(test_case_file):\n+ \"\"\"Load a test case to run.\"\"\"\n+ print(\"\")\n+ print(color.HEADER + \"=\"*20 + \" RUNNING TEST \" + \"=\"*20 + color.RESET)\n+ print('Test file: ', test_case_file)\n+ with open(test_case_file, 'r') as f:\n+ test_case = json.load(f)\n+ print('Test:', json.dumps(test_case, indent=4, sort_keys=False))\n+ return test_case\n+\n+\n class SkillTest(object):\n \"\"\"\n This class is instantiated for each skill being tested. It holds the\n@@ -330,6 +341,92 @@ class SkillTest(object):\n s.get_response = orig_get_response\n s.settings = original_settings\n \n+ def send_play_query(self, s, test_case):\n+ \"\"\"Emit an event triggering the a check for playback possibilities.\"\"\"\n+ play_query = test_case['play_query']\n+ print('PLAY QUERY', color.USER_UTT + play_query + color.RESET)\n+ self.emitter.emit('play:query', Message('play:query:',\n+ {'phrase': play_query}))\n+\n+ def send_play_start(self, s, test_case):\n+ \"\"\"Emit an event starting playback from the skill.\"\"\"\n+ print('PLAY START')\n+ callback_data = test_case['play_start']\n+ callback_data['skill_id'] = s.skill_id\n+ self.emitter.emit('play:start',\n+ Message('play:start', callback_data))\n+\n+ def send_question(self, test_case):\n+ \"\"\"Emit a Question to the loaded skills.\"\"\"\n+ print(\"QUESTION: {}\".format(test_case['question']))\n+ callback_data = {'phrase': test_case['question']}\n+ self.emitter.emit('question:query',\n+ Message('question:query', data=callback_data))\n+\n+ def send_utterance(self, test_case):\n+ \"\"\"Emit an utterance to the loaded skills.\"\"\"\n+ utt = test_case['utterance']\n+ print(\"UTTERANCE:\", color.USER_UTT + utt + color.RESET)\n+ self.emitter.emit('recognizer_loop:utterance',\n+ Message('recognizer_loop:utterance',\n+ {'utterances': [utt]}))\n+\n+ def apply_test_settings(self, s, test_case):\n+ \"\"\"Replace the skills settings with settings from the test_case.\"\"\"\n+ s.settings = TestSettings('/tmp/', self.test_case_file)\n+ for key in test_case['settings']:\n+ s.settings[key] = test_case['settings'][key]\n+ print(color.YELLOW, 'will run test with custom settings:',\n+ '\\n{}'.format(s.settings), color.RESET)\n+\n+ def setup_get_response(self, s, test_case):\n+ \"\"\"Setup interception of get_response calls.\"\"\"\n+ def get_response(dialog='', data=None, announcement='',\n+ validator=None, on_fail=None, num_retries=-1):\n+ data = data or {}\n+ utt = announcement or s.dialog_renderer.render(dialog, data)\n+ print(color.MYCROFT + \">> \" + utt + color.RESET)\n+ s.speak(utt)\n+\n+ response = test_case['responses'].pop(0)\n+ print(\"SENDING RESPONSE:\",\n+ color.USER_UTT + response + color.RESET)\n+ return response\n+\n+ s.get_response = get_response\n+\n+ def remove_context(self, s, cxt):\n+ \"\"\"remove an adapt context.\"\"\"\n+ if isinstance(cxt, list):\n+ for x in cxt:\n+ MycroftSkill.remove_context(s, x)\n+ else:\n+ MycroftSkill.remove_context(s, cxt)\n+\n+ def set_context(self, s, cxt):\n+ \"\"\"Set an adapt context.\"\"\"\n+ for key, value in cxt.items():\n+ MycroftSkill.set_context(s, key, value)\n+\n+ def send_test_input(self, s, test_case):\n+ \"\"\"Emit an utterance, just like the STT engine does. 
This sends the\n+ provided text to the skill engine for intent matching and it then\n+ invokes the skill.\n+\n+ It also handles some special cases for common play skills and common\n+ query skills.\n+ \"\"\"\n+ if 'utterance' in test_case:\n+ self.send_utterance(test_case)\n+ elif 'play_query' in test_case:\n+ self.send_play_query(s, test_case)\n+ elif 'play_start' in test_case:\n+ self.send_play_start(s, test_case)\n+ elif 'question' in test_case:\n+ self.send_question(test_case)\n+ else:\n+ raise SkillTestError('No input provided in test case')\n+\n def execute_test(self, s):\n \"\"\" Execute test case.\n \n@@ -339,34 +436,13 @@ class SkillTest(object):\n Returns:\n (bool) True if the test succeeded completely.\n \"\"\"\n- print(\"\")\n- print(color.HEADER + \"=\"*20 + \" RUNNING TEST \" + \"=\"*20 + color.RESET)\n- print('Test file: ', self.test_case_file)\n- with open(self.test_case_file, 'r') as f:\n- test_case = json.load(f)\n- print('Test:', json.dumps(test_case, indent=4, sort_keys=False))\n+ test_case = load_test_case_file(self.test_case_file)\n \n if 'settings' in test_case:\n- s.settings = TestSettings('/tmp/', self.test_case_file)\n- for key in test_case['settings']:\n- s.settings[key] = test_case['settings'][key]\n- print(color.YELLOW, 'will run test with custom settings:',\n- '\\n{}'.format(s.settings), color.RESET)\n+ self.apply_test_settings(s, test_case)\n \n if 'responses' in test_case:\n- def get_response(dialog='', data=None, announcement='',\n- validator=None, on_fail=None, num_retries=-1):\n- data = data or {}\n- utt = announcement or s.dialog_renderer.render(dialog, data)\n- print(color.MYCROFT + \">> \" + utt + color.RESET)\n- s.speak(utt)\n-\n- response = test_case['responses'].pop(0)\n- print(\"SENDING RESPONSE:\",\n- color.USER_UTT + response + color.RESET)\n- return response\n-\n- s.get_response = get_response\n+ self.setup_get_response(s, test_case)\n \n # If we keep track of test status for the entire skill, then\n # get all intents from the skill, and mark current intent\n@@ -390,73 +466,67 @@ class SkillTest(object):\n # between test_cases\n cxt = test_case.get('remove_context', None)\n if cxt:\n- if isinstance(cxt, list):\n- for x in cxt:\n- MycroftSkill.remove_context(s, x)\n- else:\n- MycroftSkill.remove_context(s, cxt)\n+ self.remove_context(s, cxt)\n \n cxt = test_case.get('set_context', None)\n if cxt:\n- for key, value in cxt.items():\n- MycroftSkill.set_context(s, key, value)\n-\n- # Emit an utterance, just like the STT engine does. 
This sends the\n- # provided text to the skill engine for intent matching and it then\n- # invokes the skill.\n- if 'utterance' in test_case:\n- utt = test_case['utterance']\n- print(\"UTTERANCE:\", color.USER_UTT + utt + color.RESET)\n- self.emitter.emit('recognizer_loop:utterance',\n- Message('recognizer_loop:utterance',\n- {'utterances': [utt]}))\n- elif 'play_query' in test_case:\n- play_query = test_case['play_query']\n- print('PLAY QUERY', color.USER_UTT + play_query + color.RESET)\n- self.emitter.emit('play:query', Message('play:query:',\n- {'phrase': play_query}))\n- elif 'play_start' in test_case:\n- print('PLAY START')\n- callback_data = test_case['play_start']\n- callback_data['skill_id'] = s.skill_id\n- self.emitter.emit('play:start',\n- Message('play:start', callback_data))\n- elif 'question' in test_case:\n- print(\"QUESTION: {}\".format(test_case['question']))\n- callback_data = {'phrase': test_case['question']}\n- self.emitter.emit('question:query',\n- Message('question:query', data=callback_data))\n- else:\n- raise SkillTestError('No input utterance provided')\n+ self.set_context(s, cxt)\n \n+ self.send_test_input(s, test_case)\n # Wait up to X seconds for the test_case to complete\n- timeout = time.time() + int(test_case.get('evaluation_timeout')) \\\n- if test_case.get('evaluation_timeout', None) and \\\n- isinstance(test_case['evaluation_timeout'], int) \\\n- else time.time() + DEFAULT_EVALUAITON_TIMEOUT\n- while not evaluation_rule.all_succeeded():\n- try:\n- event = q.get(timeout=1)\n- if ':' in event.type:\n- event.data['__type__'] = event.type.split(':')[1]\n- else:\n- event.data['__type__'] = event.type\n+ timeout = self.get_timeout(test_case)\n \n- evaluation_rule.evaluate(event.data)\n- if event.type == 'mycroft.skill.handler.complete':\n- break\n- except Empty:\n- pass\n- if time.time() > timeout:\n+ while not evaluation_rule.all_succeeded():\n+ # Process the queue until a skill handler sends a complete message\n+ if self.check_queue(q, evaluation_rule) or time.time() > timeout:\n break\n \n- # Stop emmiter from sending on queue\n+ self.shutdown_emitter(s)\n+\n+ # Report test result if failed\n+ return self.results(evaluation_rule)\n+\n+ def get_timeout(self, test_case):\n+ \"\"\"Find any timeout specified in test case.\n+\n+ If no timeout is specified return the default.\n+ \"\"\"\n+ if (test_case.get('evaluation_timeout', None) and\n+ isinstance(test_case['evaluation_timeout'], int)):\n+ return time.time() + int(test_case.get('evaluation_timeout'))\n+ else:\n+ return time.time() + DEFAULT_EVALUAITON_TIMEOUT\n+\n+ def check_queue(self, q, evaluation_rule):\n+ \"\"\"Check the queue for events.\n+\n+ If event indicating skill completion is found returns True, else False.\n+ \"\"\"\n+ try:\n+ event = q.get(timeout=1)\n+ if ':' in event.type:\n+ event.data['__type__'] = event.type.split(':')[1]\n+ else:\n+ event.data['__type__'] = event.type\n+\n+ evaluation_rule.evaluate(event.data)\n+ if event.type == 'mycroft.skill.handler.complete':\n+ return True\n+ except Empty:\n+ pass\n+ return False\n+\n+ def shutdown_emitter(self, s):\n+ \"\"\"Shutdown the skill connection to the bus.\"\"\"\n+ # Stop emiter from sending on queue\n s.bus.q = None\n \n # remove the skill which is not responding\n self.emitter.remove_all_listeners('speak')\n self.emitter.remove_all_listeners('mycroft.skill.handler.complete')\n- # Report test result if failed\n+\n+ def results(self, evaluation_rule):\n+ \"\"\"Display and report the results.\"\"\"\n if not evaluation_rule.all_succeeded():\n 
self.failure_msg = str(evaluation_rule.get_failure())\n print(color.FAIL + \"Evaluation failed\" + color.RESET)\n@@ -497,7 +567,7 @@ def load_dialog_list(skill, dialog):\n return dialogs\n \n \n-class EvaluationRule(object):\n+class EvaluationRule:\n \"\"\"\n This class initially convert the test_case json file to internal rule\n format, which is stored throughout the testcase run. All Messages on\n", "new_path": "test/integrationtests/skills/skill_tester.py", "old_path": "test/integrationtests/skills/skill_tester.py" } ]
29db163a78cddc14e4d9a788c8bc7ed8e6eda9cf
mycroftai/mycroft-core
null
null
Move listen trigger to last chunk of sentence. If rendering a chunk of a sentence takes too long, the audio queue may run dry and trigger listening too early. This moves the listening trigger to after the last chunk.
[ { "change_type": "MODIFY", "diff": "@@ -35,11 +35,6 @@ mimic_fallback_obj = None\n _last_stop_signal = 0\n \n \n-def _start_listener(_):\n- \"\"\"Force Mycroft to start listening (as if 'Hey Mycroft' was spoken).\"\"\"\n- bus.emit(Message('mycroft.mic.listen'))\n-\n-\n def handle_speak(event):\n \"\"\"Handle \"speak\" message\n \n@@ -60,11 +55,7 @@ def handle_speak(event):\n stopwatch = Stopwatch()\n stopwatch.start()\n utterance = event.data['utterance']\n- if event.data.get('expect_response', False):\n- # When expect_response is requested, the listener will be restarted\n- # at the end of the next bit of spoken audio.\n- bus.once('recognizer_loop:audio_output_end', _start_listener)\n-\n+ listen = event.data.get('expect_response', False)\n # This is a bit of a hack for Picroft. The analog audio on a Pi blocks\n # for 30 seconds fairly often, so we don't want to break on periods\n # (decreasing the chance of encountering the block). But we will\n@@ -82,7 +73,10 @@ def handle_speak(event):\n utterance = re.sub(r'\\b([A-za-z][\\.])(\\s+)', r'\\g<1>', utterance)\n chunks = re.split(r'(?<!\\w\\.\\w.)(?<![A-Z][a-z]\\.)(?<=\\.|\\;|\\?)\\s',\n utterance)\n- for chunk in chunks:\n+ # Apply the listen flag to the last chunk, set the rest to False\n+ chunks = [(chunks[i], listen if i == len(chunks) - 1 else False)\n+ for i in range(len(chunks))]\n+ for chunk, listen in chunks:\n # Check if somthing has aborted the speech\n if (_last_stop_signal > start or\n check_for_signal('buttonPress')):\n@@ -90,7 +84,7 @@ def handle_speak(event):\n tts.playback.clear()\n break\n try:\n- mute_and_speak(chunk, ident)\n+ mute_and_speak(chunk, ident, listen)\n except KeyboardInterrupt:\n raise\n except Exception:\n@@ -103,7 +97,7 @@ def handle_speak(event):\n 'tts': tts.__class__.__name__})\n \n \n-def mute_and_speak(utterance, ident):\n+def mute_and_speak(utterance, ident, listen=False):\n \"\"\"Mute mic and start speaking the utterance using selected tts backend.\n \n Arguments:\n@@ -125,7 +119,7 @@ def mute_and_speak(utterance, ident):\n \n LOG.info(\"Speak: \" + utterance)\n try:\n- tts.execute(utterance, ident)\n+ tts.execute(utterance, ident, listen)\n except RemoteTTSTimeoutException as e:\n LOG.error(e)\n mimic_fallback_tts(utterance, ident)\n", "new_path": "mycroft/audio/speech.py", "old_path": "mycroft/audio/speech.py" }, { "change_type": "MODIFY", "diff": "@@ -19,7 +19,7 @@ import random\n import re\n from abc import ABCMeta, abstractmethod\n from threading import Thread\n-from time import time\n+from time import time, sleep\n \n import os.path\n from os.path import dirname, exists, isdir, join\n@@ -83,7 +83,8 @@ class PlaybackThread(Thread):\n \"\"\"Thread main loop. 
get audio and viseme data from queue and play.\"\"\"\n while not self._terminated:\n try:\n- snd_type, data, visemes, ident = self.queue.get(timeout=2)\n+ snd_type, data, visemes, ident, listen = \\\n+ self.queue.get(timeout=2)\n self.blink(0.5)\n if not self._processing_queue:\n self._processing_queue = True\n@@ -111,7 +112,7 @@ class PlaybackThread(Thread):\n except Exception as e:\n LOG.exception(e)\n if self._processing_queue:\n- self.tts.end_audio()\n+ self.tts.end_audio(listen)\n self._processing_queue = False\n \n def show_visemes(self, pairs):\n@@ -196,7 +197,7 @@ class TTS(metaclass=ABCMeta):\n # Create signals informing start of speech\n self.bus.emit(Message(\"recognizer_loop:audio_output_start\"))\n \n- def end_audio(self):\n+ def end_audio(self, listen):\n \"\"\"Helper function for child classes to call in execute().\n \n Sends the recognizer_loop:audio_output_end message, indicating\n@@ -205,6 +206,8 @@ class TTS(metaclass=ABCMeta):\n \"\"\"\n \n self.bus.emit(Message(\"recognizer_loop:audio_output_end\"))\n+ if listen:\n+ self.bus.emit(Message('mycroft.mic.listen'))\n # Clean the cache as needed\n cache_dir = mycroft.util.get_cache_directory(\"tts/\" + self.tts_name)\n mycroft.util.curate_cache(cache_dir, min_free_percent=100)\n@@ -287,15 +290,17 @@ class TTS(metaclass=ABCMeta):\n \"\"\"\n return [sentence]\n \n- def execute(self, sentence, ident=None):\n+ def execute(self, sentence, ident=None, listen=False):\n \"\"\"Convert sentence to speech, preprocessing out unsupported ssml\n \n The method caches results if possible using the hash of the\n sentence.\n \n- Args:\n+ Arguments:\n sentence: Sentence to be spoken\n ident: Id reference to current interaction\n+ listen: True if listen should be triggered at the end\n+ of the utterance.\n \"\"\"\n sentence = self.validate_ssml(sentence)\n \n@@ -307,7 +312,11 @@ class TTS(metaclass=ABCMeta):\n self.spellings[word.lower()])\n \n chunks = self._preprocess_sentence(sentence)\n- for sentence in chunks:\n+ # Apply the listen flag to the last chunk, set the rest to False\n+ chunks = [(chunks[i], listen if i == len(chunks) - 1 else False)\n+ for i in range(len(chunks))]\n+\n+ for sentence, l in chunks:\n key = str(hashlib.md5(\n sentence.encode('utf-8', 'ignore')).hexdigest())\n wav_file = os.path.join(\n@@ -323,7 +332,7 @@ class TTS(metaclass=ABCMeta):\n self.save_phonemes(key, phonemes)\n \n vis = self.viseme(phonemes) if phonemes else None\n- self.queue.put((self.audio_ext, wav_file, vis, ident))\n+ self.queue.put((self.audio_ext, wav_file, vis, ident, l))\n \n def viseme(self, phonemes):\n \"\"\"Create visemes from phonemes. Needs to be implemented for all\n", "new_path": "mycroft/tts/__init__.py", "old_path": "mycroft/tts/__init__.py" } ]
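The commit above defers the mycroft.mic.listen trigger until the final chunk of a chunked utterance by pairing every chunk with a boolean that is true only for the last element. The same pairing can be written with enumerate; a small sketch:

def pair_chunks_with_listen(chunks, listen):
    """Attach the listen flag to the last chunk only; earlier chunks get False."""
    last = len(chunks) - 1
    return [(chunk, listen and i == last) for i, chunk in enumerate(chunks)]


chunks = ['First sentence.', 'Second sentence.', 'Third sentence.']
for chunk, listen_flag in pair_chunks_with_listen(chunks, listen=True):
    print(chunk, '-> listen' if listen_flag else '')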
e74b0e3900b8391dd9b075a7fa098b37e74e31ba
mycroftai/mycroft-core
null
null
Add audioservice method tests. This includes init, shutdown, play, stop, next, previous, pause, seek_backward, seek_forward, queue, lower_volume, restore_volume and resume.
[ { "change_type": "MODIFY", "diff": "@@ -273,7 +273,7 @@ class AudioService:\n self.current.lower_volume()\n self.volume_is_low = True\n \n- def _restore_volume(self, message):\n+ def _restore_volume(self, message=None):\n \"\"\"\n Is triggered when mycroft is done speaking and restores the volume\n \n", "new_path": "mycroft/audio/audioservice.py", "old_path": "mycroft/audio/audioservice.py" }, { "change_type": "MODIFY", "diff": "@@ -12,27 +12,45 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n #\n+\n+from unittest.mock import Mock\n from mycroft.audio.services import AudioBackend\n \n \n class WorkingBackend(AudioBackend):\n def __init__(self, config, bus, name='Working'):\n super(WorkingBackend, self).__init__(config, bus)\n+\n+ # Override instance methods with mocks\n self.name = name\n+ self.add_list = Mock()\n+ self.clear_list = Mock()\n+ self.play = Mock()\n+ self.pause = Mock()\n+ self.resume = Mock()\n+ self.stop = Mock()\n+ self.next = Mock()\n+ self.previous = Mock()\n+ self.lower_volume = Mock()\n+ self.restore_volume = Mock()\n+ self.seek_forward = Mock()\n+ self.seek_backward = Mock()\n+ self.track_info = Mock()\n+ self.shutdown = Mock()\n \n def supported_uris(self):\n return ['file', 'http']\n \n- def add_list(self, tracks):\n+ def play(self):\n pass\n \n- def clear_list(self):\n+ def stop(self):\n pass\n \n- def play(self):\n+ def add_list(self, playlist):\n pass\n \n- def stop(self):\n+ def clear_list(self):\n pass\n \n \n", "new_path": "test/unittests/audio/services/working/__init__.py", "old_path": "test/unittests/audio/services/working/__init__.py" }, { "change_type": "MODIFY", "diff": "@@ -12,23 +12,31 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n #\n-import unittest\n-\n from os.path import dirname, join, abspath\n+import unittest\n+import unittest.mock as mock\n \n import mycroft.audio.audioservice as audio_service\n+from mycroft.messagebus import Message\n \n-\"\"\"\n- Tests for service loader\n-\"\"\"\n+from .services.working import WorkingBackend\n+\"\"\"Tests for Audioservice class\"\"\"\n \n+seek_message = Message('seek', data={'seconds': 5})\n \n-class MockEmitter(object):\n+\n+class MockEmitter:\n def __init__(self):\n self.reset()\n \n+ def once(self, event_type, function):\n+ pass\n+\n+ def on(self, event_type, function):\n+ pass\n+\n def emit(self, message):\n- self.types.append(message.type)\n+ self.types.append(message.msg_type)\n self.results.append(message.data)\n \n def get_types(self):\n@@ -37,22 +45,170 @@ class MockEmitter(object):\n def get_results(self):\n return self.results\n \n+ def remove(self, *args, **kwargs):\n+ pass\n+\n def reset(self):\n self.types = []\n self.results = []\n \n \n+def setup_mock_backends(mock_load_services, emitter):\n+ backend = WorkingBackend({}, emitter)\n+ second_backend = WorkingBackend({}, emitter, 'second')\n+ mock_load_services.return_value = [backend, second_backend]\n+ return backend, second_backend\n+\n+\n class TestService(unittest.TestCase):\n emitter = MockEmitter()\n service_path = abspath(join(dirname(__file__), 'services'))\n \n- def setUp(self):\n- pass\n-\n def test_load(self):\n- services = audio_service.load_services({}, TestService.emitter,\n- TestService.service_path)\n- self.assertEqual(len(services), 1)\n+ service = audio_service.load_services({}, TestService.emitter,\n+ TestService.service_path)\n+ self.assertEqual(len(service), 1)\n+\n+ 
@mock.patch('mycroft.audio.audioservice.load_services')\n+ def test_audio_backend_shutdown(self, mock_load_services):\n+ \"\"\"Test shutdown of audio backend.\"\"\"\n+ backend, second_backend = setup_mock_backends(mock_load_services,\n+ self.emitter)\n+ service = audio_service.AudioService(self.emitter)\n+ service.load_services_callback()\n+\n+ service.default = backend\n+\n+ # Check that all backend shutdown methods are called on audioservice\n+ # shutdown\n+ service.shutdown()\n+ self.assertTrue(backend.shutdown.called)\n+ self.assertTrue(second_backend.shutdown.called)\n+\n+ @mock.patch('mycroft.audio.audioservice.load_services')\n+ def test_audio_service_methods_not_playing(self, mock_load_services):\n+ \"\"\"Check that backend methods aren't called when stopped.\"\"\"\n+ backend, second_backend = setup_mock_backends(mock_load_services,\n+ self.emitter)\n+ mock_load_services.return_value = [backend, second_backend]\n+\n+ service = audio_service.AudioService(self.emitter)\n+ service.load_services_callback()\n+\n+ service.default = backend\n+\n+ # Check that next and prev aren't called if there is nothing playing\n+ service._next()\n+ self.assertFalse(backend.next.called)\n+ service._prev()\n+ self.assertFalse(backend.previous.called)\n+ service._pause()\n+ self.assertFalse(backend.pause.called)\n+ service._resume()\n+ self.assertFalse(backend.resume.called)\n+ service._seek_forward(seek_message)\n+ self.assertFalse(backend.seek_forward.called)\n+ service._seek_backward(seek_message)\n+ self.assertFalse(backend.seek_backward.called)\n+ service._lower_volume()\n+ self.assertFalse(service.volume_is_low)\n+ self.assertFalse(backend.lower_volume.called)\n+ service._restore_volume()\n+ self.assertFalse(backend.lower_volume.called)\n+\n+ service.shutdown()\n+\n+ @mock.patch('mycroft.audio.audioservice.load_services')\n+ def test_audio_service_methods_playing(self, mock_load_services):\n+ \"\"\"Check that backend methods are called during playback.\"\"\"\n+ backend, second_backend = setup_mock_backends(mock_load_services,\n+ self.emitter)\n+ mock_load_services.return_value = [backend, second_backend]\n+\n+ service = audio_service.AudioService(self.emitter)\n+ service.load_services_callback()\n+\n+ service.default = backend\n+\n+ # Check that play doesn't play unsupported media uri type\n+ m = Message('audio.service.play', data={'tracks': ['asdf://hello']})\n+ service._play(m)\n+ self.assertFalse(backend.play.called)\n+\n+ # Check that play plays supported media uri type\n+ m = Message('audio.service.play', data={'tracks': ['http://hello']})\n+ service._play(m)\n+ self.assertTrue(backend.play.called)\n+\n+ # Check that next and prev are called if a backend is playing.\n+ service._next()\n+ self.assertTrue(backend.next.called)\n+ service._prev()\n+ self.assertTrue(backend.previous.called)\n+ service._pause()\n+ self.assertTrue(backend.pause.called)\n+ service._resume()\n+ self.assertTrue(backend.resume.called)\n+ service._lower_volume()\n+ self.assertTrue(service.volume_is_low)\n+ self.assertTrue(backend.lower_volume.called)\n+ service._restore_volume()\n+ self.assertFalse(service.volume_is_low)\n+ self.assertTrue(backend.lower_volume.called)\n+\n+ # Check that play respects requested backends\n+ m = Message('audio.service.play',\n+ data={'tracks': [['http://hello', 'audio/mp3']],\n+ 'utterance': 'using second'})\n+ service._play(m)\n+ self.assertTrue(second_backend.play.called)\n+\n+ service._seek_forward(seek_message)\n+ second_backend.seek_forward.assert_called_with(5)\n+ 
service._seek_backward(seek_message)\n+ second_backend.seek_backward.assert_called_with(5)\n+\n+ # Check that stop stops the active backend only if stop is received\n+ # more than 1 second from last play.\n+ second_backend.stop.reset_mock()\n+ self.assertFalse(second_backend.stop.called)\n+ service._stop()\n+ self.assertFalse(second_backend.stop.called)\n+ service.play_start_time -= 1\n+ service._stop()\n+ self.assertTrue(second_backend.stop.called)\n+\n+ service.shutdown()\n+\n+ @mock.patch('mycroft.audio.audioservice.load_services')\n+ def test_audio_service_queue_methods(self, mock_load_services):\n+ \"\"\"Check that backend methods are called during playback.\"\"\"\n+ backend, second_backend = setup_mock_backends(mock_load_services,\n+ self.emitter)\n+ mock_load_services.return_value = [backend, second_backend]\n+\n+ service = audio_service.AudioService(self.emitter)\n+ service.load_services_callback()\n+\n+ service.default = backend\n+\n+ # Check that play doesn't play unsupported media uri type\n+ # Test queueing starts playback if stopped\n+ backend.play.reset_mock()\n+ backend.add_list.reset_mock()\n+ m = Message('audio.service.queue', data={'tracks': ['http://hello']})\n+ service._queue(m)\n+ backend.add_list.called_with(['http://hello'])\n+ self.assertTrue(backend.play.called)\n+\n+ # Test queuing doesn't call play if play is in progress\n+ backend.play.reset_mock()\n+ backend.add_list.reset_mock()\n+ service._queue(m)\n+ backend.add_list.called_with(['http://hello'])\n+ self.assertFalse(backend.play.called)\n+\n+ service.shutdown()\n \n \n if __name__ == \"__main__\":\n", "new_path": "test/unittests/audio/test_service.py", "old_path": "test/unittests/audio/test_service.py" } ]
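The test commit above replaces the fake audio backend's methods with unittest.mock.Mock objects so each test can assert exactly which backend calls the AudioService made and with what arguments. Outside the mycroft tree the same technique looks like this; the Player class and its rules are invented purely for illustration.

from unittest.mock import Mock


class Player:
    """Toy service that delegates to a pluggable backend."""
    def __init__(self, backend):
        self.backend = backend
        self.playing = False

    def play(self, track):
        self.backend.play(track)
        self.playing = True

    def next(self):
        if self.playing:  # only forward the call when something is playing
            self.backend.next()


backend = Mock()
player = Player(backend)

player.next()
assert not backend.next.called  # nothing playing yet, backend untouched

player.play('http://example.com/song.mp3')
backend.play.assert_called_with('http://example.com/song.mp3')

player.next()
assert backend.next.called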
b7d709c3c86af4f58cc8a8f7bbd089f319a0718b
mycroftai/mycroft-core
null
null
Add wait_for_message() method to messagebus client. Refactor message waiting into a MessageWaiter class so the same code serves both wait_for_message and wait_for_response. Add some basic unit tests.
[ { "change_type": "MODIFY", "diff": "@@ -11,4 +11,4 @@\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n-from .client import MessageBusClient\n+from .client import MessageBusClient, MessageWaiter\n", "new_path": "mycroft/messagebus/client/__init__.py", "old_path": "mycroft/messagebus/client/__init__.py" }, { "change_type": "MODIFY", "diff": "@@ -30,6 +30,53 @@ from mycroft.util.log import LOG\n from .threaded_event_emitter import ThreadedEventEmitter\n \n \n+class MessageWaiter:\n+ \"\"\"Wait for a single message.\n+\n+ Encapsulate the wait for a message logic separating the setup from\n+ the actual waiting act so the waiting can be setuo, actions can be\n+ performed and _then_ the message can be waited for.\n+\n+ Argunments:\n+ bus: Bus to check for messages on\n+ message_type: message type to wait for\n+ \"\"\"\n+ def __init__(self, bus, message_type):\n+ self.bus = bus\n+ self.msg_type = message_type\n+ self.received_msg = None\n+ # Setup response handler\n+ self.bus.once(message_type, self._handler)\n+\n+ def _handler(self, message):\n+ \"\"\"Receive response data.\"\"\"\n+ self.received_msg = message\n+\n+ def wait(self, timeout=3.0):\n+ \"\"\"Wait for message.\n+\n+ Arguments:\n+ timeout (int or float): seconds to wait for message\n+\n+ Returns:\n+ Message or None\n+ \"\"\"\n+ start_time = time.monotonic()\n+ while self.received_msg is None:\n+ time.sleep(0.2)\n+ if time.monotonic() - start_time > timeout:\n+ try:\n+ self.bus.remove(self.msg_type, self._handler)\n+ except (ValueError, KeyError):\n+ # ValueError occurs on pyee 5.0.1 removing handlers\n+ # registered with once.\n+ # KeyError may theoretically occur if the event occurs as\n+ # the handler is removed\n+ pass\n+ break\n+ return self.received_msg\n+\n+\n class MessageBusClient:\n def __init__(self, host=None, port=None, route=None, ssl=None):\n config_overrides = dict(host=host, port=port, route=route, ssl=ssl)\n@@ -120,6 +167,19 @@ class MessageBusClient:\n LOG.warning('Could not send {} message because connection '\n 'has been closed'.format(message.msg_type))\n \n+ def wait_for_message(self, message_type, timeout=3.0):\n+ \"\"\"Wait for a message of a specific type.\n+\n+ Arguments:\n+ message_type (str): the message type of the expected message\n+ timeout: seconds to wait before timeout, defaults to 3\n+\n+ Returns:\n+ The received message or None if the response timed out\n+ \"\"\"\n+\n+ return MessageWaiter(self, message_type).wait(timeout)\n+\n def wait_for_response(self, message, reply_type=None, timeout=3.0):\n \"\"\"Send a message and wait for a response.\n \n@@ -132,32 +192,11 @@ class MessageBusClient:\n Returns:\n The received message or None if the response timed out\n \"\"\"\n- response = None\n-\n- def handler(message):\n- \"\"\"Receive response data.\"\"\"\n- nonlocal response\n- response = message\n-\n- # Setup response handler\n- self.once(reply_type or message.msg_type + '.response', handler)\n- # Send request\n+ message_type = reply_type or message.msg_type + '.response'\n+ waiter = MessageWaiter(self, message_type) # Setup response handler\n+ # Send message and wait for it's response\n self.emit(message)\n- # Wait for response\n- start_time = time.monotonic()\n- while response is None:\n- time.sleep(0.2)\n- if time.monotonic() - start_time > timeout:\n- try:\n- self.remove(reply_type, handler)\n- except (ValueError, KeyError):\n- # ValueError occurs on pyee 1.0.1 
removing handlers\n- # registered with once.\n- # KeyError may theoretically occur if the event occurs as\n- # the handler is removed\n- pass\n- return None\n- return response\n+ return waiter.wait()\n \n def on(self, event_name, func):\n self.emitter.on(event_name, func)\n", "new_path": "mycroft/messagebus/client/client.py", "old_path": "mycroft/messagebus/client/client.py" }, { "change_type": "MODIFY", "diff": "@@ -12,9 +12,10 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n #\n-from unittest.mock import patch\n+from unittest import TestCase\n+from unittest.mock import patch, Mock\n \n-from mycroft.messagebus.client import MessageBusClient\n+from mycroft.messagebus.client import MessageBusClient, MessageWaiter\n \n WS_CONF = {\n 'websocket': {\n@@ -37,3 +38,22 @@ class TestMessageBusClient:\n def test_create_client(self, mock_conf):\n mc = MessageBusClient()\n assert mc.client.url == 'ws://testhost:1337/core'\n+\n+\n+class TestMessageWaiter(TestCase):\n+ def test_message_wait_success(self):\n+ bus = Mock()\n+ waiter = MessageWaiter(bus, 'delayed.message')\n+ bus.once.assert_called_with('delayed.message', waiter._handler)\n+\n+ test_msg = Mock(name='test_msg')\n+ waiter._handler(test_msg) # Inject response\n+\n+ self.assertEqual(waiter.wait(), test_msg)\n+\n+ def test_message_wait_timeout(self):\n+ bus = Mock()\n+ waiter = MessageWaiter(bus, 'delayed.message')\n+ bus.once.assert_called_with('delayed.message', waiter._handler)\n+\n+ self.assertEqual(waiter.wait(0.3), None)\n", "new_path": "test/unittests/messagebus/client/test_client.py", "old_path": "test/unittests/messagebus/client/test_client.py" } ]
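The MessageWaiter class introduced above separates setting up the handler from blocking on the reply, so a caller can register interest, perform other actions, and only then wait. A minimal usage sketch, using a Mock bus exactly as the new unit tests do (against a real, connected MessageBusClient the one-line wrapper bus.wait_for_message() covers the common case):

from unittest.mock import Mock

from mycroft.messagebus.client import MessageWaiter

bus = Mock()                                    # stand-in for a connected MessageBusClient
waiter = MessageWaiter(bus, 'delayed.message')  # registers a one-shot handler via bus.once()
# ... do other work that should eventually produce 'delayed.message' ...
waiter._handler(Mock(name='reply'))             # the unit tests inject the reply this way
message = waiter.wait(timeout=1.0)              # the received Message, or None on timeout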
bea3bac2bff8b1a84d81bb53c4d8565f2e21194c
mycroftai/mycroft-core
null
null
Make mimic get_tts() return phonemes pre-parsed. This makes the phonemes JSON de/encodable, like mimic2.
[ { "change_type": "MODIFY", "diff": "@@ -91,6 +91,19 @@ def download_subscriber_voices(selected_voice):\n .format(voice))\n \n \n+def parse_phonemes(phonemes):\n+ \"\"\"Parse mimic phoneme string into a list of phone, duration pairs.\n+\n+ Arguments\n+ phonemes (bytes): phoneme output from mimic\n+ Returns:\n+ (list) list of phoneme duration pairs\n+ \"\"\"\n+ phon_str = phonemes.decode()\n+ pairs = phon_str.split(' ')\n+ return [pair.split(':') for pair in pairs if ':' in pair]\n+\n+\n class Mimic(TTS):\n \"\"\"TTS interface for local mimic v1.\"\"\"\n def __init__(self, lang, config):\n@@ -157,24 +170,20 @@ class Mimic(TTS):\n \"\"\"\n phonemes = subprocess.check_output(self.args + ['-o', wav_file,\n '-t', sentence])\n- return wav_file, phonemes.decode()\n+ return wav_file, parse_phonemes(phonemes)\n \n- def viseme(self, output):\n+ def viseme(self, phoneme_pairs):\n \"\"\"Convert phoneme string to visemes.\n \n Arguments:\n- output (str): Phoneme output from mimic\n+ phoneme_pairs (list): Phoneme output from mimic\n \n Returns:\n (list) list of tuples of viseme and duration\n \"\"\"\n visemes = []\n- pairs = str(output).split(\" \")\n- for pair in pairs:\n- pho_dur = pair.split(\":\") # phoneme:duration\n- if len(pho_dur) == 2:\n- visemes.append((VISIMES.get(pho_dur[0], '4'),\n- float(pho_dur[1])))\n+ for phon, dur in phoneme_pairs:\n+ visemes.append((VISIMES.get(phon, '4'), float(dur)))\n return visemes\n \n \n", "new_path": "mycroft/tts/mimic_tts.py", "old_path": "mycroft/tts/mimic_tts.py" }, { "change_type": "MODIFY", "diff": "@@ -21,22 +21,27 @@ class TestMimic(unittest.TestCase):\n @mock.patch('mycroft.tts.mimic_tts.subprocess')\n def test_get_tts(self, mock_subprocess, _, mock_device_api):\n mock_device_api.return_value = device_instance_mock\n+ mock_subprocess.check_output().decode.return_value = 's:1 pau:2'\n+ mock_subprocess.check_output.reset_mock()\n m = Mimic('en-US', {})\n wav, phonemes = m.get_tts('hello', 'abc.wav')\n mock_subprocess.check_output.assert_called_once_with(\n m.args + ['-o', 'abc.wav', '-t', 'hello'])\n- self.assertEqual(phonemes, mock_subprocess.check_output().decode())\n+ self.assertEqual(phonemes, [['s', '1'], ['pau', '2']])\n \n def test_viseme(self, _, mock_device_api):\n mock_device_api.return_value = device_instance_mock\n m = Mimic('en-US', {})\n- viseme_string = ('pau:0.206 m:0.287 ah:0.401 ch:0.513 dh:0.578 '\n- 'iy:0.699 s:0.835 ey:1.013 m:1.118 w:1.213 ey:1.345 '\n- 'dh:1.415 ae:1.491 t:1.539 b:1.616 r:1.671 ih:1.744 '\n- 'k:1.819 s:1.923 d:1.978 ow:2.118 n:2.206 t:2.301 '\n- 'pau:2.408')\n-\n- vis = m.viseme(viseme_string)\n+ phoneme_list = (['pau', 0.206], ['m', 0.287], ['ah', 0.401],\n+ ['ch', 0.513], ['dh', 0.578], ['iy', 0.699],\n+ ['s', 0.835], ['ey', 1.013], ['m', 1.118],\n+ ['w', 1.213], ['ey', 1.345], ['dh', 1.415],\n+ ['ae', 1.491], ['t', 1.539], ['b', 1.616],\n+ ['r', 1.671], ['ih', 1.744], ['k', 1.819],\n+ ['s', 1.923], ['d', 1.978], ['ow', 2.118],\n+ ['n', 2.206], ['t', 2.301], ['pau', 2.408])\n+\n+ vis = m.viseme(phoneme_list)\n self.assertEqual(vis,\n [('4', 0.206), ('4', 0.287), ('0', 0.401),\n ('3', 0.513), ('3', 0.578), ('0', 0.699),\n", "new_path": "test/unittests/tts/test_mimic_tts.py", "old_path": "test/unittests/tts/test_mimic_tts.py" } ]
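The new module-level parse_phonemes() turns mimic's raw byte output into plain lists of [phone, duration] pairs, which is what makes the result JSON de/encodable. A small sketch of the round trip; the byte string is an illustrative mimic output, not taken from the diff:

import json

from mycroft.tts.mimic_tts import parse_phonemes

raw = b'pau:0.206 m:0.287 ah:0.401'   # mimic prints space-separated phone:time pairs
pairs = parse_phonemes(raw)           # [['pau', '0.206'], ['m', '0.287'], ['ah', '0.401']]
encoded = json.dumps(pairs)           # can be cached next to the wav file
assert json.loads(encoded) == pairs   # and decoded again before passing to Mimic.viseme()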
fc45a383d14b85a54104e2bffe24035f2caa103c
pyglet/pyglet
null
null
Remove thread in DirectSound. Instead, we use pyglet.clock.schedule_interval_soft to regularly refill the DirectSound buffer with new audio data.
[ { "change_type": "MODIFY", "diff": "@@ -35,8 +35,8 @@ from __future__ import absolute_import, print_function\n \r\n import ctypes\r\n import math\r\n-import threading\r\n \r\n+import pyglet\r\n from . import interface\r\n from pyglet.debug import debug_print\r\n from pyglet.media.events import MediaEvent\r\n@@ -84,13 +84,6 @@ class DirectSoundAudioPlayer(AbstractAudioPlayer):\n self.driver = driver\r\n self._ds_driver = ds_driver\r\n \r\n- # Locking strategy:\r\n- # All DirectSound calls should be locked. All instance vars relating\r\n- # to buffering/filling/time/events should be locked (used by both\r\n- # application and worker thread). Other instance vars (consts and\r\n- # 3d vars) do not need to be locked.\r\n- self._lock = threading.RLock()\r\n-\r\n # Desired play state (may be actually paused due to underrun -- not\r\n # implemented yet).\r\n self._playing = False\r\n@@ -133,69 +126,58 @@ class DirectSoundAudioPlayer(AbstractAudioPlayer):\n \r\n self.refill(self._buffer_size)\r\n \r\n- def __del__(self):\r\n- try:\r\n- self.delete()\r\n- except:\r\n- pass\r\n-\r\n- def delete(self):\r\n- if self.driver and self.driver.worker:\r\n- self.driver.worker.remove(self)\r\n-\r\n- with self._lock:\r\n- self._ds_buffer = None\r\n-\r\n def play(self):\r\n assert _debug('DirectSound play')\r\n- self.driver.worker.add(self)\r\n+ pyglet.clock.schedule_interval_soft(self._check_refill, 0.1)\r\n \r\n- with self._lock:\r\n- if not self._playing:\r\n- self._get_audiodata() # prebuffer if needed\r\n- self._playing = True\r\n- self._ds_buffer.play()\r\n+ if not self._playing:\r\n+ self._get_audiodata() # prebuffer if needed\r\n+ self._playing = True\r\n+ self._ds_buffer.play()\r\n \r\n assert _debug('return DirectSound play')\r\n \r\n def stop(self):\r\n assert _debug('DirectSound stop')\r\n- if self.driver and self.driver.worker:\r\n- self.driver.worker.remove(self)\r\n+ # if self.driver and self.driver.worker:\r\n+ # self.driver.worker.remove(self)\r\n+ pyglet.clock.unschedule(self._check_refill)\r\n \r\n- with self._lock:\r\n- if self._playing:\r\n- self._playing = False\r\n- self._ds_buffer.stop()\r\n+ if self._playing:\r\n+ self._playing = False\r\n+ self._ds_buffer.stop()\r\n \r\n assert _debug('return DirectSound stop')\r\n \r\n def clear(self):\r\n assert _debug('DirectSound clear')\r\n- with self._lock:\r\n- self._ds_buffer.current_position = 0\r\n- self._play_cursor_ring = self._write_cursor_ring = 0\r\n- self._play_cursor = self._write_cursor\r\n- self._eos_cursor = None\r\n- self._audiodata_buffer = None\r\n- del self._events[:]\r\n- del self._timestamps[:]\r\n+ self._ds_buffer.current_position = 0\r\n+ self._play_cursor_ring = self._write_cursor_ring = 0\r\n+ self._play_cursor = self._write_cursor\r\n+ self._eos_cursor = None\r\n+ self._audiodata_buffer = None\r\n+ del self._events[:]\r\n+ del self._timestamps[:]\r\n+\r\n+ def _check_refill(self, dt): # Need a better name!\r\n+ write_size = self.get_write_size()\r\n+ if write_size > self.min_buffer_size:\r\n+ self.refill(write_size)\r\n \r\n def refill(self, write_size):\r\n- with self._lock:\r\n- while write_size > 0:\r\n- assert _debug('refill, write_size =', write_size)\r\n- audio_data = self._get_audiodata()\r\n-\r\n- if audio_data is not None:\r\n- assert _debug('write', audio_data.length)\r\n- length = min(write_size, audio_data.length)\r\n- self.write(audio_data, length)\r\n- write_size -= length\r\n- else:\r\n- assert _debug('write silence')\r\n- self.write(None, write_size)\r\n- write_size = 0\r\n+ while write_size > 0:\r\n+ 
assert _debug('refill, write_size =', write_size)\r\n+ audio_data = self._get_audiodata()\r\n+\r\n+ if audio_data is not None:\r\n+ assert _debug('write', audio_data.length)\r\n+ length = min(write_size, audio_data.length)\r\n+ self.write(audio_data, length)\r\n+ write_size -= length\r\n+ else:\r\n+ assert _debug('write silence')\r\n+ self.write(None, write_size)\r\n+ write_size = 0\r\n \r\n def _has_underrun(self):\r\n return (self._eos_cursor is not None\r\n@@ -259,35 +241,32 @@ class DirectSoundAudioPlayer(AbstractAudioPlayer):\n (ts_cursor, audio_data.timestamp + audio_data.duration))\r\n \r\n def update_play_cursor(self):\r\n- with self._lock:\r\n- play_cursor_ring = self._ds_buffer.current_position.play_cursor\r\n- if play_cursor_ring < self._play_cursor_ring:\r\n- # Wrapped around\r\n- self._play_cursor += self._buffer_size - self._play_cursor_ring\r\n- self._play_cursor_ring = 0\r\n- self._play_cursor += play_cursor_ring - self._play_cursor_ring\r\n- self._play_cursor_ring = play_cursor_ring\r\n+ play_cursor_ring = self._ds_buffer.current_position.play_cursor\r\n+ if play_cursor_ring < self._play_cursor_ring:\r\n+ # Wrapped around\r\n+ self._play_cursor += self._buffer_size - self._play_cursor_ring\r\n+ self._play_cursor_ring = 0\r\n+ self._play_cursor += play_cursor_ring - self._play_cursor_ring\r\n+ self._play_cursor_ring = play_cursor_ring\r\n \r\n self._dispatch_pending_events()\r\n self._cleanup_timestamps()\r\n self._check_underrun()\r\n \r\n def _dispatch_pending_events(self):\r\n- with self._lock:\r\n- pending_events = []\r\n- while self._events and self._events[0][0] <= self._play_cursor:\r\n- _, event = self._events.pop(0)\r\n- pending_events.append(event)\r\n- assert _debug('Dispatching pending events: {}'.format(pending_events))\r\n- assert _debug('Remaining events: {}'.format(self._events))\r\n+ pending_events = []\r\n+ while self._events and self._events[0][0] <= self._play_cursor:\r\n+ _, event = self._events.pop(0)\r\n+ pending_events.append(event)\r\n+ assert _debug('Dispatching pending events: {}'.format(pending_events))\r\n+ assert _debug('Remaining events: {}'.format(self._events))\r\n \r\n for event in pending_events:\r\n event._sync_dispatch_to_player(self.player)\r\n \r\n def _cleanup_timestamps(self):\r\n- with self._lock:\r\n- while self._timestamps and self._timestamps[0][0] < self._play_cursor:\r\n- del self._timestamps[0]\r\n+ while self._timestamps and self._timestamps[0][0] < self._play_cursor:\r\n+ del self._timestamps[0]\r\n \r\n def _check_underrun(self):\r\n if self._playing and self._has_underrun():\r\n@@ -299,9 +278,8 @@ class DirectSoundAudioPlayer(AbstractAudioPlayer):\n def get_write_size(self):\r\n self.update_play_cursor()\r\n \r\n- with self._lock:\r\n- play_cursor = self._play_cursor\r\n- write_cursor = self._write_cursor\r\n+ play_cursor = self._play_cursor\r\n+ write_cursor = self._write_cursor\r\n \r\n return self._buffer_size - max(write_cursor - play_cursor, 0)\r\n \r\n@@ -310,91 +288,82 @@ class DirectSoundAudioPlayer(AbstractAudioPlayer):\n if length == 0:\r\n return 0\r\n \r\n- with self._lock:\r\n- write_ptr = self._ds_buffer.lock(self._write_cursor_ring, length)\r\n- assert 0 < length <= self._buffer_size\r\n- assert length == write_ptr.audio_length_1.value + write_ptr.audio_length_2.value\r\n-\r\n- if audio_data:\r\n- ctypes.memmove(write_ptr.audio_ptr_1, audio_data.data, write_ptr.audio_length_1.value)\r\n- audio_data.consume(write_ptr.audio_length_1.value, self.source_group.audio_format)\r\n- if write_ptr.audio_length_2.value 
> 0:\r\n- ctypes.memmove(write_ptr.audio_ptr_2, audio_data.data, write_ptr.audio_length_2.value)\r\n- audio_data.consume(write_ptr.audio_length_2.value, self.source_group.audio_format)\r\n+ write_ptr = self._ds_buffer.lock(self._write_cursor_ring, length)\r\n+ assert 0 < length <= self._buffer_size\r\n+ assert length == write_ptr.audio_length_1.value + write_ptr.audio_length_2.value\r\n+\r\n+ if audio_data:\r\n+ ctypes.memmove(write_ptr.audio_ptr_1, audio_data.data, write_ptr.audio_length_1.value)\r\n+ audio_data.consume(write_ptr.audio_length_1.value, self.source_group.audio_format)\r\n+ if write_ptr.audio_length_2.value > 0:\r\n+ ctypes.memmove(write_ptr.audio_ptr_2, audio_data.data, write_ptr.audio_length_2.value)\r\n+ audio_data.consume(write_ptr.audio_length_2.value, self.source_group.audio_format)\r\n+ else:\r\n+ if self.source_group.audio_format.sample_size == 8:\r\n+ c = 0x80\r\n else:\r\n- if self.source_group.audio_format.sample_size == 8:\r\n- c = 0x80\r\n- else:\r\n- c = 0\r\n- ctypes.memset(write_ptr.audio_ptr_1, c, write_ptr.audio_length_1.value)\r\n- if write_ptr.audio_length_2.value > 0:\r\n- ctypes.memset(write_ptr.audio_ptr_2, c, write_ptr.audio_length_2.value)\r\n- self._ds_buffer.unlock(write_ptr)\r\n-\r\n- self._write_cursor += length\r\n- self._write_cursor_ring += length\r\n- self._write_cursor_ring %= self._buffer_size\r\n+ c = 0\r\n+ ctypes.memset(write_ptr.audio_ptr_1, c, write_ptr.audio_length_1.value)\r\n+ if write_ptr.audio_length_2.value > 0:\r\n+ ctypes.memset(write_ptr.audio_ptr_2, c, write_ptr.audio_length_2.value)\r\n+ self._ds_buffer.unlock(write_ptr)\r\n+\r\n+ self._write_cursor += length\r\n+ self._write_cursor_ring += length\r\n+ self._write_cursor_ring %= self._buffer_size\r\n \r\n def seek(self, timestamp):\r\n self.audio_diff_avg_count = 0\r\n self.audio_diff_cum = 0.0\r\n- with self._lock:\r\n- while True:\r\n- audio_data = self._get_audiodata()\r\n- assert _debug(\"Seeking audio timestamp {:.2f} sec. \"\r\n- \"Got audio packet starting at {:.2f} sec\".format(\r\n- timestamp, audio_data.timestamp))\r\n- if timestamp <= (audio_data.timestamp + audio_data.duration):\r\n- break\r\n- \r\n- self._audiodata_buffer = None\r\n- del self._events[:]\r\n- del self._timestamps[:]\r\n+ while True:\r\n+ audio_data = self._get_audiodata()\r\n+ assert _debug(\"Seeking audio timestamp {:.2f} sec. 
\"\r\n+ \"Got audio packet starting at {:.2f} sec\".format(\r\n+ timestamp, audio_data.timestamp))\r\n+ if timestamp <= (audio_data.timestamp + audio_data.duration):\r\n+ break\r\n+ \r\n+ self._audiodata_buffer = None\r\n+ del self._events[:]\r\n+ del self._timestamps[:]\r\n \r\n- if audio_data is not None:\r\n- assert _debug('write', audio_data.length)\r\n- self.write(audio_data, audio_data.length)\r\n+ if audio_data is not None:\r\n+ assert _debug('write', audio_data.length)\r\n+ self.write(audio_data, audio_data.length)\r\n \r\n def get_time(self):\r\n self.update_play_cursor()\r\n- with self._lock:\r\n- if self._timestamps:\r\n- cursor, ts = self._timestamps[0]\r\n- result = ts + (self._play_cursor - cursor) / \\\r\n- float(self.source_group.audio_format.bytes_per_second)\r\n- else:\r\n- result = None\r\n+ if self._timestamps:\r\n+ cursor, ts = self._timestamps[0]\r\n+ result = ts + (self._play_cursor - cursor) / \\\r\n+ float(self.source_group.audio_format.bytes_per_second)\r\n+ else:\r\n+ result = None\r\n \r\n return result\r\n \r\n def set_volume(self, volume):\r\n- with self._lock:\r\n- self._ds_buffer.volume = _gain2db(volume)\r\n+ self._ds_buffer.volume = _gain2db(volume)\r\n \r\n def set_position(self, position):\r\n if self._ds_buffer.is3d:\r\n- with self._lock:\r\n- self._ds_buffer.position = _convert_coordinates(position)\r\n+ self._ds_buffer.position = _convert_coordinates(position)\r\n \r\n def set_min_distance(self, min_distance):\r\n if self._ds_buffer.is3d:\r\n- with self._lock:\r\n- self._ds_buffer.min_distance = min_distance\r\n+ self._ds_buffer.min_distance = min_distance\r\n \r\n def set_max_distance(self, max_distance):\r\n if self._ds_buffer.is3d:\r\n- with self._lock:\r\n- self._ds_buffer.max_distance = max_distance\r\n+ self._ds_buffer.max_distance = max_distance\r\n \r\n def set_pitch(self, pitch):\r\n frequency = int(pitch * self.source_group.audio_format.sample_rate)\r\n- with self._lock:\r\n- self._ds_buffer.frequency = frequency\r\n+ self._ds_buffer.frequency = frequency\r\n \r\n def set_cone_orientation(self, cone_orientation):\r\n if self._ds_buffer.is3d:\r\n- with self._lock:\r\n- self._ds_buffer.cone_orientation = _convert_coordinates(cone_orientation)\r\n+ self._ds_buffer.cone_orientation = _convert_coordinates(cone_orientation)\r\n \r\n def set_cone_inner_angle(self, cone_inner_angle):\r\n if self._ds_buffer.is3d:\r\n@@ -409,14 +378,12 @@ class DirectSoundAudioPlayer(AbstractAudioPlayer):\n def _set_cone_angles(self):\r\n inner = min(self._cone_inner_angle, self._cone_outer_angle)\r\n outer = max(self._cone_inner_angle, self._cone_outer_angle)\r\n- with self._lock:\r\n- self._ds_buffer.set_cone_angles(inner, outer)\r\n+ self._ds_buffer.set_cone_angles(inner, outer)\r\n \r\n def set_cone_outer_gain(self, cone_outer_gain):\r\n if self._ds_buffer.is3d:\r\n volume = _gain2db(cone_outer_gain)\r\n- with self._lock:\r\n- self._ds_buffer.cone_outside_volume = volume\r\n+ self._ds_buffer.cone_outside_volume = volume\r\n \r\n def prefill_audio(self):\r\n write_size = self.get_write_size()\r\n", "new_path": "pyglet/media/drivers/directsound/adaptation.py", "old_path": "pyglet/media/drivers/directsound/adaptation.py" } ]
f1202260fb86f27c329ec9ce9350098c427f169b
pyglet/pyglet
null
null
Remove threading from OpenALAudioPlayer. This is the first attempt at removing the audio worker thread. Instead, we use the pyglet event loop to schedule a refill every 100 ms.
[ { "change_type": "MODIFY", "diff": "@@ -34,9 +34,9 @@\n from __future__ import print_function\n from __future__ import absolute_import\n \n-import threading\n import time\n \n+import pyglet\n from . import interface\n from pyglet.app import WeakSet\n from pyglet.debug import debug_print\n@@ -59,15 +59,9 @@ class OpenALDriver(AbstractAudioDriver):\n self.context = self.device.create_context()\n self.context.make_current()\n \n- self.lock = threading.Lock()\n-\n self._listener = OpenALListener(self)\n self._players = WeakSet()\n \n- # Start worker thread\n- self.worker = PlayerWorker()\n- self.worker.start()\n-\n def create_audio_player(self, source_group, player):\n assert self.device is not None, \"Device was closed\"\n if self.have_version(1, 1):\n@@ -78,16 +72,14 @@ class OpenALDriver(AbstractAudioDriver):\n return player\n \n def delete(self):\n- self.worker.stop()\n for player in self._players:\n player.delete()\n- with self.lock:\n- if self.context is not None:\n- self.context.delete()\n- self.context = None\n- if self.device is not None:\n- self.device.delete()\n- self.device = None\n+ if self.context is not None:\n+ self.context.delete()\n+ self.context = None\n+ if self.device is not None:\n+ self.device.delete()\n+ self.device = None\n \n def have_version(self, major, minor):\n return (major, minor) <= self.get_version()\n@@ -106,12 +98,6 @@ class OpenALDriver(AbstractAudioDriver):\n def get_listener(self):\n return self._listener\n \n- def __enter__(self):\n- self.lock.acquire()\n-\n- def __exit__(self, exc_type, exc_value, traceback):\n- self.lock.release()\n-\n \n class OpenALListener(AbstractListener):\n def __init__(self, driver):\n@@ -151,10 +137,6 @@ class OpenALAudioPlayer11(AbstractAudioPlayer):\n self.driver = driver\n self.source = driver.context.create_source()\n \n- # Lock policy: lock all instance vars (except constants). (AL calls\n- # are locked on context).\n- self._lock = threading.RLock()\n-\n # Cursor positions, like DSound and Pulse drivers, refer to a\n # hypothetical infinite-length buffer. 
Cursor units are in bytes.\n \n@@ -197,16 +179,11 @@ class OpenALAudioPlayer11(AbstractAudioPlayer):\n def delete(self):\n assert _debug_media('OpenALAudioPlayer.delete()')\n \n- # Do not lock self._lock before calling this, or you risk a deadlock with worker\n- self.driver.worker.remove(self)\n-\n- with self._lock:\n if not self.source:\n return\n \n assert self.driver is not None\n- with self.driver:\n- self.source.delete()\n+ self.source.delete()\n self.source = None\n \n @property\n@@ -216,89 +193,83 @@ class OpenALAudioPlayer11(AbstractAudioPlayer):\n def play(self):\n assert _debug_media('OpenALAudioPlayer.play()')\n \n- with self._lock:\n- assert self.driver is not None\n- assert self.source is not None\n+ assert self.driver is not None\n+ assert self.source is not None\n \n- with self.driver:\n- if not self.source.is_playing:\n- self.source.play()\n- self._playing = True\n- self._clearing = False\n+ if not self.source.is_playing:\n+ self.source.play()\n+ self._playing = True\n+ self._clearing = False\n \n- self.driver.worker.add(self)\n+ pyglet.clock.schedule_interval_soft(self._check_refill, 0.1)\n \n def stop(self):\n assert _debug_media('OpenALAudioPlayer.stop()')\n- if self.driver and self.driver.worker:\n- self.driver.worker.remove(self)\n+ pyglet.clock.unschedule(self._check_refill)\n \n- with self._lock:\n- assert self.driver is not None\n- assert self.source is not None\n+ assert self.driver is not None\n+ assert self.source is not None\n \n- self._pause_timestamp = self.get_time()\n+ self._pause_timestamp = self.get_time()\n \n- with self.driver:\n- self.source.pause()\n- self._playing = False\n+ self.source.pause()\n+ self._playing = False\n \n def clear(self):\n assert _debug_media('OpenALAudioPlayer.clear()')\n \n- with self._lock:\n- assert self.driver is not None\n- assert self.source is not None\n-\n- with self.driver:\n- self.source.stop()\n- self.source.clear()\n- self.source.byte_offset = 0\n- self._playing = False\n- self._clearing = True\n-\n- self._buffer_cursor = 0\n- self._play_cursor = 0\n- self._write_cursor = 0\n- del self._events[:]\n- del self._buffer_sizes[:]\n- del self._buffer_timestamps[:]\n+ assert self.driver is not None\n+ assert self.source is not None\n+\n+ self.source.stop()\n+ self.source.clear()\n+ self.source.byte_offset = 0\n+ self._playing = False\n+ self._clearing = True\n+\n+ self._buffer_cursor = 0\n+ self._play_cursor = 0\n+ self._write_cursor = 0\n+ del self._events[:]\n+ del self._buffer_sizes[:]\n+ del self._buffer_timestamps[:]\n+\n+ def _check_refill(self, dt): # Need a better name!\n+ write_size = self.get_write_size()\n+ if write_size > self.min_buffer_size:\n+ self.refill(write_size)\n \n def _update_play_cursor(self):\n- with self._lock:\n- assert self.driver is not None\n- assert self.source is not None\n+ assert self.driver is not None\n+ assert self.source is not None\n \n- self._handle_processed_buffers()\n+ self._handle_processed_buffers()\n \n- # Update play cursor using buffer cursor + estimate into current\n- # buffer\n- with self.driver:\n- if self._clearing:\n- self._play_cursor = self._buffer_cursor\n- else:\n- self._play_cursor = self._buffer_cursor + self.source.byte_offset\n- assert self._check_cursors()\n+ # Update play cursor using buffer cursor + estimate into current\n+ # buffer\n+ if self._clearing:\n+ self._play_cursor = self._buffer_cursor\n+ else:\n+ self._play_cursor = self._buffer_cursor + self.source.byte_offset\n+ assert self._check_cursors()\n \n- self._dispatch_events()\n+ self._dispatch_events()\n 
\n def _handle_processed_buffers(self):\n- with self._lock:\n- with self.driver:\n- processed = self.source.unqueue_buffers()\n-\n- if processed > 0:\n- if (len(self._buffer_timestamps) == processed\n- and self._buffer_timestamps[-1] is not None):\n- assert _debug_media('OpenALAudioPlayer: Underrun')\n- # Underrun, take note of timestamp.\n- # We check that the timestamp is not None, because otherwise\n- # our source could have been cleared.\n- self._underrun_timestamp = \\\n- self._buffer_timestamps[-1] + \\\n- self._buffer_sizes[-1] / \\\n- float(self.source_group.audio_format.bytes_per_second)\n- self._update_buffer_cursor(processed)\n+ processed = self.source.unqueue_buffers()\n+\n+ if processed > 0:\n+ if (len(self._buffer_timestamps) == processed\n+ and self._buffer_timestamps[-1] is not None):\n+ assert _debug_media('OpenALAudioPlayer: Underrun')\n+ # Underrun, take note of timestamp.\n+ # We check that the timestamp is not None, because otherwise\n+ # our source could have been cleared.\n+ self._underrun_timestamp = \\\n+ self._buffer_timestamps[-1] + \\\n+ self._buffer_sizes[-1] / \\\n+ float(self.source_group.audio_format.bytes_per_second)\n+ self._update_buffer_cursor(processed)\n \n return processed\n \n@@ -308,15 +279,13 @@ class OpenALAudioPlayer11(AbstractAudioPlayer):\n del self._buffer_timestamps[:processed]\n \n def _dispatch_events(self):\n- with self._lock:\n- while self._events and self._events[0][0] <= self._play_cursor:\n- _, event = self._events.pop(0)\n- event._sync_dispatch_to_player(self.player)\n+ while self._events and self._events[0][0] <= self._play_cursor:\n+ _, event = self._events.pop(0)\n+ event._sync_dispatch_to_player(self.player)\n \n def get_write_size(self):\n- with self._lock:\n- self._update_play_cursor()\n- buffer_size = int(self._write_cursor - self._play_cursor)\n+ self._update_play_cursor()\n+ buffer_size = int(self._write_cursor - self._play_cursor)\n \n # Only write when current buffer size is smaller than ideal\n write_size = max(self.ideal_buffer_size - buffer_size, 0)\n@@ -327,36 +296,32 @@ class OpenALAudioPlayer11(AbstractAudioPlayer):\n def refill(self, write_size):\n assert _debug_media('refill', write_size)\n \n- with self._lock:\n-\n- while write_size > self.min_buffer_size:\n- compensation_time = self.get_audio_time_diff()\n- audio_data = self.source_group.get_audio_data(write_size, compensation_time)\n- if not audio_data:\n- assert _debug_media('No audio data left')\n- if self._has_underrun():\n- assert _debug_media('Underrun')\n- MediaEvent(0, 'on_eos')._sync_dispatch_to_player(self.player)\n- MediaEvent(0, 'on_source_group_eos')._sync_dispatch_to_player(self.player)\n- break\n-\n- assert _debug_media('Writing {} bytes'.format(audio_data.length))\n- self._queue_events(audio_data)\n- self._queue_audio_data(audio_data)\n- self._update_write_cursor(audio_data)\n- write_size -= audio_data.length\n-\n- # Check for underrun stopping playback\n- with self.driver:\n- if self._playing and not self.source.is_playing:\n- assert _debug_media('underrun')\n- self.source.play()\n+ while write_size > self.min_buffer_size:\n+ compensation_time = self.get_audio_time_diff()\n+ audio_data = self.source_group.get_audio_data(write_size, compensation_time)\n+ if not audio_data:\n+ assert _debug_media('No audio data left')\n+ if self._has_underrun():\n+ assert _debug_media('Underrun')\n+ MediaEvent(0, 'on_eos')._sync_dispatch_to_player(self.player)\n+ MediaEvent(0, 'on_source_group_eos')._sync_dispatch_to_player(self.player)\n+ break\n+\n+ assert 
_debug_media('Writing {} bytes'.format(audio_data.length))\n+ self._queue_events(audio_data)\n+ self._queue_audio_data(audio_data)\n+ self._update_write_cursor(audio_data)\n+ write_size -= audio_data.length\n+\n+ # Check for underrun stopping playback\n+ if self._playing and not self.source.is_playing:\n+ assert _debug_media('underrun')\n+ self.source.play()\n \n def _queue_audio_data(self, audio_data):\n- with self.driver:\n- buf = self.source.get_buffer()\n- buf.data(audio_data, self.source_group.audio_format)\n- self.source.queue_buffer(buf)\n+ buf = self.source.get_buffer()\n+ buf.data(audio_data, self.source_group.audio_format)\n+ self.source.queue_buffer(buf)\n \n def _update_write_cursor(self, audio_data):\n self._write_cursor += audio_data.length\n@@ -371,42 +336,39 @@ class OpenALAudioPlayer11(AbstractAudioPlayer):\n self._events.append((cursor, event))\n \n def _has_underrun(self):\n- with self.driver:\n- return self.source.buffers_queued == 0\n+ return self.source.buffers_queued == 0\n \n def seek(self, timestamp):\n self.audio_diff_avg_count = 0\n self.audio_diff_cum = 0.0\n- with self._lock:\n- while True:\n- audio_data = self.source_group.get_audio_data(self.ideal_buffer_size, 0.0)\n- assert _debug_media(\"Seeking audio timestamp {:.2f} sec. \"\n- \"Got audio packet starting at {:.2f} sec\".format(\n- timestamp, audio_data.timestamp))\n- if timestamp <= (audio_data.timestamp + audio_data.duration):\n- break\n- \n- if audio_data is not None:\n- assert _debug_media('Writing {} bytes'.format(audio_data.length))\n- self._queue_events(audio_data)\n- self._queue_audio_data(audio_data)\n- self._update_write_cursor(audio_data)\n+ while True:\n+ audio_data = self.source_group.get_audio_data(self.ideal_buffer_size, 0.0)\n+ assert _debug_media(\"Seeking audio timestamp {:.2f} sec. 
\"\n+ \"Got audio packet starting at {:.2f} sec\".format(\n+ timestamp, audio_data.timestamp))\n+ if timestamp <= (audio_data.timestamp + audio_data.duration):\n+ break\n+\n+ if audio_data is not None:\n+ assert _debug_media('Writing {} bytes'.format(audio_data.length))\n+ self._queue_events(audio_data)\n+ self._queue_audio_data(audio_data)\n+ self._update_write_cursor(audio_data)\n \n def get_time(self):\n- with self._lock:\n- # Update first, might remove buffers\n- self._update_play_cursor()\n+ # Update first, might remove buffers\n+ self._update_play_cursor()\n \n- if not self._buffer_timestamps:\n- timestamp = self._underrun_timestamp\n- assert _debug_media('OpenALAudioPlayer: Return underrun timestamp')\n- else:\n- timestamp = self._buffer_timestamps[0]\n- assert _debug_media('OpenALAudioPlayer: Buffer timestamp: {}'.format(timestamp))\n+ if not self._buffer_timestamps:\n+ timestamp = self._underrun_timestamp\n+ assert _debug_media('OpenALAudioPlayer: Return underrun timestamp')\n+ else:\n+ timestamp = self._buffer_timestamps[0]\n+ assert _debug_media('OpenALAudioPlayer: Buffer timestamp: {}'.format(timestamp))\n \n- if timestamp is not None:\n- timestamp += ((self._play_cursor - self._buffer_cursor) /\n- float(self.source_group.audio_format.bytes_per_second))\n+ if timestamp is not None:\n+ timestamp += ((self._play_cursor - self._buffer_cursor) /\n+ float(self.source_group.audio_format.bytes_per_second))\n \n assert _debug_media('OpenALAudioPlayer: get_time = {}'.format(timestamp))\n \n@@ -424,40 +386,31 @@ class OpenALAudioPlayer11(AbstractAudioPlayer):\n return True # Return true so it can be called in an assert (and optimized out)\n \n def set_volume(self, volume):\n- with self.driver:\n- self.source.gain = volume\n+ self.source.gain = volume\n \n def set_position(self, position):\n- with self.driver:\n- self.source.position = position\n+ self.source.position = position\n \n def set_min_distance(self, min_distance):\n- with self.driver:\n- self.source.reference_distance = min_distance\n+ self.source.reference_distance = min_distance\n \n def set_max_distance(self, max_distance):\n- with self.driver:\n- self.source.max_distance = max_distance\n+ self.source.max_distance = max_distance\n \n def set_pitch(self, pitch):\n- with self.driver:\n- self.source.pitch = pitch\n+ self.source.pitch = pitch\n \n def set_cone_orientation(self, cone_orientation):\n- with self.driver:\n- self.source.direction = cone_orientation\n+ self.source.direction = cone_orientation\n \n def set_cone_inner_angle(self, cone_inner_angle):\n- with self.driver:\n- self.source.cone_inner_angle = cone_inner_angle\n+ self.source.cone_inner_angle = cone_inner_angle\n \n def set_cone_outer_angle(self, cone_outer_angle):\n- with self.driver:\n- self.source.cone_outer_angle = cone_outer_angle\n+ self.source.cone_outer_angle = cone_outer_angle\n \n def set_cone_outer_gain(self, cone_outer_gain):\n- with self.driver:\n- self.source.cone_outer_gain = cone_outer_gain\n+ self.source.cone_outer_gain = cone_outer_gain\n \n def prefill_audio(self):\n write_size = self.get_write_size()\n@@ -475,29 +428,26 @@ class OpenALAudioPlayer10(OpenALAudioPlayer11):\n self._buffer_system_time = time.time()\n \n def play(self):\n- with self._lock:\n- super(OpenALAudioPlayer10, self).play()\n- self._buffer_system_time = time.time()\n+ super(OpenALAudioPlayer10, self).play()\n+ self._buffer_system_time = time.time()\n \n def _update_play_cursor(self):\n- with self._lock:\n- assert self.driver is not None\n- assert self.source is not None\n+ 
assert self.driver is not None\n+ assert self.source is not None\n \n- self._handle_processed_buffers()\n+ self._handle_processed_buffers()\n \n- # Interpolate system time past buffer timestamp\n- self._play_cursor = \\\n- self._buffer_cursor + int(\n- (time.time() - self._buffer_system_time) * \\\n- self.source_group.audio_format.bytes_per_second)\n- assert self._check_cursors()\n- assert _debug_media('Play cursor at {} bytes'.format(self._play_cursor))\n+ # Interpolate system time past buffer timestamp\n+ self._play_cursor = \\\n+ self._buffer_cursor + int(\n+ (time.time() - self._buffer_system_time) * \\\n+ self.source_group.audio_format.bytes_per_second)\n+ assert self._check_cursors()\n+ assert _debug_media('Play cursor at {} bytes'.format(self._play_cursor))\n \n- self._dispatch_events()\n+ self._dispatch_events()\n \n def _handle_processed_buffers(self):\n- with self._lock:\n- processed = super(OpenALAudioPlayer10, self)._handle_processed_buffers()\n- if processed > 0:\n- self._buffer_system_time = time.time()\n\\ No newline at end of file\n+ processed = super(OpenALAudioPlayer10, self)._handle_processed_buffers()\n+ if processed > 0:\n+ self._buffer_system_time = time.time()\n\\ No newline at end of file\n", "new_path": "pyglet/media/drivers/openal/adaptation.py", "old_path": "pyglet/media/drivers/openal/adaptation.py" } ]
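Both pyglet commits above replace the per-player worker thread (and the locking it required) with a callback on the pyglet clock. The pattern is the same for the DirectSound and OpenAL players; a stripped-down sketch with the buffer bookkeeping elided (class and constant values here are illustrative, not the pyglet internals):

import pyglet

class ScheduledRefillPlayer:
    """Illustrative stand-in for the refill logic shared by both drivers."""
    min_buffer_size = 4096

    def play(self):
        # Poll roughly every 100 ms; the 'soft' variant lets pyglet spread the
        # callbacks so several players do not all refill on the same tick.
        pyglet.clock.schedule_interval_soft(self._check_refill, 0.1)

    def stop(self):
        pyglet.clock.unschedule(self._check_refill)

    def _check_refill(self, dt):
        write_size = self.get_write_size()
        if write_size > self.min_buffer_size:
            self.refill(write_size)

    def get_write_size(self):
        raise NotImplementedError   # bytes free between the play and write cursors

    def refill(self, write_size):
        raise NotImplementedError   # decode audio data and write it into the buffer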
07d12a3cf97fa031831d2377695ed4c130fe5701
pyglet/pyglet
null
null
Fix Player when playing an audio source with a silent audio driver. When the Source has only audio and no video and the audio driver is silent, the Player never finished playing. Now it correctly dispatches the "on_eos" event after the source duration.
[ { "change_type": "MODIFY", "diff": "@@ -184,7 +184,7 @@ class Player(pyglet.event.EventDispatcher):\n source = iter(source)\n except TypeError:\n raise TypeError(\"source must be either a Source or an iterable.\"\n- \" Received type {0}\".format(type(source)))\n+ \" Received type {0}\".format(type(source)))\n self._playlists.append(source)\n \n if self.source is None:\n@@ -225,6 +225,12 @@ class Player(pyglet.event.EventDispatcher):\n # Negative number means audio runs ahead.\n # self._mclock._systime += -0.3\n self._mclock.play()\n+ if self._audio_player is None and source.video_format is None:\n+ pyglet.clock.schedule_once(\n+ lambda dt: self.dispatch_event(\"on_eos\"),\n+ source.duration,\n+ )\n+\n else:\n if self._audio_player:\n self._audio_player.stop()\n@@ -364,7 +370,6 @@ class Player(pyglet.event.EventDispatcher):\n audio_driver = get_audio_driver()\n if audio_driver is None:\n # Failed to find a valid audio driver\n- self.source.audio_format = None\n return\n \n self._audio_player = audio_driver.create_audio_player(source, self)\n@@ -379,7 +384,7 @@ class Player(pyglet.event.EventDispatcher):\n @property\n def source(self):\n \"\"\"Source: Read-only. The current :class:`Source`, or ``None``.\"\"\"\n- return self._source \n+ return self._source\n \n @property\n def time(self):\n@@ -637,6 +642,7 @@ Player.register_event_type('on_player_next_source')\n def _one_item_playlist(source):\n yield source\n \n+\n class PlayerGroup(object):\n \"\"\"Group of players that can be played and paused simultaneously.\n \n", "new_path": "pyglet/media/player.py", "old_path": "pyglet/media/player.py" }, { "change_type": "MODIFY", "diff": "@@ -1,5 +1,6 @@\n from __future__ import absolute_import, print_function\n import pyglet\n+import pytest\n _debug = False\n \n \n@@ -13,8 +14,10 @@ class MockPlayer(object):\n def dispatch_event(self, event_type, *args):\n super(MockPlayer, self).dispatch_event(event_type, *args)\n if _debug:\n- print('{}: event {} received @ {}'.format(self.__class__.__name__,\n- \tevent_type, self.pyclock.time()))\n+ print('{}: event {} received @ {}'.format(\n+ self.__class__.__name__,\n+ event_type, self.pyclock.time()\n+ ))\n self.events.append((event_type, args))\n pyglet.clock.unschedule(self.event_loop.interrupt_event_loop)\n self.event_loop.interrupt_event_loop()\n@@ -25,8 +28,8 @@ class MockPlayer(object):\n while self.pyclock.time() < end_time:\n if _debug:\n print('{}: run for {} sec @ {}'.format(self.__class__.__name__,\n- \tend_time-self.pyclock.time(), self.pyclock.time()))\n- self.event_loop.run_event_loop(duration=end_time-self.pyclock.time())\n+ end_time - self.pyclock.time(), self.pyclock.time()))\n+ self.event_loop.run_event_loop(duration=end_time - self.pyclock.time())\n if not self.events:\n continue\n event_type, args = self.events.pop()\n@@ -45,10 +48,10 @@ class MockPlayer(object):\n event_type, args = self.wait_for_event(timeout, *expected_events)\n if _debug:\n print('{}: got event {} @ {}'.format(self.__class__.__name__,\n- \tevent_type, self.pyclock.time()))\n+ event_type, self.pyclock.time()))\n if event_type is None and self.pyclock.time() >= end_time:\n- pytest.fail('Timeout before all events have been received. Still waiting for: '\n- + ','.join(expected_events))\n+ pytest.fail('Timeout before all events have been received. 
'\n+ 'Still waiting for: ' + ','.join(expected_events))\n elif event_type is not None:\n if event_type in expected_events:\n expected_events.remove(event_type)\n@@ -59,6 +62,6 @@ class MockPlayer(object):\n now = self.pyclock.time()\n end_time = now + timeout\n while now - end_time < -0.005:\n- duration = max(.01, end_time-now)\n+ duration = max(.01, end_time - now)\n self.event_loop.run_event_loop(duration=duration)\n- now = self.pyclock.time()\n\\ No newline at end of file\n+ now = self.pyclock.time()\n", "new_path": "tests/integration/media/mock_player.py", "old_path": "tests/integration/media/mock_player.py" }, { "change_type": "MODIFY", "diff": "@@ -2,11 +2,9 @@ from __future__ import print_function\n from future import standard_library\n standard_library.install_aliases()\n \n-import gc\n import pytest\n from tests import mock\n import time\n-import unittest\n \n import pyglet\n _debug = False\n@@ -26,6 +24,7 @@ class PlayerTest(MockPlayer, Player):\n def player(event_loop):\n return PlayerTest(event_loop)\n \n+\n class SilentTestSource(Silence):\n def __init__(self, duration, sample_rate=44800, sample_size=16):\n super(Silence, self).__init__(duration, sample_rate, sample_size)\n@@ -41,14 +40,16 @@ class SilentTestSource(Silence):\n return self.bytes_read == self._max_offset\n \n \n-\n def test_player_play(player):\n source = SilentTestSource(.1)\n player.queue(source)\n \n player.play()\n- player.wait_for_all_events(1., \n- 'on_eos', 'on_player_eos')\n+ player.wait_for_all_events(\n+ 1.,\n+ 'on_eos',\n+ 'on_player_eos'\n+ )\n assert source.has_fully_played(), 'Source not fully played'\n \n \n@@ -58,17 +59,35 @@ def test_player_play_multiple(player):\n player.queue(source)\n \n player.play()\n- player.wait_for_all_events(1., \n- 'on_eos', 'on_player_next_source', 'on_eos', 'on_player_eos')\n+ player.wait_for_all_events(\n+ 1.,\n+ 'on_eos',\n+ 'on_player_next_source',\n+ 'on_eos',\n+ 'on_player_eos'\n+ )\n for source in sources:\n assert source.has_fully_played(), 'Source not fully played'\n \n \n def test_multiple_fire_and_forget_players():\n \"\"\"\n- Test an issue where the driver crashed when starting multiple players, but not keeping a\n- reference to these players.\n+ Test an issue where the driver crashed when starting multiple players, but not\n+ keeping a reference to these players.\n \"\"\"\n for _ in range(10):\n Silence(1).play()\n time.sleep(1)\n+\n+\n+def test_player_silent_audio_driver(player):\n+ with mock.patch('pyglet.media.player.get_audio_driver') as get_audio_driver_mock:\n+ get_audio_driver_mock.return_value = None\n+ source = SilentTestSource(.1)\n+ player.queue(source)\n+ player.play()\n+\n+ player.wait_for_all_events(\n+ 1.,\n+ 'on_eos',\n+ 'on_player_eos')\n", "new_path": "tests/integration/media/test_player.py", "old_path": "tests/integration/media/test_player.py" }, { "change_type": "MODIFY", "diff": "@@ -1,20 +1,13 @@\n from __future__ import division\n from builtins import range\n-import ctypes\n from tests import mock\n-import os\n import random\n-from collections import deque\n-from itertools import product\n from tests.base.future_test import FutureTestCase\n \n-import pytest\n-\n-import pyglet\n from pyglet.media.player import Player, PlayerGroup\n-from pyglet.media.codecs.base import *\n+from pyglet.media.codecs.base import AudioFormat, VideoFormat, Source\n \n-#pyglet.options['debug_media'] = True\n+# pyglet.options['debug_media'] = True\n \n \n class PlayerTestCase(FutureTestCase):\n@@ -65,12 +58,13 @@ class PlayerTestCase(FutureTestCase):\n 
return mock_source\n \n def set_video_data_for_mock_source(self, mock_source, timestamp_data_pairs):\n- \"\"\"Make the given mock source return video data. Video data is given in pairs of timestamp\n- and data to return.\"\"\"\n+ \"\"\"Make the given mock source return video data. Video data is given in pairs of\n+ timestamp and data to return.\"\"\"\n def _get_frame():\n if timestamp_data_pairs:\n current_frame = timestamp_data_pairs.pop(0)\n return current_frame[1]\n+\n def _get_timestamp():\n if timestamp_data_pairs:\n return timestamp_data_pairs[0][0]\n@@ -233,8 +227,9 @@ class PlayerTestCase(FutureTestCase):\n self.assert_now_playing(mock_source3)\n \n def test_queue_multiple_audio_sources_same_format_and_play_and_skip(self):\n- \"\"\"When multiple audio sources with the same format are queued, they are played using the\n- same driver player. Skipping to the next source is just advancing the source group.\n+ \"\"\"When multiple audio sources with the same format are queued, they are played\n+ using the same driver player. Skipping to the next source is just advancing the\n+ source group.\n \"\"\"\n mock_source1 = self.create_mock_source(self.audio_format_1, None)\n mock_source2 = self.create_mock_source(self.audio_format_1, None)\n@@ -262,7 +257,8 @@ class PlayerTestCase(FutureTestCase):\n self.assert_now_playing(mock_source3)\n \n def test_on_eos(self):\n- \"\"\"The player receives on_eos for every source, but does not need to do anything.\"\"\"\n+ \"\"\"The player receives on_eos for every source, but does not need to do anything.\n+ \"\"\"\n mock_source1 = self.create_mock_source(self.audio_format_1, None)\n mock_source2 = self.create_mock_source(self.audio_format_1, None)\n mock_source3 = self.create_mock_source(self.audio_format_1, None)\n@@ -296,7 +292,7 @@ class PlayerTestCase(FutureTestCase):\n self.assert_not_playing(None)\n \n def test_eos_events(self):\n- \"\"\"Test receiving various eos events: on source eos, \n+ \"\"\"Test receiving various eos events: on source eos,\n on playlist exhausted and on player eos and on player next source.\n \"\"\"\n on_eos_mock = mock.MagicMock(return_value=None)\n@@ -311,7 +307,7 @@ class PlayerTestCase(FutureTestCase):\n on_player_eos_mock.reset_mock()\n on_player_next_source_mock.reset_mock()\n \n- def assert_eos_events_received(on_eos=False, on_player_eos=False, \n+ def assert_eos_events_received(on_eos=False, on_player_eos=False,\n on_player_next_source=False):\n self.assertEqual(on_eos_mock.called, on_eos)\n self.assertEqual(on_player_eos_mock.called, on_player_eos)\n@@ -362,8 +358,10 @@ class PlayerTestCase(FutureTestCase):\n \n self.reset_mocks()\n self.player.play()\n- self.assertAlmostEqual(self.player.time, 0.5, places=2,\n- msg='While playing, player should return time from driver player')\n+ self.assertAlmostEqual(\n+ self.player.time, 0.5, places=2,\n+ msg='While playing, player should return time from driver player'\n+ )\n self.assert_driver_player_started()\n self.assert_no_new_driver_player_created()\n self.assert_now_playing(mock_source)\n@@ -415,8 +413,8 @@ class PlayerTestCase(FutureTestCase):\n self.player.delete()\n \n def test_set_player_properties_before_playing(self):\n- \"\"\"When setting player properties before a driver specific player is \n- created, these settings should be propagated after creating the \n+ \"\"\"When setting player properties before a driver specific player is\n+ created, these settings should be propagated after creating the\n player.\n \"\"\"\n mock_source1 = 
self.create_mock_source(self.audio_format_1, None)\n@@ -460,7 +458,7 @@ class PlayerTestCase(FutureTestCase):\n assert_properties_set()\n \n def test_set_player_properties_while_playing(self):\n- \"\"\"When setting player properties while playing, the properties should \n+ \"\"\"When setting player properties while playing, the properties should\n be propagated to the driver specific player right away.\"\"\"\n mock_source1 = self.create_mock_source(self.audio_format_1, None)\n mock_source2 = self.create_mock_source(self.audio_format_2, None)\n@@ -548,7 +546,7 @@ class PlayerTestCase(FutureTestCase):\n self.assert_driver_player_cleared()\n \n def test_video_queue_and_play(self):\n- \"\"\"Sources can also include video. Instead of using a player to \n+ \"\"\"Sources can also include video. Instead of using a player to\n continuously play the video, a texture is updated based on the\n video packet timestamp.\"\"\"\n mock_source = self.create_mock_source(self.audio_format_1, self.video_format_1)\n@@ -571,7 +569,7 @@ class PlayerTestCase(FutureTestCase):\n self.assertIs(self.player.texture, self.mock_texture)\n \n def test_video_seek(self):\n- \"\"\"Sources with video can also be seeked. It's the Source \n+ \"\"\"Sources with video can also be seeked. It's the Source\n responsibility to present the Player with audio and video at the\n correct time.\"\"\"\n mock_source = self.create_mock_source(self.audio_format_1, self.video_format_1)\n@@ -599,7 +597,7 @@ class PlayerTestCase(FutureTestCase):\n self.assert_texture_updated('e')\n \n def test_video_frame_rate(self):\n- \"\"\"Videos texture are scheduled according to the video packet \n+ \"\"\"Videos texture are scheduled according to the video packet\n timestamp.\"\"\"\n mock_source1 = self.create_mock_source(self.audio_format_1, self.video_format_1)\n mock_source2 = self.create_mock_source(self.audio_format_1, self.video_format_2)\n@@ -685,6 +683,13 @@ class PlayerTestCase(FutureTestCase):\n self.assert_update_texture_scheduled()\n self.assert_no_new_driver_player_created()\n \n+ def test_audio_source_with_silent_driver(self):\n+ \"\"\"An audio source with a silent driver.\"\"\"\n+ mock_source = self.create_mock_source(self.audio_format_3, None)\n+ self.mock_get_audio_driver.return_value = None\n+ self.player.queue(mock_source)\n+ self.player.play()\n+\n \n class PlayerGroupTestCase(FutureTestCase):\n def create_mock_player(self, has_audio=True):\n@@ -701,40 +706,48 @@ class PlayerGroupTestCase(FutureTestCase):\n player.play.assert_called_once_with()\n \n def assert_audio_players_started(self, *players):\n- # Find the one player that was used to start the group, the rest should not be used\n+ # Find the one player that was used to start the group,\n+ # the rest should not be used\n call_args = None\n audio_players = []\n for player in players:\n audio_player = player._audio_player\n audio_players.append(audio_player)\n if call_args is not None:\n- self.assertFalse(audio_player._play_group.called, msg='Only one player should be used to start the group')\n+ self.assertFalse(audio_player._play_group.called,\n+ msg='Only one player should be used to start the group')\n elif audio_player._play_group.called:\n call_args = audio_player._play_group.call_args\n \n- self.assertIsNotNone(call_args, msg='No player was used to start all audio players.')\n+ self.assertIsNotNone(call_args,\n+ msg='No player was used to start all audio players.')\n started_players = call_args[0][0]\n- self.assertCountEqual(started_players, audio_players, msg='Not all players 
with audio players were started')\n+ self.assertCountEqual(started_players, audio_players,\n+ msg='Not all players with audio players were started')\n \n def assert_players_stopped(self, *players):\n for player in players:\n player.pause.assert_called_once_with()\n \n def assert_audio_players_stopped(self, *players):\n- # Find the one player that was used to start the group, the rest should not be used\n+ # Find the one player that was used to start the group,\n+ # the rest should not be used\n call_args = None\n audio_players = []\n for player in players:\n audio_player = player._audio_player\n audio_players.append(audio_player)\n if call_args is not None:\n- self.assertFalse(audio_player._stop_group.called, msg='Only one player should be used to stop the group')\n+ self.assertFalse(audio_player._stop_group.called,\n+ msg='Only one player should be used to stop the group')\n elif audio_player._stop_group.called:\n call_args = audio_player._stop_group.call_args\n \n- self.assertIsNotNone(call_args, msg='No player was used to stop all audio players.')\n+ self.assertIsNotNone(call_args,\n+ msg='No player was used to stop all audio players.')\n stopped_players = call_args[0][0]\n- self.assertCountEqual(stopped_players, audio_players, msg='Not all players with audio players were stopped')\n+ self.assertCountEqual(stopped_players, audio_players,\n+ msg='Not all players with audio players were stopped')\n \n def reset_mocks(self, *mocks):\n for m in mocks:\n@@ -788,4 +801,3 @@ class PlayerGroupTestCase(FutureTestCase):\n group.pause()\n self.assert_audio_players_stopped(*players_with_audio)\n self.assert_players_stopped(*players)\n-\n", "new_path": "tests/unit/media/test_player.py", "old_path": "tests/unit/media/test_player.py" } ]
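The fix above makes an audio-only source finish even when get_audio_driver() returns None: the Player schedules its own "on_eos" dispatch after source.duration instead of waiting on an audio player that will never exist. A sketch of how the new integration test exercises it; the import path for Silence is assumed to be pyglet.media.synthesis, and the pyglet clock/event loop must be running for the scheduled dispatch to fire:

from unittest import mock

import pyglet
from pyglet.media.synthesis import Silence

with mock.patch('pyglet.media.player.get_audio_driver') as get_audio_driver_mock:
    get_audio_driver_mock.return_value = None   # simulate a missing/silent audio driver
    player = pyglet.media.Player()
    player.queue(Silence(0.1))                   # audio only, no video_format
    player.play()
    # After roughly 0.1 s of event-loop time the Player dispatches 'on_eos'
    # and then 'on_player_eos', which is what the new test waits for.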
75e4e08636d5dfe3cb8e6796ad116af01e2c0f4a
probcomp/bayeslite
null
null
Change schema for population. MODEL cols AS stattype has become SET STATTYPE OF cols TO st. Reasoning: the word MODEL is confusing here; we are setting the statistical data type, so we should name the command just that.
[ { "change_type": "MODIFY", "diff": "@@ -33,7 +33,7 @@ that for illustration::\n Then you can model a table and query the probable implications of the data in\n the table::\n \n- bdb.execute('create population p for t with schema(guess stattypes for (*))')\n+ bdb.execute('create population p for t with schema(guess stattypes of (*))')\n bdb.execute('create generator p_cc for t using cgpm;')\n bdb.execute('initialize 10 models for t_cc')\n bdb.execute('analyze t_cc for 10 iterations')\n", "new_path": "src/backend.py", "old_path": "src/backend.py" }, { "change_type": "MODIFY", "diff": "@@ -99,9 +99,11 @@ pop_schema(many) ::= pop_schema(schema) T_SEMI pop_clause(cl).\n \n pop_clause(empty) ::= .\n pop_clause(column) ::= column_name(col) stattype(st).\n-pop_clause(model) ::= K_MODEL pop_columns(cols) K_AS stattype(st).\n+pop_clause(stattype) ::= K_SET K_STATTYPES|K_STATTYPE\n+ K_OF pop_columns(cols)\n+ K_TO stattype(st).\n pop_clause(ignore) ::= K_IGNORE pop_columns(cols).\n-pop_clause(guess) ::= K_GUESS stattypes_for_opt pop_columns_guess(cols).\n+pop_clause(guess) ::= K_GUESS stattypes_of_opt pop_columns_guess(cols).\n \n stattype_opt(none) ::= .\n stattype_opt(one) ::= stattype(st).\n@@ -114,9 +116,8 @@ pop_columns_guess(list) ::= pop_columns(cols).\n pop_columns(one) ::= column_name(c).\n pop_columns(many) ::= pop_columns(cols) T_COMMA column_name(c).\n \n-stattypes_for_opt ::= .\n-stattypes_for_opt ::= K_STATTYPES K_FOR.\n-\n+stattypes_of_opt ::= .\n+stattypes_of_opt ::= K_STATTYPE|K_STATTYPES K_OF.\n \n /* XXX Temporary generators? */\n command(creategen) ::= K_CREATE K_GENERATOR\n", "new_path": "src/grammar.y", "old_path": "src/grammar.y" }, { "change_type": "MODIFY", "diff": "@@ -201,7 +201,7 @@ class BQLSemantics(object):\n \n def p_pop_clause_empty(self): return None\n def p_pop_clause_column(self, col, st): return ast.PopModelVars([col], st)\n- def p_pop_clause_model(self, cols, st): return ast.PopModelVars(cols, st)\n+ def p_pop_clause_stattype(self, cols, st): return ast.PopModelVars(cols, st)\n def p_pop_clause_ignore(self, cols): return ast.PopIgnoreVars(cols)\n def p_pop_clause_guess(self, cols): return ast.PopGuessVars(cols)\n \n", "new_path": "src/parse.py", "old_path": "src/parse.py" }, { "change_type": "MODIFY", "diff": "@@ -107,7 +107,7 @@ def test_trivial_population():\n # XXX if (not) exists\n bdb.execute('''\n create population p for t (\n- guess stattypes for (*);\n+ guess stattypes of (*);\n age numerical\n )\n ''')\n@@ -120,7 +120,7 @@ def test_population_invalid_numerical():\n with pytest.raises(BQLError):\n bdb.execute('''\n create population p for t (\n- guess stattypes for (*);\n+ guess stattypes of (*);\n gender numerical\n )\n ''')\n@@ -131,7 +131,7 @@ def test_population_invalid_numerical_alterpop_addvar():\n bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)\n bdb.execute('''\n create population p for t (\n- guess stattypes for (*);\n+ guess stattypes of (*);\n ignore gender\n )\n ''')\n@@ -145,7 +145,7 @@ def test_population_invalid_numerical_alterpop_stattype():\n bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)\n bdb.execute('''\n create population p for t (\n- guess stattypes for (*);\n+ guess stattypes of (*);\n gender nominal\n )\n ''')\n@@ -263,8 +263,8 @@ def test_conditional_probability(seed):\n bdb.execute('''\n create population p1 for t1 (\n ignore id, label;\n- model age as numerical;\n- model weight as numerical\n+ set stattype of age to numerical;\n+ set stattype of weight to numerical\n )\n ''')\n 
bdb.execute('''\n", "new_path": "tests/test_bql.py", "old_path": "tests/test_bql.py" }, { "change_type": "MODIFY", "diff": "@@ -72,8 +72,9 @@ def cgpm_smoke_bdb():\n \n bdb.execute('''\n CREATE POPULATION p FOR t WITH SCHEMA(\n- MODEL output, input AS NUMERICAL;\n- MODEL cat AS CATEGORICAL\n+ output NUMERICAL;\n+ input NUMERICAL;\n+ cat NOMINAL;\n )\n ''')\n \n@@ -121,7 +122,7 @@ def test_cgpm_no_empty_categories():\n bayesdb_nullify(bdb, 'f', '')\n bdb.execute('''\n CREATE POPULATION q FOR f WITH SCHEMA (\n- MODEL a, b, c AS NOMINAL\n+ SET STATTYPES OF a, b, c TO NOMINAL\n );\n ''')\n bdb.execute('CREATE GENERATOR h IF NOT EXISTS FOR q USING cgpm;')\n@@ -360,12 +361,12 @@ def test_cgpm_kepler():\n with cgpm_dummy_satellites_bdb() as bdb:\n bdb.execute('''\n CREATE POPULATION satellites FOR satellites_ucs WITH SCHEMA(\n- MODEL apogee AS NUMERICAL;\n- MODEL class_of_orbit AS CATEGORICAL;\n- MODEL country_of_operator AS CATEGORICAL;\n- MODEL launch_mass AS NUMERICAL;\n- MODEL perigee AS NUMERICAL;\n- MODEL period AS NUMERICAL\n+ apogee NUMERICAL;\n+ class_of_orbit CATEGORICAL;\n+ country_of_operator CATEGORICAL;\n+ launch_mass NUMERICAL;\n+ perigee NUMERICAL;\n+ period NUMERICAL\n )\n ''')\n bdb.execute('''\n@@ -490,28 +491,28 @@ def test_unknown_stattype():\n # No such statistical type at the moment.\n bdb.execute('''\n CREATE POPULATION satellites FOR satellites_ucs WITH SCHEMA(\n- MODEL apogee, perigee, launch_mass, period\n- AS NUMERICAL;\n+ SET STATTYPES OF apogee, perigee, launch_mass, period\n+ TO NUMERICAL;\n \n- MODEL class_of_orbit, country_of_operator\n- AS NOMINAL;\n+ SET STATTYPE OF class_of_orbit, country_of_operator\n+ TO NOMINAL;\n \n- MODEL relaunches\n- AS QUAGGA\n+ SET STATTYPE OF relaunches\n+ TO QUAGGA\n )\n ''')\n # Invent the statistical type.\n bdb.sql_execute('INSERT INTO bayesdb_stattype VALUES (?)', ('quagga',))\n bdb.execute('''\n CREATE POPULATION satellites FOR satellites_ucs WITH SCHEMA(\n- MODEL apogee, perigee, launch_mass, period\n- AS NUMERICAL;\n+ SET STATTYPES OF apogee, perigee, launch_mass, period\n+ TO NUMERICAL;\n \n- MODEL class_of_orbit, country_of_operator\n- AS NOMINAL;\n+ SET STATTYPES OF class_of_orbit, country_of_operator\n+ TO NOMINAL;\n \n- MODEL relaunches\n- AS QUAGGA\n+ SET STATTYPES OF relaunches\n+ TO QUAGGA\n )\n ''')\n registry = {\n@@ -547,12 +548,12 @@ def test_bad_analyze_vars():\n with cgpm_dummy_satellites_bdb() as bdb:\n bdb.execute('''\n CREATE POPULATION satellites FOR satellites_ucs WITH SCHEMA(\n- MODEL apogee AS NUMERICAL;\n- MODEL class_of_orbit AS CATEGORICAL;\n- MODEL country_of_operator AS CATEGORICAL;\n- MODEL launch_mass AS NUMERICAL;\n- MODEL perigee AS NUMERICAL;\n- MODEL period AS NUMERICAL\n+ SET STATTYPE OF apogee TO NUMERICAL;\n+ SET STATTYPE OF class_of_orbit TO CATEGORICAL;\n+ SET STATTYPE OF country_of_operator TO CATEGORICAL;\n+ SET STATTYPE OF launch_mass TO NUMERICAL;\n+ SET STATTYPE OF perigee TO NUMERICAL;\n+ SET STATTYPE OF period TO NUMERICAL\n )\n ''')\n registry = {\n@@ -587,15 +588,15 @@ def test_output_stattypes():\n with pytest.raises(BQLError):\n bdb.execute('''\n CREATE POPULATION satellites FOR satellites_ucs WITH SCHEMA(\n- MODEL apogee, launch_mass AS NUMERICAL;\n- MODEL country_of_operator AS CATEGORICAL\n+ SET STATTYPES OF apogee, launch_mass TO NUMERICAL;\n+ SET STATTYPES OF country_of_operator TO CATEGORICAL\n )\n ''')\n bdb.execute('''\n CREATE POPULATION satellites FOR satellites_ucs WITH SCHEMA(\n IGNORE class_of_orbit, perigee, period;\n- MODEL apogee, launch_mass AS NUMERICAL;\n- 
MODEL country_of_operator AS CATEGORICAL\n+ SET STATTYPES OF apogee, launch_mass TO NUMERICAL;\n+ SET STATTYPES OF country_of_operator TO CATEGORICAL\n )\n ''')\n registry = {\n@@ -706,7 +707,7 @@ def test_initialize_with_all_nulls():\n # Fail when a is numerical and modeled by crosscat.\n bdb.execute('''\n CREATE POPULATION p FOR t WITH SCHEMA(\n- MODEL a, b, c AS NUMERICAL\n+ SET STATTYPES OF a, b, c TO NUMERICAL\n )\n ''')\n bdb.execute('''\n@@ -720,8 +721,8 @@ def test_initialize_with_all_nulls():\n # Fail when a is nominal and modeled by crosscat.\n bdb.execute('''\n CREATE POPULATION p2 FOR t WITH SCHEMA(\n- MODEL a AS NOMINAL;\n- MODEL b, c AS NUMERICAL\n+ SET STATTYPES OF a TO NOMINAL;\n+ SET STATTYPES OF b, c TO NUMERICAL\n )\n ''')\n bdb.execute('CREATE GENERATOR m2 FOR p2;')\n@@ -732,7 +733,7 @@ def test_initialize_with_all_nulls():\n bdb.execute('''\n CREATE POPULATION p3 FOR t WITH SCHEMA(\n IGNORE a;\n- MODEL b, c AS NUMERICAL\n+ SET STATTYPES OF b, c TO NUMERICAL\n )\n ''')\n bdb.execute('CREATE GENERATOR m3 FOR p3;')\n@@ -834,12 +835,12 @@ def test_predictive_relevance():\n bayesdb_register_backend(bdb, CGPM_Backend(cgpm_registry=dict()))\n bdb.execute('''\n CREATE POPULATION satellites FOR satellites_ucs WITH SCHEMA (\n- MODEL apogee AS NUMERICAL;\n- MODEL class_of_orbit AS CATEGORICAL;\n- MODEL country_of_operator AS CATEGORICAL;\n- MODEL launch_mass AS NUMERICAL;\n- MODEL perigee AS NUMERICAL;\n- MODEL period AS NUMERICAL\n+ apogee NUMERICAL;\n+ class_of_orbit CATEGORICAL;\n+ country_of_operator CATEGORICAL;\n+ launch_mass NUMERICAL;\n+ perigee NUMERICAL;\n+ period NUMERICAL\n )\n ''')\n bdb.execute('CREATE GENERATOR m FOR satellites;')\n@@ -1003,7 +1004,7 @@ def test_add_drop_models():\n bdb, CGPM_Backend(dict(), multiprocess=0))\n bdb.execute('''\n CREATE POPULATION p FOR satellites_ucs WITH SCHEMA(\n- GUESS STATTYPES FOR (*);\n+ GUESS STATTYPES OF (*);\n )\n ''')\n bdb.execute('CREATE GENERATOR m FOR p (SUBSAMPLE 10);')\n@@ -1139,12 +1140,12 @@ def test_using_modelnos():\n with cgpm_dummy_satellites_bdb() as bdb:\n bdb.execute('''\n CREATE POPULATION satellites FOR satellites_ucs WITH SCHEMA(\n- MODEL apogee AS NUMERICAL;\n- MODEL class_of_orbit AS CATEGORICAL;\n- MODEL country_of_operator AS CATEGORICAL;\n- MODEL launch_mass AS NUMERICAL;\n- MODEL perigee AS NUMERICAL;\n- MODEL period AS NUMERICAL\n+ SET STATTYPE OF apogee TO NUMERICAL;\n+ SET STATTYPE OF class_of_orbit TO CATEGORICAL;\n+ SET STATTYPE OF country_of_operator TO CATEGORICAL;\n+ SET STATTYPE OF launch_mass TO NUMERICAL;\n+ SET STATTYPE OF perigee TO NUMERICAL;\n+ SET STATTYPE OF period TO NUMERICAL\n )\n ''')\n bayesdb_register_backend(bdb, CGPM_Backend(dict(), multiprocess=0))\n", "new_path": "tests/test_cgpm.py", "old_path": "tests/test_cgpm.py" }, { "change_type": "MODIFY", "diff": "@@ -32,12 +32,12 @@ def cgpm_dummy_satellites_pop_bdb():\n with cgpm_dummy_satellites_bdb() as bdb:\n bdb.execute('''\n create population satellites for satellites_ucs with schema(\n- model apogee as numerical;\n- model class_of_orbit as categorical;\n- model country_of_operator as categorical;\n- model launch_mass as numerical;\n- model perigee as numerical;\n- model period as numerical\n+ apogee numerical;\n+ class_of_orbit categorical;\n+ country_of_operator categorical;\n+ launch_mass numerical;\n+ perigee numerical;\n+ period numerical\n )\n ''')\n backend = CGPM_Backend(dict(), multiprocess=0)\n", "new_path": "tests/test_cgpm_alter.py", "old_path": "tests/test_cgpm_alter.py" }, { "change_type": "MODIFY", "diff": "@@ 
-34,12 +34,11 @@ def test_analysis_subproblems_basic():\n with cgpm_dummy_satellites_bdb() as bdb:\n bdb.execute('''\n CREATE POPULATION satellites FOR satellites_ucs WITH SCHEMA(\n- MODEL apogee AS NUMERICAL;\n- MODEL class_of_orbit AS CATEGORICAL;\n- MODEL country_of_operator AS CATEGORICAL;\n- MODEL launch_mass AS NUMERICAL;\n- MODEL perigee AS NUMERICAL;\n- MODEL period AS NUMERICAL\n+ SET STATTYPE OF class_of_orbit TO CATEGORICAL;\n+ SET STATTYPE OF country_of_operator TO CATEGORICAL;\n+ SET STATTYPE OF launch_mass TO NUMERICAL;\n+ SET STATTYPE OF perigee TO NUMERICAL;\n+ SET STATTYPE OF period TO NUMERICAL\n )\n ''')\n bayesdb_register_backend(bdb, CGPM_Backend(dict(), multiprocess=0))\n", "new_path": "tests/test_cgpm_analysis.py", "old_path": "tests/test_cgpm_analysis.py" }, { "change_type": "MODIFY", "diff": "@@ -41,8 +41,8 @@ def smoke_bdb():\n \n bdb.execute('''\n CREATE POPULATION p FOR t WITH SCHEMA (\n- MODEL a, b, c, d AS NUMERICAL;\n- MODEL e AS NOMINAL\n+ SET STATTYPES OF a, b, c, d TO NUMERICAL;\n+ SET STATTYPES OF e TO NOMINAL\n )\n ''')\n \n", "new_path": "tests/test_cmi.py", "old_path": "tests/test_cmi.py" }, { "change_type": "MODIFY", "diff": "@@ -27,7 +27,8 @@ def test_conditional_probability_simple_inferences():\n bdb.sql_execute('insert into t values (?, ?)', row)\n bdb.execute('''\n create population p for t (\n- model foo, bar as categorical\n+ foo categorical;\n+ bar categorical;\n )\n ''')\n bdb.execute('create generator p_cc for p using cgpm;')\n", "new_path": "tests/test_condprob.py", "old_path": "tests/test_condprob.py" }, { "change_type": "MODIFY", "diff": "@@ -539,7 +539,7 @@ def test_bayesdb_population_add_variable():\n bdb.sql_execute('create table t (a real, b ignore, c real)')\n bdb.execute('''\n create population p for t with schema(\n- model a, c as numerical;\n+ set stattypes of a, c to numerical;\n b ignore;\n );\n ''')\n", "new_path": "tests/test_core.py", "old_path": "tests/test_core.py" }, { "change_type": "MODIFY", "diff": "@@ -42,7 +42,7 @@ def bdb():\n bdb.sql_execute('INSERT INTO t (a, b) VALUES (1,0)')\n \n # Create the population and generator on the existing rows.\n- bdb.execute('CREATE POPULATION p FOR t (MODEL a, b AS NOMINAL)')\n+ bdb.execute('CREATE POPULATION p FOR t (SET STATTYPES OF a, b TO NOMINAL)')\n bdb.execute('CREATE GENERATOR m FOR p;')\n bdb.execute('INITIALIZE 1 MODELS FOR m;')\n bdb.execute('ANALYZE m FOR 1000 ITERATION (OPTIMIZED);')\n", "new_path": "tests/test_infer_hypothetical.py", "old_path": "tests/test_infer_hypothetical.py" }, { "change_type": "MODIFY", "diff": "@@ -47,7 +47,10 @@ def test_nig_normal_latent_numbering():\n for x in xrange(100):\n bdb.sql_execute('insert into t(x, y) values(?, ?)', (x, x*x - 100))\n bdb.execute('''\n- create population p for t(id ignore; model x,y as numerical)\n+ create population p for t(\n+ id ignore;\n+ set stattypes of x,y to numerical;\n+ )\n ''')\n assert core.bayesdb_has_population(bdb, 'p')\n pid = core.bayesdb_get_population(bdb, 'p')\n", "new_path": "tests/test_nig_normal.py", "old_path": "tests/test_nig_normal.py" }, { "change_type": "MODIFY", "diff": "@@ -738,13 +738,23 @@ def test_trivial_precedence_error():\n def test_trivial_commands():\n assert parse_bql_string('''\n create population satellites for satellites_ucs (\n- MODEL country_of_operator, orbit_type AS categorical;\n- MODEL launch_mass AS numerical;\n- MODEL perigee AS numerical;\n- MODEL apogee, period AS numerical\n+ guess(*);\n )\n ''') == \\\n [ast.CreatePop(False, 'satellites', 'satellites_ucs', [\n+ 
ast.PopGuessVars('*'),\n+ ])]\n+ assert parse_bql_string('''\n+ create population satellites for satellites_ucs (\n+ guess stattypes of launch_site, \"contracto=r\";\n+ set stattype of country_of_operator, orbit_type to categorical;\n+ set stattype of launch_mass to numerical;\n+ set stattype of perigee to numerical;\n+ set stattype of apogee, period to numerical;\n+ )\n+ ''') == \\\n+ [ast.CreatePop(False, 'satellites', 'satellites_ucs', [\n+ ast.PopGuessVars(['launch_site', 'contracto=r']),\n ast.PopModelVars(\n ['country_of_operator', 'orbit_type'], 'categorical'),\n ast.PopModelVars(['launch_mass'], 'numerical'),\n@@ -753,8 +763,8 @@ def test_trivial_commands():\n ])]\n assert parse_bql_string('''\n create population satellites for satellites_ucs (\n- MODEL country_of_operator, orbit_type AS categorical;;\n- MODEL apogee, period AS numerical;;\n+ set stattype of country_of_operator, orbit_type to categorical;;\n+ set stattype of apogee, period to numerical;;\n )\n ''') == \\\n [ast.CreatePop(False, 'satellites', 'satellites_ucs', [\n@@ -762,6 +772,24 @@ def test_trivial_commands():\n ['country_of_operator', 'orbit_type'], 'categorical'),\n ast.PopModelVars(['apogee', 'period'], 'numerical'),\n ])]\n+ assert parse_bql_string('''\n+ create population satellites for satellites_ucs (\n+ country_of_operator categorical;\n+ orbit_type categorical;\n+ launch_mass numerical;\n+ perigee numerical;\n+ apogee numerical;\n+ period numerical;\n+ )\n+ ''') == \\\n+ [ast.CreatePop(False, 'satellites', 'satellites_ucs', [\n+ ast.PopModelVars(['country_of_operator'], 'categorical'),\n+ ast.PopModelVars(['orbit_type'], 'categorical'),\n+ ast.PopModelVars(['launch_mass'], 'numerical'),\n+ ast.PopModelVars(['perigee'], 'numerical'),\n+ ast.PopModelVars(['apogee'], 'numerical'),\n+ ast.PopModelVars(['period'], 'numerical'),\n+ ])]\n assert parse_bql_string('drop population satellites') == \\\n [ast.DropPop(False, 'satellites')]\n assert parse_bql_string('create generator t_cc for t using cgpm'\n", "new_path": "tests/test_parse.py", "old_path": "tests/test_parse.py" }, { "change_type": "MODIFY", "diff": "@@ -29,12 +29,12 @@ def test_regress_bonanza__ci_integration():\n bdb, CGPM_Backend(dict(), multiprocess=0))\n bdb.execute('''\n CREATE POPULATION satellites FOR satellites_ucs WITH SCHEMA(\n- MODEL apogee AS NUMERICAL;\n- MODEL class_of_orbit AS NOMINAL;\n- MODEL country_of_operator AS NOMINAL;\n- MODEL launch_mass AS NUMERICAL;\n- MODEL perigee AS NUMERICAL;\n- MODEL period AS NUMERICAL\n+ apogee NUMERICAL;\n+ class_of_orbit NOMINAL;\n+ country_of_operator NOMINAL;\n+ launch_mass NUMERICAL;\n+ perigee NUMERICAL;\n+ period NUMERICAL;\n )\n ''')\n bdb.execute('''\n", "new_path": "tests/test_regress.py", "old_path": "tests/test_regress.py" }, { "change_type": "MODIFY", "diff": "@@ -87,7 +87,7 @@ def test_simulate_given_rowid():\n bdb.execute('''\n CREATE POPULATION t_p FOR t WITH SCHEMA {\n IGNORE x;\n- MODEL y AS NUMERICAL;\n+ y NUMERICAL;\n }\n ''')\n bdb.execute('''\n@@ -153,7 +153,7 @@ def test_simulate_given_rowid_multivariate():\n 'INSERT INTO t (x, y, z, w) VALUES (?, ?, ?, ?)', row)\n bdb.execute('''\n CREATE POPULATION t_p FOR t WITH SCHEMA {\n- MODEL y, z, w AS NUMERICAL;\n+ SET STATTYPES OF y, z, w TO NUMERICAL;\n IGNORE x\n }\n ''')\n@@ -219,7 +219,7 @@ def test_simulate_given_rowid_unincorporated():\n 'INSERT INTO t (x, y, z, w) VALUES (?, ?, ?, ?)', row)\n bdb.execute('''\n CREATE POPULATION t_p FOR t WITH SCHEMA {\n- MODEL y, z, w AS NUMERICAL;\n+ SET STATTYPES OF y, z, w TO NUMERICAL;\n 
IGNORE x\n }\n ''')\n", "new_path": "tests/test_simulate.py", "old_path": "tests/test_simulate.py" } ]
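The diffs above migrate every population schema from the old `MODEL <columns> AS <stattype>` clause to `SET STATTYPE(S) OF <columns> TO <stattype>`, and rename `GUESS STATTYPES FOR` to `GUESS STATTYPES OF`. A minimal sketch of the updated syntax as a caller would issue it; the table, columns, and data are made up for illustration, and the `bayesdb_open` usage is an assumption about the public API rather than something shown in the diff:

```python
import bayeslite

# Hypothetical in-memory database and table, just to exercise the new clauses.
bdb = bayeslite.bayesdb_open()
bdb.sql_execute('CREATE TABLE t (age REAL, gender TEXT, weight REAL)')
bdb.sql_execute("INSERT INTO t VALUES (34, 'f', 61.5)")
bdb.sql_execute("INSERT INTO t VALUES (41, 'm', 72.0)")

# New-style schema: GUESS STATTYPES OF replaces GUESS STATTYPES FOR, and
# SET STATTYPE(S) OF ... TO ... replaces MODEL ... AS ...
bdb.execute('''
    CREATE POPULATION p FOR t WITH SCHEMA (
        GUESS STATTYPES OF (*);
        SET STATTYPES OF age, weight TO NUMERICAL;
        SET STATTYPE OF gender TO NOMINAL
    )
''')
```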
bb6072b6cb5ea2fa239357708c47e7d62cc8bae0
sys-bio/tellurium
null
null
Drop phrasedml and sbml2matlab requirement. phrasedml and sbml2matlab don't exist for Python 3.10 yet, so we need to adjust the code and fail gracefully when trying to import them.
[ { "change_type": "MODIFY", "diff": "@@ -147,7 +147,7 @@ DEFAULTS = [\n 'pylab/inline/width': 6,\n 'pylab/inline/height': 4,\n 'pylab/inline/bbox_inches': True,\n- 'startup/run_lines': 'import antimony; import sbml2matlab; import rrplugins; import numpy; import scipy; import matplotlib; import roadrunner; import tellurium as te',\n+ 'startup/run_lines': 'import antimony; import rrplugins; import numpy; import scipy; import matplotlib; import roadrunner; import tellurium as te',\n 'startup/use_run_file': False,\n 'startup/run_file': '',\n 'greedy_completer': False,\n", "new_path": "spyder_mod/Spyder 5.1.5/site-packages/spyder/config/main.py", "old_path": "spyder_mod/Spyder 5.1.5/site-packages/spyder/config/main.py" }, { "change_type": "MODIFY", "diff": "@@ -4,7 +4,6 @@ Class for working with omex files.\n from __future__ import print_function, division, absolute_import\n import os\n import re\n-import shutil\n import tempfile\n import json\n import getpass\n@@ -16,8 +15,10 @@ try:\n except ImportError:\n import tecombine as libcombine\n \n-\n-from .convert_phrasedml import phrasedmlImporter\n+try:\n+ from .convert_phrasedml import phrasedmlImporter\n+except:\n+ pass\n from .convert_antimony import antimonyConverter\n \n class OmexFormatDetector:\n@@ -146,8 +147,11 @@ class Omex(object):\n \n :return:\n \"\"\"\n- import phrasedml\n- phrasedml.clearReferencedSBML()\n+ try:\n+ import phrasedml\n+ phrasedml.clearReferencedSBML()\n+ except:\n+ pass\n \n workingDir = tempfile.mkdtemp(suffix=\"_sedml\")\n self.writeFiles(workingDir)\n@@ -163,8 +167,11 @@ class Omex(object):\n \"\"\" Export Omex instance as combine archive.\n \n :param outfile: A path to the output file\"\"\"\n- import phrasedml\n- phrasedml.clearReferencedSBML()\n+ try:\n+ import phrasedml\n+ phrasedml.clearReferencedSBML()\n+ except:\n+ pass\n \n archive = libcombine.CombineArchive()\n description = libcombine.OmexDescription()\n@@ -232,9 +239,7 @@ class inlineOmexImporter:\n if not os.path.isfile(path):\n raise IOError('No such file: {}'.format(path))\n \n- d = None\n if not os.access(os.getcwd(), os.W_OK):\n- d = os.getcwd()\n os.chdir(tempfile.gettempdir())\n \n omex = libcombine.CombineArchive()\n@@ -242,8 +247,6 @@ class inlineOmexImporter:\n raise IOError('Could not read COMBINE archive.')\n importer = inlineOmexImporter(omex)\n \n- # if d is not None:\n- # os.chdir(d)\n return importer\n \n def __init__(self, omex):\n@@ -445,13 +448,14 @@ class inlineOmexImporter:\n for entry in self.sedml_entries:\n sedml_str = self.omex.extractEntryToString(entry.getLocation()).replace('BIOMD0000000012,xml',\n 'BIOMD0000000012.xml')\n+ phrasedml_output = \"\"\n try:\n phrasedml_output = phrasedmlImporter.fromContent(\n sedml_str,\n self.makeSBMLResourceMap(self.fixSep(os.path.dirname(entry.getLocation())))\n ).toPhrasedml().rstrip().replace('compartment', 'compartment_')\n except Exception as e:\n- errmsg = 'Could not read embedded SED-ML file {}.'.format(entry.getLocation())\n+ errmsg = 'Could not read embedded SED-ML file or could not convert to phraSED-ML: {}.\\n{}'.format(entry.getLocation(), e.what())\n try:\n try:\n import libsedml\n", "new_path": "tellurium/teconverters/convert_omex.py", "old_path": "tellurium/teconverters/convert_omex.py" }, { "change_type": "MODIFY", "diff": "@@ -2,114 +2,119 @@ from __future__ import print_function, division, absolute_import\n \n import os\n import re\n-import phrasedml\n try:\n import tesedml as libsedml\n except ImportError:\n import libsedml\n \n+#Only load this class if phrasedml 
exists\n+try:\n+ import phrasedml\n \n-class phrasedmlImporter(object):\n-\n- def __init__(self, sbml_map={}):\n- \"\"\" Constructor. \"\"\"\n- self.sedml_str = None\n- self.sedml_path = None\n- self.sbml_map = sbml_map\n-\n-\n- @classmethod\n- def fromContent(cls, sedml_str, sbml_map={}):\n-\n- # FIXME: bad hack for https://github.com/fbergmann/libSEDML/issues/47\n- # test for JWS quirks\n- if 'xmlns=\"http://sed-ml.org/sed-ml/level1/version3\"' in sedml_str:\n- # import xml.etree.ElementTree as ElementTree\n- # root = ElementTree.fromstring(sedml_str)\n- # for p in root.findall('{http://sed-ml.org/sed-ml/level1/version3}plot2D'):\n- # if not 'logX' in p.attrib or not 'logY' in p.attrib:\n- # logX = False\n- # logY = False\n- # for l in p.findall('{http://sed-ml.org/sed-ml/level1/version3}listOfCurves'):\n- # for c in l.findall('{http://sed-ml.org/sed-ml/level1/version3}curve'):\n- # if 'logX' in c.attrib and c.attrib['logX'].lower() == 'true':\n- # logX = True\n- # if 'logY' in c.attrib and c.attrib['logY'].lower() == 'true':\n- # logY = True\n- # p.set('logX', logX)\n- # p.set('logY', logY)\n- # sedml_str = (ElementTree.tostring(root, encoding='utf8', method='xml')).decode('utf8')\n- while True:\n- p = sedml_str.find('plot2D')\n- if p < 0:\n- break\n- b = sedml_str.find('>', p)\n- if b < 0:\n- break\n- l = sedml_str.find('logX', p)\n- if l < 0 or b < l:\n- sedml_str = sedml_str[:p] + 'plot2D logX=\"false\" logY=\"false\" ' + sedml_str[p+len('plot2D'):]\n+ class phrasedmlImporter(object):\n+ \n+ def __init__(self, sbml_map={}):\n+ \"\"\" Constructor. \"\"\"\n+ self.sedml_str = None\n+ self.sedml_path = None\n+ self.sbml_map = sbml_map\n+ \n+ \n+ @classmethod\n+ def fromContent(cls, sedml_str, sbml_map={}):\n+ \n+ # FIXME: bad hack for https://github.com/fbergmann/libSEDML/issues/47\n+ # test for JWS quirks\n+ if 'xmlns=\"http://sed-ml.org/sed-ml/level1/version3\"' in sedml_str:\n+ # import xml.etree.ElementTree as ElementTree\n+ # root = ElementTree.fromstring(sedml_str)\n+ # for p in root.findall('{http://sed-ml.org/sed-ml/level1/version3}plot2D'):\n+ # if not 'logX' in p.attrib or not 'logY' in p.attrib:\n+ # logX = False\n+ # logY = False\n+ # for l in p.findall('{http://sed-ml.org/sed-ml/level1/version3}listOfCurves'):\n+ # for c in l.findall('{http://sed-ml.org/sed-ml/level1/version3}curve'):\n+ # if 'logX' in c.attrib and c.attrib['logX'].lower() == 'true':\n+ # logX = True\n+ # if 'logY' in c.attrib and c.attrib['logY'].lower() == 'true':\n+ # logY = True\n+ # p.set('logX', logX)\n+ # p.set('logY', logY)\n+ # sedml_str = (ElementTree.tostring(root, encoding='utf8', method='xml')).decode('utf8')\n+ while True:\n+ p = sedml_str.find('plot2D')\n+ if p < 0:\n+ break\n+ b = sedml_str.find('>', p)\n+ if b < 0:\n+ break\n+ l = sedml_str.find('logX', p)\n+ if l < 0 or b < l:\n+ sedml_str = sedml_str[:p] + 'plot2D logX=\"false\" logY=\"false\" ' + sedml_str[p+len('plot2D'):]\n+ else:\n+ break\n+ print(sedml_str)\n+ \n+ \n+ importer = phrasedmlImporter(sbml_map)\n+ importer.sedml_str = sedml_str\n+ # test for errors\n+ result = importer.toPhrasedml()\n+ if result is None:\n+ # get errors from libsedml\n+ doc = libsedml.SedReader().readSedMLFromString(sedml_str)\n+ if doc.getNumErrors():\n+ max_len = 100\n+ message = doc.getError(doc.getNumErrors()-1).getMessage()\n+ message = message[:max_len] + '...' 
if len(message) > max_len else message\n+ raise RuntimeError('Errors reading SED-ML: {}'.format(message))\n else:\n- break\n- print(sedml_str)\n-\n-\n- importer = phrasedmlImporter(sbml_map)\n- importer.sedml_str = sedml_str\n- # test for errors\n- result = importer.toPhrasedml()\n- if result is None:\n- # get errors from libsedml\n- doc = libsedml.SedReader().readSedMLFromString(sedml_str)\n- if doc.getNumErrors():\n- max_len = 100\n- message = doc.getError(doc.getNumErrors()-1).getMessage()\n- message = message[:max_len] + '...' if len(message) > max_len else message\n- raise RuntimeError('Errors reading SED-ML: {}'.format(message))\n- else:\n- raise RuntimeError('Unable to read SED-ML.')\n- return importer\n+ raise RuntimeError('Unable to read SED-ML.')\n+ return importer\n+ \n+ \n+ def isInRootDir(self, file):\n+ d = os.path.split(file)[0]\n+ return d == '' or d == '.'\n+ \n+ def removeFileExt(self, filename):\n+ return os.path.splitext(filename)[0]\n+ \n+ def formatResource(self, filename):\n+ \"\"\" Normalizes and also strips xml extension.\"\"\"\n+ return self.removeFileExt(os.path.normpath(filename))\n+ \n+ def fixModelRefs(self, phrasedml_str):\n+ ''' Changes all references of type myModel.xml to myModel.'''\n+ model_ref = re.compile(r'^.*\\s*model\\s*\"([^\"]*)\"\\s*$')\n+ out_str = ''\n+ for line in phrasedml_str.splitlines():\n+ match = model_ref.match(line)\n+ if match:\n+ filename = match.group(1)\n+ if self.isInRootDir(filename):\n+ line = line.replace(filename,self.formatResource(filename))\n+ out_str += line+'\\n'\n+ return out_str\n+ \n+ def toPhrasedml(self):\n+ # assign sbml resources\n+ # print('toPhrasedml sbml resources:')\n+ phrasedml.clearReferencedSBML()\n+ for sbml_resource in self.sbml_map:\n+ # print(' {} -> {}'.format(sbml_resource, self.sbml_map[sbml_resource][:30]))\n+ phrasedml.setReferencedSBML(sbml_resource, self.sbml_map[sbml_resource])\n+ # convert to phrasedml\n+ if self.sedml_str:\n+ result = phrasedml.convertString(self.sedml_str)\n+ if result is None:\n+ raise RuntimeError(phrasedml.getLastError())\n+ return self.fixModelRefs(phrasedml.getLastPhraSEDML())\n+ elif self.sedml_path:\n+ result = phrasedml.convertFile(self.sedml_str)\n+ if result is None:\n+ raise RuntimeError(phrasedml.getLastError())\n+ return self.fixModelRefs(phrasedml.getLastPhraSEDML())\n \n-\n- def isInRootDir(self, file):\n- d = os.path.split(file)[0]\n- return d == '' or d == '.'\n-\n- def removeFileExt(self, filename):\n- return os.path.splitext(filename)[0]\n-\n- def formatResource(self, filename):\n- \"\"\" Normalizes and also strips xml extension.\"\"\"\n- return self.removeFileExt(os.path.normpath(filename))\n-\n- def fixModelRefs(self, phrasedml_str):\n- ''' Changes all references of type myModel.xml to myModel.'''\n- model_ref = re.compile(r'^.*\\s*model\\s*\"([^\"]*)\"\\s*$')\n- out_str = ''\n- for line in phrasedml_str.splitlines():\n- match = model_ref.match(line)\n- if match:\n- filename = match.group(1)\n- if self.isInRootDir(filename):\n- line = line.replace(filename,self.formatResource(filename))\n- out_str += line+'\\n'\n- return out_str\n-\n- def toPhrasedml(self):\n- # assign sbml resources\n- # print('toPhrasedml sbml resources:')\n- phrasedml.clearReferencedSBML()\n- for sbml_resource in self.sbml_map:\n- # print(' {} -> {}'.format(sbml_resource, self.sbml_map[sbml_resource][:30]))\n- phrasedml.setReferencedSBML(sbml_resource, self.sbml_map[sbml_resource])\n- # convert to phrasedml\n- if self.sedml_str:\n- result = phrasedml.convertString(self.sedml_str)\n- if 
result is None:\n- raise RuntimeError(phrasedml.getLastError())\n- return self.fixModelRefs(phrasedml.getLastPhraSEDML())\n- elif self.sedml_path:\n- result = phrasedml.convertFile(self.sedml_str)\n- if result is None:\n- raise RuntimeError(phrasedml.getLastError())\n- return self.fixModelRefs(phrasedml.getLastPhraSEDML())\n+except:\n+ pass\n\\ No newline at end of file\n", "new_path": "tellurium/teconverters/convert_phrasedml.py", "old_path": "tellurium/teconverters/convert_phrasedml.py" } ]
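The tellurium change above wraps the phrasedml import in try/except so the package still loads when phrasedml has no build for the running Python version, and only fails when a conversion is actually requested. A minimal sketch of that guarded-optional-dependency pattern; the helper name `sedml_to_phrasedml` is invented for illustration (the commit itself uses a bare `except:`, while the sketch narrows it to `ImportError`):

```python
# Sketch of the optional-dependency pattern used above: import the module if it
# is installed and degrade gracefully when it is not, deferring the failure to
# the point where the feature is actually used.
try:
    import phrasedml  # optional; no build exists yet for some Python versions
    HAS_PHRASEDML = True
except ImportError:
    phrasedml = None
    HAS_PHRASEDML = False


def sedml_to_phrasedml(sedml_str):
    """Convert a SED-ML string to phraSED-ML, if phrasedml is available."""
    if not HAS_PHRASEDML:
        raise RuntimeError(
            "phrasedml is not installed; SED-ML to phraSED-ML conversion is "
            "unavailable on this Python version")
    result = phrasedml.convertString(sedml_str)
    if result is None:
        raise RuntimeError(phrasedml.getLastError())
    return phrasedml.getLastPhraSEDML()
```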
f692e8da1a75d3f3f938a21727514204725e52b8
cocotb/cocotb
null
null
Use context managers to handle profiling. This avoids needing to remember to disable the profiler before every return statement, and reads more cleanly than using a `try` / `finally`.
[ { "change_type": "MODIFY", "diff": "@@ -71,6 +71,17 @@ from cocotb.triggers import (Trigger, GPITrigger, Timer, ReadOnly, PythonTrigger\n from cocotb.log import SimLog\n from cocotb.result import (TestComplete, TestError, ReturnValue, raise_error,\n create_error, ExternalException)\n+from cocotb.utils import nullcontext\n+\n+\n+class profiling_context(object):\n+ \"\"\" Context manager that profiles its contents \"\"\"\n+ def __enter__(self):\n+ _profile.enable()\n+\n+ def __exit__(self, *excinfo):\n+ _profile.disable()\n+\n \n class external_state(object):\n INIT = 0\n@@ -280,26 +291,26 @@ class Scheduler(object):\n if _profiling:\n ps = pstats.Stats(_profile).sort_stats('cumulative')\n ps.dump_stats(\"test_profile.pstat\")\n- _profile.enable()\n-\n- self._mode = Scheduler._MODE_NORMAL\n- if trigger is not None:\n- trigger.unprime()\n+ ctx = profiling_context()\n+ else:\n+ ctx = nullcontext()\n \n- # Issue previous test result, if there is one\n- if self._test_result is not None:\n- if _debug:\n- self.log.debug(\"Issue test result to regression object\")\n- cocotb.regression.handle_result(self._test_result)\n- self._test_result = None\n- if self._entrypoint is not None:\n- test = self._entrypoint\n- self._entrypoint = None\n- self.schedule(test)\n- self.advance()\n+ with ctx:\n+ self._mode = Scheduler._MODE_NORMAL\n+ if trigger is not None:\n+ trigger.unprime()\n \n- if _profiling:\n- _profile.disable()\n+ # Issue previous test result, if there is one\n+ if self._test_result is not None:\n+ if _debug:\n+ self.log.debug(\"Issue test result to regression object\")\n+ cocotb.regression.handle_result(self._test_result)\n+ self._test_result = None\n+ if self._entrypoint is not None:\n+ test = self._entrypoint\n+ self._entrypoint = None\n+ self.schedule(test)\n+ self.advance()\n \n def react(self, trigger, depth=0):\n \"\"\"React called when a trigger fires.\n@@ -308,142 +319,136 @@ class Scheduler(object):\n schedule them.\n \"\"\"\n if _profiling and not depth:\n- _profile.enable()\n-\n- # When a trigger fires it is unprimed internally\n- if _debug:\n- self.log.debug(\"Trigger fired: %s\" % str(trigger))\n- # trigger.unprime()\n+ ctx = profiling_context()\n+ else:\n+ ctx = nullcontext()\n \n- if self._mode == Scheduler._MODE_TERM:\n+ with ctx:\n+ # When a trigger fires it is unprimed internally\n if _debug:\n- self.log.debug(\"Ignoring trigger %s since we're terminating\" %\n- str(trigger))\n- return\n+ self.log.debug(\"Trigger fired: %s\" % str(trigger))\n+ # trigger.unprime()\n \n- if trigger is self._readonly:\n- self._mode = Scheduler._MODE_READONLY\n- # Only GPI triggers affect the simulator scheduling mode\n- elif isinstance(trigger, GPITrigger):\n- self._mode = Scheduler._MODE_NORMAL\n+ if self._mode == Scheduler._MODE_TERM:\n+ if _debug:\n+ self.log.debug(\"Ignoring trigger %s since we're terminating\" %\n+ str(trigger))\n+ return\n \n- # We're the only source of ReadWrite triggers which are only used for\n- # playing back any cached signal updates\n- if trigger is self._readwrite:\n+ if trigger is self._readonly:\n+ self._mode = Scheduler._MODE_READONLY\n+ # Only GPI triggers affect the simulator scheduling mode\n+ elif isinstance(trigger, GPITrigger):\n+ self._mode = Scheduler._MODE_NORMAL\n \n- if _debug:\n- self.log.debug(\"Writing cached signal updates\")\n+ # We're the only source of ReadWrite triggers which are only used for\n+ # playing back any cached signal updates\n+ if trigger is self._readwrite:\n+\n+ if _debug:\n+ self.log.debug(\"Writing cached signal updates\")\n \n- 
while self._writes:\n- handle, value = self._writes.popitem()\n- handle.setimmediatevalue(value)\n+ while self._writes:\n+ handle, value = self._writes.popitem()\n+ handle.setimmediatevalue(value)\n \n- self._readwrite.unprime()\n+ self._readwrite.unprime()\n \n- if _profiling:\n- _profile.disable()\n- return\n+ return\n \n- # Similarly if we've scheduled our next_timestep on way to readwrite\n- if trigger is self._next_timestep:\n+ # Similarly if we've scheduled our next_timestep on way to readwrite\n+ if trigger is self._next_timestep:\n \n- if not self._writes:\n- self.log.error(\n- \"Moved to next timestep without any pending writes!\")\n- else:\n- self.log.debug(\n- \"Priming ReadWrite trigger so we can playback writes\")\n- self._readwrite.prime(self.react)\n+ if not self._writes:\n+ self.log.error(\n+ \"Moved to next timestep without any pending writes!\")\n+ else:\n+ self.log.debug(\n+ \"Priming ReadWrite trigger so we can playback writes\")\n+ self._readwrite.prime(self.react)\n \n- if _profiling:\n- _profile.disable()\n- return\n+ return\n \n- if trigger not in self._trigger2coros:\n-\n- # GPI triggers should only be ever pending if there is an\n- # associated coroutine waiting on that trigger, otherwise it would\n- # have been unprimed already\n- if isinstance(trigger, GPITrigger):\n- self.log.critical(\n- \"No coroutines waiting on trigger that fired: %s\" %\n- str(trigger))\n-\n- trigger.log.info(\"I'm the culprit\")\n- # For Python triggers this isn't actually an error - we might do\n- # event.set() without knowing whether any coroutines are actually\n- # waiting on this event, for example\n- elif _debug:\n- self.log.debug(\n- \"No coroutines waiting on trigger that fired: %s\" %\n- str(trigger))\n-\n- if _profiling:\n- _profile.disable()\n- return\n+ if trigger not in self._trigger2coros:\n \n- # Scheduled coroutines may append to our waiting list so the first\n- # thing to do is pop all entries waiting on this trigger.\n- scheduling = self._trigger2coros.pop(trigger)\n+ # GPI triggers should only be ever pending if there is an\n+ # associated coroutine waiting on that trigger, otherwise it would\n+ # have been unprimed already\n+ if isinstance(trigger, GPITrigger):\n+ self.log.critical(\n+ \"No coroutines waiting on trigger that fired: %s\" %\n+ str(trigger))\n+\n+ trigger.log.info(\"I'm the culprit\")\n+ # For Python triggers this isn't actually an error - we might do\n+ # event.set() without knowing whether any coroutines are actually\n+ # waiting on this event, for example\n+ elif _debug:\n+ self.log.debug(\n+ \"No coroutines waiting on trigger that fired: %s\" %\n+ str(trigger))\n+\n+ return\n+\n+ # Scheduled coroutines may append to our waiting list so the first\n+ # thing to do is pop all entries waiting on this trigger.\n+ scheduling = self._trigger2coros.pop(trigger)\n \n- if _debug:\n- debugstr = \"\\n\\t\".join([coro.__name__ for coro in scheduling])\n- if len(scheduling):\n- debugstr = \"\\n\\t\" + debugstr\n- self.log.debug(\"%d pending coroutines for event %s%s\" %\n- (len(scheduling), str(trigger), debugstr))\n-\n- # This trigger isn't needed any more\n- trigger.unprime()\n-\n- # If the coroutine was waiting on multiple triggers we may be able\n- # to unprime the other triggers that didn't fire\n- scheduling_set = set(scheduling)\n- other_triggers = {\n- t\n- for coro in scheduling\n- for t in self._coro2triggers[coro]\n- } - {trigger}\n-\n- for pending in other_triggers:\n- # every coroutine waiting on this trigger is already being woken\n- if 
scheduling_set.issuperset(self._trigger2coros[pending]):\n- if pending.primed:\n- pending.unprime()\n- del self._trigger2coros[pending]\n-\n- for coro in scheduling:\n- if _debug:\n- self.log.debug(\"Scheduling coroutine %s\" % (coro.__name__))\n- self.schedule(coro, trigger=trigger)\n if _debug:\n- self.log.debug(\"Scheduled coroutine %s\" % (coro.__name__))\n+ debugstr = \"\\n\\t\".join([coro.__name__ for coro in scheduling])\n+ if len(scheduling):\n+ debugstr = \"\\n\\t\" + debugstr\n+ self.log.debug(\"%d pending coroutines for event %s%s\" %\n+ (len(scheduling), str(trigger), debugstr))\n+\n+ # This trigger isn't needed any more\n+ trigger.unprime()\n \n- if not depth:\n- # Schedule may have queued up some events so we'll burn through those\n- while self._pending_events:\n+ # If the coroutine was waiting on multiple triggers we may be able\n+ # to unprime the other triggers that didn't fire\n+ scheduling_set = set(scheduling)\n+ other_triggers = {\n+ t\n+ for coro in scheduling\n+ for t in self._coro2triggers[coro]\n+ } - {trigger}\n+\n+ for pending in other_triggers:\n+ # every coroutine waiting on this trigger is already being woken\n+ if scheduling_set.issuperset(self._trigger2coros[pending]):\n+ if pending.primed:\n+ pending.unprime()\n+ del self._trigger2coros[pending]\n+\n+ for coro in scheduling:\n if _debug:\n- self.log.debug(\"Scheduling pending event %s\" %\n- (str(self._pending_events[0])))\n- self._pending_events.pop(0).set()\n+ self.log.debug(\"Scheduling coroutine %s\" % (coro.__name__))\n+ self.schedule(coro, trigger=trigger)\n+ if _debug:\n+ self.log.debug(\"Scheduled coroutine %s\" % (coro.__name__))\n \n- while self._pending_triggers:\n- if _debug:\n- self.log.debug(\"Scheduling pending trigger %s\" %\n- (str(self._pending_triggers[0])))\n- self.react(self._pending_triggers.pop(0), depth=depth + 1)\n+ if not depth:\n+ # Schedule may have queued up some events so we'll burn through those\n+ while self._pending_events:\n+ if _debug:\n+ self.log.debug(\"Scheduling pending event %s\" %\n+ (str(self._pending_events[0])))\n+ self._pending_events.pop(0).set()\n \n- # We only advance for GPI triggers\n- if not depth and isinstance(trigger, GPITrigger):\n- self.advance()\n+ while self._pending_triggers:\n+ if _debug:\n+ self.log.debug(\"Scheduling pending trigger %s\" %\n+ (str(self._pending_triggers[0])))\n+ self.react(self._pending_triggers.pop(0), depth=depth + 1)\n \n- if _debug:\n- self.log.debug(\"All coroutines scheduled, handing control back\"\n- \" to simulator\")\n+ # We only advance for GPI triggers\n+ if not depth and isinstance(trigger, GPITrigger):\n+ self.advance()\n+\n+ if _debug:\n+ self.log.debug(\"All coroutines scheduled, handing control back\"\n+ \" to simulator\")\n \n- if _profiling:\n- _profile.disable()\n- return\n \n def unschedule(self, coro):\n \"\"\"Unschedule a coroutine. 
Unprime any pending triggers\"\"\"\n", "new_path": "cocotb/scheduler.py", "old_path": "cocotb/scheduler.py" }, { "change_type": "MODIFY", "diff": "@@ -491,6 +491,26 @@ class ParametrizedSingleton(type):\n return self\n \n \n+# backport of Python 3.7's contextlib.nullcontext\n+class nullcontext(object):\n+ \"\"\"Context manager that does no additional processing.\n+ Used as a stand-in for a normal context manager, when a particular\n+ block of code is only sometimes used with a normal context manager:\n+ cm = optional_cm if condition else nullcontext()\n+ with cm:\n+ # Perform operation, using optional_cm if condition is True\n+ \"\"\"\n+\n+ def __init__(self, enter_result=None):\n+ self.enter_result = enter_result\n+\n+ def __enter__(self):\n+ return self.enter_result\n+\n+ def __exit__(self, *excinfo):\n+ pass\n+\n+\n if __name__ == \"__main__\":\n import random\n a = \"\"\n", "new_path": "cocotb/utils.py", "old_path": "cocotb/utils.py" } ]
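The scheduler refactor above swaps paired `_profile.enable()` / `_profile.disable()` calls for a `with` block over either a profiling context or a no-op `nullcontext`, so every early return leaves the profiler disabled. A small self-contained sketch of the same pattern, using the standard-library profiler and an invented `react` function rather than cocotb's internals:

```python
import cProfile
from contextlib import nullcontext  # Python 3.7+; the diff backports it for older versions

_profile = cProfile.Profile()


class profiling_context(object):
    """Context manager that profiles whatever runs inside the with-block."""

    def __enter__(self):
        _profile.enable()

    def __exit__(self, *excinfo):
        _profile.disable()


def react(trigger, profiling_enabled=False):
    # Pick the real profiler or a no-op once, then write the body a single
    # time; every early return still leaves the profiler disabled on exit.
    ctx = profiling_context() if profiling_enabled else nullcontext()
    with ctx:
        if trigger is None:
            return
        print("handling", trigger)


react("rising edge", profiling_enabled=True)
_profile.print_stats("cumulative")
```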
15dc5aa37dfc240a400fd01584eb711a4802ae06
appscale/gts
null
null
Create separate set of constants for operations. This is to differentiate between transaction table values and entity operations.
[ { "change_type": "MODIFY", "diff": "@@ -19,6 +19,7 @@ from cassandra.query import ValueSequence\n from .. import dbconstants\n from .. import helper_functions\n from ..dbconstants import AppScaleDBConnectionError\n+from ..dbconstants import Operations\n from ..dbconstants import TxnActions\n from ..dbinterface import AppDBInterface\n from ..unpackaged import APPSCALE_LIB_DIR\n@@ -97,29 +98,29 @@ def deletions_for_entity(entity, composite_indices=()):\n for entry in asc_rows:\n deletions.append({'table': dbconstants.ASC_PROPERTY_TABLE,\n 'key': entry[0],\n- 'operation': TxnActions.DELETE})\n+ 'operation': Operations.DELETE})\n \n dsc_rows = get_index_kv_from_tuple(\n [(prefix, entity)], reverse=True)\n for entry in dsc_rows:\n deletions.append({'table': dbconstants.DSC_PROPERTY_TABLE,\n 'key': entry[0],\n- 'operation': TxnActions.DELETE})\n+ 'operation': Operations.DELETE})\n \n for key in get_composite_indexes_rows([entity], composite_indices):\n deletions.append({'table': dbconstants.COMPOSITE_TABLE,\n 'key': key,\n- 'operation': TxnActions.DELETE})\n+ 'operation': Operations.DELETE})\n \n entity_key = get_entity_key(prefix, entity.key().path())\n deletions.append({'table': dbconstants.APP_ENTITY_TABLE,\n 'key': entity_key,\n- 'operation': TxnActions.DELETE})\n+ 'operation': Operations.DELETE})\n \n kind_key = get_kind_key(prefix, entity.key().path())\n deletions.append({'table': dbconstants.APP_KIND_TABLE,\n 'key': kind_key,\n- 'operation': TxnActions.DELETE})\n+ 'operation': Operations.DELETE})\n \n return deletions\n \n@@ -163,14 +164,14 @@ def index_deletions(old_entity, new_entity, composite_indices=()):\n [app_id, namespace, kind, prop.name(), value, entity_key])\n deletions.append({'table': dbconstants.ASC_PROPERTY_TABLE,\n 'key': key,\n- 'operation': TxnActions.DELETE})\n+ 'operation': Operations.DELETE})\n \n reverse_key = dbconstants.KEY_DELIMITER.join(\n [app_id, namespace, kind, prop.name(),\n helper_functions.reverse_lex(value), entity_key])\n deletions.append({'table': dbconstants.DSC_PROPERTY_TABLE,\n 'key': reverse_key,\n- 'operation': TxnActions.DELETE})\n+ 'operation': Operations.DELETE})\n \n changed_prop_names = set(changed_props.keys())\n for index in composite_indices:\n@@ -187,7 +188,7 @@ def index_deletions(old_entity, new_entity, composite_indices=()):\n for entry in (old_entries - new_entries):\n deletions.append({'table': dbconstants.COMPOSITE_TABLE,\n 'key': entry,\n- 'operation': TxnActions.DELETE})\n+ 'operation': Operations.DELETE})\n \n return deletions\n \n@@ -218,7 +219,7 @@ def mutations_for_entity(entity, txn, current_value=None,\n dbconstants.APP_ENTITY_SCHEMA[1]: str(txn)}\n mutations.append({'table': dbconstants.APP_ENTITY_TABLE,\n 'key': entity_key,\n- 'operation': TxnActions.PUT,\n+ 'operation': Operations.PUT,\n 'values': entity_value})\n \n reference_value = {'reference': entity_key}\n@@ -226,27 +227,27 @@ def mutations_for_entity(entity, txn, current_value=None,\n kind_key = get_kind_key(prefix, entity.key().path())\n mutations.append({'table': dbconstants.APP_KIND_TABLE,\n 'key': kind_key,\n- 'operation': TxnActions.PUT,\n+ 'operation': Operations.PUT,\n 'values': reference_value})\n \n asc_rows = get_index_kv_from_tuple([(prefix, entity)])\n for entry in asc_rows:\n mutations.append({'table': dbconstants.ASC_PROPERTY_TABLE,\n 'key': entry[0],\n- 'operation': TxnActions.PUT,\n+ 'operation': Operations.PUT,\n 'values': reference_value})\n \n dsc_rows = get_index_kv_from_tuple([(prefix, entity)], reverse=True)\n for entry in dsc_rows:\n 
mutations.append({'table': dbconstants.DSC_PROPERTY_TABLE,\n 'key': entry[0],\n- 'operation': TxnActions.PUT,\n+ 'operation': Operations.PUT,\n 'values': reference_value})\n \n for key in get_composite_indexes_rows([entity], composite_indices):\n mutations.append({'table': dbconstants.COMPOSITE_TABLE,\n 'key': key,\n- 'operation': TxnActions.PUT,\n+ 'operation': Operations.PUT,\n 'values': reference_value})\n \n return mutations\n@@ -479,7 +480,7 @@ class DatastoreProxy(AppDBInterface):\n prepared_statements = {'insert': {}, 'delete': {}}\n for mutation in mutations:\n table = mutation['table']\n- if mutation['operation'] == TxnActions.PUT:\n+ if mutation['operation'] == Operations.PUT:\n if table not in prepared_statements['insert']:\n prepared_statements['insert'][table] = self.prepare_insert(table)\n values = mutation['values']\n@@ -488,7 +489,7 @@ class DatastoreProxy(AppDBInterface):\n prepared_statements['insert'][table],\n (bytearray(mutation['key']), column, bytearray(values[column]))\n )\n- elif mutation['operation'] == TxnActions.DELETE:\n+ elif mutation['operation'] == Operations.DELETE:\n if table not in prepared_statements['delete']:\n prepared_statements['delete'][table] = self.prepare_delete(table)\n batch.add(\n@@ -513,7 +514,7 @@ class DatastoreProxy(AppDBInterface):\n statements_and_params = []\n for mutation in mutations:\n table = mutation['table']\n- if mutation['operation'] == TxnActions.PUT:\n+ if mutation['operation'] == Operations.PUT:\n if table not in prepared_statements['insert']:\n prepared_statements['insert'][table] = self.prepare_insert(table)\n values = mutation['values']\n@@ -522,7 +523,7 @@ class DatastoreProxy(AppDBInterface):\n bytearray(values[column]))\n statements_and_params.append(\n (prepared_statements['insert'][table], params))\n- elif mutation['operation'] == TxnActions.DELETE:\n+ elif mutation['operation'] == Operations.DELETE:\n if table not in prepared_statements['delete']:\n prepared_statements['delete'][table] = self.prepare_delete(table)\n params = (bytearray(mutation['key']),)\n", "new_path": "AppDB/appscale/datastore/cassandra_env/cassandra_interface.py", "old_path": "AppDB/appscale/datastore/cassandra_env/cassandra_interface.py" }, { "change_type": "MODIFY", "diff": "@@ -162,6 +162,12 @@ class TxnActions(object):\n ENQUEUE_TASK = '3'\n \n \n+class Operations(object):\n+ \"\"\" Possible datastore operations on entities. 
\"\"\"\n+ PUT = 'put'\n+ DELETE = 'delete'\n+\n+\n ###############################\n # Generic Datastore Exceptions\n ###############################\n", "new_path": "AppDB/appscale/datastore/dbconstants.py", "old_path": "AppDB/appscale/datastore/dbconstants.py" }, { "change_type": "MODIFY", "diff": "@@ -899,9 +899,9 @@ class TestDatastoreServer(unittest.TestCase):\n mutations = mutations_for_entity(entity, txn, new_entity)\n self.assertEqual(len(mutations), 6)\n self.assertEqual(mutations[0]['table'], dbconstants.ASC_PROPERTY_TABLE)\n- self.assertEqual(mutations[0]['operation'], dbconstants.TxnActions.DELETE)\n+ self.assertEqual(mutations[0]['operation'], dbconstants.Operations.DELETE)\n self.assertEqual(mutations[1]['table'], dbconstants.DSC_PROPERTY_TABLE)\n- self.assertEqual(mutations[1]['operation'], dbconstants.TxnActions.DELETE)\n+ self.assertEqual(mutations[1]['operation'], dbconstants.Operations.DELETE)\n self.assertEqual(mutations[2]['table'], dbconstants.APP_ENTITY_TABLE)\n self.assertEqual(mutations[3]['table'], dbconstants.APP_KIND_TABLE)\n self.assertEqual(mutations[4]['table'], dbconstants.ASC_PROPERTY_TABLE)\n@@ -937,11 +937,11 @@ class TestDatastoreServer(unittest.TestCase):\n (composite_index,))\n self.assertEqual(len(mutations), 10)\n self.assertEqual(mutations[0]['table'], dbconstants.ASC_PROPERTY_TABLE)\n- self.assertEqual(mutations[0]['operation'], dbconstants.TxnActions.DELETE)\n+ self.assertEqual(mutations[0]['operation'], dbconstants.Operations.DELETE)\n self.assertEqual(mutations[1]['table'], dbconstants.DSC_PROPERTY_TABLE)\n- self.assertEqual(mutations[1]['operation'], dbconstants.TxnActions.DELETE)\n+ self.assertEqual(mutations[1]['operation'], dbconstants.Operations.DELETE)\n self.assertEqual(mutations[2]['table'], dbconstants.COMPOSITE_TABLE)\n- self.assertEqual(mutations[2]['operation'], dbconstants.TxnActions.DELETE)\n+ self.assertEqual(mutations[2]['operation'], dbconstants.Operations.DELETE)\n self.assertEqual(mutations[3]['table'], dbconstants.APP_ENTITY_TABLE)\n self.assertEqual(mutations[4]['table'], dbconstants.APP_KIND_TABLE)\n self.assertEqual(mutations[5]['table'], dbconstants.ASC_PROPERTY_TABLE)\n", "new_path": "AppDB/test/unit/test_datastore_server.py", "old_path": "AppDB/test/unit/test_datastore_server.py" } ]
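The appscale change above stops reusing `TxnActions` values for entity mutations and introduces a dedicated `Operations` class. A minimal sketch of how mutation dicts keyed on those constants are built and dispatched; the table names and the `apply_mutations` printer are illustrative stand-ins for the real Cassandra batch logic:

```python
class Operations(object):
    """Possible datastore operations on entities (distinct from TxnActions)."""
    PUT = 'put'
    DELETE = 'delete'


def mutations_for_put(key, encoded_entity):
    """Build mutation dicts for storing one entity (table names illustrative)."""
    return [
        {'table': 'entities', 'key': key, 'operation': Operations.PUT,
         'values': {'entity': encoded_entity}},
        {'table': 'kind_index', 'key': key, 'operation': Operations.PUT,
         'values': {'reference': key}},
    ]


def apply_mutations(mutations):
    """Dispatch each mutation on its operation constant."""
    for mutation in mutations:
        if mutation['operation'] == Operations.PUT:
            print('insert into', mutation['table'], repr(mutation['key']))
        elif mutation['operation'] == Operations.DELETE:
            print('delete from', mutation['table'], repr(mutation['key']))


apply_mutations(mutations_for_put('app\x00ns\x00Greeting:1', b'<encoded pb>'))
```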
474705cbde801202d286c616cc4ea956409df678
appscale/gts
null
null
Remove manual reconnect logic. Recreating the KazooClient object was causing problems for other functions using the old (closed) object. It was possible for a request to hang indefinitely.
[ { "change_type": "MODIFY", "diff": "@@ -141,17 +141,9 @@ class ZKTransaction:\n (e.g., releasing locks, keeping track of transaction metadata).\n \"\"\"\n \n- # The number of times we should retry ZooKeeper operations, by default.\n- DEFAULT_NUM_RETRIES = 0\n-\n # How long to wait before retrying an operation.\n ZK_RETRY_TIME = .5\n \n- # The number of seconds to wait before we consider a zk call a failure.\n- DEFAULT_ZK_TIMEOUT = 3\n-\n- # When we have this many failures trying to connect to ZK, abort execution.\n- MAX_CONNECTION_FAILURES = 10 \n # The maximum number of seconds to wait before retrying to connect.\n MAX_RECONNECT_DELAY = 30\n \n@@ -176,18 +168,11 @@ class ZKTransaction:\n self.logger.info('Starting {}'.format(class_name))\n \n # Connection instance variables.\n- self.needs_connection = True\n- self.failure_count = 0\n self.host = host\n self.handle = kazoo.client.KazooClient(hosts=host,\n connection_retry=reconnect_policy)\n self.run_with_retry = self.handle.retry\n- try:\n- self.handle.start()\n- self.needs_connection = False\n- except kazoo.exceptions.KazooException as kazoo_exception:\n- self.logger.exception(kazoo_exception)\n- self.reestablish_connection()\n+ self.handle.start()\n \n self.__counter_cache = {}\n \n@@ -246,9 +231,6 @@ class ZKTransaction:\n Raises:\n ZKTransactionException: If it could not increment the counter.\n \"\"\"\n- if self.needs_connection or not self.handle.connected:\n- self.reestablish_connection()\n-\n if path not in self.__counter_cache:\n self.__counter_cache[path] = InspectableCounter(self.handle, path)\n \n@@ -278,9 +260,6 @@ class ZKTransaction:\n Raises:\n ZKInternalException: If there was an error trying to fetch the node.\n \"\"\"\n- if self.needs_connection or not self.handle.connected:\n- self.reestablish_connection()\n-\n try:\n return self.run_with_retry(self.handle.get, path)\n except kazoo.exceptions.NoNodeError:\n@@ -312,25 +291,13 @@ class ZKTransaction:\n value: A str representing the value that should be associated with the\n updated node.\n \"\"\"\n- if self.needs_connection or not self.handle.connected:\n- self.reestablish_connection()\n-\n self.logger.debug(\n 'Updating node at {}, with new value {}'.format(path, value))\n try:\n self.run_with_retry(self.handle.set, path, str(value))\n except kazoo.exceptions.NoNodeError:\n- try:\n- self.run_with_retry(self.handle.create, path, str(value), ZOO_ACL_OPEN,\n- makepath=True)\n- except kazoo.exceptions.KazooException as kazoo_exception:\n- self.logger.exception(kazoo_exception)\n- self.reestablish_connection()\n- except kazoo.exceptions.ZookeeperError as zoo_exception:\n- self.logger.exception(zoo_exception)\n- except kazoo.exceptions.KazooException as kazoo_exception:\n- self.logger.exception(kazoo_exception)\n- self.reestablish_connection()\n+ self.run_with_retry(self.handle.create, path, str(value), ZOO_ACL_OPEN,\n+ makepath=True)\n \n def delete_recursive(self, path):\n \"\"\" Deletes the ZooKeeper node at path, and any child nodes it may have.\n@@ -338,9 +305,6 @@ class ZKTransaction:\n Args:\n path: A PATH_SEPARATOR-separated str that represents the node to delete.\n \"\"\"\n- if self.needs_connection or not self.handle.connected:\n- self.reestablish_connection()\n-\n try:\n children = self.run_with_retry(self.handle.get_children, path)\n for child in children:\n@@ -348,9 +312,6 @@ class ZKTransaction:\n self.run_with_retry(self.handle.delete, path)\n except kazoo.exceptions.NoNodeError:\n pass\n- except kazoo.exceptions.KazooException as kazoo_exception:\n- 
self.logger.exception(kazoo_exception)\n- self.reestablish_connection()\n \n def dump_tree(self, path):\n \"\"\" Prints information about the given ZooKeeper node and its children.\n@@ -367,9 +328,6 @@ class ZKTransaction:\n self.dump_tree(PATH_SEPARATOR.join([path, child]))\n except kazoo.exceptions.NoNodeError:\n self.logger.info(\"{0} does not exist.\".format(path))\n- except kazoo.exceptions.KazooException as kazoo_exception:\n- self.logger.exception(kazoo_exception)\n- self.reestablish_connection()\n \n def get_app_root_path(self, app_id):\n \"\"\" Returns the ZooKeeper path that holds all information for the given\n@@ -514,15 +472,11 @@ class ZKTransaction:\n Raises:\n ZKTransactionException: If the sequence node couldn't be created.\n \"\"\"\n- if self.needs_connection or not self.handle.connected:\n- self.reestablish_connection()\n-\n try:\n self.run_with_retry(self.handle.create, path, value=str(value), \n acl=ZOO_ACL_OPEN, ephemeral=False, sequence=False, makepath=True)\n except kazoo.exceptions.KazooException as kazoo_exception:\n self.logger.exception(kazoo_exception)\n- self.reestablish_connection()\n raise ZKTransactionException(\"Couldn't create path {0} with value {1} \" \\\n .format(path, value))\n \n@@ -542,9 +496,6 @@ class ZKTransaction:\n Raises:\n ZKTransactionException: If the sequence node couldn't be created.\n \"\"\"\n- if self.needs_connection or not self.handle.connected:\n- self.reestablish_connection()\n-\n try:\n txn_id_path = self.run_with_retry(self.handle.create, path, \n value=str(value), acl=ZOO_ACL_OPEN, ephemeral=False, sequence=True,\n@@ -564,11 +515,9 @@ class ZKTransaction:\n return txn_id\n except kazoo.exceptions.ZookeeperError as zoo_exception:\n self.logger.exception(zoo_exception)\n- self.reestablish_connection()\n except kazoo.exceptions.KazooException as kazoo_exception:\n self.logger.exception(kazoo_exception)\n- self.reestablish_connection()\n- \n+\n raise ZKTransactionException(\"Unable to create sequence node with path\" \\\n \" {0}, value {1}\".format(path, value))\n \n@@ -586,9 +535,6 @@ class ZKTransaction:\n Returns:\n A long that represents the new transaction ID.\n \"\"\"\n- if self.needs_connection or not self.handle.connected:\n- self.reestablish_connection()\n-\n timestamp = str(time.time())\n \n # First, make the ZK node for the actual transaction id.\n@@ -614,16 +560,12 @@ class ZKTransaction:\n ZKTransactionException: If the transaction is not in progress, or it\n has timed out.\n \"\"\"\n- if self.needs_connection or not self.handle.connected:\n- self.reestablish_connection()\n-\n txpath = self.get_transaction_path(app_id, txid)\n try:\n if self.is_blacklisted(app_id, txid):\n raise ZKTransactionException(\"Transaction {0} timed out.\".format(txid))\n except ZKInternalException as zk_exception:\n self.logger.exception(zk_exception)\n- self.reestablish_connection()\n raise ZKTransactionException(\"Couldn't see if transaction {0} is valid\" \\\n .format(txid))\n \n@@ -635,7 +577,6 @@ class ZKTransaction:\n return True\n except kazoo.exceptions.KazooException as kazoo_exception:\n self.logger.exception(kazoo_exception)\n- self.reestablish_connection()\n raise ZKTransactionException(\n 'Unable to determine status of transaction {}'.format(txid))\n \n@@ -653,9 +594,6 @@ class ZKTransaction:\n ZKInternalException: If there was an error seeing if the transaction was\n blacklisted.\n \"\"\"\n- if self.needs_connection or not self.handle.connected:\n- self.reestablish_connection()\n-\n tx_lock_path = 
self.get_transaction_lock_list_path(app_id, txid)\n if self.is_blacklisted(app_id, txid):\n raise ZKTransactionException(\n@@ -673,7 +611,6 @@ class ZKTransaction:\n time.sleep(self.ZK_RETRY_TIME)\n return self.is_in_transaction(app_id=app_id, txid=txid,\n retries=retries - 1)\n- self.reestablish_connection()\n raise ZKInternalException(\"Couldn't see if we are in transaction {0}\" \\\n .format(txid))\n \n@@ -710,9 +647,6 @@ class ZKTransaction:\n ZKTransactionException: If we can't acquire the lock for the given\n entity group, because a different transaction already has it.\n \"\"\"\n- if self.needs_connection or not self.handle.connected:\n- self.reestablish_connection()\n-\n txpath = self.get_transaction_path(app_id, txid)\n lockrootpath = self.get_lock_root_path(app_id, entity_key)\n lockpath = None\n@@ -743,7 +677,6 @@ class ZKTransaction:\n \"already another transaction using {0} lock\".format(lockrootpath))\n except kazoo.exceptions.KazooException as kazoo_exception:\n self.logger.exception(kazoo_exception)\n- self.reestablish_connection()\n raise ZKTransactionException(\"Couldn't get a lock at path {0}\" \\\n .format(lockrootpath))\n \n@@ -773,7 +706,6 @@ class ZKTransaction:\n \n except kazoo.exceptions.KazooException as kazoo_exception:\n self.logger.exception(kazoo_exception)\n- self.reestablish_connection()\n raise ZKTransactionException(\"Couldn't create or set a lock at path {0}\" \\\n .format(transaction_lock_path))\n \n@@ -793,9 +725,6 @@ class ZKTransaction:\n ZKInternalException: If we can't tell if the transaction is a XG\n transaction or not.\n \"\"\"\n- if self.needs_connection or not self.handle.connected:\n- self.reestablish_connection()\n-\n try:\n return self.run_with_retry(self.handle.exists, self.get_xg_path(app_id,\n tx_id))\n@@ -804,7 +733,6 @@ class ZKTransaction:\n .format(zk_exception)) \n except kazoo.exceptions.KazooException as kazoo_exception:\n self.logger.exception(kazoo_exception)\n- self.reestablish_connection()\n raise ZKInternalException(\"Couldn't see if transaction {0} was XG \" \\\n \"for app {1}\".format(tx_id, app_id))\n \n@@ -826,9 +754,6 @@ class ZKTransaction:\n Raises:\n ZKTransactionException: If it could not get the lock.\n \"\"\"\n- if self.needs_connection or not self.handle.connected:\n- self.reestablish_connection()\n-\n lockrootpath = self.get_lock_root_path(app_id, entity_key)\n \n try:\n@@ -849,13 +774,11 @@ class ZKTransaction:\n \"different root entity in non-cross-group transaction.\")\n except ZKInternalException as zk_exception:\n self.logger.exception(zk_exception)\n- self.reestablish_connection()\n raise ZKTransactionException(\"An internal exception prevented us from \" \\\n \"getting the lock for app id {0}, txid {1}, entity key {2}\" \\\n .format(app_id, txid, entity_key))\n except kazoo.exceptions.KazooException as kazoo_exception:\n self.logger.exception(kazoo_exception)\n- self.reestablish_connection()\n raise ZKTransactionException(\"Couldn't get lock for app id {0}, txid \" \\\n \"{1}, entity key {2}\".format(app_id, txid, entity_key))\n \n@@ -891,7 +814,6 @@ class ZKTransaction:\n \"is not valid.\".format(txid))\n except kazoo.exceptions.KazooException as kazoo_exception:\n self.logger.exception(kazoo_exception)\n- self.reestablish_connection()\n raise ZKTransactionException(\"Couldn't get updated key list for appid \" \\\n \"{0}, txid {1}\".format(app_id, txid))\n \n@@ -927,9 +849,6 @@ class ZKTransaction:\n ZKTransactionException: If any locks acquired during this transaction\n could not be released.\n \"\"\"\n- 
if self.needs_connection or not self.handle.connected:\n- self.reestablish_connection()\n-\n self.check_transaction(app_id, txid)\n txpath = self.get_transaction_path(app_id, txid)\n \n@@ -951,13 +870,11 @@ class ZKTransaction:\n return True\n except ZKInternalException as zk_exception:\n self.logger.exception(zk_exception)\n- self.reestablish_connection()\n raise ZKTransactionException(\"Internal exception prevented us from \" \\\n \"releasing lock {0} for app id {1}\".format(transaction_lock_path,\n app_id))\n except kazoo.exceptions.KazooException as kazoo_exception:\n self.logger.exception(kazoo_exception)\n- self.reestablish_connection()\n raise ZKTransactionException(\"Couldn't release lock {0} for appid {1}\" \\\n .format(transaction_lock_path, app_id))\n \n@@ -978,13 +895,11 @@ class ZKTransaction:\n # Although there was a failure doing the async deletes, since we've\n # already released the locks above, we can safely return True here.\n self.logger.exception(zk_exception)\n- self.reestablish_connection()\n return True\n except kazoo.exceptions.KazooException as kazoo_exception:\n # Although there was a failure doing the async deletes, since we've\n # already released the locks above, we can safely return True here.\n self.logger.exception(kazoo_exception)\n- self.reestablish_connection()\n return True\n \n return True\n@@ -1002,9 +917,6 @@ class ZKTransaction:\n ZKInternalException: If we couldn't determine if the transaction was\n blacklisted or not.\n \"\"\"\n- if self.needs_connection or not self.handle.connected:\n- self.reestablish_connection()\n-\n try:\n blacklist_root = self.get_blacklist_root_path(app_id)\n blacklist_txn = PATH_SEPARATOR.join([blacklist_root, \n@@ -1019,7 +931,6 @@ class ZKTransaction:\n time.sleep(self.ZK_RETRY_TIME)\n return self.is_blacklisted(app_id=app_id, txid=txid,\n retries=retries - 1)\n- self.reestablish_connection()\n raise ZKInternalException(\"Couldn't see if appid {0}'s transaction, \" \\\n \"{1}, is blacklisted.\".format(app_id, txid))\n \n@@ -1037,9 +948,6 @@ class ZKTransaction:\n Raises:\n ZKInternalException: If we couldn't get a valid transaction ID.\n \"\"\"\n- if self.needs_connection or not self.handle.connected:\n- self.reestablish_connection()\n-\n # If this is an ongoing transaction give the previous value.\n try:\n if self.is_in_transaction(app_id, target_txid):\n@@ -1058,7 +966,6 @@ class ZKTransaction:\n return long(0)\n except kazoo.exceptions.KazooException as kazoo_exception:\n self.logger.exception(kazoo_exception)\n- self.reestablish_connection()\n raise ZKInternalException(\"Couldn't get valid transaction id for \" \\\n \"app {0}, target txid {1}, entity key {2}\".format(app_id, target_txid,\n entity_key))\n@@ -1082,9 +989,6 @@ class ZKTransaction:\n ZKTransactionException: If the transaction is not valid.\n ZKInternalException: If we were unable to register the key.\n \"\"\"\n- if self.needs_connection or not self.handle.connected:\n- self.reestablish_connection()\n-\n vtxpath = self.get_valid_transaction_path(app_id, entity_key)\n \n try:\n@@ -1106,7 +1010,6 @@ class ZKTransaction:\n current_txid))\n except kazoo.exceptions.KazooException as kazoo_exception:\n self.logger.exception(kazoo_exception)\n- self.reestablish_connection()\n raise ZKInternalException(\"Couldn't register updated key for app \" \\\n \"{0}, current txid {1}, target txid {2}, entity_key {3}\".format(app_id,\n current_txid, target_txid, entity_key))\n@@ -1131,9 +1034,6 @@ class ZKTransaction:\n lockpath = None\n lock_list = []\n \n- if 
self.needs_connection or not self.handle.connected:\n- self.reestablish_connection()\n-\n txpath = self.get_transaction_path(app_id, txid)\n try:\n lockpath = self.run_with_retry(self.handle.get,\n@@ -1148,7 +1048,6 @@ class ZKTransaction:\n return False\n except kazoo.exceptions.KazooException as kazoo_exception:\n self.logger.exception(kazoo_exception)\n- self.reestablish_connection()\n return False\n \n try:\n@@ -1213,76 +1112,10 @@ class ZKTransaction:\n return False\n except kazoo.exceptions.KazooException as kazoo_exception:\n self.logger.exception(kazoo_exception)\n- self.reestablish_connection()\n return False\n \n return True\n \n- def reestablish_connection(self):\n- \"\"\" Checks the connection and resets it as needed. \"\"\"\n- self.logger.warning('Re-establishing ZooKeeper connection.')\n- try:\n- self.handle.restart()\n- self.needs_connection = False\n- self.failure_count = 0\n- self.logger.info('Restarted ZK connection successfully.')\n- return\n- except kazoo.exceptions.ZookeeperError:\n- self.logger.exception(\n- 'Unable to restart ZK connection. Creating a new one.')\n- except kazoo.exceptions.KazooException:\n- self.logger.exception(\n- 'Unable to restart ZK connection. Creating a new one.')\n- except Exception:\n- self.logger.exception(\n- 'Unable to restart ZK connection. Creating a new one.')\n-\n- try:\n- self.handle.stop()\n- except kazoo.exceptions.ZookeeperError:\n- self.logger.exception('Issue stopping ZK connection.')\n- except kazoo.exceptions.KazooException:\n- self.logger.exception('Issue stopping ZK connection.')\n- except Exception:\n- self.logger.exception('Issue stopping ZK connection.')\n-\n- try:\n- self.handle.close()\n- except kazoo.exceptions.ZookeeperError:\n- self.logger.exception('Issue closing ZK connection.')\n- except kazoo.exceptions.KazooException:\n- self.logger.exception('Issue closing ZK connection.')\n- except Exception:\n- self.logger.exception('Issue closing ZK connection.')\n-\n- self.logger.warning('Creating a new connection to ZK')\n- reconnect_error = False\n-\n- self.handle = kazoo.client.KazooClient(hosts=self.host,\n- max_retries=self.DEFAULT_NUM_RETRIES, timeout=self.DEFAULT_ZK_TIMEOUT)\n-\n- try:\n- self.handle.start()\n- except kazoo.exceptions.KazooException as kazoo_exception:\n- reconnect_error = True\n- self.logger.exception(kazoo_exception)\n- except Exception as exception:\n- reconnect_error = True\n- self.logger.exception(exception)\n-\n- if reconnect_error:\n- self.logger.error('Error re-establishing ZooKeeper connection!')\n- self.needs_connection = True\n- self.failure_count += 1\n- else:\n- self.logger.info('Successfully created a new connection')\n- self.needs_connection = False\n- self.failure_count = 0\n-\n- if self.failure_count > self.MAX_CONNECTION_FAILURES:\n- self.logger.critical('Too many connection errors to ZooKeeper. 
Aborting')\n- sys.exit(1)\n-\n def gc_runner(self):\n \"\"\" Transaction ID garbage collection (GC) runner.\n \n@@ -1308,10 +1141,8 @@ class ZKTransaction:\n self.logger.exception('GC timeout when fetching {}'.format(APPS_PATH))\n except (ZookeeperError, KazooException):\n self.logger.exception('Error when trying garbage collection')\n- self.reestablish_connection()\n except Exception:\n self.logger.exception('Unknown exception')\n- self.reestablish_connection()\n \n with self.gc_cv:\n self.gc_cv.wait(GC_INTERVAL)\n@@ -1336,11 +1167,9 @@ class ZKTransaction:\n last_time = 0\n except (ZookeeperError, KazooException):\n self.logger.exception('Error when fetching {}'.format(gc_time_path))\n- self.reestablish_connection()\n return False\n except Exception:\n self.logger.exception('Unknown exception')\n- self.reestablish_connection()\n return False\n \n # If the last time plus our GC interval is less than the current time,\n@@ -1366,11 +1195,9 @@ class ZKTransaction:\n pass\n except (ZookeeperError, KazooException):\n self.logger.exception('Error while executing garbage collection')\n- self.reestablish_connection()\n return False\n except Exception:\n self.logger.exception('Unknown exception')\n- self.reestablish_connection()\n return False\n \n return True\n@@ -1396,11 +1223,9 @@ class ZKTransaction:\n except (kazoo.exceptions.SystemZookeeperError, ZookeeperError,\n KazooException, SystemError):\n self.logger.exception('Unable to create {}'.format(path))\n- self.reestablish_connection()\n return False\n except Exception:\n self.logger.exception('Unknown exception')\n- self.reestablish_connection()\n return False\n \n return True\n@@ -1422,11 +1247,9 @@ class ZKTransaction:\n except (kazoo.exceptions.SystemZookeeperError, KazooException,\n SystemError):\n self.logger.exception('Unable to delete lock: {0}'.format(path))\n- self.reestablish_connection()\n return False\n except Exception:\n self.logger.exception('Unknown exception')\n- self.reestablish_connection()\n return False\n return True\n \n@@ -1516,11 +1339,9 @@ class ZKTransaction:\n return\n except (ZookeeperError, KazooException):\n self.logger.exception('Unable to get children of {}'.format(txrootpath))\n- self.reestablish_connection()\n return\n except Exception:\n self.logger.exception('Unknown exception')\n- self.reestablish_connection()\n return\n # Verify the time stamp of each transaction.\n for txid in txlist:\n@@ -1550,12 +1371,10 @@ class ZKTransaction:\n except (ZookeeperError, KazooException):\n self.logger.exception(\n 'Error while running GC for {}:{}'.format(app_id, txid))\n- self.reestablish_connection()\n return\n except Exception:\n self.logger.exception('Unknown exception')\n- self.reestablish_connection()\n- return \n+ return\n self.logger.debug('Lock GC took {} seconds.'.format(time.time() - start))\n \n def get_current_transactions(self, project):\n", "new_path": "AppDB/appscale/datastore/zkappscale/zktransaction.py", "old_path": "AppDB/appscale/datastore/zkappscale/zktransaction.py" } ]
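Editor's note: the diff above removes ZKTransaction's hand-rolled reestablish_connection logic. A minimal sketch of the usual kazoo alternative, assuming a reachable ensemble at the example host string, is to register a connection-state listener and let the client's built-in retry handle reconnects:

```python
from kazoo.client import KazooClient, KazooState

zk_client = KazooClient(hosts='zk1:2181,zk2:2181')

def state_listener(state):
    # kazoo calls this on session transitions; the client retries the
    # connection in the background, so no manual restart/stop/close dance
    # is needed here.
    if state == KazooState.LOST:
        print('ZooKeeper session lost; ephemeral nodes and watches are gone.')
    elif state == KazooState.SUSPENDED:
        print('Connection suspended; kazoo is retrying.')
    else:  # KazooState.CONNECTED
        print('Connected to ZooKeeper.')

zk_client.add_listener(state_listener)
zk_client.start()
```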
bceb7f05916e43611303c87a34c9062e275711ba
appscale/gts
null
null
Allow DeploymentConfig to take a KazooClient This makes it easier to reuse the KazooClient for other things.
[ { "change_type": "MODIFY", "diff": "@@ -31,6 +31,7 @@ from appscale.common.constants import LOG_FORMAT\n from appscale.common.deployment_config import DeploymentConfig\n from appscale.common.deployment_config import ConfigInaccessible\n from appscale.common.unpackaged import APPSCALE_PYTHON_APPSERVER\n+from kazoo.client import KazooClient\n from StringIO import StringIO\n \n sys.path.append(APPSCALE_PYTHON_APPSERVER)\n@@ -423,7 +424,10 @@ def main():\n args = parser.parse_args()\n \n datastore_path = args.datastore_path\n- deployment_config = DeploymentConfig(appscale_info.get_zk_locations_string())\n+ zk_ips = appscale_info.get_zk_node_ips()\n+ zk_client = KazooClient(hosts=','.join(zk_ips))\n+ zk_client.start()\n+ deployment_config = DeploymentConfig(zk_client)\n setup_env()\n \n http_server = tornado.httpserver.HTTPServer(\n", "new_path": "AppDB/appscale/datastore/scripts/blobstore.py", "old_path": "AppDB/appscale/datastore/scripts/blobstore.py" }, { "change_type": "MODIFY", "diff": "@@ -17,6 +17,7 @@ import urllib\n import urllib2\n from xml.etree import ElementTree\n \n+from kazoo.client import KazooClient\n from M2Crypto import SSL\n from tornado.httpclient import HTTPClient\n from tornado.httpclient import HTTPError\n@@ -847,7 +848,11 @@ def is_config_valid(config):\n ################################\n if __name__ == \"__main__\":\n file_io.set_logging_format()\n- deployment_config = DeploymentConfig(appscale_info.get_zk_locations_string())\n+\n+ zk_ips = appscale_info.get_zk_node_ips()\n+ zk_client = KazooClient(hosts=','.join(zk_ips))\n+ zk_client.start()\n+ deployment_config = DeploymentConfig(zk_client)\n \n INTERNAL_IP = appscale_info.get_private_ip()\n SERVER = SOAPpy.SOAPServer((INTERNAL_IP, constants.APP_MANAGER_PORT))\n", "new_path": "AppManager/app_manager_server.py", "old_path": "AppManager/app_manager_server.py" }, { "change_type": "MODIFY", "diff": "@@ -2,7 +2,6 @@ import json\n import logging\n import time\n \n-from kazoo.client import KazooClient\n from kazoo.client import KazooException\n from kazoo.client import KazooState\n from kazoo.client import NoNodeError\n@@ -34,19 +33,18 @@ class DeploymentConfig(object):\n # The ZooKeeper node where configuration is stored.\n CONFIG_ROOT = '/appscale/config'\n \n- def __init__(self, hosts):\n+ def __init__(self, zk_client):\n \"\"\" Creates new DeploymentConfig object.\n \n Args:\n- hosts: A list of ZooKeeper hosts.\n+ zk_client: A KazooClient.\n \"\"\"\n self.logger = logging.getLogger(self.__class__.__name__)\n self.update_lock = Lock()\n self.state = ConfigStates.LOADING\n self.config = {}\n- self.conn = KazooClient(hosts=hosts, read_only=True)\n+ self.conn = zk_client\n self.conn.add_listener(self._conn_listener)\n- self.conn.start()\n self.conn.ensure_path(self.CONFIG_ROOT)\n self.conn.ChildrenWatch(self.CONFIG_ROOT, func=self._update_config)\n \n", "new_path": "common/appscale/common/deployment_config.py", "old_path": "common/appscale/common/deployment_config.py" }, { "change_type": "MODIFY", "diff": "@@ -7,6 +7,8 @@ import os\n import pkgutil\n import sys\n \n+from kazoo.client import KazooClient\n+\n from appscale.common import appscale_info\n from appscale.common.deployment_config import DeploymentConfig\n from appscale.common.deployment_config import InvalidConfig\n@@ -28,7 +30,9 @@ if __name__ == \"__main__\":\n args = parser.parse_args()\n zk_locations = args.zk_locations if args.zk_locations else \\\n appscale_info.get_zk_locations_string()\n- deployment_config = DeploymentConfig(zk_locations)\n+ zk_client = 
KazooClient(hosts=zk_locations)\n+ zk_client.start()\n+ deployment_config = DeploymentConfig(zk_client)\n cassandra_config = deployment_config.get_config('cassandra')\n if 'num_tokens' not in cassandra_config:\n raise InvalidConfig('num_tokens not specified in deployment config.')\n", "new_path": "scripts/setup_cassandra_config_files.py", "old_path": "scripts/setup_cassandra_config_files.py" } ]
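Editor's note: with this change, callers construct one KazooClient and hand it to DeploymentConfig instead of passing a host string. A minimal sketch of the intended call pattern, taken from the diff above (the extra ensure_path call is a hypothetical example of reusing the same client elsewhere):

```python
from kazoo.client import KazooClient

from appscale.common import appscale_info
from appscale.common.deployment_config import DeploymentConfig

# Build a single client and share it with everything that needs ZooKeeper.
zk_ips = appscale_info.get_zk_node_ips()
zk_client = KazooClient(hosts=','.join(zk_ips))
zk_client.start()

deployment_config = DeploymentConfig(zk_client)

# Hypothetical reuse: other components can use the same connection instead
# of opening their own.
zk_client.ensure_path('/appscale/some_other_component')
```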
7ed0b36c3d0414f87a82efdbdf615dec7c97b71e
appscale/gts
null
null
Simplify datastore error handling If an error code is defined, the response body does not need to be specified. This prevents encoding issues in cases where a response message has required fields.
[ { "change_type": "MODIFY", "diff": "@@ -3327,30 +3327,27 @@ class DatastoreDistributed():\n Returns:\n An encoded protocol buffer commit response.\n \"\"\"\n- commitres_pb = datastore_pb.CommitResponse()\n transaction_pb = datastore_pb.Transaction(http_request_data)\n txn_id = transaction_pb.handle()\n \n try:\n self.apply_txn_changes(app_id, txn_id)\n except dbconstants.TxTimeoutException as timeout:\n- return commitres_pb.Encode(), datastore_pb.Error.TIMEOUT, str(timeout)\n+ return '', datastore_pb.Error.TIMEOUT, str(timeout)\n except dbconstants.AppScaleDBConnectionError:\n self.logger.exception('DB connection error during commit')\n- return (commitres_pb.Encode(), datastore_pb.Error.INTERNAL_ERROR,\n+ return ('', datastore_pb.Error.INTERNAL_ERROR,\n 'Datastore connection error on Commit request.')\n except dbconstants.ConcurrentModificationException as error:\n- return (commitres_pb.Encode(), datastore_pb.Error.CONCURRENT_TRANSACTION,\n- str(error))\n+ return '', datastore_pb.Error.CONCURRENT_TRANSACTION, str(error)\n except dbconstants.TooManyGroupsException as error:\n- return (commitres_pb.Encode(), datastore_pb.Error.BAD_REQUEST,\n- str(error))\n+ return '', datastore_pb.Error.BAD_REQUEST, str(error)\n except entity_lock.LockTimeout as error:\n- return (commitres_pb.Encode(), datastore_pb.Error.TIMEOUT,\n- str(error))\n+ return '', datastore_pb.Error.TIMEOUT, str(error)\n \n self.zookeeper.remove_tx_node(app_id, txn_id)\n- return commitres_pb.Encode(), 0, \"\"\n+ commitres_pb = datastore_pb.CommitResponse()\n+ return commitres_pb.Encode(), 0, ''\n \n def rollback_transaction(self, app_id, http_request_data):\n \"\"\" Handles the rollback phase of a transaction.\n@@ -3366,13 +3363,11 @@ class DatastoreDistributed():\n 'Doing a rollback on transaction {} for {}'.format(txn.handle(), app_id))\n try:\n self.zookeeper.notify_failed_transaction(app_id, txn.handle())\n- return (api_base_pb.VoidProto().Encode(), 0, \"\")\n+ return api_base_pb.VoidProto().Encode(), 0, ''\n except zktransaction.ZKTransactionException as zkte:\n self.logger.exception('Unable to rollback {} for {}'.\n format(txn, app_id))\n- return (api_base_pb.VoidProto().Encode(),\n- datastore_pb.Error.PERMISSION_DENIED, \n- \"Unable to rollback for this transaction: {0}\".format(str(zkte)))\n+ return '', datastore_pb.Error.PERMISSION_DENIED, str(zkte)\n \n def _zk_state_listener(self, state):\n \"\"\" Handles changes to the ZooKeeper connection state.\n", "new_path": "AppDB/appscale/datastore/datastore_distributed.py", "old_path": "AppDB/appscale/datastore/datastore_distributed.py" }, { "change_type": "MODIFY", "diff": "@@ -191,16 +191,9 @@ class MainHandler(tornado.web.RequestHandler):\n apirequest.ParseFromString(http_request_data)\n apiresponse = remote_api_pb.Response()\n response = None\n- errcode = 0\n- errdetail = \"\"\n- apperror_pb = None\n- if not apirequest.has_method(): \n- errcode = datastore_pb.Error.BAD_REQUEST\n- errdetail = \"Method was not set in request\"\n+ if not apirequest.has_method():\n apirequest.set_method(\"NOT_FOUND\")\n if not apirequest.has_request():\n- errcode = datastore_pb.Error.BAD_REQUEST\n- errdetail = \"Request missing in call\"\n apirequest.set_method(\"NOT_FOUND\")\n apirequest.clear_request()\n method = apirequest.method()\n@@ -295,26 +288,23 @@ class MainHandler(tornado.web.RequestHandler):\n if begin_transaction_req_pb.has_allow_multiple_eg():\n multiple_eg = bool(begin_transaction_req_pb.allow_multiple_eg())\n \n- handle = None\n- transaction_pb = datastore_pb.Transaction()\n-\n 
if READ_ONLY:\n logger.warning('Unable to begin transaction in read-only mode: {}'.\n format(begin_transaction_req_pb))\n- return (transaction_pb.Encode(), datastore_pb.Error.CAPABILITY_DISABLED,\n- 'Datastore is in read-only mode.')\n+ return ('', datastore_pb.Error.CAPABILITY_DISABLED,\n+ 'Datastore is in read-only mode.')\n \n try:\n handle = datastore_access.setup_transaction(app_id, multiple_eg)\n except (zktransaction.ZKInternalException,\n dbconstants.AppScaleDBConnectionError) as error:\n logger.exception('Unable to begin transaction')\n- return (transaction_pb.Encode(), datastore_pb.Error.INTERNAL_ERROR,\n- str(error))\n+ return '', datastore_pb.Error.INTERNAL_ERROR, str(error)\n \n+ transaction_pb = datastore_pb.Transaction()\n transaction_pb.set_app(app_id)\n transaction_pb.set_handle(handle)\n- return (transaction_pb.Encode(), 0, \"\")\n+ return transaction_pb.Encode(), 0, ''\n \n def commit_transaction_request(self, app_id, http_request_data):\n \"\"\" Handles the commit phase of a transaction.\n@@ -328,12 +318,11 @@ class MainHandler(tornado.web.RequestHandler):\n global datastore_access\n \n if READ_ONLY:\n- commitres_pb = datastore_pb.CommitResponse()\n transaction_pb = datastore_pb.Transaction(http_request_data)\n logger.warning('Unable to commit in read-only mode: {}'.\n format(transaction_pb))\n- return (commitres_pb.Encode(), datastore_pb.Error.CAPABILITY_DISABLED,\n- 'Datastore is in read-only mode.')\n+ return ('', datastore_pb.Error.CAPABILITY_DISABLED,\n+ 'Datastore is in read-only mode.')\n \n return datastore_access.commit_transaction(app_id, http_request_data)\n \n@@ -347,26 +336,22 @@ class MainHandler(tornado.web.RequestHandler):\n An encoded protocol buffer void response.\n \"\"\"\n global datastore_access\n- response = api_base_pb.VoidProto()\n \n if READ_ONLY:\n logger.warning('Unable to rollback in read-only mode: {}'.\n format(http_request_data))\n- return (response.Encode(), datastore_pb.Error.CAPABILITY_DISABLED,\n- 'Datastore is in read-only mode.')\n+ return ('', datastore_pb.Error.CAPABILITY_DISABLED,\n+ 'Datastore is in read-only mode.')\n \n try:\n return datastore_access.rollback_transaction(app_id, http_request_data)\n- except zktransaction.ZKInternalException:\n+ except zktransaction.ZKInternalException as error:\n logger.exception('ZKInternalException during {} for {}'.\n format(http_request_data, app_id))\n- return (response.Encode(), datastore_pb.Error.INTERNAL_ERROR,\n- \"Internal error with ZooKeeper connection.\")\n- except Exception:\n+ return '', datastore_pb.Error.INTERNAL_ERROR, str(error)\n+ except Exception as error:\n logger.exception('Unable to rollback transaction')\n- return(response.Encode(),\n- datastore_pb.Error.INTERNAL_ERROR,\n- \"Unable to rollback for this transaction\")\n+ return '', datastore_pb.Error.INTERNAL_ERROR, str(error)\n \n def run_query(self, http_request_data):\n \"\"\" High level function for running queries.\n@@ -381,31 +366,21 @@ class MainHandler(tornado.web.RequestHandler):\n clone_qr_pb = UnprocessedQueryResult()\n try:\n datastore_access._dynamic_run_query(query, clone_qr_pb)\n- except zktransaction.ZKBadRequest, zkie:\n+ except zktransaction.ZKBadRequest as error:\n logger.exception('Illegal arguments in transaction during {}'.\n format(query))\n- return (clone_qr_pb.Encode(),\n- datastore_pb.Error.BAD_REQUEST, \n- \"Illegal arguments for transaction. 
{0}\".format(str(zkie)))\n- except zktransaction.ZKInternalException:\n+ return '', datastore_pb.Error.BAD_REQUEST, str(error)\n+ except zktransaction.ZKInternalException as error:\n logger.exception('ZKInternalException during {}'.format(query))\n- clone_qr_pb.set_more_results(False)\n- return (clone_qr_pb.Encode(), \n- datastore_pb.Error.INTERNAL_ERROR, \n- \"Internal error with ZooKeeper connection.\")\n- except zktransaction.ZKTransactionException:\n+ return '', datastore_pb.Error.INTERNAL_ERROR, str(error)\n+ except zktransaction.ZKTransactionException as error:\n logger.exception('Concurrent transaction during {}'.format(query))\n- clone_qr_pb.set_more_results(False)\n- return (clone_qr_pb.Encode(), \n- datastore_pb.Error.CONCURRENT_TRANSACTION, \n- \"Concurrent transaction exception on put.\")\n- except dbconstants.AppScaleDBConnectionError:\n+ return '', datastore_pb.Error.CONCURRENT_TRANSACTION, str(error)\n+ except dbconstants.AppScaleDBConnectionError as error:\n logger.exception('DB connection error during query')\n- clone_qr_pb.set_more_results(False)\n- return (clone_qr_pb.Encode(),\n- datastore_pb.Error.INTERNAL_ERROR,\n- \"Datastore connection error on run_query request.\")\n- return clone_qr_pb.Encode(), 0, \"\"\n+ return '', datastore_pb.Error.INTERNAL_ERROR, str(error)\n+\n+ return clone_qr_pb.Encode(), 0, ''\n \n def create_index_request(self, app_id, http_request_data):\n \"\"\" High level function for creating composite indexes.\n@@ -424,19 +399,17 @@ class MainHandler(tornado.web.RequestHandler):\n if READ_ONLY:\n logger.warning('Unable to create in read-only mode: {}'.\n format(request))\n- return (response.Encode(), datastore_pb.Error.CAPABILITY_DISABLED,\n- 'Datastore is in read-only mode.')\n+ return ('', datastore_pb.Error.CAPABILITY_DISABLED,\n+ 'Datastore is in read-only mode.')\n \n try:\n index_id = datastore_access.create_composite_index(app_id, request)\n response.set_value(index_id)\n- except dbconstants.AppScaleDBConnectionError:\n+ except dbconstants.AppScaleDBConnectionError as error:\n logger.exception('DB connection error during index creation')\n- response.set_value(0)\n- return (response.Encode(),\n- datastore_pb.Error.INTERNAL_ERROR,\n- \"Datastore connection error on create index request.\")\n- return response.Encode(), 0, \"\"\n+ return '', datastore_pb.Error.INTERNAL_ERROR, str(error)\n+\n+ return response.Encode(), 0, ''\n \n def update_index_request(self, app_id, http_request_data):\n \"\"\" High level function for updating a composite index.\n@@ -455,8 +428,8 @@ class MainHandler(tornado.web.RequestHandler):\n if READ_ONLY:\n logger.warning('Unable to update in read-only mode: {}'.\n format(index))\n- return (response.Encode(), datastore_pb.Error.CAPABILITY_DISABLED,\n- 'Datastore is in read-only mode.')\n+ return ('', datastore_pb.Error.CAPABILITY_DISABLED,\n+ 'Datastore is in read-only mode.')\n \n state = index.state()\n if state not in [index.READ_WRITE, index.WRITE_ONLY]:\n@@ -464,14 +437,13 @@ class MainHandler(tornado.web.RequestHandler):\n error_message = 'Unable to update index because state is {}. 
'\\\n 'Index: {}'.format(state_name, index)\n logger.error(error_message)\n- return response.Encode(), datastore_pb.Error.PERMISSION_DENIED,\\\n- error_message\n+ return '', datastore_pb.Error.PERMISSION_DENIED, error_message\n else:\n # Updating index asynchronously so we can return a response quickly.\n threading.Thread(target=datastore_access.update_composite_index,\n args=(app_id, index)).start()\n \n- return response.Encode(), 0, \"\"\n+ return response.Encode(), 0, ''\n \n def delete_index_request(self, app_id, http_request_data):\n \"\"\" Deletes a composite index for a given application.\n@@ -490,17 +462,16 @@ class MainHandler(tornado.web.RequestHandler):\n if READ_ONLY:\n logger.warning('Unable to delete in read-only mode: {}'.\n format(request))\n- return (response.Encode(), datastore_pb.Error.CAPABILITY_DISABLED,\n- 'Datastore is in read-only mode.')\n+ return ('', datastore_pb.Error.CAPABILITY_DISABLED,\n+ 'Datastore is in read-only mode.')\n \n try: \n datastore_access.delete_composite_index_metadata(app_id, request)\n- except dbconstants.AppScaleDBConnectionError:\n+ except dbconstants.AppScaleDBConnectionError as error:\n logger.exception('DB connection error during index deletion')\n- return (response.Encode(),\n- datastore_pb.Error.INTERNAL_ERROR,\n- \"Datastore connection error on delete index request.\")\n- return response.Encode(), 0, \"\"\n+ return '', datastore_pb.Error.INTERNAL_ERROR, str(error)\n+\n+ return response.Encode(), 0, ''\n \n def get_indices_request(self, app_id):\n \"\"\" Gets the indices of the given application.\n@@ -516,16 +487,16 @@ class MainHandler(tornado.web.RequestHandler):\n response = datastore_pb.CompositeIndices()\n try:\n indices = datastore_access.datastore_batch.get_indices(app_id)\n- except dbconstants.AppScaleDBConnectionError:\n+ except dbconstants.AppScaleDBConnectionError as error:\n logger.exception('DB connection error while fetching indices for '\n '{}'.format(app_id))\n- return (response.Encode(),\n- datastore_pb.Error.INTERNAL_ERROR,\n- \"Datastore connection error on get indices request.\")\n+ return '', datastore_pb.Error.INTERNAL_ERROR, str(error)\n+\n for index in indices:\n new_index = response.add_index()\n new_index.ParseFromString(index)\n- return response.Encode(), 0, \"\"\n+\n+ return response.Encode(), 0, ''\n \n def allocate_ids_request(self, app_id, http_request_data):\n \"\"\" High level function for getting unique identifiers for entities.\n@@ -540,33 +511,33 @@ class MainHandler(tornado.web.RequestHandler):\n NotImplementedError: when requesting a max id.\n \"\"\"\n request = datastore_pb.AllocateIdsRequest(http_request_data)\n- response = datastore_pb.AllocateIdsResponse()\n \n if request.has_max() and request.has_size():\n- return (response.Encode(), datastore_pb.Error.BAD_REQUEST,\n+ return ('', datastore_pb.Error.BAD_REQUEST,\n 'Both size and max cannot be set.')\n+\n if not (request.has_max() or request.has_size()):\n- return (response.Encode(), datastore_pb.Error.BAD_REQUEST,\n+ return ('', datastore_pb.Error.BAD_REQUEST,\n 'Either size or max must be set.')\n \n if request.has_size():\n- try:\n- start, end = datastore_access.allocate_size(app_id, request.size())\n- except dbconstants.AppScaleBadArg as error:\n- return response.Encode(), datastore_pb.Error.BAD_REQUEST, str(error)\n- except dbconstants.AppScaleDBConnectionError as error:\n- return response.Encode(), datastore_pb.Error.INTERNAL_ERROR, str(error)\n+ method = datastore_access.allocate_size\n+ args = (app_id, request.size())\n else:\n- try:\n- 
start, end = datastore_access.allocate_max(app_id, request.max())\n- except dbconstants.AppScaleBadArg as error:\n- return response.Encode(), datastore_pb.Error.BAD_REQUEST, str(error)\n- except dbconstants.AppScaleDBConnectionError as error:\n- return response.Encode(), datastore_pb.Error.INTERNAL_ERROR, str(error)\n+ method = datastore_access.allocate_max\n+ args = (app_id, request.max())\n \n+ try:\n+ start, end = method(*args)\n+ except dbconstants.AppScaleBadArg as error:\n+ return '', datastore_pb.Error.BAD_REQUEST, str(error)\n+ except dbconstants.AppScaleDBConnectionError as error:\n+ return '', datastore_pb.Error.INTERNAL_ERROR, str(error)\n+\n+ response = datastore_pb.AllocateIdsResponse()\n response.set_start(start)\n response.set_end(end)\n- return response.Encode(), 0, \"\"\n+ return response.Encode(), 0, ''\n \n @staticmethod\n @gen.coroutine\n@@ -628,34 +599,26 @@ class MainHandler(tornado.web.RequestHandler):\n if READ_ONLY:\n logger.warning('Unable to put in read-only mode: {}'.\n format(putreq_pb))\n- return (putresp_pb.Encode(), datastore_pb.Error.CAPABILITY_DISABLED,\n- 'Datastore is in read-only mode.')\n+ return ('', datastore_pb.Error.CAPABILITY_DISABLED,\n+ 'Datastore is in read-only mode.')\n \n try:\n datastore_access.dynamic_put(app_id, putreq_pb, putresp_pb)\n- return (putresp_pb.Encode(), 0, \"\")\n- except zktransaction.ZKBadRequest as zkie:\n- logger.exception('Illegal argument during {}'.format(putreq_pb))\n- return (putresp_pb.Encode(),\n- datastore_pb.Error.BAD_REQUEST, \n- \"Illegal arguments for transaction. {0}\".format(str(zkie)))\n+ return putresp_pb.Encode(), 0, ''\n+ except zktransaction.ZKBadRequest as error:\n+ return '', datastore_pb.Error.BAD_REQUEST, str(error)\n except zktransaction.ZKInternalException as error:\n logger.exception('ZKInternalException during put')\n- return (putresp_pb.Encode(), datastore_pb.Error.INTERNAL_ERROR,\n- str(error))\n- except zktransaction.ZKTransactionException:\n+ return '', datastore_pb.Error.INTERNAL_ERROR, str(error)\n+ except zktransaction.ZKTransactionException as error:\n logger.exception('Concurrent transaction during {}'.\n format(putreq_pb))\n- return (putresp_pb.Encode(),\n- datastore_pb.Error.CONCURRENT_TRANSACTION, \n- \"Concurrent transaction exception on put.\")\n+ return '', datastore_pb.Error.CONCURRENT_TRANSACTION, str(error)\n except dbconstants.AppScaleDBConnectionError:\n logger.exception('DB connection error during put')\n- return (putresp_pb.Encode(),\n- datastore_pb.Error.INTERNAL_ERROR,\n- \"Datastore connection error on put.\")\n+ return ('', datastore_pb.Error.INTERNAL_ERROR,\n+ 'Datastore connection error on put.')\n \n- \n def get_request(self, app_id, http_request_data):\n \"\"\" High level function for doing gets.\n \n@@ -670,29 +633,21 @@ class MainHandler(tornado.web.RequestHandler):\n getresp_pb = datastore_pb.GetResponse()\n try:\n datastore_access.dynamic_get(app_id, getreq_pb, getresp_pb)\n- except zktransaction.ZKBadRequest as zkie:\n+ except zktransaction.ZKBadRequest as error:\n logger.exception('Illegal argument during {}'.format(getreq_pb))\n- return (getresp_pb.Encode(),\n- datastore_pb.Error.BAD_REQUEST, \n- \"Illegal arguments for transaction. 
{0}\".format(str(zkie)))\n- except zktransaction.ZKInternalException:\n+ return '', datastore_pb.Error.BAD_REQUEST, str(error)\n+ except zktransaction.ZKInternalException as error:\n logger.exception('ZKInternalException during {}'.format(getreq_pb))\n- return (getresp_pb.Encode(),\n- datastore_pb.Error.INTERNAL_ERROR, \n- \"Internal error with ZooKeeper connection.\")\n- except zktransaction.ZKTransactionException:\n- logger.exception('Concurrent transaction during {}'.\n- format(getreq_pb))\n- return (getresp_pb.Encode(),\n- datastore_pb.Error.CONCURRENT_TRANSACTION, \n- \"Concurrent transaction exception on get.\")\n+ return '', datastore_pb.Error.INTERNAL_ERROR, str(error)\n+ except zktransaction.ZKTransactionException as error:\n+ logger.exception('Concurrent transaction during {}'.format(getreq_pb))\n+ return '', datastore_pb.Error.CONCURRENT_TRANSACTION, str(error)\n except dbconstants.AppScaleDBConnectionError:\n logger.exception('DB connection error during get')\n- return (getresp_pb.Encode(),\n- datastore_pb.Error.INTERNAL_ERROR,\n- \"Datastore connection error on get.\")\n+ return ('', datastore_pb.Error.INTERNAL_ERROR,\n+ 'Datastore connection error on get.')\n \n- return getresp_pb.Encode(), 0, \"\"\n+ return getresp_pb.Encode(), 0, ''\n \n def delete_request(self, app_id, http_request_data):\n \"\"\" High level function for doing deletes.\n@@ -711,33 +666,27 @@ class MainHandler(tornado.web.RequestHandler):\n if READ_ONLY:\n logger.warning('Unable to delete in read-only mode: {}'.\n format(delreq_pb))\n- return (delresp_pb.Encode(), datastore_pb.Error.CAPABILITY_DISABLED,\n- 'Datastore is in read-only mode.')\n+ return ('', datastore_pb.Error.CAPABILITY_DISABLED,\n+ 'Datastore is in read-only mode.')\n \n try:\n datastore_access.dynamic_delete(app_id, delreq_pb)\n- return (delresp_pb.Encode(), 0, \"\")\n- except zktransaction.ZKBadRequest as zkie:\n+ return delresp_pb.Encode(), 0, ''\n+ except zktransaction.ZKBadRequest as error:\n logger.exception('Illegal argument during {}'.format(delreq_pb))\n- return (delresp_pb.Encode(),\n- datastore_pb.Error.BAD_REQUEST, \n- \"Illegal arguments for transaction. 
{0}\".format(str(zkie)))\n+ return '', datastore_pb.Error.BAD_REQUEST, str(error)\n except zktransaction.ZKInternalException:\n logger.exception('ZKInternalException during {}'.format(delreq_pb))\n- return (delresp_pb.Encode(),\n- datastore_pb.Error.INTERNAL_ERROR, \n- \"Internal error with ZooKeeper connection.\")\n+ return ('', datastore_pb.Error.INTERNAL_ERROR,\n+ 'Internal error with ZooKeeper connection.')\n except zktransaction.ZKTransactionException:\n- logger.exception('Concurrent transaction during {}'.\n- format(delreq_pb))\n- return (delresp_pb.Encode(),\n- datastore_pb.Error.CONCURRENT_TRANSACTION, \n- \"Concurrent transaction exception on delete.\")\n+ logger.exception('Concurrent transaction during {}'.format(delreq_pb))\n+ return ('', datastore_pb.Error.CONCURRENT_TRANSACTION,\n+ 'Concurrent transaction exception on delete.')\n except dbconstants.AppScaleDBConnectionError:\n logger.exception('DB connection error during delete')\n- return (delresp_pb.Encode(),\n- datastore_pb.Error.INTERNAL_ERROR,\n- \"Datastore connection error on delete.\")\n+ return ('', datastore_pb.Error.INTERNAL_ERROR,\n+ 'Datastore connection error on delete.')\n \n def add_actions_request(self, app_id, http_request_data, service_id,\n version_id):\n@@ -757,27 +706,27 @@ class MainHandler(tornado.web.RequestHandler):\n resp_pb = taskqueue_service_pb.TaskQueueBulkAddResponse()\n \n if service_id is None:\n- return (resp_pb.Encode(), datastore_pb.Error.BAD_REQUEST,\n+ return ('', datastore_pb.Error.BAD_REQUEST,\n 'Module header must be defined')\n \n if version_id is None:\n- return (resp_pb.Encode(), datastore_pb.Error.BAD_REQUEST,\n+ return ('', datastore_pb.Error.BAD_REQUEST,\n 'Version header must be defined')\n \n if READ_ONLY:\n logger.warning('Unable to add transactional tasks in read-only mode')\n- return (resp_pb.Encode(), datastore_pb.Error.CAPABILITY_DISABLED,\n- 'Datastore is in read-only mode.')\n+ return ('', datastore_pb.Error.CAPABILITY_DISABLED,\n+ 'Datastore is in read-only mode.')\n \n try:\n datastore_access.dynamic_add_actions(app_id, req_pb, service_id,\n version_id)\n- return resp_pb.Encode(), 0, \"\"\n+ return resp_pb.Encode(), 0, ''\n except dbconstants.ExcessiveTasks as error:\n- return (resp_pb.Encode(), datastore_pb.Error.BAD_REQUEST, str(error))\n+ return '', datastore_pb.Error.BAD_REQUEST, str(error)\n except dbconstants.AppScaleDBConnectionError:\n logger.exception('DB connection error')\n- return (resp_pb.Encode(), datastore_pb.Error.INTERNAL_ERROR,\n+ return ('', datastore_pb.Error.INTERNAL_ERROR,\n 'Datastore connection error when adding transaction tasks.')\n \n \n", "new_path": "AppDB/appscale/datastore/scripts/datastore.py", "old_path": "AppDB/appscale/datastore/scripts/datastore.py" } ]
15e95dc9e579e7123a4cb78d7347b8340f5fbc27
appscale/gts
null
null
Use the EntityLock in the groomer This allows the groomer to make index modifications under a lock without having to create transaction IDs.
[ { "change_type": "MODIFY", "diff": "@@ -20,6 +20,7 @@ from .cassandra_env import cassandra_interface\n from .datastore_distributed import DatastoreDistributed\n from .utils import get_composite_indexes_rows\n from .zkappscale import zktransaction as zk\n+from .zkappscale.entity_lock import EntityLock\n \n sys.path.append(APPSCALE_PYTHON_APPSERVER)\n from google.appengine.api import apiproxy_stub_map\n@@ -235,73 +236,6 @@ class DatastoreGroomer(threading.Thread):\n \n return False\n \n- def acquire_lock_for_key(self, app_id, key, retries, retry_time):\n- \"\"\" Acquires a lock for a given entity key.\n-\n- Args:\n- app_id: The application ID.\n- key: A string containing an entity key.\n- retries: An integer specifying the number of times to retry.\n- retry_time: How many seconds to wait before each retry.\n- Returns:\n- A transaction ID.\n- Raises:\n- ZKTransactionException if unable to acquire a lock from ZooKeeper.\n- \"\"\"\n- root_key = key.split(dbconstants.KIND_SEPARATOR)[0]\n- root_key += dbconstants.KIND_SEPARATOR\n-\n- txn_id = self.zoo_keeper.get_transaction_id(app_id, is_xg=False)\n- try:\n- self.zoo_keeper.acquire_lock(app_id, txn_id, root_key)\n- except zk.ZKTransactionException as zkte:\n- logging.warning('Concurrent transaction exception for app id {} with '\n- 'info {}'.format(app_id, str(zkte)))\n- if retries > 0:\n- logging.info('Trying again to acquire lock info {} with retry #{}'\n- .format(str(zkte), retries))\n- time.sleep(retry_time)\n- return self.acquire_lock_for_key(\n- app_id=app_id,\n- key=key,\n- retries=retries-1,\n- retry_time=retry_time\n- )\n- self.zoo_keeper.notify_failed_transaction(app_id, txn_id)\n- raise zkte\n- return txn_id\n-\n- def release_lock_for_key(self, app_id, key, txn_id, retries, retry_time):\n- \"\"\" Releases a lock for a given entity key.\n-\n- Args:\n- app_id: The application ID.\n- key: A string containing an entity key.\n- txn_id: A transaction ID.\n- retries: An integer specifying the number of times to retry.\n- retry_time: How many seconds to wait before each retry.\n- \"\"\"\n- root_key = key.split(dbconstants.KIND_SEPARATOR)[0]\n- root_key += dbconstants.KIND_SEPARATOR\n-\n- try:\n- self.zoo_keeper.release_lock(app_id, txn_id)\n- except zk.ZKTransactionException as zkte:\n- logging.warning(str(zkte))\n- if retries > 0:\n- logging.info('Trying again to release lock {} with retry #{}'.\n- format(txn_id, retries))\n- time.sleep(retry_time)\n- self.release_lock_for_key(\n- app_id=app_id,\n- key=key,\n- txn_id=txn_id,\n- retries=retries-1,\n- retry_time=retry_time\n- )\n- else:\n- self.zoo_keeper.notify_failed_transaction(app_id, txn_id)\n-\n def fetch_entity_dict_for_references(self, references):\n \"\"\" Fetches a dictionary of valid entities for a list of references.\n \n@@ -337,6 +271,35 @@ class DatastoreGroomer(threading.Thread):\n entities[key] = app_entities[key][dbconstants.APP_ENTITY_SCHEMA[0]]\n return entities\n \n+ def guess_group_from_table_key(self, entity_key):\n+ \"\"\" Construct a group reference based on an entity key.\n+\n+ Args:\n+ entity_key: A string specifying an entity table key.\n+ Returns:\n+ An entity_pb.Reference object specifying the entity group.\n+ \"\"\"\n+ project_id, namespace, path = entity_key.split(dbconstants.KEY_DELIMITER)\n+\n+ group = entity_pb.Reference()\n+ group.set_app(project_id)\n+ if namespace:\n+ group.set_name_space(namespace)\n+\n+ mutable_path = group.mutable_path()\n+ first_element = mutable_path.add_element()\n+ kind, id_ = path.split(dbconstants.KIND_SEPARATOR)[0].split(':')\n+ 
first_element.set_type(kind)\n+\n+ # At this point, there's no way to tell if the ID was originally a name,\n+ # so this is a guess.\n+ try:\n+ first_element.set_id(int(id_))\n+ except ValueError:\n+ first_element.set_name(id_)\n+\n+ return group\n+\n def lock_and_delete_indexes(self, references, direction, entity_key):\n \"\"\" For a list of index entries that have the same entity, lock the entity\n and delete the indexes.\n@@ -355,45 +318,28 @@ class DatastoreGroomer(threading.Thread):\n else:\n table_name = dbconstants.DSC_PROPERTY_TABLE\n \n- app = entity_key.split(self.ds_access._SEPARATOR)[0]\n- try:\n- txn_id = self.acquire_lock_for_key(\n- app_id=app,\n- key=entity_key,\n- retries=self.ds_access.NON_TRANS_LOCK_RETRY_COUNT,\n- retry_time=self.ds_access.LOCK_RETRY_TIME\n- )\n- except zk.ZKTransactionException:\n- self.index_entries_delete_failures += 1\n- return\n-\n- entities = self.fetch_entity_dict_for_references(references)\n-\n- refs_to_delete = []\n- for reference in references:\n- index_elements = reference.keys()[0].split(self.ds_access._SEPARATOR)\n- prop_name = index_elements[self.ds_access.PROP_NAME_IN_SINGLE_PROP_INDEX]\n- if not self.ds_access._DatastoreDistributed__valid_index_entry(\n- reference, entities, direction, prop_name):\n- refs_to_delete.append(reference.keys()[0])\n-\n- logging.debug('Removing {} indexes starting with {}'.\n- format(len(refs_to_delete), [refs_to_delete[0]]))\n- try:\n- self.db_access.batch_delete(table_name, refs_to_delete,\n- column_names=dbconstants.PROPERTY_SCHEMA)\n- self.index_entries_cleaned += len(refs_to_delete)\n- except Exception:\n- logging.exception('Unable to delete indexes')\n- self.index_entries_delete_failures += 1\n-\n- self.release_lock_for_key(\n- app_id=app,\n- key=entity_key,\n- txn_id=txn_id,\n- retries=self.ds_access.NON_TRANS_LOCK_RETRY_COUNT,\n- retry_time=self.ds_access.LOCK_RETRY_TIME\n- )\n+ group_key = self.guess_group_from_table_key(entity_key)\n+ entity_lock = EntityLock(self.zoo_keeper.handle, [group_key])\n+ with entity_lock:\n+ entities = self.fetch_entity_dict_for_references(references)\n+\n+ refs_to_delete = []\n+ for reference in references:\n+ index_elements = reference.keys()[0].split(self.ds_access._SEPARATOR)\n+ prop = index_elements[self.ds_access.PROP_NAME_IN_SINGLE_PROP_INDEX]\n+ if not self.ds_access._DatastoreDistributed__valid_index_entry(\n+ reference, entities, direction, prop):\n+ refs_to_delete.append(reference.keys()[0])\n+\n+ logging.debug('Removing {} indexes starting with {}'.\n+ format(len(refs_to_delete), [refs_to_delete[0]]))\n+ try:\n+ self.db_access.batch_delete(table_name, refs_to_delete,\n+ column_names=dbconstants.PROPERTY_SCHEMA)\n+ self.index_entries_cleaned += len(refs_to_delete)\n+ except Exception:\n+ logging.exception('Unable to delete indexes')\n+ self.index_entries_delete_failures += 1\n \n def lock_and_delete_kind_index(self, reference):\n \"\"\" For a list of index entries that have the same entity, lock the entity\n@@ -408,37 +354,21 @@ class DatastoreGroomer(threading.Thread):\n \"\"\"\n table_name = dbconstants.APP_KIND_TABLE\n entity_key = reference.values()[0].values()[0]\n- app = entity_key.split(self.ds_access._SEPARATOR)[0]\n- try:\n- txn_id = self.acquire_lock_for_key(\n- app_id=app,\n- key=entity_key,\n- retries=self.ds_access.NON_TRANS_LOCK_RETRY_COUNT,\n- retry_time=self.ds_access.LOCK_RETRY_TIME\n- )\n- except zk.ZKTransactionException:\n- self.index_entries_delete_failures += 1\n- return\n-\n- entities = self.fetch_entity_dict_for_references([reference])\n- 
if entity_key not in entities:\n- index_to_delete = reference.keys()[0]\n- logging.debug('Removing {}'.format([index_to_delete]))\n- try:\n- self.db_access.batch_delete(table_name, [index_to_delete],\n- column_names=dbconstants.APP_KIND_SCHEMA)\n- self.index_entries_cleaned += 1\n- except dbconstants.AppScaleDBConnectionError:\n- logging.exception('Unable to delete index.')\n- self.index_entries_delete_failures += 1\n \n- self.release_lock_for_key(\n- app_id=app,\n- key=entity_key,\n- txn_id=txn_id,\n- retries=self.ds_access.NON_TRANS_LOCK_RETRY_COUNT,\n- retry_time=self.ds_access.LOCK_RETRY_TIME\n- )\n+ group_key = self.guess_group_from_table_key(entity_key)\n+ entity_lock = EntityLock(self.zoo_keeper.handle, [group_key])\n+ with entity_lock:\n+ entities = self.fetch_entity_dict_for_references([reference])\n+ if entity_key not in entities:\n+ index_to_delete = reference.keys()[0]\n+ logging.debug('Removing {}'.format([index_to_delete]))\n+ try:\n+ self.db_access.batch_delete(table_name, [index_to_delete],\n+ column_names=dbconstants.APP_KIND_SCHEMA)\n+ self.index_entries_cleaned += 1\n+ except dbconstants.AppScaleDBConnectionError:\n+ logging.exception('Unable to delete index.')\n+ self.index_entries_delete_failures += 1\n \n def clean_up_indexes(self, direction):\n \"\"\" Deletes invalid single property index entries.\n", "new_path": "AppDB/appscale/datastore/groomer.py", "old_path": "AppDB/appscale/datastore/groomer.py" } ]
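Editor's note: the groomer now relies on EntityLock's context-manager protocol, so the per-group lock is released even if the cleanup raises and no transaction ID has to be created or failed. A minimal sketch of that usage; the cleanup callable and the caller that builds the group Reference (as guess_group_from_table_key does above) are hypothetical:

```python
from appscale.datastore.zkappscale.entity_lock import EntityLock

def delete_stale_indexes(zk_handle, group_key, cleanup):
    """Runs cleanup() while holding the entity-group lock.

    Args:
      zk_handle: A started KazooClient.
      group_key: An entity_pb.Reference for the entity group.
      cleanup: A callable that performs the index deletions.
    """
    with EntityLock(zk_handle, [group_key]):
        # Exiting the block releases the lock, even when cleanup() raises,
        # so there is no notify_failed_transaction bookkeeping.
        cleanup()
```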
b08b928d2937caa7ea70ba57839c52316390d9df
appscale/gts
null
null
Allow Python runtime to use an external API server If given an external API port, the Python runtime will use it to make App Identity calls.
[ { "change_type": "MODIFY", "diff": "@@ -592,7 +592,8 @@ def GetRemoteAppIdFromServer(server, path, remote_token=None):\n def ConfigureRemoteApiFromServer(server, path, app_id, services=None,\n default_auth_domain=None,\n use_remote_datastore=True,\n- use_async_rpc=False):\n+ use_async_rpc=False,\n+ external_server=None):\n \"\"\"Does necessary setup to allow easy remote access to App Engine APIs.\n \n Args:\n@@ -609,6 +610,8 @@ def ConfigureRemoteApiFromServer(server, path, app_id, services=None,\n a single request.\n use_async_rpc: A boolean indicating whether or not to make RPC calls in a\n separate thread.\n+ external_server: An AbstractRpcServer specifying the location of an\n+ external API server.\n \n Raises:\n urllib2.HTTPError: if app_id is not provided and there is an error while\n@@ -636,11 +639,20 @@ def ConfigureRemoteApiFromServer(server, path, app_id, services=None,\n apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', datastore_stub)\n \n if use_async_rpc:\n- stub = RuntimeRemoteStub(server, path)\n+ stub_type = RuntimeRemoteStub\n else:\n- stub = RemoteStub(server, path)\n+ stub_type = RemoteStub\n+\n+ stub = stub_type(server, path)\n+ external_stub = None\n+ if external_server is not None:\n+ external_stub = stub_type(external_server, path)\n+\n for service in services:\n- apiproxy_stub_map.apiproxy.RegisterStub(service, stub)\n+ if service == 'app_identity_service' and external_stub is not None:\n+ apiproxy_stub_map.apiproxy.RegisterStub(service, external_stub)\n+ else:\n+ apiproxy_stub_map.apiproxy.RegisterStub(service, stub)\n \n \n def GetRemoteAppId(servername,\n@@ -691,7 +703,8 @@ def ConfigureRemoteApi(app_id,\n default_auth_domain=None,\n save_cookies=False,\n use_remote_datastore=True,\n- use_async_rpc=False):\n+ use_async_rpc=False,\n+ external_api_server=None):\n \"\"\"Does necessary setup to allow easy remote access to App Engine APIs.\n \n Either servername must be provided or app_id must not be None. 
If app_id\n@@ -727,6 +740,8 @@ def ConfigureRemoteApi(app_id,\n a single request.\n use_async_rpc: A boolean indicating whether or not to make RPC calls in a\n separate thread.\n+ external_api_server: A string specifying the location of an external API\n+ server.\n \n Returns:\n server, the server created by rpc_server_factory, which may be useful for\n@@ -744,12 +759,20 @@ def ConfigureRemoteApi(app_id,\n server = rpc_server_factory(servername, auth_func, GetUserAgent(),\n GetSourceName(), save_cookies=save_cookies,\n debug_data=False, secure=secure)\n+\n+ if external_api_server is None:\n+ external_server = server\n+ else:\n+ external_server = rpc_server_factory(\n+ external_api_server, auth_func, GetUserAgent(), GetSourceName(),\n+ save_cookies=save_cookies, debug_data=False, secure=secure)\n+\n if not app_id:\n app_id = GetRemoteAppIdFromServer(server, path, rtok)\n \n ConfigureRemoteApiFromServer(server, path, app_id, services,\n default_auth_domain, use_remote_datastore,\n- use_async_rpc)\n+ use_async_rpc, external_server)\n return server\n \n \n", "new_path": "AppServer/google/appengine/ext/remote_api/remote_api_stub.py", "old_path": "AppServer/google/appengine/ext/remote_api/remote_api_stub.py" }, { "change_type": "MODIFY", "diff": "@@ -436,6 +436,9 @@ def create_command_line_parser():\n \n # AppScale\n appscale_group = parser.add_argument_group('AppScale')\n+ appscale_group.add_argument(\n+ '--external_api_port', type=int,\n+ help='The port of the external server that handles API calls')\n appscale_group.add_argument(\n '--login_server',\n help='the FQDN or IP address where users should be redirected to when the '\n@@ -594,7 +597,8 @@ class DevelopmentServer(object):\n module_to_max_instances,\n options.use_mtime_file_watcher,\n options.automatic_restart,\n- options.allow_skipped_files)\n+ options.allow_skipped_files,\n+ options.external_api_port)\n request_data = wsgi_request_info.WSGIRequestInfo(self._dispatcher)\n \n storage_path = _get_storage_path(options.storage_path, configuration.app_id)\n", "new_path": "AppServer/google/appengine/tools/devappserver2/devappserver2.py", "old_path": "AppServer/google/appengine/tools/devappserver2/devappserver2.py" }, { "change_type": "MODIFY", "diff": "@@ -73,7 +73,8 @@ class Dispatcher(request_info.Dispatcher):\n module_to_max_instances,\n use_mtime_file_watcher,\n automatic_restart,\n- allow_skipped_files):\n+ allow_skipped_files,\n+ external_api_port=None):\n \"\"\"Initializer for Dispatcher.\n \n Args:\n@@ -109,6 +110,8 @@ class Dispatcher(request_info.Dispatcher):\n allow_skipped_files: If True then all files in the application's directory\n are readable, even if they appear in a static handler or \"skip_files\"\n directive.\n+ external_api_port: An integer specifying the location of an external API\n+ server.\n \"\"\"\n self._configuration = configuration\n self._php_executable_path = php_executable_path\n@@ -117,6 +120,7 @@ class Dispatcher(request_info.Dispatcher):\n self._cloud_sql_config = cloud_sql_config\n self._request_data = None\n self._api_port = None\n+ self._external_api_port = external_api_port\n self._running_modules = []\n self._module_configurations = {}\n self._host = host\n@@ -159,7 +163,8 @@ class Dispatcher(request_info.Dispatcher):\n for module_configuration in self._configuration.modules:\n self._module_configurations[\n module_configuration.module_name] = module_configuration\n- _module, port = self._create_module(module_configuration, port)\n+ _module, port = self._create_module(module_configuration, port,\n+ 
self._external_api_port)\n _module.start()\n self._module_name_to_module[module_configuration.module_name] = _module\n logging.info('Starting module \"%s\" running at: http://%s',\n@@ -229,7 +234,7 @@ class Dispatcher(request_info.Dispatcher):\n for _module in self._module_name_to_module.values():\n _module.quit()\n \n- def _create_module(self, module_configuration, port):\n+ def _create_module(self, module_configuration, port, external_port=None):\n max_instances = self._module_to_max_instances.get(\n module_configuration.module_name)\n module_args = (module_configuration,\n@@ -250,12 +255,13 @@ class Dispatcher(request_info.Dispatcher):\n self._use_mtime_file_watcher,\n self._automatic_restart,\n self._allow_skipped_files)\n+ module_kwargs = {'external_api_port': external_port}\n if module_configuration.manual_scaling:\n- _module = module.ManualScalingModule(*module_args)\n+ _module = module.ManualScalingModule(*module_args, **module_kwargs)\n elif module_configuration.basic_scaling:\n- _module = module.BasicScalingModule(*module_args)\n+ _module = module.BasicScalingModule(*module_args, **module_kwargs)\n else:\n- _module = module.AutoScalingModule(*module_args)\n+ _module = module.AutoScalingModule(*module_args, **module_kwargs)\n \n if port != 0:\n port += 1000\n", "new_path": "AppServer/google/appengine/tools/devappserver2/dispatcher.py", "old_path": "AppServer/google/appengine/tools/devappserver2/dispatcher.py" }, { "change_type": "MODIFY", "diff": "@@ -28,6 +28,7 @@ import os.path\n import random\n import re\n import string\n+import struct\n import threading\n import time\n import urllib\n@@ -259,7 +260,15 @@ class Module(object):\n runtime_config.skip_files = str(self._module_configuration.skip_files)\n runtime_config.static_files = _static_files_regex_from_handlers(\n self._module_configuration.handlers)\n- runtime_config.api_port = self._api_port\n+\n+ # AppScale: Pack both API ports into the same field.\n+ if (self._external_api_port is not None and\n+ self._module_configuration.runtime == 'python27'):\n+ port_bytes = struct.pack('HH', self._api_port, self._external_api_port)\n+ runtime_config.api_port = struct.unpack('I', port_bytes)[0]\n+ else:\n+ runtime_config.api_port = self._api_port\n+\n runtime_config.stderr_log_level = self._runtime_stderr_loglevel\n runtime_config.datacenter = 'us1'\n runtime_config.auth_domain = self._auth_domain\n@@ -352,7 +361,8 @@ class Module(object):\n max_instances,\n use_mtime_file_watcher,\n automatic_restarts,\n- allow_skipped_files):\n+ allow_skipped_files,\n+ external_api_port=None):\n \"\"\"Initializer for Module.\n \n Args:\n@@ -394,11 +404,14 @@ class Module(object):\n allow_skipped_files: If True then all files in the application's directory\n are readable, even if they appear in a static handler or \"skip_files\"\n directive.\n+ external_api_port: An integer specifying the location of an external API\n+ server.\n \"\"\"\n self._module_configuration = module_configuration\n self._name = module_configuration.module_name\n self._host = host\n self._api_port = api_port\n+ self._external_api_port = external_api_port\n self._auth_domain = auth_domain\n self._runtime_stderr_loglevel = runtime_stderr_loglevel\n self._balanced_port = balanced_port\n@@ -895,7 +908,8 @@ class AutoScalingModule(Module):\n max_instances,\n use_mtime_file_watcher,\n automatic_restarts,\n- allow_skipped_files):\n+ allow_skipped_files,\n+ external_api_port=None):\n \"\"\"Initializer for AutoScalingModule.\n \n Args:\n@@ -937,6 +951,8 @@ class 
AutoScalingModule(Module):\n allow_skipped_files: If True then all files in the application's directory\n are readable, even if they appear in a static handler or \"skip_files\"\n directive.\n+ external_api_port: An integer specifying the location of an external API\n+ server.\n \"\"\"\n super(AutoScalingModule, self).__init__(module_configuration,\n host,\n@@ -955,7 +971,8 @@ class AutoScalingModule(Module):\n max_instances,\n use_mtime_file_watcher,\n automatic_restarts,\n- allow_skipped_files)\n+ allow_skipped_files,\n+ external_api_port)\n \n self._process_automatic_scaling(\n self._module_configuration.automatic_scaling)\n@@ -1327,7 +1344,8 @@ class ManualScalingModule(Module):\n max_instances,\n use_mtime_file_watcher,\n automatic_restarts,\n- allow_skipped_files):\n+ allow_skipped_files,\n+ external_api_port=None):\n \"\"\"Initializer for ManualScalingModule.\n \n Args:\n@@ -1369,6 +1387,8 @@ class ManualScalingModule(Module):\n allow_skipped_files: If True then all files in the application's directory\n are readable, even if they appear in a static handler or \"skip_files\"\n directive.\n+ external_api_port: An integer specifying the location of an external API\n+ server.\n \"\"\"\n super(ManualScalingModule, self).__init__(module_configuration,\n host,\n@@ -1387,7 +1407,8 @@ class ManualScalingModule(Module):\n max_instances,\n use_mtime_file_watcher,\n automatic_restarts,\n- allow_skipped_files)\n+ allow_skipped_files,\n+ external_api_port)\n \n self._process_manual_scaling(module_configuration.manual_scaling)\n \n@@ -1823,7 +1844,8 @@ class BasicScalingModule(Module):\n max_instances,\n use_mtime_file_watcher,\n automatic_restarts,\n- allow_skipped_files):\n+ allow_skipped_files,\n+ external_api_port=None):\n \"\"\"Initializer for BasicScalingModule.\n \n Args:\n@@ -1865,6 +1887,8 @@ class BasicScalingModule(Module):\n allow_skipped_files: If True then all files in the application's directory\n are readable, even if they appear in a static handler or \"skip_files\"\n directive.\n+ external_api_port: An integer specifying the location of an external API\n+ server.\n \"\"\"\n super(BasicScalingModule, self).__init__(module_configuration,\n host,\n@@ -1883,7 +1907,8 @@ class BasicScalingModule(Module):\n max_instances,\n use_mtime_file_watcher,\n automatic_restarts,\n- allow_skipped_files)\n+ allow_skipped_files,\n+ external_api_port)\n self._process_basic_scaling(module_configuration.basic_scaling)\n \n self._instances = [] # Protected by self._condition.\n", "new_path": "AppServer/google/appengine/tools/devappserver2/module.py", "old_path": "AppServer/google/appengine/tools/devappserver2/module.py" }, { "change_type": "MODIFY", "diff": "@@ -19,6 +19,7 @@\n \n import base64\n import os\n+import struct\n import sys\n import time\n import traceback\n@@ -54,12 +55,18 @@ _STARTUP_FAILURE_TEMPLATE = \"\"\"\n </html>\"\"\"\n \n \n-def setup_stubs(config):\n+def setup_stubs(config, external_api_port=None):\n \"\"\"Sets up API stubs using remote API.\"\"\"\n+ if external_api_port is None:\n+ external_api_server = None\n+ else:\n+ external_api_server = 'localhost:{}'.format(external_api_port)\n+\n remote_api_stub.ConfigureRemoteApi(config.app_id, '/', lambda: ('', ''),\n 'localhost:%d' % config.api_port,\n use_remote_datastore=False,\n- use_async_rpc=True)\n+ use_async_rpc=True,\n+ external_api_server=external_api_server)\n \n if config.HasField('cloud_sql_config'):\n # Connect the RDBMS API to MySQL.\n@@ -119,6 +126,13 @@ def expand_user(path):\n def main():\n config = 
runtime_config_pb2.Config()\n config.ParseFromString(base64.b64decode(sys.stdin.read()))\n+\n+ # AppScale: The external port is packed in the same field as the API port.\n+ external_api_port = None\n+ if config.api_port > 65535:\n+ port_bytes = struct.pack('I', config.api_port)\n+ config.api_port, external_api_port = struct.unpack('HH', port_bytes)\n+\n debugging_app = None\n if config.python_config and config.python_config.startup_script:\n global_vars = {'config': config}\n@@ -144,7 +158,7 @@ def main():\n ('localhost', 0),\n debugging_app)\n else:\n- setup_stubs(config)\n+ setup_stubs(config, external_api_port)\n sandbox.enable_sandbox(config)\n os.path.expanduser = expand_user\n # This import needs to be after enabling the sandbox so the runtime\n", "new_path": "AppServer/google/appengine/tools/devappserver2/python/runtime.py", "old_path": "AppServer/google/appengine/tools/devappserver2/python/runtime.py" } ]
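Editor's note: both 16-bit ports travel in the single 32-bit api_port field of the runtime config, which is why the runtime treats any value above 65535 as packed. A minimal sketch of the round trip using only the struct module (the port numbers are arbitrary examples):

```python
import struct

def pack_api_ports(api_port, external_api_port):
    # Two unsigned 16-bit ports fit exactly into one unsigned 32-bit field.
    port_bytes = struct.pack('HH', api_port, external_api_port)
    return struct.unpack('I', port_bytes)[0]

def unpack_api_ports(packed_value):
    port_bytes = struct.pack('I', packed_value)
    return struct.unpack('HH', port_bytes)  # (api_port, external_api_port)

packed = pack_api_ports(40000, 40001)
assert packed > 65535  # the check the runtime uses to detect a packed value
assert unpack_api_ports(packed) == (40000, 40001)
```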
31ede29f649eb81eaaa8cb7665db020d7245de5c
appscale/gts
null
null
Attach list of groups to transaction node This allows the transaction groomer to track down which entity groups might have an entity lock that needs to be cleared after the transaction is resolved.
[ { "change_type": "MODIFY", "diff": "@@ -560,6 +560,7 @@ class DatastoreDistributed():\n group_key = entity_pb.Reference(encoded_group_key)\n \n txid = self.transaction_manager.create_transaction_id(app, xg=False)\n+ self.transaction_manager.set_groups(app, txid, [group_key])\n lock = entity_lock.EntityLock(self.zookeeper.handle, [group_key], txid)\n try:\n with lock:\n@@ -896,6 +897,7 @@ class DatastoreDistributed():\n group_key = entity_pb.Reference(encoded_group_key)\n \n txid = self.transaction_manager.create_transaction_id(app_id, xg=False)\n+ self.transaction_manager.set_groups(app_id, txid, [group_key])\n lock = entity_lock.EntityLock(self.zookeeper.handle, [group_key], txid)\n try:\n with lock:\n@@ -3217,6 +3219,7 @@ class DatastoreDistributed():\n for index in self.datastore_batch.get_indices(app)]\n \n decoded_groups = (entity_pb.Reference(group) for group in tx_groups)\n+ self.transaction_manager.set_groups(app, txn, decoded_groups)\n lock = entity_lock.EntityLock(self.zookeeper.handle, decoded_groups, txn)\n \n with lock:\n", "new_path": "AppDB/appscale/datastore/datastore_distributed.py", "old_path": "AppDB/appscale/datastore/datastore_distributed.py" }, { "change_type": "MODIFY", "diff": "@@ -1,6 +1,7 @@\n \"\"\" Generates and keeps track of transaction IDs. \"\"\"\n from __future__ import division\n \n+import json\n import logging\n import time\n \n@@ -14,6 +15,7 @@ from .constants import CONTAINER_PREFIX\n from .constants import COUNTER_NODE_PREFIX\n from .constants import MAX_SEQUENCE_COUNTER\n from .constants import OFFSET_NODE\n+from .entity_lock import zk_group_path\n from ..dbconstants import BadRequest\n from ..dbconstants import InternalError\n \n@@ -103,20 +105,8 @@ class ProjectTransactionManager(object):\n Args:\n txid: An integer specifying a transaction ID.\n \"\"\"\n- corrected_counter = txid - self._txid_manual_offset\n-\n- # The number of counters a container can store (including 0).\n- container_size = MAX_SEQUENCE_COUNTER + 1\n-\n- container_count = int(corrected_counter / container_size) + 1\n- container_suffix = '' if container_count == 1 else str(container_count)\n- container_name = CONTAINER_PREFIX + container_suffix\n- container_path = '/'.join([self._project_node, container_name])\n-\n- counter_value = corrected_counter % container_size\n- node_name = COUNTER_NODE_PREFIX + str(counter_value).zfill(10)\n- full_path = '/'.join([container_path, node_name])\n- self._delete_counter(full_path)\n+ path = self._txid_to_path(txid)\n+ self._delete_counter(path)\n \n def get_open_transactions(self):\n \"\"\" Fetches a list of active transactions.\n@@ -152,6 +142,23 @@ class ProjectTransactionManager(object):\n \n return txids\n \n+ def set_groups(self, txid, groups):\n+ \"\"\" Defines which groups will be involved in a transaction.\n+\n+ Args:\n+ txid: An integer specifying a transaction ID.\n+ groups: An iterable of entity group Reference objects.\n+ \"\"\"\n+ txid_path = self._txid_to_path(txid)\n+ groups_path = '/'.join([txid_path, 'groups'])\n+ encoded_groups = [zk_group_path(group) for group in groups]\n+ try:\n+ self.zk_client.create(groups_path, value=json.dumps(encoded_groups))\n+ except KazooException:\n+ message = 'Unable to set lock list for transaction'\n+ logger.exception(message)\n+ raise InternalError(message)\n+\n def _delete_counter(self, path):\n \"\"\" Removes a counter node.\n \n@@ -159,11 +166,7 @@ class ProjectTransactionManager(object):\n path: A string specifying a ZooKeeper path.\n \"\"\"\n try:\n- try:\n- self.zk_client.delete(path)\n- 
except NotEmptyError:\n- # Cross-group transaction nodes have a child node.\n- self.zk_client.delete(path, recursive=True)\n+ self.zk_client.delete(path, recursive=True)\n except KazooException:\n # Let the transaction groomer clean it up.\n logger.exception('Unable to delete counter')\n@@ -185,6 +188,28 @@ class ProjectTransactionManager(object):\n for container in all_containers\n if container not in self._inactive_containers)\n \n+ def _txid_to_path(self, txid):\n+ \"\"\" Determines the ZooKeeper path for a given transaction ID.\n+\n+ Args:\n+ txid: An integer specifying a transaction ID.\n+ Returns:\n+ A strings specifying the transaction's ZooKeeper path.\n+ \"\"\"\n+ corrected_counter = txid - self._txid_manual_offset\n+\n+ # The number of counters a container can store (including 0).\n+ container_size = MAX_SEQUENCE_COUNTER + 1\n+\n+ container_count = int(corrected_counter / container_size) + 1\n+ container_suffix = '' if container_count == 1 else str(container_count)\n+ container_name = CONTAINER_PREFIX + container_suffix\n+ container_path = '/'.join([self._project_node, container_name])\n+\n+ counter_value = corrected_counter % container_size\n+ node_name = COUNTER_NODE_PREFIX + str(counter_value).zfill(10)\n+ return '/'.join([container_path, node_name])\n+\n def _update_auto_offset(self):\n \"\"\" Ensures there is a usable sequence container. \"\"\"\n container_name = self._counter_path.split('/')[-1]\n@@ -308,6 +333,21 @@ class TransactionManager(object):\n \n return project_tx_manager.get_open_transactions()\n \n+ def set_groups(self, project_id, txid, groups):\n+ \"\"\" Defines which groups will be involved in a transaction.\n+\n+ Args:\n+ project_id: A string specifying a project ID.\n+ txid: An integer specifying a transaction ID.\n+ groups: An iterable of entity group Reference objects.\n+ \"\"\"\n+ try:\n+ project_tx_manager = self.projects[project_id]\n+ except KeyError:\n+ raise BadRequest('The project {} was not found'.format(project_id))\n+\n+ return project_tx_manager.set_groups(txid, groups)\n+\n def _update_projects_sync(self, new_project_ids):\n \"\"\" Updates the available projects for starting transactions.\n \n", "new_path": "AppDB/appscale/datastore/zkappscale/transaction_manager.py", "old_path": "AppDB/appscale/datastore/zkappscale/transaction_manager.py" }, { "change_type": "MODIFY", "diff": "@@ -371,7 +371,8 @@ class TestDatastoreServer(unittest.TestCase):\n db_batch.should_receive('batch_mutate')\n transaction_manager = flexmock(\n create_transaction_id=lambda project, xg: 1,\n- delete_transaction_id=lambda project, txid: None)\n+ delete_transaction_id=lambda project, txid: None,\n+ set_groups=lambda project, txid, groups: None)\n dd = DatastoreDistributed(db_batch, transaction_manager,\n self.get_zookeeper())\n putreq_pb = datastore_pb.PutRequest()\n@@ -410,7 +411,8 @@ class TestDatastoreServer(unittest.TestCase):\n db_batch.should_receive('batch_mutate')\n transaction_manager = flexmock(\n create_transaction_id=lambda project, xg: 1,\n- delete_transaction_id=lambda project, txid: None)\n+ delete_transaction_id=lambda project, txid: None,\n+ set_groups=lambda project, txid, groups: None)\n dd = DatastoreDistributed(db_batch, transaction_manager,\n self.get_zookeeper())\n \n@@ -718,7 +720,8 @@ class TestDatastoreServer(unittest.TestCase):\n db_batch.should_receive('valid_data_version').and_return(True)\n transaction_manager = flexmock(\n create_transaction_id=lambda project, xg: 1,\n- delete_transaction_id=lambda project, txid: None)\n+ 
delete_transaction_id=lambda project, txid: None,\n+ set_groups=lambda project_id, txid, groups: None)\n dd = DatastoreDistributed(db_batch, transaction_manager,\n self.get_zookeeper())\n dd.dynamic_delete(\"appid\", del_request)\n@@ -1056,7 +1059,9 @@ class TestDatastoreServer(unittest.TestCase):\n \n db_batch.should_receive('get_indices').and_return([])\n \n- transaction_manager = flexmock()\n+ transaction_manager = flexmock(\n+ delete_transaction_id=lambda project_id, txid: None,\n+ set_groups=lambda project_id, txid, groups: None)\n dd = DatastoreDistributed(db_batch, transaction_manager,\n self.get_zookeeper())\n prefix = dd.get_table_prefix(entity)\n", "new_path": "AppDB/test/unit/test_datastore_server.py", "old_path": "AppDB/test/unit/test_datastore_server.py" } ]
657c74b62a3f37f3c55b454d683f6f076ecdf014
appscale/gts
null
null
Add appscale-admin restart command This gives the ServiceManager a unix socket at /var/run/appscale/service_manager.sock that accepts management requests. This allows the user to immediately restart datastore servers without waiting for the ServiceManager to periodically check and fulfill assignments.
[ { "change_type": "MODIFY", "diff": "@@ -11,6 +11,13 @@ import re\n import sys\n import time\n \n+try:\n+ from urllib import quote as urlquote\n+except ImportError:\n+ from urllib.parse import quote as urlquote\n+\n+import requests_unixsocket\n+\n from appscale.appcontroller_client import AppControllerException\n from appscale.common import appscale_info\n from appscale.common.constants import (\n@@ -35,7 +42,9 @@ from tornado.options import options\n from tornado import web\n from tornado.escape import json_decode\n from tornado.escape import json_encode\n+from tornado.httpserver import HTTPServer\n from tornado.ioloop import IOLoop\n+from tornado.netutil import bind_unix_socket\n from . import utils\n from . import constants\n from .appengine_api import UpdateCronHandler\n@@ -59,7 +68,7 @@ from .operation import (\n )\n from .operations_cache import OperationsCache\n from .push_worker_manager import GlobalPushWorkerManager\n-from .service_manager import ServiceManager\n+from .service_manager import ServiceManager, ServiceManagerHandler\n from .summary import get_combined_services\n \n logger = logging.getLogger('appscale-admin')\n@@ -1201,6 +1210,11 @@ def main():\n \n subparsers.add_parser(\n 'summary', description='Lists AppScale processes running on this machine')\n+ restart_parser = subparsers.add_parser(\n+ 'restart',\n+ description='Restart AppScale processes running on this machine')\n+ restart_parser.add_argument('service', nargs='+',\n+ help='The process or service ID to restart')\n \n args = parser.parse_args()\n if args.command == 'summary':\n@@ -1208,6 +1222,15 @@ def main():\n print(tabulate(table, headers=['Service', 'State']))\n sys.exit(0)\n \n+ if args.command == 'restart':\n+ socket_path = urlquote(ServiceManagerHandler.SOCKET_PATH, safe='')\n+ session = requests_unixsocket.Session()\n+ response = session.post(\n+ 'http+unix://{}/'.format(socket_path),\n+ data={'command': 'restart', 'arg': [args.service]})\n+ response.raise_for_status()\n+ return\n+\n if args.verbose:\n logger.setLevel(logging.DEBUG)\n \n@@ -1259,5 +1282,12 @@ def main():\n ])\n logger.info('Starting AdminServer')\n app.listen(args.port)\n+\n+ management_app = web.Application([\n+ ('/', ServiceManagerHandler, {'service_manager': service_manager})])\n+ management_server = HTTPServer(management_app)\n+ management_socket = bind_unix_socket(ServiceManagerHandler.SOCKET_PATH)\n+ management_server.add_socket(management_socket)\n+\n io_loop = IOLoop.current()\n io_loop.start()\n", "new_path": "AdminServer/appscale/admin/__init__.py", "old_path": "AdminServer/appscale/admin/__init__.py" }, { "change_type": "MODIFY", "diff": "@@ -5,23 +5,28 @@ import json\n import logging\n import os\n import psutil\n+import re\n import socket\n import subprocess\n import time\n \n from psutil import NoSuchProcess\n-from tornado import gen\n+from tornado import gen, web\n from tornado.httpclient import AsyncHTTPClient\n from tornado.ioloop import IOLoop, PeriodicCallback\n from tornado.locks import Lock as AsyncLock\n from tornado.options import options\n \n from appscale.common.async_retrying import retry_data_watch_coroutine\n-from appscale.common.constants import ASSIGNMENTS_PATH, CGROUP_DIR, LOG_DIR\n+from appscale.common.constants import (ASSIGNMENTS_PATH, CGROUP_DIR, HTTPCodes,\n+ LOG_DIR, VAR_DIR)\n \n # The cgroup used to start datastore server processes.\n DATASTORE_CGROUP = ['memory', 'appscale-datastore']\n \n+# The characters allowed in a service identifier (eg. 
datastore)\n+SERVICE_ID_CHARS = '[a-z_]'\n+\n logger = logging.getLogger('appscale-admin')\n \n \n@@ -40,6 +45,11 @@ class ServerStates(object):\n STOPPING = 'stopping'\n \n \n+class BadRequest(Exception):\n+ \"\"\" Indicates a problem with the client request. \"\"\"\n+ pass\n+\n+\n class ProcessStopped(Exception):\n \"\"\" Indicates that the server process is no longer running. \"\"\"\n pass\n@@ -68,6 +78,14 @@ class Server(object):\n self.state = ServerStates.NEW\n self.type = service_type\n \n+ @gen.coroutine\n+ def ensure_running(self):\n+ raise NotImplementedError()\n+\n+ @gen.coroutine\n+ def restart(self):\n+ raise NotImplementedError()\n+\n @gen.coroutine\n def start(self):\n raise NotImplementedError()\n@@ -140,6 +158,11 @@ class DatastoreServer(Server):\n server.state = ServerStates.RUNNING\n return server\n \n+ @gen.coroutine\n+ def restart(self):\n+ yield self.stop()\n+ yield self.start()\n+\n @gen.coroutine\n def start(self):\n \"\"\" Starts a new datastore server. \"\"\"\n@@ -319,6 +342,28 @@ class ServiceManager(object):\n PeriodicCallback(self._groom_servers,\n self.GROOMING_INTERVAL * 1000).start()\n \n+ @gen.coroutine\n+ def restart_service(self, service_id):\n+ if service_id not in self.SERVICE_MAP:\n+ raise BadRequest('Unrecognized service: {}'.format(service_id))\n+\n+ logger.info('Restarting {} servers'.format(service_id))\n+ yield [server.restart() for server in self.state\n+ if server.type == service_id]\n+\n+ @gen.coroutine\n+ def restart_server(self, service_id, port):\n+ if service_id not in self.SERVICE_MAP:\n+ raise BadRequest('Unrecognized service: {}'.format(service_id))\n+\n+ try:\n+ server = next(server for server in self.state\n+ if server.type == service_id and server.port == port)\n+ except StopIteration:\n+ raise BadRequest('Server not found')\n+\n+ yield server.restart()\n+\n @gen.coroutine\n def _groom_servers(self):\n \"\"\" Forgets about outdated servers and fulfills assignments. 
\"\"\"\n@@ -413,3 +458,51 @@ class ServiceManager(object):\n assignments = json.loads(encoded_assignments) if encoded_assignments else {}\n \n IOLoop.instance().add_callback(persistent_update_services, assignments)\n+\n+\n+class ServiceManagerHandler(web.RequestHandler):\n+ # The unix socket to use for receiving management requests.\n+ SOCKET_PATH = os.path.join(VAR_DIR, 'service_manager.sock')\n+\n+ # An expression that matches server instances.\n+ SERVER_RE = re.compile(r'^({}+)-(\\d+)$'.format(SERVICE_ID_CHARS))\n+\n+ # An expression that matches service IDs.\n+ SERVICE_RE = re.compile('^{}+$'.format(SERVICE_ID_CHARS))\n+\n+ def initialize(self, service_manager):\n+ \"\"\" Defines required resources to handle requests.\n+\n+ Args:\n+ service_manager: A ServiceManager object.\n+ \"\"\"\n+ self._service_manager = service_manager\n+\n+ @gen.coroutine\n+ def post(self):\n+ command = self.get_argument('command')\n+ if command != 'restart':\n+ raise web.HTTPError(HTTPCodes.BAD_REQUEST,\n+ '\"restart\" is the only supported command')\n+\n+ args = self.get_arguments('arg')\n+ for arg in args:\n+ match = self.SERVER_RE.match(arg)\n+ if match:\n+ service_id = match.group(1)\n+ port = int(match.group(2))\n+ try:\n+ yield self._service_manager.restart_server(service_id, port)\n+ return\n+ except BadRequest as error:\n+ raise web.HTTPError(HTTPCodes.BAD_REQUEST, str(error))\n+\n+ if self.SERVICE_RE.match(arg):\n+ try:\n+ yield self._service_manager.restart_service(arg)\n+ return\n+ except BadRequest as error:\n+ raise web.HTTPError(HTTPCodes.BAD_REQUEST, str(error))\n+\n+ raise web.HTTPError(HTTPCodes.BAD_REQUEST,\n+ 'Unrecognized argument: {}'.format(arg))\n", "new_path": "AdminServer/appscale/admin/service_manager.py", "old_path": "AdminServer/appscale/admin/service_manager.py" }, { "change_type": "MODIFY", "diff": "@@ -7,6 +7,7 @@ install_requires = [\n 'kazoo',\n 'psutil',\n 'PyYaml',\n+ 'requests-unixsocket',\n 'SOAPpy',\n 'tabulate',\n 'tornado',\n", "new_path": "AdminServer/setup.py", "old_path": "AdminServer/setup.py" } ]
c55490110eb9ef0a8ef439776d04fdd42482bb62
appscale/gts
null
null
Replace python-memcache with pymemcache This adds connection pooling so that the client does not need to establish a new connection for every operation.
[ { "change_type": "MODIFY", "diff": "@@ -16,16 +16,17 @@\n #\n \n \"\"\" Non-stub version of the memcache API, keeping all data in memcached.\n-Uses the python-memcached library to interface with memcached.\n+Uses the pymemcache library to interface with memcached.\n \"\"\"\n import base64\n import cPickle\n import logging\n import hashlib\n-import memcache\n import os\n import time\n \n+from pymemcache.client.hash import HashClient\n+\n from google.appengine.api import apiproxy_stub\n from google.appengine.api.memcache import memcache_service_pb\n from google.appengine.runtime import apiproxy_errors\n@@ -46,7 +47,7 @@ class MemcacheService(apiproxy_stub.APIProxyStub):\n This service keeps all data in any external servers running memcached.\n \"\"\"\n # The memcached default port.\n- MEMCACHE_PORT = \"11211\"\n+ MEMCACHE_PORT = 11211\n \n # An AppScale file which has a list of IPs running memcached.\n APPSCALE_MEMCACHE_FILE = \"/etc/appscale/memcache_ips\"\n@@ -70,9 +71,10 @@ class MemcacheService(apiproxy_stub.APIProxyStub):\n else:\n all_ips = ['localhost']\n \n- memcaches = [ip + \":\" + self.MEMCACHE_PORT for ip in all_ips if ip != '']\n+ memcaches = [(ip, self.MEMCACHE_PORT) for ip in all_ips if ip]\n memcaches.sort() \n- self._memcache = memcache.Client(memcaches, debug=0)\n+ self._memcache = HashClient(memcaches, connect_timeout=5, timeout=1,\n+ use_pooling=True)\n \n def _Dynamic_Get(self, request, response):\n \"\"\"Implementation of gets for memcache.\n@@ -176,19 +178,26 @@ class MemcacheService(apiproxy_stub.APIProxyStub):\n cas_id = 0\n \n key = self._GetKey(namespace, request.key())\n- value = self._memcache.get(key)\n- if value is None:\n- if not request.has_initial_value():\n- return None\n+ value, real_cas_id = self._memcache.gets(key)\n+ if value is None and not request.has_initial_value():\n+ return\n \n+ if value is None:\n flags = TYPE_INT\n if request.has_initial_flags():\n flags = request.initial_flags()\n \n- stored_value = str(request.initial_value())\n- else:\n- flags, cas_id, stored_value = cPickle.loads(value)\n+ initial_value = cPickle.dumps(\n+ [flags, cas_id, str(request.initial_value())])\n+ success = self._memcache.add(key, initial_value)\n+ if not success:\n+ return\n+\n+ value, real_cas_id = self._memcache.gets(key)\n+ if value is None:\n+ return\n \n+ flags, cas_id, stored_value = cPickle.loads(value)\n if flags == TYPE_INT:\n new_value = int(stored_value)\n elif flags == TYPE_LONG:\n@@ -201,7 +210,7 @@ class MemcacheService(apiproxy_stub.APIProxyStub):\n \n new_stored_value = cPickle.dumps([flags, cas_id + 1, str(new_value)])\n try:\n- self._memcache.cas(key, new_stored_value)\n+ self._memcache.cas(key, new_stored_value, real_cas_id)\n except Exception, e:\n logging.error(str(e))\n return None\n@@ -271,7 +280,7 @@ class MemcacheService(apiproxy_stub.APIProxyStub):\n logging.warn(\"No stats for key '%s'.\" % key) \n return _type(stats_dict.get(key, '0'))\n \n- for server, server_stats in self._memcache.get_stats():\n+ for server, server_stats in self._memcache.stats():\n num_servers += 1\n hits_total += get_stats_value(server_stats, 'get_hits')\n misses_total += get_stats_value(server_stats, 'get_misses')\n", "new_path": "AppServer/google/appengine/api/memcache/memcache_distributed.py", "old_path": "AppServer/google/appengine/api/memcache/memcache_distributed.py" }, { "change_type": "MODIFY", "diff": "@@ -41,6 +41,7 @@ case \"$1\" in\n installtornado\n installpycrypto\n installpycapnp\n+ installpymemcache\n installpyyaml\n installsoappy\n 
installzookeeper\n", "new_path": "debian/appscale_install.sh", "old_path": "debian/appscale_install.sh" }, { "change_type": "MODIFY", "diff": "@@ -563,6 +563,11 @@ installpycapnp()\n pipwrapper pycapnp\n }\n \n+installpymemcache()\n+{\n+ pipwrapper pymemcache\n+}\n+\n installpyyaml()\n {\n # The python-yaml package on Xenial uses over 30M of memory.\n", "new_path": "debian/appscale_install_functions.sh", "old_path": "debian/appscale_install_functions.sh" }, { "change_type": "MODIFY", "diff": "@@ -71,7 +71,6 @@ Depends: appscale-tools,\n python-flexmock,\n python-lxml,\n python-m2crypto,\n- python-memcache,\n python-mysqldb,\n python-numpy,\n python-openssl,\n", "new_path": "debian/control.bionic", "old_path": "debian/control.bionic" }, { "change_type": "MODIFY", "diff": "@@ -67,7 +67,6 @@ Depends: appscale-tools,\n python-imaging,\n python-lxml,\n python-m2crypto,\n- python-memcache,\n python-mysqldb,\n python-numpy,\n python-openssl,\n", "new_path": "debian/control.jessie", "old_path": "debian/control.jessie" }, { "change_type": "MODIFY", "diff": "@@ -72,7 +72,6 @@ Depends: appscale-tools,\n python-imaging,\n python-lxml,\n python-m2crypto,\n- python-memcache,\n python-mysqldb,\n python-numpy,\n python-openssl,\n", "new_path": "debian/control.stretch", "old_path": "debian/control.stretch" }, { "change_type": "MODIFY", "diff": "@@ -70,7 +70,6 @@ Depends: appscale-tools,\n python-imaging,\n python-lxml,\n python-m2crypto,\n- python-memcache,\n python-mysqldb,\n python-numpy,\n python-openssl,\n", "new_path": "debian/control.trusty", "old_path": "debian/control.trusty" }, { "change_type": "MODIFY", "diff": "@@ -72,7 +72,6 @@ Depends: appscale-tools,\n python-imaging,\n python-lxml,\n python-m2crypto,\n- python-memcache,\n python-mysqldb,\n python-numpy,\n python-openssl,\n", "new_path": "debian/control.xenial", "old_path": "debian/control.xenial" } ]
421b9059a22a90e5d7b5524fbef5278c09c07b73
appscale/gts
null
null
Use memcached's native incr and decr operations This simplifies the process of incrementing and decrementing values.
[ { "change_type": "MODIFY", "diff": "@@ -39,8 +39,6 @@ MemcacheIncrementRequest = memcache_service_pb.MemcacheIncrementRequest\n MemcacheIncrementResponse = memcache_service_pb.MemcacheIncrementResponse\n MemcacheDeleteResponse = memcache_service_pb.MemcacheDeleteResponse\n \n-from google.appengine.api.memcache import TYPE_INT\n-from google.appengine.api.memcache import TYPE_LONG\n from google.appengine.api.memcache import MAX_KEY_SIZE\n \n # Exceptions that indicate a temporary issue with the backend.\n@@ -49,6 +47,12 @@ TRANSIENT_ERRORS = (MemcacheError, socket.error, socket.timeout)\n INVALID_VALUE = memcache_service_pb.MemcacheServiceError.INVALID_VALUE\n UNSPECIFIED_ERROR = memcache_service_pb.MemcacheServiceError.UNSPECIFIED_ERROR\n \n+# The maximum value that memcached will increment to before wrapping around.\n+MAX_INCR = 2 ** 64 - 1\n+\n+# The minimum value that memcached will decrement to.\n+MIN_DECR = 0\n+\n \n def serializer(key, value_and_flags):\n return value_and_flags[0], value_and_flags[1]\n@@ -206,52 +210,53 @@ class MemcacheService(apiproxy_stub.APIProxyStub):\n request: A MemcacheIncrementRequest instance.\n \n Returns:\n- An integer or long if the offset was successful, None on error.\n+ An integer indicating the new value.\n+ Raises:\n+ ApplicationError if unable to perform the mutation.\n \"\"\"\n- if not request.delta():\n- return None\n-\n- key = self._GetKey(namespace, request.key())\n- value_tuple, cas_id = self._memcache.gets(key)\n- if value_tuple is None and not request.has_initial_value():\n- return\n-\n- if value_tuple is None:\n- flags = TYPE_INT\n- if request.has_initial_flags():\n- flags = request.initial_flags()\n-\n- initial_value = (request.initial_value(), flags)\n- success = self._memcache.add(key, initial_value)\n- if not success:\n- return\n-\n- value_tuple, cas_id = self._memcache.gets(key)\n- if value_tuple is None:\n- return\n-\n- value, flags = value_tuple\n- if flags == TYPE_INT:\n- new_value = int(value)\n- elif flags == TYPE_LONG:\n- new_value = long(value)\n+ encoded_key = self._GetKey(namespace, request.key())\n+ method = self._memcache.incr\n+ if request.direction() == MemcacheIncrementRequest.DECREMENT:\n+ method = self._memcache.decr\n+\n+ try:\n+ response = method(encoded_key, request.delta())\n+ except MemcacheClientError as error:\n+ raise apiproxy_errors.ApplicationError(INVALID_VALUE, str(error))\n+ except TRANSIENT_ERRORS as error:\n+ raise apiproxy_errors.ApplicationError(\n+ UNSPECIFIED_ERROR, 'Transient memcache error: {}'.format(error))\n+\n+ if response is None and not request.has_initial_value():\n+ raise apiproxy_errors.ApplicationError(\n+ UNSPECIFIED_ERROR, 'Key does not exist')\n+\n+ if response is not None:\n+ return response\n+\n+ # If the key was not present and an initial value was provided, perform\n+ # the mutation client-side and set the key if it still doesn't exist.\n+ flags = 0\n+ if request.has_initial_flags():\n+ flags = request.initial_flags()\n \n if request.direction() == MemcacheIncrementRequest.INCREMENT:\n- new_value += request.delta()\n- elif request.direction() == MemcacheIncrementRequest.DECREMENT:\n- new_value = max(new_value-request.delta(), 0)\n+ updated_val = request.initial_value() + request.delta()\n+ else:\n+ updated_val = request.initial_value() - request.delta()\n \n- new_stored_value = (str(new_value), flags)\n+ updated_val = max(updated_val, 0) % (MAX_INCR + 1)\n try:\n- response = self._memcache.cas(key, new_stored_value, cas_id)\n- except (TRANSIENT_ERRORS + (MemcacheClientError,)) as 
error:\n- logging.error(str(error))\n- return None\n+ response = self._memcache.add(encoded_key, (str(updated_val), flags))\n+ except (TRANSIENT_ERRORS + (MemcacheClientError,)):\n+ raise apiproxy_errors.ApplicationError(\n+ UNSPECIFIED_ERROR, 'Unable to set initial value')\n \n- if not response:\n- return\n+ if response is False:\n+ raise apiproxy_errors.ApplicationError(\n+ UNSPECIFIED_ERROR, 'Unable to set initial value')\n \n- return new_value\n+ return updated_val\n \n def _Dynamic_Increment(self, request, response):\n \"\"\"Implementation of increment for memcache.\n@@ -261,12 +266,8 @@ class MemcacheService(apiproxy_stub.APIProxyStub):\n response: A MemcacheIncrementResponse protocol buffer.\n \"\"\"\n new_value = self._Increment(request.name_space(), request)\n- if new_value is None:\n- raise apiproxy_errors.ApplicationError(\n- memcache_service_pb.MemcacheServiceError.UNSPECIFIED_ERROR)\n response.set_new_value(new_value)\n \n-\n def _Dynamic_BatchIncrement(self, request, response):\n \"\"\"Implementation of batch increment for memcache.\n \n@@ -274,15 +275,20 @@ class MemcacheService(apiproxy_stub.APIProxyStub):\n request: A MemcacheBatchIncrementRequest protocol buffer.\n response: A MemcacheBatchIncrementResponse protocol buffer.\n \"\"\"\n- namespace = request.name_space()\n for request_item in request.item_list():\n- new_value = self._Increment(namespace, request_item)\n item = response.add_item()\n- if new_value is None:\n- item.set_increment_status(MemcacheIncrementResponse.NOT_CHANGED)\n- else:\n- item.set_increment_status(MemcacheIncrementResponse.OK)\n- item.set_new_value(new_value)\n+ try:\n+ new_value = self._Increment(request.name_space(), request_item)\n+ except apiproxy_errors.ApplicationError as error:\n+ if error.application_error == INVALID_VALUE:\n+ item.set_increment_status(MemcacheIncrementResponse.NOT_CHANGED)\n+ else:\n+ item.set_increment_status(MemcacheIncrementResponse.ERROR)\n+\n+ continue\n+\n+ item.set_increment_status(MemcacheIncrementResponse.OK)\n+ item.set_new_value(new_value)\n \n def _Dynamic_FlushAll(self, request, response):\n \"\"\"Implementation of MemcacheService::FlushAll().\n", "new_path": "AppServer/google/appengine/api/memcache/memcache_distributed.py", "old_path": "AppServer/google/appengine/api/memcache/memcache_distributed.py" } ]
6b41cdad4023a21c21dbb78f9bacfbfe5bcf9e8f
appscale/gts
null
null
Add service account name field to backup form This allows users to schedule a mapreduce-backed job with a custom service account name for backing up to an arbitrary GCS account.
[ { "change_type": "MODIFY", "diff": "@@ -675,7 +675,8 @@ def _perform_backup(run_as_a_service, kinds, selected_namespace,\n \n if not gcs_path_prefix:\n raise BackupValidationError('GCS path missing.')\n- bucket_name, path_prefix = validate_and_split_gcs_path(gcs_path_prefix)\n+ bucket_name, path_prefix = validate_and_split_gcs_path(\n+ gcs_path_prefix, mapper_params['account_id'])\n mapper_params['gs_bucket_name'] = (\n '%s/%s' % (bucket_name, path_prefix)).rstrip('/')\n naming_format = '$name/$id/output-$num'\n@@ -809,6 +810,12 @@ class DoBackupHandler(BaseDoHandler):\n if BackupInformation.name_exists(backup):\n raise BackupValidationError('Backup \"%s\" already exists.' % backup)\n mapper_params = _get_basic_mapper_params(self)\n+\n+ # AppScale: Use custom service account if specified.\n+ account_id = self.request.get('service_account_name', None)\n+ mapper_params['account_id'] = account_id\n+ mapper_params['tmp_account_id'] = account_id\n+\n backup_result = _perform_backup(\n self.request.get('run_as_a_service', False),\n self.request.get_all('kind'),\n@@ -1253,12 +1260,14 @@ def BackupCompleteHandler(operation, job_id, mapreduce_state):\n mapreduce_spec.params['backup_info_pk'],\n _get_gcs_path_prefix_from_params_dict(mapreduce_spec.mapper.params),\n filenames,\n- mapreduce_spec.params.get('done_callback_queue'))\n+ mapreduce_spec.params.get('done_callback_queue'),\n+ mapreduce_spec.mapper.params['output_writer']['account_id'])\n \n \n @db.transactional\n def _perform_backup_complete(\n- operation, job_id, kind, backup_info_pk, gcs_path_prefix, filenames, queue):\n+ operation, job_id, kind, backup_info_pk, gcs_path_prefix, filenames, queue,\n+ account_id=None):\n backup_info = BackupInformation.get(backup_info_pk)\n if backup_info:\n if job_id in backup_info.active_jobs:\n@@ -1277,6 +1286,7 @@ def _perform_backup_complete(\n if operation.status == utils.DatastoreAdminOperation.STATUS_COMPLETED:\n deferred.defer(finalize_backup_info, backup_info.key(),\n gcs_path_prefix,\n+ account_id,\n _url=config.DEFERRED_PATH,\n _queue=queue,\n _transactional=True)\n@@ -1284,7 +1294,7 @@ def _perform_backup_complete(\n logging.warn('BackupInfo was not found for %s', backup_info_pk)\n \n \n-def finalize_backup_info(backup_info_pk, gcs_path_prefix):\n+def finalize_backup_info(backup_info_pk, gcs_path_prefix, account_id=None):\n \"\"\"Finalize the state of BackupInformation and creates info file for GS.\"\"\"\n \n def get_backup_info():\n@@ -1301,7 +1311,8 @@ def finalize_backup_info(backup_info_pk, gcs_path_prefix):\n \n \n \n- gs_handle = BackupInfoWriter(gcs_path_prefix).write(backup_info)[0]\n+ backup_info_writer = BackupInfoWriter(gcs_path_prefix, account_id)\n+ gs_handle = backup_info_writer.write(backup_info)[0]\n \n def set_backup_info_with_finalize_info():\n backup_info = get_backup_info()\n@@ -1326,13 +1337,14 @@ def parse_backup_info_file(content):\n class BackupInfoWriter(object):\n \"\"\"A class for writing Datastore backup metadata files.\"\"\"\n \n- def __init__(self, gcs_path_prefix):\n+ def __init__(self, gcs_path_prefix, account_id=None):\n \"\"\"Construct a BackupInfoWriter.\n \n Args:\n gcs_path_prefix: (string) gcs prefix used for creating the backup.\n \"\"\"\n self.__gcs_path_prefix = gcs_path_prefix\n+ self._account_id = account_id\n \n def write(self, backup_info):\n \"\"\"Write the metadata files for the given backup_info.\n@@ -1364,7 +1376,7 @@ class BackupInfoWriter(object):\n \"\"\"\n filename = self._generate_filename(backup_info, '.backup_info')\n backup_info.gs_handle 
= filename\n- with GCSUtil.open(filename, 'w') as info_file:\n+ with GCSUtil.open(filename, 'w', _account_id=self._account_id) as info_file:\n with records.RecordsWriter(info_file) as writer:\n \n writer.write('1')\n@@ -1397,7 +1409,7 @@ class BackupInfoWriter(object):\n backup = self._create_kind_backup(backup_info, kind_backup_files)\n filename = self._generate_filename(\n backup_info, '.%s.backup_info' % kind_backup_files.backup_kind)\n- self._write_kind_backup_info_file(filename, backup)\n+ self._write_kind_backup_info_file(filename, backup, self._account_id)\n filenames.append(filename)\n return filenames\n \n@@ -1425,14 +1437,14 @@ class BackupInfoWriter(object):\n return backup\n \n @classmethod\n- def _write_kind_backup_info_file(cls, filename, backup):\n+ def _write_kind_backup_info_file(cls, filename, backup, account_id=None):\n \"\"\"Writes a kind backup_info.\n \n Args:\n filename: The name of the file to be created as string.\n backup: apphosting.ext.datastore_admin.Backup proto.\n \"\"\"\n- with GCSUtil.open(filename, 'w') as f:\n+ with GCSUtil.open(filename, 'w', _account_id=account_id) as f:\n f.write(backup.SerializeToString())\n \n \n@@ -1948,7 +1960,7 @@ def is_accessible_bucket_name(bucket_name):\n return result and result.status_code == 200\n \n \n-def verify_bucket_writable(bucket_name):\n+def verify_bucket_writable(bucket_name, account_id=None):\n \"\"\"Verify the application can write to the specified bucket.\n \n Args:\n@@ -1959,7 +1971,8 @@ def verify_bucket_writable(bucket_name):\n \"\"\"\n path = '/gs/%s/%s' % (bucket_name, TEST_WRITE_FILENAME_PREFIX)\n try:\n- gcs_stats = GCSUtil.listbucket(path, max_keys=MAX_KEYS_LIST_SIZE)\n+ gcs_stats = GCSUtil.listbucket(path, max_keys=MAX_KEYS_LIST_SIZE,\n+ _account_id=account_id)\n file_names = [f.filename for f in gcs_stats]\n except (cloudstorage.AuthorizationError, cloudstorage.ForbiddenError):\n raise BackupValidationError('Bucket \"%s\" not accessible' % bucket_name)\n@@ -1981,12 +1994,12 @@ def verify_bucket_writable(bucket_name):\n (bucket_name, TEST_WRITE_FILENAME_PREFIX, gen))\n file_name_try += 1\n try:\n- with GCSUtil.open(file_name, 'w') as f:\n+ with GCSUtil.open(file_name, 'w', _account_id=account_id) as f:\n f.write('test')\n except cloudstorage.ForbiddenError:\n raise BackupValidationError('Bucket \"%s\" is not writable' % bucket_name)\n try:\n- GCSUtil.delete(file_name)\n+ GCSUtil.delete(file_name, _account_id=account_id)\n except cloudstorage.Error:\n logging.warn('Failed to delete test file %s', file_name)\n \n@@ -2016,11 +2029,11 @@ def parse_gs_handle(gs_handle):\n return (tokens[0], '') if len(tokens) == 1 else tuple(tokens)\n \n \n-def validate_and_split_gcs_path(gcs_path):\n+def validate_and_split_gcs_path(gcs_path, account_id=None):\n bucket_name, path = parse_gs_handle(gcs_path)\n path = path.rstrip('/')\n validate_gcs_bucket_name(bucket_name)\n- verify_bucket_writable(bucket_name)\n+ verify_bucket_writable(bucket_name, account_id)\n return bucket_name, path\n \n \n", "new_path": "AppServer/google/appengine/ext/datastore_admin/backup_handler.py", "old_path": "AppServer/google/appengine/ext/datastore_admin/backup_handler.py" }, { "change_type": "MODIFY", "diff": "@@ -93,6 +93,14 @@\n <input type=\"text\" id=\"gs_bucket_name\" name=\"gs_bucket_name\" value=\"\" />\n </td>\n </tr>\n+ <tr id=\"gs_service_account_tr\">\n+ <td>\n+ Service account name\n+ <img class=\"ae-help-icon\" src=\"{{ base_path }}/static/img/help.gif\" height=\"14\" width=\"14\" alt=\"help\"\n+ title=\"Use the client_email field 
from any custom service accounts you've defined.\">\n+ <input name=\"service_account_name\">\n+ </td>\n+ </tr>\n </table>\n \n <table style=\"margin-top: 1em;\"><tr>\n", "new_path": "AppServer/google/appengine/ext/datastore_admin/templates/confirm_backup.html", "old_path": "AppServer/google/appengine/ext/datastore_admin/templates/confirm_backup.html" } ]
d13328c07464a4be86101a345e32760d0557389e
dedupeio/dedupe
null
null
Move project metadata to pyproject.toml It's encouraged for projects to define their metadata statically. Makes static analyzers and other tooling work better.
[ { "change_type": "MODIFY", "diff": "@@ -1,7 +1,63 @@\n+[project]\n+name = \"dedupe\"\n+description = \"A python library for accurate and scaleable data deduplication and entity-resolution\"\n+version = \"2.0.17\"\n+readme = \"README.md\"\n+requires-python = \">=3.7\"\n+license = {file = \"LICENSE\"}\n+keywords = []\n+authors = [\n+ { name = \"Forest Gregg\", email = \"fgregg@datamade.us\" },\n+]\n+classifiers = [\n+ \"Development Status :: 4 - Beta\",\n+ \"Intended Audience :: Developers\",\n+ \"Intended Audience :: Science/Research\",\n+ \"License :: OSI Approved :: MIT License\",\n+ \"Natural Language :: English\",\n+ \"Operating System :: MacOS :: MacOS X\",\n+ \"Operating System :: Microsoft :: Windows\",\n+ \"Operating System :: POSIX\",\n+ \"Programming Language :: Cython\",\n+ \"Programming Language :: Python :: 3\",\n+ \"Topic :: Software Development :: Libraries :: Python Modules\",\n+ \"Topic :: Scientific/Engineering\",\n+ \"Topic :: Scientific/Engineering :: Information Analysis\",\n+]\n+dependencies = [\n+ \"scikit-learn\",\n+ \"affinegap>=1.3\",\n+ \"categorical-distance>=1.9\",\n+ \"dedupe-variable-datetime\",\n+ \"numpy>=1.13\",\n+ \"doublemetaphone\",\n+ \"highered>=0.2.0\",\n+ \"simplecosine>=1.2\",\n+ \"haversine>=0.4.1\",\n+ \"BTrees>=4.1.4\",\n+ \"zope.index\",\n+ \"Levenshtein_search==1.4.5\",\n+ \"typing_extensions\",\n+]\n+\n+[project.urls]\n+Homepage = \"https://github.com/dedupeio/dedupe\"\n+Issues = \"https://github.com/dedupeio/dedupe/issues\"\n+Documentation = \"https://docs.dedupe.io/en/latest/\"\n+Examples = \"https://github.com/dedupeio/dedupe-examples\"\n+Twitter = \"https://twitter.com/DedupeIo\"\n+Changelog = \"https://github.com/dedupeio/dedupe/blob/main/CHANGELOG.md\"\n+MailingList = \"https://groups.google.com/forum/#!forum/open-source-deduplication\"\n+\n+\n [build-system]\n-requires = [\"setuptools\",\n+requires = [\"setuptools>=64\",\n \"wheel\",\n \"cython\"]\n+build-backend = \"setuptools.build_meta\"\n+\n+[tool.setuptools]\n+packages = [\"dedupe\", \"dedupe.variables\"]\n \n [tool.mypy]\n plugins = \"numpy.typing.mypy_plugin\"\n", "new_path": "pyproject.toml", "old_path": "pyproject.toml" }, { "change_type": "MODIFY", "diff": "@@ -1,6 +1,3 @@\n-#!/usr/bin/python\n-# -*- coding: utf-8 -*-\n-\n try:\n from setuptools import Extension, setup\n except ImportError:\n@@ -10,68 +7,7 @@ except ImportError:\n \n from Cython.Build import cythonize\n \n-install_requires = [\n- \"scikit-learn\",\n- \"affinegap>=1.3\",\n- \"categorical-distance>=1.9\",\n- \"dedupe-variable-datetime\",\n- \"numpy>=1.13\",\n- \"doublemetaphone\",\n- \"highered>=0.2.0\",\n- \"simplecosine>=1.2\",\n- \"haversine>=0.4.1\",\n- \"BTrees>=4.1.4\",\n- \"zope.index\",\n- \"Levenshtein_search==1.4.5\",\n- \"typing_extensions\",\n-]\n-\n \n setup(\n- name=\"dedupe\",\n- url=\"https://github.com/dedupeio/dedupe\",\n- version=\"2.0.17\",\n- author=\"Forest Gregg\",\n- author_email=\"fgregg@datamade.us\",\n- description=\"A python library for accurate and scaleable data deduplication and entity-resolution\",\n- packages=[\"dedupe\", \"dedupe.variables\"],\n- ext_modules=cythonize(\n- [Extension(\"dedupe.cpredicates\", [\"dedupe/cpredicates.pyx\"])]\n- ),\n- install_requires=install_requires,\n- python_requires=\">=3.7\",\n- classifiers=[\n- \"Development Status :: 4 - Beta\",\n- \"Intended Audience :: Developers\",\n- \"Intended Audience :: Science/Research\",\n- \"License :: OSI Approved :: MIT License\",\n- \"Natural Language :: English\",\n- \"Operating System :: MacOS :: MacOS X\",\n- 
\"Operating System :: Microsoft :: Windows\",\n- \"Operating System :: POSIX\",\n- \"Programming Language :: Cython\",\n- \"Programming Language :: Python :: 3\",\n- \"Topic :: Software Development :: Libraries :: Python Modules\",\n- \"Topic :: Scientific/Engineering\",\n- \"Topic :: Scientific/Engineering :: Information Analysis\",\n- ],\n- long_description=\"\"\"\n- dedupe is a library that uses machine learning to perform de-duplication and entity resolution quickly on structured data. dedupe is the open source engine for `dedupe.io <https://dedupe.io>`_\n-\n- **dedupe** will help you:\n-\n- * **remove duplicate entries** from a spreadsheet of names and addresses\n- * **link a list** with customer information to another with order history, even without unique customer id's\n- * take a database of campaign contributions and **figure out which ones were made by the same person**, even if the names were entered slightly differently for each record\n-\n- dedupe takes in human training data and comes up with the best rules for your dataset to quickly and automatically find similar records, even with very large databases.\n- \"\"\", # noqa: E501\n- project_urls={\n- \"Issues\": \"https://github.com/dedupeio/dedupe/issues\",\n- \"Documentation\": \"https://docs.dedupe.io/en/latest/\",\n- \"Examples\": \"https://github.com/dedupeio/dedupe-examples\",\n- \"Twitter\": \"https://twitter.com/DedupeIo\",\n- \"Changelog\": \"https://github.com/dedupeio/dedupe/blob/main/CHANGELOG.md\",\n- \"Mailing List\": \"https://groups.google.com/forum/#!forum/open-source-deduplication\",\n- },\n+ ext_modules=cythonize([Extension(\"dedupe.cpredicates\", [\"dedupe/cpredicates.pyx\"])])\n )\n", "new_path": "setup.py", "old_path": "setup.py" } ]
5ab79220d5e51bb1a13e40c82d2337b05b8e5967
sentinel-hub/eo-learn
null
null
Fix for logging when running multiple threads during execution. When running in multithreading mode, the log files for single eopatches would also log the information for the eopatches running in different threads. Adding a filter that passes only messages from the current thread fixes this problem.
[ { "change_type": "MODIFY", "diff": "@@ -16,6 +16,7 @@ file in the root directory of this source tree.\n \n import os\n import logging\n+import threading\n import traceback\n import concurrent.futures\n import datetime as dt\n@@ -24,6 +25,8 @@ from tqdm.auto import tqdm\n \n from .eoworkflow import EOWorkflow\n \n+from .utilities import LogFileFilter\n+\n LOGGER = logging.getLogger(__name__)\n \n \n@@ -149,15 +152,14 @@ class EOExecutor:\n \"\"\" Handles a single execution of a workflow\n \"\"\"\n workflow, input_args, log_path, return_results = process_args\n-\n- if log_path:\n- logger = logging.getLogger()\n- logger.setLevel(logging.DEBUG)\n- handler = cls._get_log_handler(log_path)\n- logger.addHandler(handler)\n-\n stats = {cls.STATS_START_TIME: dt.datetime.now()}\n try:\n+ if log_path:\n+ logger = logging.getLogger()\n+ logger.setLevel(logging.DEBUG)\n+ handler = cls._get_log_handler(log_path)\n+ logger.addHandler(handler)\n+\n results = workflow.execute(input_args, monitor=True)\n \n if return_results:\n@@ -169,6 +171,7 @@ class EOExecutor:\n stats[cls.STATS_END_TIME] = dt.datetime.now()\n \n if log_path:\n+ logger.info(msg='Pipeline failed.' if cls.STATS_ERROR else 'Pipeline finished.')\n handler.close()\n logger.removeHandler(handler)\n \n@@ -181,6 +184,7 @@ class EOExecutor:\n handler = logging.FileHandler(log_path)\n formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')\n handler.setFormatter(formatter)\n+ handler.addFilter(LogFileFilter(thread_name=threading.currentThread().getName()))\n \n return handler\n \n", "new_path": "core/eolearn/core/eoexecution.py", "old_path": "core/eolearn/core/eoexecution.py" }, { "change_type": "MODIFY", "diff": "@@ -13,6 +13,7 @@ file in the root directory of this source tree.\n \n import logging\n from collections import OrderedDict\n+from logging import Filter\n \n import numpy as np\n \n@@ -21,6 +22,20 @@ from .constants import FeatureType\n LOGGER = logging.getLogger(__name__)\n \n \n+class LogFileFilter(Filter):\n+ \"\"\" Filters log messages passed to log file\n+ \"\"\"\n+\n+ def __init__(self, thread_name, name=''):\n+ self.thread_name = thread_name\n+ super().__init__(name=name)\n+\n+ def filter(self, record):\n+ \"\"\" Shows everything from the thread that it was initialized in.\n+ \"\"\"\n+ return record.threadName == self.thread_name\n+\n+\n class FeatureParser:\n \"\"\" Takes a collection of features structured in a various ways and parses them into one way. It can parse features\n straight away or it can parse them only if they exist in a given `EOPatch`. 
If input format is not recognized or\n", "new_path": "core/eolearn/core/utilities.py", "old_path": "core/eolearn/core/utilities.py" }, { "change_type": "MODIFY", "diff": "@@ -73,6 +73,44 @@ class TestEOExecutor(unittest.TestCase):\n for name, log_filename in zip(execution_names, log_filenames):\n self.assertTrue(log_filename == 'eoexecution-{}.log'.format(name))\n \n+ def test_execution_logs_multiprocess(self):\n+ for execution_names in [None, [4, 'x', 'y', 'z']]:\n+ with tempfile.TemporaryDirectory() as tmp_dir_name:\n+ executor = EOExecutor(self.workflow, self.execution_args, save_logs=True,\n+ logs_folder=tmp_dir_name,\n+ execution_names=execution_names)\n+ executor.run(workers=3, multiprocess=True)\n+\n+ self.assertEqual(len(executor.execution_logs), 4)\n+ for log in executor.execution_logs:\n+ self.assertTrue(len(log.split()) >= 3)\n+\n+ log_filenames = sorted(os.listdir(executor.report_folder))\n+ self.assertEqual(len(log_filenames), 4)\n+\n+ if execution_names:\n+ for name, log_filename in zip(execution_names, log_filenames):\n+ self.assertTrue(log_filename == 'eoexecution-{}.log'.format(name))\n+\n+ def test_execution_logs_multithread(self):\n+ for execution_names in [None, [4, 'x', 'y', 'z']]:\n+ with tempfile.TemporaryDirectory() as tmp_dir_name:\n+ executor = EOExecutor(self.workflow, self.execution_args, save_logs=True,\n+ logs_folder=tmp_dir_name,\n+ execution_names=execution_names)\n+ executor.run(workers=3, multiprocess=False)\n+\n+ self.assertEqual(len(executor.execution_logs), 4)\n+ for log in executor.execution_logs:\n+ self.assertTrue(len(log.split()) >= 3)\n+\n+ log_filenames = sorted(os.listdir(executor.report_folder))\n+ self.assertEqual(len(log_filenames), 4)\n+\n+ if execution_names:\n+ for name, log_filename in zip(execution_names, log_filenames):\n+ self.assertTrue(log_filename == 'eoexecution-{}.log'.format(name))\n+\n def test_execution_stats(self):\n with tempfile.TemporaryDirectory() as tmp_dir_name:\n executor = EOExecutor(self.workflow, self.execution_args, logs_folder=tmp_dir_name)\n", "new_path": "core/eolearn/tests/test_eoexecutor.py", "old_path": "core/eolearn/tests/test_eoexecutor.py" } ]
444e7026771cce4ece0feff9f5f3c76f57c61ed3
xaynetwork/xaynet
null
null
Add subdirectories for aggregates To enable multiple aggregates in a clean way, the output directory will now contain a sub-directory for each group for which the aggregate function is called.
[ { "change_type": "MODIFY", "diff": "@@ -3,6 +3,7 @@ from typing import Dict, List, Optional, Tuple\n \n from absl import app, flags, logging\n \n+from xain.helpers import storage\n from xain.types import PlotValues, XticksLabels, XticksLocations\n \n from .plot import plot\n@@ -102,7 +103,8 @@ def aggregate() -> str:\n :returns: Absolut path to saved plot\n \"\"\"\n group_name = FLAGS.group_name\n- fname = f\"plot_final_task_accuracies_{group_name}.png\"\n+ dname = storage.create_output_subdir(group_name)\n+ fname = storage.fname_with_default_dir(\"plot_final_task_accuracies.png\", dname)\n \n (data, xticks_args) = prepare_aggregation_data(group_name)\n \n", "new_path": "xain/benchmark/aggregation/final_task_accuracies.py", "old_path": "xain/benchmark/aggregation/final_task_accuracies.py" }, { "change_type": "MODIFY", "diff": "@@ -66,8 +66,8 @@ def test_plot_final_task_accuracies(output_dir, group_name, monkeypatch):\n range(1, 12, 1),\n ),\n ]\n- fname = f\"plot_final_task_accuracies_{group_name}.png\"\n- expected_filepath = os.path.join(output_dir, fname)\n+ fname = f\"plot_final_task_accuracies.png\"\n+ expected_filepath = os.path.join(output_dir, group_name, fname)\n expected_sha1 = \"19cbae25328694a436842de89acbbf661020b4cf\"\n \n xticks_locations = range(1, 12, 1)\n", "new_path": "xain/benchmark/aggregation/final_task_accuracies_test.py", "old_path": "xain/benchmark/aggregation/final_task_accuracies_test.py" }, { "change_type": "MODIFY", "diff": "@@ -43,7 +43,7 @@ def plot(\n \n # if fname is an absolute path use fname directly otherwise assume\n # fname is filename and prepend output_dir\n- fname_abspath = storage.get_abspath(fname, FLAGS.output_dir)\n+ fname_abspath = storage.fname_with_default_dir(fname, FLAGS.output_dir)\n \n plt.figure()\n plt.ylim(0.0, ylim_max)\n", "new_path": "xain/benchmark/aggregation/plot.py", "old_path": "xain/benchmark/aggregation/plot.py" }, { "change_type": "MODIFY", "diff": "@@ -3,6 +3,7 @@ from typing import List, Tuple\n \n from absl import app, flags, logging\n \n+from xain.helpers import storage\n from xain.types import PlotValues\n \n from .plot import plot\n@@ -85,7 +86,8 @@ def aggregate() -> str:\n :returns: Absolut path to saved plot\n \"\"\"\n group_name = FLAGS.group_name\n- fname = f\"plot_task_accuracies_{group_name}.png\"\n+ dname = storage.create_output_subdir(group_name)\n+ fname = storage.fname_with_default_dir(\"plot_task_accuracies.png\", dname)\n \n data = prepare_aggregation_data(group_name)\n \n", "new_path": "xain/benchmark/aggregation/task_accuracies.py", "old_path": "xain/benchmark/aggregation/task_accuracies.py" }, { "change_type": "MODIFY", "diff": "@@ -22,8 +22,8 @@ def test_plot_task_accuracies(output_dir, group_name, monkeypatch):\n range(1, 12, 1),\n ),\n ]\n- fname = f\"plot_task_accuracies_{group_name}.png\"\n- expected_filepath = os.path.join(output_dir, fname)\n+ fname = f\"plot_task_accuracies.png\"\n+ expected_filepath = os.path.join(output_dir, group_name, fname)\n expected_sha1 = \"7138bde2b95eedda6b05b665cc35a6cf204e35e1\"\n \n def mock_prepare_aggregation_data(_: str):\n", "new_path": "xain/benchmark/aggregation/task_accuracies_test.py", "old_path": "xain/benchmark/aggregation/task_accuracies_test.py" }, { "change_type": "MODIFY", "diff": "@@ -130,16 +130,16 @@ def plot_fashion_mnist_dist():\n plt.plot(xs, np.array(dist))\n plt.legend(legend, loc=\"upper left\")\n \n- fname_abspath = storage.get_abspath(\n- \"plot_fashion_mnist_partition_volume_dist\", FLAGS.output_dir\n- )\n- plt.savefig(fname=fname_abspath, 
format=FORMAT)\n+ dname = storage.create_output_subdir(\"partition_volume_distributions\")\n+ fname = storage.fname_with_default_dir(\"plot_fashion_mnist\", dname)\n+\n+ plt.savefig(fname=fname, format=FORMAT)\n \n # FIXME: Matplotlib is currently using agg, which is a non-GUI\n # backend, so cannot show the figure.\n # plt.show()\n \n- return fname_abspath\n+ return fname\n \n \n def main():\n", "new_path": "xain/generator/partition_volume_distributions.py", "old_path": "xain/generator/partition_volume_distributions.py" }, { "change_type": "MODIFY", "diff": "@@ -29,8 +29,19 @@ def listdir_recursive(dname: str, relpath=True):\n return files\n \n \n-def get_abspath(fname: str, dname: str = None) -> str:\n+def create_output_subdir(dname: str) -> str:\n+ if os.path.isabs(dname):\n+ raise Exception(\"Please provide a relative directory name\")\n \n+ dname = os.path.join(FLAGS.output_dir, dname)\n+\n+ os.makedirs(dname, exist_ok=True)\n+\n+ return dname\n+\n+\n+def fname_with_default_dir(fname: str, dname: str = None) -> str:\n+ \"\"\"Returns fname if its a absolute path otherwise joins it with dname\"\"\"\n if os.path.isabs(fname):\n return fname\n \n@@ -41,12 +52,12 @@ def get_abspath(fname: str, dname: str = None) -> str:\n \n \n def write_json(results: Dict, fname: str):\n- fname = get_abspath(fname, FLAGS.output_dir)\n+ fname = fname_with_default_dir(fname, FLAGS.output_dir)\n with open(fname, \"w\") as outfile:\n json.dump(results, outfile, indent=2, sort_keys=True)\n \n \n def read_json(fname: str):\n- fname = get_abspath(fname, FLAGS.output_dir)\n+ fname = fname_with_default_dir(fname, FLAGS.output_dir)\n with open(fname, \"r\") as outfile:\n return json.loads(outfile.read())\n", "new_path": "xain/helpers/storage.py", "old_path": "xain/helpers/storage.py" }, { "change_type": "MODIFY", "diff": "@@ -3,25 +3,25 @@ import os\n from . import storage\n \n \n-def test_get_abspath_fname_with_absolute_path():\n+def test_fname_with_default_dir_absolute_path():\n # Prepare\n fname = \"/my/absolute/path/myfile\"\n expected_abspath = fname\n \n # Execute\n- actual_abspath = storage.get_abspath(fname)\n+ actual_abspath = storage.fname_with_default_dir(fname)\n \n # Assert\n assert expected_abspath == actual_abspath\n \n \n-def test_get_abspath_fname_only_filename(output_dir):\n+def test_fname_with_default_dir_relative_path(output_dir):\n # Prepare\n fname = \"myfile\"\n expected_abspath = os.path.join(output_dir, fname)\n \n # Execute\n- actual_abspath = storage.get_abspath(fname, output_dir)\n+ actual_abspath = storage.fname_with_default_dir(fname, output_dir)\n \n # Assert\n assert expected_abspath == actual_abspath\n", "new_path": "xain/helpers/storage_test.py", "old_path": "xain/helpers/storage_test.py" } ]
7c1a73370bd6ffb091dbc7cb811ee447f6e176aa
armmbed/mbed-crypto
null
null
Add RepoVersion class to make handling of many arguments easier There are a number of arguments being passed around, nearly all of which are duplicated between the old and new versions. Moving these into a separate class should hopefully make it simpler to follow what is being done.
[ { "change_type": "MODIFY", "diff": "@@ -28,23 +28,37 @@ import fnmatch\n import xml.etree.ElementTree as ET\n \n \n+class RepoVersion(object):\n+\n+ def __init__(self, version, repository, revision,\n+ crypto_repository, crypto_revision):\n+ \"\"\"Class containing details for a particular revision.\n+\n+ version: either 'old' or 'new'\n+ repository: repository for git revision\n+ revision: git revision for comparison\n+ crypto_repository: repository for git revision of crypto submodule\n+ crypto_revision: git revision of crypto submodule\n+ \"\"\"\n+ self.version = version\n+ self.repository = repository\n+ self.revision = revision\n+ self.crypto_repository = crypto_repository\n+ self.crypto_revision = crypto_revision\n+ self.abi_dumps = {}\n+ self.modules = {}\n+\n+\n class AbiChecker(object):\n \"\"\"API and ABI checker.\"\"\"\n \n- def __init__(self, report_dir, old_repo, old_rev, old_crypto_rev,\n- old_crypto_repo, new_repo, new_rev, new_crypto_rev,\n- new_crypto_repo, keep_all_reports, brief, skip_file=None):\n+ def __init__(self, old_version, new_version, report_dir,\n+ keep_all_reports, brief, skip_file=None):\n \"\"\"Instantiate the API/ABI checker.\n \n+ old_version: RepoVersion containing details to compare against\n+ new_version: RepoVersion containing details to check\n report_dir: directory for output files\n- old_repo: repository for git revision to compare against\n- old_rev: reference git revision to compare against\n- old_crypto_rev: reference git revision for old crypto submodule\n- old_crypto_repo: repository for git revision for old crypto submodule\n- new_repo: repository for git revision to check\n- new_rev: git revision to check\n- new_crypto_rev: reference git revision for new crypto submodule\n- new_crypto_repo: repository for git revision for new crypto submodule\n keep_all_reports: if false, delete old reports\n brief: if true, output shorter report to stdout\n skip_file: path to file containing symbols and types to skip\n@@ -56,19 +70,10 @@ class AbiChecker(object):\n self.keep_all_reports = keep_all_reports\n self.can_remove_report_dir = not (os.path.isdir(self.report_dir) or\n keep_all_reports)\n- self.old_repo = old_repo\n- self.old_rev = old_rev\n- self.old_crypto_rev = old_crypto_rev\n- self.old_crypto_repo = old_crypto_repo\n- self.new_repo = new_repo\n- self.new_rev = new_rev\n- self.new_crypto_rev = new_crypto_rev\n- self.new_crypto_repo = new_crypto_repo\n+ self.old_version = old_version\n+ self.new_version = new_version\n self.skip_file = skip_file\n self.brief = brief\n- self.mbedtls_modules = {\"old\": {}, \"new\": {}}\n- self.old_dumps = {}\n- self.new_dumps = {}\n self.git_command = \"git\"\n self.make_command = \"make\"\n \n@@ -90,18 +95,19 @@ class AbiChecker(object):\n if not shutil.which(command):\n raise Exception(\"{} not installed, aborting\".format(command))\n \n- def get_clean_worktree_for_git_revision(self, remote_repo, git_rev):\n- \"\"\"Make a separate worktree with git_rev checked out.\n+ def get_clean_worktree_for_git_revision(self, version):\n+ \"\"\"Make a separate worktree with version.revision checked out.\n Do not modify the current worktree.\"\"\"\n git_worktree_path = tempfile.mkdtemp()\n- if remote_repo:\n+ if version.repository:\n self.log.info(\n \"Checking out git worktree for revision {} from {}\".format(\n- git_rev, remote_repo\n+ version.revision, version.repository\n )\n )\n fetch_process = subprocess.Popen(\n- [self.git_command, \"fetch\", remote_repo, git_rev],\n+ [self.git_command, \"fetch\",\n+ version.repository, 
version.revision],\n cwd=self.repo_path,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n@@ -112,10 +118,10 @@ class AbiChecker(object):\n raise Exception(\"Fetching revision failed, aborting\")\n worktree_rev = \"FETCH_HEAD\"\n else:\n- self.log.info(\n- \"Checking out git worktree for revision {}\".format(git_rev)\n- )\n- worktree_rev = git_rev\n+ self.log.info(\"Checking out git worktree for revision {}\".format(\n+ version.revision\n+ ))\n+ worktree_rev = version.revision\n worktree_process = subprocess.Popen(\n [self.git_command, \"worktree\", \"add\", \"--detach\",\n git_worktree_path, worktree_rev],\n@@ -129,8 +135,7 @@ class AbiChecker(object):\n raise Exception(\"Checking out worktree failed, aborting\")\n return git_worktree_path\n \n- def update_git_submodules(self, git_worktree_path, crypto_repo,\n- crypto_rev):\n+ def update_git_submodules(self, git_worktree_path, version):\n process = subprocess.Popen(\n [self.git_command, \"submodule\", \"update\", \"--init\", '--recursive'],\n cwd=git_worktree_path,\n@@ -142,14 +147,14 @@ class AbiChecker(object):\n if process.returncode != 0:\n raise Exception(\"git submodule update failed, aborting\")\n if not (os.path.exists(os.path.join(git_worktree_path, \"crypto\"))\n- and crypto_rev):\n+ and version.crypto_revision):\n return\n \n- if crypto_repo:\n+ if version.crypto_repository:\n shutil.rmtree(os.path.join(git_worktree_path, \"crypto\"))\n clone_process = subprocess.Popen(\n- [self.git_command, \"clone\", crypto_repo,\n- \"--branch\", crypto_rev, \"crypto\"],\n+ [self.git_command, \"clone\", version.crypto_repository,\n+ \"--branch\", version.crypto_revision, \"crypto\"],\n cwd=git_worktree_path,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n@@ -160,7 +165,7 @@ class AbiChecker(object):\n raise Exception(\"git clone failed, aborting\")\n else:\n checkout_process = subprocess.Popen(\n- [self.git_command, \"checkout\", crypto_rev],\n+ [self.git_command, \"checkout\", version.crypto_revision],\n cwd=os.path.join(git_worktree_path, \"crypto\"),\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n@@ -187,29 +192,28 @@ class AbiChecker(object):\n self.log.info(make_output.decode(\"utf-8\"))\n for root, dirs, files in os.walk(git_worktree_path):\n for file in fnmatch.filter(files, \"*.so\"):\n- self.mbedtls_modules[version][os.path.splitext(file)[0]] = (\n+ version.modules[os.path.splitext(file)[0]] = (\n os.path.join(root, file)\n )\n if make_process.returncode != 0:\n raise Exception(\"make failed, aborting\")\n \n- def get_abi_dumps_from_shared_libraries(self, git_ref, git_worktree_path,\n+ def get_abi_dumps_from_shared_libraries(self, git_worktree_path,\n version):\n \"\"\"Generate the ABI dumps for the specified git revision.\n It must be checked out in git_worktree_path and the shared libraries\n must have been built.\"\"\"\n- abi_dumps = {}\n- for mbed_module, module_path in self.mbedtls_modules[version].items():\n+ for mbed_module, module_path in version.modules.items():\n output_path = os.path.join(\n- self.report_dir, version, \"{}-{}.dump\".format(\n- mbed_module, git_ref\n+ self.report_dir, version.version, \"{}-{}.dump\".format(\n+ mbed_module, version.revision\n )\n )\n abi_dump_command = [\n \"abi-dumper\",\n module_path,\n \"-o\", output_path,\n- \"-lver\", git_ref\n+ \"-lver\", version.revision\n ]\n abi_dump_process = subprocess.Popen(\n abi_dump_command,\n@@ -220,8 +224,7 @@ class AbiChecker(object):\n self.log.info(abi_dump_output.decode(\"utf-8\"))\n if abi_dump_process.returncode != 0:\n raise 
Exception(\"abi-dumper failed, aborting\")\n- abi_dumps[mbed_module] = output_path\n- return abi_dumps\n+ version.abi_dumps[mbed_module] = output_path\n \n def cleanup_worktree(self, git_worktree_path):\n \"\"\"Remove the specified git worktree.\"\"\"\n@@ -237,19 +240,13 @@ class AbiChecker(object):\n if worktree_process.returncode != 0:\n raise Exception(\"Worktree cleanup failed, aborting\")\n \n- def get_abi_dump_for_ref(self, remote_repo, git_rev, crypto_repo,\n- crypto_rev, version):\n+ def get_abi_dump_for_ref(self, version):\n \"\"\"Generate the ABI dumps for the specified git revision.\"\"\"\n- git_worktree_path = self.get_clean_worktree_for_git_revision(\n- remote_repo, git_rev\n- )\n- self.update_git_submodules(git_worktree_path, crypto_repo, crypto_rev)\n+ git_worktree_path = self.get_clean_worktree_for_git_revision(version)\n+ self.update_git_submodules(git_worktree_path, version)\n self.build_shared_libraries(git_worktree_path, version)\n- abi_dumps = self.get_abi_dumps_from_shared_libraries(\n- git_rev, git_worktree_path, version\n- )\n+ self.get_abi_dumps_from_shared_libraries(git_worktree_path, version)\n self.cleanup_worktree(git_worktree_path)\n- return abi_dumps\n \n def remove_children_with_tag(self, parent, tag):\n children = parent.getchildren()\n@@ -275,19 +272,20 @@ class AbiChecker(object):\n be available.\"\"\"\n compatibility_report = \"\"\n compliance_return_code = 0\n- shared_modules = list(set(self.mbedtls_modules[\"old\"].keys()) &\n- set(self.mbedtls_modules[\"new\"].keys()))\n+ shared_modules = list(set(self.old_version.modules.keys()) &\n+ set(self.new_version.modules.keys()))\n for mbed_module in shared_modules:\n output_path = os.path.join(\n self.report_dir, \"{}-{}-{}.html\".format(\n- mbed_module, self.old_rev, self.new_rev\n+ mbed_module, self.old_version.revision,\n+ self.new_version.revision\n )\n )\n abi_compliance_command = [\n \"abi-compliance-checker\",\n \"-l\", mbed_module,\n- \"-old\", self.old_dumps[mbed_module],\n- \"-new\", self.new_dumps[mbed_module],\n+ \"-old\", self.old_version.abi_dumps[mbed_module],\n+ \"-new\", self.new_version.abi_dumps[mbed_module],\n \"-strict\",\n \"-report-path\", output_path,\n ]\n@@ -329,8 +327,8 @@ class AbiChecker(object):\n \"abi-compliance-checker failed with a return code of {},\"\n \" aborting\".format(abi_compliance_process.returncode)\n )\n- os.remove(self.old_dumps[mbed_module])\n- os.remove(self.new_dumps[mbed_module])\n+ os.remove(self.old_version.abi_dumps[mbed_module])\n+ os.remove(self.new_version.abi_dumps[mbed_module])\n if self.can_remove_report_dir:\n os.rmdir(self.report_dir)\n self.log.info(compatibility_report)\n@@ -341,12 +339,8 @@ class AbiChecker(object):\n between self.old_rev and self.new_rev.\"\"\"\n self.check_repo_path()\n self.check_abi_tools_are_installed()\n- self.old_dumps = self.get_abi_dump_for_ref(self.old_repo, self.old_rev,\n- self.old_crypto_repo,\n- self.old_crypto_rev, \"old\")\n- self.new_dumps = self.get_abi_dump_for_ref(self.new_repo, self.new_rev,\n- self.new_crypto_repo,\n- self.new_crypto_rev, \"new\")\n+ self.get_abi_dump_for_ref(self.old_version)\n+ self.get_abi_dump_for_ref(self.new_version)\n return self.get_abi_compatibility_report()\n \n \n@@ -412,12 +406,13 @@ def run_main():\n help=\"output only the list of issues to stdout, instead of a full report\",\n )\n abi_args = parser.parse_args()\n+ old_version = RepoVersion(\"old\", abi_args.old_repo, abi_args.old_rev,\n+ abi_args.old_crypto_repo, abi_args.old_crypto_rev)\n+ new_version = RepoVersion(\"new\", 
abi_args.new_repo, abi_args.new_rev,\n+ abi_args.new_crypto_repo, abi_args.new_crypto_rev)\n abi_check = AbiChecker(\n- abi_args.report_dir, abi_args.old_repo, abi_args.old_rev,\n- abi_args.old_crypto_rev, abi_args.old_crypto_repo,\n- abi_args.new_repo, abi_args.new_rev, abi_args.new_crypto_rev,\n- abi_args.new_crypto_repo, abi_args.keep_all_reports,\n- abi_args.brief, abi_args.skip_file\n+ old_version, new_version, abi_args.report_dir,\n+ abi_args.keep_all_reports, abi_args.brief, abi_args.skip_file\n )\n return_code = abi_check.check_for_abi_changes()\n sys.exit(return_code)\n", "new_path": "scripts/abi_check.py", "old_path": "scripts/abi_check.py" } ]
e8867c423a9b40cd7147a44223733d797bac7964
lepture/authlib
null
null
Add check_client_type to Client model. It is not reliable to use client_secret to detect the client type.
[ { "change_type": "MODIFY", "diff": "@@ -101,7 +101,7 @@ def authenticate_none(query_client, request):\n if client_id and 'client_secret' not in request.data:\n client = _validate_client(query_client, client_id, request.state)\n if client.check_token_endpoint_auth_method('none') \\\n- and not client.has_client_secret():\n+ and not client.check_client_type('public'):\n log.debug(\n 'Authenticate {} via \"none\" '\n 'success'.format(client_id)\n", "new_path": "authlib/specs/rfc6749/authenticate_client.py", "old_path": "authlib/specs/rfc6749/authenticate_client.py" }, { "change_type": "MODIFY", "diff": "@@ -310,7 +310,7 @@ class AuthorizationCodeGrant(RedirectAuthGrant):\n self.GRANT_TYPE,\n user=user,\n scope=scope,\n- include_refresh_token=client.has_client_secret(),\n+ include_refresh_token=client.check_client_type('confidential'),\n )\n log.debug('Issue token {!r} to {!r}'.format(token, client))\n \n", "new_path": "authlib/specs/rfc6749/grants/authorization_code.py", "old_path": "authlib/specs/rfc6749/grants/authorization_code.py" }, { "change_type": "MODIFY", "diff": "@@ -36,7 +36,7 @@ class RefreshTokenGrant(BaseGrant):\n client = self.authenticate_token_endpoint_client()\n log.debug('Validate token request of {!r}'.format(client))\n \n- if not client.has_client_secret():\n+ if client.check_client_type('public'):\n raise UnauthorizedClientError()\n \n if not client.check_grant_type(self.GRANT_TYPE):\n", "new_path": "authlib/specs/rfc6749/grants/refresh_token.py", "old_path": "authlib/specs/rfc6749/grants/refresh_token.py" }, { "change_type": "MODIFY", "diff": "@@ -126,6 +126,33 @@ class ClientMixin(object):\n \"\"\"\n raise NotImplementedError()\n \n+ def check_client_type(self, client_type):\n+ \"\"\"Validate if the client is the given ``client_type``. 
The available\n+ choices are:\n+\n+ * public:\n+ Clients incapable of maintaining the confidentiality of their\n+ credentials (e.g., clients executing on the device used by the\n+ resource owner, such as an installed native application or a web\n+ browser-based application), and incapable of secure client\n+ authentication via any other means.\n+ * confidential:\n+ Clients capable of maintaining the confidentiality of their\n+ credentials (e.g., client implemented on a secure server with\n+ restricted access to the client credentials), or capable of secure\n+ client authentication using other means.\n+\n+ Developers can overwrite this method to implement a new logic.\n+\n+ :param client_type: string of \"public\" or \"confidential\"\n+ :return: bool\n+ \"\"\"\n+ if client_type == 'public':\n+ return not self.has_client_secret()\n+ if client_type == 'confidential':\n+ return self.has_client_secret()\n+ raise ValueError('Invalid client_type: {!r}'.format(client_type))\n+\n \n class AuthorizationCodeMixin(object):\n def get_redirect_uri(self):\n", "new_path": "authlib/specs/rfc6749/models.py", "old_path": "authlib/specs/rfc6749/models.py" }, { "change_type": "MODIFY", "diff": "@@ -76,7 +76,7 @@ class CodeChallenge(object):\n return\n \n client = grant.request.client\n- if not client.has_client_secret() and not challenge:\n+ if client.check_client_type('public') and not challenge:\n raise InvalidRequestError('Missing \"code_challenge\"')\n \n if method and method not in self.SUPPORTED_CODE_CHALLENGE_METHOD:\n@@ -88,7 +88,7 @@ class CodeChallenge(object):\n client = grant.request.client\n \n # public client MUST verify code challenge\n- if self.required and not client.has_client_secret() and not verifier:\n+ if self.required and client.check_client_type('public') and not verifier:\n raise InvalidRequestError('Missing \"code_verifier\"')\n \n authorization_code = grant.request.credential\n", "new_path": "authlib/specs/rfc7636/grant.py", "old_path": "authlib/specs/rfc7636/grant.py" } ]
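The new ClientMixin.check_client_type() shown above derives the client type from has_client_secret(), so callers can ask for "public" or "confidential" directly. A rough usage sketch with a toy client class (the class name and secret storage are invented, only the method logic mirrors the diff):

    class DemoClient:
        """Hypothetical client implementing the pieces of ClientMixin used here."""

        def __init__(self, client_secret=None):
            self.client_secret = client_secret

        def has_client_secret(self):
            return bool(self.client_secret)

        def check_client_type(self, client_type):
            # Default implementation from the diff; servers may override it.
            if client_type == 'public':
                return not self.has_client_secret()
            if client_type == 'confidential':
                return self.has_client_secret()
            raise ValueError('Invalid client_type: {!r}'.format(client_type))

    assert DemoClient().check_client_type('public')
    assert DemoClient('s3cr3t').check_client_type('confidential')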
a16d89f4b043ea07c6a936646924ac881e2b183b
nextstrain/ncov
null
null
Improve combine metadata script 1. More detailed help message 2. Helpful errors printed if assertions fail 3. Encoding of metadata origin now uses explicit "yes"/"no" values rather than using the empty string.
[ { "change_type": "MODIFY", "diff": "@@ -2,6 +2,7 @@ import argparse\n from augur.utils import read_metadata\n from Bio import SeqIO\n import csv\n+import sys\n \n EMPTY = ''\n \n@@ -18,25 +19,33 @@ EMPTY = ''\n \n def parse_args():\n parser = argparse.ArgumentParser(\n- description=\"Custom script to combine metadata files\",\n+ description=\"\"\"\n+ Custom script to combine metadata files from different origins.\n+ In the case where metadata files specify different values, the latter provided file will take priority.\n+ Columns will be added for each origin with values \"yes\" or \"no\" to identify the input source (origin) of each sample.\n+ \"\"\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n parser.add_argument('--metadata', required=True, nargs='+', metavar=\"TSV\", help=\"Metadata files\")\n- parser.add_argument('--origins', required=True, nargs='+', metavar=\"STR\", help=\"Names of origins (metadata columns will be created from these)\")\n- parser.add_argument('--output', required=True, metavar=\"TSV\", help=\"output (merged) metadata\")\n+ parser.add_argument('--origins', required=True, nargs='+', metavar=\"STR\", help=\"Names of origins (order should match provided metadata)\")\n+ parser.add_argument('--output', required=True, metavar=\"TSV\", help=\"Output (merged) metadata\")\n args = parser.parse_args()\n return args\n \n if __name__ == '__main__':\n args = parse_args()\n- assert(len(args.metadata)==len(args.origins))\n- assert(len(args.origins)>1)\n+ try:\n+ assert(len(args.metadata)==len(args.origins))\n+ assert(len(args.origins)>1)\n+ except AssertionError:\n+ print(\"Error. Please check your inputs - there must be the same number of metadata files as origins provided, and there must be more than one of each!\")\n+ sys.exit(2)\n \n # READ IN METADATA FILES\n metadata = []\n for (origin, fname) in zip(args.origins, args.metadata):\n data, columns = read_metadata(fname)\n- metadata.append({'origin': origin, \"fname\": fname, 'data': data, 'columns': columns})\n+ metadata.append({'origin': origin, \"fname\": fname, 'data': data, 'columns': columns, 'strains': {s for s in data.keys()}})\n \n # SUMMARISE INPUT METADATA\n print(f\"Parsed {len(metadata)} metadata TSVs\")\n@@ -54,8 +63,8 @@ if __name__ == '__main__':\n for strain in combined_data:\n for column in combined_columns:\n if column not in combined_data[strain]:\n- combined_data[strain][column] = EMPTY\n- combined_data[strain][metadata[0]['origin']] = \"yes\" # can't use `True` as booleans cause issues for `augur filter`\n+ combined_data[strain][column] = EMPTY \n+ \n for idx in range(1, len(metadata)):\n for strain, row in metadata[idx]['data'].items():\n if strain not in combined_data:\n@@ -69,7 +78,13 @@ if __name__ == '__main__':\n if existing_value != EMPTY:\n print(f\"[{strain}::{column}] Overwriting {combined_data[strain][column]} with {new_value}\")\n combined_data[strain][column] = new_value\n- combined_data[strain][metadata[idx]['origin']] = \"yes\"\n+\n+ # one-hot encoding for origin\n+ # note that we use \"yes\" / \"no\" here as Booleans are problematic for `augur filter`\n+ for metadata_entry in metadata:\n+ origin = metadata_entry['origin']\n+ for strain in combined_data:\n+ combined_data[strain][origin] = \"yes\" if strain in metadata_entry['strains'] else \"no\"\n \n print(f\"Combined metadata: {len(combined_data.keys())} strains x {len(combined_columns)} columns\")\n \n", "new_path": "scripts/combine_metadata.py", "old_path": "scripts/combine_metadata.py" } ]
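The reworked one-hot encoding marks every strain with an explicit "yes"/"no" value per origin instead of leaving absent origins empty. The core of that step in isolation, with invented sample data:

    # Each metadata entry records which strains it contributed.
    metadata = [
        {'origin': 'gisaid', 'strains': {'strainA', 'strainB'}},
        {'origin': 'genbank', 'strains': {'strainB', 'strainC'}},
    ]
    combined_data = {name: {} for name in ('strainA', 'strainB', 'strainC')}

    # "yes"/"no" strings are used on purpose: booleans are problematic for `augur filter`.
    for entry in metadata:
        for strain in combined_data:
            combined_data[strain][entry['origin']] = (
                "yes" if strain in entry['strains'] else "no")

    print(combined_data['strainB'])   # {'gisaid': 'yes', 'genbank': 'yes'}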
b1e5407c503fc067da452d6d65f503d018849252
nextstrain/ncov
null
null
Refactor sanitizer transform logic Move logic to parse mapping of old to new column names and strip prefixes into their own functions with tests. This refactoring simplifies the code in the main body of the sanitizer script.
[ { "change_type": "MODIFY", "diff": "@@ -16,6 +16,39 @@ LOCATION_FIELDS = (\n )\n \n \n+def parse_new_column_names(renaming_rules):\n+ \"\"\"Parse the mapping of current to new column names from the given list of renaming rules.\n+\n+ Parameters\n+ ----------\n+ renaming_rules : list[str]\n+ A list of strings mapping an old column name to a new one delimited by an equal symbol (e.g., \"old_column=new_column\").\n+\n+ Returns\n+ -------\n+ dict :\n+ A mapping of new column names for each old column name.\n+\n+ >>> parse_new_column_names([\"old=new\", \"new=old\"])\n+ {'old': 'new', 'new': 'old'}\n+ >>> parse_new_column_names([\"old->new\"])\n+ {}\n+\n+ \"\"\"\n+ new_column_names = {}\n+ for rule in renaming_rules:\n+ if \"=\" in rule:\n+ old_column, new_column = rule.split(\"=\")\n+ new_column_names[old_column] = new_column\n+ else:\n+ print(\n+ f\"WARNING: missing mapping of old to new column in form of 'Virus name=strain' for rule: '{rule}'.\",\n+ file=sys.stderr\n+ )\n+\n+ return new_column_names\n+\n+\n def parse_location_string(location_string, location_fields):\n \"\"\"Parse location string from GISAID into the given separate geographic scales\n and return a dictionary of parse values by scale.\n@@ -69,6 +102,35 @@ def parse_location_string(location_string, location_fields):\n return locations\n \n \n+def strip_prefixes(strain_name, prefixes):\n+ \"\"\"Strip the given prefixes from the given strain name.\n+\n+ Parameters\n+ ----------\n+ strain_name : str\n+ Name of a strain to be sanitized\n+ prefixes : list[str]\n+ A list of prefixes to be stripped from the strain name.\n+\n+ Returns\n+ -------\n+ str :\n+ Strain name without any of the given prefixes.\n+\n+\n+ >>> strip_prefixes(\"hCoV-19/RandomStrain/1/2020\", [\"hCoV-19/\", \"SARS-CoV-2/\"])\n+ 'RandomStrain/1/2020'\n+ >>> strip_prefixes(\"SARS-CoV-2/RandomStrain/2/2020\", [\"hCoV-19/\", \"SARS-CoV-2/\"])\n+ 'RandomStrain/2/2020'\n+ >>> strip_prefixes(\"hCoV-19/RandomStrain/1/2020\", [\"SARS-CoV-2/\"])\n+ 'hCoV-19/RandomStrain/1/2020'\n+\n+ \"\"\"\n+ joined_prefixes = \"|\".join(prefixes)\n+ pattern = f\"^({joined_prefixes})\"\n+ return re.sub(pattern, \"\", strain_name)\n+\n+\n def resolve_duplicates(metadata, strain_field, database_id_columns, error_on_duplicates=False):\n \"\"\"Resolve duplicate records for a given strain field and return a deduplicated\n data frame. 
This approach chooses the record with the most recent database\n@@ -214,18 +276,8 @@ if __name__ == '__main__':\n axis=1\n ).drop(columns=[args.parse_location_field])\n \n- new_column_names = {}\n- if args.rename_fields:\n- # Rename specific columns using rules like \"Virus name=strain\".\n- for rule in args.rename_fields:\n- if \"=\" in rule:\n- old_column, new_column = rule.split(\"=\")\n- new_column_names[old_column] = new_column\n- else:\n- print(\n- f\"WARNING: missing mapping of old to new column in form of 'Virus name=strain' for rule: '{rule}'.\",\n- file=sys.stderr\n- )\n+ # Parse mapping of old column names to new.\n+ new_column_names = parse_new_column_names(args.rename_fields)\n \n # Rename columns as needed.\n if len(new_column_names) > 0:\n@@ -247,15 +299,8 @@ if __name__ == '__main__':\n sys.exit(1)\n \n if args.strip_prefixes:\n- prefixes = \"|\".join(args.strip_prefixes)\n- pattern = f\"^({prefixes})\"\n-\n metadata[strain_field] = metadata[strain_field].apply(\n- lambda strain: re.sub(\n- pattern,\n- \"\",\n- strain\n- )\n+ lambda strain: strip_prefixes(strain, args.strip_prefixes)\n )\n \n # Replace whitespaces from strain names with underscores to match GISAID's\n", "new_path": "scripts/sanitize_metadata.py", "old_path": "scripts/sanitize_metadata.py" } ]
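Both extracted helpers are small enough to restate standalone; the snippet below condenses them purely for illustration (warning text shortened, strain names invented) and reproduces the behaviour documented by the new doctests:

    import re
    import sys

    def parse_new_column_names(renaming_rules):
        # "old=new" rules become a dict; anything without "=" is skipped with a warning.
        new_names = {}
        for rule in renaming_rules:
            if "=" in rule:
                old, new = rule.split("=")
                new_names[old] = new
            else:
                print(f"WARNING: skipping rule {rule!r}", file=sys.stderr)
        return new_names

    def strip_prefixes(strain_name, prefixes):
        # One anchored alternation of all prefixes, removed from the start only.
        return re.sub(f"^({'|'.join(prefixes)})", "", strain_name)

    print(parse_new_column_names(["Virus name=strain"]))   # {'Virus name': 'strain'}
    print(strip_prefixes("hCoV-19/Example/1/2020", ["hCoV-19/", "SARS-CoV-2/"]))   # Example/1/2020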
dbffb035f72de8d43e5f04fc1ea0ce1b0da21e7a
teemtee/tmt
null
null
Support selecting objects under the current folder Add a special single dot notation for filtering stories, plans and tests under the current working directory. Update documentation and usage messages accordingly.
[ { "change_type": "MODIFY", "diff": "@@ -152,6 +152,12 @@ condition::\n $ tmt tests ls --condition 'tier > 0'\n /tests/ls\n \n+In order to select tests under the current working directory use\n+the single dot notation::\n+\n+ $ tmt test show .\n+ $ tmt run test --name .\n+\n \n Lint Tests\n ------------------------------------------------------------------\n@@ -345,6 +351,7 @@ inheritance and merging attributes.\n \n .. _fmf features: https://fmf.readthedocs.io/en/latest/features.html\n \n+\n Stories\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n \n@@ -420,6 +427,11 @@ available for binary status filtering::\n documented /tmt/cli\n ...\n \n+In order to select stories under the current working directory use\n+the single dot notation::\n+\n+ $ tmt story show .\n+\n \n Story Coverage\n ------------------------------------------------------------------\n", "new_path": "docs/examples.rst", "old_path": "docs/examples.rst" }, { "change_type": "MODIFY", "diff": "@@ -15,7 +15,11 @@ story: 'As a user I want to comfortably work with plans'\n \n /filter:\n story: 'Filter available plans'\n+ description:\n+ Search plans using a regular expression or a filter.\n+ Use ``.`` to select plans under the current directory.\n example:\n+ - tmt plan ls .\n - tmt plan ls REGEXP\n - tmt plan show --filter artifact:build\n implemented: /tmt/base.py\n", "new_path": "stories/cli/plan.fmf", "old_path": "stories/cli/plan.fmf" }, { "change_type": "MODIFY", "diff": "@@ -15,13 +15,19 @@ story: 'As a developer I want to comfortably work with stories'\n \n /filter:\n story: 'Search available stories'\n+ description:\n+ Search stories using a regular expression, a filter or\n+ coverage status. Use ``.`` to select stories under the\n+ current directory.\n example:\n- - tmt story --implemented\n- - tmt story --unimplemented\n- - tmt story --tested\n- - tmt story --untested\n- - tmt story --documented\n- - tmt story --undocumented\n+ - tmt story ls .\n+ - tmt story ls REGEXP\n+ - tmt story ls --implemented\n+ - tmt story ls --unimplemented\n+ - tmt story ls --tested\n+ - tmt story ls --untested\n+ - tmt story ls --documented\n+ - tmt story ls --undocumented\n implemented: /tmt/cli\n documented: /docs/examples#filter-stories\n \n", "new_path": "stories/cli/story.fmf", "old_path": "stories/cli/story.fmf" }, { "change_type": "MODIFY", "diff": "@@ -107,7 +107,11 @@ story: 'As a user I want to comfortably work with tests'\n \n /filter:\n story: 'Filter available tests'\n+ description:\n+ Search tests using a regular expression or a filter.\n+ Use ``.`` to select tests under the current directory.\n example:\n+ - tmt test ls .\n - tmt test ls REGEXP\n - tmt test show --filter tier:1\n - tmt test show --condition 'tier < 2'\n", "new_path": "stories/cli/test.fmf", "old_path": "stories/cli/test.fmf" }, { "change_type": "MODIFY", "diff": "@@ -48,6 +48,25 @@ class Node(tmt.utils.Common):\n echo(tmt.utils.format(\n 'sources', self.node.sources, key_color='magenta'))\n \n+ @classmethod\n+ def _save_context(cls, context):\n+ \"\"\" Save provided command line context for future use \"\"\"\n+ super(Node, cls)._save_context(context)\n+\n+ # Handle '.' as an alias for the current working directory\n+ names = cls._opt('names')\n+ if names is not None and '.' 
in names:\n+ root = context.obj.tree.root\n+ current = os.getcwd()\n+ # Handle special case when directly in the metadata root\n+ if current == root:\n+ path = '/'\n+ # Prepare path from the tree root to the current directory\n+ else:\n+ path = os.path.join('/', os.path.relpath(current, root))\n+ cls._context.params['names'] = (\n+ path if name == '.' else name for name in names)\n+\n def name_and_summary(self):\n \"\"\" Node name and optional summary \"\"\"\n if self.summary:\n", "new_path": "tmt/base.py", "old_path": "tmt/base.py" }, { "change_type": "MODIFY", "diff": "@@ -152,7 +152,7 @@ def main(context, root, **kwargs):\n \"\"\" Test Management Tool \"\"\"\n # Initialize metadata tree\n tree = tmt.Tree(root)\n- tree._context = context\n+ tree._save_context(context)\n context.obj = tmt.utils.Common()\n context.obj.tree = tree\n # List of enabled steps\n@@ -164,6 +164,7 @@ def main(context, root, **kwargs):\n tmt.Plan.overview(tree)\n tmt.Story.overview(tree)\n \n+\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Run\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n@@ -179,8 +180,8 @@ def main(context, root, **kwargs):\n def run(context, all_, id_, **kwargs):\n \"\"\" Run test steps. \"\"\"\n # Initialize\n+ tmt.Run._save_context(context)\n run = tmt.Run(id_, context.obj.tree)\n- run._context = context\n context.obj.run = run\n \n main.add_command(run)\n@@ -196,7 +197,7 @@ main.add_command(run)\n def discover(context, **kwargs):\n \"\"\" Gather and show information about test cases to be executed. \"\"\"\n context.obj.steps.add('discover')\n- tmt.steps.discover.Discover._context = context\n+ tmt.steps.discover.Discover._save_context(context)\n return 'discover'\n \n \n@@ -238,7 +239,7 @@ def discover(context, **kwargs):\n def provision(context, **kwargs):\n \"\"\" Provision an environment for testing (or use localhost). \"\"\"\n context.obj.steps.add('provision')\n- tmt.steps.provision.Provision._context = context\n+ tmt.steps.provision.Provision._save_context(context)\n \n \n @run.command()\n@@ -257,7 +258,7 @@ def provision(context, **kwargs):\n def prepare(context, **kwargs):\n \"\"\" Configure environment for testing (like ansible playbook). \"\"\"\n context.obj.steps.add('prepare')\n- tmt.steps.prepare.Prepare._context = context\n+ tmt.steps.prepare.Prepare._save_context(context)\n \n \n @run.command()\n@@ -273,7 +274,7 @@ def prepare(context, **kwargs):\n def execute(context, **kwargs):\n \"\"\" Run the tests (using the specified framework and its settings). \"\"\"\n context.obj.steps.add('execute')\n- tmt.steps.execute.Execute._context = context\n+ tmt.steps.execute.Execute._save_context(context)\n \n \n @run.command()\n@@ -286,7 +287,7 @@ def execute(context, **kwargs):\n def report(context, **kwargs):\n \"\"\" Provide an overview of test results and send notifications. \"\"\"\n context.obj.steps.add('report')\n- tmt.steps.report.Report._context = context\n+ tmt.steps.report.Report._save_context(context)\n \n \n @run.command()\n@@ -299,7 +300,7 @@ def report(context, **kwargs):\n def finish(context, **kwargs):\n \"\"\" Additional actions to be performed after the test execution. 
\"\"\"\n context.obj.steps.add('finish')\n- tmt.steps.finish.Finish._context = context\n+ tmt.steps.finish.Finish._save_context(context)\n \n \n @run.command()\n@@ -315,8 +316,13 @@ def finish(context, **kwargs):\n help=\"Use arbitrary Python expression for filtering.\")\n @verbose_debug_quiet\n def plans(context, **kwargs):\n- \"\"\" Select plans which should be executed. \"\"\"\n- tmt.base.Plan._context = context\n+ \"\"\"\n+ Select plans which should be executed\n+\n+ Regular expression can be used to filter plans by name.\n+ Use '.' to select plans under the current working directory.\n+ \"\"\"\n+ tmt.base.Plan._save_context(context)\n \n \n @run.command()\n@@ -332,8 +338,13 @@ def plans(context, **kwargs):\n help=\"Use arbitrary Python expression for filtering.\")\n @verbose_debug_quiet\n def tests(context, **kwargs):\n- \"\"\" Select tests which should be executed. \"\"\"\n- tmt.base.Test._context = context\n+ \"\"\"\n+ Select tests which should be executed\n+\n+ Regular expression can be used to filter tests by name.\n+ Use '.' to select tests under the current working directory.\n+ \"\"\"\n+ tmt.base.Test._save_context(context)\n \n \n @run.resultcallback()\n@@ -370,8 +381,13 @@ main.add_command(tests)\n @name_filter_condition\n @verbose_debug_quiet\n def ls(context, **kwargs):\n- \"\"\" List available tests. \"\"\"\n- tmt.Test._context = context\n+ \"\"\"\n+ List available tests\n+\n+ Regular expression can be used to filter tests by name.\n+ Use '.' to select tests under the current working directory.\n+ \"\"\"\n+ tmt.Test._save_context(context)\n for test in context.obj.tree.tests():\n test.ls()\n \n@@ -381,8 +397,13 @@ def ls(context, **kwargs):\n @name_filter_condition\n @verbose_debug_quiet\n def show(context, **kwargs):\n- \"\"\" Show test details. \"\"\"\n- tmt.Test._context = context\n+ \"\"\"\n+ Show test details\n+\n+ Regular expression can be used to filter tests by name.\n+ Use '.' to select tests under the current working directory.\n+ \"\"\"\n+ tmt.Test._save_context(context)\n for test in context.obj.tree.tests():\n test.show()\n echo()\n@@ -393,8 +414,13 @@ def show(context, **kwargs):\n @name_filter_condition\n @verbose_debug_quiet\n def lint(context, **kwargs):\n- \"\"\" Check tests against the L1 metadata specification. \"\"\"\n- tmt.Test._context = context\n+ \"\"\"\n+ Check tests against the L1 metadata specification\n+\n+ Regular expression can be used to filter tests for linting.\n+ Use '.' to select tests under the current working directory.\n+ \"\"\"\n+ tmt.Test._save_context(context)\n for test in context.obj.tree.tests():\n test.lint()\n echo()\n@@ -412,7 +438,7 @@ _test_templates = listed(tmt.templates.TEST, join='or')\n @force_dry\n def create(context, name, template, force, **kwargs):\n \"\"\" Create a new test based on given template. \"\"\"\n- tmt.Test._context = context\n+ tmt.Test._save_context(context)\n tmt.Test.create(name, template, context.obj.tree, force)\n \n \n@@ -444,7 +470,7 @@ def import_(context, paths, makefile, nitrate, purpose, **kwargs):\n nitrate ...... contact, component, tag,\n environment, relevancy, enabled\n \"\"\"\n- tmt.Test._context = context\n+ tmt.Test._save_context(context)\n if not paths:\n paths = ['.']\n for path in paths:\n@@ -479,8 +505,13 @@ def import_(context, paths, makefile, nitrate, purpose, **kwargs):\n '-d', '--debug', is_flag=True,\n help='Provide as much debugging details as possible.')\n def export(context, format_, **kwargs):\n- \"\"\" Export test data into the desired format. 
\"\"\"\n- tmt.Test._context = context\n+ \"\"\"\n+ Export test data into the desired format\n+\n+ Regular expression can be used to filter tests by name.\n+ Use '.' to select tests under the current working directory.\n+ \"\"\"\n+ tmt.Test._save_context(context)\n for test in context.obj.tree.tests():\n echo(test.export(format_=format_))\n \n@@ -500,7 +531,7 @@ def plans(context, **kwargs):\n Search for available plans.\n Explore detailed test step configuration.\n \"\"\"\n- tmt.Plan._context = context\n+ tmt.Plan._save_context(context)\n \n # Show overview of available plans\n if context.invoked_subcommand is None:\n@@ -514,8 +545,13 @@ main.add_command(plans)\n @name_filter_condition\n @verbose_debug_quiet\n def ls(context, **kwargs):\n- \"\"\" List available plans. \"\"\"\n- tmt.Plan._context = context\n+ \"\"\"\n+ List available plans\n+\n+ Regular expression can be used to filter plans by name.\n+ Use '.' to select plans under the current working directory.\n+ \"\"\"\n+ tmt.Plan._save_context(context)\n for plan in context.obj.tree.plans():\n plan.ls()\n \n@@ -525,8 +561,13 @@ def ls(context, **kwargs):\n @name_filter_condition\n @verbose_debug_quiet\n def show(context, **kwargs):\n- \"\"\" Show plan details. \"\"\"\n- tmt.Plan._context = context\n+ \"\"\"\n+ Show plan details\n+\n+ Regular expression can be used to filter plans by name.\n+ Use '.' to select plans under the current working directory.\n+ \"\"\"\n+ tmt.Plan._save_context(context)\n for plan in context.obj.tree.plans():\n plan.show()\n echo()\n@@ -537,8 +578,13 @@ def show(context, **kwargs):\n @name_filter_condition\n @verbose_debug_quiet\n def lint(context, **kwargs):\n- \"\"\" Check plans against the L2 metadata specification. \"\"\"\n- tmt.Plan._context = context\n+ \"\"\"\n+ Check plans against the L2 metadata specification\n+\n+ Regular expression can be used to filter plans by name.\n+ Use '.' to select plans under the current working directory.\n+ \"\"\"\n+ tmt.Plan._save_context(context)\n for plan in context.obj.tree.plans():\n plan.lint()\n echo()\n@@ -556,7 +602,7 @@ _plan_templates = listed(tmt.templates.PLAN, join='or')\n @force_dry\n def create(context, name, template, force, **kwargs):\n \"\"\" Create a new plan based on given template. \"\"\"\n- tmt.Plan._context = context\n+ tmt.Plan._save_context(context)\n tmt.Plan.create(name, template, context.obj.tree, force)\n \n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n@@ -574,7 +620,7 @@ def stories(context, **kwargs):\n Check available user stories.\n Explore coverage (test, implementation, documentation).\n \"\"\"\n- tmt.Story._context = context\n+ tmt.Story._save_context(context)\n \n # Show overview of available stories\n if context.invoked_subcommand is None:\n@@ -591,8 +637,13 @@ main.add_command(stories)\n def ls(\n context, implemented, tested, documented, covered,\n unimplemented, untested, undocumented, uncovered, **kwargs):\n- \"\"\" List available stories. \"\"\"\n- tmt.Story._context = context\n+ \"\"\"\n+ List available stories\n+\n+ Regular expression can be used to filter stories by name.\n+ Use '.' 
to select stories under the current working directory.\n+ \"\"\"\n+ tmt.Story._save_context(context)\n for story in context.obj.tree.stories():\n if story._match(implemented, tested, documented, covered,\n unimplemented, untested, undocumented, uncovered):\n@@ -607,8 +658,13 @@ def ls(\n def show(\n context, implemented, tested, documented, covered,\n unimplemented, untested, undocumented, uncovered, **kwargs):\n- \"\"\" Show story details. \"\"\"\n- tmt.Story._context = context\n+ \"\"\"\n+ Show story details\n+\n+ Regular expression can be used to filter stories by name.\n+ Use '.' to select stories under the current working directory.\n+ \"\"\"\n+ tmt.Story._save_context(context)\n for story in context.obj.tree.stories():\n if story._match(implemented, tested, documented, covered,\n unimplemented, untested, undocumented, uncovered):\n@@ -628,7 +684,7 @@ _story_templates = listed(tmt.templates.STORY, join='or')\n @force_dry\n def create(context, name, template, force, **kwargs):\n \"\"\" Create a new story based on given template. \"\"\"\n- tmt.Story._context = context\n+ tmt.Story._save_context(context)\n tmt.base.Story.create(name, template, context.obj.tree, force)\n \n \n@@ -647,8 +703,13 @@ def coverage(\n context, code, test, docs,\n implemented, tested, documented, covered,\n unimplemented, untested, undocumented, uncovered, **kwargs):\n- \"\"\" Show code, test and docs coverage for given stories. \"\"\"\n- tmt.Story._context = context\n+ \"\"\"\n+ Show code, test and docs coverage for given stories\n+\n+ Regular expression can be used to filter stories by name.\n+ Use '.' to select stories under the current working directory.\n+ \"\"\"\n+ tmt.Story._save_context(context)\n \n def headfoot(text):\n \"\"\" Format simple header/footer \"\"\"\n@@ -703,8 +764,13 @@ def export(\n context, format_,\n implemented, tested, documented, covered,\n unimplemented, untested, undocumented, uncovered, **kwargs):\n- \"\"\" Export selected stories into desired format. \"\"\"\n- tmt.Story._context = context\n+ \"\"\"\n+ Export selected stories into desired format\n+\n+ Regular expression can be used to filter stories by name.\n+ Use '.' to select stories under the current working directory.\n+ \"\"\"\n+ tmt.Story._save_context(context)\n \n for story in context.obj.tree.stories(whole=True):\n if story._match(implemented, tested, documented, covered,\n", "new_path": "tmt/cli.py", "old_path": "tmt/cli.py" }, { "change_type": "MODIFY", "diff": "@@ -54,6 +54,11 @@ class Common(object):\n \"\"\" Name is the default string representation \"\"\"\n return self.name\n \n+ @classmethod\n+ def _save_context(cls, context):\n+ \"\"\" Save provided command line context for future use \"\"\"\n+ cls._context = context\n+\n @classmethod\n def _opt(cls, option, default=None):\n \"\"\" Get an option from the command line context (class version) \"\"\"\n", "new_path": "tmt/utils.py", "old_path": "tmt/utils.py" } ]
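The '.' alias is resolved by turning the current working directory into a node name relative to the metadata tree root. The translation on its own (paths invented):

    import os

    def dot_to_name(current, root):
        """Translate the current directory into a test/plan/story name prefix."""
        if current == root:
            return '/'    # directly in the metadata root
        return os.path.join('/', os.path.relpath(current, root))

    print(dot_to_name('/home/user/project/tests/core', '/home/user/project'))   # /tests/core
    print(dot_to_name('/home/user/project', '/home/user/project'))              # /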
c82819fb89809c4cc165b8a05dd54a47d6be0fb1
teemtee/tmt
null
null
Adjust the default branch handling for libraries Detect the default branch and store it after cloning the repo. This seems to make the implementation a bit more readable.
[ { "change_type": "MODIFY", "diff": "@@ -18,26 +18,26 @@ def test_library():\n assert library.format == 'rpm'\n assert library.repo == 'openssl'\n assert library.url == 'https://github.com/beakerlib/openssl'\n- assert library.ref == 'master' # default branch is called master\n+ assert library.ref == 'master' # The default branch is master\n assert library.dest == tmt.beakerlib.DEFAULT_DESTINATION\n shutil.rmtree(library.parent.workdir)\n \n \n @pytest.mark.web\n-def test_library_from_fmf():\n+@pytest.mark.parametrize(\n+ 'url, name, default_branch', [\n+ ('https://github.com/beakerlib/httpd', '/http', 'master'),\n+ ('https://github.com/beakerlib/example', '/file', 'main')\n+ ])\n+def test_library_from_fmf(url, name, default_branch):\n \"\"\" Fetch beakerlib library referenced by fmf identifier \"\"\"\n- library = tmt.beakerlib.Library(\n- {\n- 'url': 'https://github.com/beakerlib/httpd',\n- 'name': '/http'\n- }\n- )\n+ library = tmt.beakerlib.Library(dict(url=url, name=name))\n assert library.format == 'fmf'\n- assert library.ref == 'master' # default branch is called master\n- assert library.url == 'https://github.com/beakerlib/httpd'\n+ assert library.ref == default_branch\n+ assert library.url == url\n assert library.dest == tmt.beakerlib.DEFAULT_DESTINATION\n- assert library.repo == 'httpd'\n- assert library.name == '/http'\n+ assert library.repo == url.split('/')[-1]\n+ assert library.name == name\n shutil.rmtree(library.parent.workdir)\n \n \n@@ -61,7 +61,7 @@ def test_dependencies():\n assert libraries[0].repo == 'httpd'\n assert libraries[0].name == '/http'\n assert libraries[0].url == 'https://github.com/beakerlib/httpd'\n- assert libraries[0].ref == 'master' # default branch is called master\n+ assert libraries[0].ref == 'master' # The default branch is master\n assert libraries[0].dest == tmt.beakerlib.DEFAULT_DESTINATION\n assert libraries[1].repo == 'openssl'\n assert libraries[1].name == '/certgen'\n", "new_path": "tests/unit/test_beakerlib.py", "old_path": "tests/unit/test_beakerlib.py" }, { "change_type": "MODIFY", "diff": "@@ -2,7 +2,6 @@\n \n import re\n import os\n-import shutil\n \n import fmf\n import tmt\n@@ -52,6 +51,9 @@ class Library(object):\n # Use an empty common class if parent not provided (for logging, cache)\n self.parent = parent or tmt.utils.Common(workdir=True)\n \n+ # Default branch is detected from the origin after cloning\n+ self.default_branch = None\n+\n # The 'library(repo/lib)' format\n if isinstance(identifier, str):\n identifier = identifier.strip()\n@@ -62,8 +64,7 @@ class Library(object):\n self.format = 'rpm'\n self.repo, self.name = matched.groups()\n self.url = os.path.join(DEFAULT_REPOSITORY, self.repo)\n- self.ref = None # final value\n- self.__ref = None # original value\n+ self.ref = None\n self.dest = DEFAULT_DESTINATION\n \n # The fmf identifier\n@@ -71,8 +72,7 @@ class Library(object):\n self.parent.debug(f\"Detected library '{identifier}'.\", level=3)\n self.format = 'fmf'\n self.url = identifier.get('url')\n- self.ref = identifier.get('ref', None) # final value\n- self.__ref = None # original value\n+ self.ref = identifier.get('ref', None)\n self.dest = identifier.get(\n 'destination', DEFAULT_DESTINATION).lstrip('/')\n self.name = identifier.get('name', '/')\n@@ -110,18 +110,19 @@ class Library(object):\n # Check if the library was already fetched\n try:\n library = self.parent._library_cache[self.repo]\n+ # The url must be identical\n if library.url != self.url:\n raise tmt.utils.GeneralError(\n f\"Library '{self.repo}' with url 
'{self.url}' conflicts \"\n f\"with already fetched library from '{library.url}'.\")\n- if library.__ref != self.__ref:\n- # .__ref can be None, indicating we want default branch\n- # .ref is always a brach/commit/tag string\n- lib_ref = library.__ref if library.__ref else '<default branch>'\n- self_ref = self.__ref if self.__ref else '<default branch>'\n+ # Use the default branch if no ref provided\n+ if self.ref is None:\n+ self.ref = library.default_branch\n+ # The same ref has to be used\n+ if library.ref != self.ref:\n raise tmt.utils.GeneralError(\n- f\"Library '{self.repo}' using ref '{self_ref}' conflicts \"\n- f\"with already fetched library using ref '{lib_ref}'.\")\n+ f\"Library '{self.repo}' using ref '{self.ref}' conflicts \"\n+ f\"with already fetched library using ref '{library.ref}'.\")\n self.parent.debug(f\"Library '{self}' already fetched.\", level=3)\n # Reuse the existing metadata tree\n self.tree = library.tree\n@@ -129,17 +130,20 @@ class Library(object):\n except KeyError:\n self.parent.debug(f\"Fetch library '{self}'.\", level=3)\n # Prepare path, clone the repository, checkout ref\n- directory = os.path.join(\n- self.parent.workdir, self.dest, self.repo)\n+ directory = os.path.join(self.parent.workdir, self.dest, self.repo)\n # Clone repo with disabled prompt to ignore missing/private repos\n try:\n self.parent.run(\n ['git', 'clone', self.url, directory],\n shell=False, env={\"GIT_ASKPASS\": \"echo\"})\n- # Store the default_branch\n+ # Detect the default branch from the origin\n+ # The ref format is 'ref: refs/remotes/origin/master'\n head = os.path.join(directory, '.git/refs/remotes/origin/HEAD')\n- default = os.path.join(directory, '.git/refs/heads/__DEFAULT__')\n- shutil.copyfile(head, default)\n+ with open(head) as ref:\n+ self.default_branch = ref.read().strip().split('/')[-1]\n+ # Use the default branch if no ref provided\n+ if self.ref is None:\n+ self.ref = self.default_branch\n except tmt.utils.RunError as error:\n # Fallback to install during the prepare step if in rpm format\n if self.format == 'rpm':\n@@ -148,13 +152,8 @@ class Library(object):\n self.parent.fail(\n f\"Failed to fetch library '{self}' from '{self.url}'.\")\n raise\n- # Check out the requested branch (sets real name of default branch)\n+ # Check out the requested branch\n try:\n- # wants default branch -> replace with the name of real default branch\n- if self.ref is None:\n- with open(os.path.join(directory, '.git/refs/heads/__DEFAULT__')) as f_head:\n- # content should be `ref: refs/remotes/origin/master`\n- self.ref = f_head.read().strip().split('/')[-1]\n self.parent.run(\n ['git', 'checkout', self.ref], shell=False, cwd=directory)\n except tmt.utils.RunError as error:\n", "new_path": "tmt/beakerlib.py", "old_path": "tmt/beakerlib.py" } ]
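Default branch detection now simply parses the origin HEAD reference that git leaves behind after cloning. The same parsing as a tiny standalone helper (the call is commented out since it needs a real clone):

    import os

    def detect_default_branch(directory):
        # The file contains a single line such as 'ref: refs/remotes/origin/main'.
        head = os.path.join(directory, '.git/refs/remotes/origin/HEAD')
        with open(head) as ref:
            return ref.read().strip().split('/')[-1]

    # detect_default_branch('/tmp/openssl')   # -> 'master' or 'main', depending on the repo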
1b7b3d9f9e20a80e2f733bbaa2a76e3f22148a5c
teemtee/tmt
null
null
Refactor Node class to Core Add _lint_summary method to Core class because it is a Core attribute which is shared.
[ { "change_type": "MODIFY", "diff": "@@ -44,7 +44,7 @@ EXTRA_TEST_KEYS = (\n \"extra-summary extra-task\".split())\n \n \n-class Node(tmt.utils.Common):\n+class Core(tmt.utils.Common):\n \"\"\"\n General node object\n \n@@ -57,7 +57,7 @@ class Node(tmt.utils.Common):\n \n def __init__(self, node, parent=None):\n \"\"\" Initialize the node \"\"\"\n- super(Node, self).__init__(parent=parent, name=node.name)\n+ super(Core, self).__init__(parent=parent, name=node.name)\n self.node = node\n \n # Store original metadata with applied defaults and including\n@@ -152,7 +152,7 @@ class Node(tmt.utils.Common):\n @classmethod\n def _save_context(cls, context):\n \"\"\" Save provided command line context for future use \"\"\"\n- super(Node, cls)._save_context(context)\n+ super(Core, cls)._save_context(context)\n \n # Handle '.' as an alias for the current working directory\n names = cls._opt('names')\n@@ -200,8 +200,20 @@ class Node(tmt.utils.Common):\n known_keys = additional_keys + self._keys\n return [key for key in self.node.get().keys() if key not in known_keys]\n \n+ def _lint_summary(self):\n+ \"\"\" Lint summary attribute \"\"\"\n+ # Summary is advised with a resonable length\n+ if self.summary is None:\n+ verdict(None, \"summary is very useful for quick inspection\")\n+ elif len(self.summary) > 50:\n+ verdict(None, \"summary should not exceed 50 characters\")\n+ return True\n \n-class Test(Node):\n+\n+Node = Core\n+\n+\n+class Test(Core):\n \"\"\" Test object (L1 Metadata) \"\"\"\n \n # Supported attributes (listed in display order)\n@@ -382,10 +394,7 @@ class Test(Node):\n stripped_path.startswith('/'), 'directory path must be absolute')\n valid &= verdict(\n os.path.exists(test_path), 'directory path must exist')\n- if self.summary is None:\n- verdict(None, 'summary is very useful for quick inspection')\n- elif len(self.summary) > 50:\n- verdict(None, 'summary should not exceed 50 characters')\n+ self._lint_summary()\n \n # Check for possible test case relevancy rules\n filename = self.node.sources[-1]\n@@ -450,7 +459,7 @@ class Test(Node):\n return super(Test, self).export(format_, keys)\n \n \n-class Plan(Node):\n+class Plan(Core):\n \"\"\" Plan object (L2 Metadata) \"\"\"\n \n def __init__(self, node, run=None):\n@@ -661,16 +670,6 @@ class Plan(Node):\n \n return correct\n \n- def _lint_summary(self):\n- \"\"\" Lint summary step \"\"\"\n- # Summary is advised with a resonable length\n- if self.summary is None:\n- verdict(None, 'summary is very useful for quick inspection')\n- elif len(self.summary) > 50:\n- verdict(None, 'summary should not exceed 50 characters')\n-\n- return True\n-\n def _lint_discover(self):\n \"\"\" Lint discover step \"\"\"\n # The discover step is optional\n@@ -777,7 +776,7 @@ class Plan(Node):\n self.finish.go()\n \n \n-class Story(Node):\n+class Story(Core):\n \"\"\" User story object \"\"\"\n \n # Supported attributes (listed in display order)\n@@ -1199,10 +1198,10 @@ class Run(tmt.utils.Common):\n self._context.obj.steps = set(data['steps'])\n plans = []\n # The root directory of the tree may not be available, create\n- # a partial Node object that only contains the necessary\n+ # a partial Core object that only contains the necessary\n # attributes required for plan/step loading.\n for plan in data.get('plans'):\n- node = type('Node', (), {\n+ node = type('Core', (), {\n 'name': plan,\n 'data': {},\n # No attributes will ever need to be accessed, just create\n", "new_path": "tmt/base.py", "old_path": "tmt/base.py" } ]
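The shared _lint_summary() check is short; shown here in isolation with tmt's verdict() helper replaced by plain prints, purely for illustration:

    def lint_summary(summary):
        """Advisory summary check shared by tests, plans and stories."""
        if summary is None:
            print("warn: summary is very useful for quick inspection")
        elif len(summary) > 50:
            print("warn: summary should not exceed 50 characters")
        return True   # advisory only, it never fails the lint

    lint_summary(None)
    lint_summary("A" * 60)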
c9605aea40808f5b0bf00bbbd0ec679c09b760ac
teemtee/tmt
null
null
Implement a generic `requires` for all plugins Add support for gathering packages required on the guest to all supported plugins to allow easier future extension if needed.
[ { "change_type": "MODIFY", "diff": "@@ -421,6 +421,10 @@ class Plugin(tmt.utils.Common, metaclass=PluginIndex):\n # Include order in verbose mode\n self.verbose('order', self.order, 'magenta', level=3)\n \n+ def requires(self):\n+ \"\"\" List of packages required by the plugin on the guest \"\"\"\n+ return []\n+\n \n class Action(tmt.utils.Common):\n \"\"\" A special action performed during a normal step. \"\"\"\n", "new_path": "tmt/steps/__init__.py", "old_path": "tmt/steps/__init__.py" }, { "change_type": "MODIFY", "diff": "@@ -86,6 +86,19 @@ class Finish(tmt.steps.Step):\n self.status('done')\n self.save()\n \n+ def requires(self):\n+ \"\"\"\n+ Packages required by all enabled finish plugins\n+\n+ Return a list of packages which need to be installed on the\n+ provisioned guest so that the finishing tasks work well.\n+ Used by the prepare step.\n+ \"\"\"\n+ requires = set()\n+ for plugin in self.plugins(classes=FinishPlugin):\n+ requires.update(plugin.requires())\n+ return list(requires)\n+\n \n class FinishPlugin(tmt.steps.Plugin):\n \"\"\" Common parent of finish plugins \"\"\"\n", "new_path": "tmt/steps/finish/__init__.py", "old_path": "tmt/steps/finish/__init__.py" }, { "change_type": "MODIFY", "diff": "@@ -87,7 +87,10 @@ class Prepare(tmt.steps.Step):\n requires = set(\n self.plan.discover.requires() +\n self.plan.provision.requires() +\n- self.plan.execute.requires()\n+ self.plan.prepare.requires() +\n+ self.plan.execute.requires() +\n+ self.plan.report.requires() +\n+ self.plan.finish.requires()\n )\n \n if requires:\n@@ -147,6 +150,19 @@ class Prepare(tmt.steps.Step):\n self.status('done')\n self.save()\n \n+ def requires(self):\n+ \"\"\"\n+ Packages required by all enabled prepare plugins\n+\n+ Return a list of packages which need to be installed on the\n+ provisioned guest so that the preparation tasks work well.\n+ Used by the prepare step.\n+ \"\"\"\n+ requires = set()\n+ for plugin in self.plugins(classes=PreparePlugin):\n+ requires.update(plugin.requires())\n+ return list(requires)\n+\n \n class PreparePlugin(tmt.steps.Plugin):\n \"\"\" Common parent of prepare plugins \"\"\"\n", "new_path": "tmt/steps/prepare/__init__.py", "old_path": "tmt/steps/prepare/__init__.py" }, { "change_type": "MODIFY", "diff": "@@ -76,8 +76,3 @@ class GuestLocal(tmt.Guest):\n \n def pull(self, source=None, destination=None, options=None):\n \"\"\" Nothing to be done to pull workdir \"\"\"\n-\n- @classmethod\n- def requires(cls):\n- \"\"\" No packages needed to sync workdir \"\"\"\n- return []\n", "new_path": "tmt/steps/provision/local.py", "old_path": "tmt/steps/provision/local.py" }, { "change_type": "MODIFY", "diff": "@@ -218,8 +218,3 @@ class GuestContainer(tmt.Guest):\n if self.container:\n self.podman(['container', 'rm', '-f', self.container])\n self.info('container', 'removed', 'green')\n-\n- @classmethod\n- def requires(cls):\n- \"\"\" No packages needed to sync workdir to the container \"\"\"\n- return []\n", "new_path": "tmt/steps/provision/podman.py", "old_path": "tmt/steps/provision/podman.py" }, { "change_type": "MODIFY", "diff": "@@ -60,6 +60,19 @@ class Report(tmt.steps.Step):\n self.status('done')\n self.save()\n \n+ def requires(self):\n+ \"\"\"\n+ Packages required by all enabled report plugins\n+\n+ Return a list of packages which need to be installed on the\n+ provisioned guest so that the full report can be successfully\n+ generated. 
Used by the prepare step.\n+ \"\"\"\n+ requires = set()\n+ for plugin in self.plugins(classes=ReportPlugin):\n+ requires.update(plugin.requires())\n+ return list(requires)\n+\n \n class ReportPlugin(tmt.steps.Plugin):\n \"\"\" Common parent of report plugins \"\"\"\n", "new_path": "tmt/steps/report/__init__.py", "old_path": "tmt/steps/report/__init__.py" } ]
87125a4b23c37d94d5ab6deeb8e52d95b1bac3ce
teemtee/tmt
null
null
Adjust disabled shell expansion in Common.run() Provide list format directly from internal functions. Fix handover of the common ssh arguments to ansible. Plus just a couple of minor readability adjustments.
[ { "change_type": "MODIFY", "diff": "@@ -421,14 +421,17 @@ class Guest(tmt.utils.Common):\n def _ansible_verbosity(self):\n \"\"\" Prepare verbose level based on the --debug option count \"\"\"\n if self.opt('debug') < 3:\n- return ''\n+ return []\n else:\n- return '-' + (self.opt('debug') - 2) * 'v'\n+ return ['-' + (self.opt('debug') - 2) * 'v']\n \n @staticmethod\n def _ansible_extra_args(extra_args):\n \"\"\" Prepare extra arguments for ansible-playbook\"\"\"\n- return '' if extra_args is None else str(extra_args)\n+ if extra_args is None:\n+ return []\n+ else:\n+ return shlex.split(str(extra_args))\n \n def _ansible_summary(self, output):\n \"\"\" Check the output for ansible result summary numbers \"\"\"\n@@ -458,25 +461,23 @@ class Guest(tmt.utils.Common):\n # Plan environment and variables provided on the command line\n # override environment provided to execute().\n environment.update(self.parent.plan.environment)\n- return {} if not environment else environment\n+ return environment\n \n @staticmethod\n def _export_environment(environment):\n \"\"\" Prepare shell export of environment variables \"\"\"\n- return f'export {\" \".join(tmt.utils.shell_variables(environment))}; ' \\\n- if environment \\\n- else ''\n+ if not environment:\n+ return \"\"\n+ return f'export {\" \".join(tmt.utils.shell_variables(environment))}; '\n \n def ansible(self, playbook, extra_args=None):\n \"\"\" Prepare guest using ansible playbook \"\"\"\n playbook = self._ansible_playbook_path(playbook)\n- verbosity = [self._ansible_verbosity()] \\\n- if self._ansible_verbosity() else []\n stdout, stderr = self.run(\n ['ansible-playbook'] +\n- verbosity +\n- shlex.split(self._ansible_extra_args(extra_args)) +\n- [f'--ssh-common-args=\"{i}\"' for i in self._ssh_options()] +\n+ self._ansible_verbosity() +\n+ self._ansible_extra_args(extra_args) +\n+ [f'--ssh-common-args={self._ssh_options(join=True)}'] +\n ['-i', f'{self._ssh_guest()},', playbook],\n cwd=self.parent.plan.worktree,\n env=self._prepare_environment())\n", "new_path": "tmt/steps/provision/__init__.py", "old_path": "tmt/steps/provision/__init__.py" }, { "change_type": "MODIFY", "diff": "@@ -1,5 +1,3 @@\n-import shlex\n-\n import tmt\n \n \n@@ -56,12 +54,10 @@ class GuestLocal(tmt.Guest):\n def ansible(self, playbook, extra_args=None):\n \"\"\" Prepare localhost using ansible playbook \"\"\"\n playbook = self._ansible_playbook_path(playbook)\n- verbosity = [self._ansible_verbosity()] \\\n- if self._ansible_verbosity() else []\n stdout, stderr = self.run(\n ['sudo', '-E', 'ansible-playbook'] +\n- verbosity +\n- shlex.split(self._ansible_extra_args(extra_args)) +\n+ self._ansible_verbosity() +\n+ self._ansible_extra_args(extra_args) +\n ['-c', 'local', '-i', 'localhost,', playbook],\n env=self._prepare_environment())\n self._ansible_summary(stdout)\n", "new_path": "tmt/steps/provision/local.py", "old_path": "tmt/steps/provision/local.py" }, { "change_type": "MODIFY", "diff": "@@ -1,5 +1,4 @@\n import os\n-import shlex\n \n import click\n \n@@ -157,13 +156,11 @@ class GuestContainer(tmt.Guest):\n \"\"\" Prepare container using ansible playbook \"\"\"\n playbook = self._ansible_playbook_path(playbook)\n # As non-root we must run with podman unshare\n- podman_unshare = 'podman unshare ' if os.geteuid() != 0 else ''\n- verbosity = [self._ansible_verbosity()] \\\n- if self._ansible_verbosity() else []\n+ podman_unshare = ['podman', 'unshare'] if os.geteuid() != 0 else []\n stdout, stderr = self.run(\n- f'{podman_unshare}ansible-playbook'.split() +\n- 
verbosity +\n- shlex.split(self._ansible_extra_args(extra_args)) +\n+ podman_unshare + ['ansible-playbook'] +\n+ self._ansible_verbosity() +\n+ self._ansible_extra_args(extra_args) +\n ['-c', 'podman', '-i', f'{self.container},', playbook],\n cwd=self.parent.plan.worktree,\n env=self._prepare_environment())\n", "new_path": "tmt/steps/provision/podman.py", "old_path": "tmt/steps/provision/podman.py" }, { "change_type": "MODIFY", "diff": "@@ -1265,7 +1265,6 @@ def load_run(run):\n # StructuredField\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n \n-\n class StructuredField(object):\n \"\"\"\n Handling multiple text data in a single text field\n", "new_path": "tmt/utils.py", "old_path": "tmt/utils.py" } ]
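With shell expansion disabled, the helpers return ready-to-concatenate argument lists rather than strings, and user input is split with shlex. A toy illustration of why lists compose cleanly (values invented):

    import shlex

    def verbosity(debug_level):
        # [] when quiet, otherwise a single '-v'/'-vv'/... argument
        return [] if debug_level < 3 else ['-' + (debug_level - 2) * 'v']

    def extra_args(raw):
        # A user-supplied string becomes separate, properly quoted arguments.
        return [] if raw is None else shlex.split(str(raw))

    command = (['ansible-playbook']
               + verbosity(4)
               + extra_args('--extra-vars "x=1"')
               + ['-i', 'localhost,', 'site.yml'])
    print(command)
    # ['ansible-playbook', '-vv', '--extra-vars', 'x=1', '-i', 'localhost,', 'site.yml']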
20ed22e894d89810daaf9a3d979763f9a5d94d74
teemtee/tmt
null
null
Unify Polarion case searching Save and load ids to/from results.yaml. Add constant for extra identification keys. Raise ReportError on TC not found in Polarion.
[ { "change_type": "MODIFY", "diff": "@@ -70,6 +70,9 @@ SECTIONS_HEADINGS = {\n 'Cleanup': ['<h1>Cleanup</h1>']\n }\n \n+# Extra keys used for identification in Result class\n+EXTRA_RESULT_IDENTIFICATION_KEYS = ['extra-nitrate', 'extra-task']\n+\n \n #\n # fmf id types\n@@ -2593,13 +2596,17 @@ class Result:\n self.note = data.get('note')\n self.duration = data.get('duration')\n if test:\n- self.id = test.node.get(\n- 'id', test.node.get(\n- 'extra-nitrate', test.node.get(\n- 'extra-task', '')))\n+ # Saving identifiable information for each test case so we can match them\n+ # to Polarion/Nitrate/other cases and report run results there\n+ self.ids = {tmt.identifier.ID_KEY: test.id}\n+ for key in EXTRA_RESULT_IDENTIFICATION_KEYS:\n+ self.ids[key] = test.node.get(key)\n interpret = test.result or 'respect'\n else:\n- self.id = ''\n+ try:\n+ self.ids = data['ids']\n+ except KeyError:\n+ self.ids = {}\n interpret = 'respect'\n \n # Check for valid results\n@@ -2684,6 +2691,8 @@ class Result:\n data['note'] = self.note\n if self.duration:\n data['duration'] = self.duration\n+ if self.ids:\n+ data['ids'] = self.ids\n return data\n \n \n", "new_path": "tmt/base.py", "old_path": "tmt/base.py" }, { "change_type": "MODIFY", "diff": "@@ -304,24 +304,20 @@ def get_polarion_ids(\n return query_result[0].work_item_id, query_result[0].project_id\n \n \n-def get_polarion_case(data: Dict[str, str], preferred_project: Optional[str] = None) -> Any:\n- \"\"\" Get Polarion case through couple different methods \"\"\"\n- import_polarion()\n- polarion_id = 'None'\n- project_id = None\n-\n+def find_polarion_case_ids(\n+ data: Dict[str, str],\n+ preferred_project: Optional[str] = None) -> Tuple[str, Optional[str]]:\n+ \"\"\" Find IDs for Polarion case from data dictionary \"\"\"\n assert PolarionWorkItem\n- assert PolarionTestCase\n- assert PolarionException\n \n # Search by UUID\n if data.get(ID_KEY):\n query_result = PolarionWorkItem.query(\n data.get(ID_KEY), fields=['work_item_id', 'project_id'])\n- polarion_id, project_id = get_polarion_ids(query_result, preferred_project)\n+ return get_polarion_ids(query_result, preferred_project)\n # Search by TCMS Case ID\n extra_nitrate = data.get('extra-nitrate')\n- if not project_id and extra_nitrate:\n+ if extra_nitrate:\n nitrate_case_id_search = re.search(r'\\d+', extra_nitrate)\n if not nitrate_case_id_search:\n raise ConvertError(\n@@ -329,12 +325,23 @@ def get_polarion_case(data: Dict[str, str], preferred_project: Optional[str] = N\n nitrate_case_id = str(int(nitrate_case_id_search.group()))\n query_result = PolarionWorkItem.query(\n f\"tcmscaseid:{nitrate_case_id}\", fields=['work_item_id', 'project_id'])\n- polarion_id, project_id = get_polarion_ids(query_result, preferred_project)\n+ return get_polarion_ids(query_result, preferred_project)\n # Search by extra task\n- if not project_id and data.get('extra-task'):\n+ if data.get('extra-task'):\n query_result = PolarionWorkItem.query(\n data.get('extra-task'), fields=['work_item_id', 'project_id'])\n- polarion_id, project_id = get_polarion_ids(query_result, preferred_project)\n+ return get_polarion_ids(query_result, preferred_project)\n+ return 'None', None\n+\n+\n+def get_polarion_case(data: Dict[str, str], preferred_project: Optional[str] = None) -> Any:\n+ \"\"\" Get Polarion case through couple different methods \"\"\"\n+ import_polarion()\n+\n+ assert PolarionTestCase\n+ assert PolarionException\n+\n+ polarion_id, project_id = find_polarion_case_ids(data, preferred_project)\n \n try:\n polarion_case = 
PolarionTestCase(\n", "new_path": "tmt/export.py", "old_path": "tmt/export.py" }, { "change_type": "MODIFY", "diff": "@@ -42,7 +42,7 @@ class ReportPolarion(tmt.steps.report.ReportPlugin):\n \"\"\" Go through executed tests and report into Polarion \"\"\"\n super().go()\n \n- from tmt.export import get_polarion_ids, import_polarion\n+ from tmt.export import find_polarion_case_ids, import_polarion\n import_polarion()\n from tmt.export import PolarionWorkItem\n assert PolarionWorkItem\n@@ -73,16 +73,16 @@ class ReportPolarion(tmt.steps.report.ReportPlugin):\n '*property[@name=\"polarion-project-span-ids\"]')\n \n for result in self.step.plan.execute.results():\n- if not result.id:\n+ if not result.ids or not any(result.ids.values()):\n raise tmt.utils.ReportError(\n f\"Test Case {result.name} is not exported to Polarion, \"\n \"please run 'tmt tests export --how polarion' on it\")\n- work_item_id, test_project_id = get_polarion_ids(\n- PolarionWorkItem.query(\n- result.id, fields=['work_item_id', 'project_id']))\n+ work_item_id, test_project_id = find_polarion_case_ids(result.ids)\n+\n+ if test_project_id is None:\n+ raise tmt.utils.ReportError(\"Test case missing or not found in Polarion\")\n \n assert work_item_id is not None\n- assert test_project_id is not None\n assert project_span_ids is not None\n \n if test_project_id not in project_span_ids.attrib['value']:\n", "new_path": "tmt/steps/report/polarion.py", "old_path": "tmt/steps/report/polarion.py" } ]
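Each Result now carries a small dictionary of identifiers that the Polarion report plugin later feeds to find_polarion_case_ids(). A sketch of how that mapping is built and checked, assuming tmt's ID_KEY constant is the literal 'id' (the sample values are invented):

    EXTRA_RESULT_IDENTIFICATION_KEYS = ['extra-nitrate', 'extra-task']

    def collect_ids(test_id, node_data):
        """Gather everything that can identify the test case in Polarion."""
        ids = {'id': test_id}
        for key in EXTRA_RESULT_IDENTIFICATION_KEYS:
            ids[key] = node_data.get(key)
        return ids

    ids = collect_ids('1f4e2b9c-example-uuid', {'extra-nitrate': 'TC#0612345'})
    print(ids)                # {'id': '1f4e2b9c-example-uuid', 'extra-nitrate': 'TC#0612345', 'extra-task': None}
    print(any(ids.values()))  # True, so the result can be matched and reported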
c724839dae3bbde4cd102e5e174d1f4886a7abab
teemtee/tmt
null
null
Replace blank "type: ignore" with more specific waivers Using mypy's error codes, waivers can target the actual error, leaving space for other kinds of violations to not be suppressed.
[ { "change_type": "MODIFY", "diff": "@@ -60,3 +60,22 @@ repos:\n hooks:\n - id: yamllint\n files: ^tmt/schemas/.*\\.yaml\n+\n+ # Yet another static analysis - these hooks use regular expressions to\n+ # process Python code, and offer interesting \"metalinters\", checks for\n+ # what we do to appease flake8 and mypy linters.\n+ - repo: https://github.com/pre-commit/pygrep-hooks\n+ rev: v1.9.0\n+ hooks:\n+ # Enforce `noqa` and `type: ignore` to always appear with specific\n+ # error code(s).\n+ - id: python-check-blanket-noqa\n+ - id: python-check-blanket-type-ignore\n+\n+ # Other potentially useful hooks for future consideration:\n+ #\n+ # - id: python-check-mock-methods\n+ # - id: python-no-eval\n+ # - id: python-no-log-warn\n+ # - id: python-use-type-annotations\n+ # - id: text-unicode-replacement-char\n", "new_path": ".pre-commit-config.yaml", "old_path": ".pre-commit-config.yaml" }, { "change_type": "MODIFY", "diff": "@@ -394,7 +394,7 @@ if run_callback is None:\n \n \n # TODO: commands is unknown, needs revisit\n-@run_callback() # type: ignore\n+@run_callback() # type: ignore[misc]\n @click.pass_context\n def finito(\n click_context: click.core.Context,\n@@ -1310,7 +1310,7 @@ if clean_callback is None:\n \n \n # TODO: commands is unknown, needs revisit\n-@clean_callback() # type: ignore\n+@clean_callback() # type: ignore[misc]\n @click.pass_context\n def perform_clean(\n click_context: click.core.Context,\n", "new_path": "tmt/cli.py", "old_path": "tmt/cli.py" }, { "change_type": "MODIFY", "diff": "@@ -66,7 +66,7 @@ def import_nitrate() -> Any:\n except ImportError:\n raise ConvertError(\n \"Install tmt-test-convert to export tests to nitrate.\")\n- except nitrate.NitrateError as error: # type: ignore\n+ except nitrate.NitrateError as error: # type: ignore[union-attr] # nitrate is no longer None\n raise ConvertError(error)\n \n \n", "new_path": "tmt/export.py", "old_path": "tmt/export.py" }, { "change_type": "MODIFY", "diff": "@@ -34,4 +34,4 @@ class FinishAnsible(tmt.steps.finish.FinishPlugin, PrepareAnsible):\n \n # Assigning class methods seems to cause trouble to mypy\n # See also: https://github.com/python/mypy/issues/6700\n- base_command = tmt.steps.finish.FinishPlugin.base_command # type: ignore\n+ base_command = tmt.steps.finish.FinishPlugin.base_command # type: ignore[assignment]\n", "new_path": "tmt/steps/finish/ansible.py", "old_path": "tmt/steps/finish/ansible.py" }, { "change_type": "MODIFY", "diff": "@@ -350,7 +350,7 @@ class GuestTestcloud(tmt.GuestSsh):\n \"\"\"\n \n # TODO: Revisit this `type: ignore` once `Guest` becomes a generic type\n- _data_class = TestcloudGuestData # type: ignore\n+ _data_class = TestcloudGuestData # type: ignore[assignment]\n \n image: str\n image_url: Optional[str]\n@@ -362,8 +362,8 @@ class GuestTestcloud(tmt.GuestSsh):\n \n # Not to be saved, recreated from image_url/instance_name/... 
every\n # time guest is instantiated.\n- _image: Optional['testcloud.image.Image'] = None # type: ignore\n- _instance: Optional['testcloud.instance.Instance'] = None # type: ignore\n+ _image: Optional['testcloud.image.Image'] = None # type: ignore[name-defined]\n+ _instance: Optional['testcloud.instance.Instance'] = None # type: ignore[name-defined]\n \n def _get_url(self, url: str, message: str) -> requests.Response:\n \"\"\" Get url, retry when fails, return response \"\"\"\n", "new_path": "tmt/steps/provision/testcloud.py", "old_path": "tmt/steps/provision/testcloud.py" }, { "change_type": "MODIFY", "diff": "@@ -124,10 +124,10 @@ class ReportJUnit(tmt.steps.report.ReportPlugin):\n try:\n with open(f_path, 'w') as fw:\n if hasattr(junit_xml, 'to_xml_report_file'):\n- junit_xml.to_xml_report_file(fw, [suite]) # type: ignore\n+ junit_xml.to_xml_report_file(fw, [suite]) # type: ignore[union-attr]\n else:\n # For older junit-xml\n- junit_xml.TestSuite.to_file(fw, [suite]) # type: ignore\n+ junit_xml.TestSuite.to_file(fw, [suite]) # type: ignore[union-attr]\n self.info(\"output\", f_path, 'yellow')\n except Exception as error:\n raise tmt.utils.ReportError(\n", "new_path": "tmt/steps/report/junit.py", "old_path": "tmt/steps/report/junit.py" }, { "change_type": "MODIFY", "diff": "@@ -1798,7 +1798,7 @@ class TimeoutHTTPAdapter(requests.adapters.HTTPAdapter):\n \n super().__init__(*args, **kwargs)\n \n- def send( # type: ignore # does not match superclass type on purpose\n+ def send( # type: ignore[override] # does not match superclass type on purpose\n self,\n request: requests.PreparedRequest,\n **kwargs: Any) -> requests.Response:\n@@ -1839,7 +1839,7 @@ class RetryStrategy(requests.packages.urllib3.util.retry.Retry): # type: ignore\n return super().increment(*args, **kwargs)\n \n \n-class retry_session(contextlib.AbstractContextManager): # type: ignore\n+class retry_session(contextlib.AbstractContextManager): # type: ignore[type-arg]\n \"\"\"\n Context manager for requests.Session() with retries and timeout\n \"\"\"\n@@ -2627,7 +2627,7 @@ def git_clone(\n return git_clone(url, destination, common, env, shallow=False)\n \n \n-class updatable_message(contextlib.AbstractContextManager): # type: ignore\n+class updatable_message(contextlib.AbstractContextManager): # type: ignore[type-arg]\n \"\"\" Updatable message suitable for progress-bar-like reporting \"\"\"\n \n def __init__(\n", "new_path": "tmt/utils.py", "old_path": "tmt/utils.py" } ]
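Scoping the waivers to mypy error codes means only the intentional violation is silenced and anything else on the same line still gets reported. A tiny illustration (class names invented):

    class Base:
        def send(self, request: str) -> int:
            return len(request)

    class Adapter(Base):
        # A blanket '# type: ignore' here would hide any error on this line.
        # The scoped form waives only the deliberate signature mismatch:
        def send(self, request: bytes) -> int:  # type: ignore[override]
            return len(request)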
c1cc2852339ba47afae6808023f7f0a31f271050
teemtee/tmt
null
null
Use generator instead of list comprehension with any/all Constructing the whole list is not necessary; both `all` and `any` can short-circuit and return as soon as they spot an invalid item. Trivial and cheap optimization.
[ { "change_type": "MODIFY", "diff": "@@ -1524,8 +1524,8 @@ class Tree(tmt.utils.Common):\n filter_vars.update(bool_vars)\n # Conditions\n try:\n- if not all([fmf.utils.evaluate(condition, cond_vars, node)\n- for condition in conditions]):\n+ if not all(fmf.utils.evaluate(condition, cond_vars, node)\n+ for condition in conditions):\n continue\n except fmf.utils.FilterError:\n # Handle missing attributes as if condition failed\n@@ -1535,8 +1535,8 @@ class Tree(tmt.utils.Common):\n f\"Invalid --condition raised exception: {error}\")\n # Filters\n try:\n- if not all([fmf.utils.filter(filter_, filter_vars, regexp=True)\n- for filter_ in filters]):\n+ if not all(fmf.utils.filter(filter_, filter_vars, regexp=True)\n+ for filter_ in filters):\n continue\n except fmf.utils.FilterError:\n # Handle missing attributes as if filter failed\n@@ -1544,14 +1544,13 @@ class Tree(tmt.utils.Common):\n # Links\n try:\n # Links are in OR relation\n- if links and all([not node.has_link(link_)\n- for link_ in links]):\n+ if links and all(not node.has_link(link_) for link_ in links):\n continue\n except BaseException:\n # Handle broken link as not matching\n continue\n # Exclude\n- if any([node for expr in excludes if re.search(expr, node.name)]):\n+ if any(node for expr in excludes if re.search(expr, node.name)):\n continue\n result.append(node)\n return result\n@@ -1600,7 +1599,7 @@ class Tree(tmt.utils.Common):\n return nodes\n return [\n node for node in nodes\n- if any([re.search(name, node.name) for name in cmd_line_names])]\n+ if any(re.search(name, node.name) for name in cmd_line_names)]\n \n # Append post filter to support option --enabled or --disabled\n if Test._opt('enabled'):\n@@ -1892,14 +1891,14 @@ class Run(tmt.utils.Common):\n \n # Filter plans by name unless specified on the command line\n plan_options = ['names', 'filters', 'conditions', 'links', 'default']\n- if not any([Plan._opt(option) for option in plan_options]):\n+ if not any(Plan._opt(option) for option in plan_options):\n self._plans = [\n plan for plan in self.tree.plans(run=self)\n if plan.name in data['plans']]\n \n # Initialize steps only if not selected on the command line\n step_options = 'all since until after before skip'.split()\n- selected = any([self.opt(option) for option in step_options])\n+ selected = any(self.opt(option) for option in step_options)\n if not selected and not self._context.obj.steps:\n self._context.obj.steps = set(data['steps'])\n \n", "new_path": "tmt/base.py", "old_path": "tmt/base.py" }, { "change_type": "MODIFY", "diff": "@@ -1633,7 +1633,7 @@ def format(\n # Make sure everything is string, prepare list, check for spaces\n value = [str(item) for item in value]\n listed_text = fmf.utils.listed(value)\n- has_spaces = any([item.find(' ') > -1 for item in value])\n+ has_spaces = any(item.find(' ') > -1 for item in value)\n # Use listed output only for short lists without spaces\n if len(listed_text) < width - indent and not has_spaces:\n output += listed_text\n@@ -1649,9 +1649,7 @@ def format(\n elif isinstance(value, str):\n # In 'auto' mode enable wrapping when long lines present\n if wrap == 'auto':\n- wrap = any(\n- [len(line) + indent - 7 > width\n- for line in value.split('\\n')])\n+ wrap = any(len(line) + indent - 7 > width for line in value.split('\\n'))\n if wrap:\n output += (wrap_text(\n value, width=width,\n@@ -2541,7 +2539,7 @@ class DistGitHandler:\n \n def its_me(self, remotes: List[str]) -> bool:\n \"\"\" True if self can work with remotes \"\"\"\n- return 
any([self.remote_substring.search(item) for item in remotes])\n+ return any(self.remote_substring.search(item) for item in remotes)\n \n \n class FedoraDistGit(DistGitHandler):\n", "new_path": "tmt/utils.py", "old_path": "tmt/utils.py" } ]
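The short-circuiting the commit above relies on is easy to see with a small standalone example; `is_valid` is a made-up predicate, not tmt code:

```python
def is_valid(item: int) -> bool:
    print(f"checking {item}")
    return item < 3

items = [1, 2, 5, 7, 9]

# List comprehension: is_valid() runs for every item before all() even starts.
all([is_valid(item) for item in items])

# Generator expression: all() stops at the first False, so 7 and 9 are never checked.
all(is_valid(item) for item in items)
```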
c2eee2af912e34debfcfa8c575835c2f9d1812d2
teemtee/tmt
null
null
Document & correct use of class conversion methods Recently added `{to,from}_{serialized,raw,dict}` methods for various conversions were poorly documented, and sometimes even wrongly used. This patch adds a short summary on when to use them, and fixes some subpar bits around the code.
[ { "change_type": "MODIFY", "diff": "@@ -72,6 +72,113 @@ In a similar way, the ``tree`` property of the ``Tree`` instance\n points to the original ``fmf.Tree`` from which it was initialized.\n \n \n+Class Conversions\n+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n+\n+Various internal objects and classes often need to be converted\n+from their Python nature to data that can be saved, loaded or\n+exported in different form. To facilitate these conversions, three\n+families of helper methods are provided, each with its own set of\n+use cases.\n+\n+``to_spec``/``from_spec``\n+------------------------------------------------------------------\n+\n+This family of methods works with tmt *specification*, i.e. raw\n+user-provided data coming from fmf files describing plans, tests,\n+stories, or from command-line options. ``from_spec()`` shall be\n+called to spawn objects representing the user input, while\n+``to_spec()`` should produce output one could find in fmf files.\n+\n+The default implementation comes from ``tmt.utils.SpecBasedContainer``\n+class, all classes based on user input data should include this\n+class among their bases.\n+\n+.. code-block:: python\n+\n+ # Create an fmf id object from raw data\n+ fmf_id = tmt.base.FmfId.from_spec({'url': ..., 'ref': ...})\n+\n+\n+``to_serialized``/``from_serialized``/``unserialize``\n+------------------------------------------------------------------\n+\n+This family of methods is aiming at runtime objects that may be\n+saved into and loaded from tmt working files, i.e. files tmt uses\n+to store a state in its workdir, like `step.yaml` or `guests.yaml`.\n+\n+Third member of this family, ``unserialize``, is similar to\n+``from_serialized`` - both create an object from its serialized form,\n+only ``unserialize`` is capable of detecting the class to instantiate\n+while for using ``from_serialized``, one must already know which\n+class to work with. ``unserialize`` then uses ``from_serialized``\n+under the hood to do the heavy lifting when correct class is\n+identified.\n+\n+The default implementation comes from ``tmt.utils.SerializableContainer``\n+class, all classes that are being saved and loaded during tmt run\n+should include this class among their bases.\n+\n+See https://en.wikipedia.org/wiki/Serialization for more details\n+on the concept of serialization.\n+\n+.. code-block:: python\n+\n+ # tmt.steps.discover.shell.DiscoverShellData wishes to unserialize its\n+ # `tests` a list of `TestDescription` objects rather than a list of\n+ # dictionaries (the default implementation).\n+ @classmethod\n+ def from_serialized(cls, serialized: Dict[str, Any]) -> 'DiscoverShellData':\n+ obj = super().from_serialized(serialized)\n+\n+ obj.tests = [TestDescription.from_serialized(\n+ serialized_test) for serialized_test in serialized['tests']]\n+\n+ return obj\n+\n+ # A step saving its state...\n+ content: Dict[str, Any] = {\n+ 'status': self.status(),\n+ 'data': [datum.to_serialized() for datum in self.data]\n+ }\n+ self.write('step.yaml', tmt.utils.dict_to_yaml(content))\n+\n+ # ... 
and loading it back.\n+ # Note the use of unserialize(): step data may have been serialized from\n+ # various different classes (derived from tmt.steps.provision.Guest),\n+ # and unserialize() will detect the correct class.\n+ raw_step_data: Dict[Any, Any] = tmt.utils.yaml_to_dict(self.read('step.yaml'))\n+ self.data = [\n+ StepData.unserialize(raw_datum) for raw_datum in raw_step_data['data']\n+ ]\n+\n+\n+``to_dict``\n+------------------------------------------------------------------\n+\n+Very special helper method: its use cases are not related to any\n+input or output data, and most of the time, when in need of\n+iterating over object's keys and/or values, one can use ``keys()``,\n+``values()`` or ``items()`` methods. It is used as a source of data\n+for serialization and validation, but it usually has no use outside\n+of default implementations.\n+\n+.. warning::\n+\n+ If you think of using ``to_dict()``, please, think again and be sure\n+ you know what are you doing. Despite its output being sometimes\n+ perfectly compatible with output of ``to_serialized()`` or ``to_spec()``,\n+ it is not generaly true, and using it instead of proper methods may lead\n+ to unexpected exceptions.\n+\n+.. code-block:: python\n+\n+ # tmt.base.FmfId's specification is basically just a mapping,\n+ # therefore `to_dict()` is good enough to produce a specification.\n+ def to_spec(self) -> Dict[str, Any]:\n+ return self.to_dict()\n+\n+\n Essential Classes\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n \n", "new_path": "docs/classes.rst", "old_path": "docs/classes.rst" }, { "change_type": "MODIFY", "diff": "@@ -94,7 +94,7 @@ _RawFmfId = TypedDict('_RawFmfId', {\n \n # An internal fmf id representation.\n @dataclasses.dataclass\n-class FmfId(tmt.utils.SerializableContainer):\n+class FmfId(tmt.utils.SpecBasedContainer, tmt.utils.SerializableContainer):\n # The list of valid fmf id keys\n keys: ClassVar[List[str]] = ['url', 'ref', 'path', 'name']\n \n@@ -103,19 +103,14 @@ class FmfId(tmt.utils.SerializableContainer):\n path: Optional[str] = None\n name: Optional[str] = None\n \n- def to_dict(self) -> Dict[str, Any]:\n- \"\"\" Return keys and values in the form of a dictionary \"\"\"\n-\n- return dataclasses.asdict(self)\n-\n- def to_raw(self) -> Dict[str, Any]:\n- \"\"\" Return keys and values as if they originated from fmf node \"\"\"\n+ def to_spec(self) -> Dict[str, Any]:\n+ \"\"\" Convert to a form suitable for saving in a specification file \"\"\"\n \n return self.to_dict()\n \n @classmethod\n- def from_dict(cls, raw: _RawFmfId) -> 'FmfId':\n- \"\"\" Construct an :py:class:`FmfId` from given input container \"\"\"\n+ def from_spec(cls, raw: _RawFmfId) -> 'FmfId':\n+ \"\"\" Convert from a specification file or from a CLI option \"\"\"\n \n return FmfId(**{key: raw.get(key, None) for key in cls.keys})\n \n@@ -133,7 +128,7 @@ class FmfId(tmt.utils.SerializableContainer):\n # Simple asdict() is not good enough, fmf does not like keys that exist but are `None`.\n # Don't include those.\n fmf.base.Tree.node({\n- key: value for key, value in self.to_dict().items()\n+ key: value for key, value in self.items()\n if value is not None\n })\n except fmf.utils.GeneralError as error:\n@@ -421,7 +416,7 @@ class Core(tmt.utils.Common):\n # Links.__init__() method - it is tempting to use to_serialized()\n # and from_unserialized(), but we don't use unserialization code\n # when loading saved data back, so we can't go this way. 
Yet.\n- data[key] = cast('Links', value).to_raw()\n+ data[key] = cast('Links', value).to_spec()\n \n else:\n data[key] = value\n@@ -1593,7 +1588,7 @@ class Tree(tmt.utils.Common):\n filters = (filters or []) + list(Test._opt('filters', []))\n conditions = (conditions or []) + list(Test._opt('conditions', []))\n links = (links or []) + [\n- LinkNeedle.from_raw(value)\n+ LinkNeedle.from_spec(value)\n for value in cast(List[str], Test._opt('links', []))\n ]\n excludes = (excludes or []) + list(Test._opt('exclude', []))\n@@ -1650,7 +1645,7 @@ class Tree(tmt.utils.Common):\n filters = (filters or []) + list(Plan._opt('filters', []))\n conditions = (conditions or []) + list(Plan._opt('conditions', []))\n links = (links or []) + [\n- LinkNeedle.from_raw(value)\n+ LinkNeedle.from_spec(value)\n for value in cast(List[str], Plan._opt('links', []))\n ]\n excludes = (excludes or []) + list(Plan._opt('exclude', []))\n@@ -1685,7 +1680,7 @@ class Tree(tmt.utils.Common):\n filters = (filters or []) + list(Story._opt('filters', []))\n conditions = (conditions or []) + list(Story._opt('conditions', []))\n links = (links or []) + [\n- LinkNeedle.from_raw(value)\n+ LinkNeedle.from_spec(value)\n for value in cast(List[str], Story._opt('links', []))\n ]\n excludes = (excludes or []) + list(Story._opt('exclude', []))\n@@ -2479,9 +2474,9 @@ class LinkNeedle:\n target: str = r'.*'\n \n @classmethod\n- def from_raw(cls, value: str) -> 'LinkNeedle':\n+ def from_spec(cls, value: str) -> 'LinkNeedle':\n \"\"\"\n- Create a ``LinkNeedle`` instance from its specification.\n+ Convert from a specification file or from a CLI option\n \n Specification is described in [1], this constructor takes care\n of parsing it into a corresponding ``LinkNeedle`` instance.\n@@ -2521,7 +2516,7 @@ class LinkNeedle:\n \n \n @dataclasses.dataclass\n-class Link:\n+class Link(tmt.utils.SpecBasedContainer):\n \"\"\"\n An internal \"link\" as defined by tmt specification.\n \n@@ -2538,9 +2533,9 @@ class Link:\n note: Optional[str] = None\n \n @classmethod\n- def from_raw(cls, spec: _RawLink) -> 'Link':\n+ def from_spec(cls, spec: _RawLink) -> 'Link':\n \"\"\"\n- Create a ``Link`` instance from its specification.\n+ Convert from a specification file or from a CLI option\n \n Specification is described in [1], this constructor takes care\n of parsing it into a corresponding ``Link`` instance.\n@@ -2570,7 +2565,7 @@ class Link:\n if len(relations) == 0:\n return Link(\n relation=Link.DEFAULT_RELATIONSHIP,\n- target=FmfId.from_dict(cast(_RawFmfId, spec)),\n+ target=FmfId.from_spec(cast(_RawFmfId, spec)),\n note=note)\n \n # More relations than 1 are a hard error, only 1 is allowed.\n@@ -2599,25 +2594,25 @@ class Link:\n if isinstance(raw_target, str):\n return Link(relation=relation, target=raw_target, note=note)\n \n- return Link(relation=relation, target=FmfId.from_dict(raw_target), note=note)\n+ return Link(relation=relation, target=FmfId.from_spec(raw_target), note=note)\n \n- def to_raw(self) -> _RawLinkRelation:\n+ def to_spec(self) -> _RawLinkRelation:\n \"\"\"\n- Convert this link into a corresponding link specification.\n+ Convert to a form suitable for saving in a specification file\n \n No matter what the original specification was, every link will\n generate the very same type of specification, the ``relation: target``\n one.\n \n Output of this method is fully compatible with specification, and when\n- given to :py:meth:`from_raw`, it shall create a ``Link`` instance\n+ given to :py:meth:`from_spec`, it shall create a ``Link`` 
instance\n with the same properties as the original one.\n \n [1] https://tmt.readthedocs.io/en/stable/spec/core.html#link\n \"\"\"\n \n spec = {\n- self.relation: self.target.to_dict() if isinstance(\n+ self.relation: self.target.to_spec() if isinstance(\n self.target,\n FmfId) else self.target}\n \n@@ -2627,7 +2622,7 @@ class Link:\n return spec\n \n \n-class Links:\n+class Links(tmt.utils.SpecBasedContainer):\n \"\"\"\n Collection of links in tests, plans and stories.\n \n@@ -2668,11 +2663,11 @@ class Links:\n specs = data if isinstance(data, list) else [data]\n \n # Ensure that each link is in the canonical form\n- self._links = [Link.from_raw(spec) for spec in specs]\n+ self._links = [Link.from_spec(spec) for spec in specs]\n \n- def to_raw(self) -> List[_RawLinkRelation]:\n+ def to_spec(self) -> List[_RawLinkRelation]:\n \"\"\"\n- Convert this collection of links into a corresponding specification.\n+ Convert to a form suitable for saving in a specification file\n \n No matter what the original specification was, every link will\n generate the very same type of specification, the ``relation: target``\n@@ -2686,7 +2681,7 @@ class Links:\n \"\"\"\n \n return [\n- link.to_raw()\n+ link.to_spec()\n for link in self._links\n ]\n \n", "new_path": "tmt/base.py", "old_path": "tmt/base.py" }, { "change_type": "MODIFY", "diff": "@@ -85,7 +85,10 @@ T = TypeVar('T', bound='StepData')\n \n \n @dataclasses.dataclass\n-class StepData(tmt.utils.NormalizeKeysMixin, tmt.utils.SerializableContainer):\n+class StepData(\n+ tmt.utils.SpecBasedContainer,\n+ tmt.utils.NormalizeKeysMixin,\n+ tmt.utils.SerializableContainer):\n \"\"\"\n Keys necessary to describe, create, save and restore a step.\n \n@@ -110,13 +113,8 @@ class StepData(tmt.utils.NormalizeKeysMixin, tmt.utils.SerializableContainer):\n order: int = tmt.utils.DEFAULT_PLUGIN_ORDER\n summary: Optional[str] = None\n \n- def to_raw(self) -> _RawStepData:\n- \"\"\"\n- Serialize step data instance to a raw representation.\n-\n- The returned value can be used to recreate step data when given\n- to :py:meth:`from_raw`.\n- \"\"\"\n+ def to_spec(self) -> _RawStepData: # type: ignore[override]\n+ \"\"\" Convert to a form suitable for saving in a specification file \"\"\"\n \n return cast(_RawStepData, {\n tmt.utils.key_to_option(key): value\n@@ -135,10 +133,12 @@ class StepData(tmt.utils.NormalizeKeysMixin, tmt.utils.SerializableContainer):\n pass\n \n @classmethod\n- def from_raw(cls: Type[T], raw_data: _RawStepData, logger: tmt.utils.Common) -> T:\n- \"\"\"\n- Unserialize step data instance from its a raw representation.\n- \"\"\"\n+ def from_spec( # type: ignore[override]\n+ cls: Type[T],\n+ raw_data: _RawStepData,\n+ logger: tmt.utils.Common\n+ ) -> T:\n+ \"\"\" Convert from a specification file or from a CLI option \"\"\"\n \n cls.pre_normalization(raw_data, logger)\n \n@@ -406,7 +406,7 @@ class Step(tmt.utils.Common):\n # form for _normalize_data().\n if datum.how == how:\n self.debug(f' compatible: {datum}', level=4)\n- _raw_data.append(datum.to_raw())\n+ _raw_data.append(datum.to_spec())\n \n # Mismatch, throwing away, replacing with new `how` - but we can keep the name.\n else:\n@@ -718,7 +718,7 @@ class BasePlugin(Phase, metaclass=PluginIndex):\n # normalization in the process.\n if raw_data is not None:\n try:\n- data = plugin_data_class.from_raw(raw_data, step)\n+ data = plugin_data_class.from_spec(raw_data, step)\n \n except Exception as exc:\n raise tmt.utils.GeneralError(\n", "new_path": "tmt/steps/__init__.py", "old_path": 
"tmt/steps/__init__.py" }, { "change_type": "MODIFY", "diff": "@@ -439,7 +439,7 @@ class DiscoverFmf(tmt.steps.discover.DiscoverPlugin):\n \n # Check the 'test --link' option first, then from discover\n raw_link_needles = cast(List[str], tmt.Test._opt('links', []) or self.get('link', []))\n- link_needles = [tmt.base.LinkNeedle.from_raw(\n+ link_needles = [tmt.base.LinkNeedle.from_spec(\n raw_needle) for raw_needle in raw_link_needles]\n \n for link_needle in link_needles:\n", "new_path": "tmt/steps/discover/fmf.py", "old_path": "tmt/steps/discover/fmf.py" }, { "change_type": "MODIFY", "diff": "@@ -21,7 +21,10 @@ T = TypeVar('T', bound='TestDescription')\n \n \n @dataclasses.dataclass\n-class TestDescription(tmt.utils.NormalizeKeysMixin, tmt.utils.SerializableContainer):\n+class TestDescription(\n+ tmt.utils.SpecBasedContainer,\n+ tmt.utils.NormalizeKeysMixin,\n+ tmt.utils.SerializableContainer):\n \"\"\"\n Keys necessary to describe a shell-based test.\n \n@@ -105,30 +108,40 @@ class TestDescription(tmt.utils.NormalizeKeysMixin, tmt.utils.SerializableContai\n \n # Our own implementation, parent uses `name` and `how`, and tests don't have any `how`.\n @classmethod\n- def from_raw(cls: Type[T], raw_data: Dict[str, Any], logger: tmt.utils.Common) -> T:\n- \"\"\"\n- Unserialize step data instance from its a raw representation.\n- \"\"\"\n+ def from_spec( # type: ignore[override]\n+ cls: Type[T],\n+ raw_data: Dict[str, Any],\n+ logger: tmt.utils.Common\n+ ) -> T:\n+ \"\"\" Convert from a specification file or from a CLI option \"\"\"\n \n data = cls(name=raw_data['name'], test=raw_data['test'])\n data._load_keys(raw_data, cls.__name__, logger)\n \n return data\n \n- def to_raw(self) -> Dict[str, Any]:\n- data = super().to_dict()\n- data['link'] = self.link.to_raw() if self.link else None\n+ def to_spec(self) -> Dict[str, Any]:\n+ \"\"\" Convert to a form suitable for saving in a specification file \"\"\"\n+\n+ data = super().to_spec()\n+ data['link'] = self.link.to_spec() if self.link else None\n data['require'] = [\n- require if isinstance(require, str) else require.to_raw()\n+ require if isinstance(require, str) else require.to_spec()\n for require in self.require\n ]\n \n return data\n \n def to_serialized(self) -> Dict[str, Any]:\n+ \"\"\" Convert to a form suitable for saving in a file \"\"\"\n+\n data = super().to_serialized()\n \n- data['link'] = self.link.to_raw() if self.link else None\n+ # Using `to_spec()` on purpose: `Links` does not provide serialization\n+ # methods, because specification of links is already good enough. 
We\n+ # can use existing `to_spec()` method, and undo it with a simple\n+ # `Links(...)` call.\n+ data['link'] = self.link.to_spec() if self.link else None\n data['require'] = [\n require if isinstance(require, str) else require.to_serialized()\n for require in self.require\n@@ -138,6 +151,8 @@ class TestDescription(tmt.utils.NormalizeKeysMixin, tmt.utils.SerializableContai\n \n @classmethod\n def from_serialized(cls, serialized: Dict[str, Any]) -> 'TestDescription':\n+ \"\"\" Convert from a serialized form loaded from a file \"\"\"\n+\n obj = super().from_serialized(serialized)\n obj.link = tmt.base.Links(serialized['link'])\n obj.require = [\n@@ -154,9 +169,11 @@ class DiscoverShellData(tmt.steps.discover.DiscoverStepData):\n \n def _normalize_tests(self, value: List[Dict[str, Any]]\n ) -> List[TestDescription]:\n- return [TestDescription.from_raw(raw_datum, tmt.utils.Common()) for raw_datum in value]\n+ return [TestDescription.from_spec(raw_datum, tmt.utils.Common()) for raw_datum in value]\n \n def to_serialized(self) -> Dict[str, Any]:\n+ \"\"\" Convert to a form suitable for saving in a file \"\"\"\n+\n serialized = super().to_serialized()\n \n serialized['tests'] = [test.to_serialized() for test in self.tests]\n@@ -165,6 +182,8 @@ class DiscoverShellData(tmt.steps.discover.DiscoverStepData):\n \n @classmethod\n def from_serialized(cls, serialized: Dict[str, Any]) -> 'DiscoverShellData':\n+ \"\"\" Convert from a serialized form loaded from a file \"\"\"\n+\n obj = super().from_serialized(serialized)\n \n obj.tests = [TestDescription.from_serialized(\n@@ -250,7 +269,7 @@ class DiscoverShell(tmt.steps.discover.DiscoverPlugin):\n if dist_git_source:\n data.environment['TMT_SOURCE_DIR'] = sourcedir\n # Create a simple fmf node, adjust its name\n- tests.child(data.name, data.to_raw())\n+ tests.child(data.name, data.to_spec())\n \n # Symlink tests directory to the plan work tree\n testdir = os.path.join(self.workdir, \"tests\")\n", "new_path": "tmt/steps/discover/shell.py", "old_path": "tmt/steps/discover/shell.py" }, { "change_type": "MODIFY", "diff": "@@ -295,7 +295,7 @@ class ProvisionTestcloud(tmt.steps.provision.ProvisionPlugin):\n raise tmt.utils.SpecificationError(\n f\"Value '{value}' cannot be converted to int for '{int_key}' attribute.\")\n \n- for key, value in data.to_dict().items():\n+ for key, value in data.items():\n if key == 'memory':\n self.info('memory', f\"{value} MB\", 'green')\n elif key == 'disk':\n", "new_path": "tmt/steps/provision/testcloud.py", "old_path": "tmt/steps/provision/testcloud.py" }, { "change_type": "MODIFY", "diff": "@@ -1328,19 +1328,16 @@ def option_to_key(option: str) -> str:\n return option.replace('-', '_')\n \n \n-SerializableContainerDerivedType = TypeVar(\n- 'SerializableContainerDerivedType',\n- bound='SerializableContainer')\n-\n-\n @dataclasses.dataclass\n-class SerializableContainer:\n- \"\"\"\n- A mixin class for objects that may be saved in files and restored later\n- \"\"\"\n+class DataContainer:\n+ \"\"\" A base class for objects that have keys and values \"\"\"\n \n def to_dict(self) -> Dict[str, Any]:\n- \"\"\" Return keys and values in the form of a dictionary \"\"\"\n+ \"\"\"\n+ Convert to a mapping.\n+\n+ See :ref:`classes.rst` for more details.\n+ \"\"\"\n \n return dataclasses.asdict(self)\n \n@@ -1365,7 +1362,7 @@ class SerializableContainer:\n yield from self.to_dict().items()\n \n @classmethod\n- def default(cls, key: str, default: Any = None) -> Any:\n+ def _default(cls, key: str, default: Any = None) -> Any:\n \"\"\"\n 
Return a default value for a given key.\n \n@@ -1416,6 +1413,48 @@ class SerializableContainer:\n \n return True\n \n+\n+SpecBasedContainerT = TypeVar('SpecBasedContainerT', bound='SpecBasedContainer')\n+\n+\n+class SpecBasedContainer(DataContainer):\n+ @classmethod\n+ def from_spec(cls: Type[SpecBasedContainerT], spec: Any) -> SpecBasedContainerT:\n+ \"\"\"\n+ Convert from a specification file or from a CLI option\n+\n+ See :ref:`classes.rst` for more details.\n+\n+ See :py:meth:`to_spec` for its counterpart.\n+ \"\"\"\n+\n+ raise NotImplementedError()\n+\n+ def to_spec(self) -> Dict[str, Any]:\n+ \"\"\"\n+ Convert to a form suitable for saving in a specification file\n+\n+ See :ref:`classes.rst` for more details.\n+\n+ See :py:meth:`from_spec` for its counterpart.\n+ \"\"\"\n+\n+ return self.to_dict()\n+\n+\n+SerializableContainerDerivedType = TypeVar(\n+ 'SerializableContainerDerivedType',\n+ bound='SerializableContainer')\n+\n+\n+@dataclasses.dataclass\n+class SerializableContainer(DataContainer):\n+ \"\"\" A mixin class for saving and loading objects \"\"\"\n+\n+ @classmethod\n+ def default(cls, key: str, default: Any = None) -> Any:\n+ return cls._default(key, default=default)\n+\n #\n # Moving data between containers and objects owning them\n #\n@@ -1449,10 +1488,9 @@ class SerializableContainer:\n \n def to_serialized(self) -> Dict[str, Any]:\n \"\"\"\n- Return keys and values in the form allowing later reconstruction.\n+ Convert to a form suitable for saving in a file.\n \n- Used to transform container into a structure one can save in a\n- YAML file, and restore it later.\n+ See :ref:`classes.rst` for more details.\n \n See :py:meth:`from_serialized` for its counterpart.\n \"\"\"\n@@ -1474,10 +1512,9 @@ class SerializableContainer:\n cls: Type[SerializableContainerDerivedType],\n serialized: Dict[str, Any]) -> SerializableContainerDerivedType:\n \"\"\"\n- Recreate container from its serialized form.\n+ Convert from a serialized form loaded from a file.\n \n- Used to transform data read from a YAML file into the original\n- container.\n+ See :ref:`classes.rst` for more details.\n \n See :py:meth:`to_serialized` for its counterpart.\n \"\"\"\n@@ -1493,7 +1530,7 @@ class SerializableContainer:\n def unserialize(serialized: Dict[str, Any]\n ) -> SerializableContainerDerivedType:\n \"\"\"\n- Recreate container from its serialized form.\n+ Convert from a serialized form loaded from a file.\n \n Similar to :py:meth:`from_serialized`, but this method knows\n nothing about container's class, and will locate the correct\n@@ -1505,6 +1542,10 @@ class SerializableContainer:\n containers when their classes are not know to the code.\n Restoring such containers requires inspection of serialized data\n and dynamic imports of modules as needed.\n+\n+ See :ref:`classes.rst` for more details.\n+\n+ See :py:meth:`to_serialized` for its counterpart.\n \"\"\"\n \n from tmt.plugins import import_member\n", "new_path": "tmt/utils.py", "old_path": "tmt/utils.py" } ]
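A condensed sketch of the convention documented above: raw specification data goes through `from_spec`/`to_spec`, while runtime state uses `to_serialized`/`from_serialized`. The class below is illustrative only; it borrows names from the diff but is not the actual tmt implementation:

```python
import dataclasses
from typing import Any, Dict, Optional


@dataclasses.dataclass
class FmfIdSketch:
    """Simplified stand-in for tmt.base.FmfId."""

    url: Optional[str] = None
    ref: Optional[str] = None

    @classmethod
    def from_spec(cls, raw: Dict[str, Any]) -> "FmfIdSketch":
        # Raw user input coming from an fmf file or a CLI option
        return cls(url=raw.get("url"), ref=raw.get("ref"))

    def to_spec(self) -> Dict[str, Any]:
        # Form suitable for writing back into a specification file
        return dataclasses.asdict(self)


fmf_id = FmfIdSketch.from_spec({"url": "https://example.com/repo", "ref": "main"})
assert fmf_id.to_spec() == {"url": "https://example.com/repo", "ref": "main"}
```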
a43cac0f667f588a9311adfda36f54bd71468843
teemtee/tmt
null
null
Fix default `framework`, remove old execution methods Set the default test `framework` value to `shell`. Remove support for deprecated execution methods as the special handling of them is not compatible with the default value for the `framework` key. Clean up this obsolete part and adjust related test coverage.
[ { "change_type": "MODIFY", "diff": "@@ -1,20 +1,14 @@\n+discover:\n+ how: fmf\n provision:\n how: local\n+execute:\n+ how: tmt\n \n /shell:\n- discover:\n- how: fmf\n+ discover+:\n test: shell\n \n- /tmt:\n- execute:\n- how: shell.tmt\n-\n /beakerlib:\n- discover:\n- how: fmf\n+ discover+:\n test: beakerlib\n-\n- /tmt:\n- execute:\n- how: beakerlib.tmt\n", "new_path": "tests/execute/basic/data/plan.fmf", "old_path": "tests/execute/basic/data/plan.fmf" }, { "change_type": "MODIFY", "diff": "@@ -10,6 +10,7 @@\n test: a-weird-command\n \n /beakerlib:\n+ framework: beakerlib\n /good:\n summary: Passing test\n test: ./good.sh\n", "new_path": "tests/execute/basic/data/test.fmf", "old_path": "tests/execute/basic/data/test.fmf" }, { "change_type": "MODIFY", "diff": "@@ -26,52 +26,53 @@ rlJournalStart\n rlPhaseEnd\n done\n \n- # NOTE: regular expressions below are slightly less trivial. The order of keys in results.yaml\n- # is not fixed, if parser decides, they may swap positions, therefore expressions try to match\n- # a *multiline section* of results.yaml that should include test and whatever we're grepping\n- # for. Non-greedy matching is used to limit to just a single result in results.yaml, otherwise\n- # grep might not reveal a `result` key missing in a particular results because it'd exist in\n- # the *next* result in the file.\n- for method in tmt; do\n- rlPhaseStartTest \"Check shell.$method results\"\n- results=\"$run/plan/shell/$method/execute/results.yaml\"\n-\n- rlRun \"grep -Pzo '(?sm)^/test/shell/good:$.*?^ *result: pass$' $results\" 0 \"Check pass\"\n- check_duration \"$results\" \"good:\"\n-\n- rlRun \"grep -Pzo '(?sm)^/test/shell/weird:$.*?^ *result: error$' $results\" 0 \"Check error\"\n- check_duration \"$results\" \"weird:\"\n-\n- rlRun \"grep -Pzo '(?sm)^/test/shell/bad:$.*?^ *result: fail$' $results\" 0 \"Check fail\"\n- check_duration \"$results\" \"bad:\"\n-\n- # Check log file exists\n- rlRun \"grep -Pzo '(?sm)^/test/shell/good:$.*?^ +log:$.*?^ +- data/.+?$' $results | grep output.txt\" \\\n- 0 \"Check output.txt log exists in $results\"\n- rlPhaseEnd\n+ # NOTE: regular expressions below are slightly less trivial. The\n+ # order of keys in results.yaml is not fixed, if parser decides,\n+ # they may swap positions, therefore expressions try to match a\n+ # *multiline section* of results.yaml that should include test and\n+ # whatever we're grepping for. 
Non-greedy matching is used to limit\n+ # to just a single result in results.yaml, otherwise grep might not\n+ # reveal a `result` key missing in a particular results because it'd\n+ # exist in the *next* result in the file.\n \n- rlPhaseStartTest \"Check beakerlib.$method results\"\n- results=\"$run/plan/beakerlib/$method/execute/results.yaml\"\n+ rlPhaseStartTest \"Check shell results\"\n+ results=\"$run/plan/shell/execute/results.yaml\"\n \n- rlRun \"grep -Pzo '(?sm)^/test/beakerlib/good:$.*?^ *result: pass$' $results\" 0 \"Check pass\"\n- check_duration \"$results\" \"good:\"\n+ rlRun \"grep -Pzo '(?sm)^/test/shell/good:$.*?^ *result: pass$' $results\" 0 \"Check pass\"\n+ check_duration \"$results\" \"good:\"\n \n- rlRun \"grep -Pzo '(?sm)^/test/beakerlib/need:$.*?^ *result: warn$' $results\" 0 \"Check warn\"\n- check_duration \"$results\" \"need:\"\n+ rlRun \"grep -Pzo '(?sm)^/test/shell/weird:$.*?^ *result: error$' $results\" 0 \"Check error\"\n+ check_duration \"$results\" \"weird:\"\n \n- rlRun \"grep -Pzo '(?sm)^/test/beakerlib/weird:$.*?^ *result: error$' $results\" 0 \"Check error\"\n- check_duration \"$results\" \"weird:\"\n+ rlRun \"grep -Pzo '(?sm)^/test/shell/bad:$.*?^ *result: fail$' $results\" 0 \"Check fail\"\n+ check_duration \"$results\" \"bad:\"\n \n- rlRun \"grep -Pzo '(?sm)^/test/beakerlib/bad:$.*?^ *result: fail$' $results\" 0 \"Check fail\"\n- check_duration \"$results\" \"bad:\"\n+ # Check log file exists\n+ rlRun \"grep -Pzo '(?sm)^/test/shell/good:$.*?^ +log:$.*?^ +- data/.+?$' $results | grep output.txt\" \\\n+ 0 \"Check output.txt log exists in $results\"\n+ rlPhaseEnd\n \n- # Check log files exist\n- rlRun \"grep -Pzo '(?sm)^/test/beakerlib/good:$.*^ +log:$.*?^ +- data/.+?$' $results | grep output.txt\" \\\n- 0 \"Check output.txt log exists\"\n- rlRun \"grep -Pzo '(?sm)^/test/beakerlib/good:$.*^ +log:$.*?^ +- data/.+?$' $results | grep journal.txt\" \\\n- 0 \"Check journal.txt log exists\"\n- rlPhaseEnd\n- done\n+ rlPhaseStartTest \"Check beakerlib results\"\n+ results=\"$run/plan/beakerlib/execute/results.yaml\"\n+\n+ rlRun \"grep -Pzo '(?sm)^/test/beakerlib/good:$.*?^ *result: pass$' $results\" 0 \"Check pass\"\n+ check_duration \"$results\" \"good:\"\n+\n+ rlRun \"grep -Pzo '(?sm)^/test/beakerlib/need:$.*?^ *result: warn$' $results\" 0 \"Check warn\"\n+ check_duration \"$results\" \"need:\"\n+\n+ rlRun \"grep -Pzo '(?sm)^/test/beakerlib/weird:$.*?^ *result: error$' $results\" 0 \"Check error\"\n+ check_duration \"$results\" \"weird:\"\n+\n+ rlRun \"grep -Pzo '(?sm)^/test/beakerlib/bad:$.*?^ *result: fail$' $results\" 0 \"Check fail\"\n+ check_duration \"$results\" \"bad:\"\n+\n+ # Check log files exist\n+ rlRun \"grep -Pzo '(?sm)^/test/beakerlib/good:$.*^ +log:$.*?^ +- data/.+?$' $results | grep output.txt\" \\\n+ 0 \"Check output.txt log exists\"\n+ rlRun \"grep -Pzo '(?sm)^/test/beakerlib/good:$.*^ +log:$.*?^ +- data/.+?$' $results | grep journal.txt\" \\\n+ 0 \"Check journal.txt log exists\"\n+ rlPhaseEnd\n \n rlPhaseStartCleanup\n rlRun \"popd\"\n", "new_path": "tests/execute/basic/test.sh", "old_path": "tests/execute/basic/test.sh" }, { "change_type": "MODIFY", "diff": "@@ -9,9 +9,9 @@ rlJournalStart\n \n rlPhaseStartTest\n tmt=\"tmt run -ar provision -h local\"\n- rlRun \"$tmt execute -h shell -s true\" 0 \"Good test\"\n- rlRun \"$tmt execute -h shell -s false\" 1 \"Bad test\"\n- rlRun \"$tmt execute -h shell -s fooo\" 2 \"Weird test\"\n+ rlRun \"$tmt execute -h tmt -s true\" 0 \"Good test\"\n+ rlRun \"$tmt execute -h tmt -s false\" 1 \"Bad test\"\n+ 
rlRun \"$tmt execute -h tmt -s fooo\" 2 \"Weird test\"\n rlRun \"$tmt\" 3 \"No tests\"\n rlPhaseEnd\n \n", "new_path": "tests/execute/codes/test.sh", "old_path": "tests/execute/codes/test.sh" }, { "change_type": "MODIFY", "diff": "@@ -1,19 +1,14 @@\n /beakerlib:\n+ summary: This is a beakerlib test\n+ framework: beakerlib\n test: ./beakerlib.sh\n \n- /without-framework:\n- summary: This is a beakerlib test (without framework)\n-\n- /with-framework:\n- summary: This is a beakerlib test (with framework)\n- framework: beakerlib\n-\n /shell:\n test: ./shell.sh\n \n- /without-framework:\n- summary: This is a shell test (without framework)\n+ /default:\n+ summary: This is a shell test (default framework)\n \n- /with-framework:\n- summary: This is a shell test (with framework)\n+ /explicit:\n+ summary: This is a shell test (explicit framework)\n framework: shell\n", "new_path": "tests/execute/framework/data/tests.fmf", "old_path": "tests/execute/framework/data/tests.fmf" }, { "change_type": "MODIFY", "diff": "@@ -2,7 +2,6 @@ summary: Verify test framework selection\n description:\n Check test framework selection in the L1 metadata.\n Combining different frameworks under a single plan.\n- Ensure backward-compatibility for the old execution methods.\n duration: 30m\n tier: 3\n tag-: [container]\n", "new_path": "tests/execute/framework/main.fmf", "old_path": "tests/execute/framework/main.fmf" }, { "change_type": "MODIFY", "diff": "@@ -3,61 +3,21 @@\n \n rlJournalStart\n rlPhaseStartSetup\n- rlRun \"tmp=\\$(mktemp -d)\" 0 \"Creating tmp directory\"\n rlRun \"pushd data\"\n- rlRun \"set -o pipefail\"\n rlPhaseEnd\n \n- # Old tests without framework\n- sh1=\"/tests/shell/without-framework\"\n- bl1=\"/tests/beakerlib/without-framework\"\n-\n- # New tests with framework\n- sh2=\"/tests/shell/with-framework\"\n- bl2=\"/tests/beakerlib/with-framework\"\n-\n- # Common tmt command line\n- tmt=\"tmt run -avvvdddr\"\n-\n- # Old execute methods\n- for execute in tmt; do\n- for framework in shell beakerlib; do\n- rlPhaseStartTest \"Old execute methods ($framework.$execute)\"\n- rlRun \"$tmt execute -h $framework.$execute \\\n- 2>&1 | tee output\" 0,2\n- rlAssertGrep 'execute method has been deprecated' output\n- # Default framework should be picked from the old method\n- rlAssertGrep \"Execute '$sh1' as a '$framework' test.\" output\n- rlAssertGrep \"Execute '$bl1' as a '$framework' test.\" output\n- # Explicit framework in test should always override default\n- rlAssertGrep \"Execute '$sh2' as a 'shell' test.\" output\n- rlAssertGrep \"Execute '$bl2' as a 'beakerlib' test.\" output\n- # Beakerlib tests should always install beakerlib\n- if [[ $framework == beakerlib ]]; then\n- rlAssertGrep \"dnf install.*beakerlib\" output\n- fi\n- rlAssertGrep \"warn.*execute.*deprecated\" output\n- rlPhaseEnd\n- done\n- done\n-\n- # New execute methods\n- for execute in tmt; do\n- rlPhaseStartTest \"Combine shell and beakerlib ($execute)\"\n- rlRun \"$tmt execute --how $execute 2>&1 | tee output\"\n- # The default test framework should be 'shell'\n- rlAssertGrep \"Execute '$sh1' as a 'shell' test.\" output\n- rlAssertGrep \"Execute '$bl1' as a 'shell' test.\" output\n- # Explicit framework in test should always override default\n- rlAssertGrep \"Execute '$sh2' as a 'shell' test.\" output\n- rlAssertGrep \"Execute '$bl2' as a 'beakerlib' test.\" output\n- # Beakerlib dependency should be detected from framework\n- rlAssertGrep \"dnf install.*beakerlib\" output\n- rlPhaseEnd\n- done\n+ rlPhaseStartTest \"Combine shell and 
beakerlib\"\n+ rlRun -s \"tmt run -avvvvdddr\"\n+ # The default test framework should be 'shell'\n+ rlAssertGrep \"Execute '/tests/shell/default' as a 'shell' test.\" $rlRun_LOG\n+ # Explicit framework in test should always override default\n+ rlAssertGrep \"Execute '/tests/shell/explicit' as a 'shell' test.\" $rlRun_LOG\n+ rlAssertGrep \"Execute '/tests/beakerlib' as a 'beakerlib' test.\" $rlRun_LOG\n+ # Beakerlib dependency should be detected from framework\n+ rlAssertGrep \"dnf install.*beakerlib\" $rlRun_LOG\n+ rlPhaseEnd\n \n rlPhaseStartCleanup\n rlRun \"popd\"\n- rlRun \"rm -r $tmp\" 0 \"Removing tmp directory\"\n rlPhaseEnd\n rlJournalEnd\n", "new_path": "tests/execute/framework/test.sh", "old_path": "tests/execute/framework/test.sh" }, { "change_type": "MODIFY", "diff": "@@ -4,7 +4,7 @@\n rlJournalStart\n rlPhaseStartSetup\n rlRun \"tmp=\\$(mktemp -d)\" 0 \"Creating tmp directory\"\n- rlRun \"tmt='tmt run -ar provision -h local execute -h shell -s '\"\n+ rlRun \"tmt='tmt run -ar provision -h local execute -h tmt -s '\"\n rlRun \"pushd $tmp\"\n rlRun \"set -o pipefail\"\n rlRun \"tmt init -t mini\"\n", "new_path": "tests/login/when.sh", "old_path": "tests/login/when.sh" }, { "change_type": "MODIFY", "diff": "@@ -10,7 +10,7 @@ rlJournalStart\n rlPhaseEnd\n \n rlPhaseStartTest \"No Metadata\"\n- rlRun \"tmt run -r $options execute -h shell -s 'touch $tmp/no-metadata'\"\n+ rlRun \"tmt run -r $options execute -h tmt -s 'touch $tmp/no-metadata'\"\n rlAssertExists \"$tmp/no-metadata\"\n rlPhaseEnd\n \n", "new_path": "tests/run/default/test.sh", "old_path": "tests/run/default/test.sh" }, { "change_type": "MODIFY", "diff": "@@ -12,14 +12,14 @@ rlJournalStart\n rlPhaseStartTest \"All steps at once (don't remove)\"\n rlRun \"tmt run --id $run --all \\\n provision --how local \\\n- execute --how shell --script true\"\n+ execute --how tmt --script true\"\n rlAssertExists $run\n rlPhaseEnd\n \n rlPhaseStartTest \"All steps at once (remove)\"\n rlRun \"tmt run --id $run --all --remove \\\n provision --how local \\\n- execute --how shell --script true\"\n+ execute --how tmt --script true\"\n rlAssertNotExists $run\n rlPhaseEnd\n \n@@ -27,7 +27,7 @@ rlJournalStart\n rlRun \"tmt run --id $run --remove \\\n discover \\\n provision --how local \\\n- execute --how shell --script true\"\n+ execute --how tmt --script true\"\n rlAssertExists $run\n rlRun \"tmt run --last report\"\n rlAssertExists $run\n", "new_path": "tests/run/remove/test.sh", "old_path": "tests/run/remove/test.sh" }, { "change_type": "MODIFY", "diff": "@@ -28,7 +28,7 @@ def main(argc, argv):\n verify(test01, 'test01', 'component', [])\n verify(test01, 'test01', 'test', 'true')\n verify(test01, 'test01', 'path', '/')\n- verify(test01, 'test01', 'framework', None)\n+ verify(test01, 'test01', 'framework', 'shell')\n verify(test01, 'test01', 'manual', False)\n verify(test01, 'test01', 'require', [])\n verify(test01, 'test01', 'recommend', [])\n@@ -49,7 +49,7 @@ def main(argc, argv):\n verify(test02, 'test02', 'component', [])\n verify(test02, 'test02', 'test', 'true')\n verify(test02, 'test02', 'path', '/')\n- verify(test02, 'test02', 'framework', None)\n+ verify(test02, 'test02', 'framework', 'shell')\n verify(test02, 'test02', 'manual', False)\n verify(test02, 'test02', 'require', [])\n verify(test02, 'test02', 'recommend', [])\n", "new_path": "tests/test/export/parse.py", "old_path": "tests/test/export/parse.py" }, { "change_type": "MODIFY", "diff": "@@ -16,7 +16,7 @@ rlJournalStart\n rlAssertNotGrep \"id\" $rlRun_LOG\n rlAssertGrep 
\"test ./test.sh\" $rlRun_LOG\n rlAssertGrep \"path /tests\" $rlRun_LOG\n- rlAssertNotGrep \"framework\" $rlRun_LOG\n+ rlAssertGrep \"framework shell\" $rlRun_LOG\n rlAssertGrep \"manual false\" $rlRun_LOG\n rlAssertNotGrep \"require\" $rlRun_LOG\n rlAssertNotGrep \"recommend\" $rlRun_LOG\n", "new_path": "tests/test/show/test.sh", "old_path": "tests/test/show/test.sh" }, { "change_type": "MODIFY", "diff": "@@ -602,7 +602,7 @@ class Test(Core):\n # Test execution data\n test: str\n path: Optional[str] = None\n- framework: Optional[str] = None\n+ framework: str = \"shell\"\n manual: bool = False\n require: List[Require] = []\n recommend: List[Require] = []\n", "new_path": "tmt/base.py", "old_path": "tmt/base.py" }, { "change_type": "MODIFY", "diff": "@@ -266,10 +266,6 @@ class Discover(tmt.steps.Step):\n for test in phase.tests():\n test.name = f\"{prefix}{test.name}\"\n test.path = f\"/{phase.safe_name}{test.path}\"\n- # Use the default test framework if not defined in L1\n- # FIXME remove when we drop the old execution methods\n- if not test.framework:\n- test.framework = self.plan.execute._framework\n # Update test environment with plan environment\n test.environment.update(self.plan.environment)\n self._tests.append(test)\n", "new_path": "tmt/steps/discover/__init__.py", "old_path": "tmt/steps/discover/__init__.py" }, { "change_type": "MODIFY", "diff": "@@ -407,14 +407,6 @@ class ExecutePlugin(tmt.steps.Plugin):\n class Execute(tmt.steps.Step):\n \"\"\"\n Run tests using the specified executor.\n-\n- Note that the old execution methods 'shell.tmt' and 'beakerlib.tmt'\n- have been deprecated and the backward-compatible support for them\n- will be dropped in tmt-2.0.\n-\n- Use the new L1 metadata attribute 'framework' instead to specify\n- which test framework should be used for execution. 
This allows to\n- combine tests using different test frameworks in a single plan.\n \"\"\"\n \n # Internal executor is the default implementation\n@@ -428,40 +420,6 @@ class Execute(tmt.steps.Step):\n # List of Result() objects representing test results\n self._results: List[tmt.Result] = []\n \n- # Default test framework and mapping old methods\n- # FIXME remove when we drop the old execution methods\n- self._framework = DEFAULT_FRAMEWORK\n-\n- def _map_old_methods(self, data: List[tmt.steps.StepData]) -> None:\n- \"\"\" Map the old execute methods in a backward-compatible way \"\"\"\n- how = data[0].how\n- matched = re.search(r\"^(shell|beakerlib)(\\.tmt)?$\", how)\n- if not matched:\n- return\n- # Show the old method deprecation warning to users\n- self.warn(f\"The '{how}' execute method has been deprecated.\")\n- # Map the old syntax to the appropriate executor\n- # shell, beakerlib ---> tmt\n- # shell.tmt, beakerlib.tmt ---> tmt\n- how = 'tmt'\n- self.warn(f\"Use 'how: {how}' in the execute step instead (L2).\")\n- data[0].how = how\n- # Store shell or beakerlib as the default test framework\n- # (used when the framework is not defined in the L1 metadata)\n- framework = matched.group(1)\n- self.warn(f\"Set 'framework: {framework}' in test metadata (L1).\")\n- self._framework = framework\n- self.warn(\"Support for old methods will be dropped in tmt-2.0.\")\n-\n- def _normalize_data(self, raw_data: List[tmt.steps._RawStepData]) -> List[tmt.steps.StepData]:\n- data = super()._normalize_data(raw_data)\n-\n- # Map old methods now if there is no run (and thus no wake up)\n- # TODO: if not self.plan.my_run:\n- self._map_old_methods(data)\n-\n- return data\n-\n def load(self) -> None:\n \"\"\" Load test results \"\"\"\n super().load()\n@@ -474,9 +432,7 @@ class Execute(tmt.steps.Step):\n def save(self) -> None:\n \"\"\" Save test results to the workdir \"\"\"\n super().save()\n- results = {\n- result.name: result.to_serialized() for result in self.results()\n- }\n+ results = {result.name: result.to_serialized() for result in self.results()}\n self.write('results.yaml', tmt.utils.dict_to_yaml(results))\n \n def wake(self) -> None:\n", "new_path": "tmt/steps/execute/__init__.py", "old_path": "tmt/steps/execute/__init__.py" }, { "change_type": "MODIFY", "diff": "@@ -3,7 +3,7 @@ import json\n import os\n import sys\n import time\n-from typing import Any, List, Optional, cast\n+from typing import Any, List, Optional\n \n import click\n \n@@ -29,8 +29,6 @@ class ExecuteInternalData(tmt.steps.execute.ExecuteStepData):\n \n \n @tmt.steps.provides_method('tmt')\n-@tmt.steps.provides_method('shell.tmt', order=80)\n-@tmt.steps.provides_method('beakerlib.tmt', order=80)\n class ExecuteInternal(tmt.steps.execute.ExecutePlugin):\n \"\"\"\n Use the internal tmt executor to execute tests\n@@ -365,8 +363,4 @@ class ExecuteInternal(tmt.steps.execute.ExecutePlugin):\n \n def requires(self) -> List[str]:\n \"\"\" Return list of required packages \"\"\"\n- # FIXME Remove when we drop support for the old execution methods\n- # FIXME: cast() - https://github.com/teemtee/tmt/issues/1373\n- return ['beakerlib'] if cast(\n- tmt.steps.execute.Execute,\n- self.step)._framework == 'beakerlib' else []\n+ return []\n", "new_path": "tmt/steps/execute/internal.py", "old_path": "tmt/steps/execute/internal.py" } ]
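The effect of the new default can be sketched with a stripped-down dataclass; `TestSketch` is hypothetical and only mirrors the one key the commit changes:

```python
import dataclasses


@dataclasses.dataclass
class TestSketch:
    """Hypothetical stand-in for tmt.base.Test, reduced to the relevant keys."""

    test: str
    framework: str = "shell"  # new default; previously Optional[str] = None


# A test without an explicit 'framework' in its L1 metadata now runs as shell...
assert TestSketch(test="./shell.sh").framework == "shell"

# ...while an explicit value still wins, so beakerlib tests keep working.
assert TestSketch(test="./beakerlib.sh", framework="beakerlib").framework == "beakerlib"
```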
134b710615faa45529757fd23eee012b2e8398a1
teemtee/tmt
null
null
Drop various guest `wake()` methods in favor of parent class They all do the same thing anyway: instantiate a guest class with the given data, passing parent and name down the stream. A base class can do all of this.
[ { "change_type": "MODIFY", "diff": "@@ -1037,6 +1037,9 @@ class ProvisionPlugin(tmt.steps.GuestlessPlugin):\n # List of all supported methods aggregated from all plugins of the same step.\n _supported_methods: List[tmt.steps.Method] = []\n \n+ # TODO: Generics would provide a better type, https://github.com/teemtee/tmt/issues/1437\n+ _guest: Optional[Guest] = None\n+\n @classmethod\n def base_command(\n cls,\n@@ -1083,6 +1086,11 @@ class ProvisionPlugin(tmt.steps.GuestlessPlugin):\n \"\"\"\n super().wake()\n \n+ if data is not None:\n+ guest = self._guest_class(data=data, name=self.name, parent=self.step)\n+ guest.wake()\n+ self._guest = guest\n+\n def guest(self) -> Optional[Guest]:\n \"\"\"\n Return provisioned guest\n", "new_path": "tmt/steps/provision/__init__.py", "old_path": "tmt/steps/provision/__init__.py" }, { "change_type": "MODIFY", "diff": "@@ -523,15 +523,6 @@ class ProvisionArtemis(tmt.steps.provision.ProvisionPlugin):\n ),\n ] + super().options(how)\n \n- # FIXME: ignore - https://github.com/teemtee/tmt/issues/1437\n- def wake(self, data: Optional[ArtemisGuestData] = None) -> None: # type: ignore[override]\n- \"\"\" Wake up the plugin, process data, apply options \"\"\"\n-\n- super().wake(data=data)\n-\n- if data:\n- self._guest = GuestArtemis(data=data, name=self.name, parent=self.step)\n-\n def go(self) -> None:\n \"\"\" Provision the guest \"\"\"\n super().go()\n", "new_path": "tmt/steps/provision/artemis.py", "old_path": "tmt/steps/provision/artemis.py" }, { "change_type": "MODIFY", "diff": "@@ -8,7 +8,6 @@ import tmt\n import tmt.steps\n import tmt.steps.provision\n import tmt.utils\n-from tmt.steps.provision import GuestSshData\n \n DEFAULT_USER = \"root\"\n \n@@ -79,13 +78,6 @@ class ProvisionConnect(tmt.steps.provision.ProvisionPlugin):\n help='Password for login into the guest system.'),\n ] + super().options(how)\n \n- # FIXME: ignore - https://github.com/teemtee/tmt/issues/1437\n- def wake(self, data: Optional[GuestSshData] = None) -> None: # type: ignore[override]\n- \"\"\" Wake up the plugin, process data, apply options \"\"\"\n- super().wake(data=data)\n- if data:\n- self._guest = tmt.GuestSsh(data=data, name=self.name, parent=self.step)\n-\n def go(self) -> None:\n \"\"\" Prepare the connection \"\"\"\n super().go()\n", "new_path": "tmt/steps/provision/connect.py", "old_path": "tmt/steps/provision/connect.py" }, { "change_type": "MODIFY", "diff": "@@ -121,12 +121,6 @@ class ProvisionLocal(tmt.steps.provision.ProvisionPlugin):\n # Guest instance\n _guest = None\n \n- def wake(self, data: Optional[tmt.steps.provision.GuestData] = None) -> None:\n- \"\"\" Wake up the plugin, process data, apply options \"\"\"\n- super().wake(data=data)\n- if data:\n- self._guest = GuestLocal(data=data, name=self.name, parent=self.step)\n-\n def go(self) -> None:\n \"\"\" Provision the container \"\"\"\n super().go()\n", "new_path": "tmt/steps/provision/local.py", "old_path": "tmt/steps/provision/local.py" }, { "change_type": "MODIFY", "diff": "@@ -256,15 +256,6 @@ class ProvisionPodman(tmt.steps.provision.ProvisionPlugin):\n \n return super().default(option, default=default)\n \n- def wake(self, data: Optional[tmt.steps.provision.GuestData] = None) -> None:\n- \"\"\" Wake up the plugin, process data, apply options \"\"\"\n- super().wake(data=data)\n- # Wake up podman instance\n- if data:\n- guest = GuestContainer(data=data, name=self.name, parent=self.step)\n- guest.wake()\n- self._guest = guest\n-\n def go(self) -> None:\n \"\"\" Provision the container \"\"\"\n 
super().go()\n", "new_path": "tmt/steps/provision/podman.py", "old_path": "tmt/steps/provision/podman.py" }, { "change_type": "MODIFY", "diff": "@@ -615,17 +615,6 @@ class ProvisionTestcloud(tmt.steps.provision.ProvisionPlugin):\n help=\"What architecture to virtualize, host arch by default.\"),\n ] + super().options(how)\n \n- # FIXME: ignore - https://github.com/teemtee/tmt/issues/1437\n- def wake(self, data: Optional[TestcloudGuestData] = None) -> None: # type: ignore[override]\n- \"\"\" Wake up the plugin, process data, apply options \"\"\"\n- super().wake(data=data)\n-\n- # Wake up testcloud instance\n- if data:\n- guest = GuestTestcloud(data=data, name=self.name, parent=self.step)\n- guest.wake()\n- self._guest = guest\n-\n def go(self) -> None:\n \"\"\" Provision the testcloud instance \"\"\"\n super().go()\n", "new_path": "tmt/steps/provision/testcloud.py", "old_path": "tmt/steps/provision/testcloud.py" } ]
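The refactoring above is the classic template-method move: the base class owns the shared wake-up code and subclasses only supply the guest class to instantiate. A self-contained sketch under simplified, hypothetical names (not the real tmt classes):

```python
from typing import Any, Dict, Optional, Type


class GuestSketch:
    def __init__(self, data: Dict[str, Any], name: str, parent: Any) -> None:
        self.data, self.name, self.parent = data, name, parent

    def wake(self) -> None:
        print(f"waking {self.name}")


class ProvisionPluginSketch:
    """Base class: the only place that instantiates and wakes the guest."""

    _guest_class: Type[GuestSketch] = GuestSketch
    _guest: Optional[GuestSketch] = None

    def __init__(self, name: str, step: Any = None) -> None:
        self.name, self.step = name, step

    def wake(self, data: Optional[Dict[str, Any]] = None) -> None:
        if data is not None:
            guest = self._guest_class(data=data, name=self.name, parent=self.step)
            guest.wake()
            self._guest = guest


class ProvisionPodmanSketch(ProvisionPluginSketch):
    # No wake() override needed any more - only the guest class would differ.
    _guest_class = GuestSketch


ProvisionPodmanSketch("podman").wake(data={"image": "fedora"})
```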
7b18ddcc6dd7f3f708e0681411033839abc0a203
uma-pi1/kge
null
null
Fix resuming of SOBOL generator during ax search This is done by fixing the seed and regenerating trials already performed.
[ { "change_type": "MODIFY", "diff": "@@ -113,10 +113,18 @@ class AutoSearchJob(SearchJob):\n self.trial_ids.append(trial_id)\n self.parameters.append(parameters)\n self.results.append(None)\n+ self.config.log(\n+ \"Created trial {} with parameters: {}\".format(\n+ trial_no, parameters\n+ )\n+ )\n else:\n # use the trial of a resumed run of this job\n parameters, trial_id = self.register_trial(self.parameters[trial_no])\n self.trial_ids.append(trial_id)\n+ self.config.log(\n+ \"Resumed trial {} with parameters: {}\".format(trial_no, parameters)\n+ )\n \n # create job for trial\n if trial_id is not None:\n", "new_path": "kge/job/auto_search.py", "old_path": "kge/job/auto_search.py" }, { "change_type": "MODIFY", "diff": "@@ -38,16 +38,19 @@ class AxSearchJob(AutoSearchJob):\n enforce_num_arms=True,\n ),\n GenerationStep(\n- model=Models.GPEI, num_arms=-1, recommended_max_parallelism=3,\n- model_gen_kwargs=\n- {'fixed_features':\n- ObservationFeatures(\n- parameters={\n- kv['name']:kv['value']\n- for kv in self.config.get(\"ax_search.fixed_parameters\")\n- }\n- )\n- }\n+ model=Models.GPEI,\n+ num_arms=-1,\n+ recommended_max_parallelism=3,\n+ model_gen_kwargs={\n+ \"fixed_features\": ObservationFeatures(\n+ parameters={\n+ kv[\"name\"]: kv[\"value\"]\n+ for kv in self.config.get(\n+ \"ax_search.fixed_parameters\"\n+ )\n+ }\n+ )\n+ },\n ),\n ],\n )\n@@ -61,33 +64,38 @@ class AxSearchJob(AutoSearchJob):\n objective_name=\"metric_value\",\n minimize=False,\n parameter_constraints=self.config.get(\"ax_search.parameter_constraints\"),\n-\n )\n self.config.log(\n- \"ax search initialized with {}\".format(\n- self.ax_client.generation_strategy\n- )\n+ \"ax search initialized with {}\".format(self.ax_client.generation_strategy)\n )\n \n- # By default, ax first uses a Sobol strategy for a certain number of arms,\n- # and is maybe followed by Bayesian Optimization. If we resume this job,\n- # some of the Sobol arms may have already been generated. The corresponding\n- # arms will be registered later (when this job's run method is executed),\n- # but here we already change the generation strategy to take account of\n- # these configurations.\n- num_generated = len(self.parameters)\n- if num_generated > 0:\n- old_curr = self.ax_client.generation_strategy._curr\n- new_num_arms = max(0, old_curr.num_arms - num_generated)\n- new_curr = old_curr._replace(num_arms=new_num_arms)\n- self.ax_client.generation_strategy._curr = new_curr\n- self.config.log(\n- \"Reduced number of arms for first generation step of \"\n- + \"ax_client from {} to {} due to prior data.\".format(\n- old_curr.num_arms, new_curr.num_arms\n- )\n+ # Make sure sobol models are resumed correctly\n+ if self.ax_client.generation_strategy._curr.model == Models.SOBOL:\n+ # Fix seed for sobol. We do this by generating the model right away (instead\n+ # of automatically once first trial is generated).\n+ self.ax_client.generation_strategy._set_current_model(\n+ experiment=self.ax_client.experiment, data=None, seed=0\n )\n \n+ # Regenerate and drop SOBOL arms already generated. 
Since we fixed the seed,\n+ # we will skip exactly the arms already generated in the job being resumed.\n+ num_generated = len(self.parameters)\n+ if num_generated > 0:\n+ num_sobol_generated = min(\n+ self.ax_client.generation_strategy._curr.num_arms, num_generated\n+ )\n+ for i in range(num_sobol_generated):\n+ generator_run = self.ax_client.generation_strategy.gen(\n+ experiment=self.ax_client.experiment\n+ )\n+ # self.config.log(\"Skipped parameters: {}\".format(generator_run.arms))\n+ self.config.log(\n+ \"Skipped {} of {} Sobol trials due to prior data.\".format(\n+ num_sobol_generated,\n+ self.ax_client.generation_strategy._curr.num_arms,\n+ )\n+ )\n+\n def register_trial(self, parameters=None):\n trial_id = None\n try:\n", "new_path": "kge/job/ax_search.py", "old_path": "kge/job/ax_search.py" } ]
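The resume trick above does not depend on Ax specifics: any generator that is deterministic under a fixed seed can be fast-forwarded by regenerating and discarding the draws a previous run already consumed. A plain `random.Random` stands in for the Sobol model in this sketch:

```python
import random

# Original run: seed fixed to 0, three trials drawn before the job stopped.
rng = random.Random(0)
previous_trials = [rng.random() for _ in range(3)]

# Resumed run: fix the same seed, regenerate and drop the three known draws,
# so the next draw is exactly the trial the interrupted run would have made.
rng = random.Random(0)
for _ in range(len(previous_trials)):
    rng.random()           # skipped - already evaluated in the previous run
next_trial = rng.random()  # first genuinely new trial
```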
2906ef3bda60ab0611c5b49b2b303d7864245812
fledge-iot/fledge
null
null
Use a private dict for bearer tokens of registered servers
[ { "change_type": "MODIFY", "diff": "@@ -45,9 +45,9 @@ class ServiceRecord(object):\n # TODO: tell allowed service status?\n pass\n \n- __slots__ = ['_id', '_name', '_type', '_protocol', '_address', '_port', '_management_port', '_status', '_bearer_token']\n+ __slots__ = ['_id', '_name', '_type', '_protocol', '_address', '_port', '_management_port', '_status']\n \n- def __init__(self, s_id, s_name, s_type, s_protocol, s_address, s_port, m_port, s_bearer_token=None):\n+ def __init__(self, s_id, s_name, s_type, s_protocol, s_address, s_port, m_port, s_startup_token=None):\n self._id = s_id\n self._name = s_name\n self._type = self.valid_type(s_type) # check with ServiceRecord.Type, if not a valid type raise error\n@@ -58,12 +58,11 @@ class ServiceRecord(object):\n self._port = int(s_port)\n self._management_port = int(m_port)\n self._status = ServiceRecord.Status.Running\n- self._bearer_token = s_bearer_token if s_bearer_token is not None else None\n \n def __repr__(self):\n template = 'service instance id={s._id}: <{s._name}, type={s._type}, protocol={s._protocol}, ' \\\n 'address={s._address}, service port={s._port}, management port={s._management_port}, ' \\\n- 'status={s._status}, token={s._bearer_token}>'\n+ 'status={s._status}>'\n return template.format(s=self)\n \n def __str__(self):\n", "new_path": "python/fledge/common/service_record.py", "old_path": "python/fledge/common/service_record.py" }, { "change_type": "MODIFY", "diff": "@@ -1113,8 +1113,7 @@ class Server:\n service_management_port = data.get('management_port', None)\n service_protocol = data.get('protocol', 'http')\n token = data.get('token', None)\n- # TODO: if token then check single use token verification; if bad then return 4XX\n- # FOGL-5144\n+\n if not (service_name.strip() or service_type.strip() or service_address.strip()\n or service_management_port.strip() or not service_management_port.isdigit()):\n raise web.HTTPBadRequest(reason='One or more values for type/name/address/management port missing')\n@@ -1126,6 +1125,7 @@ class Server:\n if not isinstance(service_management_port, int):\n raise web.HTTPBadRequest(reason='Service management port can be a positive integer only')\n \n+ # If token then check single use token verification; if bad then return 4XX\n if token is not None:\n if not isinstance(token, str):\n msg = 'Token can be a string only'\n@@ -1177,15 +1177,8 @@ class Server:\n SERVICE_JWT_SECRET,\n SERVICE_JWT_ALGORITHM).decode(\"utf-8\") if token is not None else \"\"\n \n- # Find service name in registry and update the bearer token for that service\n- svc_record = ServiceRegistry.get(name=service_name)\n- svc = svc_record[0]\n- obj = ServiceRecord(s_id=svc._id, s_name=svc._name, s_type=svc._type, s_port=svc._port,\n- m_port=svc._management_port, s_address=svc._address, s_protocol=svc._protocol,\n- s_bearer_token=bearer_token)\n- for idx, item in enumerate(ServiceRegistry._registry):\n- if getattr(item, \"_name\") == service_name:\n- ServiceRegistry._registry[idx] = obj\n+ # Add the bearer token for that service being registered\n+ ServiceRegistry.addBearerToken(service_name, bearer_token)\n \n # Prepare response JSON\n _response = {\n@@ -1698,6 +1691,8 @@ class Server:\n \n Authorization header must contain the Bearer token to verify\n No post data\n+\n+ Note: token will be verified for the service name in token claim 'sub'\n \"\"\"\n \n auth_header = request.headers.get('Authorization', None)\n@@ -1713,17 +1708,11 @@ class Server:\n bearer_token = parts[1]\n \n if bearer_token is not None:\n- # Check 
input token exists in system\n- foundToken = False\n- services_list = ServiceRegistry.all()\n- for service in services_list:\n- t = service._bearer_token if service._bearer_token is not None else \"\"\n- if t == bearer_token:\n- foundToken = True\n- break\n- # Raise error if token does not exists\n- if foundToken == False:\n- msg = 'token does not exist in system'\n+ claims = cls.validate_token(bearer_token)\n+ # Check input token exists in system for the service name given in claims['sub']\n+ foundToken = ServiceRegistry.getBearerToken(claims['sub'])\n+ if foundToken is None:\n+ msg = 'service bearer token does not exist in system'\n raise web.HTTPBadRequest(reason=msg, body=json.dumps({\"error\": msg}))\n \n # Validate existing token\n@@ -1762,7 +1751,7 @@ class Server:\n Authorization header must contain the Bearer token\n No post data\n \n- Note: token will be refresh for the service it belongs\n+ Note: token will be refreshed for the service it belongs to\n \"\"\"\n auth_header = request.headers.get('Authorization', None)\n if auth_header is None:\n@@ -1778,33 +1767,29 @@ class Server:\n \n try:\n claims = cls.validate_token(bearer_token)\n- services_list = ServiceRegistry.all()\n- for service in services_list:\n- if service._token == bearer_token and service._name == claims['sub']:\n- if claims.get('error') is None:\n- # Expiration set to now + delta\n- claims['exp'] = int(time.time()) + SERVICE_JWT_EXP_DELTA_SECONDS\n- bearer_token = jwt.encode(claims,\n- SERVICE_JWT_SECRET,\n- SERVICE_JWT_ALGORITHM).decode(\"utf-8\")\n- ret = {'bearer_token' : bearer_token}\n-\n- # Find service name in registry and update the bearer token for that service\n- obj = ServiceRecord(s_id=service._id,\n- s_name=service._name,\n- s_type=service._type,\n- s_port=service._port,\n- m_port=service._management_port,\n- s_address=service._address,\n- s_protocol=service._protocol,\n- s_bearer_token=bearer_token)\n- for idx, item in enumerate(ServiceRegistry._registry):\n- if getattr(item, \"_name\") == service._name:\n- ServiceRegistry._registry[idx] = obj\n-\n- return web.json_response(ret)\n-\n- msg = 'service authentication failed for service'\n+ if claims.get('error') is None:\n+ foundToken = ServiceRegistry.getBearerToken(claims['sub'])\n+ if foundToken is None:\n+ msg = \"service '\" + str(claims['sub']) + \"' not registered\"\n+ raise web.HTTPBadRequest(reason=msg, body=json.dumps({\"error\": msg}))\n+\n+ if foundToken != bearer_token:\n+ msg = \"bearer token does not belong to service '\" + str(claims['sub']) + \"'\"\n+ raise web.HTTPBadRequest(reason=msg, body=json.dumps({\"error\": msg}))\n+\n+ # Expiration set to now + delta\n+ claims['exp'] = int(time.time()) + SERVICE_JWT_EXP_DELTA_SECONDS\n+ bearer_token = jwt.encode(claims,\n+ SERVICE_JWT_SECRET,\n+ SERVICE_JWT_ALGORITHM).decode(\"utf-8\")\n+\n+ # Replace bearer_token for the service\n+ ServiceRegistry.addBearerToken(claims['sub'], bearer_token)\n+ ret = {'bearer_token' : bearer_token}\n+\n+ return web.json_response(ret)\n+\n+ msg = 'Failed to parse bearer token'\n raise web.HTTPBadRequest(reason=msg, body=json.dumps({\"error\": msg}))\n \n except Exception as e:\n", "new_path": "python/fledge/services/core/server.py", "old_path": "python/fledge/services/core/server.py" }, { "change_type": "MODIFY", "diff": "@@ -28,6 +28,9 @@ class ServiceRegistry:\n # Startup tokens to pass to service or tasks being started\n _startupTokens = dict()\n \n+ # Bearer token for the registered services\n+ _bearerTokens = dict()\n+\n # INFO - level 20\n _logger = 
logger.setup(__name__, level=20)\n \n@@ -49,6 +52,14 @@ class ServiceRegistry:\n \n return True\n \n+ @classmethod\n+ def addBearerToken(cls, service_name, bearer_token):\n+ cls._bearerTokens[service_name] = bearer_token\n+\n+ @classmethod\n+ def getBearerToken(cls, service_name):\n+ return cls._bearerTokens.get(service_name, None)\n+\n @classmethod\n def register(cls, name, s_type, address, port, management_port, protocol='http', token=None):\n \"\"\" registers the service instance\n@@ -118,6 +129,8 @@ class ServiceRegistry:\n services[0]._status = service_status\n cls._remove_from_scheduler_records(service_name)\n \n+ cls._bearerTokens.pop(service_name, None)\n+\n # Remove interest registry records, if any\n interest_recs = InterestRegistry().get(microservice_uuid=service_id)\n for interest_rec in interest_recs:\n", "new_path": "python/fledge/services/core/service_registry/service_registry.py", "old_path": "python/fledge/services/core/service_registry/service_registry.py" } ]
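The diff above moves bearer-token storage off individual ServiceRecord instances and into a class-level dictionary on ServiceRegistry, keyed by service name, with the token dropped again when the service is unregistered. A minimal sketch of that pattern follows; the addBearerToken/getBearerToken names match the diff, but the surrounding class and the ownership check are simplified assumptions rather than the actual Fledge implementation.

class ServiceRegistry:
    # Bearer tokens for currently registered services, keyed by service name
    _bearerTokens = dict()

    @classmethod
    def addBearerToken(cls, service_name, bearer_token):
        # Register, or replace, the token issued to a service
        cls._bearerTokens[service_name] = bearer_token

    @classmethod
    def getBearerToken(cls, service_name):
        # None means no token is known for this service
        return cls._bearerTokens.get(service_name, None)

    @classmethod
    def unregister(cls, service_name):
        # Forget the token when the service goes away
        cls._bearerTokens.pop(service_name, None)


def check_token_owner(claims, bearer_token):
    # The 'sub' claim names the service the token was issued to
    known = ServiceRegistry.getBearerToken(claims['sub'])
    if known is None:
        raise ValueError("service '%s' not registered" % claims['sub'])
    if known != bearer_token:
        raise ValueError("bearer token does not belong to service '%s'" % claims['sub'])

Keeping the mapping on the class rather than on each record means a token refresh only updates one dictionary entry instead of rebuilding and re-inserting a whole ServiceRecord, which is what the removed code in the diff had to do.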
926d3e6dbe16c21403a5f4fa83f066ecdff40c67
choderalab/yank
null
null
Move CLI imports into functions for speed. To keep the CLI from taking a long time to start because it imports everything up front, especially when the user mistypes a command that docopt then has to reject, import statements for the CLI functions have been moved into the functions themselves instead of the module header.
[ { "change_type": "MODIFY", "diff": "@@ -13,15 +13,7 @@ Analyze YANK output file.\n # MODULE IMPORTS\n # =============================================================================================\n \n-import io\n-import re\n-import os\n-import pickle\n-\n-from simtk import unit\n-\n-import pkg_resources\n-from .. import utils, analyze, mpi\n+# Module imports handled in individual functions since CLI should be faster to boot up\n \n # =============================================================================================\n # COMMAND-LINE INTERFACE\n@@ -109,6 +101,11 @@ General Options:\n \n \n def dispatch(args):\n+\n+ import os\n+ import pickle\n+ from .. import utils, analyze, mpi\n+\n utils.config_root_logger(args['--verbose'])\n \n if args['report']:\n@@ -154,6 +151,9 @@ def dispatch(args):\n \n \n def extract_analyzer_kwargs(args, quantities_as_strings=False):\n+\n+ import simtk.unit as unit\n+\n \"\"\"Return a dictionary with the keyword arguments to pass to the analyzer.\"\"\"\n analyzer_kwargs = {}\n if args['--skipunbiasing']:\n@@ -172,6 +172,10 @@ def extract_analyzer_kwargs(args, quantities_as_strings=False):\n \n \n def dispatch_extract_trajectory(args):\n+\n+ import os\n+ from .. import analyze\n+\n # Paths\n output_path = args['--trajectory']\n nc_path = args['--netcdf']\n@@ -214,6 +218,12 @@ def dispatch_extract_trajectory(args):\n \n def dispatch_report(args):\n \n+ import io\n+ import os\n+ import re\n+ import pkg_resources\n+ from .. import analyze\n+\n # Check modules for render\n store = args['--store']\n yaml_input = args['--yaml']\n", "new_path": "Yank/commands/analyze.py", "old_path": "Yank/commands/analyze.py" }, { "change_type": "MODIFY", "diff": "@@ -13,9 +13,7 @@ Set up and run YANK calculation from script.\n # GLOBAL IMPORTS\n # =============================================================================================\n \n-import os\n-from ..experiment import ExperimentBuilder\n-\n+# Module imports handled in individual functions since CLI should be faster to boot up\n \n # =============================================================================================\n # COMMAND-LINE INTERFACE\n@@ -67,6 +65,10 @@ def dispatch(args):\n Command-line arguments from docopt.\n \n \"\"\"\n+\n+ import os\n+ from ..experiment import ExperimentBuilder\n+\n override = None\n if args['--override']: # Is False for None and [] (empty list)\n over_opts = args['--override']\n", "new_path": "Yank/commands/script.py", "old_path": "Yank/commands/script.py" }, { "change_type": "MODIFY", "diff": "@@ -13,15 +13,7 @@ Run YANK self tests after installation.\n # MODULE IMPORTS\n # =============================================================================================\n \n-import doctest\n-import pkgutil\n-import subprocess\n-import re\n-\n-from .. import version\n-from . import platforms\n-import simtk.openmm as mm\n-\n+# Module imports handled in individual functions since CLI should be faster to boot up\n \n # =============================================================================================\n # COMMAND-LINE INTERFACE\n@@ -48,6 +40,7 @@ General Options:\n # COMMAND DISPATCH\n # =============================================================================================\n \n+\n class LicenseError(Exception):\n \"\"\"Error raised by a missing License.\"\"\"\n pass\n@@ -55,6 +48,14 @@ class LicenseError(Exception):\n \n def dispatch(args):\n \n+ import re\n+ import doctest\n+ import pkgutil\n+ import subprocess\n+ import simtk.openmm as mm\n+ from .. 
import version\n+ from . import platforms\n+\n # Determine verbosity in advance\n # TODO: Figure out how to get -v back in to command and allow -vv and -vvv\n # nosetests: -v == --verbosity=2\n", "new_path": "Yank/commands/selftest.py", "old_path": "Yank/commands/selftest.py" }, { "change_type": "MODIFY", "diff": "@@ -13,11 +13,7 @@ Query output files for quick status.\n # MODULE IMPORTS\n # =============================================================================================\n \n-import operator\n-import itertools\n-import collections\n-\n-from .. import experiment\n+# Module imports handled in individual functions since CLI should be faster to boot up\n \n # =============================================================================================\n # COMMAND-LINE INTERFACE\n@@ -62,8 +58,12 @@ def find_contiguous_ids(job_ids):\n The job ids organized in contiguous sets.\n \n \"\"\"\n+\n+ import operator\n+ import itertools\n+\n contiguous_job_ids = []\n- for k, g in itertools.groupby(enumerate(job_ids), lambda x:x[0]-x[1]):\n+ for k, g in itertools.groupby(enumerate(job_ids), lambda x: x[0]-x[1]):\n group = list(map(operator.itemgetter(1), g))\n if len(group) == 1:\n contiguous_job_ids.append(str(group[0]))\n@@ -73,6 +73,10 @@ def find_contiguous_ids(job_ids):\n \n \n def dispatch(args):\n+\n+ import collections\n+ from .. import experiment\n+\n # Handle optional arguments.\n if args['--njobs']:\n n_jobs = int(args['--njobs'])\n", "new_path": "Yank/commands/status.py", "old_path": "Yank/commands/status.py" } ]
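The pattern in this commit keeps each command module's header cheap and defers heavy imports until a subcommand is actually dispatched, so a mistyped command only pays for docopt's argument check. A minimal, self-contained sketch of the same idea is below; the tool name, usage string, and numpy-based command are illustrative assumptions, not Yank's actual CLI.

"""Usage:
  mytool stats <csvfile>
"""
from docopt import docopt  # cheap import, so bad arguments still fail fast


def dispatch_stats(args):
    # Heavy scientific imports happen only when this command really runs,
    # so `mytool --help` or a mistyped command never waits for them.
    import numpy as np

    data = np.loadtxt(args['<csvfile>'], delimiter=',')
    print(data.mean(), data.std())


if __name__ == '__main__':
    arguments = docopt(__doc__)
    if arguments['stats']:
        dispatch_stats(arguments)

The trade-off is that an import failure now surfaces when the command runs rather than when the module loads, which is acceptable here because the startup cost is the thing being optimized.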
778a6e2c52a5e70d93aa6495c8f8e537b1e663d9
spikeinterface/spikeinterface
null
null
Make imports for sorters at classmethod level and not at module level. This avoids an overly long import time with "spikeinterface.full".
[ { "change_type": "MODIFY", "diff": "@@ -5,7 +5,6 @@ from spikeinterface.core import (BaseSorting, BaseSortingSegment)\n \n try:\n import h5py\n-\n HAVE_H5PY = True\n except ImportError:\n HAVE_H5PY = False\n", "new_path": "spikeinterface/extractors/spykingcircusextractors.py", "old_path": "spikeinterface/extractors/spykingcircusextractors.py" }, { "change_type": "MODIFY", "diff": "@@ -2,22 +2,21 @@ from pathlib import Path\n \n from spikeinterface.core import (BaseSorting, BaseSortingSegment)\n \n-try:\n- import tridesclous as tdc\n-\n- HAVE_TDC = True\n-except ImportError:\n- HAVE_TDC = False\n \n \n class TridesclousSortingExtractor(BaseSorting):\n extractor_name = 'TridesclousSortingExtractor'\n- installed = HAVE_TDC # check at class level if installed or not\n+ installed = False\n is_writable = False\n mode = 'folder'\n installation_mesg = \"To use the TridesclousSortingExtractor install tridesclous: \\n\\n pip install tridesclous\\n\\n\" # error message when not installed\n \n def __init__(self, folder_path, chan_grp=None):\n+ try:\n+ import tridesclous as tdc\n+ HAVE_TDC = True\n+ except ImportError:\n+ HAVE_TDC = False\n assert HAVE_TDC, self.installation_mesg\n tdc_folder = Path(folder_path)\n \n", "new_path": "spikeinterface/extractors/tridesclousextractors.py", "old_path": "spikeinterface/extractors/tridesclousextractors.py" }, { "change_type": "MODIFY", "diff": "@@ -6,23 +6,22 @@ from spikeinterface.core import (BaseRecording, BaseSorting,\n \n try:\n import yaml\n-\n- HAVE_YASS = True\n+ HAVE_YAML = True\n except:\n- HAVE_YASS = False\n+ HAVE_YAML = False\n \n \n class YassSortingExtractor(BaseSorting):\n extractor_name = 'YassExtractor'\n mode = 'folder'\n- installed = HAVE_YASS # check at class level if installed or not\n+ installed = HAVE_YAML # check at class level if installed or not\n \n has_default_locations = False\n is_writable = False\n installation_mesg = \"To use the Yass extractor, install pyyaml: \\n\\n pip install pyyaml\\n\\n\" # error message when not installed\n \n def __init__(self, folder_path):\n- assert HAVE_YASS, self.installation_mesg\n+ assert HAVE_YAML, self.installation_mesg\n \n folder_path = Path(folder_path)\n \n", "new_path": "spikeinterface/extractors/yassextractors.py", "old_path": "spikeinterface/extractors/yassextractors.py" }, { "change_type": "MODIFY", "diff": "@@ -9,12 +9,6 @@ from spikeinterface.core import load_extractor\n from spikeinterface.extractors import HerdingspikesSortingExtractor\n import spikeinterface.toolkit as st\n \n-try:\n- import herdingspikes as hs\n- HAVE_HS = True\n-except ImportError:\n- HAVE_HS = False\n-\n \n class HerdingspikesSorter(BaseSorter):\n \n@@ -130,10 +124,16 @@ class HerdingspikesSorter(BaseSorter):\n \n @classmethod\n def is_installed(cls):\n+ try:\n+ import herdingspikes as hs\n+ HAVE_HS = True\n+ except ImportError:\n+ HAVE_HS = False\n return HAVE_HS\n \n @classmethod\n def get_sorter_version(cls):\n+ import herdingspikes as hs\n return hs.__version__\n \n @classmethod\n@@ -147,6 +147,7 @@ class HerdingspikesSorter(BaseSorter):\n \n @classmethod\n def _run_from_folder(cls, output_folder, params, verbose):\n+ import herdingspikes as hs\n \n recording = load_extractor(output_folder / 'spikeinterface_recording.json')\n \n", "new_path": "spikeinterface/sorters/herdingspikes/herdingspikes.py", "old_path": "spikeinterface/sorters/herdingspikes/herdingspikes.py" }, { "change_type": "MODIFY", "diff": "@@ -11,12 +11,6 @@ from spikeinterface.core import load_extractor\n \n from spikeinterface.extractors 
import NpzSortingExtractor, NumpySorting\n \n-try:\n- import mountainsort4\n-\n- HAVE_MS4 = True\n-except ImportError:\n- HAVE_MS4 = False\n \n \n class Mountainsort4Sorter(BaseSorter):\n@@ -73,10 +67,16 @@ class Mountainsort4Sorter(BaseSorter):\n \n @classmethod\n def is_installed(cls):\n+ try:\n+ import mountainsort4\n+ HAVE_MS4 = True\n+ except ImportError:\n+ HAVE_MS4 = False\n return HAVE_MS4\n \n @staticmethod\n def get_sorter_version():\n+ import mountainsort4\n if hasattr(mountainsort4, '__version__'):\n return mountainsort4.__version__\n return 'unknown'\n@@ -91,6 +91,8 @@ class Mountainsort4Sorter(BaseSorter):\n \n @classmethod\n def _run_from_folder(cls, output_folder, params, verbose):\n+ import mountainsort4\n+\n recording = load_extractor(output_folder / 'spikeinterface_recording.json')\n \n # alias to params\n", "new_path": "spikeinterface/sorters/mountainsort4/mountainsort4.py", "old_path": "spikeinterface/sorters/mountainsort4/mountainsort4.py" }, { "change_type": "MODIFY", "diff": "@@ -12,12 +12,6 @@ from ..utils import ShellScript\n \n from probeinterface import write_prb\n \n-try:\n- import circus\n- HAVE_SC = True\n-except ImportError:\n- HAVE_SC = False\n-\n \n class SpykingcircusSorter(BaseSorter):\n \"\"\"\n@@ -70,10 +64,16 @@ class SpykingcircusSorter(BaseSorter):\n \n @classmethod\n def is_installed(cls):\n+ try:\n+ import circus\n+ HAVE_SC = True\n+ except ImportError:\n+ HAVE_SC = False \n return HAVE_SC\n \n @staticmethod\n def get_sorter_version():\n+ import circus\n return circus.__version__\n \n @classmethod\n", "new_path": "spikeinterface/sorters/spyking_circus/spyking_circus.py", "old_path": "spikeinterface/sorters/spyking_circus/spyking_circus.py" }, { "change_type": "MODIFY", "diff": "@@ -16,11 +16,6 @@ from spikeinterface.core import BinaryRecordingExtractor\n \n from probeinterface import write_prb\n \n-try:\n- import tridesclous as tdc\n- HAVE_TDC = True\n-except ImportError:\n- HAVE_TDC = False\n \n \n class TridesclousSorter(BaseSorter):\n@@ -68,10 +63,16 @@ class TridesclousSorter(BaseSorter):\n \n @classmethod\n def is_installed(cls):\n+ try:\n+ import tridesclous as tdc\n+ HAVE_TDC = True\n+ except ImportError:\n+ HAVE_TDC = False \n return HAVE_TDC\n \n @classmethod\n def get_sorter_version(cls):\n+ import tridesclous as tdc\n return tdc.__version__\n \n @classmethod\n@@ -80,6 +81,7 @@ class TridesclousSorter(BaseSorter):\n \n @classmethod\n def _setup_recording(cls, recording, output_folder, params, verbose):\n+ import tridesclous as tdc\n \n # save prb file\n probegroup = recording.get_probegroup()\n@@ -123,6 +125,7 @@ class TridesclousSorter(BaseSorter):\n \n @classmethod\n def _run_from_folder(cls, output_folder, params, verbose):\n+ import tridesclous as tdc\n \n tdc_dataio = tdc.DataIO(dirname=str(output_folder))\n \n@@ -169,7 +172,8 @@ class TridesclousSorter(BaseSorter):\n \n \n def make_nested_tdc_params(tdc_dataio, chan_grp, **new_params):\n- \n+ import tridesclous as tdc\n+\n params = tdc.get_auto_params_for_catalogue(tdc_dataio, chan_grp=chan_grp)\n \n if 'freq_min' in new_params:\n", "new_path": "spikeinterface/sorters/tridesclous/tridesclous.py", "old_path": "spikeinterface/sorters/tridesclous/tridesclous.py" }, { "change_type": "MODIFY", "diff": "@@ -11,13 +11,6 @@ from spikeinterface.core import load_extractor\n from spikeinterface.core import BinaryRecordingExtractor\n from spikeinterface.extractors import YassSortingExtractor\n \n-try:\n- import yaml\n- import yass\n- HAVE_YASS = True\n-except ImportError:\n- HAVE_YASS = 
False\n-\n \n class YassSorter(BaseSorter):\n \"\"\"\n@@ -133,14 +126,23 @@ class YassSorter(BaseSorter):\n \n @classmethod\n def is_installed(cls):\n+ try:\n+ import yaml\n+ import yass\n+ HAVE_YASS = True\n+ except ImportError:\n+ HAVE_YASS = False \n return HAVE_YASS\n \n @classmethod\n def get_sorter_version(cls):\n+ import yass\n return yass.__version__\n \n @classmethod\n def _setup_recording(cls, recording, output_folder, params, verbose):\n+ import yaml\n+\n p = params\n \n source_dir = Path(__file__).parent\n", "new_path": "spikeinterface/sorters/yass/yass.py", "old_path": "spikeinterface/sorters/yass/yass.py" } ]
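All of the sorter classes above follow the same recipe: the optional backend is no longer imported at module level, and each classmethod that needs it imports it locally, with is_installed() probing availability via try/except ImportError. A minimal sketch of that recipe follows; the class and the scipy-based backend are illustrative assumptions, not spikeinterface's actual sorter API.

class ExampleSorter:
    installation_mesg = "To use ExampleSorter install scipy:\n\n    pip install scipy\n"

    @classmethod
    def is_installed(cls):
        # Importing lazily keeps package-wide imports cheap and still
        # works when the optional backend is missing.
        try:
            import scipy  # noqa: F401
            return True
        except ImportError:
            return False

    @classmethod
    def get_sorter_version(cls):
        import scipy
        return scipy.__version__

    @classmethod
    def run(cls, samples):
        assert cls.is_installed(), cls.installation_mesg
        # Re-importing here is cheap: Python caches the module in sys.modules.
        from scipy import signal
        return signal.medfilt(samples, kernel_size=3)


if ExampleSorter.is_installed():
    print(ExampleSorter.run([1.0, 5.0, 2.0, 8.0, 3.0]))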