| problem_id | source | task_type | in_source_id | prompt | golden_diff | verification_info | num_tokens | num_tokens_diff |
|---|---|---|---|---|---|---|---|---|
| stringlengths 18-22 | stringclasses 1 value | stringclasses 1 value | stringlengths 13-58 | stringlengths 1.1k-25.4k | stringlengths 145-5.13k | stringlengths 582-39.1k | int64 271-4.1k | int64 47-1.02k |
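
The records below are a raw export of individual rows with this schema. A minimal sketch of how such rows could be loaded and inspected follows; the Hub dataset id, the split name, and the assumption that `verification_info` is stored as a JSON string are illustrative guesses, not facts taken from this page.

```python
import json

from datasets import load_dataset  # pip install datasets

# Dataset id and split are assumptions for illustration; substitute the real ones.
ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"], row["in_source_id"], row["num_tokens"], row["num_tokens_diff"])

# verification_info appears to hold a JSON document; parse it if it arrives as a string.
info = row["verification_info"]
if isinstance(info, str):
    info = json.loads(info)
print(sorted(info.keys()))  # expected keys: after_files, before_files, golden_diff, issue
```
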
gh_patches_debug_57041 | rasdani/github-patches | git_diff | espnet__espnet-3073 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Question on chunk shift in ChunkIterFactory.build_iter()
In the code, shift width is calculated as a ratio of utterance length as follows:
S = int(L * self.chunk_shift_ratio)
Shouldn't shift width be calculated as a ratio of chunk length like below ?
S = int(W * self.chunk_shift_ratio)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `espnet2/iterators/chunk_iter_factory.py`
Content:
```
1 import logging
2 from typing import Any
3 from typing import Dict
4 from typing import Iterator
5 from typing import List
6 from typing import Sequence
7 from typing import Tuple
8 from typing import Union
9
10 import numpy as np
11 import torch
12 from typeguard import check_argument_types
13
14 from espnet2.iterators.abs_iter_factory import AbsIterFactory
15 from espnet2.iterators.sequence_iter_factory import SequenceIterFactory
16 from espnet2.samplers.abs_sampler import AbsSampler
17
18
19 class ChunkIterFactory(AbsIterFactory):
20 """Creates chunks from a sequence
21
22 Examples:
23 >>> batches = [["id1"], ["id2"], ...]
24 >>> batch_size = 128
25 >>> chunk_length = 1000
26 >>> iter_factory = ChunkIterFactory(dataset, batches, batch_size, chunk_length)
27 >>> it = iter_factory.build_iter(epoch)
28 >>> for ids, batch in it:
29 ... ...
30
31 - The number of mini-batches are varied in each epochs and
32 we can't get the number in advance
33 because IterFactory doesn't be given to the length information.
34 - Since the first reason, "num_iters_per_epoch" can't be implemented
35 for this iterator. Instead of it, "num_samples_per_epoch" is implemented.
36
37 """
38
39 def __init__(
40 self,
41 dataset,
42 batch_size: int,
43 batches: Union[AbsSampler, Sequence[Sequence[Any]]],
44 chunk_length: Union[int, str],
45 chunk_shift_ratio: float = 0.5,
46 num_cache_chunks: int = 1024,
47 num_samples_per_epoch: int = None,
48 seed: int = 0,
49 shuffle: bool = False,
50 num_workers: int = 0,
51 collate_fn=None,
52 pin_memory: bool = False,
53 ):
54 assert check_argument_types()
55 assert all(len(x) == 1 for x in batches), "batch-size must be 1"
56
57 self.per_sample_iter_factory = SequenceIterFactory(
58 dataset=dataset,
59 batches=batches,
60 num_iters_per_epoch=num_samples_per_epoch,
61 seed=seed,
62 shuffle=shuffle,
63 num_workers=num_workers,
64 collate_fn=collate_fn,
65 pin_memory=pin_memory,
66 )
67
68 self.num_cache_chunks = max(num_cache_chunks, batch_size)
69 if isinstance(chunk_length, str):
70 if len(chunk_length) == 0:
71 raise ValueError("e.g. 5,8 or 3-5: but got empty string")
72
73 self.chunk_lengths = []
74 for x in chunk_length.split(","):
75 try:
76 sps = list(map(int, x.split("-")))
77 except ValueError:
78 raise ValueError(f"e.g. 5,8 or 3-5: but got {chunk_length}")
79
80 if len(sps) > 2:
81 raise ValueError(f"e.g. 5,8 or 3-5: but got {chunk_length}")
82 elif len(sps) == 2:
83 # Append all numbers between the range into the candidates
84 self.chunk_lengths += list(range(sps[0], sps[1] + 1))
85 else:
86 self.chunk_lengths += [sps[0]]
87 else:
88 # Single candidates: Fixed chunk length
89 self.chunk_lengths = [chunk_length]
90
91 self.chunk_shift_ratio = chunk_shift_ratio
92 self.batch_size = batch_size
93 self.seed = seed
94 self.shuffle = shuffle
95
96 def build_iter(
97 self,
98 epoch: int,
99 shuffle: bool = None,
100 ) -> Iterator[Tuple[List[str], Dict[str, torch.Tensor]]]:
101 per_sample_loader = self.per_sample_iter_factory.build_iter(epoch, shuffle)
102
103 if shuffle is None:
104 shuffle = self.shuffle
105 state = np.random.RandomState(epoch + self.seed)
106
107 # NOTE(kamo):
108 # This iterator supports multiple chunk lengths and
109 # keep chunks for each lenghts here until collecting specified numbers
110 cache_chunks_dict = {}
111 cache_id_list_dict = {}
112 for ids, batch in per_sample_loader:
113 # Must be per-sample-loader
114 assert len(ids) == 1, f"Must be per-sample-loader: {len(ids)}"
115 assert all(len(x) == 1 for x in batch.values())
116
117 # Get keys of sequence data
118 sequence_keys = []
119 for key in batch:
120 if key + "_lengths" in batch:
121 sequence_keys.append(key)
122 # Remove lengths data and get the first sample
123 batch = {k: v[0] for k, v in batch.items() if not k.endswith("_lengths")}
124 id_ = ids[0]
125
126 for key in sequence_keys:
127 if len(batch[key]) != len(batch[sequence_keys[0]]):
128 raise RuntimeError(
129 f"All sequences must has same length: "
130 f"{len(batch[key])} != {len(batch[sequence_keys[0]])}"
131 )
132
133 L = len(batch[sequence_keys[0]])
134 # Select chunk length
135 chunk_lengths = [lg for lg in self.chunk_lengths if lg < L]
136 if len(chunk_lengths) == 0:
137 logging.warning(
138 f"The length of '{id_}' is {L}, but it is shorter than "
139 f"any candidates of chunk-length: {self.chunk_lengths}"
140 )
141 continue
142
143 W = int(state.choice(chunk_lengths, 1))
144 cache_id_list = cache_id_list_dict.setdefault(W, [])
145 cache_chunks = cache_chunks_dict.setdefault(W, {})
146
147 # Shift width to the next chunk
148 S = int(L * self.chunk_shift_ratio)
149 # Number of chunks
150 N = (L - W) // S + 1
151 if shuffle:
152 Z = state.randint(0, (L - W) % S + 1)
153 else:
154 Z = 0
155
156 # Split a sequence into chunks.
157 # Note that the marginal frames divided by chunk length are discarded
158 for k, v in batch.items():
159 if k not in cache_chunks:
160 cache_chunks[k] = []
161 if k in sequence_keys:
162 # Shift chunks with overlapped length for data augmentation
163 cache_chunks[k] += [v[Z + i * S : Z + i * S + W] for i in range(N)]
164 else:
165 # If not sequence, use whole data instead of chunk
166 cache_chunks[k] += [v for _ in range(N)]
167 cache_id_list += [id_ for _ in range(N)]
168
169 if len(cache_id_list) > self.num_cache_chunks:
170 cache_id_list, cache_chunks = yield from self._generate_mini_batches(
171 cache_id_list,
172 cache_chunks,
173 shuffle,
174 state,
175 )
176
177 cache_id_list_dict[W] = cache_id_list
178 cache_chunks_dict[W] = cache_chunks
179
180 else:
181 for W in cache_id_list_dict:
182 cache_id_list = cache_id_list_dict.setdefault(W, [])
183 cache_chunks = cache_chunks_dict.setdefault(W, {})
184
185 yield from self._generate_mini_batches(
186 cache_id_list,
187 cache_chunks,
188 shuffle,
189 state,
190 )
191
192 def _generate_mini_batches(
193 self,
194 id_list: List[str],
195 batches: Dict[str, List[torch.Tensor]],
196 shuffle: bool,
197 state: np.random.RandomState,
198 ):
199 if shuffle:
200 indices = np.arange(0, len(id_list))
201 state.shuffle(indices)
202 batches = {k: [v[i] for i in indices] for k, v in batches.items()}
203 id_list = [id_list[i] for i in indices]
204
205 bs = self.batch_size
206 while len(id_list) >= bs:
207 # Make mini-batch and yield
208 yield (
209 id_list[:bs],
210 {k: torch.stack(v[:bs], 0) for k, v in batches.items()},
211 )
212 id_list = id_list[bs:]
213 batches = {k: v[bs:] for k, v in batches.items()}
214
215 return id_list, batches
216
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/espnet2/iterators/chunk_iter_factory.py b/espnet2/iterators/chunk_iter_factory.py
--- a/espnet2/iterators/chunk_iter_factory.py
+++ b/espnet2/iterators/chunk_iter_factory.py
@@ -145,7 +145,7 @@
cache_chunks = cache_chunks_dict.setdefault(W, {})
# Shift width to the next chunk
- S = int(L * self.chunk_shift_ratio)
+ S = int(W * self.chunk_shift_ratio)
# Number of chunks
N = (L - W) // S + 1
if shuffle:
| {"golden_diff": "diff --git a/espnet2/iterators/chunk_iter_factory.py b/espnet2/iterators/chunk_iter_factory.py\n--- a/espnet2/iterators/chunk_iter_factory.py\n+++ b/espnet2/iterators/chunk_iter_factory.py\n@@ -145,7 +145,7 @@\n cache_chunks = cache_chunks_dict.setdefault(W, {})\n \n # Shift width to the next chunk\n- S = int(L * self.chunk_shift_ratio)\n+ S = int(W * self.chunk_shift_ratio)\n # Number of chunks\n N = (L - W) // S + 1\n if shuffle:\n", "issue": "Question on chunk shift in ChunkIterFactory.build_iter()\nIn the code, shift width is calculated as a ratio of utterance length as follows:\r\nS = int(L * self.chunk_shift_ratio)\r\n\r\nShouldn't shift width be calculated as a ratio of chunk length like below ?\r\nS = int(W * self.chunk_shift_ratio)\r\n\n", "before_files": [{"content": "import logging\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Iterator\nfrom typing import List\nfrom typing import Sequence\nfrom typing import Tuple\nfrom typing import Union\n\nimport numpy as np\nimport torch\nfrom typeguard import check_argument_types\n\nfrom espnet2.iterators.abs_iter_factory import AbsIterFactory\nfrom espnet2.iterators.sequence_iter_factory import SequenceIterFactory\nfrom espnet2.samplers.abs_sampler import AbsSampler\n\n\nclass ChunkIterFactory(AbsIterFactory):\n \"\"\"Creates chunks from a sequence\n\n Examples:\n >>> batches = [[\"id1\"], [\"id2\"], ...]\n >>> batch_size = 128\n >>> chunk_length = 1000\n >>> iter_factory = ChunkIterFactory(dataset, batches, batch_size, chunk_length)\n >>> it = iter_factory.build_iter(epoch)\n >>> for ids, batch in it:\n ... ...\n\n - The number of mini-batches are varied in each epochs and\n we can't get the number in advance\n because IterFactory doesn't be given to the length information.\n - Since the first reason, \"num_iters_per_epoch\" can't be implemented\n for this iterator. Instead of it, \"num_samples_per_epoch\" is implemented.\n\n \"\"\"\n\n def __init__(\n self,\n dataset,\n batch_size: int,\n batches: Union[AbsSampler, Sequence[Sequence[Any]]],\n chunk_length: Union[int, str],\n chunk_shift_ratio: float = 0.5,\n num_cache_chunks: int = 1024,\n num_samples_per_epoch: int = None,\n seed: int = 0,\n shuffle: bool = False,\n num_workers: int = 0,\n collate_fn=None,\n pin_memory: bool = False,\n ):\n assert check_argument_types()\n assert all(len(x) == 1 for x in batches), \"batch-size must be 1\"\n\n self.per_sample_iter_factory = SequenceIterFactory(\n dataset=dataset,\n batches=batches,\n num_iters_per_epoch=num_samples_per_epoch,\n seed=seed,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=collate_fn,\n pin_memory=pin_memory,\n )\n\n self.num_cache_chunks = max(num_cache_chunks, batch_size)\n if isinstance(chunk_length, str):\n if len(chunk_length) == 0:\n raise ValueError(\"e.g. 5,8 or 3-5: but got empty string\")\n\n self.chunk_lengths = []\n for x in chunk_length.split(\",\"):\n try:\n sps = list(map(int, x.split(\"-\")))\n except ValueError:\n raise ValueError(f\"e.g. 5,8 or 3-5: but got {chunk_length}\")\n\n if len(sps) > 2:\n raise ValueError(f\"e.g. 
5,8 or 3-5: but got {chunk_length}\")\n elif len(sps) == 2:\n # Append all numbers between the range into the candidates\n self.chunk_lengths += list(range(sps[0], sps[1] + 1))\n else:\n self.chunk_lengths += [sps[0]]\n else:\n # Single candidates: Fixed chunk length\n self.chunk_lengths = [chunk_length]\n\n self.chunk_shift_ratio = chunk_shift_ratio\n self.batch_size = batch_size\n self.seed = seed\n self.shuffle = shuffle\n\n def build_iter(\n self,\n epoch: int,\n shuffle: bool = None,\n ) -> Iterator[Tuple[List[str], Dict[str, torch.Tensor]]]:\n per_sample_loader = self.per_sample_iter_factory.build_iter(epoch, shuffle)\n\n if shuffle is None:\n shuffle = self.shuffle\n state = np.random.RandomState(epoch + self.seed)\n\n # NOTE(kamo):\n # This iterator supports multiple chunk lengths and\n # keep chunks for each lenghts here until collecting specified numbers\n cache_chunks_dict = {}\n cache_id_list_dict = {}\n for ids, batch in per_sample_loader:\n # Must be per-sample-loader\n assert len(ids) == 1, f\"Must be per-sample-loader: {len(ids)}\"\n assert all(len(x) == 1 for x in batch.values())\n\n # Get keys of sequence data\n sequence_keys = []\n for key in batch:\n if key + \"_lengths\" in batch:\n sequence_keys.append(key)\n # Remove lengths data and get the first sample\n batch = {k: v[0] for k, v in batch.items() if not k.endswith(\"_lengths\")}\n id_ = ids[0]\n\n for key in sequence_keys:\n if len(batch[key]) != len(batch[sequence_keys[0]]):\n raise RuntimeError(\n f\"All sequences must has same length: \"\n f\"{len(batch[key])} != {len(batch[sequence_keys[0]])}\"\n )\n\n L = len(batch[sequence_keys[0]])\n # Select chunk length\n chunk_lengths = [lg for lg in self.chunk_lengths if lg < L]\n if len(chunk_lengths) == 0:\n logging.warning(\n f\"The length of '{id_}' is {L}, but it is shorter than \"\n f\"any candidates of chunk-length: {self.chunk_lengths}\"\n )\n continue\n\n W = int(state.choice(chunk_lengths, 1))\n cache_id_list = cache_id_list_dict.setdefault(W, [])\n cache_chunks = cache_chunks_dict.setdefault(W, {})\n\n # Shift width to the next chunk\n S = int(L * self.chunk_shift_ratio)\n # Number of chunks\n N = (L - W) // S + 1\n if shuffle:\n Z = state.randint(0, (L - W) % S + 1)\n else:\n Z = 0\n\n # Split a sequence into chunks.\n # Note that the marginal frames divided by chunk length are discarded\n for k, v in batch.items():\n if k not in cache_chunks:\n cache_chunks[k] = []\n if k in sequence_keys:\n # Shift chunks with overlapped length for data augmentation\n cache_chunks[k] += [v[Z + i * S : Z + i * S + W] for i in range(N)]\n else:\n # If not sequence, use whole data instead of chunk\n cache_chunks[k] += [v for _ in range(N)]\n cache_id_list += [id_ for _ in range(N)]\n\n if len(cache_id_list) > self.num_cache_chunks:\n cache_id_list, cache_chunks = yield from self._generate_mini_batches(\n cache_id_list,\n cache_chunks,\n shuffle,\n state,\n )\n\n cache_id_list_dict[W] = cache_id_list\n cache_chunks_dict[W] = cache_chunks\n\n else:\n for W in cache_id_list_dict:\n cache_id_list = cache_id_list_dict.setdefault(W, [])\n cache_chunks = cache_chunks_dict.setdefault(W, {})\n\n yield from self._generate_mini_batches(\n cache_id_list,\n cache_chunks,\n shuffle,\n state,\n )\n\n def _generate_mini_batches(\n self,\n id_list: List[str],\n batches: Dict[str, List[torch.Tensor]],\n shuffle: bool,\n state: np.random.RandomState,\n ):\n if shuffle:\n indices = np.arange(0, len(id_list))\n state.shuffle(indices)\n batches = {k: [v[i] for i in indices] for k, v in 
batches.items()}\n id_list = [id_list[i] for i in indices]\n\n bs = self.batch_size\n while len(id_list) >= bs:\n # Make mini-batch and yield\n yield (\n id_list[:bs],\n {k: torch.stack(v[:bs], 0) for k, v in batches.items()},\n )\n id_list = id_list[bs:]\n batches = {k: v[bs:] for k, v in batches.items()}\n\n return id_list, batches\n", "path": "espnet2/iterators/chunk_iter_factory.py"}], "after_files": [{"content": "import logging\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Iterator\nfrom typing import List\nfrom typing import Sequence\nfrom typing import Tuple\nfrom typing import Union\n\nimport numpy as np\nimport torch\nfrom typeguard import check_argument_types\n\nfrom espnet2.iterators.abs_iter_factory import AbsIterFactory\nfrom espnet2.iterators.sequence_iter_factory import SequenceIterFactory\nfrom espnet2.samplers.abs_sampler import AbsSampler\n\n\nclass ChunkIterFactory(AbsIterFactory):\n \"\"\"Creates chunks from a sequence\n\n Examples:\n >>> batches = [[\"id1\"], [\"id2\"], ...]\n >>> batch_size = 128\n >>> chunk_length = 1000\n >>> iter_factory = ChunkIterFactory(dataset, batches, batch_size, chunk_length)\n >>> it = iter_factory.build_iter(epoch)\n >>> for ids, batch in it:\n ... ...\n\n - The number of mini-batches are varied in each epochs and\n we can't get the number in advance\n because IterFactory doesn't be given to the length information.\n - Since the first reason, \"num_iters_per_epoch\" can't be implemented\n for this iterator. Instead of it, \"num_samples_per_epoch\" is implemented.\n\n \"\"\"\n\n def __init__(\n self,\n dataset,\n batch_size: int,\n batches: Union[AbsSampler, Sequence[Sequence[Any]]],\n chunk_length: Union[int, str],\n chunk_shift_ratio: float = 0.5,\n num_cache_chunks: int = 1024,\n num_samples_per_epoch: int = None,\n seed: int = 0,\n shuffle: bool = False,\n num_workers: int = 0,\n collate_fn=None,\n pin_memory: bool = False,\n ):\n assert check_argument_types()\n assert all(len(x) == 1 for x in batches), \"batch-size must be 1\"\n\n self.per_sample_iter_factory = SequenceIterFactory(\n dataset=dataset,\n batches=batches,\n num_iters_per_epoch=num_samples_per_epoch,\n seed=seed,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=collate_fn,\n pin_memory=pin_memory,\n )\n\n self.num_cache_chunks = max(num_cache_chunks, batch_size)\n if isinstance(chunk_length, str):\n if len(chunk_length) == 0:\n raise ValueError(\"e.g. 5,8 or 3-5: but got empty string\")\n\n self.chunk_lengths = []\n for x in chunk_length.split(\",\"):\n try:\n sps = list(map(int, x.split(\"-\")))\n except ValueError:\n raise ValueError(f\"e.g. 5,8 or 3-5: but got {chunk_length}\")\n\n if len(sps) > 2:\n raise ValueError(f\"e.g. 
5,8 or 3-5: but got {chunk_length}\")\n elif len(sps) == 2:\n # Append all numbers between the range into the candidates\n self.chunk_lengths += list(range(sps[0], sps[1] + 1))\n else:\n self.chunk_lengths += [sps[0]]\n else:\n # Single candidates: Fixed chunk length\n self.chunk_lengths = [chunk_length]\n\n self.chunk_shift_ratio = chunk_shift_ratio\n self.batch_size = batch_size\n self.seed = seed\n self.shuffle = shuffle\n\n def build_iter(\n self,\n epoch: int,\n shuffle: bool = None,\n ) -> Iterator[Tuple[List[str], Dict[str, torch.Tensor]]]:\n per_sample_loader = self.per_sample_iter_factory.build_iter(epoch, shuffle)\n\n if shuffle is None:\n shuffle = self.shuffle\n state = np.random.RandomState(epoch + self.seed)\n\n # NOTE(kamo):\n # This iterator supports multiple chunk lengths and\n # keep chunks for each lenghts here until collecting specified numbers\n cache_chunks_dict = {}\n cache_id_list_dict = {}\n for ids, batch in per_sample_loader:\n # Must be per-sample-loader\n assert len(ids) == 1, f\"Must be per-sample-loader: {len(ids)}\"\n assert all(len(x) == 1 for x in batch.values())\n\n # Get keys of sequence data\n sequence_keys = []\n for key in batch:\n if key + \"_lengths\" in batch:\n sequence_keys.append(key)\n # Remove lengths data and get the first sample\n batch = {k: v[0] for k, v in batch.items() if not k.endswith(\"_lengths\")}\n id_ = ids[0]\n\n for key in sequence_keys:\n if len(batch[key]) != len(batch[sequence_keys[0]]):\n raise RuntimeError(\n f\"All sequences must has same length: \"\n f\"{len(batch[key])} != {len(batch[sequence_keys[0]])}\"\n )\n\n L = len(batch[sequence_keys[0]])\n # Select chunk length\n chunk_lengths = [lg for lg in self.chunk_lengths if lg < L]\n if len(chunk_lengths) == 0:\n logging.warning(\n f\"The length of '{id_}' is {L}, but it is shorter than \"\n f\"any candidates of chunk-length: {self.chunk_lengths}\"\n )\n continue\n\n W = int(state.choice(chunk_lengths, 1))\n cache_id_list = cache_id_list_dict.setdefault(W, [])\n cache_chunks = cache_chunks_dict.setdefault(W, {})\n\n # Shift width to the next chunk\n S = int(W * self.chunk_shift_ratio)\n # Number of chunks\n N = (L - W) // S + 1\n if shuffle:\n Z = state.randint(0, (L - W) % S + 1)\n else:\n Z = 0\n\n # Split a sequence into chunks.\n # Note that the marginal frames divided by chunk length are discarded\n for k, v in batch.items():\n if k not in cache_chunks:\n cache_chunks[k] = []\n if k in sequence_keys:\n # Shift chunks with overlapped length for data augmentation\n cache_chunks[k] += [v[Z + i * S : Z + i * S + W] for i in range(N)]\n else:\n # If not sequence, use whole data instead of chunk\n cache_chunks[k] += [v for _ in range(N)]\n cache_id_list += [id_ for _ in range(N)]\n\n if len(cache_id_list) > self.num_cache_chunks:\n cache_id_list, cache_chunks = yield from self._generate_mini_batches(\n cache_id_list,\n cache_chunks,\n shuffle,\n state,\n )\n\n cache_id_list_dict[W] = cache_id_list\n cache_chunks_dict[W] = cache_chunks\n\n else:\n for W in cache_id_list_dict:\n cache_id_list = cache_id_list_dict.setdefault(W, [])\n cache_chunks = cache_chunks_dict.setdefault(W, {})\n\n yield from self._generate_mini_batches(\n cache_id_list,\n cache_chunks,\n shuffle,\n state,\n )\n\n def _generate_mini_batches(\n self,\n id_list: List[str],\n batches: Dict[str, List[torch.Tensor]],\n shuffle: bool,\n state: np.random.RandomState,\n ):\n if shuffle:\n indices = np.arange(0, len(id_list))\n state.shuffle(indices)\n batches = {k: [v[i] for i in indices] for k, v in 
batches.items()}\n id_list = [id_list[i] for i in indices]\n\n bs = self.batch_size\n while len(id_list) >= bs:\n # Make mini-batch and yield\n yield (\n id_list[:bs],\n {k: torch.stack(v[:bs], 0) for k, v in batches.items()},\n )\n id_list = id_list[bs:]\n batches = {k: v[bs:] for k, v in batches.items()}\n\n return id_list, batches\n", "path": "espnet2/iterators/chunk_iter_factory.py"}]} | 2,597 | 143 |
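
The record above closes with its two token counts. Each record bundles the issue text, the pre-patch files, the reference patch, and the post-patch files, so a result can in principle be checked by replaying the patch. The sketch below is not a checker shipped with the dataset: the field layout (`path`/`content` entries inside `before_files`/`after_files`) is read off the record above, and everything else (helper name, use of `git apply`, temporary-directory workflow) is an assumption made for illustration.

```python
import json
import pathlib
import subprocess
import tempfile


def golden_diff_reproduces_after_files(verification_info: str) -> bool:
    """Apply the reference patch to before_files and compare with after_files."""
    info = json.loads(verification_info)
    with tempfile.TemporaryDirectory() as tmp:
        root = pathlib.Path(tmp)
        # Materialise the pre-patch snapshot.
        for f in info["before_files"]:
            target = root / f["path"]
            target.parent.mkdir(parents=True, exist_ok=True)
            target.write_text(f["content"])
        # Apply the reference diff with git (paths use the usual a/ b/ prefixes).
        (root / "golden.diff").write_text(info["golden_diff"])
        subprocess.run(["git", "init", "-q", "."], cwd=tmp, check=True)
        subprocess.run(["git", "apply", "golden.diff"], cwd=tmp, check=True)
        # The patched tree should match the stored post-patch snapshot.
        return all(
            (root / f["path"]).read_text() == f["content"]
            for f in info["after_files"]
        )
```

Comparing file contents keeps the check self-contained; a real verifier for this task type may instead run repository tests, which this sketch does not attempt.
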
gh_patches_debug_20448 | rasdani/github-patches | git_diff | litestar-org__litestar-3454 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Docs: Document SSE
### Summary
The SSE documentation is currently lacking:
- Docs for `ServerSentEventMessage`
- Sending messages a dicts
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3011">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3011/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3011/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/examples/responses/sse_responses.py`
Content:
```
1 from asyncio import sleep
2 from typing import AsyncGenerator
3
4 from litestar import Litestar, get
5 from litestar.response import ServerSentEvent
6
7
8 async def my_generator() -> AsyncGenerator[bytes, None]:
9 count = 0
10 while count < 10:
11 await sleep(0.01)
12 count += 1
13 yield str(count)
14
15
16 @get(path="/count", sync_to_thread=False)
17 def sse_handler() -> ServerSentEvent:
18 return ServerSentEvent(my_generator())
19
20
21 app = Litestar(route_handlers=[sse_handler])
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/examples/responses/sse_responses.py b/docs/examples/responses/sse_responses.py
--- a/docs/examples/responses/sse_responses.py
+++ b/docs/examples/responses/sse_responses.py
@@ -2,15 +2,28 @@
from typing import AsyncGenerator
from litestar import Litestar, get
-from litestar.response import ServerSentEvent
+from litestar.response import ServerSentEvent, ServerSentEventMessage
+from litestar.types import SSEData
-async def my_generator() -> AsyncGenerator[bytes, None]:
+async def my_generator() -> AsyncGenerator[SSEData, None]:
count = 0
while count < 10:
await sleep(0.01)
count += 1
+ # In the generator you can yield integers, strings, bytes, dictionaries, or ServerSentEventMessage objects
+ # dicts can have the following keys: data, event, id, retry, comment
+
+ # here we yield an integer
+ yield count
+ # here a string
yield str(count)
+ # here bytes
+ yield str(count).encode("utf-8")
+ # here a dictionary
+ yield {"data": 2 * count, "event": "event2", "retry": 10}
+ # here a ServerSentEventMessage object
+ yield ServerSentEventMessage(event="something-with-comment", retry=1000, comment="some comment")
@get(path="/count", sync_to_thread=False)
| {"golden_diff": "diff --git a/docs/examples/responses/sse_responses.py b/docs/examples/responses/sse_responses.py\n--- a/docs/examples/responses/sse_responses.py\n+++ b/docs/examples/responses/sse_responses.py\n@@ -2,15 +2,28 @@\n from typing import AsyncGenerator\n \n from litestar import Litestar, get\n-from litestar.response import ServerSentEvent\n+from litestar.response import ServerSentEvent, ServerSentEventMessage\n+from litestar.types import SSEData\n \n \n-async def my_generator() -> AsyncGenerator[bytes, None]:\n+async def my_generator() -> AsyncGenerator[SSEData, None]:\n count = 0\n while count < 10:\n await sleep(0.01)\n count += 1\n+ # In the generator you can yield integers, strings, bytes, dictionaries, or ServerSentEventMessage objects\n+ # dicts can have the following keys: data, event, id, retry, comment\n+\n+ # here we yield an integer\n+ yield count\n+ # here a string\n yield str(count)\n+ # here bytes\n+ yield str(count).encode(\"utf-8\")\n+ # here a dictionary\n+ yield {\"data\": 2 * count, \"event\": \"event2\", \"retry\": 10}\n+ # here a ServerSentEventMessage object\n+ yield ServerSentEventMessage(event=\"something-with-comment\", retry=1000, comment=\"some comment\")\n \n \n @get(path=\"/count\", sync_to_thread=False)\n", "issue": "Docs: Document SSE\n### Summary\n\nThe SSE documentation is currently lacking:\r\n\r\n- Docs for `ServerSentEventMessage`\r\n- Sending messages a dicts\r\n\r\n\n\n<!-- POLAR PLEDGE BADGE START -->\n---\n> [!NOTE] \n> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and \n> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.\n>\n> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)\n> * If you would like to see an issue prioritized, make a pledge towards it!\n> * We receive the pledge once the issue is completed & verified\n> * This, along with engagement in the community, helps us know which features are a priority to our users.\n\n<a href=\"https://polar.sh/litestar-org/litestar/issues/3011\">\n<picture>\n <source media=\"(prefers-color-scheme: dark)\" srcset=\"https://polar.sh/api/github/litestar-org/litestar/issues/3011/pledge.svg?darkmode=1\">\n <img alt=\"Fund with Polar\" src=\"https://polar.sh/api/github/litestar-org/litestar/issues/3011/pledge.svg\">\n</picture>\n</a>\n<!-- POLAR PLEDGE BADGE END -->\n\n", "before_files": [{"content": "from asyncio import sleep\nfrom typing import AsyncGenerator\n\nfrom litestar import Litestar, get\nfrom litestar.response import ServerSentEvent\n\n\nasync def my_generator() -> AsyncGenerator[bytes, None]:\n count = 0\n while count < 10:\n await sleep(0.01)\n count += 1\n yield str(count)\n\n\n@get(path=\"/count\", sync_to_thread=False)\ndef sse_handler() -> ServerSentEvent:\n return ServerSentEvent(my_generator())\n\n\napp = Litestar(route_handlers=[sse_handler])\n", "path": "docs/examples/responses/sse_responses.py"}], "after_files": [{"content": "from asyncio import sleep\nfrom typing import AsyncGenerator\n\nfrom litestar import Litestar, get\nfrom litestar.response import ServerSentEvent, ServerSentEventMessage\nfrom litestar.types import SSEData\n\n\nasync def my_generator() -> AsyncGenerator[SSEData, None]:\n count = 0\n while count < 10:\n await sleep(0.01)\n count += 1\n # In the generator you can yield integers, strings, bytes, dictionaries, or ServerSentEventMessage objects\n # dicts can 
have the following keys: data, event, id, retry, comment\n\n # here we yield an integer\n yield count\n # here a string\n yield str(count)\n # here bytes\n yield str(count).encode(\"utf-8\")\n # here a dictionary\n yield {\"data\": 2 * count, \"event\": \"event2\", \"retry\": 10}\n # here a ServerSentEventMessage object\n yield ServerSentEventMessage(event=\"something-with-comment\", retry=1000, comment=\"some comment\")\n\n\n@get(path=\"/count\", sync_to_thread=False)\ndef sse_handler() -> ServerSentEvent:\n return ServerSentEvent(my_generator())\n\n\napp = Litestar(route_handlers=[sse_handler])\n", "path": "docs/examples/responses/sse_responses.py"}]} | 739 | 332 |
gh_patches_debug_26741 | rasdani/github-patches | git_diff | pre-commit__pre-commit-893 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incorrect shebang in .git/hooks/pre-commit for python3 only installations
The shebang for `.git/hooks/pre-commit` is `#!/usr/bin/env python`. I work with setups where `python3` is the only python in env.
Could the shebang be the install python instead? I.e. the installation under `INSTALL_PYTHON = '/usr/bin/python3'`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/commands/install_uninstall.py`
Content:
```
1 from __future__ import print_function
2 from __future__ import unicode_literals
3
4 import io
5 import logging
6 import os.path
7 import sys
8
9 from pre_commit import git
10 from pre_commit import output
11 from pre_commit.repository import repositories
12 from pre_commit.util import cmd_output
13 from pre_commit.util import make_executable
14 from pre_commit.util import mkdirp
15 from pre_commit.util import resource_text
16
17
18 logger = logging.getLogger(__name__)
19
20 # This is used to identify the hook file we install
21 PRIOR_HASHES = (
22 '4d9958c90bc262f47553e2c073f14cfe',
23 'd8ee923c46731b42cd95cc869add4062',
24 '49fd668cb42069aa1b6048464be5d395',
25 '79f09a650522a87b0da915d0d983b2de',
26 'e358c9dae00eac5d06b38dfdb1e33a8c',
27 )
28 CURRENT_HASH = '138fd403232d2ddd5efb44317e38bf03'
29 TEMPLATE_START = '# start templated\n'
30 TEMPLATE_END = '# end templated\n'
31
32
33 def _hook_paths(git_root, hook_type):
34 pth = os.path.join(git.get_git_dir(git_root), 'hooks', hook_type)
35 return pth, '{}.legacy'.format(pth)
36
37
38 def is_our_script(filename):
39 if not os.path.exists(filename):
40 return False
41 with io.open(filename) as f:
42 contents = f.read()
43 return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)
44
45
46 def install(
47 runner, store, overwrite=False, hooks=False, hook_type='pre-commit',
48 skip_on_missing_conf=False,
49 ):
50 """Install the pre-commit hooks."""
51 if cmd_output('git', 'config', 'core.hooksPath', retcode=None)[1].strip():
52 logger.error(
53 'Cowardly refusing to install hooks with `core.hooksPath` set.\n'
54 'hint: `git config --unset-all core.hooksPath`',
55 )
56 return 1
57
58 hook_path, legacy_path = _hook_paths(runner.git_root, hook_type)
59
60 mkdirp(os.path.dirname(hook_path))
61
62 # If we have an existing hook, move it to pre-commit.legacy
63 if os.path.lexists(hook_path) and not is_our_script(hook_path):
64 os.rename(hook_path, legacy_path)
65
66 # If we specify overwrite, we simply delete the legacy file
67 if overwrite and os.path.exists(legacy_path):
68 os.remove(legacy_path)
69 elif os.path.exists(legacy_path):
70 output.write_line(
71 'Running in migration mode with existing hooks at {}\n'
72 'Use -f to use only pre-commit.'.format(legacy_path),
73 )
74
75 params = {
76 'CONFIG': runner.config_file,
77 'HOOK_TYPE': hook_type,
78 'INSTALL_PYTHON': sys.executable,
79 'SKIP_ON_MISSING_CONFIG': skip_on_missing_conf,
80 }
81
82 with io.open(hook_path, 'w') as hook_file:
83 contents = resource_text('hook-tmpl')
84 before, rest = contents.split(TEMPLATE_START)
85 to_template, after = rest.split(TEMPLATE_END)
86
87 hook_file.write(before + TEMPLATE_START)
88 for line in to_template.splitlines():
89 var = line.split()[0]
90 hook_file.write('{} = {!r}\n'.format(var, params[var]))
91 hook_file.write(TEMPLATE_END + after)
92 make_executable(hook_path)
93
94 output.write_line('pre-commit installed at {}'.format(hook_path))
95
96 # If they requested we install all of the hooks, do so.
97 if hooks:
98 install_hooks(runner, store)
99
100 return 0
101
102
103 def install_hooks(runner, store):
104 for repository in repositories(runner.config, store):
105 repository.require_installed()
106
107
108 def uninstall(runner, hook_type='pre-commit'):
109 """Uninstall the pre-commit hooks."""
110 hook_path, legacy_path = _hook_paths(runner.git_root, hook_type)
111
112 # If our file doesn't exist or it isn't ours, gtfo.
113 if not os.path.exists(hook_path) or not is_our_script(hook_path):
114 return 0
115
116 os.remove(hook_path)
117 output.write_line('{} uninstalled'.format(hook_type))
118
119 if os.path.exists(legacy_path):
120 os.rename(legacy_path, hook_path)
121 output.write_line('Restored previous hooks to {}'.format(hook_path))
122
123 return 0
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/commands/install_uninstall.py b/pre_commit/commands/install_uninstall.py
--- a/pre_commit/commands/install_uninstall.py
+++ b/pre_commit/commands/install_uninstall.py
@@ -8,6 +8,7 @@
from pre_commit import git
from pre_commit import output
+from pre_commit.languages import python
from pre_commit.repository import repositories
from pre_commit.util import cmd_output
from pre_commit.util import make_executable
@@ -43,6 +44,16 @@
return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)
+def shebang():
+ if sys.platform == 'win32':
+ py = 'python'
+ else:
+ py = python.get_default_version()
+ if py == 'default':
+ py = 'python'
+ return '#!/usr/bin/env {}'.format(py)
+
+
def install(
runner, store, overwrite=False, hooks=False, hook_type='pre-commit',
skip_on_missing_conf=False,
@@ -84,6 +95,8 @@
before, rest = contents.split(TEMPLATE_START)
to_template, after = rest.split(TEMPLATE_END)
+ before = before.replace('#!/usr/bin/env python', shebang())
+
hook_file.write(before + TEMPLATE_START)
for line in to_template.splitlines():
var = line.split()[0]
| {"golden_diff": "diff --git a/pre_commit/commands/install_uninstall.py b/pre_commit/commands/install_uninstall.py\n--- a/pre_commit/commands/install_uninstall.py\n+++ b/pre_commit/commands/install_uninstall.py\n@@ -8,6 +8,7 @@\n \n from pre_commit import git\n from pre_commit import output\n+from pre_commit.languages import python\n from pre_commit.repository import repositories\n from pre_commit.util import cmd_output\n from pre_commit.util import make_executable\n@@ -43,6 +44,16 @@\n return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)\n \n \n+def shebang():\n+ if sys.platform == 'win32':\n+ py = 'python'\n+ else:\n+ py = python.get_default_version()\n+ if py == 'default':\n+ py = 'python'\n+ return '#!/usr/bin/env {}'.format(py)\n+\n+\n def install(\n runner, store, overwrite=False, hooks=False, hook_type='pre-commit',\n skip_on_missing_conf=False,\n@@ -84,6 +95,8 @@\n before, rest = contents.split(TEMPLATE_START)\n to_template, after = rest.split(TEMPLATE_END)\n \n+ before = before.replace('#!/usr/bin/env python', shebang())\n+\n hook_file.write(before + TEMPLATE_START)\n for line in to_template.splitlines():\n var = line.split()[0]\n", "issue": "Incorrect shebang in .git/hooks/pre-commit for python3 only installations\nThe shebang for `.git/hooks/pre-commit` is `#!/usr/bin/env python`. I work with setups where `python3` is the only python in env.\r\n\r\nCould the shebang be the install python instead? I.e. the installation under `INSTALL_PYTHON = '/usr/bin/python3'`\n", "before_files": [{"content": "from __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport io\nimport logging\nimport os.path\nimport sys\n\nfrom pre_commit import git\nfrom pre_commit import output\nfrom pre_commit.repository import repositories\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import make_executable\nfrom pre_commit.util import mkdirp\nfrom pre_commit.util import resource_text\n\n\nlogger = logging.getLogger(__name__)\n\n# This is used to identify the hook file we install\nPRIOR_HASHES = (\n '4d9958c90bc262f47553e2c073f14cfe',\n 'd8ee923c46731b42cd95cc869add4062',\n '49fd668cb42069aa1b6048464be5d395',\n '79f09a650522a87b0da915d0d983b2de',\n 'e358c9dae00eac5d06b38dfdb1e33a8c',\n)\nCURRENT_HASH = '138fd403232d2ddd5efb44317e38bf03'\nTEMPLATE_START = '# start templated\\n'\nTEMPLATE_END = '# end templated\\n'\n\n\ndef _hook_paths(git_root, hook_type):\n pth = os.path.join(git.get_git_dir(git_root), 'hooks', hook_type)\n return pth, '{}.legacy'.format(pth)\n\n\ndef is_our_script(filename):\n if not os.path.exists(filename):\n return False\n with io.open(filename) as f:\n contents = f.read()\n return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)\n\n\ndef install(\n runner, store, overwrite=False, hooks=False, hook_type='pre-commit',\n skip_on_missing_conf=False,\n):\n \"\"\"Install the pre-commit hooks.\"\"\"\n if cmd_output('git', 'config', 'core.hooksPath', retcode=None)[1].strip():\n logger.error(\n 'Cowardly refusing to install hooks with `core.hooksPath` set.\\n'\n 'hint: `git config --unset-all core.hooksPath`',\n )\n return 1\n\n hook_path, legacy_path = _hook_paths(runner.git_root, hook_type)\n\n mkdirp(os.path.dirname(hook_path))\n\n # If we have an existing hook, move it to pre-commit.legacy\n if os.path.lexists(hook_path) and not is_our_script(hook_path):\n os.rename(hook_path, legacy_path)\n\n # If we specify overwrite, we simply delete the legacy file\n if overwrite and os.path.exists(legacy_path):\n os.remove(legacy_path)\n 
elif os.path.exists(legacy_path):\n output.write_line(\n 'Running in migration mode with existing hooks at {}\\n'\n 'Use -f to use only pre-commit.'.format(legacy_path),\n )\n\n params = {\n 'CONFIG': runner.config_file,\n 'HOOK_TYPE': hook_type,\n 'INSTALL_PYTHON': sys.executable,\n 'SKIP_ON_MISSING_CONFIG': skip_on_missing_conf,\n }\n\n with io.open(hook_path, 'w') as hook_file:\n contents = resource_text('hook-tmpl')\n before, rest = contents.split(TEMPLATE_START)\n to_template, after = rest.split(TEMPLATE_END)\n\n hook_file.write(before + TEMPLATE_START)\n for line in to_template.splitlines():\n var = line.split()[0]\n hook_file.write('{} = {!r}\\n'.format(var, params[var]))\n hook_file.write(TEMPLATE_END + after)\n make_executable(hook_path)\n\n output.write_line('pre-commit installed at {}'.format(hook_path))\n\n # If they requested we install all of the hooks, do so.\n if hooks:\n install_hooks(runner, store)\n\n return 0\n\n\ndef install_hooks(runner, store):\n for repository in repositories(runner.config, store):\n repository.require_installed()\n\n\ndef uninstall(runner, hook_type='pre-commit'):\n \"\"\"Uninstall the pre-commit hooks.\"\"\"\n hook_path, legacy_path = _hook_paths(runner.git_root, hook_type)\n\n # If our file doesn't exist or it isn't ours, gtfo.\n if not os.path.exists(hook_path) or not is_our_script(hook_path):\n return 0\n\n os.remove(hook_path)\n output.write_line('{} uninstalled'.format(hook_type))\n\n if os.path.exists(legacy_path):\n os.rename(legacy_path, hook_path)\n output.write_line('Restored previous hooks to {}'.format(hook_path))\n\n return 0\n", "path": "pre_commit/commands/install_uninstall.py"}], "after_files": [{"content": "from __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport io\nimport logging\nimport os.path\nimport sys\n\nfrom pre_commit import git\nfrom pre_commit import output\nfrom pre_commit.languages import python\nfrom pre_commit.repository import repositories\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import make_executable\nfrom pre_commit.util import mkdirp\nfrom pre_commit.util import resource_text\n\n\nlogger = logging.getLogger(__name__)\n\n# This is used to identify the hook file we install\nPRIOR_HASHES = (\n '4d9958c90bc262f47553e2c073f14cfe',\n 'd8ee923c46731b42cd95cc869add4062',\n '49fd668cb42069aa1b6048464be5d395',\n '79f09a650522a87b0da915d0d983b2de',\n 'e358c9dae00eac5d06b38dfdb1e33a8c',\n)\nCURRENT_HASH = '138fd403232d2ddd5efb44317e38bf03'\nTEMPLATE_START = '# start templated\\n'\nTEMPLATE_END = '# end templated\\n'\n\n\ndef _hook_paths(git_root, hook_type):\n pth = os.path.join(git.get_git_dir(git_root), 'hooks', hook_type)\n return pth, '{}.legacy'.format(pth)\n\n\ndef is_our_script(filename):\n if not os.path.exists(filename):\n return False\n with io.open(filename) as f:\n contents = f.read()\n return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)\n\n\ndef shebang():\n if sys.platform == 'win32':\n py = 'python'\n else:\n py = python.get_default_version()\n if py == 'default':\n py = 'python'\n return '#!/usr/bin/env {}'.format(py)\n\n\ndef install(\n runner, store, overwrite=False, hooks=False, hook_type='pre-commit',\n skip_on_missing_conf=False,\n):\n \"\"\"Install the pre-commit hooks.\"\"\"\n if cmd_output('git', 'config', 'core.hooksPath', retcode=None)[1].strip():\n logger.error(\n 'Cowardly refusing to install hooks with `core.hooksPath` set.\\n'\n 'hint: `git config --unset-all core.hooksPath`',\n )\n return 1\n\n hook_path, legacy_path = 
_hook_paths(runner.git_root, hook_type)\n\n mkdirp(os.path.dirname(hook_path))\n\n # If we have an existing hook, move it to pre-commit.legacy\n if os.path.lexists(hook_path) and not is_our_script(hook_path):\n os.rename(hook_path, legacy_path)\n\n # If we specify overwrite, we simply delete the legacy file\n if overwrite and os.path.exists(legacy_path):\n os.remove(legacy_path)\n elif os.path.exists(legacy_path):\n output.write_line(\n 'Running in migration mode with existing hooks at {}\\n'\n 'Use -f to use only pre-commit.'.format(legacy_path),\n )\n\n params = {\n 'CONFIG': runner.config_file,\n 'HOOK_TYPE': hook_type,\n 'INSTALL_PYTHON': sys.executable,\n 'SKIP_ON_MISSING_CONFIG': skip_on_missing_conf,\n }\n\n with io.open(hook_path, 'w') as hook_file:\n contents = resource_text('hook-tmpl')\n before, rest = contents.split(TEMPLATE_START)\n to_template, after = rest.split(TEMPLATE_END)\n\n before = before.replace('#!/usr/bin/env python', shebang())\n\n hook_file.write(before + TEMPLATE_START)\n for line in to_template.splitlines():\n var = line.split()[0]\n hook_file.write('{} = {!r}\\n'.format(var, params[var]))\n hook_file.write(TEMPLATE_END + after)\n make_executable(hook_path)\n\n output.write_line('pre-commit installed at {}'.format(hook_path))\n\n # If they requested we install all of the hooks, do so.\n if hooks:\n install_hooks(runner, store)\n\n return 0\n\n\ndef install_hooks(runner, store):\n for repository in repositories(runner.config, store):\n repository.require_installed()\n\n\ndef uninstall(runner, hook_type='pre-commit'):\n \"\"\"Uninstall the pre-commit hooks.\"\"\"\n hook_path, legacy_path = _hook_paths(runner.git_root, hook_type)\n\n # If our file doesn't exist or it isn't ours, gtfo.\n if not os.path.exists(hook_path) or not is_our_script(hook_path):\n return 0\n\n os.remove(hook_path)\n output.write_line('{} uninstalled'.format(hook_type))\n\n if os.path.exists(legacy_path):\n os.rename(legacy_path, hook_path)\n output.write_line('Restored previous hooks to {}'.format(hook_path))\n\n return 0\n", "path": "pre_commit/commands/install_uninstall.py"}]} | 1,668 | 303 |
gh_patches_debug_36938 | rasdani/github-patches | git_diff | ManimCommunity__manim-435 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Latex and Sphinx are not yet functioning together.
This example is working on sphinx:
```
.. manim:: DotScene1
:quality: low
:save_last_frame:
class DotScene1(Scene):
def construct(self):
dot = Dot().set_color(GREEN)
self.add(dot)
self.wait(1)
```
However, when I have something tex related, it throws an error:
```
.. manim:: TextExample
:quality: medium
:save_last_frame:
class TextExample(Scene):
def construct(self):
t = TextMobject("Hello World")
self.add(t)
```
> Exception occurred:
File "/home/k/projects/manim-community/manim/utils/tex_file_writing.py", line 32, in generate_tex_file
with open(result, "w", encoding="utf-8") as outfile:
FileNotFoundError: [Errno 2] No such file or directory: 'media/Tex/7d1ec941f0e30957.tex'
The full traceback has been saved in /tmp/sphinx-err-4zdxhjgt.log, if you want to report the issue to the developers.
Please also report this if it was a user error, so that a better error message can be provided next time.
A bug report can be filed in the tracker at <https://github.com/sphinx-doc/sphinx/issues>. Thanks!
make: *** [Makefile:26: html] Error 2
A similar error message comes e.g. for this example:
```
.. manim:: Plot1
:quality: medium
:save_last_frame:
class Plot1(GraphScene):
def construct(self):
self.setup_axes()
my_func = lambda x: np.sin(x)
func_graph=self.get_graph(my_func)
self.add(func_graph)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/source/manim_directive.py`
Content:
```
1 r"""
2 A directive for including Manim videos in a Sphinx document
3 ===========================================================
4
5 When rendering the HTML documentation, the ``.. manim::`` directive
6 implemented here allows to include rendered videos.
7
8 Its basic usage that allows processing **inline content**
9 looks as follows::
10
11 .. manim:: MyScene
12
13 class MyScene(Scene):
14 def construct(self):
15 ...
16
17 It is required to pass the name of the class representing the
18 scene to be rendered to the directive.
19
20 As a second application, the directive can also be used to
21 render scenes that are defined within doctests, for example::
22
23 .. manim:: DirectiveDoctestExample
24
25 >>> dot = Dot(color=RED)
26 >>> dot.color
27 <Color #fc6255>
28 >>> class DirectiveDoctestExample(Scene):
29 ... def construct(self):
30 ... self.play(ShowCreation(dot))
31
32
33 Options
34 -------
35
36 Options can be passed as follows::
37
38 .. manim:: <Class name>
39 :<option name>: <value>
40
41 The following configuration options are supported by the
42 directive:
43
44 display_source
45 If this flag is present without argument,
46 the source code is displayed above the rendered video.
47
48 quality : {'low', 'medium', 'high', 'fourk'}
49 Controls render quality of the video, in analogy to
50 the corresponding command line flags.
51
52 save_as_gif
53 If this flag is present without argument,
54 the scene is rendered as a gif.
55
56 save_last_frame
57 If this flag is present without argument,
58 an image representing the last frame of the scene will
59 be rendered and displayed, instead of a video.
60
61 """
62 from docutils.parsers.rst import directives, Directive
63 from docutils.parsers.rst.directives.images import Image
64
65 import jinja2
66 import os
67 from os.path import relpath
68
69 import shutil
70
71 classnamedict = {}
72
73
74 class ManimDirective(Directive):
75 r"""The ``.. manim::`` directive.
76
77 See the module docstring for documentation.
78 """
79 has_content = True
80 required_arguments = 1
81 optional_arguments = 0
82 option_spec = {
83 "display_source": bool,
84 "quality": lambda arg: directives.choice(
85 arg, ("low", "medium", "high", "fourk")
86 ),
87 "save_as_gif": bool,
88 "save_last_frame": bool,
89 }
90 final_argument_whitespace = True
91
92 def run(self):
93 from manim import config
94
95 global classnamedict
96
97 clsname = self.arguments[0]
98 if clsname not in classnamedict:
99 classnamedict[clsname] = 1
100 else:
101 classnamedict[clsname] += 1
102
103 display_source = "display_source" in self.options
104 save_as_gif = "save_as_gif" in self.options
105 save_last_frame = "save_last_frame" in self.options
106 assert not (save_as_gif and save_last_frame)
107
108 frame_rate = config["frame_rate"]
109 pixel_height = config["pixel_height"]
110 pixel_width = config["pixel_width"]
111
112 if "quality" in self.options:
113 quality = self.options["quality"]
114 if quality == "low":
115 pixel_height = 480
116 pixel_width = 854
117 frame_rate = 15
118 elif quality == "medium":
119 pixel_height = 720
120 pixel_width = 1280
121 frame_rate = 30
122 elif quality == "high":
123 pixel_height = 1440
124 pixel_width = 2560
125 frame_rate = 60
126 elif quality == "fourk":
127 pixel_height = 2160
128 pixel_width = 3840
129 frame_rate = 60
130
131 qualitydir = f"{pixel_height}p{frame_rate}"
132
133 state_machine = self.state_machine
134 document = state_machine.document
135
136 source_file_name = document.attributes["source"]
137 source_rel_name = relpath(source_file_name, setup.confdir)
138 source_rel_dir = os.path.dirname(source_rel_name)
139 while source_rel_dir.startswith(os.path.sep):
140 source_rel_dir = source_rel_dir[1:]
141
142 dest_dir = os.path.abspath(
143 os.path.join(setup.app.builder.outdir, source_rel_dir)
144 )
145 if not os.path.exists(dest_dir):
146 os.makedirs(dest_dir)
147
148 source_block = [
149 ".. code-block:: python",
150 "",
151 *[" " + line for line in self.content],
152 ]
153 source_block = "\n".join(source_block)
154
155 media_dir = os.path.join("source", "media")
156 images_dir = os.path.join(media_dir, "images")
157 video_dir = os.path.join(media_dir, "videos")
158 output_file = f"{clsname}-{classnamedict[clsname]}"
159
160 file_writer_config_code = [
161 f'config["frame_rate"] = {frame_rate}',
162 f'config["pixel_height"] = {pixel_height}',
163 f'config["pixel_width"] = {pixel_width}',
164 f'file_writer_config["media_dir"] = "{media_dir}"',
165 f'file_writer_config["images_dir"] = "{images_dir}"',
166 f'file_writer_config["video_dir"] = "{video_dir}"',
167 f'file_writer_config["save_last_frame"] = {save_last_frame}',
168 f'file_writer_config["save_as_gif"] = {save_as_gif}',
169 f'file_writer_config["output_file"] = "{output_file}"',
170 ]
171
172 user_code = self.content
173 if user_code[0].startswith(">>> "): # check whether block comes from doctest
174 user_code = [
175 line[4:] for line in user_code if line.startswith((">>> ", "... "))
176 ]
177
178 code = [
179 "from manim import *",
180 *file_writer_config_code,
181 *user_code,
182 f"{clsname}()",
183 ]
184 exec("\n".join(code), globals())
185
186 # copy video file to output directory
187 if not (save_as_gif or save_last_frame):
188 filename = f"{output_file}.mp4"
189 filesrc = os.path.join(video_dir, qualitydir, filename)
190 destfile = os.path.join(dest_dir, filename)
191 shutil.copyfile(filesrc, destfile)
192 elif save_as_gif:
193 filename = f"{output_file}.gif"
194 filesrc = os.path.join(video_dir, qualitydir, filename)
195 elif save_last_frame:
196 filename = f"{output_file}.png"
197 filesrc = os.path.join(images_dir, filename)
198 else:
199 raise ValueError("Invalid combination of render flags received.")
200
201 rendered_template = jinja2.Template(TEMPLATE).render(
202 display_source=display_source,
203 filesrc=filesrc[6:],
204 output_file=output_file,
205 save_last_frame=save_last_frame,
206 save_as_gif=save_as_gif,
207 source_block=source_block,
208 )
209 state_machine.insert_input(
210 rendered_template.split("\n"), source=document.attributes["source"]
211 )
212
213 return []
214
215
216 def setup(app):
217 import manim
218
219 setup.app = app
220 setup.config = app.config
221 setup.confdir = app.confdir
222 app.add_directive("manim", ManimDirective)
223
224 metadata = {"parallel_read_safe": False, "parallel_write_safe": True}
225 return metadata
226
227
228 TEMPLATE = r"""
229 {% if display_source %}
230 .. raw:: html
231
232 <div class="manim-example">
233
234 {{ source_block }}
235 {% endif %}
236
237 {% if not (save_as_gif or save_last_frame) %}
238 .. raw:: html
239
240 <video class="manim-video" controls loop autoplay src="./{{ output_file }}.mp4"></video>
241 {% elif save_as_gif %}
242 .. image:: {{ filesrc }}
243 :align: center
244 {% elif save_last_frame %}
245 .. image:: {{ filesrc }}
246 :align: center
247 {% endif %}
248
249 {% if display_source %}
250 .. raw:: html
251
252 </div>
253 {% endif %}
254 """
255
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/source/manim_directive.py b/docs/source/manim_directive.py
--- a/docs/source/manim_directive.py
+++ b/docs/source/manim_directive.py
@@ -152,8 +152,18 @@
]
source_block = "\n".join(source_block)
- media_dir = os.path.join("source", "media")
+ media_dir = os.path.join(setup.confdir, "media")
+ if not os.path.exists(media_dir):
+ os.mkdir(media_dir)
images_dir = os.path.join(media_dir, "images")
+ if not os.path.exists(images_dir):
+ os.mkdir(images_dir)
+ tex_dir = os.path.join(media_dir, "tex")
+ if not os.path.exists(tex_dir):
+ os.mkdir(tex_dir)
+ text_dir = os.path.join(media_dir, "text")
+ if not os.path.exists(text_dir):
+ os.mkdir(text_dir)
video_dir = os.path.join(media_dir, "videos")
output_file = f"{clsname}-{classnamedict[clsname]}"
@@ -163,6 +173,8 @@
f'config["pixel_width"] = {pixel_width}',
f'file_writer_config["media_dir"] = "{media_dir}"',
f'file_writer_config["images_dir"] = "{images_dir}"',
+ f'file_writer_config["tex_dir"] = "{tex_dir}"',
+ f'file_writer_config["text_dir"] = "{text_dir}"',
f'file_writer_config["video_dir"] = "{video_dir}"',
f'file_writer_config["save_last_frame"] = {save_last_frame}',
f'file_writer_config["save_as_gif"] = {save_as_gif}',
@@ -200,7 +212,7 @@
rendered_template = jinja2.Template(TEMPLATE).render(
display_source=display_source,
- filesrc=filesrc[6:],
+ filesrc_rel=os.path.relpath(filesrc, setup.confdir),
output_file=output_file,
save_last_frame=save_last_frame,
save_as_gif=save_as_gif,
@@ -239,10 +251,10 @@
<video class="manim-video" controls loop autoplay src="./{{ output_file }}.mp4"></video>
{% elif save_as_gif %}
-.. image:: {{ filesrc }}
+.. image:: /{{ filesrc_rel }}
:align: center
{% elif save_last_frame %}
-.. image:: {{ filesrc }}
+.. image:: /{{ filesrc_rel }}
:align: center
{% endif %}
| {"golden_diff": "diff --git a/docs/source/manim_directive.py b/docs/source/manim_directive.py\n--- a/docs/source/manim_directive.py\n+++ b/docs/source/manim_directive.py\n@@ -152,8 +152,18 @@\n ]\n source_block = \"\\n\".join(source_block)\n \n- media_dir = os.path.join(\"source\", \"media\")\n+ media_dir = os.path.join(setup.confdir, \"media\")\n+ if not os.path.exists(media_dir):\n+ os.mkdir(media_dir)\n images_dir = os.path.join(media_dir, \"images\")\n+ if not os.path.exists(images_dir):\n+ os.mkdir(images_dir)\n+ tex_dir = os.path.join(media_dir, \"tex\")\n+ if not os.path.exists(tex_dir):\n+ os.mkdir(tex_dir)\n+ text_dir = os.path.join(media_dir, \"text\")\n+ if not os.path.exists(text_dir):\n+ os.mkdir(text_dir)\n video_dir = os.path.join(media_dir, \"videos\")\n output_file = f\"{clsname}-{classnamedict[clsname]}\"\n \n@@ -163,6 +173,8 @@\n f'config[\"pixel_width\"] = {pixel_width}',\n f'file_writer_config[\"media_dir\"] = \"{media_dir}\"',\n f'file_writer_config[\"images_dir\"] = \"{images_dir}\"',\n+ f'file_writer_config[\"tex_dir\"] = \"{tex_dir}\"',\n+ f'file_writer_config[\"text_dir\"] = \"{text_dir}\"',\n f'file_writer_config[\"video_dir\"] = \"{video_dir}\"',\n f'file_writer_config[\"save_last_frame\"] = {save_last_frame}',\n f'file_writer_config[\"save_as_gif\"] = {save_as_gif}',\n@@ -200,7 +212,7 @@\n \n rendered_template = jinja2.Template(TEMPLATE).render(\n display_source=display_source,\n- filesrc=filesrc[6:],\n+ filesrc_rel=os.path.relpath(filesrc, setup.confdir),\n output_file=output_file,\n save_last_frame=save_last_frame,\n save_as_gif=save_as_gif,\n@@ -239,10 +251,10 @@\n \n <video class=\"manim-video\" controls loop autoplay src=\"./{{ output_file }}.mp4\"></video>\n {% elif save_as_gif %}\n-.. image:: {{ filesrc }}\n+.. image:: /{{ filesrc_rel }}\n :align: center\n {% elif save_last_frame %}\n-.. image:: {{ filesrc }}\n+.. image:: /{{ filesrc_rel }}\n :align: center\n {% endif %}\n", "issue": "Latex and Sphinx are not yet functioning together.\nThis example is working on sphinx:\r\n```\r\n.. manim:: DotScene1\r\n :quality: low\r\n :save_last_frame:\r\n\r\n class DotScene1(Scene):\r\n def construct(self):\r\n dot = Dot().set_color(GREEN)\r\n self.add(dot)\r\n self.wait(1)\r\n```\r\nHowever, when I have something tex related, it throws an error:\r\n```\r\n.. manim:: TextExample\r\n :quality: medium\r\n :save_last_frame:\r\n \r\n class TextExample(Scene):\r\n def construct(self):\r\n t = TextMobject(\"Hello World\")\r\n self.add(t)\r\n```\r\n> Exception occurred:\r\n File \"/home/k/projects/manim-community/manim/utils/tex_file_writing.py\", line 32, in generate_tex_file\r\n with open(result, \"w\", encoding=\"utf-8\") as outfile:\r\nFileNotFoundError: [Errno 2] No such file or directory: 'media/Tex/7d1ec941f0e30957.tex'\r\nThe full traceback has been saved in /tmp/sphinx-err-4zdxhjgt.log, if you want to report the issue to the developers.\r\nPlease also report this if it was a user error, so that a better error message can be provided next time.\r\nA bug report can be filed in the tracker at <https://github.com/sphinx-doc/sphinx/issues>. Thanks!\r\nmake: *** [Makefile:26: html] Error 2\r\n\r\nA similar error message comes e.g. for this example:\r\n```\r\n.. 
manim:: Plot1\r\n :quality: medium\r\n :save_last_frame:\r\n\r\n class Plot1(GraphScene):\r\n def construct(self):\r\n self.setup_axes()\r\n my_func = lambda x: np.sin(x)\r\n func_graph=self.get_graph(my_func)\r\n self.add(func_graph)\r\n```\n", "before_files": [{"content": "r\"\"\"\nA directive for including Manim videos in a Sphinx document\n===========================================================\n\nWhen rendering the HTML documentation, the ``.. manim::`` directive\nimplemented here allows to include rendered videos.\n\nIts basic usage that allows processing **inline content** \nlooks as follows::\n\n .. manim:: MyScene\n\n class MyScene(Scene):\n def construct(self):\n ...\n\nIt is required to pass the name of the class representing the\nscene to be rendered to the directive.\n\nAs a second application, the directive can also be used to\nrender scenes that are defined within doctests, for example::\n\n .. manim:: DirectiveDoctestExample\n\n >>> dot = Dot(color=RED)\n >>> dot.color\n <Color #fc6255>\n >>> class DirectiveDoctestExample(Scene):\n ... def construct(self):\n ... self.play(ShowCreation(dot))\n\n\nOptions\n-------\n\nOptions can be passed as follows::\n\n .. manim:: <Class name>\n :<option name>: <value>\n\nThe following configuration options are supported by the\ndirective:\n\n display_source\n If this flag is present without argument,\n the source code is displayed above the rendered video.\n\n quality : {'low', 'medium', 'high', 'fourk'}\n Controls render quality of the video, in analogy to\n the corresponding command line flags.\n\n save_as_gif\n If this flag is present without argument,\n the scene is rendered as a gif.\n\n save_last_frame\n If this flag is present without argument,\n an image representing the last frame of the scene will\n be rendered and displayed, instead of a video.\n\n\"\"\"\nfrom docutils.parsers.rst import directives, Directive\nfrom docutils.parsers.rst.directives.images import Image\n\nimport jinja2\nimport os\nfrom os.path import relpath\n\nimport shutil\n\nclassnamedict = {}\n\n\nclass ManimDirective(Directive):\n r\"\"\"The ``.. 
manim::`` directive.\n\n See the module docstring for documentation.\n \"\"\"\n has_content = True\n required_arguments = 1\n optional_arguments = 0\n option_spec = {\n \"display_source\": bool,\n \"quality\": lambda arg: directives.choice(\n arg, (\"low\", \"medium\", \"high\", \"fourk\")\n ),\n \"save_as_gif\": bool,\n \"save_last_frame\": bool,\n }\n final_argument_whitespace = True\n\n def run(self):\n from manim import config\n\n global classnamedict\n\n clsname = self.arguments[0]\n if clsname not in classnamedict:\n classnamedict[clsname] = 1\n else:\n classnamedict[clsname] += 1\n\n display_source = \"display_source\" in self.options\n save_as_gif = \"save_as_gif\" in self.options\n save_last_frame = \"save_last_frame\" in self.options\n assert not (save_as_gif and save_last_frame)\n\n frame_rate = config[\"frame_rate\"]\n pixel_height = config[\"pixel_height\"]\n pixel_width = config[\"pixel_width\"]\n\n if \"quality\" in self.options:\n quality = self.options[\"quality\"]\n if quality == \"low\":\n pixel_height = 480\n pixel_width = 854\n frame_rate = 15\n elif quality == \"medium\":\n pixel_height = 720\n pixel_width = 1280\n frame_rate = 30\n elif quality == \"high\":\n pixel_height = 1440\n pixel_width = 2560\n frame_rate = 60\n elif quality == \"fourk\":\n pixel_height = 2160\n pixel_width = 3840\n frame_rate = 60\n\n qualitydir = f\"{pixel_height}p{frame_rate}\"\n\n state_machine = self.state_machine\n document = state_machine.document\n\n source_file_name = document.attributes[\"source\"]\n source_rel_name = relpath(source_file_name, setup.confdir)\n source_rel_dir = os.path.dirname(source_rel_name)\n while source_rel_dir.startswith(os.path.sep):\n source_rel_dir = source_rel_dir[1:]\n\n dest_dir = os.path.abspath(\n os.path.join(setup.app.builder.outdir, source_rel_dir)\n )\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n\n source_block = [\n \".. code-block:: python\",\n \"\",\n *[\" \" + line for line in self.content],\n ]\n source_block = \"\\n\".join(source_block)\n\n media_dir = os.path.join(\"source\", \"media\")\n images_dir = os.path.join(media_dir, \"images\")\n video_dir = os.path.join(media_dir, \"videos\")\n output_file = f\"{clsname}-{classnamedict[clsname]}\"\n\n file_writer_config_code = [\n f'config[\"frame_rate\"] = {frame_rate}',\n f'config[\"pixel_height\"] = {pixel_height}',\n f'config[\"pixel_width\"] = {pixel_width}',\n f'file_writer_config[\"media_dir\"] = \"{media_dir}\"',\n f'file_writer_config[\"images_dir\"] = \"{images_dir}\"',\n f'file_writer_config[\"video_dir\"] = \"{video_dir}\"',\n f'file_writer_config[\"save_last_frame\"] = {save_last_frame}',\n f'file_writer_config[\"save_as_gif\"] = {save_as_gif}',\n f'file_writer_config[\"output_file\"] = \"{output_file}\"',\n ]\n\n user_code = self.content\n if user_code[0].startswith(\">>> \"): # check whether block comes from doctest\n user_code = [\n line[4:] for line in user_code if line.startswith((\">>> \", \"... 
\"))\n ]\n\n code = [\n \"from manim import *\",\n *file_writer_config_code,\n *user_code,\n f\"{clsname}()\",\n ]\n exec(\"\\n\".join(code), globals())\n\n # copy video file to output directory\n if not (save_as_gif or save_last_frame):\n filename = f\"{output_file}.mp4\"\n filesrc = os.path.join(video_dir, qualitydir, filename)\n destfile = os.path.join(dest_dir, filename)\n shutil.copyfile(filesrc, destfile)\n elif save_as_gif:\n filename = f\"{output_file}.gif\"\n filesrc = os.path.join(video_dir, qualitydir, filename)\n elif save_last_frame:\n filename = f\"{output_file}.png\"\n filesrc = os.path.join(images_dir, filename)\n else:\n raise ValueError(\"Invalid combination of render flags received.\")\n\n rendered_template = jinja2.Template(TEMPLATE).render(\n display_source=display_source,\n filesrc=filesrc[6:],\n output_file=output_file,\n save_last_frame=save_last_frame,\n save_as_gif=save_as_gif,\n source_block=source_block,\n )\n state_machine.insert_input(\n rendered_template.split(\"\\n\"), source=document.attributes[\"source\"]\n )\n\n return []\n\n\ndef setup(app):\n import manim\n\n setup.app = app\n setup.config = app.config\n setup.confdir = app.confdir\n app.add_directive(\"manim\", ManimDirective)\n\n metadata = {\"parallel_read_safe\": False, \"parallel_write_safe\": True}\n return metadata\n\n\nTEMPLATE = r\"\"\"\n{% if display_source %}\n.. raw:: html\n\n <div class=\"manim-example\">\n\n{{ source_block }}\n{% endif %}\n\n{% if not (save_as_gif or save_last_frame) %}\n.. raw:: html\n\n <video class=\"manim-video\" controls loop autoplay src=\"./{{ output_file }}.mp4\"></video>\n{% elif save_as_gif %}\n.. image:: {{ filesrc }}\n :align: center\n{% elif save_last_frame %}\n.. image:: {{ filesrc }}\n :align: center\n{% endif %}\n\n{% if display_source %}\n.. raw:: html\n\n </div>\n{% endif %}\n\"\"\"\n", "path": "docs/source/manim_directive.py"}], "after_files": [{"content": "r\"\"\"\nA directive for including Manim videos in a Sphinx document\n===========================================================\n\nWhen rendering the HTML documentation, the ``.. manim::`` directive\nimplemented here allows to include rendered videos.\n\nIts basic usage that allows processing **inline content** \nlooks as follows::\n\n .. manim:: MyScene\n\n class MyScene(Scene):\n def construct(self):\n ...\n\nIt is required to pass the name of the class representing the\nscene to be rendered to the directive.\n\nAs a second application, the directive can also be used to\nrender scenes that are defined within doctests, for example::\n\n .. manim:: DirectiveDoctestExample\n\n >>> dot = Dot(color=RED)\n >>> dot.color\n <Color #fc6255>\n >>> class DirectiveDoctestExample(Scene):\n ... def construct(self):\n ... self.play(ShowCreation(dot))\n\n\nOptions\n-------\n\nOptions can be passed as follows::\n\n .. 
manim:: <Class name>\n :<option name>: <value>\n\nThe following configuration options are supported by the\ndirective:\n\n display_source\n If this flag is present without argument,\n the source code is displayed above the rendered video.\n\n quality : {'low', 'medium', 'high', 'fourk'}\n Controls render quality of the video, in analogy to\n the corresponding command line flags.\n\n save_as_gif\n If this flag is present without argument,\n the scene is rendered as a gif.\n\n save_last_frame\n If this flag is present without argument,\n an image representing the last frame of the scene will\n be rendered and displayed, instead of a video.\n\n\"\"\"\nfrom docutils.parsers.rst import directives, Directive\nfrom docutils.parsers.rst.directives.images import Image\n\nimport jinja2\nimport os\nfrom os.path import relpath\n\nimport shutil\n\nclassnamedict = {}\n\n\nclass ManimDirective(Directive):\n r\"\"\"The ``.. manim::`` directive.\n\n See the module docstring for documentation.\n \"\"\"\n has_content = True\n required_arguments = 1\n optional_arguments = 0\n option_spec = {\n \"display_source\": bool,\n \"quality\": lambda arg: directives.choice(\n arg, (\"low\", \"medium\", \"high\", \"fourk\")\n ),\n \"save_as_gif\": bool,\n \"save_last_frame\": bool,\n }\n final_argument_whitespace = True\n\n def run(self):\n from manim import config\n\n global classnamedict\n\n clsname = self.arguments[0]\n if clsname not in classnamedict:\n classnamedict[clsname] = 1\n else:\n classnamedict[clsname] += 1\n\n display_source = \"display_source\" in self.options\n save_as_gif = \"save_as_gif\" in self.options\n save_last_frame = \"save_last_frame\" in self.options\n assert not (save_as_gif and save_last_frame)\n\n frame_rate = config[\"frame_rate\"]\n pixel_height = config[\"pixel_height\"]\n pixel_width = config[\"pixel_width\"]\n\n if \"quality\" in self.options:\n quality = self.options[\"quality\"]\n if quality == \"low\":\n pixel_height = 480\n pixel_width = 854\n frame_rate = 15\n elif quality == \"medium\":\n pixel_height = 720\n pixel_width = 1280\n frame_rate = 30\n elif quality == \"high\":\n pixel_height = 1440\n pixel_width = 2560\n frame_rate = 60\n elif quality == \"fourk\":\n pixel_height = 2160\n pixel_width = 3840\n frame_rate = 60\n\n qualitydir = f\"{pixel_height}p{frame_rate}\"\n\n state_machine = self.state_machine\n document = state_machine.document\n\n source_file_name = document.attributes[\"source\"]\n source_rel_name = relpath(source_file_name, setup.confdir)\n source_rel_dir = os.path.dirname(source_rel_name)\n while source_rel_dir.startswith(os.path.sep):\n source_rel_dir = source_rel_dir[1:]\n\n dest_dir = os.path.abspath(\n os.path.join(setup.app.builder.outdir, source_rel_dir)\n )\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n\n source_block = [\n \".. 
code-block:: python\",\n \"\",\n *[\" \" + line for line in self.content],\n ]\n source_block = \"\\n\".join(source_block)\n\n media_dir = os.path.join(setup.confdir, \"media\")\n if not os.path.exists(media_dir):\n os.mkdir(media_dir)\n images_dir = os.path.join(media_dir, \"images\")\n if not os.path.exists(images_dir):\n os.mkdir(images_dir)\n tex_dir = os.path.join(media_dir, \"tex\")\n if not os.path.exists(tex_dir):\n os.mkdir(tex_dir)\n text_dir = os.path.join(media_dir, \"text\")\n if not os.path.exists(text_dir):\n os.mkdir(text_dir)\n video_dir = os.path.join(media_dir, \"videos\")\n output_file = f\"{clsname}-{classnamedict[clsname]}\"\n\n file_writer_config_code = [\n f'config[\"frame_rate\"] = {frame_rate}',\n f'config[\"pixel_height\"] = {pixel_height}',\n f'config[\"pixel_width\"] = {pixel_width}',\n f'file_writer_config[\"media_dir\"] = \"{media_dir}\"',\n f'file_writer_config[\"images_dir\"] = \"{images_dir}\"',\n f'file_writer_config[\"tex_dir\"] = \"{tex_dir}\"',\n f'file_writer_config[\"text_dir\"] = \"{text_dir}\"',\n f'file_writer_config[\"video_dir\"] = \"{video_dir}\"',\n f'file_writer_config[\"save_last_frame\"] = {save_last_frame}',\n f'file_writer_config[\"save_as_gif\"] = {save_as_gif}',\n f'file_writer_config[\"output_file\"] = \"{output_file}\"',\n ]\n\n user_code = self.content\n if user_code[0].startswith(\">>> \"): # check whether block comes from doctest\n user_code = [\n line[4:] for line in user_code if line.startswith((\">>> \", \"... \"))\n ]\n\n code = [\n \"from manim import *\",\n *file_writer_config_code,\n *user_code,\n f\"{clsname}()\",\n ]\n exec(\"\\n\".join(code), globals())\n\n # copy video file to output directory\n if not (save_as_gif or save_last_frame):\n filename = f\"{output_file}.mp4\"\n filesrc = os.path.join(video_dir, qualitydir, filename)\n destfile = os.path.join(dest_dir, filename)\n shutil.copyfile(filesrc, destfile)\n elif save_as_gif:\n filename = f\"{output_file}.gif\"\n filesrc = os.path.join(video_dir, qualitydir, filename)\n elif save_last_frame:\n filename = f\"{output_file}.png\"\n filesrc = os.path.join(images_dir, filename)\n else:\n raise ValueError(\"Invalid combination of render flags received.\")\n\n rendered_template = jinja2.Template(TEMPLATE).render(\n display_source=display_source,\n filesrc_rel=os.path.relpath(filesrc, setup.confdir),\n output_file=output_file,\n save_last_frame=save_last_frame,\n save_as_gif=save_as_gif,\n source_block=source_block,\n )\n state_machine.insert_input(\n rendered_template.split(\"\\n\"), source=document.attributes[\"source\"]\n )\n\n return []\n\n\ndef setup(app):\n import manim\n\n setup.app = app\n setup.config = app.config\n setup.confdir = app.confdir\n app.add_directive(\"manim\", ManimDirective)\n\n metadata = {\"parallel_read_safe\": False, \"parallel_write_safe\": True}\n return metadata\n\n\nTEMPLATE = r\"\"\"\n{% if display_source %}\n.. raw:: html\n\n <div class=\"manim-example\">\n\n{{ source_block }}\n{% endif %}\n\n{% if not (save_as_gif or save_last_frame) %}\n.. raw:: html\n\n <video class=\"manim-video\" controls loop autoplay src=\"./{{ output_file }}.mp4\"></video>\n{% elif save_as_gif %}\n.. image:: /{{ filesrc_rel }}\n :align: center\n{% elif save_last_frame %}\n.. image:: /{{ filesrc_rel }}\n :align: center\n{% endif %}\n\n{% if display_source %}\n.. raw:: html\n\n </div>\n{% endif %}\n\"\"\"\n", "path": "docs/source/manim_directive.py"}]} | 3,076 | 575 |
gh_patches_debug_38239 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-795 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Getting 'W2030:You must specify a valid Default value' in 0.17.1
*cfn-lint version: (`cfn-lint --version`)*
`cfn-lint 0.17.1`
*Description of issue.*
```
[cfn-lint] W2030:You must specify a valid Default value for DemoInstanceType (/Demo/DemoInstanceType).
Valid values are ['a1.2xlarge', 'a1.4xlarge', 'a1.large', 'a1.medium', 'a1.xlarge', 'c1.medium', 'c1.xlarge', 'c3.2xlarge', 'c3.4xlarge', 'c3.8xlarge', 'c3.large', 'c3.xlarge', 'c4.2xlarge', 'c4.4xlarge', 'c4.8xlarge', 'c4.large', 'c4.xlarge', 'c5.18xlarge', 'c5.2xlarge', 'c5.4xlarge', 'c5.9xlarge', 'c5.large', 'c5.xlarge', 'c5d.18xlarge', 'c5d.2xlarge', 'c5d.4xlarge', 'c5d.9xlarge', 'c5d.large', 'c5d.xlarge', 'c5n.18xlarge', 'c5n.2xlarge', 'c5n.4xlarge', 'c5n.9xlarge', 'c5n.large', 'c5n.xlarge', 'cc2.8xlarge', 'cr1.8xlarge', 'd2.2xlarge', 'd2.4xlarge', 'd2.8xlarge', 'd2.xlarge', 'f1.16xlarge', 'f1.2xlarge', 'f1.4xlarge', 'g2.2xlarge', 'g2.8xlarge', 'g3.16xlarge', 'g3.4xlarge', 'g3.8xlarge', 'g3s.xlarge', 'h1.16xlarge', 'h1.2xlarge', 'h1.4xlarge', 'h1.8xlarge', 'hs1.8xlarge', 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', 'i2.xlarge', 'i3.16xlarge', 'i3.2xlarge', 'i3.4xlarge', 'i3.8xlarge', 'i3.large', 'i3.xlarge', 'm1.large', 'm1.medium', 'm1.small', 'm1.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'm2.xlarge', 'm3.2xlarge', 'm3.large', 'm3.medium', 'm3.xlarge', 'm4.10xlarge', 'm4.16xlarge', 'm4.2xlarge', 'm4.4xlarge', 'm4.large', 'm4.xlarge', 'm5.12xlarge', 'm5.24xlarge', 'm5.2xlarge', 'm5.4xlarge', 'm5.large', 'm5.metal', 'm5.xlarge', 'm5a.12xlarge', 'm5a.24xlarge', 'm5a.2xlarge', 'm5a.4xlarge', 'm5a.large', 'm5a.xlarge', 'm5ad.12xlarge', 'm5ad.24xlarge', 'm5ad.2xlarge', 'm5ad.4xlarge', 'm5ad.large', 'm5ad.xlarge', 'm5d.12xlarge', 'm5d.24xlarge', 'm5d.2xlarge', 'm5d.4xlarge', 'm5d.large', 'm5d.metal', 'm5d.xlarge', 'p2.16xlarge', 'p2.8xlarge', 'p2.xlarge', 'p3.16xlarge', 'p3.2xlarge', 'p3.8xlarge', 'p3dn.24xlarge', 'r3.2xlarge', 'r3.4xlarge', 'r3.8xlarge', 'r3.large', 'r3.xlarge', 'r4.16xlarge', 'r4.2xlarge', 'r4.4xlarge', 'r4.8xlarge', 'r4.large', 'r4.xlarge', 'r5.12xlarge', 'r5.24xlarge', 'r5.2xlarge', 'r5.4xlarge', 'r5.large', 'r5.xlarge', 'r5a.12xlarge', 'r5a.24xlarge', 'r5a.2xlarge', 'r5a.4xlarge', 'r5a.large', 'r5a.xlarge', 'r5ad.12xlarge', 'r5ad.24xlarge', 'r5ad.2xlarge', 'r5ad.4xlarge', 'r5ad.large', 'r5ad.xlarge', 'r5d.12xlarge', 'r5d.24xlarge', 'r5d.2xlarge', 'r5d.4xlarge', 'r5d.large', 'r5d.xlarge', 't1.micro', 't2.2xlarge', 't2.large', 't2.medium', 't2.micro', 't2.nano', 't2.small', 't2.xlarge', 't3.2xlarge', 't3.large', 't3.medium', 't3.micro', 't3.nano', 't3.small', 't3.xlarge', 'x1.16xlarge', 'x1.32xlarge', 'x1e.16xlarge', 'x1e.2xlarge', 'x1e.32xlarge', 'x1e.4xlarge', 'x1e.8xlarge', 'x1e.xlarge', 'z1d.12xlarge', 'z1d.2xlarge', 'z1d.3xlarge', 'z1d.6xlarge', 'z1d.large', 'z1d.xlarge']
```
The CloudFormation parameter is:
```
DemoInstanceType:
  Default: /Demo/DemoInstanceType # Recommend t3.nano
  Description: EC2 instance type to use to create the collector host
  Type: AWS::SSM::Parameter::Value<String>
```
The value of the SSM parameter is `t3.nano`
I have an older project using the same pattern and the virtual environment still has cfn-lint version 0.12.0. It's not raising this complaint. I verified by updating to latest (0.17.1) and the problem cropped up. When I downgraded back to 0.12.0, the problem went away.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/parameters/AllowedValue.py`
Content:
```
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 from cfnlint import CloudFormationLintRule
18 from cfnlint import RuleMatch
19
20 from cfnlint.helpers import RESOURCE_SPECS
21
22
23 class AllowedValue(CloudFormationLintRule):
24 """Check if parameters have a valid value"""
25 id = 'W2030'
26 shortdesc = 'Check if parameters have a valid value'
27 description = 'Check if parameters have a valid value in case of an enumator. The Parameter''s allowed values is based on the usages in property (Ref)'
28 source_url = 'https://github.com/aws-cloudformation/cfn-python-lint/blob/master/docs/cfn-resource-specification.md#allowedvalue'
29 tags = ['resources', 'property', 'allowed value']
30
31 def initialize(self, cfn):
32 """Initialize the rule"""
33 for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'):
34 self.resource_property_types.append(resource_type_spec)
35 for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):
36 self.resource_sub_property_types.append(property_type_spec)
37
38 def check_value_ref(self, value, **kwargs):
39 """Check Ref"""
40 matches = []
41
42 allowed_value_specs = kwargs.get('value_specs', {}).get('AllowedValues', {})
43 cfn = kwargs.get('cfn')
44
45 if allowed_value_specs:
46 if value in cfn.template.get('Parameters', {}):
47 param = cfn.template.get('Parameters').get(value, {})
48 parameter_values = param.get('AllowedValues')
49 default_value = param.get('Default')
50
51 # Check Allowed Values
52 if parameter_values:
53 for index, allowed_value in enumerate(parameter_values):
54 if allowed_value not in allowed_value_specs:
55 param_path = ['Parameters', value, 'AllowedValues', index]
56 message = 'You must specify a valid allowed value for {0} ({1}).\nValid values are {2}'
57 matches.append(RuleMatch(param_path, message.format(value, allowed_value, allowed_value_specs)))
58 elif default_value:
59 # Check Default, only if no allowed Values are specified in the parameter (that's covered by E2015)
60 if default_value not in allowed_value_specs:
61 param_path = ['Parameters', value, 'Default']
62 message = 'You must specify a valid Default value for {0} ({1}).\nValid values are {2}'
63 matches.append(RuleMatch(param_path, message.format(value, default_value, allowed_value_specs)))
64
65 return matches
66
67 def check(self, cfn, properties, value_specs, property_specs, path):
68 """Check itself"""
69 matches = list()
70 for p_value, p_path in properties.items_safe(path[:]):
71 for prop in p_value:
72 if prop in value_specs:
73 value = value_specs.get(prop).get('Value', {})
74 if value:
75 value_type = value.get('ValueType', '')
76 property_type = property_specs.get('Properties').get(prop).get('Type')
77 matches.extend(
78 cfn.check_value(
79 p_value, prop, p_path,
80 check_ref=self.check_value_ref,
81 value_specs=RESOURCE_SPECS.get(cfn.regions[0]).get('ValueTypes').get(value_type, {}),
82 cfn=cfn, property_type=property_type, property_name=prop
83 )
84 )
85
86 return matches
87
88 def match_resource_sub_properties(self, properties, property_type, path, cfn):
89 """Match for sub properties"""
90 matches = list()
91
92 specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type, {}).get('Properties', {})
93 property_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type)
94 matches.extend(self.check(cfn, properties, specs, property_specs, path))
95
96 return matches
97
98 def match_resource_properties(self, properties, resource_type, path, cfn):
99 """Check CloudFormation Properties"""
100 matches = list()
101
102 specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type, {}).get('Properties', {})
103 resource_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type)
104 matches.extend(self.check(cfn, properties, specs, resource_specs, path))
105
106 return matches
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/rules/parameters/AllowedValue.py b/src/cfnlint/rules/parameters/AllowedValue.py
--- a/src/cfnlint/rules/parameters/AllowedValue.py
+++ b/src/cfnlint/rules/parameters/AllowedValue.py
@@ -14,6 +14,7 @@
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
+import six
from cfnlint import CloudFormationLintRule
from cfnlint import RuleMatch
@@ -47,20 +48,24 @@
param = cfn.template.get('Parameters').get(value, {})
parameter_values = param.get('AllowedValues')
default_value = param.get('Default')
-
- # Check Allowed Values
- if parameter_values:
- for index, allowed_value in enumerate(parameter_values):
- if allowed_value not in allowed_value_specs:
- param_path = ['Parameters', value, 'AllowedValues', index]
- message = 'You must specify a valid allowed value for {0} ({1}).\nValid values are {2}'
- matches.append(RuleMatch(param_path, message.format(value, allowed_value, allowed_value_specs)))
- elif default_value:
- # Check Default, only if no allowed Values are specified in the parameter (that's covered by E2015)
- if default_value not in allowed_value_specs:
- param_path = ['Parameters', value, 'Default']
- message = 'You must specify a valid Default value for {0} ({1}).\nValid values are {2}'
- matches.append(RuleMatch(param_path, message.format(value, default_value, allowed_value_specs)))
+ parameter_type = param.get('Type')
+ if isinstance(parameter_type, six.string_types):
+ if ((not parameter_type.startswith('List<')) and
+ (not parameter_type.startswith('AWS::SSM::Parameter::Value<')) and
+ parameter_type not in ['CommaDelimitedList']):
+ # Check Allowed Values
+ if parameter_values:
+ for index, allowed_value in enumerate(parameter_values):
+ if allowed_value not in allowed_value_specs:
+ param_path = ['Parameters', value, 'AllowedValues', index]
+ message = 'You must specify a valid allowed value for {0} ({1}).\nValid values are {2}'
+ matches.append(RuleMatch(param_path, message.format(value, allowed_value, allowed_value_specs)))
+ elif default_value:
+ # Check Default, only if no allowed Values are specified in the parameter (that's covered by E2015)
+ if default_value not in allowed_value_specs:
+ param_path = ['Parameters', value, 'Default']
+ message = 'You must specify a valid Default value for {0} ({1}).\nValid values are {2}'
+ matches.append(RuleMatch(param_path, message.format(value, default_value, allowed_value_specs)))
return matches
| {"golden_diff": "diff --git a/src/cfnlint/rules/parameters/AllowedValue.py b/src/cfnlint/rules/parameters/AllowedValue.py\n--- a/src/cfnlint/rules/parameters/AllowedValue.py\n+++ b/src/cfnlint/rules/parameters/AllowedValue.py\n@@ -14,6 +14,7 @@\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n \"\"\"\n+import six\n from cfnlint import CloudFormationLintRule\n from cfnlint import RuleMatch\n \n@@ -47,20 +48,24 @@\n param = cfn.template.get('Parameters').get(value, {})\n parameter_values = param.get('AllowedValues')\n default_value = param.get('Default')\n-\n- # Check Allowed Values\n- if parameter_values:\n- for index, allowed_value in enumerate(parameter_values):\n- if allowed_value not in allowed_value_specs:\n- param_path = ['Parameters', value, 'AllowedValues', index]\n- message = 'You must specify a valid allowed value for {0} ({1}).\\nValid values are {2}'\n- matches.append(RuleMatch(param_path, message.format(value, allowed_value, allowed_value_specs)))\n- elif default_value:\n- # Check Default, only if no allowed Values are specified in the parameter (that's covered by E2015)\n- if default_value not in allowed_value_specs:\n- param_path = ['Parameters', value, 'Default']\n- message = 'You must specify a valid Default value for {0} ({1}).\\nValid values are {2}'\n- matches.append(RuleMatch(param_path, message.format(value, default_value, allowed_value_specs)))\n+ parameter_type = param.get('Type')\n+ if isinstance(parameter_type, six.string_types):\n+ if ((not parameter_type.startswith('List<')) and\n+ (not parameter_type.startswith('AWS::SSM::Parameter::Value<')) and\n+ parameter_type not in ['CommaDelimitedList']):\n+ # Check Allowed Values\n+ if parameter_values:\n+ for index, allowed_value in enumerate(parameter_values):\n+ if allowed_value not in allowed_value_specs:\n+ param_path = ['Parameters', value, 'AllowedValues', index]\n+ message = 'You must specify a valid allowed value for {0} ({1}).\\nValid values are {2}'\n+ matches.append(RuleMatch(param_path, message.format(value, allowed_value, allowed_value_specs)))\n+ elif default_value:\n+ # Check Default, only if no allowed Values are specified in the parameter (that's covered by E2015)\n+ if default_value not in allowed_value_specs:\n+ param_path = ['Parameters', value, 'Default']\n+ message = 'You must specify a valid Default value for {0} ({1}).\\nValid values are {2}'\n+ matches.append(RuleMatch(param_path, message.format(value, default_value, allowed_value_specs)))\n \n return matches\n", "issue": "Getting 'W2030:You must specify a valid Default value' in 0.17.1\n*cfn-lint version: (`cfn-lint --version`)*\r\n`cfn-lint 0.17.1`\r\n\r\n*Description of issue.*\r\n```\r\n[cfn-lint] W2030:You must specify a valid Default value for DemoInstanceType (/Demo/DemoInstanceType).\r\nValid values are ['a1.2xlarge', 'a1.4xlarge', 'a1.large', 'a1.medium', 'a1.xlarge', 'c1.medium', 'c1.xlarge', 'c3.2xlarge', 'c3.4xlarge', 'c3.8xlarge', 'c3.large', 'c3.xlarge', 'c4.2xlarge', 'c4.4xlarge', 'c4.8xlarge', 'c4.large', 'c4.xlarge', 'c5.18xlarge', 'c5.2xlarge', 'c5.4xlarge', 'c5.9xlarge', 'c5.large', 'c5.xlarge', 'c5d.18xlarge', 'c5d.2xlarge', 'c5d.4xlarge', 'c5d.9xlarge', 'c5d.large', 'c5d.xlarge', 'c5n.18xlarge', 'c5n.2xlarge', 'c5n.4xlarge', 'c5n.9xlarge', 'c5n.large', 'c5n.xlarge', 'cc2.8xlarge', 'cr1.8xlarge', 'd2.2xlarge', 'd2.4xlarge', 'd2.8xlarge', 'd2.xlarge', 'f1.16xlarge', 'f1.2xlarge', 'f1.4xlarge', 'g2.2xlarge', 'g2.8xlarge', 'g3.16xlarge', 
'g3.4xlarge', 'g3.8xlarge', 'g3s.xlarge', 'h1.16xlarge', 'h1.2xlarge', 'h1.4xlarge', 'h1.8xlarge', 'hs1.8xlarge', 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', 'i2.xlarge', 'i3.16xlarge', 'i3.2xlarge', 'i3.4xlarge', 'i3.8xlarge', 'i3.large', 'i3.xlarge', 'm1.large', 'm1.medium', 'm1.small', 'm1.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'm2.xlarge', 'm3.2xlarge', 'm3.large', 'm3.medium', 'm3.xlarge', 'm4.10xlarge', 'm4.16xlarge', 'm4.2xlarge', 'm4.4xlarge', 'm4.large', 'm4.xlarge', 'm5.12xlarge', 'm5.24xlarge', 'm5.2xlarge', 'm5.4xlarge', 'm5.large', 'm5.metal', 'm5.xlarge', 'm5a.12xlarge', 'm5a.24xlarge', 'm5a.2xlarge', 'm5a.4xlarge', 'm5a.large', 'm5a.xlarge', 'm5ad.12xlarge', 'm5ad.24xlarge', 'm5ad.2xlarge', 'm5ad.4xlarge', 'm5ad.large', 'm5ad.xlarge', 'm5d.12xlarge', 'm5d.24xlarge', 'm5d.2xlarge', 'm5d.4xlarge', 'm5d.large', 'm5d.metal', 'm5d.xlarge', 'p2.16xlarge', 'p2.8xlarge', 'p2.xlarge', 'p3.16xlarge', 'p3.2xlarge', 'p3.8xlarge', 'p3dn.24xlarge', 'r3.2xlarge', 'r3.4xlarge', 'r3.8xlarge', 'r3.large', 'r3.xlarge', 'r4.16xlarge', 'r4.2xlarge', 'r4.4xlarge', 'r4.8xlarge', 'r4.large', 'r4.xlarge', 'r5.12xlarge', 'r5.24xlarge', 'r5.2xlarge', 'r5.4xlarge', 'r5.large', 'r5.xlarge', 'r5a.12xlarge', 'r5a.24xlarge', 'r5a.2xlarge', 'r5a.4xlarge', 'r5a.large', 'r5a.xlarge', 'r5ad.12xlarge', 'r5ad.24xlarge', 'r5ad.2xlarge', 'r5ad.4xlarge', 'r5ad.large', 'r5ad.xlarge', 'r5d.12xlarge', 'r5d.24xlarge', 'r5d.2xlarge', 'r5d.4xlarge', 'r5d.large', 'r5d.xlarge', 't1.micro', 't2.2xlarge', 't2.large', 't2.medium', 't2.micro', 't2.nano', 't2.small', 't2.xlarge', 't3.2xlarge', 't3.large', 't3.medium', 't3.micro', 't3.nano', 't3.small', 't3.xlarge', 'x1.16xlarge', 'x1.32xlarge', 'x1e.16xlarge', 'x1e.2xlarge', 'x1e.32xlarge', 'x1e.4xlarge', 'x1e.8xlarge', 'x1e.xlarge', 'z1d.12xlarge', 'z1d.2xlarge', 'z1d.3xlarge', 'z1d.6xlarge', 'z1d.large', 'z1d.xlarge']\r\n```\r\n\r\nThe CloudFormation parameter is :\r\n```\r\n DemoInstanceType:\r\n Default: /Demo/DemoInstanceType # Recommend t3.nano\r\n Description: EC2 instance type to use to create the collector host\r\n Type: AWS::SSM::Parameter::Value<String>\r\n```\r\n\r\nThe value of the SSM parameter is `t3.nano`\r\n\r\nI have an older project using the same pattern and the virtual environment still has cfn-lint version 0.12.0. It's not raising this complaint. I verified by updating to latest (0.17.1) and the problem cropped up. When I downgraded back to 0.12.0, the problem went away.\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\nfrom cfnlint.helpers import RESOURCE_SPECS\n\n\nclass AllowedValue(CloudFormationLintRule):\n \"\"\"Check if parameters have a valid value\"\"\"\n id = 'W2030'\n shortdesc = 'Check if parameters have a valid value'\n description = 'Check if parameters have a valid value in case of an enumator. The Parameter''s allowed values is based on the usages in property (Ref)'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint/blob/master/docs/cfn-resource-specification.md#allowedvalue'\n tags = ['resources', 'property', 'allowed value']\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'):\n self.resource_property_types.append(resource_type_spec)\n for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):\n self.resource_sub_property_types.append(property_type_spec)\n\n def check_value_ref(self, value, **kwargs):\n \"\"\"Check Ref\"\"\"\n matches = []\n\n allowed_value_specs = kwargs.get('value_specs', {}).get('AllowedValues', {})\n cfn = kwargs.get('cfn')\n\n if allowed_value_specs:\n if value in cfn.template.get('Parameters', {}):\n param = cfn.template.get('Parameters').get(value, {})\n parameter_values = param.get('AllowedValues')\n default_value = param.get('Default')\n\n # Check Allowed Values\n if parameter_values:\n for index, allowed_value in enumerate(parameter_values):\n if allowed_value not in allowed_value_specs:\n param_path = ['Parameters', value, 'AllowedValues', index]\n message = 'You must specify a valid allowed value for {0} ({1}).\\nValid values are {2}'\n matches.append(RuleMatch(param_path, message.format(value, allowed_value, allowed_value_specs)))\n elif default_value:\n # Check Default, only if no allowed Values are specified in the parameter (that's covered by E2015)\n if default_value not in allowed_value_specs:\n param_path = ['Parameters', value, 'Default']\n message = 'You must specify a valid Default value for {0} ({1}).\\nValid values are {2}'\n matches.append(RuleMatch(param_path, message.format(value, default_value, allowed_value_specs)))\n\n return matches\n\n def check(self, cfn, properties, value_specs, property_specs, path):\n \"\"\"Check itself\"\"\"\n matches = list()\n for p_value, p_path in properties.items_safe(path[:]):\n for prop in p_value:\n if prop in value_specs:\n value = value_specs.get(prop).get('Value', {})\n if value:\n value_type = value.get('ValueType', '')\n property_type = property_specs.get('Properties').get(prop).get('Type')\n matches.extend(\n cfn.check_value(\n p_value, prop, p_path,\n check_ref=self.check_value_ref,\n value_specs=RESOURCE_SPECS.get(cfn.regions[0]).get('ValueTypes').get(value_type, {}),\n cfn=cfn, property_type=property_type, property_name=prop\n )\n )\n\n return matches\n\n def match_resource_sub_properties(self, properties, property_type, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = list()\n\n specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type, {}).get('Properties', {})\n property_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type)\n 
matches.extend(self.check(cfn, properties, specs, property_specs, path))\n\n return matches\n\n def match_resource_properties(self, properties, resource_type, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = list()\n\n specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type, {}).get('Properties', {})\n resource_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type)\n matches.extend(self.check(cfn, properties, specs, resource_specs, path))\n\n return matches\n", "path": "src/cfnlint/rules/parameters/AllowedValue.py"}], "after_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\nfrom cfnlint.helpers import RESOURCE_SPECS\n\n\nclass AllowedValue(CloudFormationLintRule):\n \"\"\"Check if parameters have a valid value\"\"\"\n id = 'W2030'\n shortdesc = 'Check if parameters have a valid value'\n description = 'Check if parameters have a valid value in case of an enumator. 
The Parameter''s allowed values is based on the usages in property (Ref)'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint/blob/master/docs/cfn-resource-specification.md#allowedvalue'\n tags = ['resources', 'property', 'allowed value']\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'):\n self.resource_property_types.append(resource_type_spec)\n for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):\n self.resource_sub_property_types.append(property_type_spec)\n\n def check_value_ref(self, value, **kwargs):\n \"\"\"Check Ref\"\"\"\n matches = []\n\n allowed_value_specs = kwargs.get('value_specs', {}).get('AllowedValues', {})\n cfn = kwargs.get('cfn')\n\n if allowed_value_specs:\n if value in cfn.template.get('Parameters', {}):\n param = cfn.template.get('Parameters').get(value, {})\n parameter_values = param.get('AllowedValues')\n default_value = param.get('Default')\n parameter_type = param.get('Type')\n if isinstance(parameter_type, six.string_types):\n if ((not parameter_type.startswith('List<')) and\n (not parameter_type.startswith('AWS::SSM::Parameter::Value<')) and\n parameter_type not in ['CommaDelimitedList']):\n # Check Allowed Values\n if parameter_values:\n for index, allowed_value in enumerate(parameter_values):\n if allowed_value not in allowed_value_specs:\n param_path = ['Parameters', value, 'AllowedValues', index]\n message = 'You must specify a valid allowed value for {0} ({1}).\\nValid values are {2}'\n matches.append(RuleMatch(param_path, message.format(value, allowed_value, allowed_value_specs)))\n elif default_value:\n # Check Default, only if no allowed Values are specified in the parameter (that's covered by E2015)\n if default_value not in allowed_value_specs:\n param_path = ['Parameters', value, 'Default']\n message = 'You must specify a valid Default value for {0} ({1}).\\nValid values are {2}'\n matches.append(RuleMatch(param_path, message.format(value, default_value, allowed_value_specs)))\n\n return matches\n\n def check(self, cfn, properties, value_specs, property_specs, path):\n \"\"\"Check itself\"\"\"\n matches = list()\n for p_value, p_path in properties.items_safe(path[:]):\n for prop in p_value:\n if prop in value_specs:\n value = value_specs.get(prop).get('Value', {})\n if value:\n value_type = value.get('ValueType', '')\n property_type = property_specs.get('Properties').get(prop).get('Type')\n matches.extend(\n cfn.check_value(\n p_value, prop, p_path,\n check_ref=self.check_value_ref,\n value_specs=RESOURCE_SPECS.get(cfn.regions[0]).get('ValueTypes').get(value_type, {}),\n cfn=cfn, property_type=property_type, property_name=prop\n )\n )\n\n return matches\n\n def match_resource_sub_properties(self, properties, property_type, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = list()\n\n specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type, {}).get('Properties', {})\n property_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type)\n matches.extend(self.check(cfn, properties, specs, property_specs, path))\n\n return matches\n\n def match_resource_properties(self, properties, resource_type, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = list()\n\n specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type, {}).get('Properties', {})\n resource_specs = 
RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type)\n matches.extend(self.check(cfn, properties, specs, resource_specs, path))\n\n return matches\n", "path": "src/cfnlint/rules/parameters/AllowedValue.py"}]} | 3,207 | 653 |
gh_patches_debug_9753 | rasdani/github-patches | git_diff | bentoml__BentoML-1625 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deployment on remote Yatai server fails due to injection issue
**Describe the bug**
Attempting to deploy to SageMaker or Lambda fails with this error:
```
Error: sagemaker deploy failed: INTERNAL:<dependency_injector.wiring.Provide object at 0x11f748be0> has type Provide, but expected one of: bytes, unicode
```
**To Reproduce**
**This is based on the latest version of the code as of this writing**
- Start remote Yatai server
- Configure BentoML to use the remote Yatai server (e.g. by modifying `default_bentoml.yml`)
- Start a deployment to SageMaker or Lambda (without specifying a namespace through the `--namespace` option)
- The error message above is shown
**Expected behavior**
Deployment should proceed normally, and the error message should not be displayed.
**Environment:**
- BentoML version 0.12.1+24.g4019bac.dirty
**Additional context**
After some initial debugging, the error appears to originate from this line: https://github.com/bentoml/BentoML/blob/4019bac4af320bad73bf960f6bd2d617f3fd4a52/bentoml/yatai/yatai_service_impl.py#L106
`self.default_namespace` is not wired/injected properly, and will instead be a `Provide` object. This causes issues downstream because a string is expected. A workaround is to specify the namespace explicitly when deploying via the CLI (`--namespace`).
My hunch is that `YataiServiceImpl` does not get properly wired/injected due to it being wrapped in the `get_yatai_service_impl` method here: https://github.com/bentoml/BentoML/blob/4019bac4af320bad73bf960f6bd2d617f3fd4a52/bentoml/yatai/yatai_service_impl.py#L74
I have little experience with dependency injection so couldn't figure out _why_ it wouldn't get wired properly.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bentoml/configuration/__init__.py`
Content:
```
1 # Copyright 2019 Atalaya Tech, Inc.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import logging
17 from functools import lru_cache
18
19 from bentoml import __version__, _version as version_mod
20
21
22 # Note this file is loaded prior to logging being configured, thus logger is only
23 # used within functions in this file
24 logger = logging.getLogger(__name__)
25
26
27 DEBUG_ENV_VAR = "BENTOML_DEBUG"
28
29
30 def expand_env_var(env_var):
31 """Expands potentially nested env var by repeatedly applying `expandvars` and
32 `expanduser` until interpolation stops having any effect.
33 """
34 if not env_var:
35 return env_var
36 while True:
37 interpolated = os.path.expanduser(os.path.expandvars(str(env_var)))
38 if interpolated == env_var:
39 return interpolated
40 else:
41 env_var = interpolated
42
43
44 # This is used as default for config('core', 'bentoml_deploy_version') - which is used
45 # for getting the BentoML PyPI version string or the URL to a BentoML sdist, indicating
46 # the BentoML module to be used when loading and using a saved BentoService bundle.
47 # This is useful when using customized BentoML fork/branch or when working with
48 # development branches of BentoML
49 BENTOML_VERSION = __version__
50 # e.g. from '0.4.2+5.g6cac97f.dirty' to '0.4.2'
51 LAST_PYPI_RELEASE_VERSION = __version__.split('+')[0]
52
53
54 def _is_pip_installed_bentoml():
55 is_installed_package = hasattr(version_mod, 'version_json')
56 is_tagged = not __version__.startswith('0+untagged')
57 is_clean = not version_mod.get_versions()['dirty']
58 return is_installed_package and is_tagged and is_clean
59
60
61 def get_local_config_file():
62 if "BENTOML_CONFIG" in os.environ:
63 # User local config file for customizing bentoml
64 return expand_env_var(os.environ.get("BENTOML_CONFIG"))
65 return None
66
67
68 @lru_cache(maxsize=1)
69 def get_bentoml_deploy_version(bentoml_deploy_version: str):
70 """
71 BentoML version to use for generated docker image or serverless function bundle to
72 be deployed, this can be changed to an url to your fork of BentoML on github, or an
73 url to your custom BentoML build, for example:
74
75 bentoml_deploy_version = git+https://github.com/{username}/bentoml.git@{branch}
76 """
77
78 if bentoml_deploy_version != LAST_PYPI_RELEASE_VERSION:
79 logger.info(f"Setting BentoML deploy version to '{bentoml_deploy_version}'")
80
81 if LAST_PYPI_RELEASE_VERSION != BENTOML_VERSION:
82 if _is_pip_installed_bentoml():
83 logger.warning(
84 "Using BentoML not from official PyPI release. In order to find the "
85 "same version of BentoML when deploying your BentoService, you must "
86 "set the 'core/bentoml_deploy_version' config to a http/git location "
87 "of your BentoML fork, e.g.: 'bentoml_deploy_version = "
88 "git+https://github.com/{username}/bentoml.git@{branch}'"
89 )
90 else:
91 logger.warning(
92 "Using BentoML installed in `editable` model, the local BentoML "
93 "repository including all code changes will be packaged together with "
94 "saved bundle created, under the './bundled_pip_dependencies' "
95 "directory of the saved bundle."
96 )
97 return bentoml_deploy_version
98
99
100 def set_debug_mode(enabled: bool):
101 os.environ[DEBUG_ENV_VAR] = str(enabled)
102
103 # reconfigure logging
104 from bentoml.utils.log import configure_logging
105
106 configure_logging()
107
108 logger.debug(
109 f"Setting debug mode: {'ON' if enabled else 'OFF'} for current session"
110 )
111
112
113 def get_debug_mode():
114 if DEBUG_ENV_VAR in os.environ:
115 return os.environ[DEBUG_ENV_VAR].lower() == "true"
116 return False
117
118
119 def inject_dependencies():
120 """Inject dependencies and configuration to BentoML packages"""
121
122 from timeit import default_timer as timer
123
124 start = timer()
125
126 logger.debug("Start dependency injection")
127
128 from bentoml.configuration.containers import BentoMLContainer, BentoMLConfiguration
129
130 config_file = get_local_config_file()
131 if config_file and config_file.endswith(".yml"):
132 configuration = BentoMLConfiguration(override_config_file=config_file)
133 else:
134 configuration = BentoMLConfiguration()
135
136 container = BentoMLContainer()
137 container.config.from_dict(configuration.as_dict())
138
139 from bentoml import (
140 marshal,
141 server,
142 tracing,
143 cli,
144 adapters,
145 saved_bundle,
146 service,
147 )
148 from bentoml.yatai import yatai_service
149 from bentoml.yatai.repository import s3_repository, gcs_repository
150
151 container.wire(
152 modules=[yatai_service, s3_repository, gcs_repository],
153 packages=[marshal, server, tracing, cli, adapters, saved_bundle, service],
154 )
155
156 end = timer()
157
158 logger.debug("Dependency injection completed in %.3f seconds", end - start)
159
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bentoml/configuration/__init__.py b/bentoml/configuration/__init__.py
--- a/bentoml/configuration/__init__.py
+++ b/bentoml/configuration/__init__.py
@@ -146,10 +146,11 @@
service,
)
from bentoml.yatai import yatai_service
+ from bentoml.yatai import yatai_service_impl
from bentoml.yatai.repository import s3_repository, gcs_repository
container.wire(
- modules=[yatai_service, s3_repository, gcs_repository],
+ modules=[yatai_service, s3_repository, gcs_repository, yatai_service_impl],
packages=[marshal, server, tracing, cli, adapters, saved_bundle, service],
)
| {"golden_diff": "diff --git a/bentoml/configuration/__init__.py b/bentoml/configuration/__init__.py\n--- a/bentoml/configuration/__init__.py\n+++ b/bentoml/configuration/__init__.py\n@@ -146,10 +146,11 @@\n service,\n )\n from bentoml.yatai import yatai_service\n+ from bentoml.yatai import yatai_service_impl\n from bentoml.yatai.repository import s3_repository, gcs_repository\n \n container.wire(\n- modules=[yatai_service, s3_repository, gcs_repository],\n+ modules=[yatai_service, s3_repository, gcs_repository, yatai_service_impl],\n packages=[marshal, server, tracing, cli, adapters, saved_bundle, service],\n )\n", "issue": "Deployment on remote Yatai server fails due to injection issue\n**Describe the bug**\r\n\r\nAttempting to deploy to SageMaker or Lambda fails with this error:\r\n\r\n```\r\nError: sagemaker deploy failed: INTERNAL:<dependency_injector.wiring.Provide object at 0x11f748be0> has type Provide, but expected one of: bytes, unicode\r\n```\r\n\r\n**To Reproduce**\r\n\r\n**This is based on the latest version of the code as of this writing**\r\n\r\n- Start remote Yatai server\r\n- Configure BentoML to use the remote Yatai server (e.g. by modifying `default_bentoml.yml`\r\n- Start a deployment to SageMaker or Lambda (without specifying a namespace through the `--namespace` option\r\n- The error message above is shown\r\n\r\n**Expected behavior**\r\nDeployment should proceed normally, and the error message should not be displayed.\r\n\r\n**Environment:**\r\n- BentoML version 0.12.1+24.g4019bac.dirty\r\n\r\n\r\n**Additional context**\r\n\r\nAfter some initial debugging, the error appears to originate from this line: https://github.com/bentoml/BentoML/blob/4019bac4af320bad73bf960f6bd2d617f3fd4a52/bentoml/yatai/yatai_service_impl.py#L106\r\n\r\n`self.default_namespace` is not wired / injected properly, and will instead be a `Provide` object. This causes issues downstream as a string is expected. 
A workaround is to specify the environment when deploying via the CLI (`--namespace`).\r\n\r\nMy hunch is that `YataiServiceImpl` does not get properly wired/injected due to it being wrapped in the `get_yatai_service_impl` method here:https://github.com/bentoml/BentoML/blob/4019bac4af320bad73bf960f6bd2d617f3fd4a52/bentoml/yatai/yatai_service_impl.py#L74 \r\n\r\nI have little experience with dependency injection so couldn't figure out _why_ it wouldn't get wired properly.\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2019 Atalaya Tech, Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport logging\nfrom functools import lru_cache\n\nfrom bentoml import __version__, _version as version_mod\n\n\n# Note this file is loaded prior to logging being configured, thus logger is only\n# used within functions in this file\nlogger = logging.getLogger(__name__)\n\n\nDEBUG_ENV_VAR = \"BENTOML_DEBUG\"\n\n\ndef expand_env_var(env_var):\n \"\"\"Expands potentially nested env var by repeatedly applying `expandvars` and\n `expanduser` until interpolation stops having any effect.\n \"\"\"\n if not env_var:\n return env_var\n while True:\n interpolated = os.path.expanduser(os.path.expandvars(str(env_var)))\n if interpolated == env_var:\n return interpolated\n else:\n env_var = interpolated\n\n\n# This is used as default for config('core', 'bentoml_deploy_version') - which is used\n# for getting the BentoML PyPI version string or the URL to a BentoML sdist, indicating\n# the BentoML module to be used when loading and using a saved BentoService bundle.\n# This is useful when using customized BentoML fork/branch or when working with\n# development branches of BentoML\nBENTOML_VERSION = __version__\n# e.g. from '0.4.2+5.g6cac97f.dirty' to '0.4.2'\nLAST_PYPI_RELEASE_VERSION = __version__.split('+')[0]\n\n\ndef _is_pip_installed_bentoml():\n is_installed_package = hasattr(version_mod, 'version_json')\n is_tagged = not __version__.startswith('0+untagged')\n is_clean = not version_mod.get_versions()['dirty']\n return is_installed_package and is_tagged and is_clean\n\n\ndef get_local_config_file():\n if \"BENTOML_CONFIG\" in os.environ:\n # User local config file for customizing bentoml\n return expand_env_var(os.environ.get(\"BENTOML_CONFIG\"))\n return None\n\n\n@lru_cache(maxsize=1)\ndef get_bentoml_deploy_version(bentoml_deploy_version: str):\n \"\"\"\n BentoML version to use for generated docker image or serverless function bundle to\n be deployed, this can be changed to an url to your fork of BentoML on github, or an\n url to your custom BentoML build, for example:\n\n bentoml_deploy_version = git+https://github.com/{username}/bentoml.git@{branch}\n \"\"\"\n\n if bentoml_deploy_version != LAST_PYPI_RELEASE_VERSION:\n logger.info(f\"Setting BentoML deploy version to '{bentoml_deploy_version}'\")\n\n if LAST_PYPI_RELEASE_VERSION != BENTOML_VERSION:\n if _is_pip_installed_bentoml():\n logger.warning(\n \"Using BentoML not from official PyPI release. 
In order to find the \"\n \"same version of BentoML when deploying your BentoService, you must \"\n \"set the 'core/bentoml_deploy_version' config to a http/git location \"\n \"of your BentoML fork, e.g.: 'bentoml_deploy_version = \"\n \"git+https://github.com/{username}/bentoml.git@{branch}'\"\n )\n else:\n logger.warning(\n \"Using BentoML installed in `editable` model, the local BentoML \"\n \"repository including all code changes will be packaged together with \"\n \"saved bundle created, under the './bundled_pip_dependencies' \"\n \"directory of the saved bundle.\"\n )\n return bentoml_deploy_version\n\n\ndef set_debug_mode(enabled: bool):\n os.environ[DEBUG_ENV_VAR] = str(enabled)\n\n # reconfigure logging\n from bentoml.utils.log import configure_logging\n\n configure_logging()\n\n logger.debug(\n f\"Setting debug mode: {'ON' if enabled else 'OFF'} for current session\"\n )\n\n\ndef get_debug_mode():\n if DEBUG_ENV_VAR in os.environ:\n return os.environ[DEBUG_ENV_VAR].lower() == \"true\"\n return False\n\n\ndef inject_dependencies():\n \"\"\"Inject dependencies and configuration to BentoML packages\"\"\"\n\n from timeit import default_timer as timer\n\n start = timer()\n\n logger.debug(\"Start dependency injection\")\n\n from bentoml.configuration.containers import BentoMLContainer, BentoMLConfiguration\n\n config_file = get_local_config_file()\n if config_file and config_file.endswith(\".yml\"):\n configuration = BentoMLConfiguration(override_config_file=config_file)\n else:\n configuration = BentoMLConfiguration()\n\n container = BentoMLContainer()\n container.config.from_dict(configuration.as_dict())\n\n from bentoml import (\n marshal,\n server,\n tracing,\n cli,\n adapters,\n saved_bundle,\n service,\n )\n from bentoml.yatai import yatai_service\n from bentoml.yatai.repository import s3_repository, gcs_repository\n\n container.wire(\n modules=[yatai_service, s3_repository, gcs_repository],\n packages=[marshal, server, tracing, cli, adapters, saved_bundle, service],\n )\n\n end = timer()\n\n logger.debug(\"Dependency injection completed in %.3f seconds\", end - start)\n", "path": "bentoml/configuration/__init__.py"}], "after_files": [{"content": "# Copyright 2019 Atalaya Tech, Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport logging\nfrom functools import lru_cache\n\nfrom bentoml import __version__, _version as version_mod\n\n\n# Note this file is loaded prior to logging being configured, thus logger is only\n# used within functions in this file\nlogger = logging.getLogger(__name__)\n\n\nDEBUG_ENV_VAR = \"BENTOML_DEBUG\"\n\n\ndef expand_env_var(env_var):\n \"\"\"Expands potentially nested env var by repeatedly applying `expandvars` and\n `expanduser` until interpolation stops having any effect.\n \"\"\"\n if not env_var:\n return env_var\n while True:\n interpolated = os.path.expanduser(os.path.expandvars(str(env_var)))\n if interpolated == env_var:\n return interpolated\n else:\n env_var = interpolated\n\n\n# This is used as default for config('core', 
'bentoml_deploy_version') - which is used\n# for getting the BentoML PyPI version string or the URL to a BentoML sdist, indicating\n# the BentoML module to be used when loading and using a saved BentoService bundle.\n# This is useful when using customized BentoML fork/branch or when working with\n# development branches of BentoML\nBENTOML_VERSION = __version__\n# e.g. from '0.4.2+5.g6cac97f.dirty' to '0.4.2'\nLAST_PYPI_RELEASE_VERSION = __version__.split('+')[0]\n\n\ndef _is_pip_installed_bentoml():\n is_installed_package = hasattr(version_mod, 'version_json')\n is_tagged = not __version__.startswith('0+untagged')\n is_clean = not version_mod.get_versions()['dirty']\n return is_installed_package and is_tagged and is_clean\n\n\ndef get_local_config_file():\n if \"BENTOML_CONFIG\" in os.environ:\n # User local config file for customizing bentoml\n return expand_env_var(os.environ.get(\"BENTOML_CONFIG\"))\n return None\n\n\n@lru_cache(maxsize=1)\ndef get_bentoml_deploy_version(bentoml_deploy_version: str):\n \"\"\"\n BentoML version to use for generated docker image or serverless function bundle to\n be deployed, this can be changed to an url to your fork of BentoML on github, or an\n url to your custom BentoML build, for example:\n\n bentoml_deploy_version = git+https://github.com/{username}/bentoml.git@{branch}\n \"\"\"\n\n if bentoml_deploy_version != LAST_PYPI_RELEASE_VERSION:\n logger.info(f\"Setting BentoML deploy version to '{bentoml_deploy_version}'\")\n\n if LAST_PYPI_RELEASE_VERSION != BENTOML_VERSION:\n if _is_pip_installed_bentoml():\n logger.warning(\n \"Using BentoML not from official PyPI release. In order to find the \"\n \"same version of BentoML when deploying your BentoService, you must \"\n \"set the 'core/bentoml_deploy_version' config to a http/git location \"\n \"of your BentoML fork, e.g.: 'bentoml_deploy_version = \"\n \"git+https://github.com/{username}/bentoml.git@{branch}'\"\n )\n else:\n logger.warning(\n \"Using BentoML installed in `editable` model, the local BentoML \"\n \"repository including all code changes will be packaged together with \"\n \"saved bundle created, under the './bundled_pip_dependencies' \"\n \"directory of the saved bundle.\"\n )\n return bentoml_deploy_version\n\n\ndef set_debug_mode(enabled: bool):\n os.environ[DEBUG_ENV_VAR] = str(enabled)\n\n # reconfigure logging\n from bentoml.utils.log import configure_logging\n\n configure_logging()\n\n logger.debug(\n f\"Setting debug mode: {'ON' if enabled else 'OFF'} for current session\"\n )\n\n\ndef get_debug_mode():\n if DEBUG_ENV_VAR in os.environ:\n return os.environ[DEBUG_ENV_VAR].lower() == \"true\"\n return False\n\n\ndef inject_dependencies():\n \"\"\"Inject dependencies and configuration to BentoML packages\"\"\"\n\n from timeit import default_timer as timer\n\n start = timer()\n\n logger.debug(\"Start dependency injection\")\n\n from bentoml.configuration.containers import BentoMLContainer, BentoMLConfiguration\n\n config_file = get_local_config_file()\n if config_file and config_file.endswith(\".yml\"):\n configuration = BentoMLConfiguration(override_config_file=config_file)\n else:\n configuration = BentoMLConfiguration()\n\n container = BentoMLContainer()\n container.config.from_dict(configuration.as_dict())\n\n from bentoml import (\n marshal,\n server,\n tracing,\n cli,\n adapters,\n saved_bundle,\n service,\n )\n from bentoml.yatai import yatai_service\n from bentoml.yatai import yatai_service_impl\n from bentoml.yatai.repository import s3_repository, gcs_repository\n\n 
container.wire(\n modules=[yatai_service, s3_repository, gcs_repository, yatai_service_impl],\n packages=[marshal, server, tracing, cli, adapters, saved_bundle, service],\n )\n\n end = timer()\n\n logger.debug(\"Dependency injection completed in %.3f seconds\", end - start)\n", "path": "bentoml/configuration/__init__.py"}]} | 2,389 | 182 |
gh_patches_debug_1228 | rasdani/github-patches | git_diff | beeware__toga-928 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
toga-demo alias doesn't work on Windows
## Expected Behavior
Examples in the documentation should work. I have to specify version 0.2.15 for anything to run properly - the normal pip installation of toga installs the dev builds that are not functioning.
## Current Behavior
They all fail with various errors of missing items, etc.
```
C:\Users\bubth\Development\togatest> pip install --pre toga-demo
Collecting toga-demo
Downloading https://files.pythonhosted.org/packages/33/05/61d94bccdfe6831eb60fc59cd79c60d7780983d07df984d82e2a8f298b8b
/toga_demo-0.3.0.dev19-py3-none-any.whl (616kB)
|████████████████████████████████| 624kB 819kB/s
Collecting toga==0.3.0.dev18 (from toga-demo)
Downloading https://files.pythonhosted.org/packages/9c/cd/4ec127b063c9b1c6f045791e7613e05247dc30e0cb817bccf09de9377ecf
/toga-0.3.0.dev18-py3-none-any.whl
Collecting toga-winforms==0.3.0.dev18; sys_platform == "win32" (from toga==0.3.0.dev18->toga-demo)
Downloading https://files.pythonhosted.org/packages/81/67/6e16ddc4c4286a4b6f08005c66006524e305c3befca01df34f509ef76202
/toga_winforms-0.3.0.dev18-py3-none-any.whl
Collecting toga-core==0.3.0.dev18 (from toga-winforms==0.3.0.dev18; sys_platform == "win32"->toga==0.3.0.dev18->toga-dem
o)
/toga_core-0.3.0.dev18-py3-none-any.whl (512kB)
|████████████████████████████████| 522kB 6.8MB/s
Requirement already satisfied: pythonnet in c:\program files\python37\lib\site-packages (from toga-winforms==0.3.0.dev18Requirement already satisfied: importlib-metadata; python_version < "3.8" in c:\users\bubth\appdata\roaming\python\pythotoga-demo) (0.18)
Collecting travertino>=0.1.0 (from toga-core==0.3.0.dev18->toga-winforms==0.3.0.dev18; sys_platform == "win32"->toga==0.3.0.dev18->toga-demo)
Downloading https://files.pythonhosted.org/packages/4c/78/b33e38d372707fbf2c461d1bde6797a12c8d20f97279db63cb57dc24eacb/travertino-0.1.3-py3-none-any.whl
Requirement already satisfied: zipp>=0.5 in c:\users\bubth\appdata\roaming\python\python37\site-packages (from importlib-metadata; python_version < "3.8"->toga-core==0.3.0.dev18->toga-winforms==0.3.0.dev18; sys_platform == "win32"->toga==0.3.0.dev18->toga-demo) (0.5.2)
Installing collected packages: travertino, toga-core, toga-winforms, toga, toga-demo
Found existing installation: toga-core 0.2.15
Uninstalling toga-core-0.2.15:
Successfully uninstalled toga-core-0.2.15
Found existing installation: toga-winforms 0.2.15
Uninstalling toga-winforms-0.2.15:
Successfully uninstalled toga-winforms-0.2.15
Found existing installation: toga 0.2.15
Uninstalling toga-0.2.15:
Successfully uninstalled toga-0.2.15
Successfully installed toga-0.3.0.dev18 toga-core-0.3.0.dev18 toga-demo-0.3.0.dev19 toga-winforms-0.3.0.dev18 travertino-0.1.3
WARNING: You are using pip version 19.2.1, however version 20.1.1 is available.
You should consider upgrading via the 'python -m pip install --upgrade pip' command.
C:\Users\bubth\Development\togatest> python --versoin
unknown option --versoin
usage: C:\Program Files\Python37\python.exe [option] ... [-c cmd | -m mod | file | -] [arg] ...
Try `python -h' for more information.
C:\Users\bubth\Development\togatest> python --version
Python 3.7.3
C:\Users\bubth\Development\togatest> toga-demo
Traceback (most recent call last):
File "c:\program files\python37\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "c:\program files\python37\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\Program Files\Python37\Scripts\toga-demo.exe\__main__.py", line 9, in <module>
File "c:\program files\python37\lib\site-packages\toga_demo\__main__.py", line 5, in run
main().main_loop()
File "c:\program files\python37\lib\site-packages\toga_demo\app.py", line 98, in main
return TogaDemo('Toga Demo', 'org.beeware.toga-demo')
File "c:\program files\python37\lib\site-packages\toga\app.py", line 184, in __init__
self.icon = 'resources/{app_name}'.format(app_name=self.app_name)
File "c:\program files\python37\lib\site-packages\toga\app.py", line 317, in icon
self._icon.bind(self.factory)
File "c:\program files\python37\lib\site-packages\toga\icons.py", line 41, in bind
resource_path = factory.paths.app
File "c:\program files\python37\lib\site-packages\toga_winforms\paths.py", line 10, in app
return Path(sys.modules[App.app.module_name].__file__).parent
KeyError: ''
C:\Users\bubth\Development\togatest>
```
```
Traceback (most recent call last):
File ".\test.py", line 2, in <module>
from toga.style.pack import Pack, ROW, CENTER, COLUMN
ModuleNotFoundError: No module named 'toga.style'
```
```
C:\Users\bubth\Development\togatest> python .\test.py
Traceback (most recent call last):
File ".\test.py", line 24, in <module>
main().main_loop()
File "C:\Program Files\Python37\lib\site-packages\toga_winforms\app.py", line 49, in main_loop
self._startup()
File "C:\Program Files\Python37\lib\site-packages\toga_winforms\app.py", line 41, in _startup
self.startup()
File "C:\Program Files\Python37\lib\site-packages\toga\interface\app.py", line 144, in startup
self.main_window.content = self._startup_method(self)
File ".\test.py", line 9, in build
box = toga.Box()
File "C:\Program Files\Python37\lib\site-packages\toga_winforms\widgets\box.py", line 10, in __init__
super().__init__(id=id, style=style, children=children)
File "C:\Program Files\Python37\lib\site-packages\toga\interface\widgets\box.py", line 21, in __init__
super().__init__(id=id, style=style, children=children)
File "C:\Program Files\Python37\lib\site-packages\toga\interface\widgets\base.py", line 144, in __init__
self.style = CSS()
File "C:\Program Files\Python37\lib\site-packages\toga\interface\widgets\base.py", line 170, in style
self._style = value.bind(self)
AttributeError: 'CSS' object has no attribute 'bind'
```
## Steps to reproduce
<!--- Provide a set of steps describing how to reproduce this bug. If you have a live example, provide the link below -->
1. Be on windows
2. install toga
3. Follow the browser tutorial or hello world tutorial
## Your Environment
<!--- Provide details on your current environment you found the bug in -->
* Python Version (list the specific version number)
```
C:\Users\bubth\Development\togatest> python --version
Python 3.7.3
```
* Operating System and Version (select from the following and list the specific version number; if your OS is not listed, list that as well)
```
OS Name Microsoft Windows 10 Pro
Version 10.0.19041 Build 19041
Other OS Description Not Available
OS Manufacturer Microsoft Corporation
System Name LAPPYTOPPY
System Manufacturer Micro-Star International Co., Ltd.
System Model GP73 Leopard 8RF
System Type x64-based PC
System SKU 17C5.1
Processor Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz, 2201 Mhz, 6 Core(s), 12 Logical Processor(s)
BIOS Version/Date American Megatrends Inc. E17C5IMS.10A, 7/13/2018
SMBIOS Version 3.1
Embedded Controller Version 255.255
BIOS Mode UEFI
BaseBoard Manufacturer Micro-Star International Co., Ltd.
BaseBoard Product MS-17C5
BaseBoard Version REV:1.0
Platform Role Mobile
Secure Boot State On
PCR7 Configuration Elevation Required to View
Windows Directory C:\WINDOWS
System Directory C:\WINDOWS\system32
Boot Device \Device\HarddiskVolume3
Locale United States
Hardware Abstraction Layer Version = "10.0.19041.1"
User Name LAPPYTOPPY\bubth
Time Zone Mountain Daylight Time
Installed Physical Memory (RAM) 16.0 GB
Total Physical Memory 15.8 GB
Available Physical Memory 4.19 GB
Total Virtual Memory 18.2 GB
Available Virtual Memory 4.69 GB
Page File Space 2.38 GB
Page File C:\pagefile.sys
Kernel DMA Protection Off
Virtualization-based security Running
Virtualization-based security Required Security Properties
Virtualization-based security Available Security Properties Base Virtualization Support, Secure Boot, DMA Protection, SMM Security Mitigations 1.0, Mode Based Execution Control
Virtualization-based security Services Configured
Virtualization-based security Services Running
Device Encryption Support Elevation Required to View
A hypervisor has been detected. Features required for Hyper-V will not be displayed.
```
* Toga Version (list the specific version number or git hash)
```
C:\Users\bubth\Development\togatest> python
Python 3.7.3 (v3.7.3:ef4ec6ed12, Mar 25 2019, 22:22:05) [MSC v.1916 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license" for more information.
>>> import toga
>>> toga.__version__
'0.3.0.dev18'
```
* Toga Target (the type of app you are trying to generate)
- [ ] android
- [ ] cocoa
- [ ] django
- [ ] gtk
- [ ] iOS
- [ ] tvOS
- [ ] watchOS
- [x ] winforms
- [ ] win32
- [ ] Other (please specify)
--- END ISSUE ---
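The pip transcript above shows where the mismatch comes from: the published `toga_demo-0.3.0.dev19` wheel resolves `toga==0.3.0.dev18`, an older dev build. A minimal sketch of the corrected dependency declaration (illustrative only; the real file is `demo/setup.py`, reproduced below, and the diff further down makes exactly this change):

```python
# Sketch only: the fragment of demo/setup.py that matters here, with the toga
# pin kept in lockstep with the toga-demo release itself.
from setuptools import setup

setup(
    name="toga-demo",
    version="0.3.0.dev20",
    install_requires=[
        "toga==0.3.0.dev20",  # previously 0.3.0.dev18, which lags the demo release
    ],
)
```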
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `demo/setup.py`
Content:
```
1 #!/usr/bin/env python
2 import io
3
4 from setuptools import setup, find_packages
5
6
7 with io.open('README.rst', encoding='utf8') as readme:
8 long_description = readme.read()
9
10
11 setup(
12 name='toga-demo',
13 version='0.3.0.dev20',
14 description='A demonstration of the capabilities of the Toga widget toolkit.',
15 long_description=long_description,
16 author='Russell Keith-Magee',
17 author_email='russell@keith-magee.com',
18 url='http://beeware.org/toga-demo',
19 include_package_data=True,
20 packages=find_packages(),
21 python_requires='>=3.5',
22 package_data={
23 'toga_demo': ['resources/*.icns', 'resources/*.png'],
24 },
25 install_requires=[
26 'toga==0.3.0.dev18'
27 ],
28 entry_points={
29 'console_scripts': [
30 'toga-demo = toga_demo.__main__:run',
31 ]
32 },
33 license='New BSD',
34 classifiers=[
35 'Development Status :: 4 - Beta',
36 'Intended Audience :: Developers',
37 'License :: OSI Approved :: BSD License',
38 'Operating System :: OS Independent',
39 'Programming Language :: Python :: 3',
40 'Programming Language :: Python :: 3.5',
41 'Programming Language :: Python :: 3.6',
42 'Programming Language :: Python :: 3.7',
43 'Programming Language :: Python :: 3 :: Only',
44 'Topic :: Software Development',
45 'Topic :: Utilities',
46 ],
47 options={
48 'app': {
49 'formal_name': 'Toga Demo',
50 'bundle': 'org.beeware',
51 },
52 'ios': {
53 'app_requires': [
54 'toga-ios==0.3.0.dev20',
55 ]
56 },
57 'django': {
58 'app_requires': [
59 'toga-django==0.3.0.dev20',
60 ]
61 },
62 'macos': {
63 'app_requires': [
64 'toga-cocoa==0.3.0.dev20',
65 ]
66 },
67 'linux': {
68 'app_requires': [
69 'toga-gtk==0.3.0.dev20',
70 ]
71 },
72 'windows': {
73 'app_requires': [
74 'toga-winform==0.3.0.dev20',
75 ]
76 },
77 'android': {
78 'app_requires': [
79 'toga-android==0.3.0.dev20',
80 ]
81 }
82 }
83 )
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/demo/setup.py b/demo/setup.py
--- a/demo/setup.py
+++ b/demo/setup.py
@@ -23,7 +23,7 @@
'toga_demo': ['resources/*.icns', 'resources/*.png'],
},
install_requires=[
- 'toga==0.3.0.dev18'
+ 'toga==0.3.0.dev20'
],
entry_points={
'console_scripts': [
| {"golden_diff": "diff --git a/demo/setup.py b/demo/setup.py\n--- a/demo/setup.py\n+++ b/demo/setup.py\n@@ -23,7 +23,7 @@\n 'toga_demo': ['resources/*.icns', 'resources/*.png'],\n },\n install_requires=[\n- 'toga==0.3.0.dev18'\n+ 'toga==0.3.0.dev20'\n ],\n entry_points={\n 'console_scripts': [\n", "issue": "toga-demo alias doesn't work on Windows\n## Expected Behavior\r\nExamples in the documentation should work. I have to specify version 0.2.15 for anything to run properly - the normal pip installation of toga installs the dev builds that are not functioning.\r\n\r\n\r\n## Current Behavior\r\nThey all fail with various errors of missing items, etc.\r\n```\r\nC:\\Users\\bubth\\Development\\togatest> pip install --pre toga-demo\r\nCollecting toga-demo\r\n Downloading https://files.pythonhosted.org/packages/33/05/61d94bccdfe6831eb60fc59cd79c60d7780983d07df984d82e2a8f298b8b\r\n/toga_demo-0.3.0.dev19-py3-none-any.whl (616kB)\r\n |\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 624kB 819kB/s\r\nCollecting toga==0.3.0.dev18 (from toga-demo)\r\n Downloading https://files.pythonhosted.org/packages/9c/cd/4ec127b063c9b1c6f045791e7613e05247dc30e0cb817bccf09de9377ecf\r\n/toga-0.3.0.dev18-py3-none-any.whl\r\nCollecting toga-winforms==0.3.0.dev18; sys_platform == \"win32\" (from toga==0.3.0.dev18->toga-demo)\r\n Downloading https://files.pythonhosted.org/packages/81/67/6e16ddc4c4286a4b6f08005c66006524e305c3befca01df34f509ef76202\r\n/toga_winforms-0.3.0.dev18-py3-none-any.whl\r\nCollecting toga-core==0.3.0.dev18 (from toga-winforms==0.3.0.dev18; sys_platform == \"win32\"->toga==0.3.0.dev18->toga-dem\r\no)\r\n/toga_core-0.3.0.dev18-py3-none-any.whl (512kB)\r\n |\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 522kB 6.8MB/s\r\nRequirement already satisfied: pythonnet in c:\\program files\\python37\\lib\\site-packages (from toga-winforms==0.3.0.dev18Requirement already satisfied: importlib-metadata; python_version < \"3.8\" in c:\\users\\bubth\\appdata\\roaming\\python\\pythotoga-demo) (0.18)\r\nCollecting travertino>=0.1.0 (from toga-core==0.3.0.dev18->toga-winforms==0.3.0.dev18; sys_platform == \"win32\"->toga==0.3.0.dev18->toga-demo)\r\n Downloading https://files.pythonhosted.org/packages/4c/78/b33e38d372707fbf2c461d1bde6797a12c8d20f97279db63cb57dc24eacb/travertino-0.1.3-py3-none-any.whl\r\nRequirement already satisfied: zipp>=0.5 in c:\\users\\bubth\\appdata\\roaming\\python\\python37\\site-packages (from importlib-metadata; python_version < \"3.8\"->toga-core==0.3.0.dev18->toga-winforms==0.3.0.dev18; sys_platform == \"win32\"->toga==0.3.0.dev18->toga-demo) (0.5.2)\r\nInstalling collected packages: travertino, toga-core, toga-winforms, toga, toga-demo\r\n Found existing installation: toga-core 0.2.15\r\n Uninstalling toga-core-0.2.15:\r\n Successfully uninstalled toga-core-0.2.15\r\n Found existing installation: toga-winforms 0.2.15\r\n Uninstalling toga-winforms-0.2.15:\r\n Successfully uninstalled toga-winforms-0.2.15\r\n Found existing installation: toga 0.2.15\r\n Uninstalling toga-0.2.15:\r\n Successfully uninstalled toga-0.2.15\r\nSuccessfully installed toga-0.3.0.dev18 toga-core-0.3.0.dev18 toga-demo-0.3.0.dev19 toga-winforms-0.3.0.dev18 travertino-0.1.3\r\nWARNING: You are using pip version 19.2.1, however 
version 20.1.1 is available.\r\nYou should consider upgrading via the 'python -m pip install --upgrade pip' command.\r\nC:\\Users\\bubth\\Development\\togatest> python --versoin\r\nunknown option --versoin\r\nusage: C:\\Program Files\\Python37\\python.exe [option] ... [-c cmd | -m mod | file | -] [arg] ...\r\nTry `python -h' for more information.\r\nC:\\Users\\bubth\\Development\\togatest> python --version\r\nPython 3.7.3\r\nC:\\Users\\bubth\\Development\\togatest> toga-demo\r\nTraceback (most recent call last):\r\n File \"c:\\program files\\python37\\lib\\runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"c:\\program files\\python37\\lib\\runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\Program Files\\Python37\\Scripts\\toga-demo.exe\\__main__.py\", line 9, in <module>\r\n File \"c:\\program files\\python37\\lib\\site-packages\\toga_demo\\__main__.py\", line 5, in run\r\n main().main_loop()\r\n File \"c:\\program files\\python37\\lib\\site-packages\\toga_demo\\app.py\", line 98, in main\r\n return TogaDemo('Toga Demo', 'org.beeware.toga-demo')\r\n File \"c:\\program files\\python37\\lib\\site-packages\\toga\\app.py\", line 184, in __init__\r\n self.icon = 'resources/{app_name}'.format(app_name=self.app_name)\r\n File \"c:\\program files\\python37\\lib\\site-packages\\toga\\app.py\", line 317, in icon\r\n self._icon.bind(self.factory)\r\n File \"c:\\program files\\python37\\lib\\site-packages\\toga\\icons.py\", line 41, in bind\r\n resource_path = factory.paths.app\r\n File \"c:\\program files\\python37\\lib\\site-packages\\toga_winforms\\paths.py\", line 10, in app\r\n return Path(sys.modules[App.app.module_name].__file__).parent\r\nKeyError: ''\r\nC:\\Users\\bubth\\Development\\togatest>\r\n```\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \".\\test.py\", line 2, in <module>\r\n from toga.style.pack import Pack, ROW, CENTER, COLUMN\r\nModuleNotFoundError: No module named 'toga.style'\r\n```\r\n\r\n```\r\nC:\\Users\\bubth\\Development\\togatest> python .\\test.py\r\nTraceback (most recent call last):\r\n File \".\\test.py\", line 24, in <module>\r\n main().main_loop()\r\n File \"C:\\Program Files\\Python37\\lib\\site-packages\\toga_winforms\\app.py\", line 49, in main_loop\r\n self._startup()\r\n File \"C:\\Program Files\\Python37\\lib\\site-packages\\toga_winforms\\app.py\", line 41, in _startup\r\n self.startup()\r\n File \"C:\\Program Files\\Python37\\lib\\site-packages\\toga\\interface\\app.py\", line 144, in startup\r\n self.main_window.content = self._startup_method(self)\r\n File \".\\test.py\", line 9, in build\r\n box = toga.Box()\r\n File \"C:\\Program Files\\Python37\\lib\\site-packages\\toga_winforms\\widgets\\box.py\", line 10, in __init__\r\n super().__init__(id=id, style=style, children=children)\r\n File \"C:\\Program Files\\Python37\\lib\\site-packages\\toga\\interface\\widgets\\box.py\", line 21, in __init__\r\n super().__init__(id=id, style=style, children=children)\r\n File \"C:\\Program Files\\Python37\\lib\\site-packages\\toga\\interface\\widgets\\base.py\", line 144, in __init__\r\n self.style = CSS()\r\n File \"C:\\Program Files\\Python37\\lib\\site-packages\\toga\\interface\\widgets\\base.py\", line 170, in style\r\n self._style = value.bind(self)\r\nAttributeError: 'CSS' object has no attribute 'bind'\r\n```\r\n\r\n## Steps to reproduce\r\n<!--- Provide a set of steps describing how to reproduce this bug. If you have a live example, provide the link below -->\r\n1. Be on windows\r\n\r\n2. 
install toga\r\n\r\n3. Follow the browser tutorial or hello world tutorial\r\n\r\n\r\n## Your Environment\r\n<!--- Provide details on your current environment you found the bug in -->\r\n\r\n* Python Version (list the specific version number)\r\n```\r\nC:\\Users\\bubth\\Development\\togatest> python --version\r\nPython 3.7.3\r\n```\r\n\r\n* Operating System and Version (select from the following and list the specific version number; if your OS is not listed, list that as well)\r\n\r\n```\r\nOS Name\tMicrosoft Windows 10 Pro\r\nVersion\t10.0.19041 Build 19041\r\nOther OS Description \tNot Available\r\nOS Manufacturer\tMicrosoft Corporation\r\nSystem Name\tLAPPYTOPPY\r\nSystem Manufacturer\tMicro-Star International Co., Ltd.\r\nSystem Model\tGP73 Leopard 8RF\r\nSystem Type\tx64-based PC\r\nSystem SKU\t17C5.1\r\nProcessor\tIntel(R) Core(TM) i7-8750H CPU @ 2.20GHz, 2201 Mhz, 6 Core(s), 12 Logical Processor(s)\r\nBIOS Version/Date\tAmerican Megatrends Inc. E17C5IMS.10A, 7/13/2018\r\nSMBIOS Version\t3.1\r\nEmbedded Controller Version\t255.255\r\nBIOS Mode\tUEFI\r\nBaseBoard Manufacturer\tMicro-Star International Co., Ltd.\r\nBaseBoard Product\tMS-17C5\r\nBaseBoard Version\tREV:1.0\r\nPlatform Role\tMobile\r\nSecure Boot State\tOn\r\nPCR7 Configuration\tElevation Required to View\r\nWindows Directory\tC:\\WINDOWS\r\nSystem Directory\tC:\\WINDOWS\\system32\r\nBoot Device\t\\Device\\HarddiskVolume3\r\nLocale\tUnited States\r\nHardware Abstraction Layer\tVersion = \"10.0.19041.1\"\r\nUser Name\tLAPPYTOPPY\\bubth\r\nTime Zone\tMountain Daylight Time\r\nInstalled Physical Memory (RAM)\t16.0 GB\r\nTotal Physical Memory\t15.8 GB\r\nAvailable Physical Memory\t4.19 GB\r\nTotal Virtual Memory\t18.2 GB\r\nAvailable Virtual Memory\t4.69 GB\r\nPage File Space\t2.38 GB\r\nPage File\tC:\\pagefile.sys\r\nKernel DMA Protection\tOff\r\nVirtualization-based security\tRunning\r\nVirtualization-based security Required Security Properties\t\r\nVirtualization-based security Available Security Properties\tBase Virtualization Support, Secure Boot, DMA Protection, SMM Security Mitigations 1.0, Mode Based Execution Control\r\nVirtualization-based security Services Configured\t\r\nVirtualization-based security Services Running\t\r\nDevice Encryption Support\tElevation Required to View\r\nA hypervisor has been detected. 
Features required for Hyper-V will not be displayed.\t\r\n```\r\n\r\n* Toga Version (list the specific version number or git hash)\r\n\r\n```\r\nC:\\Users\\bubth\\Development\\togatest> python\r\nPython 3.7.3 (v3.7.3:ef4ec6ed12, Mar 25 2019, 22:22:05) [MSC v.1916 64 bit (AMD64)] on win32\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import toga\r\n>>> toga.__version__\r\n'0.3.0.dev18'\r\n```\r\n\r\n* Toga Target (the type of app you are trying to generate)\r\n\r\n - [ ] android\r\n - [ ] cocoa\r\n - [ ] django\r\n - [ ] gtk\r\n - [ ] iOS\r\n - [ ] tvOS\r\n - [ ] watchOS\r\n - [x ] winforms\r\n - [ ] win32\r\n - [ ] Other (please specify)\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport io\n\nfrom setuptools import setup, find_packages\n\n\nwith io.open('README.rst', encoding='utf8') as readme:\n long_description = readme.read()\n\n\nsetup(\n name='toga-demo',\n version='0.3.0.dev20',\n description='A demonstration of the capabilities of the Toga widget toolkit.',\n long_description=long_description,\n author='Russell Keith-Magee',\n author_email='russell@keith-magee.com',\n url='http://beeware.org/toga-demo',\n include_package_data=True,\n packages=find_packages(),\n python_requires='>=3.5',\n package_data={\n 'toga_demo': ['resources/*.icns', 'resources/*.png'],\n },\n install_requires=[\n 'toga==0.3.0.dev18'\n ],\n entry_points={\n 'console_scripts': [\n 'toga-demo = toga_demo.__main__:run',\n ]\n },\n license='New BSD',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development',\n 'Topic :: Utilities',\n ],\n options={\n 'app': {\n 'formal_name': 'Toga Demo',\n 'bundle': 'org.beeware',\n },\n 'ios': {\n 'app_requires': [\n 'toga-ios==0.3.0.dev20',\n ]\n },\n 'django': {\n 'app_requires': [\n 'toga-django==0.3.0.dev20',\n ]\n },\n 'macos': {\n 'app_requires': [\n 'toga-cocoa==0.3.0.dev20',\n ]\n },\n 'linux': {\n 'app_requires': [\n 'toga-gtk==0.3.0.dev20',\n ]\n },\n 'windows': {\n 'app_requires': [\n 'toga-winform==0.3.0.dev20',\n ]\n },\n 'android': {\n 'app_requires': [\n 'toga-android==0.3.0.dev20',\n ]\n }\n }\n)\n", "path": "demo/setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport io\n\nfrom setuptools import setup, find_packages\n\n\nwith io.open('README.rst', encoding='utf8') as readme:\n long_description = readme.read()\n\n\nsetup(\n name='toga-demo',\n version='0.3.0.dev20',\n description='A demonstration of the capabilities of the Toga widget toolkit.',\n long_description=long_description,\n author='Russell Keith-Magee',\n author_email='russell@keith-magee.com',\n url='http://beeware.org/toga-demo',\n include_package_data=True,\n packages=find_packages(),\n python_requires='>=3.5',\n package_data={\n 'toga_demo': ['resources/*.icns', 'resources/*.png'],\n },\n install_requires=[\n 'toga==0.3.0.dev20'\n ],\n entry_points={\n 'console_scripts': [\n 'toga-demo = toga_demo.__main__:run',\n ]\n },\n license='New BSD',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3',\n 
'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development',\n 'Topic :: Utilities',\n ],\n options={\n 'app': {\n 'formal_name': 'Toga Demo',\n 'bundle': 'org.beeware',\n },\n 'ios': {\n 'app_requires': [\n 'toga-ios==0.3.0.dev20',\n ]\n },\n 'django': {\n 'app_requires': [\n 'toga-django==0.3.0.dev20',\n ]\n },\n 'macos': {\n 'app_requires': [\n 'toga-cocoa==0.3.0.dev20',\n ]\n },\n 'linux': {\n 'app_requires': [\n 'toga-gtk==0.3.0.dev20',\n ]\n },\n 'windows': {\n 'app_requires': [\n 'toga-winform==0.3.0.dev20',\n ]\n },\n 'android': {\n 'app_requires': [\n 'toga-android==0.3.0.dev20',\n ]\n }\n }\n)\n", "path": "demo/setup.py"}]} | 3,933 | 101 |
gh_patches_debug_3831 | rasdani/github-patches | git_diff | pantsbuild__pants-18678 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wrong version of Python used to build `pex_binary` targets in `2.16.0rc0`
**Describe the bug**
* Our CI image contains both Python 3.8 and 3.9.
* We set `[python].interpreter_constraints = ["==3.8.*"]` in `pants.toml`.
* At least one `pex_binary` depends on a version of `numpy` that (for reasons we haven't dug into) only works with Python 3.8, not Python 3.9
* We haven't investigated the build failures because we expect everything to run against Python 3.8 as configured by `[python].interpreter_constraints`
After upgrading to Pants 2.16.0rc0 we see failures building the `pex_binary` in CI, with errors that indicate the build process is trying to build a dependency (`numpy`) against Python 3.9 instead of the expected/configured Python 3.8
This is very concerning. We still run Python 3.8 everywhere in production, so I don't want Pexes to be building against Python 3.9. I've downgraded us back to 2.16.0a1 for now and confirmed this fixes the problem.
**Pants version**
2.16.0rc0
**OS**
Linux
**Additional info**
I am suspicious of https://github.com/pantsbuild/pants/commit/d3d325777952435186be42443fb28fde6771fae7 and https://github.com/pantsbuild/pants/commit/e8d387ba6b4d4502e3b6db5ae68ffe7beeeb10a7
--- END ISSUE ---
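The report boils down to an interpreter-selection problem: `pants.toml` constrains builds to `==3.8.*`, yet the 2.16.0rc0 build resolves `numpy` against Python 3.9. A minimal, hypothetical smoke test (not part of the original report) that a `pex_binary` built under that constraint could run at startup to fail fast on the wrong interpreter:

```python
# Hypothetical guard reflecting the constraint described above: the PEX is
# expected to execute under CPython 3.8, never 3.9.
import sys

if sys.version_info[:2] != (3, 8):
    raise RuntimeError(f"expected Python 3.8.x, got {sys.version.split()[0]}")
print("interpreter constraint satisfied:", sys.version.split()[0])
```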
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/python/pants/backend/python/util_rules/pex_cli.py`
Content:
```
1 # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import annotations
5
6 import dataclasses
7 from dataclasses import dataclass
8 from typing import Iterable, List, Mapping, Optional, Tuple
9
10 from pants.backend.python.subsystems.python_native_code import PythonNativeCodeSubsystem
11 from pants.backend.python.subsystems.setup import PythonSetup
12 from pants.backend.python.util_rules import pex_environment
13 from pants.backend.python.util_rules.pex_environment import PexEnvironment, PexSubsystem
14 from pants.core.util_rules import adhoc_binaries, external_tool
15 from pants.core.util_rules.adhoc_binaries import PythonBuildStandaloneBinary
16 from pants.core.util_rules.external_tool import (
17 DownloadedExternalTool,
18 ExternalToolRequest,
19 TemplatedExternalTool,
20 )
21 from pants.engine.fs import CreateDigest, Digest, Directory, MergeDigests
22 from pants.engine.internals.selectors import MultiGet
23 from pants.engine.platform import Platform
24 from pants.engine.process import Process, ProcessCacheScope
25 from pants.engine.rules import Get, collect_rules, rule
26 from pants.option.global_options import GlobalOptions, ca_certs_path_to_file_content
27 from pants.util.frozendict import FrozenDict
28 from pants.util.logging import LogLevel
29 from pants.util.meta import classproperty
30 from pants.util.strutil import create_path_env_var
31
32
33 class PexCli(TemplatedExternalTool):
34 options_scope = "pex-cli"
35 name = "pex"
36 help = "The PEX (Python EXecutable) tool (https://github.com/pantsbuild/pex)."
37
38 default_version = "v2.1.131"
39 default_url_template = "https://github.com/pantsbuild/pex/releases/download/{version}/pex"
40 version_constraints = ">=2.1.124,<3.0"
41
42 @classproperty
43 def default_known_versions(cls):
44 return [
45 "|".join(
46 (
47 cls.default_version,
48 plat,
49 "28b9dfc7e2f5f49f1e189b79eba3dd79ca2186f765009ea02dd6095f5359bf59",
50 "4084520",
51 )
52 )
53 for plat in ["macos_arm64", "macos_x86_64", "linux_x86_64", "linux_arm64"]
54 ]
55
56
57 @dataclass(frozen=True)
58 class PexCliProcess:
59 subcommand: tuple[str, ...]
60 extra_args: tuple[str, ...]
61 description: str = dataclasses.field(compare=False)
62 additional_input_digest: Optional[Digest]
63 extra_env: Optional[FrozenDict[str, str]]
64 output_files: Optional[Tuple[str, ...]]
65 output_directories: Optional[Tuple[str, ...]]
66 level: LogLevel
67 concurrency_available: int
68 cache_scope: ProcessCacheScope
69
70 def __init__(
71 self,
72 *,
73 subcommand: Iterable[str],
74 extra_args: Iterable[str],
75 description: str,
76 additional_input_digest: Optional[Digest] = None,
77 extra_env: Optional[Mapping[str, str]] = None,
78 output_files: Optional[Iterable[str]] = None,
79 output_directories: Optional[Iterable[str]] = None,
80 level: LogLevel = LogLevel.INFO,
81 concurrency_available: int = 0,
82 cache_scope: ProcessCacheScope = ProcessCacheScope.SUCCESSFUL,
83 ) -> None:
84 object.__setattr__(self, "subcommand", tuple(subcommand))
85 object.__setattr__(self, "extra_args", tuple(extra_args))
86 object.__setattr__(self, "description", description)
87 object.__setattr__(self, "additional_input_digest", additional_input_digest)
88 object.__setattr__(self, "extra_env", FrozenDict(extra_env) if extra_env else None)
89 object.__setattr__(self, "output_files", tuple(output_files) if output_files else None)
90 object.__setattr__(
91 self, "output_directories", tuple(output_directories) if output_directories else None
92 )
93 object.__setattr__(self, "level", level)
94 object.__setattr__(self, "concurrency_available", concurrency_available)
95 object.__setattr__(self, "cache_scope", cache_scope)
96
97 self.__post_init__()
98
99 def __post_init__(self) -> None:
100 if "--pex-root-path" in self.extra_args:
101 raise ValueError("`--pex-root` flag not allowed. We set its value for you.")
102
103
104 class PexPEX(DownloadedExternalTool):
105 """The Pex PEX binary."""
106
107
108 @rule
109 async def download_pex_pex(pex_cli: PexCli, platform: Platform) -> PexPEX:
110 pex_pex = await Get(DownloadedExternalTool, ExternalToolRequest, pex_cli.get_request(platform))
111 return PexPEX(digest=pex_pex.digest, exe=pex_pex.exe)
112
113
114 @rule
115 async def setup_pex_cli_process(
116 request: PexCliProcess,
117 pex_pex: PexPEX,
118 pex_env: PexEnvironment,
119 bootstrap_python: PythonBuildStandaloneBinary,
120 python_native_code: PythonNativeCodeSubsystem.EnvironmentAware,
121 global_options: GlobalOptions,
122 pex_subsystem: PexSubsystem,
123 python_setup: PythonSetup,
124 ) -> Process:
125 tmpdir = ".tmp"
126 gets: List[Get] = [Get(Digest, CreateDigest([Directory(tmpdir)]))]
127
128 cert_args = []
129 if global_options.ca_certs_path:
130 ca_certs_fc = ca_certs_path_to_file_content(global_options.ca_certs_path)
131 gets.append(Get(Digest, CreateDigest((ca_certs_fc,))))
132 cert_args = ["--cert", ca_certs_fc.path]
133
134 digests_to_merge = [pex_pex.digest]
135 digests_to_merge.extend(await MultiGet(gets))
136 if request.additional_input_digest:
137 digests_to_merge.append(request.additional_input_digest)
138 input_digest = await Get(Digest, MergeDigests(digests_to_merge))
139
140 global_args = [
141 # Ensure Pex and its subprocesses create temporary files in the the process execution
142 # sandbox. It may make sense to do this generally for Processes, but in the short term we
143 # have known use cases where /tmp is too small to hold large wheel downloads Pex is asked to
144 # perform. Making the TMPDIR local to the sandbox allows control via
145 # --local-execution-root-dir for the local case and should work well with remote cases where
146 # a remoting implementation has to allow for processes producing large binaries in a
147 # sandbox to support reasonable workloads. Communicating TMPDIR via --tmpdir instead of via
148 # environment variable allows Pex to absolutize the path ensuring subprocesses that change
149 # CWD can find the TMPDIR.
150 "--tmpdir",
151 tmpdir,
152 ]
153
154 if request.concurrency_available > 0:
155 global_args.extend(["--jobs", "{pants_concurrency}"])
156
157 verbosity_args = [f"-{'v' * pex_subsystem.verbosity}"] if pex_subsystem.verbosity > 0 else []
158
159 # NB: We should always pass `--python-path`, as that tells Pex where to look for interpreters
160 # when `--python` isn't an absolute path.
161 resolve_args = [
162 *cert_args,
163 "--python-path",
164 create_path_env_var(pex_env.interpreter_search_paths),
165 ]
166 # All old-style pex runs take the --pip-version flag, but only certain subcommands of the
167 # `pex3` console script do. So if invoked with a subcommand, the caller must selectively
168 # set --pip-version only on subcommands that take it.
169 pip_version_args = (
170 [] if request.subcommand else ["--pip-version", python_setup.pip_version.value]
171 )
172 args = [
173 *request.subcommand,
174 *global_args,
175 *verbosity_args,
176 *pip_version_args,
177 *resolve_args,
178 # NB: This comes at the end because it may use `--` passthrough args, # which must come at
179 # the end.
180 *request.extra_args,
181 ]
182
183 complete_pex_env = pex_env.in_sandbox(working_directory=None)
184 normalized_argv = complete_pex_env.create_argv(pex_pex.exe, *args)
185 env = {
186 **complete_pex_env.environment_dict(python=bootstrap_python),
187 **python_native_code.subprocess_env_vars,
188 **(request.extra_env or {}),
189 # If a subcommand is used, we need to use the `pex3` console script.
190 **({"PEX_SCRIPT": "pex3"} if request.subcommand else {}),
191 }
192
193 return Process(
194 normalized_argv,
195 description=request.description,
196 input_digest=input_digest,
197 env=env,
198 output_files=request.output_files,
199 output_directories=request.output_directories,
200 append_only_caches=complete_pex_env.append_only_caches,
201 immutable_input_digests=bootstrap_python.immutable_input_digests,
202 level=request.level,
203 concurrency_available=request.concurrency_available,
204 cache_scope=request.cache_scope,
205 )
206
207
208 def rules():
209 return [
210 *collect_rules(),
211 *external_tool.rules(),
212 *pex_environment.rules(),
213 *adhoc_binaries.rules(),
214 ]
215
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/python/pants/backend/python/util_rules/pex_cli.py b/src/python/pants/backend/python/util_rules/pex_cli.py
--- a/src/python/pants/backend/python/util_rules/pex_cli.py
+++ b/src/python/pants/backend/python/util_rules/pex_cli.py
@@ -37,7 +37,7 @@
default_version = "v2.1.131"
default_url_template = "https://github.com/pantsbuild/pex/releases/download/{version}/pex"
- version_constraints = ">=2.1.124,<3.0"
+ version_constraints = ">=2.1.129,<3.0"
@classproperty
def default_known_versions(cls):
| {"golden_diff": "diff --git a/src/python/pants/backend/python/util_rules/pex_cli.py b/src/python/pants/backend/python/util_rules/pex_cli.py\n--- a/src/python/pants/backend/python/util_rules/pex_cli.py\n+++ b/src/python/pants/backend/python/util_rules/pex_cli.py\n@@ -37,7 +37,7 @@\n \n default_version = \"v2.1.131\"\n default_url_template = \"https://github.com/pantsbuild/pex/releases/download/{version}/pex\"\n- version_constraints = \">=2.1.124,<3.0\"\n+ version_constraints = \">=2.1.129,<3.0\"\n \n @classproperty\n def default_known_versions(cls):\n", "issue": "Wrong version of Python used to build `pex_binary` targets in `2.16.0rc0`\n**Describe the bug**\r\n\r\n* Our CI image contains both Python 3.8 and 3.9.\r\n* We set `[python].interpreter_constraints = [\"==3.8.*\"]` in `pants.toml`.\r\n* At least one `pex_binary` depends on a version of `numpy` that (for reasons we haven't dug into) only works with Python 3.8, not Python 3.9\r\n * We haven't investigated the build failures because we expect everything to run against Python 3.8 as configured by `[python].interpreter_constraints`\r\n\r\nAfter upgrading to Pants 2.16.0rc0 we see failures building the `pex_binary` in CI, with errors that indicate the build process is trying to build a dependency (`numpy`) against Python 3.9 instead of the expected/configured Python 3.8\r\n\r\nThis is very concerning. We still run Python 3.8 everywhere in production, so I don't want Pexes to be building against Python 3.9. I've downgraded us back to 2.16.0a1 for now and confirmed this fixes the problem.\r\n\r\n**Pants version**\r\n\r\n2.16.0rc0\r\n\r\n**OS**\r\n\r\nLinux\r\n\r\n**Additional info**\r\n\r\nI am suspicious of https://github.com/pantsbuild/pants/commit/d3d325777952435186be42443fb28fde6771fae7 and https://github.com/pantsbuild/pants/commit/e8d387ba6b4d4502e3b6db5ae68ffe7beeeb10a7\n", "before_files": [{"content": "# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nimport dataclasses\nfrom dataclasses import dataclass\nfrom typing import Iterable, List, Mapping, Optional, Tuple\n\nfrom pants.backend.python.subsystems.python_native_code import PythonNativeCodeSubsystem\nfrom pants.backend.python.subsystems.setup import PythonSetup\nfrom pants.backend.python.util_rules import pex_environment\nfrom pants.backend.python.util_rules.pex_environment import PexEnvironment, PexSubsystem\nfrom pants.core.util_rules import adhoc_binaries, external_tool\nfrom pants.core.util_rules.adhoc_binaries import PythonBuildStandaloneBinary\nfrom pants.core.util_rules.external_tool import (\n DownloadedExternalTool,\n ExternalToolRequest,\n TemplatedExternalTool,\n)\nfrom pants.engine.fs import CreateDigest, Digest, Directory, MergeDigests\nfrom pants.engine.internals.selectors import MultiGet\nfrom pants.engine.platform import Platform\nfrom pants.engine.process import Process, ProcessCacheScope\nfrom pants.engine.rules import Get, collect_rules, rule\nfrom pants.option.global_options import GlobalOptions, ca_certs_path_to_file_content\nfrom pants.util.frozendict import FrozenDict\nfrom pants.util.logging import LogLevel\nfrom pants.util.meta import classproperty\nfrom pants.util.strutil import create_path_env_var\n\n\nclass PexCli(TemplatedExternalTool):\n options_scope = \"pex-cli\"\n name = \"pex\"\n help = \"The PEX (Python EXecutable) tool (https://github.com/pantsbuild/pex).\"\n\n default_version = \"v2.1.131\"\n default_url_template = 
\"https://github.com/pantsbuild/pex/releases/download/{version}/pex\"\n version_constraints = \">=2.1.124,<3.0\"\n\n @classproperty\n def default_known_versions(cls):\n return [\n \"|\".join(\n (\n cls.default_version,\n plat,\n \"28b9dfc7e2f5f49f1e189b79eba3dd79ca2186f765009ea02dd6095f5359bf59\",\n \"4084520\",\n )\n )\n for plat in [\"macos_arm64\", \"macos_x86_64\", \"linux_x86_64\", \"linux_arm64\"]\n ]\n\n\n@dataclass(frozen=True)\nclass PexCliProcess:\n subcommand: tuple[str, ...]\n extra_args: tuple[str, ...]\n description: str = dataclasses.field(compare=False)\n additional_input_digest: Optional[Digest]\n extra_env: Optional[FrozenDict[str, str]]\n output_files: Optional[Tuple[str, ...]]\n output_directories: Optional[Tuple[str, ...]]\n level: LogLevel\n concurrency_available: int\n cache_scope: ProcessCacheScope\n\n def __init__(\n self,\n *,\n subcommand: Iterable[str],\n extra_args: Iterable[str],\n description: str,\n additional_input_digest: Optional[Digest] = None,\n extra_env: Optional[Mapping[str, str]] = None,\n output_files: Optional[Iterable[str]] = None,\n output_directories: Optional[Iterable[str]] = None,\n level: LogLevel = LogLevel.INFO,\n concurrency_available: int = 0,\n cache_scope: ProcessCacheScope = ProcessCacheScope.SUCCESSFUL,\n ) -> None:\n object.__setattr__(self, \"subcommand\", tuple(subcommand))\n object.__setattr__(self, \"extra_args\", tuple(extra_args))\n object.__setattr__(self, \"description\", description)\n object.__setattr__(self, \"additional_input_digest\", additional_input_digest)\n object.__setattr__(self, \"extra_env\", FrozenDict(extra_env) if extra_env else None)\n object.__setattr__(self, \"output_files\", tuple(output_files) if output_files else None)\n object.__setattr__(\n self, \"output_directories\", tuple(output_directories) if output_directories else None\n )\n object.__setattr__(self, \"level\", level)\n object.__setattr__(self, \"concurrency_available\", concurrency_available)\n object.__setattr__(self, \"cache_scope\", cache_scope)\n\n self.__post_init__()\n\n def __post_init__(self) -> None:\n if \"--pex-root-path\" in self.extra_args:\n raise ValueError(\"`--pex-root` flag not allowed. 
We set its value for you.\")\n\n\nclass PexPEX(DownloadedExternalTool):\n \"\"\"The Pex PEX binary.\"\"\"\n\n\n@rule\nasync def download_pex_pex(pex_cli: PexCli, platform: Platform) -> PexPEX:\n pex_pex = await Get(DownloadedExternalTool, ExternalToolRequest, pex_cli.get_request(platform))\n return PexPEX(digest=pex_pex.digest, exe=pex_pex.exe)\n\n\n@rule\nasync def setup_pex_cli_process(\n request: PexCliProcess,\n pex_pex: PexPEX,\n pex_env: PexEnvironment,\n bootstrap_python: PythonBuildStandaloneBinary,\n python_native_code: PythonNativeCodeSubsystem.EnvironmentAware,\n global_options: GlobalOptions,\n pex_subsystem: PexSubsystem,\n python_setup: PythonSetup,\n) -> Process:\n tmpdir = \".tmp\"\n gets: List[Get] = [Get(Digest, CreateDigest([Directory(tmpdir)]))]\n\n cert_args = []\n if global_options.ca_certs_path:\n ca_certs_fc = ca_certs_path_to_file_content(global_options.ca_certs_path)\n gets.append(Get(Digest, CreateDigest((ca_certs_fc,))))\n cert_args = [\"--cert\", ca_certs_fc.path]\n\n digests_to_merge = [pex_pex.digest]\n digests_to_merge.extend(await MultiGet(gets))\n if request.additional_input_digest:\n digests_to_merge.append(request.additional_input_digest)\n input_digest = await Get(Digest, MergeDigests(digests_to_merge))\n\n global_args = [\n # Ensure Pex and its subprocesses create temporary files in the the process execution\n # sandbox. It may make sense to do this generally for Processes, but in the short term we\n # have known use cases where /tmp is too small to hold large wheel downloads Pex is asked to\n # perform. Making the TMPDIR local to the sandbox allows control via\n # --local-execution-root-dir for the local case and should work well with remote cases where\n # a remoting implementation has to allow for processes producing large binaries in a\n # sandbox to support reasonable workloads. Communicating TMPDIR via --tmpdir instead of via\n # environment variable allows Pex to absolutize the path ensuring subprocesses that change\n # CWD can find the TMPDIR.\n \"--tmpdir\",\n tmpdir,\n ]\n\n if request.concurrency_available > 0:\n global_args.extend([\"--jobs\", \"{pants_concurrency}\"])\n\n verbosity_args = [f\"-{'v' * pex_subsystem.verbosity}\"] if pex_subsystem.verbosity > 0 else []\n\n # NB: We should always pass `--python-path`, as that tells Pex where to look for interpreters\n # when `--python` isn't an absolute path.\n resolve_args = [\n *cert_args,\n \"--python-path\",\n create_path_env_var(pex_env.interpreter_search_paths),\n ]\n # All old-style pex runs take the --pip-version flag, but only certain subcommands of the\n # `pex3` console script do. 
So if invoked with a subcommand, the caller must selectively\n # set --pip-version only on subcommands that take it.\n pip_version_args = (\n [] if request.subcommand else [\"--pip-version\", python_setup.pip_version.value]\n )\n args = [\n *request.subcommand,\n *global_args,\n *verbosity_args,\n *pip_version_args,\n *resolve_args,\n # NB: This comes at the end because it may use `--` passthrough args, # which must come at\n # the end.\n *request.extra_args,\n ]\n\n complete_pex_env = pex_env.in_sandbox(working_directory=None)\n normalized_argv = complete_pex_env.create_argv(pex_pex.exe, *args)\n env = {\n **complete_pex_env.environment_dict(python=bootstrap_python),\n **python_native_code.subprocess_env_vars,\n **(request.extra_env or {}),\n # If a subcommand is used, we need to use the `pex3` console script.\n **({\"PEX_SCRIPT\": \"pex3\"} if request.subcommand else {}),\n }\n\n return Process(\n normalized_argv,\n description=request.description,\n input_digest=input_digest,\n env=env,\n output_files=request.output_files,\n output_directories=request.output_directories,\n append_only_caches=complete_pex_env.append_only_caches,\n immutable_input_digests=bootstrap_python.immutable_input_digests,\n level=request.level,\n concurrency_available=request.concurrency_available,\n cache_scope=request.cache_scope,\n )\n\n\ndef rules():\n return [\n *collect_rules(),\n *external_tool.rules(),\n *pex_environment.rules(),\n *adhoc_binaries.rules(),\n ]\n", "path": "src/python/pants/backend/python/util_rules/pex_cli.py"}], "after_files": [{"content": "# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nimport dataclasses\nfrom dataclasses import dataclass\nfrom typing import Iterable, List, Mapping, Optional, Tuple\n\nfrom pants.backend.python.subsystems.python_native_code import PythonNativeCodeSubsystem\nfrom pants.backend.python.subsystems.setup import PythonSetup\nfrom pants.backend.python.util_rules import pex_environment\nfrom pants.backend.python.util_rules.pex_environment import PexEnvironment, PexSubsystem\nfrom pants.core.util_rules import adhoc_binaries, external_tool\nfrom pants.core.util_rules.adhoc_binaries import PythonBuildStandaloneBinary\nfrom pants.core.util_rules.external_tool import (\n DownloadedExternalTool,\n ExternalToolRequest,\n TemplatedExternalTool,\n)\nfrom pants.engine.fs import CreateDigest, Digest, Directory, MergeDigests\nfrom pants.engine.internals.selectors import MultiGet\nfrom pants.engine.platform import Platform\nfrom pants.engine.process import Process, ProcessCacheScope\nfrom pants.engine.rules import Get, collect_rules, rule\nfrom pants.option.global_options import GlobalOptions, ca_certs_path_to_file_content\nfrom pants.util.frozendict import FrozenDict\nfrom pants.util.logging import LogLevel\nfrom pants.util.meta import classproperty\nfrom pants.util.strutil import create_path_env_var\n\n\nclass PexCli(TemplatedExternalTool):\n options_scope = \"pex-cli\"\n name = \"pex\"\n help = \"The PEX (Python EXecutable) tool (https://github.com/pantsbuild/pex).\"\n\n default_version = \"v2.1.131\"\n default_url_template = \"https://github.com/pantsbuild/pex/releases/download/{version}/pex\"\n version_constraints = \">=2.1.129,<3.0\"\n\n @classproperty\n def default_known_versions(cls):\n return [\n \"|\".join(\n (\n cls.default_version,\n plat,\n \"28b9dfc7e2f5f49f1e189b79eba3dd79ca2186f765009ea02dd6095f5359bf59\",\n \"4084520\",\n )\n )\n for 
plat in [\"macos_arm64\", \"macos_x86_64\", \"linux_x86_64\", \"linux_arm64\"]\n ]\n\n\n@dataclass(frozen=True)\nclass PexCliProcess:\n subcommand: tuple[str, ...]\n extra_args: tuple[str, ...]\n description: str = dataclasses.field(compare=False)\n additional_input_digest: Optional[Digest]\n extra_env: Optional[FrozenDict[str, str]]\n output_files: Optional[Tuple[str, ...]]\n output_directories: Optional[Tuple[str, ...]]\n level: LogLevel\n concurrency_available: int\n cache_scope: ProcessCacheScope\n\n def __init__(\n self,\n *,\n subcommand: Iterable[str],\n extra_args: Iterable[str],\n description: str,\n additional_input_digest: Optional[Digest] = None,\n extra_env: Optional[Mapping[str, str]] = None,\n output_files: Optional[Iterable[str]] = None,\n output_directories: Optional[Iterable[str]] = None,\n level: LogLevel = LogLevel.INFO,\n concurrency_available: int = 0,\n cache_scope: ProcessCacheScope = ProcessCacheScope.SUCCESSFUL,\n ) -> None:\n object.__setattr__(self, \"subcommand\", tuple(subcommand))\n object.__setattr__(self, \"extra_args\", tuple(extra_args))\n object.__setattr__(self, \"description\", description)\n object.__setattr__(self, \"additional_input_digest\", additional_input_digest)\n object.__setattr__(self, \"extra_env\", FrozenDict(extra_env) if extra_env else None)\n object.__setattr__(self, \"output_files\", tuple(output_files) if output_files else None)\n object.__setattr__(\n self, \"output_directories\", tuple(output_directories) if output_directories else None\n )\n object.__setattr__(self, \"level\", level)\n object.__setattr__(self, \"concurrency_available\", concurrency_available)\n object.__setattr__(self, \"cache_scope\", cache_scope)\n\n self.__post_init__()\n\n def __post_init__(self) -> None:\n if \"--pex-root-path\" in self.extra_args:\n raise ValueError(\"`--pex-root` flag not allowed. We set its value for you.\")\n\n\nclass PexPEX(DownloadedExternalTool):\n \"\"\"The Pex PEX binary.\"\"\"\n\n\n@rule\nasync def download_pex_pex(pex_cli: PexCli, platform: Platform) -> PexPEX:\n pex_pex = await Get(DownloadedExternalTool, ExternalToolRequest, pex_cli.get_request(platform))\n return PexPEX(digest=pex_pex.digest, exe=pex_pex.exe)\n\n\n@rule\nasync def setup_pex_cli_process(\n request: PexCliProcess,\n pex_pex: PexPEX,\n pex_env: PexEnvironment,\n bootstrap_python: PythonBuildStandaloneBinary,\n python_native_code: PythonNativeCodeSubsystem.EnvironmentAware,\n global_options: GlobalOptions,\n pex_subsystem: PexSubsystem,\n python_setup: PythonSetup,\n) -> Process:\n tmpdir = \".tmp\"\n gets: List[Get] = [Get(Digest, CreateDigest([Directory(tmpdir)]))]\n\n cert_args = []\n if global_options.ca_certs_path:\n ca_certs_fc = ca_certs_path_to_file_content(global_options.ca_certs_path)\n gets.append(Get(Digest, CreateDigest((ca_certs_fc,))))\n cert_args = [\"--cert\", ca_certs_fc.path]\n\n digests_to_merge = [pex_pex.digest]\n digests_to_merge.extend(await MultiGet(gets))\n if request.additional_input_digest:\n digests_to_merge.append(request.additional_input_digest)\n input_digest = await Get(Digest, MergeDigests(digests_to_merge))\n\n global_args = [\n # Ensure Pex and its subprocesses create temporary files in the the process execution\n # sandbox. It may make sense to do this generally for Processes, but in the short term we\n # have known use cases where /tmp is too small to hold large wheel downloads Pex is asked to\n # perform. 
Making the TMPDIR local to the sandbox allows control via\n # --local-execution-root-dir for the local case and should work well with remote cases where\n # a remoting implementation has to allow for processes producing large binaries in a\n # sandbox to support reasonable workloads. Communicating TMPDIR via --tmpdir instead of via\n # environment variable allows Pex to absolutize the path ensuring subprocesses that change\n # CWD can find the TMPDIR.\n \"--tmpdir\",\n tmpdir,\n ]\n\n if request.concurrency_available > 0:\n global_args.extend([\"--jobs\", \"{pants_concurrency}\"])\n\n verbosity_args = [f\"-{'v' * pex_subsystem.verbosity}\"] if pex_subsystem.verbosity > 0 else []\n\n # NB: We should always pass `--python-path`, as that tells Pex where to look for interpreters\n # when `--python` isn't an absolute path.\n resolve_args = [\n *cert_args,\n \"--python-path\",\n create_path_env_var(pex_env.interpreter_search_paths),\n ]\n # All old-style pex runs take the --pip-version flag, but only certain subcommands of the\n # `pex3` console script do. So if invoked with a subcommand, the caller must selectively\n # set --pip-version only on subcommands that take it.\n pip_version_args = (\n [] if request.subcommand else [\"--pip-version\", python_setup.pip_version.value]\n )\n args = [\n *request.subcommand,\n *global_args,\n *verbosity_args,\n *pip_version_args,\n *resolve_args,\n # NB: This comes at the end because it may use `--` passthrough args, # which must come at\n # the end.\n *request.extra_args,\n ]\n\n complete_pex_env = pex_env.in_sandbox(working_directory=None)\n normalized_argv = complete_pex_env.create_argv(pex_pex.exe, *args)\n env = {\n **complete_pex_env.environment_dict(python=bootstrap_python),\n **python_native_code.subprocess_env_vars,\n **(request.extra_env or {}),\n # If a subcommand is used, we need to use the `pex3` console script.\n **({\"PEX_SCRIPT\": \"pex3\"} if request.subcommand else {}),\n }\n\n return Process(\n normalized_argv,\n description=request.description,\n input_digest=input_digest,\n env=env,\n output_files=request.output_files,\n output_directories=request.output_directories,\n append_only_caches=complete_pex_env.append_only_caches,\n immutable_input_digests=bootstrap_python.immutable_input_digests,\n level=request.level,\n concurrency_available=request.concurrency_available,\n cache_scope=request.cache_scope,\n )\n\n\ndef rules():\n return [\n *collect_rules(),\n *external_tool.rules(),\n *pex_environment.rules(),\n *adhoc_binaries.rules(),\n ]\n", "path": "src/python/pants/backend/python/util_rules/pex_cli.py"}]} | 3,230 | 161 |
gh_patches_debug_3064 | rasdani/github-patches | git_diff | NVIDIA__NVFlare-1350 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug in prostate_2D example
https://github.com/NVIDIA/NVFlare/blob/8f8f029eeecf58a85d9633357ce1ed4f8f39f655/examples/advanced/prostate/prostate_2D/custom/learners/supervised_monai_prostate_learner.py#L171
`self.transform_valid` is not defined if `cache_rate=0`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/advanced/prostate/prostate_2D/custom/learners/supervised_monai_prostate_learner.py`
Content:
```
1 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import json
16 import os
17
18 import torch
19 import torch.optim as optim
20 from learners.supervised_learner import SupervisedLearner
21 from monai.data import CacheDataset, DataLoader, Dataset, load_decathlon_datalist
22 from monai.inferers import SimpleInferer
23 from monai.losses import DiceLoss
24 from monai.metrics import DiceMetric
25 from monai.networks.nets.unet import UNet
26 from monai.transforms import (
27 Activations,
28 AsDiscrete,
29 AsDiscreted,
30 Compose,
31 EnsureChannelFirstd,
32 EnsureType,
33 EnsureTyped,
34 LoadImaged,
35 Resized,
36 ScaleIntensityRanged,
37 )
38 from utils.custom_client_datalist_json_path import custom_client_datalist_json_path
39
40 from nvflare.apis.fl_context import FLContext
41 from nvflare.app_common.app_constant import AppConstants
42 from nvflare.app_common.pt.pt_fedproxloss import PTFedProxLoss
43
44
45 class SupervisedMonaiProstateLearner(SupervisedLearner):
46 def __init__(
47 self,
48 train_config_filename,
49 aggregation_epochs: int = 1,
50 train_task_name: str = AppConstants.TASK_TRAIN,
51 ):
52 """MONAI Learner for prostate segmentation task.
53 It inherits from SupervisedLearner.
54
55 Args:
56 train_config_filename: path for config file, this is an addition term for config loading
57 aggregation_epochs: the number of training epochs for a round.
58 train_task_name: name of the task to train the model.
59
60 Returns:
61 a Shareable with the updated local model after running `execute()`
62 """
63 super().__init__(
64 aggregation_epochs=aggregation_epochs,
65 train_task_name=train_task_name,
66 )
67 self.train_config_filename = train_config_filename
68 self.config_info = None
69
70 def train_config(self, fl_ctx: FLContext):
71 """MONAI traning configuration
72 Here, we use a json to specify the needed parameters
73 """
74
75 # Load training configurations json
76 engine = fl_ctx.get_engine()
77 ws = engine.get_workspace()
78 app_config_dir = ws.get_app_config_dir(fl_ctx.get_job_id())
79 train_config_file_path = os.path.join(app_config_dir, self.train_config_filename)
80 if not os.path.isfile(train_config_file_path):
81 self.log_error(
82 fl_ctx,
83 f"Training configuration file does not exist at {train_config_file_path}",
84 )
85 with open(train_config_file_path) as file:
86 self.config_info = json.load(file)
87
88 # Get the config_info
89 self.lr = self.config_info["learning_rate"]
90 self.fedproxloss_mu = self.config_info["fedproxloss_mu"]
91 cache_rate = self.config_info["cache_dataset"]
92 dataset_base_dir = self.config_info["dataset_base_dir"]
93 datalist_json_path = self.config_info["datalist_json_path"]
94
95 # Get datalist json
96 datalist_json_path = custom_client_datalist_json_path(datalist_json_path, self.client_id)
97
98 # Set datalist
99 train_list = load_decathlon_datalist(
100 data_list_file_path=datalist_json_path,
101 is_segmentation=True,
102 data_list_key="training",
103 base_dir=dataset_base_dir,
104 )
105 valid_list = load_decathlon_datalist(
106 data_list_file_path=datalist_json_path,
107 is_segmentation=True,
108 data_list_key="validation",
109 base_dir=dataset_base_dir,
110 )
111 self.log_info(
112 fl_ctx,
113 f"Training Size: {len(train_list)}, Validation Size: {len(valid_list)}",
114 )
115
116 # Set the training-related context
117 self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
118 self.model = UNet(
119 spatial_dims=2,
120 in_channels=1,
121 out_channels=1,
122 channels=(16, 32, 64, 128, 256),
123 strides=(2, 2, 2, 2),
124 num_res_units=2,
125 ).to(self.device)
126 self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)
127 self.criterion = DiceLoss(sigmoid=True)
128
129 if self.fedproxloss_mu > 0:
130 self.log_info(fl_ctx, f"using FedProx loss with mu {self.fedproxloss_mu}")
131 self.criterion_prox = PTFedProxLoss(mu=self.fedproxloss_mu)
132
133 self.transform = Compose(
134 [
135 LoadImaged(keys=["image", "label"]),
136 EnsureChannelFirstd(keys=["image", "label"]),
137 ScaleIntensityRanged(keys=["image", "label"], a_min=0, a_max=255, b_min=0.0, b_max=1.0),
138 Resized(
139 keys=["image", "label"],
140 spatial_size=(256, 256),
141 mode=("bilinear"),
142 align_corners=True,
143 ),
144 AsDiscreted(keys=["label"], threshold=0.5),
145 EnsureTyped(keys=["image", "label"]),
146 ]
147 )
148 self.transform_post = Compose([EnsureType(), Activations(sigmoid=True), AsDiscrete(threshold=0.5)])
149
150 # Set dataset
151 if cache_rate > 0.0:
152 self.train_dataset = CacheDataset(
153 data=train_list,
154 transform=self.transform,
155 cache_rate=cache_rate,
156 num_workers=4,
157 )
158 self.valid_dataset = CacheDataset(
159 data=valid_list,
160 transform=self.transform,
161 cache_rate=cache_rate,
162 num_workers=4,
163 )
164 else:
165 self.train_dataset = Dataset(
166 data=train_list,
167 transform=self.transform,
168 )
169 self.valid_dataset = Dataset(
170 data=valid_list,
171 transform=self.transform_valid,
172 )
173
174 self.train_loader = DataLoader(
175 self.train_dataset,
176 batch_size=1,
177 shuffle=True,
178 num_workers=2,
179 )
180 self.valid_loader = DataLoader(
181 self.valid_dataset,
182 batch_size=1,
183 shuffle=False,
184 num_workers=2,
185 )
186
187 # Set inferer and evaluation metric
188 self.inferer = SimpleInferer()
189 self.valid_metric = DiceMetric(include_background=False, reduction="mean", get_not_nans=False)
190
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/advanced/prostate/prostate_2D/custom/learners/supervised_monai_prostate_learner.py b/examples/advanced/prostate/prostate_2D/custom/learners/supervised_monai_prostate_learner.py
--- a/examples/advanced/prostate/prostate_2D/custom/learners/supervised_monai_prostate_learner.py
+++ b/examples/advanced/prostate/prostate_2D/custom/learners/supervised_monai_prostate_learner.py
@@ -168,7 +168,7 @@
)
self.valid_dataset = Dataset(
data=valid_list,
- transform=self.transform_valid,
+ transform=self.transform,
)
self.train_loader = DataLoader(
| {"golden_diff": "diff --git a/examples/advanced/prostate/prostate_2D/custom/learners/supervised_monai_prostate_learner.py b/examples/advanced/prostate/prostate_2D/custom/learners/supervised_monai_prostate_learner.py\n--- a/examples/advanced/prostate/prostate_2D/custom/learners/supervised_monai_prostate_learner.py\n+++ b/examples/advanced/prostate/prostate_2D/custom/learners/supervised_monai_prostate_learner.py\n@@ -168,7 +168,7 @@\n )\n self.valid_dataset = Dataset(\n data=valid_list,\n- transform=self.transform_valid,\n+ transform=self.transform,\n )\n \n self.train_loader = DataLoader(\n", "issue": "Bug in prostate_2D example\nhttps://github.com/NVIDIA/NVFlare/blob/8f8f029eeecf58a85d9633357ce1ed4f8f39f655/examples/advanced/prostate/prostate_2D/custom/learners/supervised_monai_prostate_learner.py#L171\r\n\r\n`self.transform_valid` is not defined if `cache_rate=0`.\n", "before_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport os\n\nimport torch\nimport torch.optim as optim\nfrom learners.supervised_learner import SupervisedLearner\nfrom monai.data import CacheDataset, DataLoader, Dataset, load_decathlon_datalist\nfrom monai.inferers import SimpleInferer\nfrom monai.losses import DiceLoss\nfrom monai.metrics import DiceMetric\nfrom monai.networks.nets.unet import UNet\nfrom monai.transforms import (\n Activations,\n AsDiscrete,\n AsDiscreted,\n Compose,\n EnsureChannelFirstd,\n EnsureType,\n EnsureTyped,\n LoadImaged,\n Resized,\n ScaleIntensityRanged,\n)\nfrom utils.custom_client_datalist_json_path import custom_client_datalist_json_path\n\nfrom nvflare.apis.fl_context import FLContext\nfrom nvflare.app_common.app_constant import AppConstants\nfrom nvflare.app_common.pt.pt_fedproxloss import PTFedProxLoss\n\n\nclass SupervisedMonaiProstateLearner(SupervisedLearner):\n def __init__(\n self,\n train_config_filename,\n aggregation_epochs: int = 1,\n train_task_name: str = AppConstants.TASK_TRAIN,\n ):\n \"\"\"MONAI Learner for prostate segmentation task.\n It inherits from SupervisedLearner.\n\n Args:\n train_config_filename: path for config file, this is an addition term for config loading\n aggregation_epochs: the number of training epochs for a round.\n train_task_name: name of the task to train the model.\n\n Returns:\n a Shareable with the updated local model after running `execute()`\n \"\"\"\n super().__init__(\n aggregation_epochs=aggregation_epochs,\n train_task_name=train_task_name,\n )\n self.train_config_filename = train_config_filename\n self.config_info = None\n\n def train_config(self, fl_ctx: FLContext):\n \"\"\"MONAI traning configuration\n Here, we use a json to specify the needed parameters\n \"\"\"\n\n # Load training configurations json\n engine = fl_ctx.get_engine()\n ws = engine.get_workspace()\n app_config_dir = ws.get_app_config_dir(fl_ctx.get_job_id())\n train_config_file_path = os.path.join(app_config_dir, self.train_config_filename)\n if not 
os.path.isfile(train_config_file_path):\n self.log_error(\n fl_ctx,\n f\"Training configuration file does not exist at {train_config_file_path}\",\n )\n with open(train_config_file_path) as file:\n self.config_info = json.load(file)\n\n # Get the config_info\n self.lr = self.config_info[\"learning_rate\"]\n self.fedproxloss_mu = self.config_info[\"fedproxloss_mu\"]\n cache_rate = self.config_info[\"cache_dataset\"]\n dataset_base_dir = self.config_info[\"dataset_base_dir\"]\n datalist_json_path = self.config_info[\"datalist_json_path\"]\n\n # Get datalist json\n datalist_json_path = custom_client_datalist_json_path(datalist_json_path, self.client_id)\n\n # Set datalist\n train_list = load_decathlon_datalist(\n data_list_file_path=datalist_json_path,\n is_segmentation=True,\n data_list_key=\"training\",\n base_dir=dataset_base_dir,\n )\n valid_list = load_decathlon_datalist(\n data_list_file_path=datalist_json_path,\n is_segmentation=True,\n data_list_key=\"validation\",\n base_dir=dataset_base_dir,\n )\n self.log_info(\n fl_ctx,\n f\"Training Size: {len(train_list)}, Validation Size: {len(valid_list)}\",\n )\n\n # Set the training-related context\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n self.model = UNet(\n spatial_dims=2,\n in_channels=1,\n out_channels=1,\n channels=(16, 32, 64, 128, 256),\n strides=(2, 2, 2, 2),\n num_res_units=2,\n ).to(self.device)\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)\n self.criterion = DiceLoss(sigmoid=True)\n\n if self.fedproxloss_mu > 0:\n self.log_info(fl_ctx, f\"using FedProx loss with mu {self.fedproxloss_mu}\")\n self.criterion_prox = PTFedProxLoss(mu=self.fedproxloss_mu)\n\n self.transform = Compose(\n [\n LoadImaged(keys=[\"image\", \"label\"]),\n EnsureChannelFirstd(keys=[\"image\", \"label\"]),\n ScaleIntensityRanged(keys=[\"image\", \"label\"], a_min=0, a_max=255, b_min=0.0, b_max=1.0),\n Resized(\n keys=[\"image\", \"label\"],\n spatial_size=(256, 256),\n mode=(\"bilinear\"),\n align_corners=True,\n ),\n AsDiscreted(keys=[\"label\"], threshold=0.5),\n EnsureTyped(keys=[\"image\", \"label\"]),\n ]\n )\n self.transform_post = Compose([EnsureType(), Activations(sigmoid=True), AsDiscrete(threshold=0.5)])\n\n # Set dataset\n if cache_rate > 0.0:\n self.train_dataset = CacheDataset(\n data=train_list,\n transform=self.transform,\n cache_rate=cache_rate,\n num_workers=4,\n )\n self.valid_dataset = CacheDataset(\n data=valid_list,\n transform=self.transform,\n cache_rate=cache_rate,\n num_workers=4,\n )\n else:\n self.train_dataset = Dataset(\n data=train_list,\n transform=self.transform,\n )\n self.valid_dataset = Dataset(\n data=valid_list,\n transform=self.transform_valid,\n )\n\n self.train_loader = DataLoader(\n self.train_dataset,\n batch_size=1,\n shuffle=True,\n num_workers=2,\n )\n self.valid_loader = DataLoader(\n self.valid_dataset,\n batch_size=1,\n shuffle=False,\n num_workers=2,\n )\n\n # Set inferer and evaluation metric\n self.inferer = SimpleInferer()\n self.valid_metric = DiceMetric(include_background=False, reduction=\"mean\", get_not_nans=False)\n", "path": "examples/advanced/prostate/prostate_2D/custom/learners/supervised_monai_prostate_learner.py"}], "after_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport os\n\nimport torch\nimport torch.optim as optim\nfrom learners.supervised_learner import SupervisedLearner\nfrom monai.data import CacheDataset, DataLoader, Dataset, load_decathlon_datalist\nfrom monai.inferers import SimpleInferer\nfrom monai.losses import DiceLoss\nfrom monai.metrics import DiceMetric\nfrom monai.networks.nets.unet import UNet\nfrom monai.transforms import (\n Activations,\n AsDiscrete,\n AsDiscreted,\n Compose,\n EnsureChannelFirstd,\n EnsureType,\n EnsureTyped,\n LoadImaged,\n Resized,\n ScaleIntensityRanged,\n)\nfrom utils.custom_client_datalist_json_path import custom_client_datalist_json_path\n\nfrom nvflare.apis.fl_context import FLContext\nfrom nvflare.app_common.app_constant import AppConstants\nfrom nvflare.app_common.pt.pt_fedproxloss import PTFedProxLoss\n\n\nclass SupervisedMonaiProstateLearner(SupervisedLearner):\n def __init__(\n self,\n train_config_filename,\n aggregation_epochs: int = 1,\n train_task_name: str = AppConstants.TASK_TRAIN,\n ):\n \"\"\"MONAI Learner for prostate segmentation task.\n It inherits from SupervisedLearner.\n\n Args:\n train_config_filename: path for config file, this is an addition term for config loading\n aggregation_epochs: the number of training epochs for a round.\n train_task_name: name of the task to train the model.\n\n Returns:\n a Shareable with the updated local model after running `execute()`\n \"\"\"\n super().__init__(\n aggregation_epochs=aggregation_epochs,\n train_task_name=train_task_name,\n )\n self.train_config_filename = train_config_filename\n self.config_info = None\n\n def train_config(self, fl_ctx: FLContext):\n \"\"\"MONAI traning configuration\n Here, we use a json to specify the needed parameters\n \"\"\"\n\n # Load training configurations json\n engine = fl_ctx.get_engine()\n ws = engine.get_workspace()\n app_config_dir = ws.get_app_config_dir(fl_ctx.get_job_id())\n train_config_file_path = os.path.join(app_config_dir, self.train_config_filename)\n if not os.path.isfile(train_config_file_path):\n self.log_error(\n fl_ctx,\n f\"Training configuration file does not exist at {train_config_file_path}\",\n )\n with open(train_config_file_path) as file:\n self.config_info = json.load(file)\n\n # Get the config_info\n self.lr = self.config_info[\"learning_rate\"]\n self.fedproxloss_mu = self.config_info[\"fedproxloss_mu\"]\n cache_rate = self.config_info[\"cache_dataset\"]\n dataset_base_dir = self.config_info[\"dataset_base_dir\"]\n datalist_json_path = self.config_info[\"datalist_json_path\"]\n\n # Get datalist json\n datalist_json_path = custom_client_datalist_json_path(datalist_json_path, self.client_id)\n\n # Set datalist\n train_list = load_decathlon_datalist(\n data_list_file_path=datalist_json_path,\n is_segmentation=True,\n data_list_key=\"training\",\n base_dir=dataset_base_dir,\n )\n valid_list = load_decathlon_datalist(\n data_list_file_path=datalist_json_path,\n is_segmentation=True,\n 
data_list_key=\"validation\",\n base_dir=dataset_base_dir,\n )\n self.log_info(\n fl_ctx,\n f\"Training Size: {len(train_list)}, Validation Size: {len(valid_list)}\",\n )\n\n # Set the training-related context\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n self.model = UNet(\n spatial_dims=2,\n in_channels=1,\n out_channels=1,\n channels=(16, 32, 64, 128, 256),\n strides=(2, 2, 2, 2),\n num_res_units=2,\n ).to(self.device)\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)\n self.criterion = DiceLoss(sigmoid=True)\n\n if self.fedproxloss_mu > 0:\n self.log_info(fl_ctx, f\"using FedProx loss with mu {self.fedproxloss_mu}\")\n self.criterion_prox = PTFedProxLoss(mu=self.fedproxloss_mu)\n\n self.transform = Compose(\n [\n LoadImaged(keys=[\"image\", \"label\"]),\n EnsureChannelFirstd(keys=[\"image\", \"label\"]),\n ScaleIntensityRanged(keys=[\"image\", \"label\"], a_min=0, a_max=255, b_min=0.0, b_max=1.0),\n Resized(\n keys=[\"image\", \"label\"],\n spatial_size=(256, 256),\n mode=(\"bilinear\"),\n align_corners=True,\n ),\n AsDiscreted(keys=[\"label\"], threshold=0.5),\n EnsureTyped(keys=[\"image\", \"label\"]),\n ]\n )\n self.transform_post = Compose([EnsureType(), Activations(sigmoid=True), AsDiscrete(threshold=0.5)])\n\n # Set dataset\n if cache_rate > 0.0:\n self.train_dataset = CacheDataset(\n data=train_list,\n transform=self.transform,\n cache_rate=cache_rate,\n num_workers=4,\n )\n self.valid_dataset = CacheDataset(\n data=valid_list,\n transform=self.transform,\n cache_rate=cache_rate,\n num_workers=4,\n )\n else:\n self.train_dataset = Dataset(\n data=train_list,\n transform=self.transform,\n )\n self.valid_dataset = Dataset(\n data=valid_list,\n transform=self.transform,\n )\n\n self.train_loader = DataLoader(\n self.train_dataset,\n batch_size=1,\n shuffle=True,\n num_workers=2,\n )\n self.valid_loader = DataLoader(\n self.valid_dataset,\n batch_size=1,\n shuffle=False,\n num_workers=2,\n )\n\n # Set inferer and evaluation metric\n self.inferer = SimpleInferer()\n self.valid_metric = DiceMetric(include_background=False, reduction=\"mean\", get_not_nans=False)\n", "path": "examples/advanced/prostate/prostate_2D/custom/learners/supervised_monai_prostate_learner.py"}]} | 2,347 | 160 |
gh_patches_debug_1192 | rasdani/github-patches | git_diff | getredash__redash-4189 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
JIRA setup: change password field name to "API Token"
While a password can be used there, it's not recommended and eventually will be deprecated.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/query_runner/jql.py`
Content:
```
1 import re
2 from collections import OrderedDict
3
4 from redash.query_runner import *
5 from redash.utils import json_dumps, json_loads
6
7
8 # TODO: make this more general and move into __init__.py
9 class ResultSet(object):
10 def __init__(self):
11 self.columns = OrderedDict()
12 self.rows = []
13
14 def add_row(self, row):
15 for key in row.keys():
16 self.add_column(key)
17
18 self.rows.append(row)
19
20 def add_column(self, column, column_type=TYPE_STRING):
21 if column not in self.columns:
22 self.columns[column] = {'name': column, 'type': column_type, 'friendly_name': column}
23
24 def to_json(self):
25 return json_dumps({'rows': self.rows, 'columns': self.columns.values()})
26
27 def merge(self, set):
28 self.rows = self.rows + set.rows
29
30
31 def parse_issue(issue, field_mapping):
32 result = OrderedDict()
33 result['key'] = issue['key']
34
35 for k, v in issue['fields'].iteritems():#
36 output_name = field_mapping.get_output_field_name(k)
37 member_names = field_mapping.get_dict_members(k)
38
39 if isinstance(v, dict):
40 if len(member_names) > 0:
41 # if field mapping with dict member mappings defined get value of each member
42 for member_name in member_names:
43 if member_name in v:
44 result[field_mapping.get_dict_output_field_name(k, member_name)] = v[member_name]
45
46 else:
47 # these special mapping rules are kept for backwards compatibility
48 if 'key' in v:
49 result['{}_key'.format(output_name)] = v['key']
50 if 'name' in v:
51 result['{}_name'.format(output_name)] = v['name']
52
53 if k in v:
54 result[output_name] = v[k]
55
56 if 'watchCount' in v:
57 result[output_name] = v['watchCount']
58
59 elif isinstance(v, list):
60 if len(member_names) > 0:
61 # if field mapping with dict member mappings defined get value of each member
62 for member_name in member_names:
63 listValues = []
64 for listItem in v:
65 if isinstance(listItem, dict):
66 if member_name in listItem:
67 listValues.append(listItem[member_name])
68 if len(listValues) > 0:
69 result[field_mapping.get_dict_output_field_name(k, member_name)] = ','.join(listValues)
70
71 else:
72 # otherwise support list values only for non-dict items
73 listValues = []
74 for listItem in v:
75 if not isinstance(listItem, dict):
76 listValues.append(listItem)
77 if len(listValues) > 0:
78 result[output_name] = ','.join(listValues)
79
80 else:
81 result[output_name] = v
82
83 return result
84
85
86 def parse_issues(data, field_mapping):
87 results = ResultSet()
88
89 for issue in data['issues']:
90 results.add_row(parse_issue(issue, field_mapping))
91
92 return results
93
94
95 def parse_count(data):
96 results = ResultSet()
97 results.add_row({'count': data['total']})
98 return results
99
100
101 class FieldMapping:
102
103 def __init__(cls, query_field_mapping):
104 cls.mapping = []
105 for k, v in query_field_mapping.iteritems():
106 field_name = k
107 member_name = None
108
109 # check for member name contained in field name
110 member_parser = re.search('(\w+)\.(\w+)', k)
111 if (member_parser):
112 field_name = member_parser.group(1)
113 member_name = member_parser.group(2)
114
115 cls.mapping.append({
116 'field_name': field_name,
117 'member_name': member_name,
118 'output_field_name': v
119 })
120
121 def get_output_field_name(cls, field_name):
122 for item in cls.mapping:
123 if item['field_name'] == field_name and not item['member_name']:
124 return item['output_field_name']
125 return field_name
126
127 def get_dict_members(cls, field_name):
128 member_names = []
129 for item in cls.mapping:
130 if item['field_name'] == field_name and item['member_name']:
131 member_names.append(item['member_name'])
132 return member_names
133
134 def get_dict_output_field_name(cls, field_name, member_name):
135 for item in cls.mapping:
136 if item['field_name'] == field_name and item['member_name'] == member_name:
137 return item['output_field_name']
138 return None
139
140
141 class JiraJQL(BaseHTTPQueryRunner):
142 noop_query = '{"queryType": "count"}'
143 response_error = "JIRA returned unexpected status code"
144 requires_authentication = True
145 url_title = 'JIRA URL'
146 username_title = 'Username'
147 password_title = 'Password'
148
149 @classmethod
150 def name(cls):
151 return "JIRA (JQL)"
152
153 def __init__(self, configuration):
154 super(JiraJQL, self).__init__(configuration)
155 self.syntax = 'json'
156
157 def run_query(self, query, user):
158 jql_url = '{}/rest/api/2/search'.format(self.configuration["url"])
159
160 try:
161 query = json_loads(query)
162 query_type = query.pop('queryType', 'select')
163 field_mapping = FieldMapping(query.pop('fieldMapping', {}))
164
165 if query_type == 'count':
166 query['maxResults'] = 1
167 query['fields'] = ''
168 else:
169 query['maxResults'] = query.get('maxResults', 1000)
170
171 response, error = self.get_response(jql_url, params=query)
172 if error is not None:
173 return None, error
174
175 data = response.json()
176
177 if query_type == 'count':
178 results = parse_count(data)
179 else:
180 results = parse_issues(data, field_mapping)
181 index = data['startAt'] + data['maxResults']
182
183 while data['total'] > index:
184 query['startAt'] = index
185 response, error = self.get_response(jql_url, params=query)
186 if error is not None:
187 return None, error
188
189 data = response.json()
190 index = data['startAt'] + data['maxResults']
191
192 addl_results = parse_issues(data, field_mapping)
193 results.merge(addl_results)
194
195 return results.to_json(), None
196 except KeyboardInterrupt:
197 return None, "Query cancelled by user."
198
199
200 register(JiraJQL)
201
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redash/query_runner/jql.py b/redash/query_runner/jql.py
--- a/redash/query_runner/jql.py
+++ b/redash/query_runner/jql.py
@@ -144,7 +144,7 @@
requires_authentication = True
url_title = 'JIRA URL'
username_title = 'Username'
- password_title = 'Password'
+ password_title = 'API Token'
@classmethod
def name(cls):
| {"golden_diff": "diff --git a/redash/query_runner/jql.py b/redash/query_runner/jql.py\n--- a/redash/query_runner/jql.py\n+++ b/redash/query_runner/jql.py\n@@ -144,7 +144,7 @@\n requires_authentication = True\n url_title = 'JIRA URL'\n username_title = 'Username'\n- password_title = 'Password'\n+ password_title = 'API Token'\n \n @classmethod\n def name(cls):\n", "issue": "JIRA setup: change password field name to \"API Token\"\nWhile a password can be used there, it's not recommended and eventually will be deprecated. \n", "before_files": [{"content": "import re\nfrom collections import OrderedDict\n\nfrom redash.query_runner import *\nfrom redash.utils import json_dumps, json_loads\n\n\n# TODO: make this more general and move into __init__.py\nclass ResultSet(object):\n def __init__(self):\n self.columns = OrderedDict()\n self.rows = []\n\n def add_row(self, row):\n for key in row.keys():\n self.add_column(key)\n\n self.rows.append(row)\n\n def add_column(self, column, column_type=TYPE_STRING):\n if column not in self.columns:\n self.columns[column] = {'name': column, 'type': column_type, 'friendly_name': column}\n\n def to_json(self):\n return json_dumps({'rows': self.rows, 'columns': self.columns.values()})\n\n def merge(self, set):\n self.rows = self.rows + set.rows\n\n\ndef parse_issue(issue, field_mapping):\n result = OrderedDict()\n result['key'] = issue['key']\n\n for k, v in issue['fields'].iteritems():#\n output_name = field_mapping.get_output_field_name(k)\n member_names = field_mapping.get_dict_members(k)\n\n if isinstance(v, dict):\n if len(member_names) > 0:\n # if field mapping with dict member mappings defined get value of each member\n for member_name in member_names:\n if member_name in v:\n result[field_mapping.get_dict_output_field_name(k, member_name)] = v[member_name]\n\n else:\n # these special mapping rules are kept for backwards compatibility\n if 'key' in v:\n result['{}_key'.format(output_name)] = v['key']\n if 'name' in v:\n result['{}_name'.format(output_name)] = v['name']\n\n if k in v:\n result[output_name] = v[k]\n\n if 'watchCount' in v:\n result[output_name] = v['watchCount']\n\n elif isinstance(v, list):\n if len(member_names) > 0:\n # if field mapping with dict member mappings defined get value of each member\n for member_name in member_names:\n listValues = []\n for listItem in v:\n if isinstance(listItem, dict):\n if member_name in listItem:\n listValues.append(listItem[member_name])\n if len(listValues) > 0:\n result[field_mapping.get_dict_output_field_name(k, member_name)] = ','.join(listValues)\n\n else:\n # otherwise support list values only for non-dict items\n listValues = []\n for listItem in v:\n if not isinstance(listItem, dict):\n listValues.append(listItem)\n if len(listValues) > 0:\n result[output_name] = ','.join(listValues)\n\n else:\n result[output_name] = v\n\n return result\n\n\ndef parse_issues(data, field_mapping):\n results = ResultSet()\n\n for issue in data['issues']:\n results.add_row(parse_issue(issue, field_mapping))\n\n return results\n\n\ndef parse_count(data):\n results = ResultSet()\n results.add_row({'count': data['total']})\n return results\n\n\nclass FieldMapping:\n\n def __init__(cls, query_field_mapping):\n cls.mapping = []\n for k, v in query_field_mapping.iteritems():\n field_name = k\n member_name = None\n\n # check for member name contained in field name\n member_parser = re.search('(\\w+)\\.(\\w+)', k)\n if (member_parser):\n field_name = member_parser.group(1)\n member_name = member_parser.group(2)\n\n 
cls.mapping.append({\n 'field_name': field_name,\n 'member_name': member_name,\n 'output_field_name': v\n })\n\n def get_output_field_name(cls, field_name):\n for item in cls.mapping:\n if item['field_name'] == field_name and not item['member_name']:\n return item['output_field_name']\n return field_name\n\n def get_dict_members(cls, field_name):\n member_names = []\n for item in cls.mapping:\n if item['field_name'] == field_name and item['member_name']:\n member_names.append(item['member_name'])\n return member_names\n\n def get_dict_output_field_name(cls, field_name, member_name):\n for item in cls.mapping:\n if item['field_name'] == field_name and item['member_name'] == member_name:\n return item['output_field_name']\n return None\n\n\nclass JiraJQL(BaseHTTPQueryRunner):\n noop_query = '{\"queryType\": \"count\"}'\n response_error = \"JIRA returned unexpected status code\"\n requires_authentication = True\n url_title = 'JIRA URL'\n username_title = 'Username'\n password_title = 'Password'\n\n @classmethod\n def name(cls):\n return \"JIRA (JQL)\"\n\n def __init__(self, configuration):\n super(JiraJQL, self).__init__(configuration)\n self.syntax = 'json'\n\n def run_query(self, query, user):\n jql_url = '{}/rest/api/2/search'.format(self.configuration[\"url\"])\n\n try:\n query = json_loads(query)\n query_type = query.pop('queryType', 'select')\n field_mapping = FieldMapping(query.pop('fieldMapping', {}))\n\n if query_type == 'count':\n query['maxResults'] = 1\n query['fields'] = ''\n else:\n query['maxResults'] = query.get('maxResults', 1000)\n\n response, error = self.get_response(jql_url, params=query)\n if error is not None:\n return None, error\n\n data = response.json()\n\n if query_type == 'count':\n results = parse_count(data)\n else:\n results = parse_issues(data, field_mapping)\n index = data['startAt'] + data['maxResults']\n\n while data['total'] > index:\n query['startAt'] = index\n response, error = self.get_response(jql_url, params=query)\n if error is not None:\n return None, error\n\n data = response.json()\n index = data['startAt'] + data['maxResults']\n\n addl_results = parse_issues(data, field_mapping)\n results.merge(addl_results)\n\n return results.to_json(), None\n except KeyboardInterrupt:\n return None, \"Query cancelled by user.\"\n\n\nregister(JiraJQL)\n", "path": "redash/query_runner/jql.py"}], "after_files": [{"content": "import re\nfrom collections import OrderedDict\n\nfrom redash.query_runner import *\nfrom redash.utils import json_dumps, json_loads\n\n\n# TODO: make this more general and move into __init__.py\nclass ResultSet(object):\n def __init__(self):\n self.columns = OrderedDict()\n self.rows = []\n\n def add_row(self, row):\n for key in row.keys():\n self.add_column(key)\n\n self.rows.append(row)\n\n def add_column(self, column, column_type=TYPE_STRING):\n if column not in self.columns:\n self.columns[column] = {'name': column, 'type': column_type, 'friendly_name': column}\n\n def to_json(self):\n return json_dumps({'rows': self.rows, 'columns': self.columns.values()})\n\n def merge(self, set):\n self.rows = self.rows + set.rows\n\n\ndef parse_issue(issue, field_mapping):\n result = OrderedDict()\n result['key'] = issue['key']\n\n for k, v in issue['fields'].iteritems():#\n output_name = field_mapping.get_output_field_name(k)\n member_names = field_mapping.get_dict_members(k)\n\n if isinstance(v, dict):\n if len(member_names) > 0:\n # if field mapping with dict member mappings defined get value of each member\n for member_name in member_names:\n if 
member_name in v:\n result[field_mapping.get_dict_output_field_name(k, member_name)] = v[member_name]\n\n else:\n # these special mapping rules are kept for backwards compatibility\n if 'key' in v:\n result['{}_key'.format(output_name)] = v['key']\n if 'name' in v:\n result['{}_name'.format(output_name)] = v['name']\n\n if k in v:\n result[output_name] = v[k]\n\n if 'watchCount' in v:\n result[output_name] = v['watchCount']\n\n elif isinstance(v, list):\n if len(member_names) > 0:\n # if field mapping with dict member mappings defined get value of each member\n for member_name in member_names:\n listValues = []\n for listItem in v:\n if isinstance(listItem, dict):\n if member_name in listItem:\n listValues.append(listItem[member_name])\n if len(listValues) > 0:\n result[field_mapping.get_dict_output_field_name(k, member_name)] = ','.join(listValues)\n\n else:\n # otherwise support list values only for non-dict items\n listValues = []\n for listItem in v:\n if not isinstance(listItem, dict):\n listValues.append(listItem)\n if len(listValues) > 0:\n result[output_name] = ','.join(listValues)\n\n else:\n result[output_name] = v\n\n return result\n\n\ndef parse_issues(data, field_mapping):\n results = ResultSet()\n\n for issue in data['issues']:\n results.add_row(parse_issue(issue, field_mapping))\n\n return results\n\n\ndef parse_count(data):\n results = ResultSet()\n results.add_row({'count': data['total']})\n return results\n\n\nclass FieldMapping:\n\n def __init__(cls, query_field_mapping):\n cls.mapping = []\n for k, v in query_field_mapping.iteritems():\n field_name = k\n member_name = None\n\n # check for member name contained in field name\n member_parser = re.search('(\\w+)\\.(\\w+)', k)\n if (member_parser):\n field_name = member_parser.group(1)\n member_name = member_parser.group(2)\n\n cls.mapping.append({\n 'field_name': field_name,\n 'member_name': member_name,\n 'output_field_name': v\n })\n\n def get_output_field_name(cls, field_name):\n for item in cls.mapping:\n if item['field_name'] == field_name and not item['member_name']:\n return item['output_field_name']\n return field_name\n\n def get_dict_members(cls, field_name):\n member_names = []\n for item in cls.mapping:\n if item['field_name'] == field_name and item['member_name']:\n member_names.append(item['member_name'])\n return member_names\n\n def get_dict_output_field_name(cls, field_name, member_name):\n for item in cls.mapping:\n if item['field_name'] == field_name and item['member_name'] == member_name:\n return item['output_field_name']\n return None\n\n\nclass JiraJQL(BaseHTTPQueryRunner):\n noop_query = '{\"queryType\": \"count\"}'\n response_error = \"JIRA returned unexpected status code\"\n requires_authentication = True\n url_title = 'JIRA URL'\n username_title = 'Username'\n password_title = 'API Token'\n\n @classmethod\n def name(cls):\n return \"JIRA (JQL)\"\n\n def __init__(self, configuration):\n super(JiraJQL, self).__init__(configuration)\n self.syntax = 'json'\n\n def run_query(self, query, user):\n jql_url = '{}/rest/api/2/search'.format(self.configuration[\"url\"])\n\n try:\n query = json_loads(query)\n query_type = query.pop('queryType', 'select')\n field_mapping = FieldMapping(query.pop('fieldMapping', {}))\n\n if query_type == 'count':\n query['maxResults'] = 1\n query['fields'] = ''\n else:\n query['maxResults'] = query.get('maxResults', 1000)\n\n response, error = self.get_response(jql_url, params=query)\n if error is not None:\n return None, error\n\n data = response.json()\n\n if query_type == 
'count':\n results = parse_count(data)\n else:\n results = parse_issues(data, field_mapping)\n index = data['startAt'] + data['maxResults']\n\n while data['total'] > index:\n query['startAt'] = index\n response, error = self.get_response(jql_url, params=query)\n if error is not None:\n return None, error\n\n data = response.json()\n index = data['startAt'] + data['maxResults']\n\n addl_results = parse_issues(data, field_mapping)\n results.merge(addl_results)\n\n return results.to_json(), None\n except KeyboardInterrupt:\n return None, \"Query cancelled by user.\"\n\n\nregister(JiraJQL)\n", "path": "redash/query_runner/jql.py"}]} | 2,186 | 103 |
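The change above only relabels the credential field from "Password" to "API Token"; authentication itself still goes through HTTP basic auth. A hedged, Redash-independent sketch of how a Jira Cloud API token is used in that position (placeholder URL, email, and token):

```python
# Assumes the standard Jira Cloud convention: Atlassian account email plus an
# API token passed via HTTP basic auth; no Redash internals are involved here.
import requests

JIRA_URL = "https://example.atlassian.net"   # placeholder instance
USERNAME = "user@example.com"                # Atlassian account email
API_TOKEN = "your-api-token"                 # generated in Atlassian account settings

response = requests.get(
    f"{JIRA_URL}/rest/api/2/search",
    params={"jql": "project = DEMO", "maxResults": 1},
    auth=(USERNAME, API_TOKEN),              # the token sits where a password would
    timeout=30,
)
print(response.status_code)
```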
gh_patches_debug_10217 | rasdani/github-patches | git_diff | sbi-dev__sbi-1155 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Density Estimator batched sample mixes up samples from different posteriors
**Describe the bug**
Given a batched observation, i.e., x1 and x2, the sampling method mixes up samples from different distributions.
**To Reproduce**
```python
import torch
from sbi import analysis as analysis
from sbi import utils as utils
from sbi.inference.base import infer
num_dim = 3
prior = utils.BoxUniform(low=-2 * torch.ones(num_dim), high=2 * torch.ones(num_dim))
def simulator(parameter_set):
return 1.0 + parameter_set + torch.randn(parameter_set.shape) * 0.1
posterior = infer(simulator, prior, method="SNPE", num_simulations=200)
observation = torch.stack([torch.zeros(3), torch.ones(3)])
posterior_samples = posterior.posterior_estimator.sample((1000,), condition=observation)
# Outputs an multimodal distribution, but should be unimodal (mixes up samples from the two different x_os)
samples1 = posterior_samples[:,0].detach()
_ = analysis.pairplot([samples1], limits=[[-2, 2], [-2, 2], [-2, 2]], figsize=(6, 6))
```
**Additional context**
Likely a "reshaping" bug.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sbi/neural_nets/density_estimators/nflows_flow.py`
Content:
```
1 # This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed
2 # under the Apache License Version 2.0, see <https://www.apache.org/licenses/>
3
4 from typing import Tuple
5
6 import torch
7 from pyknos.nflows.flows import Flow
8 from torch import Tensor, nn
9
10 from sbi.neural_nets.density_estimators.base import DensityEstimator
11 from sbi.sbi_types import Shape
12
13
14 class NFlowsFlow(DensityEstimator):
15 r"""`nflows`- based normalizing flow density estimator.
16
17 Flow type objects already have a .log_prob() and .sample() method, so here we just
18 wrap them and add the .loss() method.
19 """
20
21 def __init__(
22 self, net: Flow, input_shape: torch.Size, condition_shape: torch.Size
23 ) -> None:
24 """Initialize density estimator which wraps flows from the `nflows` library.
25
26 Args:
27 net: The raw `nflows` flow.
28 input_shape: Event shape of the input at which the density is being
29 evaluated (and which is also the event_shape of samples).
30 condition_shape: Shape of the condition. If not provided, it will assume a
31 1D input.
32 """
33 super().__init__(net, input_shape=input_shape, condition_shape=condition_shape)
34 # TODO: Remove as soon as DensityEstimator becomes abstract
35 self.net: Flow
36
37 @property
38 def embedding_net(self) -> nn.Module:
39 r"""Return the embedding network."""
40 return self.net._embedding_net
41
42 def inverse_transform(self, input: Tensor, condition: Tensor) -> Tensor:
43 r"""Return the inverse flow-transform of the inputs given a condition.
44
45 The inverse transform is the transformation that maps the inputs back to the
46 base distribution (noise) space.
47
48 Args:
49 input: Inputs to evaluate the inverse transform on of shape
50 (*batch_shape1, input_size).
51 condition: Conditions of shape (*batch_shape2, *condition_shape).
52
53 Raises:
54 RuntimeError: If batch_shape1 and batch_shape2 are not broadcastable.
55
56 Returns:
57 noise: Transformed inputs.
58 """
59 self._check_condition_shape(condition)
60 condition_dims = len(self.condition_shape)
61
62 # PyTorch's automatic broadcasting
63 batch_shape_in = input.shape[:-1]
64 batch_shape_cond = condition.shape[:-condition_dims]
65 batch_shape = torch.broadcast_shapes(batch_shape_in, batch_shape_cond)
66 # Expand the input and condition to the same batch shape
67 input = input.expand(batch_shape + (input.shape[-1],))
68 condition = condition.expand(batch_shape + self.condition_shape)
69 # Flatten required by nflows, but now both have the same batch shape
70 input = input.reshape(-1, input.shape[-1])
71 condition = condition.reshape(-1, *self.condition_shape)
72
73 noise, _ = self.net._transorm(input, context=condition)
74 noise = noise.reshape(batch_shape)
75 return noise
76
77 def log_prob(self, input: Tensor, condition: Tensor) -> Tensor:
78 r"""Return the log probabilities of the inputs given a condition or multiple
79 i.e. batched conditions.
80
81 Args:
82 input: Inputs to evaluate the log probability on. Of shape
83 `(sample_dim, batch_dim, *event_shape)`.
84 condition: Conditions of shape `(sample_dim, batch_dim, *event_shape)`.
85
86 Raises:
87 AssertionError: If `input_batch_dim != condition_batch_dim`.
88
89 Returns:
90 Sample-wise log probabilities, shape `(input_sample_dim, input_batch_dim)`.
91 """
92 input_sample_dim = input.shape[0]
93 input_batch_dim = input.shape[1]
94 condition_batch_dim = condition.shape[0]
95 condition_event_dims = len(condition.shape[1:])
96
97 assert condition_batch_dim == input_batch_dim, (
98 f"Batch shape of condition {condition_batch_dim} and input "
99 f"{input_batch_dim} do not match."
100 )
101
102 # Nflows needs to have a single batch dimension for condition and input.
103 input = input.reshape((input_batch_dim * input_sample_dim, -1))
104
105 # Repeat the condition to match `input_batch_dim * input_sample_dim`.
106 ones_for_event_dims = (1,) * condition_event_dims # Tuple of 1s, e.g. (1, 1, 1)
107 condition = condition.repeat(input_sample_dim, *ones_for_event_dims)
108
109 log_probs = self.net.log_prob(input, context=condition)
110 return log_probs.reshape((input_sample_dim, input_batch_dim))
111
112 def loss(self, input: Tensor, condition: Tensor) -> Tensor:
113 r"""Return the negative log-probability for training the density estimator.
114
115 Args:
116 input: Inputs of shape `(batch_dim, *input_event_shape)`.
117 condition: Conditions of shape `(batch_dim, *condition_event_shape)`.
118
119 Returns:
120 Negative log-probability of shape `(batch_dim,)`.
121 """
122 return -self.log_prob(input.unsqueeze(0), condition)[0]
123
124 def sample(self, sample_shape: Shape, condition: Tensor) -> Tensor:
125 r"""Return samples from the density estimator.
126
127 Args:
128 sample_shape: Shape of the samples to return.
129 condition: Conditions of shape `(sample_dim, batch_dim, *event_shape)`.
130
131 Returns:
132 Samples of shape `(*sample_shape, condition_batch_dim)`.
133 """
134 condition_batch_dim = condition.shape[0]
135 num_samples = torch.Size(sample_shape).numel()
136
137 samples = self.net.sample(num_samples, context=condition)
138
139 return samples.reshape((
140 *sample_shape,
141 condition_batch_dim,
142 -1,
143 ))
144
145 def sample_and_log_prob(
146 self, sample_shape: torch.Size, condition: Tensor, **kwargs
147 ) -> Tuple[Tensor, Tensor]:
148 r"""Return samples and their density from the density estimator.
149
150 Args:
151 sample_shape: Shape of the samples to return.
152 condition: Conditions of shape (sample_dim, batch_dim, *event_shape).
153
154 Returns:
155 Samples of shape `(*sample_shape, condition_batch_dim, *input_event_shape)`
156 and associated log probs of shape `(*sample_shape, condition_batch_dim)`.
157 """
158 condition_batch_dim = condition.shape[0]
159 num_samples = torch.Size(sample_shape).numel()
160
161 samples, log_probs = self.net.sample_and_log_prob(
162 num_samples, context=condition
163 )
164 samples = samples.reshape((*sample_shape, condition_batch_dim, -1))
165 log_probs = log_probs.reshape((*sample_shape, -1))
166 return samples, log_probs
167
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sbi/neural_nets/density_estimators/nflows_flow.py b/sbi/neural_nets/density_estimators/nflows_flow.py
--- a/sbi/neural_nets/density_estimators/nflows_flow.py
+++ b/sbi/neural_nets/density_estimators/nflows_flow.py
@@ -135,12 +135,8 @@
num_samples = torch.Size(sample_shape).numel()
samples = self.net.sample(num_samples, context=condition)
-
- return samples.reshape((
- *sample_shape,
- condition_batch_dim,
- -1,
- ))
+ samples = samples.transpose(0, 1)
+ return samples.reshape((*sample_shape, condition_batch_dim, *self.input_shape))
def sample_and_log_prob(
self, sample_shape: torch.Size, condition: Tensor, **kwargs
| {"golden_diff": "diff --git a/sbi/neural_nets/density_estimators/nflows_flow.py b/sbi/neural_nets/density_estimators/nflows_flow.py\n--- a/sbi/neural_nets/density_estimators/nflows_flow.py\n+++ b/sbi/neural_nets/density_estimators/nflows_flow.py\n@@ -135,12 +135,8 @@\n num_samples = torch.Size(sample_shape).numel()\n \n samples = self.net.sample(num_samples, context=condition)\n-\n- return samples.reshape((\n- *sample_shape,\n- condition_batch_dim,\n- -1,\n- ))\n+ samples = samples.transpose(0, 1)\n+ return samples.reshape((*sample_shape, condition_batch_dim, *self.input_shape))\n \n def sample_and_log_prob(\n self, sample_shape: torch.Size, condition: Tensor, **kwargs\n", "issue": "Density Estimator batched sample mixes up samples from different posteriors\n**Describe the bug**\r\nGiven a batched observation, i.e., x1 and x2, the sampling method mixes up samples from different distributions.\r\n\r\n**To Reproduce**\r\n```python\r\nimport torch\r\n\r\nfrom sbi import analysis as analysis\r\nfrom sbi import utils as utils\r\nfrom sbi.inference.base import infer\r\n\r\nnum_dim = 3\r\nprior = utils.BoxUniform(low=-2 * torch.ones(num_dim), high=2 * torch.ones(num_dim))\r\n\r\ndef simulator(parameter_set):\r\n return 1.0 + parameter_set + torch.randn(parameter_set.shape) * 0.1\r\n\r\nposterior = infer(simulator, prior, method=\"SNPE\", num_simulations=200)\r\nobservation = torch.stack([torch.zeros(3), torch.ones(3)])\r\nposterior_samples = posterior.posterior_estimator.sample((1000,), condition=observation)\r\n\r\n# Outputs an multimodal distribution, but should be unimodal (mixes up samples from the two different x_os)\r\nsamples1 = posterior_samples[:,0].detach()\r\n_ = analysis.pairplot([samples1], limits=[[-2, 2], [-2, 2], [-2, 2]], figsize=(6, 6))\r\n```\r\n\r\n**Additional context**\r\n\r\nLikely a \"reshaping\" bug. \r\n\n", "before_files": [{"content": "# This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed\n# under the Apache License Version 2.0, see <https://www.apache.org/licenses/>\n\nfrom typing import Tuple\n\nimport torch\nfrom pyknos.nflows.flows import Flow\nfrom torch import Tensor, nn\n\nfrom sbi.neural_nets.density_estimators.base import DensityEstimator\nfrom sbi.sbi_types import Shape\n\n\nclass NFlowsFlow(DensityEstimator):\n r\"\"\"`nflows`- based normalizing flow density estimator.\n\n Flow type objects already have a .log_prob() and .sample() method, so here we just\n wrap them and add the .loss() method.\n \"\"\"\n\n def __init__(\n self, net: Flow, input_shape: torch.Size, condition_shape: torch.Size\n ) -> None:\n \"\"\"Initialize density estimator which wraps flows from the `nflows` library.\n\n Args:\n net: The raw `nflows` flow.\n input_shape: Event shape of the input at which the density is being\n evaluated (and which is also the event_shape of samples).\n condition_shape: Shape of the condition. 
If not provided, it will assume a\n 1D input.\n \"\"\"\n super().__init__(net, input_shape=input_shape, condition_shape=condition_shape)\n # TODO: Remove as soon as DensityEstimator becomes abstract\n self.net: Flow\n\n @property\n def embedding_net(self) -> nn.Module:\n r\"\"\"Return the embedding network.\"\"\"\n return self.net._embedding_net\n\n def inverse_transform(self, input: Tensor, condition: Tensor) -> Tensor:\n r\"\"\"Return the inverse flow-transform of the inputs given a condition.\n\n The inverse transform is the transformation that maps the inputs back to the\n base distribution (noise) space.\n\n Args:\n input: Inputs to evaluate the inverse transform on of shape\n (*batch_shape1, input_size).\n condition: Conditions of shape (*batch_shape2, *condition_shape).\n\n Raises:\n RuntimeError: If batch_shape1 and batch_shape2 are not broadcastable.\n\n Returns:\n noise: Transformed inputs.\n \"\"\"\n self._check_condition_shape(condition)\n condition_dims = len(self.condition_shape)\n\n # PyTorch's automatic broadcasting\n batch_shape_in = input.shape[:-1]\n batch_shape_cond = condition.shape[:-condition_dims]\n batch_shape = torch.broadcast_shapes(batch_shape_in, batch_shape_cond)\n # Expand the input and condition to the same batch shape\n input = input.expand(batch_shape + (input.shape[-1],))\n condition = condition.expand(batch_shape + self.condition_shape)\n # Flatten required by nflows, but now both have the same batch shape\n input = input.reshape(-1, input.shape[-1])\n condition = condition.reshape(-1, *self.condition_shape)\n\n noise, _ = self.net._transorm(input, context=condition)\n noise = noise.reshape(batch_shape)\n return noise\n\n def log_prob(self, input: Tensor, condition: Tensor) -> Tensor:\n r\"\"\"Return the log probabilities of the inputs given a condition or multiple\n i.e. batched conditions.\n\n Args:\n input: Inputs to evaluate the log probability on. Of shape\n `(sample_dim, batch_dim, *event_shape)`.\n condition: Conditions of shape `(sample_dim, batch_dim, *event_shape)`.\n\n Raises:\n AssertionError: If `input_batch_dim != condition_batch_dim`.\n\n Returns:\n Sample-wise log probabilities, shape `(input_sample_dim, input_batch_dim)`.\n \"\"\"\n input_sample_dim = input.shape[0]\n input_batch_dim = input.shape[1]\n condition_batch_dim = condition.shape[0]\n condition_event_dims = len(condition.shape[1:])\n\n assert condition_batch_dim == input_batch_dim, (\n f\"Batch shape of condition {condition_batch_dim} and input \"\n f\"{input_batch_dim} do not match.\"\n )\n\n # Nflows needs to have a single batch dimension for condition and input.\n input = input.reshape((input_batch_dim * input_sample_dim, -1))\n\n # Repeat the condition to match `input_batch_dim * input_sample_dim`.\n ones_for_event_dims = (1,) * condition_event_dims # Tuple of 1s, e.g. 
(1, 1, 1)\n condition = condition.repeat(input_sample_dim, *ones_for_event_dims)\n\n log_probs = self.net.log_prob(input, context=condition)\n return log_probs.reshape((input_sample_dim, input_batch_dim))\n\n def loss(self, input: Tensor, condition: Tensor) -> Tensor:\n r\"\"\"Return the negative log-probability for training the density estimator.\n\n Args:\n input: Inputs of shape `(batch_dim, *input_event_shape)`.\n condition: Conditions of shape `(batch_dim, *condition_event_shape)`.\n\n Returns:\n Negative log-probability of shape `(batch_dim,)`.\n \"\"\"\n return -self.log_prob(input.unsqueeze(0), condition)[0]\n\n def sample(self, sample_shape: Shape, condition: Tensor) -> Tensor:\n r\"\"\"Return samples from the density estimator.\n\n Args:\n sample_shape: Shape of the samples to return.\n condition: Conditions of shape `(sample_dim, batch_dim, *event_shape)`.\n\n Returns:\n Samples of shape `(*sample_shape, condition_batch_dim)`.\n \"\"\"\n condition_batch_dim = condition.shape[0]\n num_samples = torch.Size(sample_shape).numel()\n\n samples = self.net.sample(num_samples, context=condition)\n\n return samples.reshape((\n *sample_shape,\n condition_batch_dim,\n -1,\n ))\n\n def sample_and_log_prob(\n self, sample_shape: torch.Size, condition: Tensor, **kwargs\n ) -> Tuple[Tensor, Tensor]:\n r\"\"\"Return samples and their density from the density estimator.\n\n Args:\n sample_shape: Shape of the samples to return.\n condition: Conditions of shape (sample_dim, batch_dim, *event_shape).\n\n Returns:\n Samples of shape `(*sample_shape, condition_batch_dim, *input_event_shape)`\n and associated log probs of shape `(*sample_shape, condition_batch_dim)`.\n \"\"\"\n condition_batch_dim = condition.shape[0]\n num_samples = torch.Size(sample_shape).numel()\n\n samples, log_probs = self.net.sample_and_log_prob(\n num_samples, context=condition\n )\n samples = samples.reshape((*sample_shape, condition_batch_dim, -1))\n log_probs = log_probs.reshape((*sample_shape, -1))\n return samples, log_probs\n", "path": "sbi/neural_nets/density_estimators/nflows_flow.py"}], "after_files": [{"content": "# This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed\n# under the Apache License Version 2.0, see <https://www.apache.org/licenses/>\n\nfrom typing import Tuple\n\nimport torch\nfrom pyknos.nflows.flows import Flow\nfrom torch import Tensor, nn\n\nfrom sbi.neural_nets.density_estimators.base import DensityEstimator\nfrom sbi.sbi_types import Shape\n\n\nclass NFlowsFlow(DensityEstimator):\n r\"\"\"`nflows`- based normalizing flow density estimator.\n\n Flow type objects already have a .log_prob() and .sample() method, so here we just\n wrap them and add the .loss() method.\n \"\"\"\n\n def __init__(\n self, net: Flow, input_shape: torch.Size, condition_shape: torch.Size\n ) -> None:\n \"\"\"Initialize density estimator which wraps flows from the `nflows` library.\n\n Args:\n net: The raw `nflows` flow.\n input_shape: Event shape of the input at which the density is being\n evaluated (and which is also the event_shape of samples).\n condition_shape: Shape of the condition. 
If not provided, it will assume a\n 1D input.\n \"\"\"\n super().__init__(net, input_shape=input_shape, condition_shape=condition_shape)\n # TODO: Remove as soon as DensityEstimator becomes abstract\n self.net: Flow\n\n @property\n def embedding_net(self) -> nn.Module:\n r\"\"\"Return the embedding network.\"\"\"\n return self.net._embedding_net\n\n def inverse_transform(self, input: Tensor, condition: Tensor) -> Tensor:\n r\"\"\"Return the inverse flow-transform of the inputs given a condition.\n\n The inverse transform is the transformation that maps the inputs back to the\n base distribution (noise) space.\n\n Args:\n input: Inputs to evaluate the inverse transform on of shape\n (*batch_shape1, input_size).\n condition: Conditions of shape (*batch_shape2, *condition_shape).\n\n Raises:\n RuntimeError: If batch_shape1 and batch_shape2 are not broadcastable.\n\n Returns:\n noise: Transformed inputs.\n \"\"\"\n self._check_condition_shape(condition)\n condition_dims = len(self.condition_shape)\n\n # PyTorch's automatic broadcasting\n batch_shape_in = input.shape[:-1]\n batch_shape_cond = condition.shape[:-condition_dims]\n batch_shape = torch.broadcast_shapes(batch_shape_in, batch_shape_cond)\n # Expand the input and condition to the same batch shape\n input = input.expand(batch_shape + (input.shape[-1],))\n condition = condition.expand(batch_shape + self.condition_shape)\n # Flatten required by nflows, but now both have the same batch shape\n input = input.reshape(-1, input.shape[-1])\n condition = condition.reshape(-1, *self.condition_shape)\n\n noise, _ = self.net._transorm(input, context=condition)\n noise = noise.reshape(batch_shape)\n return noise\n\n def log_prob(self, input: Tensor, condition: Tensor) -> Tensor:\n r\"\"\"Return the log probabilities of the inputs given a condition or multiple\n i.e. batched conditions.\n\n Args:\n input: Inputs to evaluate the log probability on. Of shape\n `(sample_dim, batch_dim, *event_shape)`.\n condition: Conditions of shape `(sample_dim, batch_dim, *event_shape)`.\n\n Raises:\n AssertionError: If `input_batch_dim != condition_batch_dim`.\n\n Returns:\n Sample-wise log probabilities, shape `(input_sample_dim, input_batch_dim)`.\n \"\"\"\n input_sample_dim = input.shape[0]\n input_batch_dim = input.shape[1]\n condition_batch_dim = condition.shape[0]\n condition_event_dims = len(condition.shape[1:])\n\n assert condition_batch_dim == input_batch_dim, (\n f\"Batch shape of condition {condition_batch_dim} and input \"\n f\"{input_batch_dim} do not match.\"\n )\n\n # Nflows needs to have a single batch dimension for condition and input.\n input = input.reshape((input_batch_dim * input_sample_dim, -1))\n\n # Repeat the condition to match `input_batch_dim * input_sample_dim`.\n ones_for_event_dims = (1,) * condition_event_dims # Tuple of 1s, e.g. 
(1, 1, 1)\n condition = condition.repeat(input_sample_dim, *ones_for_event_dims)\n\n log_probs = self.net.log_prob(input, context=condition)\n return log_probs.reshape((input_sample_dim, input_batch_dim))\n\n def loss(self, input: Tensor, condition: Tensor) -> Tensor:\n r\"\"\"Return the negative log-probability for training the density estimator.\n\n Args:\n input: Inputs of shape `(batch_dim, *input_event_shape)`.\n condition: Conditions of shape `(batch_dim, *condition_event_shape)`.\n\n Returns:\n Negative log-probability of shape `(batch_dim,)`.\n \"\"\"\n return -self.log_prob(input.unsqueeze(0), condition)[0]\n\n def sample(self, sample_shape: Shape, condition: Tensor) -> Tensor:\n r\"\"\"Return samples from the density estimator.\n\n Args:\n sample_shape: Shape of the samples to return.\n condition: Conditions of shape `(sample_dim, batch_dim, *event_shape)`.\n\n Returns:\n Samples of shape `(*sample_shape, condition_batch_dim)`.\n \"\"\"\n condition_batch_dim = condition.shape[0]\n num_samples = torch.Size(sample_shape).numel()\n\n samples = self.net.sample(num_samples, context=condition)\n samples = samples.transpose(0, 1)\n return samples.reshape((*sample_shape, condition_batch_dim, *self.input_shape))\n\n def sample_and_log_prob(\n self, sample_shape: torch.Size, condition: Tensor, **kwargs\n ) -> Tuple[Tensor, Tensor]:\n r\"\"\"Return samples and their density from the density estimator.\n\n Args:\n sample_shape: Shape of the samples to return.\n condition: Conditions of shape (sample_dim, batch_dim, *event_shape).\n\n Returns:\n Samples of shape `(*sample_shape, condition_batch_dim, *input_event_shape)`\n and associated log probs of shape `(*sample_shape, condition_batch_dim)`.\n \"\"\"\n condition_batch_dim = condition.shape[0]\n num_samples = torch.Size(sample_shape).numel()\n\n samples, log_probs = self.net.sample_and_log_prob(\n num_samples, context=condition\n )\n samples = samples.reshape((*sample_shape, condition_batch_dim, -1))\n log_probs = log_probs.reshape((*sample_shape, -1))\n return samples, log_probs\n", "path": "sbi/neural_nets/density_estimators/nflows_flow.py"}]} | 2,376 | 186 |
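The `sample` change in the row above is a layout fix: `nflows` returns conditional samples grouped per context row, i.e. with shape `(context_batch, num_samples, dim)`, so reshaping straight to `(*sample_shape, batch, dim)` interleaves samples that belong to different conditions; transposing the first two axes before the reshape keeps each sample attached to its condition. A small, self-contained illustration with hypothetical sizes (a sketch, not code from the dataset):

```python
import torch

# Hypothetical sizes: 2 conditions, 3 samples per condition, 4-dim events.
B, S, D = 2, 3, 4
# nflows-style layout: one block of samples per condition -> (B, S, D)
samples = torch.arange(B * S * D, dtype=torch.float32).reshape(B, S, D)

# Reshaping directly to (S, B, D) mixes samples across conditions...
naive = samples.reshape(S, B, D)
# ...while transposing first keeps column b tied to condition b.
fixed = samples.transpose(0, 1).reshape(S, B, D)

assert not torch.equal(naive, fixed)
```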
gh_patches_debug_18582 | rasdani/github-patches | git_diff | wagtail__wagtail-423 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
test_post_reorder in editors picks unit tests failing on Sqlite
Running the unit tests under sqlite:
```
DATABASE_ENGINE=django.db.backends.sqlite3 ./runtests.py
```
results in this test failure:
```
FAIL: test_post_reorder (wagtail.wagtailsearch.tests.test_editorspicks.TestEditorsPicksEditView)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/vagrant/wagtail/wagtail/wagtailsearch/tests/test_editorspicks.py", line 222, in test_post_reorder
self.assertEqual(models.Query.get("Hello").editors_picks.all()[0], self.editors_pick_2)
AssertionError: <EditorsPick: EditorsPick object> != <EditorsPick: EditorsPick object>
----------------------------------------------------------------------
Ran 446 tests in 36.358s
FAILED (failures=1, skipped=9, expected failures=1)
Destroying test database for alias 'default'...
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/wagtailsearch/views/editorspicks.py`
Content:
```
1 from django.shortcuts import render, redirect, get_object_or_404
2 from django.contrib.auth.decorators import permission_required
3 from django.contrib import messages
4
5 from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
6 from django.utils.translation import ugettext as _
7 from django.views.decorators.vary import vary_on_headers
8
9 from wagtail.wagtailsearch import models, forms
10 from wagtail.wagtailadmin.forms import SearchForm
11
12
13 @permission_required('wagtailadmin.access_admin')
14 @vary_on_headers('X-Requested-With')
15 def index(request):
16 is_searching = False
17 page = request.GET.get('p', 1)
18 query_string = request.GET.get('q', "")
19
20 queries = models.Query.objects.filter(editors_picks__isnull=False).distinct()
21
22 # Search
23 if query_string:
24 queries = queries.filter(query_string__icontains=query_string)
25 is_searching = True
26
27 # Pagination
28 paginator = Paginator(queries, 20)
29 try:
30 queries = paginator.page(page)
31 except PageNotAnInteger:
32 queries = paginator.page(1)
33 except EmptyPage:
34 queries = paginator.page(paginator.num_pages)
35
36 if request.is_ajax():
37 return render(request, "wagtailsearch/editorspicks/results.html", {
38 'is_searching': is_searching,
39 'queries': queries,
40 'query_string': query_string,
41 })
42 else:
43 return render(request, 'wagtailsearch/editorspicks/index.html', {
44 'is_searching': is_searching,
45 'queries': queries,
46 'query_string': query_string,
47 'search_form': SearchForm(data=dict(q=query_string) if query_string else None, placeholder=_("Search editor's picks")),
48 })
49
50
51 def save_editorspicks(query, new_query, editors_pick_formset):
52 # Save
53 if editors_pick_formset.is_valid():
54 # Set sort_order
55 for i, form in enumerate(editors_pick_formset.ordered_forms):
56 form.instance.sort_order = i
57
58 editors_pick_formset.save()
59
60 # If query was changed, move all editors picks to the new query
61 if query != new_query:
62 editors_pick_formset.get_queryset().update(query=new_query)
63
64 return True
65 else:
66 return False
67
68
69 @permission_required('wagtailadmin.access_admin')
70 def add(request):
71 if request.POST:
72 # Get query
73 query_form = forms.QueryForm(request.POST)
74 if query_form.is_valid():
75 query = models.Query.get(query_form['query_string'].value())
76
77 # Save editors picks
78 editors_pick_formset = forms.EditorsPickFormSet(request.POST, instance=query)
79 if save_editorspicks(query, query, editors_pick_formset):
80 messages.success(request, _("Editor's picks for '{0}' created.").format(query))
81 return redirect('wagtailsearch_editorspicks_index')
82 else:
83 if len(editors_pick_formset.non_form_errors()):
84 messages.error(request, " ".join(error for error in editors_pick_formset.non_form_errors())) # formset level error (e.g. no forms submitted)
85 else:
86 messages.error(request, _("Recommendations have not been created due to errors")) # specific errors will be displayed within form fields
87 else:
88 editors_pick_formset = forms.EditorsPickFormSet()
89 else:
90 query_form = forms.QueryForm()
91 editors_pick_formset = forms.EditorsPickFormSet()
92
93 return render(request, 'wagtailsearch/editorspicks/add.html', {
94 'query_form': query_form,
95 'editors_pick_formset': editors_pick_formset,
96 })
97
98
99 @permission_required('wagtailadmin.access_admin')
100 def edit(request, query_id):
101 query = get_object_or_404(models.Query, id=query_id)
102
103 if request.POST:
104 # Get query
105 query_form = forms.QueryForm(request.POST)
106 # and the recommendations
107 editors_pick_formset = forms.EditorsPickFormSet(request.POST, instance=query)
108
109 if query_form.is_valid():
110 new_query = models.Query.get(query_form['query_string'].value())
111
112 # Save editors picks
113 if save_editorspicks(query, new_query, editors_pick_formset):
114 messages.success(request, _("Editor's picks for '{0}' updated.").format(new_query))
115 return redirect('wagtailsearch_editorspicks_index')
116 else:
117 if len(editors_pick_formset.non_form_errors()):
118 messages.error(request, " ".join(error for error in editors_pick_formset.non_form_errors())) # formset level error (e.g. no forms submitted)
119 else:
120 messages.error(request, _("Recommendations have not been saved due to errors")) # specific errors will be displayed within form fields
121
122 else:
123 query_form = forms.QueryForm(initial=dict(query_string=query.query_string))
124 editors_pick_formset = forms.EditorsPickFormSet(instance=query)
125
126 return render(request, 'wagtailsearch/editorspicks/edit.html', {
127 'query_form': query_form,
128 'editors_pick_formset': editors_pick_formset,
129 'query': query,
130 })
131
132
133 @permission_required('wagtailadmin.access_admin')
134 def delete(request, query_id):
135 query = get_object_or_404(models.Query, id=query_id)
136
137 if request.POST:
138 query.editors_picks.all().delete()
139 messages.success(request, _("Editor's picks deleted."))
140 return redirect('wagtailsearch_editorspicks_index')
141
142 return render(request, 'wagtailsearch/editorspicks/confirm_delete.html', {
143 'query': query,
144 })
145
```
Path: `wagtail/wagtailsearch/models.py`
Content:
```
1 import datetime
2
3 from django.db import models
4 from django.utils import timezone
5 from django.utils.encoding import python_2_unicode_compatible
6
7 from wagtail.wagtailsearch.indexed import Indexed
8 from wagtail.wagtailsearch.utils import normalise_query_string, MAX_QUERY_STRING_LENGTH
9
10
11 @python_2_unicode_compatible
12 class Query(models.Model):
13 query_string = models.CharField(max_length=MAX_QUERY_STRING_LENGTH, unique=True)
14
15 def save(self, *args, **kwargs):
16 # Normalise query string
17 self.query_string = normalise_query_string(self.query_string)
18
19 super(Query, self).save(*args, **kwargs)
20
21 def add_hit(self, date=None):
22 if date is None:
23 date = timezone.now().date()
24 daily_hits, created = QueryDailyHits.objects.get_or_create(query=self, date=date)
25 daily_hits.hits = models.F('hits') + 1
26 daily_hits.save()
27
28 def __str__(self):
29 return self.query_string
30
31 @property
32 def hits(self):
33 hits = self.daily_hits.aggregate(models.Sum('hits'))['hits__sum']
34 return hits if hits else 0
35
36 @classmethod
37 def garbage_collect(cls):
38 """
39 Deletes all Query records that have no daily hits or editors picks
40 """
41 cls.objects.filter(daily_hits__isnull=True, editors_picks__isnull=True).delete()
42
43 @classmethod
44 def get(cls, query_string):
45 return cls.objects.get_or_create(query_string=normalise_query_string(query_string))[0]
46
47 @classmethod
48 def get_most_popular(cls, date_since=None):
49 # TODO: Implement date_since
50 return cls.objects.filter(daily_hits__isnull=False).annotate(_hits=models.Sum('daily_hits__hits')).distinct().order_by('-_hits')
51
52
53 class QueryDailyHits(models.Model):
54 query = models.ForeignKey(Query, db_index=True, related_name='daily_hits')
55 date = models.DateField()
56 hits = models.IntegerField(default=0)
57
58 @classmethod
59 def garbage_collect(cls):
60 """
61 Deletes all QueryDailyHits records that are older than 7 days
62 """
63 min_date = timezone.now().date() - datetime.timedelta(days=7)
64
65 cls.objects.filter(date__lt=min_date).delete()
66
67 class Meta:
68 unique_together = (
69 ('query', 'date'),
70 )
71
72
73 class EditorsPick(models.Model):
74 query = models.ForeignKey(Query, db_index=True, related_name='editors_picks')
75 page = models.ForeignKey('wagtailcore.Page')
76 sort_order = models.IntegerField(null=True, blank=True, editable=False)
77 description = models.TextField(blank=True)
78
79 class Meta:
80 ordering = ('sort_order', )
81
82
83 # Used for tests
84
85 class SearchTest(models.Model, Indexed):
86 title = models.CharField(max_length=255)
87 content = models.TextField()
88 live = models.BooleanField(default=False)
89
90 indexed_fields = ("title", "content", "callable_indexed_field", "live")
91
92 def callable_indexed_field(self):
93 return "Callable"
94
95
96 class SearchTestChild(SearchTest):
97 extra_content = models.TextField()
98
99 indexed_fields = "extra_content"
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wagtail/wagtailsearch/models.py b/wagtail/wagtailsearch/models.py
--- a/wagtail/wagtailsearch/models.py
+++ b/wagtail/wagtailsearch/models.py
@@ -76,6 +76,9 @@
sort_order = models.IntegerField(null=True, blank=True, editable=False)
description = models.TextField(blank=True)
+ def __repr__(self):
+ return 'EditorsPick(query="' + self.query.query_string + '", page="' + self.page.title + '")'
+
class Meta:
ordering = ('sort_order', )
diff --git a/wagtail/wagtailsearch/views/editorspicks.py b/wagtail/wagtailsearch/views/editorspicks.py
--- a/wagtail/wagtailsearch/views/editorspicks.py
+++ b/wagtail/wagtailsearch/views/editorspicks.py
@@ -55,6 +55,9 @@
for i, form in enumerate(editors_pick_formset.ordered_forms):
form.instance.sort_order = i
+ # Make sure the form is marked as changed so it gets saved with the new order
+ form.has_changed = lambda: True
+
editors_pick_formset.save()
# If query was changed, move all editors picks to the new query
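The decisive line in the patch above is `form.has_changed = lambda: True`: Django's model formsets only save forms whose `has_changed()` returns true, and `sort_order` is written onto `form.instance` directly rather than through a posted field, so a pure reorder looks unchanged and is silently skipped. A minimal sketch of the pattern, detached from the view (hypothetical helper name):

```python
def save_with_order(formset):
    """Persist user-chosen ordering even when no form field was edited."""
    for position, form in enumerate(formset.ordered_forms):
        form.instance.sort_order = position
        # Without this, formset.save() skips the form because nothing in its
        # posted data differs from the initial data.
        form.has_changed = lambda: True
    formset.save()
```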
| {"golden_diff": "diff --git a/wagtail/wagtailsearch/models.py b/wagtail/wagtailsearch/models.py\n--- a/wagtail/wagtailsearch/models.py\n+++ b/wagtail/wagtailsearch/models.py\n@@ -76,6 +76,9 @@\n sort_order = models.IntegerField(null=True, blank=True, editable=False)\n description = models.TextField(blank=True)\n \n+ def __repr__(self):\n+ return 'EditorsPick(query=\"' + self.query.query_string + '\", page=\"' + self.page.title + '\")'\n+\n class Meta:\n ordering = ('sort_order', )\n \ndiff --git a/wagtail/wagtailsearch/views/editorspicks.py b/wagtail/wagtailsearch/views/editorspicks.py\n--- a/wagtail/wagtailsearch/views/editorspicks.py\n+++ b/wagtail/wagtailsearch/views/editorspicks.py\n@@ -55,6 +55,9 @@\n for i, form in enumerate(editors_pick_formset.ordered_forms):\n form.instance.sort_order = i\n \n+ # Make sure the form is marked as changed so it gets saved with the new order\n+ form.has_changed = lambda: True\n+\n editors_pick_formset.save()\n \n # If query was changed, move all editors picks to the new query\n", "issue": "test_post_reorder in editors picks unit tests failing on Sqlite\nRunning the unit tests under sqlite:\n\n```\nDATABASE_ENGINE=django.db.backends.sqlite3 ./runtests.py\n```\n\nresults in this test failure:\n\n```\nFAIL: test_post_reorder (wagtail.wagtailsearch.tests.test_editorspicks.TestEditorsPicksEditView)\n----------------------------------------------------------------------\nTraceback (most recent call last):\n File \"/home/vagrant/wagtail/wagtail/wagtailsearch/tests/test_editorspicks.py\", line 222, in test_post_reorder\n self.assertEqual(models.Query.get(\"Hello\").editors_picks.all()[0], self.editors_pick_2)\nAssertionError: <EditorsPick: EditorsPick object> != <EditorsPick: EditorsPick object>\n\n----------------------------------------------------------------------\nRan 446 tests in 36.358s\n\nFAILED (failures=1, skipped=9, expected failures=1)\nDestroying test database for alias 'default'...\n```\n\n", "before_files": [{"content": "from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.decorators import permission_required\nfrom django.contrib import messages\n\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.utils.translation import ugettext as _\nfrom django.views.decorators.vary import vary_on_headers\n\nfrom wagtail.wagtailsearch import models, forms\nfrom wagtail.wagtailadmin.forms import SearchForm\n\n\n@permission_required('wagtailadmin.access_admin')\n@vary_on_headers('X-Requested-With')\ndef index(request):\n is_searching = False\n page = request.GET.get('p', 1)\n query_string = request.GET.get('q', \"\")\n\n queries = models.Query.objects.filter(editors_picks__isnull=False).distinct()\n\n # Search\n if query_string:\n queries = queries.filter(query_string__icontains=query_string)\n is_searching = True\n\n # Pagination\n paginator = Paginator(queries, 20)\n try:\n queries = paginator.page(page)\n except PageNotAnInteger:\n queries = paginator.page(1)\n except EmptyPage:\n queries = paginator.page(paginator.num_pages)\n\n if request.is_ajax():\n return render(request, \"wagtailsearch/editorspicks/results.html\", {\n 'is_searching': is_searching,\n 'queries': queries,\n 'query_string': query_string,\n })\n else:\n return render(request, 'wagtailsearch/editorspicks/index.html', {\n 'is_searching': is_searching,\n 'queries': queries,\n 'query_string': query_string,\n 'search_form': SearchForm(data=dict(q=query_string) if query_string else None, placeholder=_(\"Search editor's 
picks\")),\n })\n\n\ndef save_editorspicks(query, new_query, editors_pick_formset):\n # Save\n if editors_pick_formset.is_valid():\n # Set sort_order\n for i, form in enumerate(editors_pick_formset.ordered_forms):\n form.instance.sort_order = i\n\n editors_pick_formset.save()\n\n # If query was changed, move all editors picks to the new query\n if query != new_query:\n editors_pick_formset.get_queryset().update(query=new_query)\n\n return True\n else:\n return False\n\n\n@permission_required('wagtailadmin.access_admin')\ndef add(request):\n if request.POST:\n # Get query\n query_form = forms.QueryForm(request.POST)\n if query_form.is_valid():\n query = models.Query.get(query_form['query_string'].value())\n\n # Save editors picks\n editors_pick_formset = forms.EditorsPickFormSet(request.POST, instance=query)\n if save_editorspicks(query, query, editors_pick_formset):\n messages.success(request, _(\"Editor's picks for '{0}' created.\").format(query))\n return redirect('wagtailsearch_editorspicks_index')\n else:\n if len(editors_pick_formset.non_form_errors()):\n messages.error(request, \" \".join(error for error in editors_pick_formset.non_form_errors())) # formset level error (e.g. no forms submitted)\n else:\n messages.error(request, _(\"Recommendations have not been created due to errors\")) # specific errors will be displayed within form fields\n else:\n editors_pick_formset = forms.EditorsPickFormSet()\n else:\n query_form = forms.QueryForm()\n editors_pick_formset = forms.EditorsPickFormSet()\n\n return render(request, 'wagtailsearch/editorspicks/add.html', {\n 'query_form': query_form,\n 'editors_pick_formset': editors_pick_formset,\n })\n\n\n@permission_required('wagtailadmin.access_admin')\ndef edit(request, query_id):\n query = get_object_or_404(models.Query, id=query_id)\n\n if request.POST:\n # Get query\n query_form = forms.QueryForm(request.POST)\n # and the recommendations\n editors_pick_formset = forms.EditorsPickFormSet(request.POST, instance=query)\n\n if query_form.is_valid():\n new_query = models.Query.get(query_form['query_string'].value())\n\n # Save editors picks\n if save_editorspicks(query, new_query, editors_pick_formset):\n messages.success(request, _(\"Editor's picks for '{0}' updated.\").format(new_query))\n return redirect('wagtailsearch_editorspicks_index')\n else:\n if len(editors_pick_formset.non_form_errors()):\n messages.error(request, \" \".join(error for error in editors_pick_formset.non_form_errors())) # formset level error (e.g. 
no forms submitted)\n else:\n messages.error(request, _(\"Recommendations have not been saved due to errors\")) # specific errors will be displayed within form fields\n\n else:\n query_form = forms.QueryForm(initial=dict(query_string=query.query_string))\n editors_pick_formset = forms.EditorsPickFormSet(instance=query)\n\n return render(request, 'wagtailsearch/editorspicks/edit.html', {\n 'query_form': query_form,\n 'editors_pick_formset': editors_pick_formset,\n 'query': query,\n })\n\n\n@permission_required('wagtailadmin.access_admin')\ndef delete(request, query_id):\n query = get_object_or_404(models.Query, id=query_id)\n\n if request.POST:\n query.editors_picks.all().delete()\n messages.success(request, _(\"Editor's picks deleted.\"))\n return redirect('wagtailsearch_editorspicks_index')\n\n return render(request, 'wagtailsearch/editorspicks/confirm_delete.html', {\n 'query': query,\n })\n", "path": "wagtail/wagtailsearch/views/editorspicks.py"}, {"content": "import datetime\n\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.encoding import python_2_unicode_compatible\n\nfrom wagtail.wagtailsearch.indexed import Indexed\nfrom wagtail.wagtailsearch.utils import normalise_query_string, MAX_QUERY_STRING_LENGTH\n\n\n@python_2_unicode_compatible\nclass Query(models.Model):\n query_string = models.CharField(max_length=MAX_QUERY_STRING_LENGTH, unique=True)\n\n def save(self, *args, **kwargs):\n # Normalise query string\n self.query_string = normalise_query_string(self.query_string)\n\n super(Query, self).save(*args, **kwargs)\n\n def add_hit(self, date=None):\n if date is None:\n date = timezone.now().date()\n daily_hits, created = QueryDailyHits.objects.get_or_create(query=self, date=date)\n daily_hits.hits = models.F('hits') + 1\n daily_hits.save()\n\n def __str__(self):\n return self.query_string\n\n @property\n def hits(self):\n hits = self.daily_hits.aggregate(models.Sum('hits'))['hits__sum']\n return hits if hits else 0\n\n @classmethod\n def garbage_collect(cls):\n \"\"\"\n Deletes all Query records that have no daily hits or editors picks\n \"\"\"\n cls.objects.filter(daily_hits__isnull=True, editors_picks__isnull=True).delete()\n\n @classmethod\n def get(cls, query_string):\n return cls.objects.get_or_create(query_string=normalise_query_string(query_string))[0]\n\n @classmethod\n def get_most_popular(cls, date_since=None):\n # TODO: Implement date_since\n return cls.objects.filter(daily_hits__isnull=False).annotate(_hits=models.Sum('daily_hits__hits')).distinct().order_by('-_hits')\n\n\nclass QueryDailyHits(models.Model):\n query = models.ForeignKey(Query, db_index=True, related_name='daily_hits')\n date = models.DateField()\n hits = models.IntegerField(default=0)\n\n @classmethod\n def garbage_collect(cls):\n \"\"\"\n Deletes all QueryDailyHits records that are older than 7 days\n \"\"\"\n min_date = timezone.now().date() - datetime.timedelta(days=7)\n\n cls.objects.filter(date__lt=min_date).delete()\n\n class Meta:\n unique_together = (\n ('query', 'date'),\n )\n\n\nclass EditorsPick(models.Model):\n query = models.ForeignKey(Query, db_index=True, related_name='editors_picks')\n page = models.ForeignKey('wagtailcore.Page')\n sort_order = models.IntegerField(null=True, blank=True, editable=False)\n description = models.TextField(blank=True)\n\n class Meta:\n ordering = ('sort_order', )\n\n\n# Used for tests\n\nclass SearchTest(models.Model, Indexed):\n title = models.CharField(max_length=255)\n content = models.TextField()\n live = 
models.BooleanField(default=False)\n\n indexed_fields = (\"title\", \"content\", \"callable_indexed_field\", \"live\")\n\n def callable_indexed_field(self):\n return \"Callable\"\n\n\nclass SearchTestChild(SearchTest):\n extra_content = models.TextField()\n\n indexed_fields = \"extra_content\"\n", "path": "wagtail/wagtailsearch/models.py"}], "after_files": [{"content": "from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.decorators import permission_required\nfrom django.contrib import messages\n\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.utils.translation import ugettext as _\nfrom django.views.decorators.vary import vary_on_headers\n\nfrom wagtail.wagtailsearch import models, forms\nfrom wagtail.wagtailadmin.forms import SearchForm\n\n\n@permission_required('wagtailadmin.access_admin')\n@vary_on_headers('X-Requested-With')\ndef index(request):\n is_searching = False\n page = request.GET.get('p', 1)\n query_string = request.GET.get('q', \"\")\n\n queries = models.Query.objects.filter(editors_picks__isnull=False).distinct()\n\n # Search\n if query_string:\n queries = queries.filter(query_string__icontains=query_string)\n is_searching = True\n\n # Pagination\n paginator = Paginator(queries, 20)\n try:\n queries = paginator.page(page)\n except PageNotAnInteger:\n queries = paginator.page(1)\n except EmptyPage:\n queries = paginator.page(paginator.num_pages)\n\n if request.is_ajax():\n return render(request, \"wagtailsearch/editorspicks/results.html\", {\n 'is_searching': is_searching,\n 'queries': queries,\n 'query_string': query_string,\n })\n else:\n return render(request, 'wagtailsearch/editorspicks/index.html', {\n 'is_searching': is_searching,\n 'queries': queries,\n 'query_string': query_string,\n 'search_form': SearchForm(data=dict(q=query_string) if query_string else None, placeholder=_(\"Search editor's picks\")),\n })\n\n\ndef save_editorspicks(query, new_query, editors_pick_formset):\n # Save\n if editors_pick_formset.is_valid():\n # Set sort_order\n for i, form in enumerate(editors_pick_formset.ordered_forms):\n form.instance.sort_order = i\n\n # Make sure the form is marked as changed so it gets saved with the new order\n form.has_changed = lambda: True\n\n editors_pick_formset.save()\n\n # If query was changed, move all editors picks to the new query\n if query != new_query:\n editors_pick_formset.get_queryset().update(query=new_query)\n\n return True\n else:\n return False\n\n\n@permission_required('wagtailadmin.access_admin')\ndef add(request):\n if request.POST:\n # Get query\n query_form = forms.QueryForm(request.POST)\n if query_form.is_valid():\n query = models.Query.get(query_form['query_string'].value())\n\n # Save editors picks\n editors_pick_formset = forms.EditorsPickFormSet(request.POST, instance=query)\n if save_editorspicks(query, query, editors_pick_formset):\n messages.success(request, _(\"Editor's picks for '{0}' created.\").format(query))\n return redirect('wagtailsearch_editorspicks_index')\n else:\n if len(editors_pick_formset.non_form_errors()):\n messages.error(request, \" \".join(error for error in editors_pick_formset.non_form_errors())) # formset level error (e.g. 
no forms submitted)\n else:\n messages.error(request, _(\"Recommendations have not been created due to errors\")) # specific errors will be displayed within form fields\n else:\n editors_pick_formset = forms.EditorsPickFormSet()\n else:\n query_form = forms.QueryForm()\n editors_pick_formset = forms.EditorsPickFormSet()\n\n return render(request, 'wagtailsearch/editorspicks/add.html', {\n 'query_form': query_form,\n 'editors_pick_formset': editors_pick_formset,\n })\n\n\n@permission_required('wagtailadmin.access_admin')\ndef edit(request, query_id):\n query = get_object_or_404(models.Query, id=query_id)\n\n if request.POST:\n # Get query\n query_form = forms.QueryForm(request.POST)\n # and the recommendations\n editors_pick_formset = forms.EditorsPickFormSet(request.POST, instance=query)\n\n if query_form.is_valid():\n new_query = models.Query.get(query_form['query_string'].value())\n\n # Save editors picks\n if save_editorspicks(query, new_query, editors_pick_formset):\n messages.success(request, _(\"Editor's picks for '{0}' updated.\").format(new_query))\n return redirect('wagtailsearch_editorspicks_index')\n else:\n if len(editors_pick_formset.non_form_errors()):\n messages.error(request, \" \".join(error for error in editors_pick_formset.non_form_errors())) # formset level error (e.g. no forms submitted)\n else:\n messages.error(request, _(\"Recommendations have not been saved due to errors\")) # specific errors will be displayed within form fields\n\n else:\n query_form = forms.QueryForm(initial=dict(query_string=query.query_string))\n editors_pick_formset = forms.EditorsPickFormSet(instance=query)\n\n return render(request, 'wagtailsearch/editorspicks/edit.html', {\n 'query_form': query_form,\n 'editors_pick_formset': editors_pick_formset,\n 'query': query,\n })\n\n\n@permission_required('wagtailadmin.access_admin')\ndef delete(request, query_id):\n query = get_object_or_404(models.Query, id=query_id)\n\n if request.POST:\n query.editors_picks.all().delete()\n messages.success(request, _(\"Editor's picks deleted.\"))\n return redirect('wagtailsearch_editorspicks_index')\n\n return render(request, 'wagtailsearch/editorspicks/confirm_delete.html', {\n 'query': query,\n })\n", "path": "wagtail/wagtailsearch/views/editorspicks.py"}, {"content": "import datetime\n\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.encoding import python_2_unicode_compatible\n\nfrom wagtail.wagtailsearch.indexed import Indexed\nfrom wagtail.wagtailsearch.utils import normalise_query_string, MAX_QUERY_STRING_LENGTH\n\n\n@python_2_unicode_compatible\nclass Query(models.Model):\n query_string = models.CharField(max_length=MAX_QUERY_STRING_LENGTH, unique=True)\n\n def save(self, *args, **kwargs):\n # Normalise query string\n self.query_string = normalise_query_string(self.query_string)\n\n super(Query, self).save(*args, **kwargs)\n\n def add_hit(self, date=None):\n if date is None:\n date = timezone.now().date()\n daily_hits, created = QueryDailyHits.objects.get_or_create(query=self, date=date)\n daily_hits.hits = models.F('hits') + 1\n daily_hits.save()\n\n def __str__(self):\n return self.query_string\n\n @property\n def hits(self):\n hits = self.daily_hits.aggregate(models.Sum('hits'))['hits__sum']\n return hits if hits else 0\n\n @classmethod\n def garbage_collect(cls):\n \"\"\"\n Deletes all Query records that have no daily hits or editors picks\n \"\"\"\n cls.objects.filter(daily_hits__isnull=True, editors_picks__isnull=True).delete()\n\n @classmethod\n def get(cls, 
query_string):\n return cls.objects.get_or_create(query_string=normalise_query_string(query_string))[0]\n\n @classmethod\n def get_most_popular(cls, date_since=None):\n # TODO: Implement date_since\n return cls.objects.filter(daily_hits__isnull=False).annotate(_hits=models.Sum('daily_hits__hits')).distinct().order_by('-_hits')\n\n\nclass QueryDailyHits(models.Model):\n query = models.ForeignKey(Query, db_index=True, related_name='daily_hits')\n date = models.DateField()\n hits = models.IntegerField(default=0)\n\n @classmethod\n def garbage_collect(cls):\n \"\"\"\n Deletes all QueryDailyHits records that are older than 7 days\n \"\"\"\n min_date = timezone.now().date() - datetime.timedelta(days=7)\n\n cls.objects.filter(date__lt=min_date).delete()\n\n class Meta:\n unique_together = (\n ('query', 'date'),\n )\n\n\nclass EditorsPick(models.Model):\n query = models.ForeignKey(Query, db_index=True, related_name='editors_picks')\n page = models.ForeignKey('wagtailcore.Page')\n sort_order = models.IntegerField(null=True, blank=True, editable=False)\n description = models.TextField(blank=True)\n\n def __repr__(self):\n return 'EditorsPick(query=\"' + self.query.query_string + '\", page=\"' + self.page.title + '\")'\n\n class Meta:\n ordering = ('sort_order', )\n\n\n# Used for tests\n\nclass SearchTest(models.Model, Indexed):\n title = models.CharField(max_length=255)\n content = models.TextField()\n live = models.BooleanField(default=False)\n\n indexed_fields = (\"title\", \"content\", \"callable_indexed_field\", \"live\")\n\n def callable_indexed_field(self):\n return \"Callable\"\n\n\nclass SearchTestChild(SearchTest):\n extra_content = models.TextField()\n\n indexed_fields = \"extra_content\"\n", "path": "wagtail/wagtailsearch/models.py"}]} | 2,917 | 286 |
gh_patches_debug_35746 | rasdani/github-patches | git_diff | vispy__vispy-1391 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SceneGraph: HowTo view single scene in different viewboxes
Using https://github.com/vispy/vispy/blob/master/examples/basics/scene/one_scene_four_cams.py to view a single scene in four different viewboxes doesn't work.
The scene is actually generated four times, not only once. There are reminders of multi-parenting commented out in the example, but this won't work any more (since removal of multi-parenting).
Is it possible to have one scene viewed from different angles (e.g. top view, front view and side view) without recreating the scene four times?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/basics/scene/one_scene_four_cams.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # -----------------------------------------------------------------------------
3 # Copyright (c) Vispy Development Team. All Rights Reserved.
4 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
5 # -----------------------------------------------------------------------------
6 # vispy: gallery 2
7
8 """
9 Demonstrating a single scene that is shown in four different viewboxes,
10 each with a different camera.
11 """
12
13 # todo: the panzoom camera sometimes work, sometimes not. Not sure why.
14 # we should probably make iterating over children deterministic, so that
15 # an error like this becomes easier to reproduce ...
16
17 import sys
18
19 from vispy import app, scene, io
20
21 canvas = scene.SceneCanvas(keys='interactive')
22 canvas.size = 800, 600
23 canvas.show()
24
25 # Create two ViewBoxes, place side-by-side
26 vb1 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)
27 vb2 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)
28 vb3 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)
29 vb4 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)
30 scenes = vb1.scene, vb2.scene, vb3.scene, vb4.scene
31
32 # Put viewboxes in a grid
33 grid = canvas.central_widget.add_grid()
34 grid.padding = 6
35 grid.add_widget(vb1, 0, 0)
36 grid.add_widget(vb2, 0, 1)
37 grid.add_widget(vb3, 1, 0)
38 grid.add_widget(vb4, 1, 1)
39
40 # Create some visuals to show
41 # AK: Ideally, we could just create one visual that is present in all
42 # scenes, but that results in flicker for the PanZoomCamera, I suspect
43 # due to errors in transform caching.
44 im1 = io.load_crate().astype('float32') / 255
45 #image1 = scene.visuals.Image(im1, grid=(20, 20), parent=scenes)
46 for par in scenes:
47 image = scene.visuals.Image(im1, grid=(20, 20), parent=par)
48
49 #vol1 = np.load(io.load_data_file('volume/stent.npz'))['arr_0']
50 #volume1 = scene.visuals.Volume(vol1, parent=scenes)
51 #volume1.transform = scene.STTransform(translate=(0, 0, 10))
52
53 # Assign cameras
54 vb1.camera = scene.BaseCamera()
55 vb2.camera = scene.PanZoomCamera()
56 vb3.camera = scene.TurntableCamera()
57 vb4.camera = scene.FlyCamera()
58
59
60 # If True, show a cuboid at each camera
61 if False:
62 cube = scene.visuals.Cube((3, 3, 5))
63 cube.transform = scene.STTransform(translate=(0, 0, 6))
64 for vb in (vb1, vb2, vb3, vb4):
65 vb.camera.parents = scenes
66 cube.add_parent(vb.camera)
67
68 if __name__ == '__main__':
69 if sys.flags.interactive != 1:
70 app.run()
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/basics/scene/one_scene_four_cams.py b/examples/basics/scene/one_scene_four_cams.py
--- a/examples/basics/scene/one_scene_four_cams.py
+++ b/examples/basics/scene/one_scene_four_cams.py
@@ -8,11 +8,12 @@
"""
Demonstrating a single scene that is shown in four different viewboxes,
each with a different camera.
-"""
-# todo: the panzoom camera sometimes work, sometimes not. Not sure why.
-# we should probably make iterating over children deterministic, so that
-# an error like this becomes easier to reproduce ...
+Note:
+ This example just creates four scenes using the same visual.
+ Multiple views are currently not available. See #1124 how this could
+ be achieved.
+"""
import sys
@@ -22,7 +23,7 @@
canvas.size = 800, 600
canvas.show()
-# Create two ViewBoxes, place side-by-side
+# Create four ViewBoxes
vb1 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)
vb2 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)
vb3 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)
@@ -38,33 +39,16 @@
grid.add_widget(vb4, 1, 1)
# Create some visuals to show
-# AK: Ideally, we could just create one visual that is present in all
-# scenes, but that results in flicker for the PanZoomCamera, I suspect
-# due to errors in transform caching.
im1 = io.load_crate().astype('float32') / 255
-#image1 = scene.visuals.Image(im1, grid=(20, 20), parent=scenes)
for par in scenes:
image = scene.visuals.Image(im1, grid=(20, 20), parent=par)
-#vol1 = np.load(io.load_data_file('volume/stent.npz'))['arr_0']
-#volume1 = scene.visuals.Volume(vol1, parent=scenes)
-#volume1.transform = scene.STTransform(translate=(0, 0, 10))
-
# Assign cameras
vb1.camera = scene.BaseCamera()
vb2.camera = scene.PanZoomCamera()
vb3.camera = scene.TurntableCamera()
vb4.camera = scene.FlyCamera()
-
-# If True, show a cuboid at each camera
-if False:
- cube = scene.visuals.Cube((3, 3, 5))
- cube.transform = scene.STTransform(translate=(0, 0, 6))
- for vb in (vb1, vb2, vb3, vb4):
- vb.camera.parents = scenes
- cube.add_parent(vb.camera)
-
if __name__ == '__main__':
if sys.flags.interactive != 1:
app.run()
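The rewritten example reflects the constraint behind the issue: after multi-parenting was removed, every scene-graph node has exactly one parent (and one transform chain), so a single visual cannot appear in several viewboxes, and each sub-scene gets its own visual built from the same data array. Assuming the `scene`, `im1` and viewboxes from the example above, the single-parent behaviour looks like this:

```python
# A node has a single parent: assigning a new one moves the visual,
# it does not make it visible in both sub-scenes.
image = scene.visuals.Image(im1, grid=(20, 20), parent=vb1.scene)
image.parent = vb2.scene  # now drawn only in vb2; vb1 no longer shows it
```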
| {"golden_diff": "diff --git a/examples/basics/scene/one_scene_four_cams.py b/examples/basics/scene/one_scene_four_cams.py\n--- a/examples/basics/scene/one_scene_four_cams.py\n+++ b/examples/basics/scene/one_scene_four_cams.py\n@@ -8,11 +8,12 @@\n \"\"\"\n Demonstrating a single scene that is shown in four different viewboxes,\n each with a different camera.\n-\"\"\"\n \n-# todo: the panzoom camera sometimes work, sometimes not. Not sure why.\n-# we should probably make iterating over children deterministic, so that\n-# an error like this becomes easier to reproduce ...\n+Note:\n+ This example just creates four scenes using the same visual.\n+ Multiple views are currently not available. See #1124 how this could\n+ be achieved.\n+\"\"\"\n \n import sys\n \n@@ -22,7 +23,7 @@\n canvas.size = 800, 600\n canvas.show()\n \n-# Create two ViewBoxes, place side-by-side\n+# Create four ViewBoxes\n vb1 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)\n vb2 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)\n vb3 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)\n@@ -38,33 +39,16 @@\n grid.add_widget(vb4, 1, 1)\n \n # Create some visuals to show\n-# AK: Ideally, we could just create one visual that is present in all\n-# scenes, but that results in flicker for the PanZoomCamera, I suspect\n-# due to errors in transform caching.\n im1 = io.load_crate().astype('float32') / 255\n-#image1 = scene.visuals.Image(im1, grid=(20, 20), parent=scenes)\n for par in scenes:\n image = scene.visuals.Image(im1, grid=(20, 20), parent=par)\n \n-#vol1 = np.load(io.load_data_file('volume/stent.npz'))['arr_0']\n-#volume1 = scene.visuals.Volume(vol1, parent=scenes)\n-#volume1.transform = scene.STTransform(translate=(0, 0, 10))\n-\n # Assign cameras\n vb1.camera = scene.BaseCamera()\n vb2.camera = scene.PanZoomCamera()\n vb3.camera = scene.TurntableCamera()\n vb4.camera = scene.FlyCamera()\n \n-\n-# If True, show a cuboid at each camera\n-if False:\n- cube = scene.visuals.Cube((3, 3, 5))\n- cube.transform = scene.STTransform(translate=(0, 0, 6))\n- for vb in (vb1, vb2, vb3, vb4):\n- vb.camera.parents = scenes\n- cube.add_parent(vb.camera)\n-\n if __name__ == '__main__':\n if sys.flags.interactive != 1:\n app.run()\n", "issue": "SceneGraph: HowTo view single scene in different viewboxes\nUsing https://github.com/vispy/vispy/blob/master/examples/basics/scene/one_scene_four_cams.py to view a single scene in four different viewboxes doesn't work.\n\nThe scene is actually generated four times, not only once. There are reminders of multi-parenting commented out in the example, but this won't work any more (since removal of multi-parenting).\n\nIs it possible to have one scene viewed from different angels (eg. top view, front view and side view) without recreating the scene four times?\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n# -----------------------------------------------------------------------------\n# vispy: gallery 2\n\n\"\"\"\nDemonstrating a single scene that is shown in four different viewboxes,\neach with a different camera.\n\"\"\"\n\n# todo: the panzoom camera sometimes work, sometimes not. 
Not sure why.\n# we should probably make iterating over children deterministic, so that\n# an error like this becomes easier to reproduce ...\n\nimport sys\n\nfrom vispy import app, scene, io\n\ncanvas = scene.SceneCanvas(keys='interactive')\ncanvas.size = 800, 600\ncanvas.show()\n\n# Create two ViewBoxes, place side-by-side\nvb1 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)\nvb2 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)\nvb3 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)\nvb4 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)\nscenes = vb1.scene, vb2.scene, vb3.scene, vb4.scene\n\n# Put viewboxes in a grid\ngrid = canvas.central_widget.add_grid()\ngrid.padding = 6\ngrid.add_widget(vb1, 0, 0)\ngrid.add_widget(vb2, 0, 1)\ngrid.add_widget(vb3, 1, 0)\ngrid.add_widget(vb4, 1, 1)\n\n# Create some visuals to show\n# AK: Ideally, we could just create one visual that is present in all\n# scenes, but that results in flicker for the PanZoomCamera, I suspect\n# due to errors in transform caching.\nim1 = io.load_crate().astype('float32') / 255\n#image1 = scene.visuals.Image(im1, grid=(20, 20), parent=scenes)\nfor par in scenes:\n image = scene.visuals.Image(im1, grid=(20, 20), parent=par)\n\n#vol1 = np.load(io.load_data_file('volume/stent.npz'))['arr_0']\n#volume1 = scene.visuals.Volume(vol1, parent=scenes)\n#volume1.transform = scene.STTransform(translate=(0, 0, 10))\n\n# Assign cameras\nvb1.camera = scene.BaseCamera()\nvb2.camera = scene.PanZoomCamera()\nvb3.camera = scene.TurntableCamera()\nvb4.camera = scene.FlyCamera()\n\n\n# If True, show a cuboid at each camera\nif False:\n cube = scene.visuals.Cube((3, 3, 5))\n cube.transform = scene.STTransform(translate=(0, 0, 6))\n for vb in (vb1, vb2, vb3, vb4):\n vb.camera.parents = scenes\n cube.add_parent(vb.camera)\n\nif __name__ == '__main__':\n if sys.flags.interactive != 1:\n app.run()\n", "path": "examples/basics/scene/one_scene_four_cams.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n# -----------------------------------------------------------------------------\n# vispy: gallery 2\n\n\"\"\"\nDemonstrating a single scene that is shown in four different viewboxes,\neach with a different camera.\n\nNote:\n This example just creates four scenes using the same visual.\n Multiple views are currently not available. 
See #1124 how this could\n be achieved.\n\"\"\"\n\nimport sys\n\nfrom vispy import app, scene, io\n\ncanvas = scene.SceneCanvas(keys='interactive')\ncanvas.size = 800, 600\ncanvas.show()\n\n# Create four ViewBoxes\nvb1 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)\nvb2 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)\nvb3 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)\nvb4 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)\nscenes = vb1.scene, vb2.scene, vb3.scene, vb4.scene\n\n# Put viewboxes in a grid\ngrid = canvas.central_widget.add_grid()\ngrid.padding = 6\ngrid.add_widget(vb1, 0, 0)\ngrid.add_widget(vb2, 0, 1)\ngrid.add_widget(vb3, 1, 0)\ngrid.add_widget(vb4, 1, 1)\n\n# Create some visuals to show\nim1 = io.load_crate().astype('float32') / 255\nfor par in scenes:\n image = scene.visuals.Image(im1, grid=(20, 20), parent=par)\n\n# Assign cameras\nvb1.camera = scene.BaseCamera()\nvb2.camera = scene.PanZoomCamera()\nvb3.camera = scene.TurntableCamera()\nvb4.camera = scene.FlyCamera()\n\nif __name__ == '__main__':\n if sys.flags.interactive != 1:\n app.run()\n", "path": "examples/basics/scene/one_scene_four_cams.py"}]} | 1,181 | 653 |
gh_patches_debug_26263 | rasdani/github-patches | git_diff | pypa__pip-2303 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Selfcheck failure on Windows
I get this warning all the time:
```
There was an error checking the latest version of pip
Traceback (most recent call last):
File "C:\Python27\lib\site-packages\pip\utils\outdated.py", line 115, in pip_version_check
state.save(pypi_version, current_time)
File "C:\Python27\lib\site-packages\pip\utils\outdated.py", line 62, in save
with open(self.statefile_path) as statefile:
IOError: [Errno 2] No such file or directory: u'C:\\Users\\ionel_000\\AppData\\Local\\pip\\Cache\\selfcheck.json'
```
If I create the file, it complains about invalid JSON. I've put `{}` inside and the warning has gone away, but this seems very wrong to me.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pip/utils/outdated.py`
Content:
```
1 from __future__ import absolute_import
2
3 import datetime
4 import json
5 import logging
6 import os.path
7 import sys
8
9 from pip._vendor import lockfile
10 from pip._vendor import pkg_resources
11
12 from pip.compat import total_seconds
13 from pip.index import PyPI
14 from pip.locations import USER_CACHE_DIR, running_under_virtualenv
15
16
17 SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ"
18
19
20 logger = logging.getLogger(__name__)
21
22
23 class VirtualenvSelfCheckState(object):
24 def __init__(self):
25 self.statefile_path = os.path.join(sys.prefix, "pip-selfcheck.json")
26
27 # Load the existing state
28 try:
29 with open(self.statefile_path) as statefile:
30 self.state = json.load(statefile)
31 except (IOError, ValueError):
32 self.state = {}
33
34 def save(self, pypi_version, current_time):
35 # Attempt to write out our version check file
36 with open(self.statefile_path, "w") as statefile:
37 json.dump(
38 {
39 "last_check": current_time.strftime(SELFCHECK_DATE_FMT),
40 "pypi_version": pypi_version,
41 },
42 statefile,
43 sort_keys=True,
44 separators=(",", ":")
45 )
46
47
48 class GlobalSelfCheckState(object):
49 def __init__(self):
50 self.statefile_path = os.path.join(USER_CACHE_DIR, "selfcheck.json")
51
52 # Load the existing state
53 try:
54 with open(self.statefile_path) as statefile:
55 self.state = json.load(statefile)[sys.prefix]
56 except (IOError, ValueError, KeyError):
57 self.state = {}
58
59 def save(self, pypi_version, current_time):
60 # Attempt to write out our version check file
61 with lockfile.LockFile(self.statefile_path):
62 with open(self.statefile_path) as statefile:
63 state = json.load(statefile)
64
65 state[sys.prefix] = {
66 "last_check": current_time.strftime(SELFCHECK_DATE_FMT),
67 "pypi_version": pypi_version,
68 }
69
70 with open(self.statefile_path, "w") as statefile:
71 json.dump(state, statefile, sort_keys=True,
72 separators=(",", ":"))
73
74
75 def load_selfcheck_statefile():
76 if running_under_virtualenv():
77 return VirtualenvSelfCheckState()
78 else:
79 return GlobalSelfCheckState()
80
81
82 def pip_version_check(session):
83 """Check for an update for pip.
84
85 Limit the frequency of checks to once per week. State is stored either in
86 the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix
87 of the pip script path.
88 """
89 import pip # imported here to prevent circular imports
90 pypi_version = None
91
92 try:
93 state = load_selfcheck_statefile()
94
95 current_time = datetime.datetime.utcnow()
96 # Determine if we need to refresh the state
97 if "last_check" in state.state and "pypi_version" in state.state:
98 last_check = datetime.datetime.strptime(
99 state.state["last_check"],
100 SELFCHECK_DATE_FMT
101 )
102 if total_seconds(current_time - last_check) < 7 * 24 * 60 * 60:
103 pypi_version = state.state["pypi_version"]
104
105 # Refresh the version if we need to or just see if we need to warn
106 if pypi_version is None:
107 resp = session.get(
108 PyPI.pip_json_url,
109 headers={"Accept": "application/json"},
110 )
111 resp.raise_for_status()
112 pypi_version = resp.json()["info"]["version"]
113
114 # save that we've performed a check
115 state.save(pypi_version, current_time)
116
117 pip_version = pkg_resources.parse_version(pip.__version__)
118
119 # Determine if our pypi_version is older
120 if pip_version < pkg_resources.parse_version(pypi_version):
121 logger.warning(
122 "You are using pip version %s, however version %s is "
123 "available.\nYou should consider upgrading via the "
124 "'pip install --upgrade pip' command." % (pip.__version__,
125 pypi_version)
126 )
127
128 except Exception:
129 logger.debug(
130 "There was an error checking the latest version of pip",
131 exc_info=True,
132 )
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pip/utils/outdated.py b/pip/utils/outdated.py
--- a/pip/utils/outdated.py
+++ b/pip/utils/outdated.py
@@ -1,6 +1,7 @@
from __future__ import absolute_import
import datetime
+import errno
import json
import logging
import os.path
@@ -12,6 +13,7 @@
from pip.compat import total_seconds
from pip.index import PyPI
from pip.locations import USER_CACHE_DIR, running_under_virtualenv
+from pip.utils.filesystem import check_path_owner
SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ"
@@ -57,6 +59,19 @@
self.state = {}
def save(self, pypi_version, current_time):
+ # Check to make sure that we own the directory
+ if not check_path_owner(
+ os.path.dirname(self.statefile_path), os.geteuid()):
+ return
+
+ # Now that we've ensured the directory is owned by this user, we'll go
+ # ahead and make sure that all our directories are created.
+ try:
+ os.makedirs(os.path.dirname(self.statefile_path))
+ except OSError as exc:
+ if exc.errno != errno.EEXIST:
+ raise
+
# Attempt to write out our version check file
with lockfile.LockFile(self.statefile_path):
with open(self.statefile_path) as statefile:
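The traceback in the issue comes from `save()` assuming the cache directory for `selfcheck.json` already exists. The patch above makes `save()` stop assuming that: it returns early if the cache path is not owned by the current user, then creates the missing directory, treating a concurrent "already exists" as benign. The directory-creation idiom in isolation (Python 2/3 style, mirroring the patch):

```python
import errno
import os


def ensure_dir(path):
    """Create `path` if it is missing; tolerate it already existing."""
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise
```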
| {"golden_diff": "diff --git a/pip/utils/outdated.py b/pip/utils/outdated.py\n--- a/pip/utils/outdated.py\n+++ b/pip/utils/outdated.py\n@@ -1,6 +1,7 @@\n from __future__ import absolute_import\n \n import datetime\n+import errno\n import json\n import logging\n import os.path\n@@ -12,6 +13,7 @@\n from pip.compat import total_seconds\n from pip.index import PyPI\n from pip.locations import USER_CACHE_DIR, running_under_virtualenv\n+from pip.utils.filesystem import check_path_owner\n \n \n SELFCHECK_DATE_FMT = \"%Y-%m-%dT%H:%M:%SZ\"\n@@ -57,6 +59,19 @@\n self.state = {}\n \n def save(self, pypi_version, current_time):\n+ # Check to make sure that we own the directory\n+ if not check_path_owner(\n+ os.path.dirname(self.statefile_path), os.geteuid()):\n+ return\n+\n+ # Now that we've ensured the directory is owned by this user, we'll go\n+ # ahead and make sure that all our directories are created.\n+ try:\n+ os.makedirs(os.path.dirname(self.statefile_path))\n+ except OSError as exc:\n+ if exc.errno != errno.EEXIST:\n+ raise\n+\n # Attempt to write out our version check file\n with lockfile.LockFile(self.statefile_path):\n with open(self.statefile_path) as statefile:\n", "issue": "Selfcheck failure on Windows\nI get this warning all the time:\n\n```\nThere was an error checking the latest version of pip\nTraceback (most recent call last):\n File \"C:\\Python27\\lib\\site-packages\\pip\\utils\\outdated.py\", line 115, in pip_version_check\n state.save(pypi_version, current_time)\n File \"C:\\Python27\\lib\\site-packages\\pip\\utils\\outdated.py\", line 62, in save\n with open(self.statefile_path) as statefile:\nIOError: [Errno 2] No such file or directory: u'C:\\\\Users\\\\ionel_000\\\\AppData\\\\Local\\\\pip\\\\Cache\\\\selfcheck.json'\n```\n\nIf I create the file, it complains about invalid json. 
I've put `{}` inside, the warning has gone away, but this seems very wrong to me.\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport datetime\nimport json\nimport logging\nimport os.path\nimport sys\n\nfrom pip._vendor import lockfile\nfrom pip._vendor import pkg_resources\n\nfrom pip.compat import total_seconds\nfrom pip.index import PyPI\nfrom pip.locations import USER_CACHE_DIR, running_under_virtualenv\n\n\nSELFCHECK_DATE_FMT = \"%Y-%m-%dT%H:%M:%SZ\"\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass VirtualenvSelfCheckState(object):\n def __init__(self):\n self.statefile_path = os.path.join(sys.prefix, \"pip-selfcheck.json\")\n\n # Load the existing state\n try:\n with open(self.statefile_path) as statefile:\n self.state = json.load(statefile)\n except (IOError, ValueError):\n self.state = {}\n\n def save(self, pypi_version, current_time):\n # Attempt to write out our version check file\n with open(self.statefile_path, \"w\") as statefile:\n json.dump(\n {\n \"last_check\": current_time.strftime(SELFCHECK_DATE_FMT),\n \"pypi_version\": pypi_version,\n },\n statefile,\n sort_keys=True,\n separators=(\",\", \":\")\n )\n\n\nclass GlobalSelfCheckState(object):\n def __init__(self):\n self.statefile_path = os.path.join(USER_CACHE_DIR, \"selfcheck.json\")\n\n # Load the existing state\n try:\n with open(self.statefile_path) as statefile:\n self.state = json.load(statefile)[sys.prefix]\n except (IOError, ValueError, KeyError):\n self.state = {}\n\n def save(self, pypi_version, current_time):\n # Attempt to write out our version check file\n with lockfile.LockFile(self.statefile_path):\n with open(self.statefile_path) as statefile:\n state = json.load(statefile)\n\n state[sys.prefix] = {\n \"last_check\": current_time.strftime(SELFCHECK_DATE_FMT),\n \"pypi_version\": pypi_version,\n }\n\n with open(self.statefile_path, \"w\") as statefile:\n json.dump(state, statefile, sort_keys=True,\n separators=(\",\", \":\"))\n\n\ndef load_selfcheck_statefile():\n if running_under_virtualenv():\n return VirtualenvSelfCheckState()\n else:\n return GlobalSelfCheckState()\n\n\ndef pip_version_check(session):\n \"\"\"Check for an update for pip.\n\n Limit the frequency of checks to once per week. 
State is stored either in\n the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix\n of the pip script path.\n \"\"\"\n import pip # imported here to prevent circular imports\n pypi_version = None\n\n try:\n state = load_selfcheck_statefile()\n\n current_time = datetime.datetime.utcnow()\n # Determine if we need to refresh the state\n if \"last_check\" in state.state and \"pypi_version\" in state.state:\n last_check = datetime.datetime.strptime(\n state.state[\"last_check\"],\n SELFCHECK_DATE_FMT\n )\n if total_seconds(current_time - last_check) < 7 * 24 * 60 * 60:\n pypi_version = state.state[\"pypi_version\"]\n\n # Refresh the version if we need to or just see if we need to warn\n if pypi_version is None:\n resp = session.get(\n PyPI.pip_json_url,\n headers={\"Accept\": \"application/json\"},\n )\n resp.raise_for_status()\n pypi_version = resp.json()[\"info\"][\"version\"]\n\n # save that we've performed a check\n state.save(pypi_version, current_time)\n\n pip_version = pkg_resources.parse_version(pip.__version__)\n\n # Determine if our pypi_version is older\n if pip_version < pkg_resources.parse_version(pypi_version):\n logger.warning(\n \"You are using pip version %s, however version %s is \"\n \"available.\\nYou should consider upgrading via the \"\n \"'pip install --upgrade pip' command.\" % (pip.__version__,\n pypi_version)\n )\n\n except Exception:\n logger.debug(\n \"There was an error checking the latest version of pip\",\n exc_info=True,\n )\n", "path": "pip/utils/outdated.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport datetime\nimport errno\nimport json\nimport logging\nimport os.path\nimport sys\n\nfrom pip._vendor import lockfile\nfrom pip._vendor import pkg_resources\n\nfrom pip.compat import total_seconds\nfrom pip.index import PyPI\nfrom pip.locations import USER_CACHE_DIR, running_under_virtualenv\nfrom pip.utils.filesystem import check_path_owner\n\n\nSELFCHECK_DATE_FMT = \"%Y-%m-%dT%H:%M:%SZ\"\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass VirtualenvSelfCheckState(object):\n def __init__(self):\n self.statefile_path = os.path.join(sys.prefix, \"pip-selfcheck.json\")\n\n # Load the existing state\n try:\n with open(self.statefile_path) as statefile:\n self.state = json.load(statefile)\n except (IOError, ValueError):\n self.state = {}\n\n def save(self, pypi_version, current_time):\n # Attempt to write out our version check file\n with open(self.statefile_path, \"w\") as statefile:\n json.dump(\n {\n \"last_check\": current_time.strftime(SELFCHECK_DATE_FMT),\n \"pypi_version\": pypi_version,\n },\n statefile,\n sort_keys=True,\n separators=(\",\", \":\")\n )\n\n\nclass GlobalSelfCheckState(object):\n def __init__(self):\n self.statefile_path = os.path.join(USER_CACHE_DIR, \"selfcheck.json\")\n\n # Load the existing state\n try:\n with open(self.statefile_path) as statefile:\n self.state = json.load(statefile)[sys.prefix]\n except (IOError, ValueError, KeyError):\n self.state = {}\n\n def save(self, pypi_version, current_time):\n # Check to make sure that we own the directory\n if not check_path_owner(\n os.path.dirname(self.statefile_path), os.geteuid()):\n return\n\n # Now that we've ensured the directory is owned by this user, we'll go\n # ahead and make sure that all our directories are created.\n try:\n os.makedirs(os.path.dirname(self.statefile_path))\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise\n\n # Attempt to write out our version check file\n with 
lockfile.LockFile(self.statefile_path):\n with open(self.statefile_path) as statefile:\n state = json.load(statefile)\n\n state[sys.prefix] = {\n \"last_check\": current_time.strftime(SELFCHECK_DATE_FMT),\n \"pypi_version\": pypi_version,\n }\n\n with open(self.statefile_path, \"w\") as statefile:\n json.dump(state, statefile, sort_keys=True,\n separators=(\",\", \":\"))\n\n\ndef load_selfcheck_statefile():\n if running_under_virtualenv():\n return VirtualenvSelfCheckState()\n else:\n return GlobalSelfCheckState()\n\n\ndef pip_version_check(session):\n \"\"\"Check for an update for pip.\n\n Limit the frequency of checks to once per week. State is stored either in\n the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix\n of the pip script path.\n \"\"\"\n import pip # imported here to prevent circular imports\n pypi_version = None\n\n try:\n state = load_selfcheck_statefile()\n\n current_time = datetime.datetime.utcnow()\n # Determine if we need to refresh the state\n if \"last_check\" in state.state and \"pypi_version\" in state.state:\n last_check = datetime.datetime.strptime(\n state.state[\"last_check\"],\n SELFCHECK_DATE_FMT\n )\n if total_seconds(current_time - last_check) < 7 * 24 * 60 * 60:\n pypi_version = state.state[\"pypi_version\"]\n\n # Refresh the version if we need to or just see if we need to warn\n if pypi_version is None:\n resp = session.get(\n PyPI.pip_json_url,\n headers={\"Accept\": \"application/json\"},\n )\n resp.raise_for_status()\n pypi_version = resp.json()[\"info\"][\"version\"]\n\n # save that we've performed a check\n state.save(pypi_version, current_time)\n\n pip_version = pkg_resources.parse_version(pip.__version__)\n\n # Determine if our pypi_version is older\n if pip_version < pkg_resources.parse_version(pypi_version):\n logger.warning(\n \"You are using pip version %s, however version %s is \"\n \"available.\\nYou should consider upgrading via the \"\n \"'pip install --upgrade pip' command.\" % (pip.__version__,\n pypi_version)\n )\n\n except Exception:\n logger.debug(\n \"There was an error checking the latest version of pip\",\n exc_info=True,\n )\n", "path": "pip/utils/outdated.py"}]} | 1,665 | 319 |
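The pip patch above boils down to two guards before writing the selfcheck state: verify the cache directory is owned by the current user, and create it (tolerating the case where it already exists) so the later `open(..., "w")` cannot fail with a missing-directory error. A minimal, self-contained sketch of that create-then-write idiom is below; the path and payload are made up for illustration and this is not pip's actual module:

```python
import errno
import json
import os


def save_state(statefile_path, state):
    """Write JSON state, creating the parent directory first if it is missing."""
    try:
        os.makedirs(os.path.dirname(statefile_path))
    except OSError as exc:
        # An already-existing directory is fine; any other failure should propagate.
        if exc.errno != errno.EEXIST:
            raise
    with open(statefile_path, "w") as statefile:
        json.dump(state, statefile, sort_keys=True, separators=(",", ":"))


save_state("/tmp/selfcheck-demo/selfcheck.json", {"pypi_version": "1.5.6"})
```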
gh_patches_debug_3852 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-1773 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Taiwan real-time data has stopped working
Taiwan seems to have been offline recently.
It used to work correctly; something may have changed in the data source.
Kibana error description [here](https://kibana.electricitymap.org/app/kibana#/discover/10af54f0-0c4a-11e9-85c1-1d63df8c862c?_g=(refreshInterval:(display:Off,pause:!f,value:0),time:(from:now-24h,mode:quick,to:now))&_a=(columns:!(message,extra.key,level),filters:!(('$state':(store:appState),meta:(alias:!n,disabled:!f,index:'96f67170-0c49-11e9-85c1-1d63df8c862c',key:level,negate:!f,params:(query:ERROR,type:phrase),type:phrase,value:ERROR),query:(match:(level:(query:ERROR,type:phrase)))),('$state':(store:appState),meta:(alias:!n,disabled:!f,index:'96f67170-0c49-11e9-85c1-1d63df8c862c',key:'@timestamp',negate:!f,params:(query:'2019-02-13T09:56:26.971Z',type:phrase),type:phrase,value:'February%2013th%202019,%2010:56:26.971'),query:(match:('@timestamp':(query:'2019-02-13T09:56:26.971Z',type:phrase))))),index:'96f67170-0c49-11e9-85c1-1d63df8c862c',interval:auto,query:(language:lucene,query:''),sort:!('@timestamp',asc)))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/TW.py`
Content:
```
1 #!/usr/bin/env python3
2 import arrow
3 import requests
4 import pandas
5 import dateutil
6
7
8 def fetch_production(zone_key='TW', session=None, target_datetime=None, logger=None):
9 if target_datetime:
10 raise NotImplementedError('This parser is not yet able to parse past dates')
11
12 url = 'http://data.taipower.com.tw/opendata01/apply/file/d006001/001.txt'
13 response = requests.get(url)
14 data = response.json()
15
16 dumpDate = data['']
17 prodData = data['aaData']
18
19 tz = 'Asia/Taipei'
20 dumpDate = arrow.get(dumpDate, 'YYYY-MM-DD HH:mm').replace(tzinfo=dateutil.tz.gettz(tz))
21
22 objData = pandas.DataFrame(prodData)
23
24 objData.columns = ['fueltype', 'name', 'capacity', 'output', 'percentage',
25 'additional']
26
27 objData['fueltype'] = objData.fueltype.str.split('(').str[1]
28 objData['fueltype'] = objData.fueltype.str.split(')').str[0]
29 objData.drop('additional', axis=1, inplace=True)
30 objData.drop('percentage', axis=1, inplace=True)
31
32 objData = objData.convert_objects(convert_numeric=True)
33 production = pandas.DataFrame(objData.groupby('fueltype').sum())
34 production.columns = ['capacity', 'output']
35
36 coal_capacity = production.ix['Coal'].capacity + production.ix['IPP-Coal'].capacity
37 gas_capacity = production.ix['LNG'].capacity + production.ix['IPP-LNG'].capacity
38 oil_capacity = production.ix['Oil'].capacity + production.ix['Diesel'].capacity
39
40 coal_production = production.ix['Coal'].output + production.ix['IPP-Coal'].output
41 gas_production = production.ix['LNG'].output + production.ix['IPP-LNG'].output
42 oil_production = production.ix['Oil'].output + production.ix['Diesel'].output
43
44 # For storage, note that load will be negative, and generation positive.
45 # We require the opposite
46
47 returndata = {
48 'zoneKey': zone_key,
49 'datetime': dumpDate.datetime,
50 'production': {
51 'coal': coal_production,
52 'gas': gas_production,
53 'oil': oil_production,
54 'hydro': production.ix['Hydro'].output,
55 'nuclear': production.ix['Nuclear'].output,
56 'solar': production.ix['Solar'].output,
57 'wind': production.ix['Wind'].output,
58 'unknown': production.ix['Co-Gen'].output
59 },
60 'capacity': {
61 'coal': coal_capacity,
62 'gas': gas_capacity,
63 'oil': oil_capacity,
64 'hydro': production.ix['Hydro'].capacity,
65 'hydro storage':production.ix['Pumping Gen'].capacity,
66 'nuclear': production.ix['Nuclear'].capacity,
67 'solar': production.ix['Solar'].capacity,
68 'wind': production.ix['Wind'].capacity,
69 'unknown': production.ix['Co-Gen'].capacity
70 },
71 'storage': {
72 'hydro': -1 * production.ix['Pumping Load'].output - production.ix['Pumping Gen'].output
73 },
74 'source': 'taipower.com.tw'
75 }
76
77 return returndata
78
79
80 if __name__ == '__main__':
81 print(fetch_production())
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsers/TW.py b/parsers/TW.py
--- a/parsers/TW.py
+++ b/parsers/TW.py
@@ -9,7 +9,7 @@
if target_datetime:
raise NotImplementedError('This parser is not yet able to parse past dates')
- url = 'http://data.taipower.com.tw/opendata01/apply/file/d006001/001.txt'
+ url = 'http://www.taipower.com.tw/d006/loadGraph/loadGraph/data/genary.txt'
response = requests.get(url)
data = response.json()
| {"golden_diff": "diff --git a/parsers/TW.py b/parsers/TW.py\n--- a/parsers/TW.py\n+++ b/parsers/TW.py\n@@ -9,7 +9,7 @@\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n \n- url = 'http://data.taipower.com.tw/opendata01/apply/file/d006001/001.txt'\n+ url = 'http://www.taipower.com.tw/d006/loadGraph/loadGraph/data/genary.txt'\n response = requests.get(url)\n data = response.json()\n", "issue": "Taiwan real-time data has stopped working\nTaiwain seems to have been offline recently\r\nIt used to work correctly, something may have changed in the data source?\r\n\r\nKibana error description [here](https://kibana.electricitymap.org/app/kibana#/discover/10af54f0-0c4a-11e9-85c1-1d63df8c862c?_g=(refreshInterval:(display:Off,pause:!f,value:0),time:(from:now-24h,mode:quick,to:now))&_a=(columns:!(message,extra.key,level),filters:!(('$state':(store:appState),meta:(alias:!n,disabled:!f,index:'96f67170-0c49-11e9-85c1-1d63df8c862c',key:level,negate:!f,params:(query:ERROR,type:phrase),type:phrase,value:ERROR),query:(match:(level:(query:ERROR,type:phrase)))),('$state':(store:appState),meta:(alias:!n,disabled:!f,index:'96f67170-0c49-11e9-85c1-1d63df8c862c',key:'@timestamp',negate:!f,params:(query:'2019-02-13T09:56:26.971Z',type:phrase),type:phrase,value:'February%2013th%202019,%2010:56:26.971'),query:(match:('@timestamp':(query:'2019-02-13T09:56:26.971Z',type:phrase))))),index:'96f67170-0c49-11e9-85c1-1d63df8c862c',interval:auto,query:(language:lucene,query:''),sort:!('@timestamp',asc)))\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport arrow\nimport requests\nimport pandas\nimport dateutil\n\n\ndef fetch_production(zone_key='TW', session=None, target_datetime=None, logger=None):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n url = 'http://data.taipower.com.tw/opendata01/apply/file/d006001/001.txt'\n response = requests.get(url)\n data = response.json()\n\n dumpDate = data['']\n prodData = data['aaData']\n\n tz = 'Asia/Taipei'\n dumpDate = arrow.get(dumpDate, 'YYYY-MM-DD HH:mm').replace(tzinfo=dateutil.tz.gettz(tz))\n\n objData = pandas.DataFrame(prodData)\n\n objData.columns = ['fueltype', 'name', 'capacity', 'output', 'percentage',\n 'additional']\n\n objData['fueltype'] = objData.fueltype.str.split('(').str[1]\n objData['fueltype'] = objData.fueltype.str.split(')').str[0]\n objData.drop('additional', axis=1, inplace=True)\n objData.drop('percentage', axis=1, inplace=True)\n\n objData = objData.convert_objects(convert_numeric=True)\n production = pandas.DataFrame(objData.groupby('fueltype').sum())\n production.columns = ['capacity', 'output']\n\n coal_capacity = production.ix['Coal'].capacity + production.ix['IPP-Coal'].capacity\n gas_capacity = production.ix['LNG'].capacity + production.ix['IPP-LNG'].capacity\n oil_capacity = production.ix['Oil'].capacity + production.ix['Diesel'].capacity\n\n coal_production = production.ix['Coal'].output + production.ix['IPP-Coal'].output\n gas_production = production.ix['LNG'].output + production.ix['IPP-LNG'].output\n oil_production = production.ix['Oil'].output + production.ix['Diesel'].output\n\n # For storage, note that load will be negative, and generation positive.\n # We require the opposite\n\n returndata = {\n 'zoneKey': zone_key,\n 'datetime': dumpDate.datetime,\n 'production': {\n 'coal': coal_production,\n 'gas': gas_production,\n 'oil': oil_production,\n 'hydro': production.ix['Hydro'].output,\n 'nuclear': 
production.ix['Nuclear'].output,\n 'solar': production.ix['Solar'].output,\n 'wind': production.ix['Wind'].output,\n 'unknown': production.ix['Co-Gen'].output\n },\n 'capacity': {\n 'coal': coal_capacity,\n 'gas': gas_capacity,\n 'oil': oil_capacity,\n 'hydro': production.ix['Hydro'].capacity,\n 'hydro storage':production.ix['Pumping Gen'].capacity,\n 'nuclear': production.ix['Nuclear'].capacity,\n 'solar': production.ix['Solar'].capacity,\n 'wind': production.ix['Wind'].capacity,\n 'unknown': production.ix['Co-Gen'].capacity\n },\n 'storage': {\n 'hydro': -1 * production.ix['Pumping Load'].output - production.ix['Pumping Gen'].output\n },\n 'source': 'taipower.com.tw'\n }\n\n return returndata\n\n\nif __name__ == '__main__':\n print(fetch_production())\n", "path": "parsers/TW.py"}], "after_files": [{"content": "#!/usr/bin/env python3\nimport arrow\nimport requests\nimport pandas\nimport dateutil\n\n\ndef fetch_production(zone_key='TW', session=None, target_datetime=None, logger=None):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n url = 'http://www.taipower.com.tw/d006/loadGraph/loadGraph/data/genary.txt'\n response = requests.get(url)\n data = response.json()\n\n dumpDate = data['']\n prodData = data['aaData']\n\n tz = 'Asia/Taipei'\n dumpDate = arrow.get(dumpDate, 'YYYY-MM-DD HH:mm').replace(tzinfo=dateutil.tz.gettz(tz))\n\n objData = pandas.DataFrame(prodData)\n\n objData.columns = ['fueltype', 'name', 'capacity', 'output', 'percentage',\n 'additional']\n\n objData['fueltype'] = objData.fueltype.str.split('(').str[1]\n objData['fueltype'] = objData.fueltype.str.split(')').str[0]\n objData.drop('additional', axis=1, inplace=True)\n objData.drop('percentage', axis=1, inplace=True)\n\n objData = objData.convert_objects(convert_numeric=True)\n production = pandas.DataFrame(objData.groupby('fueltype').sum())\n production.columns = ['capacity', 'output']\n\n coal_capacity = production.ix['Coal'].capacity + production.ix['IPP-Coal'].capacity\n gas_capacity = production.ix['LNG'].capacity + production.ix['IPP-LNG'].capacity\n oil_capacity = production.ix['Oil'].capacity + production.ix['Diesel'].capacity\n\n coal_production = production.ix['Coal'].output + production.ix['IPP-Coal'].output\n gas_production = production.ix['LNG'].output + production.ix['IPP-LNG'].output\n oil_production = production.ix['Oil'].output + production.ix['Diesel'].output\n\n # For storage, note that load will be negative, and generation positive.\n # We require the opposite\n\n returndata = {\n 'zoneKey': zone_key,\n 'datetime': dumpDate.datetime,\n 'production': {\n 'coal': coal_production,\n 'gas': gas_production,\n 'oil': oil_production,\n 'hydro': production.ix['Hydro'].output,\n 'nuclear': production.ix['Nuclear'].output,\n 'solar': production.ix['Solar'].output,\n 'wind': production.ix['Wind'].output,\n 'unknown': production.ix['Co-Gen'].output\n },\n 'capacity': {\n 'coal': coal_capacity,\n 'gas': gas_capacity,\n 'oil': oil_capacity,\n 'hydro': production.ix['Hydro'].capacity,\n 'hydro storage':production.ix['Pumping Gen'].capacity,\n 'nuclear': production.ix['Nuclear'].capacity,\n 'solar': production.ix['Solar'].capacity,\n 'wind': production.ix['Wind'].capacity,\n 'unknown': production.ix['Co-Gen'].capacity\n },\n 'storage': {\n 'hydro': -1 * production.ix['Pumping Load'].output - production.ix['Pumping Gen'].output\n },\n 'source': 'taipower.com.tw'\n }\n\n return returndata\n\n\nif __name__ == '__main__':\n print(fetch_production())\n", "path": 
"parsers/TW.py"}]} | 1,620 | 136 |
gh_patches_debug_495 | rasdani/github-patches | git_diff | mantl__mantl-1470 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Replace cisco logo with Mantl logo in documentation
Now that Mantl has its own logo (see http://mantl.io and the readme), should we replace [the Cisco logo](https://github.com/CiscoCloud/mantl/blob/master/docs/_static/cisco.png) we use in the docs?
- Ansible version (`ansible --version`): n/a
- Python version (`python --version`): n/a
- Git commit hash or branch: n/a
- Cloud Environment: n/a
- Terraform version (`terraform version`): n/a
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Mantl documentation build configuration file, created by sphinx-quickstart on
4 # Wed Feb 4 06:59:14 2015.
5 #
6 # This file is execfile()d with the current directory set to its
7 # containing dir.
8 #
9 # Note that not all possible configuration values are present in this
10 # autogenerated file.
11 #
12 # All configuration values have a default; values that are commented out
13 # serve to show the default.
14
15 import sys
16 import os
17
18 # If extensions (or modules to document with autodoc) are in another directory,
19 # add these directories to sys.path here. If the directory is relative to the
20 # documentation root, use os.path.abspath to make it absolute, like shown here.
21 #sys.path.insert(0, os.path.abspath('.'))
22
23 # -- General configuration ------------------------------------------------
24
25 # If your documentation needs a minimal Sphinx version, state it here.
26 #needs_sphinx = '1.0'
27
28 # Add any Sphinx extension module names here, as strings. They can be
29 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
30 # ones.
31 extensions = [
32 'sphinx.ext.autodoc',
33 'sphinx.ext.intersphinx',
34 'sphinx.ext.viewcode',
35 'sphinx.ext.todo',
36 ]
37
38 # Add any paths that contain templates here, relative to this directory.
39 templates_path = ['_templates']
40
41 # The suffix of source filenames.
42 source_suffix = '.rst'
43
44 # The encoding of source files.
45 #source_encoding = 'utf-8-sig'
46
47 # The master toctree document.
48 master_doc = 'index'
49
50 # General information about the project.
51 project = u'Mantl'
52 copyright = u'2015, Cisco Systems, Incorporated'
53
54 # The version info for the project you're documenting, acts as replacement for
55 # |version| and |release|, also used in various other places throughout the
56 # built documents.
57 #
58 # The short X.Y version.
59 version = '1.0'
60 # The full version, including alpha/beta/rc tags.
61 release = '1.0.3'
62
63 # The language for content autogenerated by Sphinx. Refer to documentation
64 # for a list of supported languages.
65 #language = None
66
67 # There are two options for replacing |today|: either, you set today to some
68 # non-false value, then it is used:
69 #today = ''
70 # Else, today_fmt is used as the format for a strftime call.
71 #today_fmt = '%B %d, %Y'
72
73 # List of patterns, relative to source directory, that match files and
74 # directories to ignore when looking for source files.
75 exclude_patterns = ['_build']
76
77 # The reST default role (used for this markup: `text`) to use for all
78 # documents.
79 #default_role = None
80
81 # If true, '()' will be appended to :func: etc. cross-reference text.
82 #add_function_parentheses = True
83
84 # If true, the current module name will be prepended to all description
85 # unit titles (such as .. function::).
86 #add_module_names = True
87
88 # If true, sectionauthor and moduleauthor directives will be shown in the
89 # output. They are ignored by default.
90 #show_authors = False
91
92 # The name of the Pygments (syntax highlighting) style to use.
93 pygments_style = 'sphinx'
94
95 # A list of ignored prefixes for module index sorting.
96 #modindex_common_prefix = []
97
98 # If true, keep warnings as "system message" paragraphs in the built documents.
99 #keep_warnings = False
100
101
102 # -- Options for HTML output ----------------------------------------------
103
104 import alabaster
105
106 # The theme to use for HTML and HTML Help pages. See the documentation for
107 # a list of builtin themes.
108 html_theme = 'alabaster'
109
110 # Theme options are theme-specific and customize the look and feel of a theme
111 # further. For a list of options available for each theme, see the
112 # documentation.
113 extensions += ['alabaster']
114 html_theme_options = {
115 'github_user': 'ciscocloud',
116 'github_repo': 'mantl',
117 'logo': 'cisco.png',
118 'logo_name': True,
119 }
120
121 # Add any paths that contain custom themes here, relative to this directory.
122 html_theme_path = [alabaster.get_path()]
123
124 # The name for this set of Sphinx documents. If None, it defaults to
125 # "<project> v<release> documentation".
126 #html_title = None
127
128 # A shorter title for the navigation bar. Default is the same as html_title.
129 #html_short_title = None
130
131 # The name of an image file (relative to this directory) to place at the top
132 # of the sidebar.
133 # html_logo = None
134
135 # The name of an image file (within the static path) to use as favicon of the
136 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
137 # pixels large.
138 #html_favicon = None
139
140 # Add any paths that contain custom static files (such as style sheets) here,
141 # relative to this directory. They are copied after the builtin static files,
142 # so a file named "default.css" will overwrite the builtin "default.css".
143 html_static_path = ['_static']
144
145 # Add any extra paths that contain custom files (such as robots.txt or
146 # .htaccess) here, relative to this directory. These files are copied
147 # directly to the root of the documentation.
148 #html_extra_path = []
149
150 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
151 # using the given strftime format.
152 html_last_updated_fmt = '%b %d, %Y'
153
154 # If true, SmartyPants will be used to convert quotes and dashes to
155 # typographically correct entities.
156 #html_use_smartypants = True
157
158 # Custom sidebar templates, maps document names to template names.
159 html_sidebars = {
160 '**': [
161 'about.html', 'navigation.html', 'searchbox.html'
162 ]
163 }
164
165 # Additional templates that should be rendered to pages, maps page names to
166 # template names.
167 #html_additional_pages = {}
168
169 # If false, no module index is generated.
170 html_domain_indices = True
171
172 # If false, no index is generated.
173 html_use_index = True
174
175 # If true, the index is split into individual pages for each letter.
176 #html_split_index = False
177
178 # If true, links to the reST sources are added to the pages.
179 html_show_sourcelink = True
180
181 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
182 html_show_sphinx = False
183
184 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
185 #html_show_copyright = True
186
187 # If true, an OpenSearch description file will be output, and all pages will
188 # contain a <link> tag referring to it. The value of this option must be the
189 # base URL from which the finished HTML is served.
190 #html_use_opensearch = ''
191
192 # This is the file name suffix for HTML files (e.g. ".xhtml").
193 #html_file_suffix = None
194
195 # Output file base name for HTML help builder.
196 htmlhelp_basename = 'Mantldoc'
197
198
199 # -- Options for LaTeX output ---------------------------------------------
200
201 latex_elements = {
202 # The paper size ('letterpaper' or 'a4paper').
203 #'papersize': 'letterpaper',
204
205 # The font size ('10pt', '11pt' or '12pt').
206 #'pointsize': '10pt',
207
208 # Additional stuff for the LaTeX preamble.
209 #'preamble': '',
210 }
211
212 # Grouping the document tree into LaTeX files. List of tuples
213 # (source start file, target name, title,
214 # author, documentclass [howto, manual, or own class]).
215 latex_documents = [
216 ('index', 'Mantl.tex', u'Mantl Documentation',
217 u'Cisco Systems, Incorporated', 'manual'),
218 ]
219
220 # The name of an image file (relative to this directory) to place at the top of
221 # the title page.
222 #latex_logo = None
223
224 # For "manual" documents, if this is true, then toplevel headings are parts,
225 # not chapters.
226 #latex_use_parts = False
227
228 # If true, show page references after internal links.
229 #latex_show_pagerefs = False
230
231 # If true, show URL addresses after external links.
232 latex_show_urls = 'footnote'
233
234 # Documents to append as an appendix to all manuals.
235 #latex_appendices = []
236
237 # If false, no module index is generated.
238 #latex_domain_indices = True
239
240
241 # -- Options for manual page output ---------------------------------------
242
243 # One entry per manual page. List of tuples
244 # (source start file, name, description, authors, manual section).
245 man_pages = [
246 ('index', 'Mantl', u'Mantl Documentation',
247 [u'Cisco Systems, Incorporated'], 1)
248 ]
249
250 # If true, show URL addresses after external links.
251 #man_show_urls = False
252
253
254 # -- Options for Texinfo output -------------------------------------------
255
256 # Grouping the document tree into Texinfo files. List of tuples
257 # (source start file, target name, title, author,
258 # dir menu entry, description, category)
259 texinfo_documents = [
260 ('index', 'Mantl', u'Mantl Documentation',
261 u'Cisco Systems, Incorporated', 'Mantl', 'One line description of project.',
262 'Miscellaneous'),
263 ]
264
265 # Documents to append as an appendix to all manuals.
266 #texinfo_appendices = []
267
268 # If false, no module index is generated.
269 #texinfo_domain_indices = True
270
271 # How to display URL addresses: 'footnote', 'no', or 'inline'.
272 #texinfo_show_urls = 'footnote'
273
274 # If true, do not generate a @detailmenu in the "Top" node's menu.
275 #texinfo_no_detailmenu = False
276
277
278 # Example configuration for intersphinx: refer to the Python standard library.
279 intersphinx_mapping = {
280 'python': ('http://docs.python.org/', None),
281 'ansible': ('http://docs.ansible.com/', None),
282 }
283
284 # -- Options for todo ext ------------------------------------------------
285 todo_include_todos = os.getenv('INCLUDE_TODOS', '0') == '1' or version != release
286
287 # -- setup ---------------------------------------------------------------
288 def setup(app):
289 from sphinx.util.texescape import tex_replacements
290 tex_replacements.extend([
291 (u'☐', u'[ ]'),
292 (u'☑', u'[x]'),
293 ])
294
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -114,7 +114,7 @@
html_theme_options = {
'github_user': 'ciscocloud',
'github_repo': 'mantl',
- 'logo': 'cisco.png',
+ 'logo': 'mantl-logo.png',
'logo_name': True,
}
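One detail worth noting about this change: with the alabaster theme used in this conf.py, the `logo` option is, as far as its documented behaviour goes, a filename resolved against the project's `_static` directory, so the patch only takes effect if the image is also added to the repository (here that would mean a `docs/_static/mantl-logo.png` alongside the old `cisco.png`). The resulting theme options would look roughly like this:

```python
html_theme_options = {
    'github_user': 'ciscocloud',
    'github_repo': 'mantl',
    # Resolved relative to html_static_path, i.e. docs/_static/mantl-logo.png must exist.
    'logo': 'mantl-logo.png',
    'logo_name': True,
}
```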
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -114,7 +114,7 @@\n html_theme_options = {\n 'github_user': 'ciscocloud',\n 'github_repo': 'mantl',\n- 'logo': 'cisco.png',\n+ 'logo': 'mantl-logo.png',\n 'logo_name': True,\n }\n", "issue": "Replace cisco logo with Mantl logo in documentation\nNow that Mantl has it's own logo (see http://mantl.io and the readme), should we replace [the Cisco logo](https://github.com/CiscoCloud/mantl/blob/master/docs/_static/cisco.png) we use in the docs? \n- Ansible version (`ansible --version`): n/a\n- Python version (`python --version`): n/a\n- Git commit hash or branch: n/a\n- Cloud Environment: n/a\n- Terraform version (`terraform version`): n/a\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Mantl documentation build configuration file, created by sphinx-quickstart on\n# Wed Feb 4 06:59:14 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nimport os\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.todo',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Mantl'\ncopyright = u'2015, Cisco Systems, Incorporated'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '1.0'\n# The full version, including alpha/beta/rc tags.\nrelease = '1.0.3'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. 
function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\nimport alabaster\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'alabaster'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nextensions += ['alabaster']\nhtml_theme_options = {\n 'github_user': 'ciscocloud',\n 'github_repo': 'mantl',\n 'logo': 'cisco.png',\n 'logo_name': True,\n}\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = [alabaster.get_path()]\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n# html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\nhtml_sidebars = {\n '**': [\n 'about.html', 'navigation.html', 'searchbox.html'\n ]\n}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\nhtml_domain_indices = True\n\n# If false, no index is generated.\nhtml_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\nhtml_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\nhtml_show_sphinx = False\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. 
The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Mantldoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n ('index', 'Mantl.tex', u'Mantl Documentation',\n u'Cisco Systems, Incorporated', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\nlatex_show_urls = 'footnote'\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'Mantl', u'Mantl Documentation',\n [u'Cisco Systems, Incorporated'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'Mantl', u'Mantl Documentation',\n u'Cisco Systems, Incorporated', 'Mantl', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n 'python': ('http://docs.python.org/', None),\n 'ansible': ('http://docs.ansible.com/', None),\n}\n\n# -- Options for todo ext ------------------------------------------------\ntodo_include_todos = os.getenv('INCLUDE_TODOS', '0') == '1' or version != release\n\n# -- setup ---------------------------------------------------------------\ndef setup(app):\n from sphinx.util.texescape import tex_replacements\n tex_replacements.extend([\n (u'\u2610', u'[ ]'),\n (u'\u2611', u'[x]'),\n ])\n", "path": "docs/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Mantl documentation build configuration file, created by sphinx-quickstart on\n# Wed Feb 4 06:59:14 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nimport os\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.todo',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Mantl'\ncopyright = u'2015, Cisco Systems, Incorporated'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '1.0'\n# The full version, including alpha/beta/rc tags.\nrelease = '1.0.3'\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\nimport alabaster\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'alabaster'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nextensions += ['alabaster']\nhtml_theme_options = {\n 'github_user': 'ciscocloud',\n 'github_repo': 'mantl',\n 'logo': 'mantl-logo.png',\n 'logo_name': True,\n}\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = [alabaster.get_path()]\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n# html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. 
These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\nhtml_sidebars = {\n '**': [\n 'about.html', 'navigation.html', 'searchbox.html'\n ]\n}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\nhtml_domain_indices = True\n\n# If false, no index is generated.\nhtml_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\nhtml_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\nhtml_show_sphinx = False\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Mantldoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n ('index', 'Mantl.tex', u'Mantl Documentation',\n u'Cisco Systems, Incorporated', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\nlatex_show_urls = 'footnote'\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'Mantl', u'Mantl Documentation',\n [u'Cisco Systems, Incorporated'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'Mantl', u'Mantl Documentation',\n u'Cisco Systems, Incorporated', 'Mantl', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n 'python': ('http://docs.python.org/', None),\n 'ansible': ('http://docs.ansible.com/', None),\n}\n\n# -- Options for todo ext ------------------------------------------------\ntodo_include_todos = os.getenv('INCLUDE_TODOS', '0') == '1' or version != release\n\n# -- setup ---------------------------------------------------------------\ndef setup(app):\n from sphinx.util.texescape import tex_replacements\n tex_replacements.extend([\n (u'\u2610', u'[ ]'),\n (u'\u2611', u'[x]'),\n ])\n", "path": "docs/conf.py"}]} | 3,442 | 92 |
gh_patches_debug_3005 | rasdani/github-patches | git_diff | gratipay__gratipay.com-3792 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
log spam during test
What's this `TypeError` about? Seems spurious ...
```
pid-13897 thread-4384100352 (Thread-1) Traceback (most recent call last):
pid-13897 thread-4384100352 (Thread-1) File "/Users/whit537/personal/gratipay/gratipay.com/gratipay/cron.py", line 26, in f
pid-13897 thread-4384100352 (Thread-1) func()
pid-13897 thread-4384100352 (Thread-1) File "/Users/whit537/personal/gratipay/gratipay.com/gratipay/main.py", line 82, in <lambda>
pid-13897 thread-4384100352 (Thread-1) cron(env.update_cta_every, lambda: utils.update_cta(website))
pid-13897 thread-4384100352 (Thread-1) File "/Users/whit537/personal/gratipay/gratipay.com/gratipay/utils/__init__.py", line 145, in update_cta
pid-13897 thread-4384100352 (Thread-1) website.support_current = cur = int(round(nreceiving_from / nusers * 100)) if nusers else 0
pid-13897 thread-4384100352 (Thread-1) TypeError: unsupported operand type(s) for /: 'int' and 'tuple'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gratipay/utils/__init__.py`
Content:
```
1 # encoding: utf8
2
3 from __future__ import absolute_import, division, print_function, unicode_literals
4
5 from datetime import datetime, timedelta
6
7 from aspen import Response, json
8 from aspen.utils import to_rfc822, utcnow
9 from dependency_injection import resolve_dependencies
10 from postgres.cursors import SimpleCursorBase
11
12 import gratipay
13
14
15 BEGINNING_OF_EPOCH = to_rfc822(datetime(1970, 1, 1)).encode('ascii')
16
17 # Difference between current time and credit card expiring date when
18 # card is considered as expiring
19 EXPIRING_DELTA = timedelta(days = 30)
20
21
22 def dict_to_querystring(mapping):
23 if not mapping:
24 return u''
25
26 arguments = []
27 for key, values in mapping.iteritems():
28 for val in values:
29 arguments.append(u'='.join([key, val]))
30
31 return u'?' + u'&'.join(arguments)
32
33
34 def use_tildes_for_participants(website, request):
35 if request.path.raw.startswith('/~/'):
36 to = '/~' + request.path.raw[3:]
37 if request.qs.raw:
38 to += '?' + request.qs.raw
39 website.redirect(to)
40 elif request.path.raw.startswith('/~'):
41 request.path.__init__('/~/' + request.path.raw[2:])
42
43
44 def canonicalize(redirect, path, base, canonical, given, arguments=None):
45 if given != canonical:
46 assert canonical.lower() == given.lower() # sanity check
47 remainder = path[len(base + given):]
48
49 if arguments is not None:
50 arguments = dict_to_querystring(arguments)
51
52 newpath = base + canonical + remainder + arguments or ''
53 redirect(newpath)
54
55
56 def get_participant(state, restrict=True, resolve_unclaimed=True):
57 """Given a Request, raise Response or return Participant.
58
59 If restrict is True then we'll restrict access to owners and admins.
60
61 """
62 redirect = state['website'].redirect
63 request = state['request']
64 user = state['user']
65 slug = request.line.uri.path['username']
66 qs = request.line.uri.querystring
67 _ = state['_']
68
69 if restrict:
70 if user.ANON:
71 raise Response(403, _("You need to log in to access this page."))
72
73 from gratipay.models.participant import Participant # avoid circular import
74 participant = Participant.from_username(slug)
75
76 if participant is None:
77 raise Response(404)
78
79 canonicalize(redirect, request.line.uri.path.raw, '/~/', participant.username, slug, qs)
80
81 if participant.is_closed:
82 if user.ADMIN:
83 return participant
84 raise Response(410)
85
86 if participant.claimed_time is None and resolve_unclaimed:
87 to = participant.resolve_unclaimed()
88 if to:
89 # This is a stub account (someone on another platform who hasn't
90 # actually registered with Gratipay yet)
91 redirect(to)
92 else:
93 # This is an archived account (result of take_over)
94 if user.ADMIN:
95 return participant
96 raise Response(404)
97
98 if restrict:
99 if participant != user.participant:
100 if not user.ADMIN:
101 raise Response(403, _("You are not authorized to access this page."))
102
103 return participant
104
105
106 def get_team(state):
107 """Given a Request, raise Response or return Team.
108 """
109 redirect = state['website'].redirect
110 request = state['request']
111 user = state['user']
112 slug = request.line.uri.path['team']
113 qs = request.line.uri.querystring
114
115 from gratipay.models.team import Team # avoid circular import
116 team = Team.from_slug(slug)
117
118 if team is None:
119 # Try to redirect to a Participant.
120 from gratipay.models.participant import Participant # avoid circular import
121 participant = Participant.from_username(slug)
122 if participant is not None:
123 qs = '?' + request.qs.raw if request.qs.raw else ''
124 redirect('/~' + request.path.raw[1:] + qs)
125 raise Response(404)
126
127 canonicalize(redirect, request.line.uri.path.raw, '/', team.slug, slug, qs)
128
129 if team.is_closed and not user.ADMIN:
130 raise Response(410)
131
132 return team
133
134
135 def update_cta(website):
136 nusers = website.db.one("""
137 SELECT nusers FROM paydays
138 ORDER BY ts_end DESC LIMIT 1
139 """, default=(0.0, 0))
140 nreceiving_from = website.db.one("""
141 SELECT nreceiving_from
142 FROM teams
143 WHERE slug = 'Gratipay'
144 """, default=0)
145 website.support_current = cur = int(round(nreceiving_from / nusers * 100)) if nusers else 0
146 if cur < 10: goal = 20
147 elif cur < 15: goal = 30
148 elif cur < 25: goal = 40
149 elif cur < 35: goal = 50
150 elif cur < 45: goal = 60
151 elif cur < 55: goal = 70
152 elif cur < 65: goal = 80
153 elif cur > 70: goal = None
154 website.support_goal = goal
155
156
157 def _execute(this, sql, params=[]):
158 print(sql.strip(), params)
159 super(SimpleCursorBase, this).execute(sql, params)
160
161 def log_cursor(f):
162 "Prints sql and params to stdout. Works globaly so watch for threaded use."
163 def wrapper(*a, **kw):
164 try:
165 SimpleCursorBase.execute = _execute
166 ret = f(*a, **kw)
167 finally:
168 del SimpleCursorBase.execute
169 return ret
170 return wrapper
171
172
173 def format_money(money):
174 format = '%.2f' if money < 1000 else '%.0f'
175 return format % money
176
177
178 def excerpt_intro(text, length=175, append=u'…'):
179 if not text:
180 return ''
181 if len(text) > length:
182 return text[:length] + append
183 return text
184
185
186 def is_card_expiring(expiration_year, expiration_month):
187 now = datetime.utcnow()
188 expiring_date = datetime(expiration_year, expiration_month, 1)
189 delta = expiring_date - now
190 return delta < EXPIRING_DELTA
191
192
193 def set_cookie(cookies, key, value, expires=None, httponly=True, path=b'/'):
194 cookies[key] = value
195 cookie = cookies[key]
196 if expires:
197 if isinstance(expires, timedelta):
198 expires += utcnow()
199 if isinstance(expires, datetime):
200 expires = to_rfc822(expires).encode('ascii')
201 cookie[b'expires'] = expires
202 if httponly:
203 cookie[b'httponly'] = True
204 if path:
205 cookie[b'path'] = path
206 if gratipay.use_secure_cookies:
207 cookie[b'secure'] = True
208
209
210 def erase_cookie(cookies, key, **kw):
211 set_cookie(cookies, key, '', BEGINNING_OF_EPOCH, **kw)
212
213
214 def filter_profile_nav(user, participant, pages):
215 out = []
216 for foo, bar, show_them, show_others in pages:
217 if (user.participant == participant and show_them) \
218 or (user.participant != participant and show_others) \
219 or user.ADMIN:
220 out.append((foo, bar, show_them, show_others))
221 return out
222
223
224 def to_javascript(obj):
225 """For when you want to inject an object into a <script> tag.
226 """
227 return json.dumps(obj).replace('</', '<\\/')
228
229
230 class LazyResponse(Response):
231
232 def __init__(self, code, lazy_body, **kw):
233 Response.__init__(self, code, '', **kw)
234 self.lazy_body = lazy_body
235
236 def render_body(self, state):
237 f = self.lazy_body
238 self.body = f(*resolve_dependencies(f, state).as_args)
239
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gratipay/utils/__init__.py b/gratipay/utils/__init__.py
--- a/gratipay/utils/__init__.py
+++ b/gratipay/utils/__init__.py
@@ -136,7 +136,7 @@
nusers = website.db.one("""
SELECT nusers FROM paydays
ORDER BY ts_end DESC LIMIT 1
- """, default=(0.0, 0))
+ """, default=0)
nreceiving_from = website.db.one("""
SELECT nreceiving_from
FROM teams
| {"golden_diff": "diff --git a/gratipay/utils/__init__.py b/gratipay/utils/__init__.py\n--- a/gratipay/utils/__init__.py\n+++ b/gratipay/utils/__init__.py\n@@ -136,7 +136,7 @@\n nusers = website.db.one(\"\"\"\n SELECT nusers FROM paydays\n ORDER BY ts_end DESC LIMIT 1\n- \"\"\", default=(0.0, 0))\n+ \"\"\", default=0)\n nreceiving_from = website.db.one(\"\"\"\n SELECT nreceiving_from\n FROM teams\n", "issue": "log spam during test\nWhat's this `TypeError` about? Seems spurious ...\n\n```\npid-13897 thread-4384100352 (Thread-1) Traceback (most recent call last):\npid-13897 thread-4384100352 (Thread-1) File \"/Users/whit537/personal/gratipay/gratipay.com/gratipay/cron.py\", line 26, in f\npid-13897 thread-4384100352 (Thread-1) func()\npid-13897 thread-4384100352 (Thread-1) File \"/Users/whit537/personal/gratipay/gratipay.com/gratipay/main.py\", line 82, in <lambda>\npid-13897 thread-4384100352 (Thread-1) cron(env.update_cta_every, lambda: utils.update_cta(website))\npid-13897 thread-4384100352 (Thread-1) File \"/Users/whit537/personal/gratipay/gratipay.com/gratipay/utils/__init__.py\", line 145, in update_cta\npid-13897 thread-4384100352 (Thread-1) website.support_current = cur = int(round(nreceiving_from / nusers * 100)) if nusers else 0\npid-13897 thread-4384100352 (Thread-1) TypeError: unsupported operand type(s) for /: 'int' and 'tuple'\n```\n\nlog spam during test\nWhat's this `TypeError` about? Seems spurious ...\n\n```\npid-13897 thread-4384100352 (Thread-1) Traceback (most recent call last):\npid-13897 thread-4384100352 (Thread-1) File \"/Users/whit537/personal/gratipay/gratipay.com/gratipay/cron.py\", line 26, in f\npid-13897 thread-4384100352 (Thread-1) func()\npid-13897 thread-4384100352 (Thread-1) File \"/Users/whit537/personal/gratipay/gratipay.com/gratipay/main.py\", line 82, in <lambda>\npid-13897 thread-4384100352 (Thread-1) cron(env.update_cta_every, lambda: utils.update_cta(website))\npid-13897 thread-4384100352 (Thread-1) File \"/Users/whit537/personal/gratipay/gratipay.com/gratipay/utils/__init__.py\", line 145, in update_cta\npid-13897 thread-4384100352 (Thread-1) website.support_current = cur = int(round(nreceiving_from / nusers * 100)) if nusers else 0\npid-13897 thread-4384100352 (Thread-1) TypeError: unsupported operand type(s) for /: 'int' and 'tuple'\n```\n\n", "before_files": [{"content": "# encoding: utf8\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom datetime import datetime, timedelta\n\nfrom aspen import Response, json\nfrom aspen.utils import to_rfc822, utcnow\nfrom dependency_injection import resolve_dependencies\nfrom postgres.cursors import SimpleCursorBase\n\nimport gratipay\n\n\nBEGINNING_OF_EPOCH = to_rfc822(datetime(1970, 1, 1)).encode('ascii')\n\n# Difference between current time and credit card expiring date when\n# card is considered as expiring\nEXPIRING_DELTA = timedelta(days = 30)\n\n\ndef dict_to_querystring(mapping):\n if not mapping:\n return u''\n\n arguments = []\n for key, values in mapping.iteritems():\n for val in values:\n arguments.append(u'='.join([key, val]))\n\n return u'?' + u'&'.join(arguments)\n\n\ndef use_tildes_for_participants(website, request):\n if request.path.raw.startswith('/~/'):\n to = '/~' + request.path.raw[3:]\n if request.qs.raw:\n to += '?' 
+ request.qs.raw\n website.redirect(to)\n elif request.path.raw.startswith('/~'):\n request.path.__init__('/~/' + request.path.raw[2:])\n\n\ndef canonicalize(redirect, path, base, canonical, given, arguments=None):\n if given != canonical:\n assert canonical.lower() == given.lower() # sanity check\n remainder = path[len(base + given):]\n\n if arguments is not None:\n arguments = dict_to_querystring(arguments)\n\n newpath = base + canonical + remainder + arguments or ''\n redirect(newpath)\n\n\ndef get_participant(state, restrict=True, resolve_unclaimed=True):\n \"\"\"Given a Request, raise Response or return Participant.\n\n If restrict is True then we'll restrict access to owners and admins.\n\n \"\"\"\n redirect = state['website'].redirect\n request = state['request']\n user = state['user']\n slug = request.line.uri.path['username']\n qs = request.line.uri.querystring\n _ = state['_']\n\n if restrict:\n if user.ANON:\n raise Response(403, _(\"You need to log in to access this page.\"))\n\n from gratipay.models.participant import Participant # avoid circular import\n participant = Participant.from_username(slug)\n\n if participant is None:\n raise Response(404)\n\n canonicalize(redirect, request.line.uri.path.raw, '/~/', participant.username, slug, qs)\n\n if participant.is_closed:\n if user.ADMIN:\n return participant\n raise Response(410)\n\n if participant.claimed_time is None and resolve_unclaimed:\n to = participant.resolve_unclaimed()\n if to:\n # This is a stub account (someone on another platform who hasn't\n # actually registered with Gratipay yet)\n redirect(to)\n else:\n # This is an archived account (result of take_over)\n if user.ADMIN:\n return participant\n raise Response(404)\n\n if restrict:\n if participant != user.participant:\n if not user.ADMIN:\n raise Response(403, _(\"You are not authorized to access this page.\"))\n\n return participant\n\n\ndef get_team(state):\n \"\"\"Given a Request, raise Response or return Team.\n \"\"\"\n redirect = state['website'].redirect\n request = state['request']\n user = state['user']\n slug = request.line.uri.path['team']\n qs = request.line.uri.querystring\n\n from gratipay.models.team import Team # avoid circular import\n team = Team.from_slug(slug)\n\n if team is None:\n # Try to redirect to a Participant.\n from gratipay.models.participant import Participant # avoid circular import\n participant = Participant.from_username(slug)\n if participant is not None:\n qs = '?' + request.qs.raw if request.qs.raw else ''\n redirect('/~' + request.path.raw[1:] + qs)\n raise Response(404)\n\n canonicalize(redirect, request.line.uri.path.raw, '/', team.slug, slug, qs)\n\n if team.is_closed and not user.ADMIN:\n raise Response(410)\n\n return team\n\n\ndef update_cta(website):\n nusers = website.db.one(\"\"\"\n SELECT nusers FROM paydays\n ORDER BY ts_end DESC LIMIT 1\n \"\"\", default=(0.0, 0))\n nreceiving_from = website.db.one(\"\"\"\n SELECT nreceiving_from\n FROM teams\n WHERE slug = 'Gratipay'\n \"\"\", default=0)\n website.support_current = cur = int(round(nreceiving_from / nusers * 100)) if nusers else 0\n if cur < 10: goal = 20\n elif cur < 15: goal = 30\n elif cur < 25: goal = 40\n elif cur < 35: goal = 50\n elif cur < 45: goal = 60\n elif cur < 55: goal = 70\n elif cur < 65: goal = 80\n elif cur > 70: goal = None\n website.support_goal = goal\n\n\ndef _execute(this, sql, params=[]):\n print(sql.strip(), params)\n super(SimpleCursorBase, this).execute(sql, params)\n\ndef log_cursor(f):\n \"Prints sql and params to stdout. 
Works globaly so watch for threaded use.\"\n def wrapper(*a, **kw):\n try:\n SimpleCursorBase.execute = _execute\n ret = f(*a, **kw)\n finally:\n del SimpleCursorBase.execute\n return ret\n return wrapper\n\n\ndef format_money(money):\n format = '%.2f' if money < 1000 else '%.0f'\n return format % money\n\n\ndef excerpt_intro(text, length=175, append=u'\u2026'):\n if not text:\n return ''\n if len(text) > length:\n return text[:length] + append\n return text\n\n\ndef is_card_expiring(expiration_year, expiration_month):\n now = datetime.utcnow()\n expiring_date = datetime(expiration_year, expiration_month, 1)\n delta = expiring_date - now\n return delta < EXPIRING_DELTA\n\n\ndef set_cookie(cookies, key, value, expires=None, httponly=True, path=b'/'):\n cookies[key] = value\n cookie = cookies[key]\n if expires:\n if isinstance(expires, timedelta):\n expires += utcnow()\n if isinstance(expires, datetime):\n expires = to_rfc822(expires).encode('ascii')\n cookie[b'expires'] = expires\n if httponly:\n cookie[b'httponly'] = True\n if path:\n cookie[b'path'] = path\n if gratipay.use_secure_cookies:\n cookie[b'secure'] = True\n\n\ndef erase_cookie(cookies, key, **kw):\n set_cookie(cookies, key, '', BEGINNING_OF_EPOCH, **kw)\n\n\ndef filter_profile_nav(user, participant, pages):\n out = []\n for foo, bar, show_them, show_others in pages:\n if (user.participant == participant and show_them) \\\n or (user.participant != participant and show_others) \\\n or user.ADMIN:\n out.append((foo, bar, show_them, show_others))\n return out\n\n\ndef to_javascript(obj):\n \"\"\"For when you want to inject an object into a <script> tag.\n \"\"\"\n return json.dumps(obj).replace('</', '<\\\\/')\n\n\nclass LazyResponse(Response):\n\n def __init__(self, code, lazy_body, **kw):\n Response.__init__(self, code, '', **kw)\n self.lazy_body = lazy_body\n\n def render_body(self, state):\n f = self.lazy_body\n self.body = f(*resolve_dependencies(f, state).as_args)\n", "path": "gratipay/utils/__init__.py"}], "after_files": [{"content": "# encoding: utf8\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom datetime import datetime, timedelta\n\nfrom aspen import Response, json\nfrom aspen.utils import to_rfc822, utcnow\nfrom dependency_injection import resolve_dependencies\nfrom postgres.cursors import SimpleCursorBase\n\nimport gratipay\n\n\nBEGINNING_OF_EPOCH = to_rfc822(datetime(1970, 1, 1)).encode('ascii')\n\n# Difference between current time and credit card expiring date when\n# card is considered as expiring\nEXPIRING_DELTA = timedelta(days = 30)\n\n\ndef dict_to_querystring(mapping):\n if not mapping:\n return u''\n\n arguments = []\n for key, values in mapping.iteritems():\n for val in values:\n arguments.append(u'='.join([key, val]))\n\n return u'?' + u'&'.join(arguments)\n\n\ndef use_tildes_for_participants(website, request):\n if request.path.raw.startswith('/~/'):\n to = '/~' + request.path.raw[3:]\n if request.qs.raw:\n to += '?' 
+ request.qs.raw\n website.redirect(to)\n elif request.path.raw.startswith('/~'):\n request.path.__init__('/~/' + request.path.raw[2:])\n\n\ndef canonicalize(redirect, path, base, canonical, given, arguments=None):\n if given != canonical:\n assert canonical.lower() == given.lower() # sanity check\n remainder = path[len(base + given):]\n\n if arguments is not None:\n arguments = dict_to_querystring(arguments)\n\n newpath = base + canonical + remainder + arguments or ''\n redirect(newpath)\n\n\ndef get_participant(state, restrict=True, resolve_unclaimed=True):\n \"\"\"Given a Request, raise Response or return Participant.\n\n If restrict is True then we'll restrict access to owners and admins.\n\n \"\"\"\n redirect = state['website'].redirect\n request = state['request']\n user = state['user']\n slug = request.line.uri.path['username']\n qs = request.line.uri.querystring\n _ = state['_']\n\n if restrict:\n if user.ANON:\n raise Response(403, _(\"You need to log in to access this page.\"))\n\n from gratipay.models.participant import Participant # avoid circular import\n participant = Participant.from_username(slug)\n\n if participant is None:\n raise Response(404)\n\n canonicalize(redirect, request.line.uri.path.raw, '/~/', participant.username, slug, qs)\n\n if participant.is_closed:\n if user.ADMIN:\n return participant\n raise Response(410)\n\n if participant.claimed_time is None and resolve_unclaimed:\n to = participant.resolve_unclaimed()\n if to:\n # This is a stub account (someone on another platform who hasn't\n # actually registered with Gratipay yet)\n redirect(to)\n else:\n # This is an archived account (result of take_over)\n if user.ADMIN:\n return participant\n raise Response(404)\n\n if restrict:\n if participant != user.participant:\n if not user.ADMIN:\n raise Response(403, _(\"You are not authorized to access this page.\"))\n\n return participant\n\n\ndef get_team(state):\n \"\"\"Given a Request, raise Response or return Team.\n \"\"\"\n redirect = state['website'].redirect\n request = state['request']\n user = state['user']\n slug = request.line.uri.path['team']\n qs = request.line.uri.querystring\n\n from gratipay.models.team import Team # avoid circular import\n team = Team.from_slug(slug)\n\n if team is None:\n # Try to redirect to a Participant.\n from gratipay.models.participant import Participant # avoid circular import\n participant = Participant.from_username(slug)\n if participant is not None:\n qs = '?' + request.qs.raw if request.qs.raw else ''\n redirect('/~' + request.path.raw[1:] + qs)\n raise Response(404)\n\n canonicalize(redirect, request.line.uri.path.raw, '/', team.slug, slug, qs)\n\n if team.is_closed and not user.ADMIN:\n raise Response(410)\n\n return team\n\n\ndef update_cta(website):\n nusers = website.db.one(\"\"\"\n SELECT nusers FROM paydays\n ORDER BY ts_end DESC LIMIT 1\n \"\"\", default=0)\n nreceiving_from = website.db.one(\"\"\"\n SELECT nreceiving_from\n FROM teams\n WHERE slug = 'Gratipay'\n \"\"\", default=0)\n website.support_current = cur = int(round(nreceiving_from / nusers * 100)) if nusers else 0\n if cur < 10: goal = 20\n elif cur < 15: goal = 30\n elif cur < 25: goal = 40\n elif cur < 35: goal = 50\n elif cur < 45: goal = 60\n elif cur < 55: goal = 70\n elif cur < 65: goal = 80\n elif cur > 70: goal = None\n website.support_goal = goal\n\n\ndef _execute(this, sql, params=[]):\n print(sql.strip(), params)\n super(SimpleCursorBase, this).execute(sql, params)\n\ndef log_cursor(f):\n \"Prints sql and params to stdout. 
Works globaly so watch for threaded use.\"\n def wrapper(*a, **kw):\n try:\n SimpleCursorBase.execute = _execute\n ret = f(*a, **kw)\n finally:\n del SimpleCursorBase.execute\n return ret\n return wrapper\n\n\ndef format_money(money):\n format = '%.2f' if money < 1000 else '%.0f'\n return format % money\n\n\ndef excerpt_intro(text, length=175, append=u'\u2026'):\n if not text:\n return ''\n if len(text) > length:\n return text[:length] + append\n return text\n\n\ndef is_card_expiring(expiration_year, expiration_month):\n now = datetime.utcnow()\n expiring_date = datetime(expiration_year, expiration_month, 1)\n delta = expiring_date - now\n return delta < EXPIRING_DELTA\n\n\ndef set_cookie(cookies, key, value, expires=None, httponly=True, path=b'/'):\n cookies[key] = value\n cookie = cookies[key]\n if expires:\n if isinstance(expires, timedelta):\n expires += utcnow()\n if isinstance(expires, datetime):\n expires = to_rfc822(expires).encode('ascii')\n cookie[b'expires'] = expires\n if httponly:\n cookie[b'httponly'] = True\n if path:\n cookie[b'path'] = path\n if gratipay.use_secure_cookies:\n cookie[b'secure'] = True\n\n\ndef erase_cookie(cookies, key, **kw):\n set_cookie(cookies, key, '', BEGINNING_OF_EPOCH, **kw)\n\n\ndef filter_profile_nav(user, participant, pages):\n out = []\n for foo, bar, show_them, show_others in pages:\n if (user.participant == participant and show_them) \\\n or (user.participant != participant and show_others) \\\n or user.ADMIN:\n out.append((foo, bar, show_them, show_others))\n return out\n\n\ndef to_javascript(obj):\n \"\"\"For when you want to inject an object into a <script> tag.\n \"\"\"\n return json.dumps(obj).replace('</', '<\\\\/')\n\n\nclass LazyResponse(Response):\n\n def __init__(self, code, lazy_body, **kw):\n Response.__init__(self, code, '', **kw)\n self.lazy_body = lazy_body\n\n def render_body(self, state):\n f = self.lazy_body\n self.body = f(*resolve_dependencies(f, state).as_args)\n", "path": "gratipay/utils/__init__.py"}]} | 3,440 | 126 |
gh_patches_debug_25842 | rasdani/github-patches | git_diff | amundsen-io__amundsen-1303 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug Report: Glue search_tables with Filters and result tables more than 100 items
There is a bug when applying `Filters` to a database that contains more than 100 tables: Glue returns 100 items per page, and to move to the next page we need to pass `NextToken`.
I have 138 tables, which means the code iterates over 2 pages of results.
The filter:
```python
{
'Key': 'DatabaseName',
'Value': glue_database_name
}
```
Every time I run the code I get a different result: the length of the list is always the same (138), but the length of the set is always different, ranging from 1 to 30.
I ran this check more than 10 times.
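For reference, a minimal sketch of that check (the database name is a placeholder; everything else mirrors the report and the extractor's pagination loop):
```python
import boto3

glue = boto3.client("glue")
# Placeholder filter; substitute your own Glue database name.
kwargs = {"Filters": [{"Key": "DatabaseName", "Value": "my_glue_database"}]}

tables = []
data = glue.search_tables(**kwargs)
tables += data["TableList"]
while "NextToken" in data:
    kwargs["NextToken"] = data["NextToken"]
    data = glue.search_tables(**kwargs)
    tables += data["TableList"]

# The list length is stable (138 in my case), but the number of distinct
# tables fluctuates between runs when pages overlap.
print(len(tables), len({t["Name"] for t in tables}))
```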
I took a look at the documentation and found the `MaxResults` parameter for further checking. Since I know the exact table count, I set it to 150 and the issue went away completely.
## Expected Behavior
Get the exact same result for filtered tables.
## Current Behavior
Query result from [`self._glue.search_tables(**kwargs)`](https://github.com/amundsen-io/amundsen/blob/main/databuilder/databuilder/extractor/glue_extractor.py#L78) contains duplicates
## Possible Solution
I'm not sure, but I suspect that for the next (second) iteration (the next page of up to 100 items) we are reusing the previous filters together with the new `NextToken`, and that the problem lies there.
## Steps to Reproduce
1. Have more than 100 glue tables in a single DB in AWS
2. Query it using the abovementioned `DatabaseName` filter
3. Observe duplicates in the list
## Hot-fix
1. Add `MaxResults` to [`kwargs`](https://github.com/amundsen-io/amundsen/blob/main/databuilder/databuilder/extractor/glue_extractor.py#L80) with a value larger than your actual total number of tables (see the sketch after this list)
2. Observe correct behavior
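A rough sketch of that hot-fix, continuing the snippet above (`glue` is the same boto3 client; 150 is simply a value above my 138 tables, not a recommended default):
```python
kwargs = {
    "Filters": [{"Key": "DatabaseName", "Value": "my_glue_database"}],
    "MaxResults": 150,  # anything comfortably above the real table count
}
data = glue.search_tables(**kwargs)
# ...then paginate with NextToken exactly as in the sketch above.
```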
## Context
Q: How has this issue affected you?
A: It affects our production system
## Your Environment
```
amundsen-databuilder==4.5.3
amundsen-gremlin==0.0.9
Flask==1.1.4
gremlinpython==3.4.9
requests-aws4auth==1.1.1
typing-extensions==3.10.0
overrides==6.1.0
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `databuilder/databuilder/extractor/glue_extractor.py`
Content:
```
1 # Copyright Contributors to the Amundsen project.
2 # SPDX-License-Identifier: Apache-2.0
3
4 from typing import (
5 Any, Dict, Iterator, List, Union,
6 )
7
8 import boto3
9 from pyhocon import ConfigFactory, ConfigTree
10
11 from databuilder.extractor.base_extractor import Extractor
12 from databuilder.models.table_metadata import ColumnMetadata, TableMetadata
13
14
15 class GlueExtractor(Extractor):
16 """
17 Extracts tables and columns metadata from AWS Glue metastore
18 """
19
20 CLUSTER_KEY = 'cluster'
21 FILTER_KEY = 'filters'
22 DEFAULT_CONFIG = ConfigFactory.from_dict({CLUSTER_KEY: 'gold', FILTER_KEY: None})
23
24 def init(self, conf: ConfigTree) -> None:
25 conf = conf.with_fallback(GlueExtractor.DEFAULT_CONFIG)
26 self._cluster = conf.get_string(GlueExtractor.CLUSTER_KEY)
27 self._filters = conf.get(GlueExtractor.FILTER_KEY)
28 self._glue = boto3.client('glue')
29 self._extract_iter: Union[None, Iterator] = None
30
31 def extract(self) -> Union[TableMetadata, None]:
32 if not self._extract_iter:
33 self._extract_iter = self._get_extract_iter()
34 try:
35 return next(self._extract_iter)
36 except StopIteration:
37 return None
38
39 def get_scope(self) -> str:
40 return 'extractor.glue'
41
42 def _get_extract_iter(self) -> Iterator[TableMetadata]:
43 """
44 It gets all tables and yields TableMetadata
45 :return:
46 """
47 for row in self._get_raw_extract_iter():
48 columns, i = [], 0
49
50 for column in row['StorageDescriptor']['Columns'] \
51 + row.get('PartitionKeys', []):
52 columns.append(ColumnMetadata(
53 column['Name'],
54 column['Comment'] if 'Comment' in column else None,
55 column['Type'],
56 i
57 ))
58 i += 1
59
60 yield TableMetadata(
61 'glue',
62 self._cluster,
63 row['DatabaseName'],
64 row['Name'],
65 row.get('Description') or row.get('Parameters', {}).get('comment'),
66 columns,
67 row.get('TableType') == 'VIRTUAL_VIEW',
68 )
69
70 def _get_raw_extract_iter(self) -> Iterator[Dict[str, Any]]:
71 """
72 Provides iterator of results row from glue client
73 :return:
74 """
75 tables = self._search_tables()
76 return iter(tables)
77
78 def _search_tables(self) -> List[Dict[str, Any]]:
79 tables = []
80 kwargs = {}
81 if self._filters is not None:
82 kwargs['Filters'] = self._filters
83 data = self._glue.search_tables(**kwargs)
84 tables += data['TableList']
85 while 'NextToken' in data:
86 token = data['NextToken']
87 kwargs['NextToken'] = token
88 data = self._glue.search_tables(**kwargs)
89 tables += data['TableList']
90 return tables
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/databuilder/databuilder/extractor/glue_extractor.py b/databuilder/databuilder/extractor/glue_extractor.py
--- a/databuilder/databuilder/extractor/glue_extractor.py
+++ b/databuilder/databuilder/extractor/glue_extractor.py
@@ -19,12 +19,14 @@
CLUSTER_KEY = 'cluster'
FILTER_KEY = 'filters'
- DEFAULT_CONFIG = ConfigFactory.from_dict({CLUSTER_KEY: 'gold', FILTER_KEY: None})
+ MAX_RESULTS_KEY = 'max_results'
+ DEFAULT_CONFIG = ConfigFactory.from_dict({CLUSTER_KEY: 'gold', FILTER_KEY: None, MAX_RESULTS_KEY: 500})
def init(self, conf: ConfigTree) -> None:
conf = conf.with_fallback(GlueExtractor.DEFAULT_CONFIG)
self._cluster = conf.get_string(GlueExtractor.CLUSTER_KEY)
self._filters = conf.get(GlueExtractor.FILTER_KEY)
+ self._max_results = conf.get(GlueExtractor.MAX_RESULTS_KEY)
self._glue = boto3.client('glue')
self._extract_iter: Union[None, Iterator] = None
@@ -80,6 +82,7 @@
kwargs = {}
if self._filters is not None:
kwargs['Filters'] = self._filters
+ kwargs['MaxResults'] = self._max_results
data = self._glue.search_tables(**kwargs)
tables += data['TableList']
while 'NextToken' in data:
| {"golden_diff": "diff --git a/databuilder/databuilder/extractor/glue_extractor.py b/databuilder/databuilder/extractor/glue_extractor.py\n--- a/databuilder/databuilder/extractor/glue_extractor.py\n+++ b/databuilder/databuilder/extractor/glue_extractor.py\n@@ -19,12 +19,14 @@\n \n CLUSTER_KEY = 'cluster'\n FILTER_KEY = 'filters'\n- DEFAULT_CONFIG = ConfigFactory.from_dict({CLUSTER_KEY: 'gold', FILTER_KEY: None})\n+ MAX_RESULTS_KEY = 'max_results'\n+ DEFAULT_CONFIG = ConfigFactory.from_dict({CLUSTER_KEY: 'gold', FILTER_KEY: None, MAX_RESULTS_KEY: 500})\n \n def init(self, conf: ConfigTree) -> None:\n conf = conf.with_fallback(GlueExtractor.DEFAULT_CONFIG)\n self._cluster = conf.get_string(GlueExtractor.CLUSTER_KEY)\n self._filters = conf.get(GlueExtractor.FILTER_KEY)\n+ self._max_results = conf.get(GlueExtractor.MAX_RESULTS_KEY)\n self._glue = boto3.client('glue')\n self._extract_iter: Union[None, Iterator] = None\n \n@@ -80,6 +82,7 @@\n kwargs = {}\n if self._filters is not None:\n kwargs['Filters'] = self._filters\n+ kwargs['MaxResults'] = self._max_results\n data = self._glue.search_tables(**kwargs)\n tables += data['TableList']\n while 'NextToken' in data:\n", "issue": "Bug Report: Glue search_tables with Filters and result tables more than 100 items\n<!--- Provide a general summary of the issue in the Title above -->\r\n<!--- Look through existing open and closed issues to see if someone has reported the issue before -->\r\n\r\nThere is a bug while applying Filters for the database which contains more than 100 items. Since glue returns 100 items per page and to move to the next page we need to specify `NextToken`.\r\nI have 138 tables, which means I will be iterating 2 times over the result.\r\n\r\nThe filter:\r\n```python\r\n{\r\n 'Key': 'DatabaseName',\r\n 'Value': glue_database_name\r\n}\r\n```\r\n\r\nEvery time I run the code I get different results: the length of the list is always the same - 138. However, the length of the set is always different. It ranges from 1 to 30.\r\nI run my check over 10 times.\r\n\r\nI took look at the documentation and found a proper parameter `MaxResults` for further checking. Since I know precisely desired table count, I put it as 150 and the issue has totally gone.\r\n\r\n## Expected Behavior\r\nGet the exact same result for filtered tables.\r\n\r\n## Current Behavior\r\nQuery result from [`self._glue.search_tables(**kwargs)`](https://github.com/amundsen-io/amundsen/blob/main/databuilder/databuilder/extractor/glue_extractor.py#L78) contains duplicates\r\n\r\n## Possible Solution\r\nI'm not sure, but I think for the next (second) iteration (page, which contains up to 100 items) we are using a new `NextToken` with previous filters. Maybe the problem lies here.\r\n\r\n## Steps to Reproduce\r\n1. Have more than 100 glue tables in a single DB in AWS\r\n2. Query it using the abovementioned `DatabaseName` filter\r\n3. Observe duplicates in the list\r\n\r\n## Hot-fix\r\n1. Add `MaxResults` to [`kwargs`](https://github.com/amundsen-io/amundsen/blob/main/databuilder/databuilder/extractor/glue_extractor.py#L80) that is more than your actual size of overall tables\r\n2. 
Observe a proper behavior\r\n\r\n## Context\r\nQ: How has this issue affected you?\r\nA: It affects our production system\r\n\r\n## Your Environment\r\n```\r\namundsen-databuilder==4.5.3\r\namundsen-gremlin==0.0.9\r\nFlask==1.1.4\r\ngremlinpython==3.4.9\r\nrequests-aws4auth==1.1.1\r\ntyping-extensions==3.10.0\r\noverrides==6.1.0\r\n```\n", "before_files": [{"content": "# Copyright Contributors to the Amundsen project.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom typing import (\n Any, Dict, Iterator, List, Union,\n)\n\nimport boto3\nfrom pyhocon import ConfigFactory, ConfigTree\n\nfrom databuilder.extractor.base_extractor import Extractor\nfrom databuilder.models.table_metadata import ColumnMetadata, TableMetadata\n\n\nclass GlueExtractor(Extractor):\n \"\"\"\n Extracts tables and columns metadata from AWS Glue metastore\n \"\"\"\n\n CLUSTER_KEY = 'cluster'\n FILTER_KEY = 'filters'\n DEFAULT_CONFIG = ConfigFactory.from_dict({CLUSTER_KEY: 'gold', FILTER_KEY: None})\n\n def init(self, conf: ConfigTree) -> None:\n conf = conf.with_fallback(GlueExtractor.DEFAULT_CONFIG)\n self._cluster = conf.get_string(GlueExtractor.CLUSTER_KEY)\n self._filters = conf.get(GlueExtractor.FILTER_KEY)\n self._glue = boto3.client('glue')\n self._extract_iter: Union[None, Iterator] = None\n\n def extract(self) -> Union[TableMetadata, None]:\n if not self._extract_iter:\n self._extract_iter = self._get_extract_iter()\n try:\n return next(self._extract_iter)\n except StopIteration:\n return None\n\n def get_scope(self) -> str:\n return 'extractor.glue'\n\n def _get_extract_iter(self) -> Iterator[TableMetadata]:\n \"\"\"\n It gets all tables and yields TableMetadata\n :return:\n \"\"\"\n for row in self._get_raw_extract_iter():\n columns, i = [], 0\n\n for column in row['StorageDescriptor']['Columns'] \\\n + row.get('PartitionKeys', []):\n columns.append(ColumnMetadata(\n column['Name'],\n column['Comment'] if 'Comment' in column else None,\n column['Type'],\n i\n ))\n i += 1\n\n yield TableMetadata(\n 'glue',\n self._cluster,\n row['DatabaseName'],\n row['Name'],\n row.get('Description') or row.get('Parameters', {}).get('comment'),\n columns,\n row.get('TableType') == 'VIRTUAL_VIEW',\n )\n\n def _get_raw_extract_iter(self) -> Iterator[Dict[str, Any]]:\n \"\"\"\n Provides iterator of results row from glue client\n :return:\n \"\"\"\n tables = self._search_tables()\n return iter(tables)\n\n def _search_tables(self) -> List[Dict[str, Any]]:\n tables = []\n kwargs = {}\n if self._filters is not None:\n kwargs['Filters'] = self._filters\n data = self._glue.search_tables(**kwargs)\n tables += data['TableList']\n while 'NextToken' in data:\n token = data['NextToken']\n kwargs['NextToken'] = token\n data = self._glue.search_tables(**kwargs)\n tables += data['TableList']\n return tables\n", "path": "databuilder/databuilder/extractor/glue_extractor.py"}], "after_files": [{"content": "# Copyright Contributors to the Amundsen project.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom typing import (\n Any, Dict, Iterator, List, Union,\n)\n\nimport boto3\nfrom pyhocon import ConfigFactory, ConfigTree\n\nfrom databuilder.extractor.base_extractor import Extractor\nfrom databuilder.models.table_metadata import ColumnMetadata, TableMetadata\n\n\nclass GlueExtractor(Extractor):\n \"\"\"\n Extracts tables and columns metadata from AWS Glue metastore\n \"\"\"\n\n CLUSTER_KEY = 'cluster'\n FILTER_KEY = 'filters'\n MAX_RESULTS_KEY = 'max_results'\n DEFAULT_CONFIG = ConfigFactory.from_dict({CLUSTER_KEY: 'gold', FILTER_KEY: None, 
MAX_RESULTS_KEY: 500})\n\n def init(self, conf: ConfigTree) -> None:\n conf = conf.with_fallback(GlueExtractor.DEFAULT_CONFIG)\n self._cluster = conf.get_string(GlueExtractor.CLUSTER_KEY)\n self._filters = conf.get(GlueExtractor.FILTER_KEY)\n self._max_results = conf.get(GlueExtractor.MAX_RESULTS_KEY)\n self._glue = boto3.client('glue')\n self._extract_iter: Union[None, Iterator] = None\n\n def extract(self) -> Union[TableMetadata, None]:\n if not self._extract_iter:\n self._extract_iter = self._get_extract_iter()\n try:\n return next(self._extract_iter)\n except StopIteration:\n return None\n\n def get_scope(self) -> str:\n return 'extractor.glue'\n\n def _get_extract_iter(self) -> Iterator[TableMetadata]:\n \"\"\"\n It gets all tables and yields TableMetadata\n :return:\n \"\"\"\n for row in self._get_raw_extract_iter():\n columns, i = [], 0\n\n for column in row['StorageDescriptor']['Columns'] \\\n + row.get('PartitionKeys', []):\n columns.append(ColumnMetadata(\n column['Name'],\n column['Comment'] if 'Comment' in column else None,\n column['Type'],\n i\n ))\n i += 1\n\n yield TableMetadata(\n 'glue',\n self._cluster,\n row['DatabaseName'],\n row['Name'],\n row.get('Description') or row.get('Parameters', {}).get('comment'),\n columns,\n row.get('TableType') == 'VIRTUAL_VIEW',\n )\n\n def _get_raw_extract_iter(self) -> Iterator[Dict[str, Any]]:\n \"\"\"\n Provides iterator of results row from glue client\n :return:\n \"\"\"\n tables = self._search_tables()\n return iter(tables)\n\n def _search_tables(self) -> List[Dict[str, Any]]:\n tables = []\n kwargs = {}\n if self._filters is not None:\n kwargs['Filters'] = self._filters\n kwargs['MaxResults'] = self._max_results\n data = self._glue.search_tables(**kwargs)\n tables += data['TableList']\n while 'NextToken' in data:\n token = data['NextToken']\n kwargs['NextToken'] = token\n data = self._glue.search_tables(**kwargs)\n tables += data['TableList']\n return tables\n", "path": "databuilder/databuilder/extractor/glue_extractor.py"}]} | 1,661 | 338 |
gh_patches_debug_4343 | rasdani/github-patches | git_diff | Pyomo__pyomo-2319 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`build-extensions` fails on pip install because it can't find `FindASL.cmake`
## Summary
In a fresh Python 3.9.9 environment, I ran `pip install pyomo` and then `pyomo build-extensions`, and got a failure to build the PyNumero libraries because `ampl_function_demo/src/FindASL.cmake` is not found. The file doesn't appear to be anywhere in `site-packages`.
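A minimal sketch of the kind of `setup.py` change this points at, assuming the cause is simply that the `ampl_function_demo` sources (including `FindASL.cmake`) are never declared as package data; only the relevant kwargs are shown, and the new entry just mirrors the existing PyNumero one:
```python
from setuptools import setup, find_packages

setup(
    name="Pyomo",
    packages=find_packages(exclude=("scripts",)),
    package_data={
        # existing entries elided
        "pyomo.contrib.ampl_function_demo": ["src/*"],  # would ship FindASL.cmake
        "pyomo.contrib.pynumero": ["src/*", "src/tests/*"],
    },
)
```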
### Steps to reproduce the issue
```console
$ pip install pyomo
$ pyomo build-extensions
```
### Error Message
```
**** Building PyNumero libraries ****
-- The C compiler identification is GNU 7.5.0
-- The CXX compiler identification is GNU 7.5.0
-- Check for working C compiler: /usr/bin/cc
-- Check for working C compiler: /usr/bin/cc -- works
-- Detecting C compiler ABI info
-- Detecting C compiler ABI info - done
-- Detecting C compile features
-- Detecting C compile features - done
-- Check for working CXX compiler: /usr/bin/c++
-- Check for working CXX compiler: /usr/bin/c++ -- works
-- Detecting CXX compiler ABI info
-- Detecting CXX compiler ABI info - done
-- Detecting CXX compile features
-- Detecting CXX compile features - done
CMake Error at CMakeLists.txt:18 (INCLUDE):
INCLUDE could not find load file:
/home/robert/idaes/fresh-env/lib/python3.9/site-packages/pyomo/contrib/pynumero/src/../../ampl_function_demo/src/FindASL.cmake
-- Found PkgConfig: /usr/bin/pkg-config (found version "0.29.1")
-- Configuring incomplete, errors occurred!
See also "/tmp/tmpmu0rmdiu/CMakeFiles/CMakeOutput.log".
ERROR: DistutilsExecError: command '/usr/bin/cmake' failed with exit code 1
```
### Information on your system
Pyomo version: 6.3.0
Python version: 3.9.9
Operating system: Ubuntu 18.04
How Pyomo was installed (PyPI, conda, source): pip
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # ___________________________________________________________________________
2 #
3 # Pyomo: Python Optimization Modeling Objects
4 # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
5 # Under the terms of Contract DE-NA0003525 with National Technology and
6 # Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
7 # rights in this software.
8 # This software is distributed under the 3-clause BSD License.
9 # ___________________________________________________________________________
10
11 """
12 Script to generate the installer for pyomo.
13 """
14
15 import os
16 import platform
17 import sys
18 from setuptools import setup, find_packages, Command
19 try:
20 from setuptools import DistutilsOptionError
21 except ImportError:
22 from distutils.errors import DistutilsOptionError
23
24 def read(*rnames):
25 with open(os.path.join(os.path.dirname(__file__), *rnames)) as README:
26 # Strip all leading badges up to, but not including the COIN-OR
27 # badge so that they do not appear in the PyPI description
28 while True:
29 line = README.readline()
30 if 'COIN-OR' in line:
31 break
32 if line.strip() and '[![' not in line:
33 break
34 return line + README.read()
35
36 def import_pyomo_module(*path):
37 _module_globals = dict(globals())
38 _module_globals['__name__'] = None
39 _source = os.path.join(os.path.dirname(__file__), *path)
40 with open(_source) as _FILE:
41 exec(_FILE.read(), _module_globals)
42 return _module_globals
43
44 def get_version():
45 # Source pyomo/version/info.py to get the version number
46 return import_pyomo_module('pyomo','version','info.py')['__version__']
47
48 CYTHON_REQUIRED = "required"
49 if not any(arg.startswith(cmd)
50 for cmd in ('build','install','bdist') for arg in sys.argv):
51 using_cython = False
52 else:
53 using_cython = "automatic"
54 if '--with-cython' in sys.argv:
55 using_cython = CYTHON_REQUIRED
56 sys.argv.remove('--with-cython')
57 if '--without-cython' in sys.argv:
58 using_cython = False
59 sys.argv.remove('--without-cython')
60
61 ext_modules = []
62 if using_cython:
63 try:
64 if platform.python_implementation() != "CPython":
65 # break out of this try-except (disable Cython)
66 raise RuntimeError("Cython is only supported under CPython")
67 from Cython.Build import cythonize
68 #
69 # Note: The Cython developers recommend that you destribute C source
70 # files to users. But this is fine for evaluating the utility of Cython
71 #
72 import shutil
73 files = [
74 "pyomo/core/expr/numvalue.pyx",
75 "pyomo/core/expr/numeric_expr.pyx",
76 "pyomo/core/expr/logical_expr.pyx",
77 #"pyomo/core/expr/visitor.pyx",
78 "pyomo/core/util.pyx",
79 "pyomo/repn/standard_repn.pyx",
80 "pyomo/repn/plugins/cpxlp.pyx",
81 "pyomo/repn/plugins/gams_writer.pyx",
82 "pyomo/repn/plugins/baron_writer.pyx",
83 "pyomo/repn/plugins/ampl/ampl_.pyx",
84 ]
85 for f in files:
86 shutil.copyfile(f[:-1], f)
87 ext_modules = cythonize(files,
88 compiler_directives={"language_level": 3})
89 except:
90 if using_cython == CYTHON_REQUIRED:
91 print("""
92 ERROR: Cython was explicitly requested with --with-cython, but cythonization
93 of core Pyomo modules failed.
94 """)
95 raise
96 using_cython = False
97
98 if (('--with-distributable-extensions' in sys.argv)
99 or (os.getenv('PYOMO_SETUP_ARGS') is not None and
100 '--with-distributable-extensions' in os.getenv('PYOMO_SETUP_ARGS'))):
101 try:
102 sys.argv.remove('--with-distributable-extensions')
103 except:
104 pass
105 #
106 # Import the APPSI extension builder
107 #
108 appsi_extension = import_pyomo_module(
109 'pyomo', 'contrib', 'appsi', 'build.py')['get_appsi_extension'](
110 in_setup=True, appsi_root=os.path.join(
111 os.path.dirname(__file__), 'pyomo', 'contrib', 'appsi'))
112 ext_modules.append(appsi_extension)
113
114
115 class DependenciesCommand(Command):
116 """Custom setuptools command
117
118 This will output the list of dependencies, including any optional
119 dependencies for 'extras_require` targets. This is needed so that
120 we can (relatively) easily extract what `pip install '.[optional]'`
121 would have done so that we can pass it on to a 'conda install'
122 command when setting up Pyomo testing in a conda environment
123 (because conda for all intents does not acknowledge
124 `extras_require`).
125
126 """
127 description = "list the dependencies for this package"
128 user_options = [
129 ('extras=', None, 'extra targets to include'),
130 ]
131
132 def initialize_options(self):
133 self.extras = None
134
135 def finalize_options(self):
136 if self.extras is not None:
137 self.extras = [
138 e for e in (_.strip() for _ in self.extras.split(',')) if e
139 ]
140 for e in self.extras:
141 if e not in setup_kwargs['extras_require']:
142 raise DistutilsOptionError(
143 "extras can only include {%s}"
144 % (', '.join(setup_kwargs['extras_require'])))
145
146 def run(self):
147 deps = list(self._print_deps(setup_kwargs['install_requires']))
148 if self.extras is not None:
149 for e in self.extras:
150 deps.extend(self._print_deps(setup_kwargs['extras_require'][e]))
151 print(' '.join(deps))
152
153 def _print_deps(self, deplist):
154 implementation_name = sys.implementation.name
155 platform_system = platform.system()
156 python_version = '.'.join(platform.python_version_tuple()[:2])
157 for entry in deplist:
158 dep, _, condition = (_.strip() for _ in entry.partition(';'))
159 if condition and not eval(condition):
160 continue
161 yield dep
162
163
164 setup_kwargs = dict(
165 name = 'Pyomo',
166 #
167 # Note: the release number is set in pyomo/version/info.py
168 #
169 cmdclass = {'dependencies': DependenciesCommand},
170 version = get_version(),
171 maintainer = 'Pyomo Developer Team',
172 maintainer_email = 'pyomo-developers@googlegroups.com',
173 url = 'http://pyomo.org',
174 license = 'BSD',
175 platforms = ["any"],
176 description = 'Pyomo: Python Optimization Modeling Objects',
177 long_description = read('README.md'),
178 long_description_content_type = 'text/markdown',
179 keywords = ['optimization'],
180 classifiers = [
181 'Development Status :: 5 - Production/Stable',
182 'Intended Audience :: End Users/Desktop',
183 'Intended Audience :: Science/Research',
184 'License :: OSI Approved :: BSD License',
185 'Natural Language :: English',
186 'Operating System :: MacOS',
187 'Operating System :: Microsoft :: Windows',
188 'Operating System :: Unix',
189 'Programming Language :: Python',
190 'Programming Language :: Python :: 3',
191 'Programming Language :: Python :: 3.7',
192 'Programming Language :: Python :: 3.8',
193 'Programming Language :: Python :: 3.9',
194 'Programming Language :: Python :: 3.10',
195 'Programming Language :: Python :: Implementation :: CPython',
196 'Programming Language :: Python :: Implementation :: PyPy',
197 'Topic :: Scientific/Engineering :: Mathematics',
198 'Topic :: Software Development :: Libraries :: Python Modules' ],
199 python_requires = '>=3.7',
200 install_requires = [
201 'ply',
202 ],
203 extras_require = {
204 'tests': [
205 'coverage',
206 'pytest',
207 'pytest-parallel',
208 'parameterized',
209 'pybind11',
210 ],
211 'docs': [
212 'Sphinx>2',
213 'sphinx-copybutton',
214 'sphinx_rtd_theme>0.5',
215 'sphinxcontrib-jsmath',
216 'sphinxcontrib-napoleon',
217 'numpy', # Needed by autodoc for pynumero
218 ],
219 'optional': [
220 'dill', # No direct use, but improves lambda pickle
221 'ipython', # contrib.viewer
222 'matplotlib',
223 'networkx', # network, incidence_analysis, community_detection
224 'numpy',
225 'openpyxl', # dataportals
226 #'pathos', # requested for #963, but PR currently closed
227 'pint', # units
228 'python-louvain', # community_detection
229 'pyyaml', # core
230 'sympy', # differentiation
231 'xlrd', # dataportals
232 'z3-solver', # community_detection
233 #
234 # subprocess output is merged more reliably if
235 # 'PeekNamedPipe' is available from pywin32
236 'pywin32; platform_system=="Windows"',
237 #
238 # The following optional dependencies are difficult to
239 # install on PyPy (binary wheels are not available), so we
240 # will only "require" them on other (CPython) platforms:
241 'casadi; implementation_name!="pypy"', # dae
242 'numdifftools; implementation_name!="pypy"', # pynumero
243 'pandas; implementation_name!="pypy"',
244 'scipy; implementation_name!="pypy"',
245 'seaborn; implementation_name!="pypy"', # parmest.graphics
246 ],
247 },
248 packages = find_packages(exclude=("scripts",)),
249 package_data = {
250 "pyomo.contrib.appsi.cmodel": ["src/*"],
251 "pyomo.contrib.mcpp": ["*.cpp"],
252 "pyomo.contrib.pynumero": ['src/*', 'src/tests/*'],
253 "pyomo.contrib.viewer": ["*.ui"],
254 },
255 ext_modules = ext_modules,
256 entry_points = """
257 [console_scripts]
258 pyomo = pyomo.scripting.pyomo_main:main_console_script
259
260 [pyomo.command]
261 pyomo.help = pyomo.scripting.driver_help
262 pyomo.viewer=pyomo.contrib.viewer.pyomo_viewer
263 """
264 )
265
266
267 try:
268 setup(**setup_kwargs)
269 except SystemExit as e_info:
270 # Cython can generate a SystemExit exception on Windows if the
271 # environment is missing / has an incorrect Microsoft compiler.
272 # Since Cython is not strictly required, we will disable Cython and
273 # try re-running setup(), but only for this very specific situation.
274 if 'Microsoft Visual C++' not in str(e_info):
275 raise
276 elif using_cython == CYTHON_REQUIRED:
277 print("""
278 ERROR: Cython was explicitly requested with --with-cython, but cythonization
279 of core Pyomo modules failed.
280 """)
281 raise
282 else:
283 print("""
284 ERROR: setup() failed:
285 %s
286 Re-running setup() without the Cython modules
287 """ % (str(e_info),))
288 setup_kwargs['ext_modules'] = []
289 setup(**setup_kwargs)
290 print("""
291 WARNING: Installation completed successfully, but the attempt to cythonize
292 core Pyomo modules failed. Cython provides performance
293 optimizations and is not required for any Pyomo functionality.
294 Cython returned the following error:
295 "%s"
296 """ % (str(e_info),))
297
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -247,6 +247,7 @@
},
packages = find_packages(exclude=("scripts",)),
package_data = {
+ "pyomo.contrib.ampl_function_demo": ["src/*"],
"pyomo.contrib.appsi.cmodel": ["src/*"],
"pyomo.contrib.mcpp": ["*.cpp"],
"pyomo.contrib.pynumero": ['src/*', 'src/tests/*'],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -247,6 +247,7 @@\n },\n packages = find_packages(exclude=(\"scripts\",)),\n package_data = {\n+ \"pyomo.contrib.ampl_function_demo\": [\"src/*\"],\n \"pyomo.contrib.appsi.cmodel\": [\"src/*\"],\n \"pyomo.contrib.mcpp\": [\"*.cpp\"],\n \"pyomo.contrib.pynumero\": ['src/*', 'src/tests/*'],\n", "issue": "`build-extensions` fails on pip install because it can't find `FindASL.cmake`\n## Summary\r\nIn a fresh Python 3.9.9 environment, I ran `pip install pyomo`, then `pyomo build-extensions`, and get a failure to build the PyNumero libraries as `ampl_function_demo/src/FindASL.cmake` is not found. It doesn't appear to be anywhere in `site_packages`.\r\n\r\n### Steps to reproduce the issue\r\n\r\n```console\r\n$ pip install pyomo\r\n$ pyomo build-extensions\r\n```\r\n\r\n### Error Message\r\n\r\n```\r\n**** Building PyNumero libraries ****\r\n-- The C compiler identification is GNU 7.5.0\r\n-- The CXX compiler identification is GNU 7.5.0\r\n-- Check for working C compiler: /usr/bin/cc\r\n-- Check for working C compiler: /usr/bin/cc -- works\r\n-- Detecting C compiler ABI info\r\n-- Detecting C compiler ABI info - done\r\n-- Detecting C compile features\r\n-- Detecting C compile features - done\r\n-- Check for working CXX compiler: /usr/bin/c++\r\n-- Check for working CXX compiler: /usr/bin/c++ -- works\r\n-- Detecting CXX compiler ABI info\r\n-- Detecting CXX compiler ABI info - done\r\n-- Detecting CXX compile features\r\n-- Detecting CXX compile features - done\r\nCMake Error at CMakeLists.txt:18 (INCLUDE):\r\n INCLUDE could not find load file:\r\n\r\n /home/robert/idaes/fresh-env/lib/python3.9/site-packages/pyomo/contrib/pynumero/src/../../ampl_function_demo/src/FindASL.cmake\r\n\r\n\r\n-- Found PkgConfig: /usr/bin/pkg-config (found version \"0.29.1\") \r\n-- Configuring incomplete, errors occurred!\r\nSee also \"/tmp/tmpmu0rmdiu/CMakeFiles/CMakeOutput.log\".\r\nERROR: DistutilsExecError: command '/usr/bin/cmake' failed with exit code 1\r\n```\r\n\r\n### Information on your system\r\n\r\nPyomo version: 6.3.0\r\nPython version: 3.9.9\r\nOperating system: Ubuntu 18.04\r\nHow Pyomo was installed (PyPI, conda, source): pip\r\n\n", "before_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. 
Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\n\"\"\"\nScript to generate the installer for pyomo.\n\"\"\"\n\nimport os\nimport platform\nimport sys\nfrom setuptools import setup, find_packages, Command\ntry:\n from setuptools import DistutilsOptionError\nexcept ImportError:\n from distutils.errors import DistutilsOptionError\n\ndef read(*rnames):\n with open(os.path.join(os.path.dirname(__file__), *rnames)) as README:\n # Strip all leading badges up to, but not including the COIN-OR\n # badge so that they do not appear in the PyPI description\n while True:\n line = README.readline()\n if 'COIN-OR' in line:\n break\n if line.strip() and '[![' not in line:\n break\n return line + README.read()\n\ndef import_pyomo_module(*path):\n _module_globals = dict(globals())\n _module_globals['__name__'] = None\n _source = os.path.join(os.path.dirname(__file__), *path)\n with open(_source) as _FILE:\n exec(_FILE.read(), _module_globals)\n return _module_globals\n\ndef get_version():\n # Source pyomo/version/info.py to get the version number\n return import_pyomo_module('pyomo','version','info.py')['__version__']\n\nCYTHON_REQUIRED = \"required\"\nif not any(arg.startswith(cmd)\n for cmd in ('build','install','bdist') for arg in sys.argv):\n using_cython = False\nelse:\n using_cython = \"automatic\"\nif '--with-cython' in sys.argv:\n using_cython = CYTHON_REQUIRED\n sys.argv.remove('--with-cython')\nif '--without-cython' in sys.argv:\n using_cython = False\n sys.argv.remove('--without-cython')\n\next_modules = []\nif using_cython:\n try:\n if platform.python_implementation() != \"CPython\":\n # break out of this try-except (disable Cython)\n raise RuntimeError(\"Cython is only supported under CPython\")\n from Cython.Build import cythonize\n #\n # Note: The Cython developers recommend that you destribute C source\n # files to users. But this is fine for evaluating the utility of Cython\n #\n import shutil\n files = [\n \"pyomo/core/expr/numvalue.pyx\",\n \"pyomo/core/expr/numeric_expr.pyx\",\n \"pyomo/core/expr/logical_expr.pyx\",\n #\"pyomo/core/expr/visitor.pyx\",\n \"pyomo/core/util.pyx\",\n \"pyomo/repn/standard_repn.pyx\",\n \"pyomo/repn/plugins/cpxlp.pyx\",\n \"pyomo/repn/plugins/gams_writer.pyx\",\n \"pyomo/repn/plugins/baron_writer.pyx\",\n \"pyomo/repn/plugins/ampl/ampl_.pyx\",\n ]\n for f in files:\n shutil.copyfile(f[:-1], f)\n ext_modules = cythonize(files,\n compiler_directives={\"language_level\": 3})\n except:\n if using_cython == CYTHON_REQUIRED:\n print(\"\"\"\nERROR: Cython was explicitly requested with --with-cython, but cythonization\n of core Pyomo modules failed.\n\"\"\")\n raise\n using_cython = False\n\nif (('--with-distributable-extensions' in sys.argv)\n or (os.getenv('PYOMO_SETUP_ARGS') is not None and\n '--with-distributable-extensions' in os.getenv('PYOMO_SETUP_ARGS'))):\n try:\n sys.argv.remove('--with-distributable-extensions')\n except:\n pass\n #\n # Import the APPSI extension builder\n #\n appsi_extension = import_pyomo_module(\n 'pyomo', 'contrib', 'appsi', 'build.py')['get_appsi_extension'](\n in_setup=True, appsi_root=os.path.join(\n os.path.dirname(__file__), 'pyomo', 'contrib', 'appsi'))\n ext_modules.append(appsi_extension)\n\n\nclass DependenciesCommand(Command):\n \"\"\"Custom setuptools command\n\n This will output the list of dependencies, including any optional\n dependencies for 'extras_require` targets. 
This is needed so that\n we can (relatively) easily extract what `pip install '.[optional]'`\n would have done so that we can pass it on to a 'conda install'\n command when setting up Pyomo testing in a conda environment\n (because conda for all intents does not acknowledge\n `extras_require`).\n\n \"\"\"\n description = \"list the dependencies for this package\"\n user_options = [\n ('extras=', None, 'extra targets to include'),\n ]\n\n def initialize_options(self):\n self.extras = None\n\n def finalize_options(self):\n if self.extras is not None:\n self.extras = [\n e for e in (_.strip() for _ in self.extras.split(',')) if e\n ]\n for e in self.extras:\n if e not in setup_kwargs['extras_require']:\n raise DistutilsOptionError(\n \"extras can only include {%s}\"\n % (', '.join(setup_kwargs['extras_require'])))\n\n def run(self):\n deps = list(self._print_deps(setup_kwargs['install_requires']))\n if self.extras is not None:\n for e in self.extras:\n deps.extend(self._print_deps(setup_kwargs['extras_require'][e]))\n print(' '.join(deps))\n\n def _print_deps(self, deplist):\n implementation_name = sys.implementation.name\n platform_system = platform.system()\n python_version = '.'.join(platform.python_version_tuple()[:2])\n for entry in deplist:\n dep, _, condition = (_.strip() for _ in entry.partition(';'))\n if condition and not eval(condition):\n continue\n yield dep\n\n\nsetup_kwargs = dict(\n name = 'Pyomo',\n #\n # Note: the release number is set in pyomo/version/info.py\n #\n cmdclass = {'dependencies': DependenciesCommand},\n version = get_version(),\n maintainer = 'Pyomo Developer Team',\n maintainer_email = 'pyomo-developers@googlegroups.com',\n url = 'http://pyomo.org',\n license = 'BSD',\n platforms = [\"any\"],\n description = 'Pyomo: Python Optimization Modeling Objects',\n long_description = read('README.md'),\n long_description_content_type = 'text/markdown',\n keywords = ['optimization'],\n classifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: MacOS',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: Unix',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules' ],\n python_requires = '>=3.7',\n install_requires = [\n 'ply',\n ],\n extras_require = {\n 'tests': [\n 'coverage',\n 'pytest',\n 'pytest-parallel',\n 'parameterized',\n 'pybind11',\n ],\n 'docs': [\n 'Sphinx>2',\n 'sphinx-copybutton',\n 'sphinx_rtd_theme>0.5',\n 'sphinxcontrib-jsmath',\n 'sphinxcontrib-napoleon',\n 'numpy', # Needed by autodoc for pynumero\n ],\n 'optional': [\n 'dill', # No direct use, but improves lambda pickle\n 'ipython', # contrib.viewer\n 'matplotlib',\n 'networkx', # network, incidence_analysis, community_detection\n 'numpy',\n 'openpyxl', # dataportals\n #'pathos', # requested for #963, but PR currently closed\n 'pint', # units\n 'python-louvain', # community_detection\n 'pyyaml', # core\n 'sympy', # differentiation\n 'xlrd', # dataportals\n 
'z3-solver', # community_detection\n #\n # subprocess output is merged more reliably if\n # 'PeekNamedPipe' is available from pywin32\n 'pywin32; platform_system==\"Windows\"',\n #\n # The following optional dependencies are difficult to\n # install on PyPy (binary wheels are not available), so we\n # will only \"require\" them on other (CPython) platforms:\n 'casadi; implementation_name!=\"pypy\"', # dae\n 'numdifftools; implementation_name!=\"pypy\"', # pynumero\n 'pandas; implementation_name!=\"pypy\"',\n 'scipy; implementation_name!=\"pypy\"',\n 'seaborn; implementation_name!=\"pypy\"', # parmest.graphics\n ],\n },\n packages = find_packages(exclude=(\"scripts\",)),\n package_data = {\n \"pyomo.contrib.appsi.cmodel\": [\"src/*\"],\n \"pyomo.contrib.mcpp\": [\"*.cpp\"],\n \"pyomo.contrib.pynumero\": ['src/*', 'src/tests/*'],\n \"pyomo.contrib.viewer\": [\"*.ui\"],\n },\n ext_modules = ext_modules,\n entry_points = \"\"\"\n [console_scripts]\n pyomo = pyomo.scripting.pyomo_main:main_console_script\n\n [pyomo.command]\n pyomo.help = pyomo.scripting.driver_help\n pyomo.viewer=pyomo.contrib.viewer.pyomo_viewer\n \"\"\"\n)\n\n\ntry:\n setup(**setup_kwargs)\nexcept SystemExit as e_info:\n # Cython can generate a SystemExit exception on Windows if the\n # environment is missing / has an incorrect Microsoft compiler.\n # Since Cython is not strictly required, we will disable Cython and\n # try re-running setup(), but only for this very specific situation.\n if 'Microsoft Visual C++' not in str(e_info):\n raise\n elif using_cython == CYTHON_REQUIRED:\n print(\"\"\"\nERROR: Cython was explicitly requested with --with-cython, but cythonization\n of core Pyomo modules failed.\n\"\"\")\n raise\n else:\n print(\"\"\"\nERROR: setup() failed:\n %s\nRe-running setup() without the Cython modules\n\"\"\" % (str(e_info),))\n setup_kwargs['ext_modules'] = []\n setup(**setup_kwargs)\n print(\"\"\"\nWARNING: Installation completed successfully, but the attempt to cythonize\n core Pyomo modules failed. Cython provides performance\n optimizations and is not required for any Pyomo functionality.\n Cython returned the following error:\n \"%s\"\n\"\"\" % (str(e_info),))\n", "path": "setup.py"}], "after_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. 
Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\n\"\"\"\nScript to generate the installer for pyomo.\n\"\"\"\n\nimport os\nimport platform\nimport sys\nfrom setuptools import setup, find_packages, Command\ntry:\n from setuptools import DistutilsOptionError\nexcept ImportError:\n from distutils.errors import DistutilsOptionError\n\ndef read(*rnames):\n with open(os.path.join(os.path.dirname(__file__), *rnames)) as README:\n # Strip all leading badges up to, but not including the COIN-OR\n # badge so that they do not appear in the PyPI description\n while True:\n line = README.readline()\n if 'COIN-OR' in line:\n break\n if line.strip() and '[![' not in line:\n break\n return line + README.read()\n\ndef import_pyomo_module(*path):\n _module_globals = dict(globals())\n _module_globals['__name__'] = None\n _source = os.path.join(os.path.dirname(__file__), *path)\n with open(_source) as _FILE:\n exec(_FILE.read(), _module_globals)\n return _module_globals\n\ndef get_version():\n # Source pyomo/version/info.py to get the version number\n return import_pyomo_module('pyomo','version','info.py')['__version__']\n\nCYTHON_REQUIRED = \"required\"\nif not any(arg.startswith(cmd)\n for cmd in ('build','install','bdist') for arg in sys.argv):\n using_cython = False\nelse:\n using_cython = \"automatic\"\nif '--with-cython' in sys.argv:\n using_cython = CYTHON_REQUIRED\n sys.argv.remove('--with-cython')\nif '--without-cython' in sys.argv:\n using_cython = False\n sys.argv.remove('--without-cython')\n\next_modules = []\nif using_cython:\n try:\n if platform.python_implementation() != \"CPython\":\n # break out of this try-except (disable Cython)\n raise RuntimeError(\"Cython is only supported under CPython\")\n from Cython.Build import cythonize\n #\n # Note: The Cython developers recommend that you destribute C source\n # files to users. But this is fine for evaluating the utility of Cython\n #\n import shutil\n files = [\n \"pyomo/core/expr/numvalue.pyx\",\n \"pyomo/core/expr/numeric_expr.pyx\",\n \"pyomo/core/expr/logical_expr.pyx\",\n #\"pyomo/core/expr/visitor.pyx\",\n \"pyomo/core/util.pyx\",\n \"pyomo/repn/standard_repn.pyx\",\n \"pyomo/repn/plugins/cpxlp.pyx\",\n \"pyomo/repn/plugins/gams_writer.pyx\",\n \"pyomo/repn/plugins/baron_writer.pyx\",\n \"pyomo/repn/plugins/ampl/ampl_.pyx\",\n ]\n for f in files:\n shutil.copyfile(f[:-1], f)\n ext_modules = cythonize(files,\n compiler_directives={\"language_level\": 3})\n except:\n if using_cython == CYTHON_REQUIRED:\n print(\"\"\"\nERROR: Cython was explicitly requested with --with-cython, but cythonization\n of core Pyomo modules failed.\n\"\"\")\n raise\n using_cython = False\n\nif (('--with-distributable-extensions' in sys.argv)\n or (os.getenv('PYOMO_SETUP_ARGS') is not None and\n '--with-distributable-extensions' in os.getenv('PYOMO_SETUP_ARGS'))):\n try:\n sys.argv.remove('--with-distributable-extensions')\n except:\n pass\n #\n # Import the APPSI extension builder\n #\n appsi_extension = import_pyomo_module(\n 'pyomo', 'contrib', 'appsi', 'build.py')['get_appsi_extension'](\n in_setup=True, appsi_root=os.path.join(\n os.path.dirname(__file__), 'pyomo', 'contrib', 'appsi'))\n ext_modules.append(appsi_extension)\n\n\nclass DependenciesCommand(Command):\n \"\"\"Custom setuptools command\n\n This will output the list of dependencies, including any optional\n dependencies for 'extras_require` targets. 
This is needed so that\n we can (relatively) easily extract what `pip install '.[optional]'`\n would have done so that we can pass it on to a 'conda install'\n command when setting up Pyomo testing in a conda environment\n (because conda for all intents does not acknowledge\n `extras_require`).\n\n \"\"\"\n description = \"list the dependencies for this package\"\n user_options = [\n ('extras=', None, 'extra targets to include'),\n ]\n\n def initialize_options(self):\n self.extras = None\n\n def finalize_options(self):\n if self.extras is not None:\n self.extras = [\n e for e in (_.strip() for _ in self.extras.split(',')) if e\n ]\n for e in self.extras:\n if e not in setup_kwargs['extras_require']:\n raise DistutilsOptionError(\n \"extras can only include {%s}\"\n % (', '.join(setup_kwargs['extras_require'])))\n\n def run(self):\n deps = list(self._print_deps(setup_kwargs['install_requires']))\n if self.extras is not None:\n for e in self.extras:\n deps.extend(self._print_deps(setup_kwargs['extras_require'][e]))\n print(' '.join(deps))\n\n def _print_deps(self, deplist):\n implementation_name = sys.implementation.name\n platform_system = platform.system()\n python_version = '.'.join(platform.python_version_tuple()[:2])\n for entry in deplist:\n dep, _, condition = (_.strip() for _ in entry.partition(';'))\n if condition and not eval(condition):\n continue\n yield dep\n\n\nsetup_kwargs = dict(\n name = 'Pyomo',\n #\n # Note: the release number is set in pyomo/version/info.py\n #\n cmdclass = {'dependencies': DependenciesCommand},\n version = get_version(),\n maintainer = 'Pyomo Developer Team',\n maintainer_email = 'pyomo-developers@googlegroups.com',\n url = 'http://pyomo.org',\n license = 'BSD',\n platforms = [\"any\"],\n description = 'Pyomo: Python Optimization Modeling Objects',\n long_description = read('README.md'),\n long_description_content_type = 'text/markdown',\n keywords = ['optimization'],\n classifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: MacOS',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: Unix',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules' ],\n python_requires = '>=3.7',\n install_requires = [\n 'ply',\n ],\n extras_require = {\n 'tests': [\n 'coverage',\n 'pytest',\n 'pytest-parallel',\n 'parameterized',\n 'pybind11',\n ],\n 'docs': [\n 'Sphinx>2',\n 'sphinx-copybutton',\n 'sphinx_rtd_theme>0.5',\n 'sphinxcontrib-jsmath',\n 'sphinxcontrib-napoleon',\n 'numpy', # Needed by autodoc for pynumero\n ],\n 'optional': [\n 'dill', # No direct use, but improves lambda pickle\n 'ipython', # contrib.viewer\n 'matplotlib',\n 'networkx', # network, incidence_analysis, community_detection\n 'numpy',\n 'openpyxl', # dataportals\n #'pathos', # requested for #963, but PR currently closed\n 'pint', # units\n 'python-louvain', # community_detection\n 'pyyaml', # core\n 'sympy', # differentiation\n 'xlrd', # dataportals\n 
'z3-solver', # community_detection\n #\n # subprocess output is merged more reliably if\n # 'PeekNamedPipe' is available from pywin32\n 'pywin32; platform_system==\"Windows\"',\n #\n # The following optional dependencies are difficult to\n # install on PyPy (binary wheels are not available), so we\n # will only \"require\" them on other (CPython) platforms:\n 'casadi; implementation_name!=\"pypy\"', # dae\n 'numdifftools; implementation_name!=\"pypy\"', # pynumero\n 'pandas; implementation_name!=\"pypy\"',\n 'scipy; implementation_name!=\"pypy\"',\n 'seaborn; implementation_name!=\"pypy\"', # parmest.graphics\n ],\n },\n packages = find_packages(exclude=(\"scripts\",)),\n package_data = {\n \"pyomo.contrib.ampl_function_demo\": [\"src/*\"],\n \"pyomo.contrib.appsi.cmodel\": [\"src/*\"],\n \"pyomo.contrib.mcpp\": [\"*.cpp\"],\n \"pyomo.contrib.pynumero\": ['src/*', 'src/tests/*'],\n \"pyomo.contrib.viewer\": [\"*.ui\"],\n },\n ext_modules = ext_modules,\n entry_points = \"\"\"\n [console_scripts]\n pyomo = pyomo.scripting.pyomo_main:main_console_script\n\n [pyomo.command]\n pyomo.help = pyomo.scripting.driver_help\n pyomo.viewer=pyomo.contrib.viewer.pyomo_viewer\n \"\"\"\n)\n\n\ntry:\n setup(**setup_kwargs)\nexcept SystemExit as e_info:\n # Cython can generate a SystemExit exception on Windows if the\n # environment is missing / has an incorrect Microsoft compiler.\n # Since Cython is not strictly required, we will disable Cython and\n # try re-running setup(), but only for this very specific situation.\n if 'Microsoft Visual C++' not in str(e_info):\n raise\n elif using_cython == CYTHON_REQUIRED:\n print(\"\"\"\nERROR: Cython was explicitly requested with --with-cython, but cythonization\n of core Pyomo modules failed.\n\"\"\")\n raise\n else:\n print(\"\"\"\nERROR: setup() failed:\n %s\nRe-running setup() without the Cython modules\n\"\"\" % (str(e_info),))\n setup_kwargs['ext_modules'] = []\n setup(**setup_kwargs)\n print(\"\"\"\nWARNING: Installation completed successfully, but the attempt to cythonize\n core Pyomo modules failed. Cython provides performance\n optimizations and is not required for any Pyomo functionality.\n Cython returned the following error:\n \"%s\"\n\"\"\" % (str(e_info),))\n", "path": "setup.py"}]} | 4,031 | 112 |
gh_patches_debug_23504 | rasdani/github-patches | git_diff | iterative__dvc-8197 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
installing from Ubuntu repo does not install s3 adapter
# Bug Report
DVC version 2.6.3
(screenshot from the original issue omitted)
DVC version 2.21.1
(screenshot from the original issue omitted)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/pyinstaller/build.py`
Content:
```
1 import os
2 import pathlib
3 from subprocess import STDOUT, check_call
4
5 path = pathlib.Path(__file__).parent.absolute()
6 hooks = path / "hooks"
7 dvc = path.parent.parent / "dvc"
8 entry = dvc / "__main__.py"
9
10 check_call(
11 [
12 "pyinstaller",
13 "--additional-hooks-dir",
14 os.fspath(hooks),
15 "--name",
16 "dvc",
17 "-y",
18 os.fspath(entry),
19 ],
20 cwd=path,
21 stderr=STDOUT,
22 )
23
24 check_call(
25 [
26 path / "dist" / "dvc" / "dvc",
27 "doctor",
28 ],
29 stderr=STDOUT,
30 )
31
```
Path: `scripts/pyinstaller/hooks/hook-dvc.py`
Content:
```
1 from PyInstaller.utils.hooks import ( # pylint:disable=import-error
2 copy_metadata,
3 )
4
5 # needed for `dvc doctor` to show dep versions
6 datas = copy_metadata("adlfs", recursive=True)
7 datas += copy_metadata("knack")
8 datas += copy_metadata("gcsfs")
9 datas += copy_metadata("pyarrow")
10 datas += copy_metadata("pydrive2")
11 datas += copy_metadata("s3fs", recursive=True)
12 datas += copy_metadata("boto3")
13 datas += copy_metadata("ossfs")
14 datas += copy_metadata("sshfs")
15 datas += copy_metadata("webdav4")
16 datas += copy_metadata("aiohttp")
17 datas += copy_metadata("aiohttp_retry")
18
19 # https://github.com/pypa/setuptools/issues/1963
20 hiddenimports = ["pkg_resources.py2_warn"]
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/pyinstaller/build.py b/scripts/pyinstaller/build.py
--- a/scripts/pyinstaller/build.py
+++ b/scripts/pyinstaller/build.py
@@ -1,6 +1,6 @@
import os
import pathlib
-from subprocess import STDOUT, check_call
+from subprocess import STDOUT, check_call, check_output
path = pathlib.Path(__file__).parent.absolute()
hooks = path / "hooks"
@@ -21,10 +21,27 @@
stderr=STDOUT,
)
-check_call(
+out = check_output(
[
path / "dist" / "dvc" / "dvc",
"doctor",
],
stderr=STDOUT,
-)
+).decode()
+
+remotes = [
+ "s3",
+ "oss",
+ "gdrive",
+ "gs",
+ "hdfs",
+ "http",
+ "webhdfs",
+ "azure",
+ "ssh",
+ "webdav",
+]
+
+print(out)
+for remote in remotes:
+ assert f"\t{remote}" in out, f"Missing support for {remote}"
diff --git a/scripts/pyinstaller/hooks/hook-dvc.py b/scripts/pyinstaller/hooks/hook-dvc.py
--- a/scripts/pyinstaller/hooks/hook-dvc.py
+++ b/scripts/pyinstaller/hooks/hook-dvc.py
@@ -16,5 +16,15 @@
datas += copy_metadata("aiohttp")
datas += copy_metadata("aiohttp_retry")
-# https://github.com/pypa/setuptools/issues/1963
-hiddenimports = ["pkg_resources.py2_warn"]
+hiddenimports = [
+ "dvc_azure",
+ "dvc_gdrive",
+ "dvc_gs",
+ "dvc_hdfs",
+ "dvc_oss",
+ "dvc_s3",
+ "dvc_webdav",
+ "dvc_webhdfs",
+ # https://github.com/pypa/setuptools/issues/1963
+ "pkg_resources.py2_warn",
+]
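
The patch works on two fronts: PyInstaller only bundles modules it can find through static imports, so the dynamically loaded `dvc_s3`, `dvc_gs`, etc. remote plugins must be declared as `hiddenimports`, and the build script now fails fast if the frozen binary is missing any expected remote. A minimal sketch of that post-build check, assuming the path to a built `dvc` binary is available (illustrative only, not the exact build script):

```python
from subprocess import STDOUT, check_output

EXPECTED_REMOTES = ["s3", "oss", "gdrive", "gs", "hdfs", "http", "webhdfs", "azure", "ssh", "webdav"]

def assert_remotes_bundled(dvc_binary: str) -> None:
    # `dvc doctor` lists the supported remotes one per line, indented with a tab.
    out = check_output([dvc_binary, "doctor"], stderr=STDOUT).decode()
    for remote in EXPECTED_REMOTES:
        assert f"\t{remote}" in out, f"Missing support for {remote}"
```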
| {"golden_diff": "diff --git a/scripts/pyinstaller/build.py b/scripts/pyinstaller/build.py\n--- a/scripts/pyinstaller/build.py\n+++ b/scripts/pyinstaller/build.py\n@@ -1,6 +1,6 @@\n import os\n import pathlib\n-from subprocess import STDOUT, check_call\n+from subprocess import STDOUT, check_call, check_output\n \n path = pathlib.Path(__file__).parent.absolute()\n hooks = path / \"hooks\"\n@@ -21,10 +21,27 @@\n stderr=STDOUT,\n )\n \n-check_call(\n+out = check_output(\n [\n path / \"dist\" / \"dvc\" / \"dvc\",\n \"doctor\",\n ],\n stderr=STDOUT,\n-)\n+).decode()\n+\n+remotes = [\n+ \"s3\",\n+ \"oss\",\n+ \"gdrive\",\n+ \"gs\",\n+ \"hdfs\",\n+ \"http\",\n+ \"webhdfs\",\n+ \"azure\",\n+ \"ssh\",\n+ \"webdav\",\n+]\n+\n+print(out)\n+for remote in remotes:\n+ assert f\"\\t{remote}\" in out, f\"Missing support for {remote}\"\ndiff --git a/scripts/pyinstaller/hooks/hook-dvc.py b/scripts/pyinstaller/hooks/hook-dvc.py\n--- a/scripts/pyinstaller/hooks/hook-dvc.py\n+++ b/scripts/pyinstaller/hooks/hook-dvc.py\n@@ -16,5 +16,15 @@\n datas += copy_metadata(\"aiohttp\")\n datas += copy_metadata(\"aiohttp_retry\")\n \n-# https://github.com/pypa/setuptools/issues/1963\n-hiddenimports = [\"pkg_resources.py2_warn\"]\n+hiddenimports = [\n+ \"dvc_azure\",\n+ \"dvc_gdrive\",\n+ \"dvc_gs\",\n+ \"dvc_hdfs\",\n+ \"dvc_oss\",\n+ \"dvc_s3\",\n+ \"dvc_webdav\",\n+ \"dvc_webhdfs\",\n+ # https://github.com/pypa/setuptools/issues/1963\n+ \"pkg_resources.py2_warn\",\n+]\n", "issue": "installing from Ubuntu repo does not install s3 adapter\n# Bug Report\r\n\r\nDVC version 2.6.3\r\n\r\n\r\n\r\nDVC version 2.21.1\r\n\r\n\r\n\n", "before_files": [{"content": "import os\nimport pathlib\nfrom subprocess import STDOUT, check_call\n\npath = pathlib.Path(__file__).parent.absolute()\nhooks = path / \"hooks\"\ndvc = path.parent.parent / \"dvc\"\nentry = dvc / \"__main__.py\"\n\ncheck_call(\n [\n \"pyinstaller\",\n \"--additional-hooks-dir\",\n os.fspath(hooks),\n \"--name\",\n \"dvc\",\n \"-y\",\n os.fspath(entry),\n ],\n cwd=path,\n stderr=STDOUT,\n)\n\ncheck_call(\n [\n path / \"dist\" / \"dvc\" / \"dvc\",\n \"doctor\",\n ],\n stderr=STDOUT,\n)\n", "path": "scripts/pyinstaller/build.py"}, {"content": "from PyInstaller.utils.hooks import ( # pylint:disable=import-error\n copy_metadata,\n)\n\n# needed for `dvc doctor` to show dep versions\ndatas = copy_metadata(\"adlfs\", recursive=True)\ndatas += copy_metadata(\"knack\")\ndatas += copy_metadata(\"gcsfs\")\ndatas += copy_metadata(\"pyarrow\")\ndatas += copy_metadata(\"pydrive2\")\ndatas += copy_metadata(\"s3fs\", recursive=True)\ndatas += copy_metadata(\"boto3\")\ndatas += copy_metadata(\"ossfs\")\ndatas += copy_metadata(\"sshfs\")\ndatas += copy_metadata(\"webdav4\")\ndatas += copy_metadata(\"aiohttp\")\ndatas += copy_metadata(\"aiohttp_retry\")\n\n# https://github.com/pypa/setuptools/issues/1963\nhiddenimports = [\"pkg_resources.py2_warn\"]\n", "path": "scripts/pyinstaller/hooks/hook-dvc.py"}], "after_files": [{"content": "import os\nimport pathlib\nfrom subprocess import STDOUT, check_call, check_output\n\npath = pathlib.Path(__file__).parent.absolute()\nhooks = path / \"hooks\"\ndvc = path.parent.parent / \"dvc\"\nentry = dvc / \"__main__.py\"\n\ncheck_call(\n [\n \"pyinstaller\",\n \"--additional-hooks-dir\",\n os.fspath(hooks),\n \"--name\",\n \"dvc\",\n \"-y\",\n os.fspath(entry),\n ],\n cwd=path,\n stderr=STDOUT,\n)\n\nout = check_output(\n [\n path / \"dist\" / \"dvc\" / \"dvc\",\n \"doctor\",\n ],\n stderr=STDOUT,\n).decode()\n\nremotes = [\n \"s3\",\n \"oss\",\n 
\"gdrive\",\n \"gs\",\n \"hdfs\",\n \"http\",\n \"webhdfs\",\n \"azure\",\n \"ssh\",\n \"webdav\",\n]\n\nprint(out)\nfor remote in remotes:\n assert f\"\\t{remote}\" in out, f\"Missing support for {remote}\"\n", "path": "scripts/pyinstaller/build.py"}, {"content": "from PyInstaller.utils.hooks import ( # pylint:disable=import-error\n copy_metadata,\n)\n\n# needed for `dvc doctor` to show dep versions\ndatas = copy_metadata(\"adlfs\", recursive=True)\ndatas += copy_metadata(\"knack\")\ndatas += copy_metadata(\"gcsfs\")\ndatas += copy_metadata(\"pyarrow\")\ndatas += copy_metadata(\"pydrive2\")\ndatas += copy_metadata(\"s3fs\", recursive=True)\ndatas += copy_metadata(\"boto3\")\ndatas += copy_metadata(\"ossfs\")\ndatas += copy_metadata(\"sshfs\")\ndatas += copy_metadata(\"webdav4\")\ndatas += copy_metadata(\"aiohttp\")\ndatas += copy_metadata(\"aiohttp_retry\")\n\nhiddenimports = [\n \"dvc_azure\",\n \"dvc_gdrive\",\n \"dvc_gs\",\n \"dvc_hdfs\",\n \"dvc_oss\",\n \"dvc_s3\",\n \"dvc_webdav\",\n \"dvc_webhdfs\",\n # https://github.com/pypa/setuptools/issues/1963\n \"pkg_resources.py2_warn\",\n]\n", "path": "scripts/pyinstaller/hooks/hook-dvc.py"}]} | 837 | 456 |
gh_patches_debug_5606 | rasdani/github-patches | git_diff | ansible__ansible-lint-477 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
False positive EANSIBLE0014 also flags vars in shell task
# Issue Type
- Bug report
# Ansible and Ansible Lint details
```
ansible --version
ansible 2.3.0.0
ansible-lint --version
ansible-lint 3.4.13
```
- ansible installation method: pip
- ansible-lint installation method: pip
# Desired Behaviour
EANSIBLE0014 should only validate `command` tasks, not `shell` tasks.
# Actual Behaviour (Bug report only)
When ansible-lint validates a playbook with shell tasks that set env vars inline
```
- hosts: "localhost"
gather_facts: no
become: no
tasks:
- shell: 'MYVAR="$(date)" env | grep MY'
```
it fails and complains that env vars shouldn't be part of the command:
```
test-play.yaml:5: [EANSIBLE0014] Environment variables don't work as part of command
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/ansiblelint/rules/EnvVarsInCommandRule.py`
Content:
```
1 # Copyright (c) 2016 Will Thames <will@thames.id.au>
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to deal
5 # in the Software without restriction, including without limitation the rights
6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 # copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19 # THE SOFTWARE.
20
21 from ansiblelint import AnsibleLintRule
22 from ansiblelint.utils import LINE_NUMBER_KEY, FILENAME_KEY, get_first_cmd_arg
23
24
25 class EnvVarsInCommandRule(AnsibleLintRule):
26 id = '304'
27 shortdesc = "Environment variables don't work as part of command"
28 description = (
29 'Environment variables should be passed to ``shell`` or ``command`` '
30 'through environment argument'
31 )
32 severity = 'VERY_HIGH'
33 tags = ['command-shell', 'bug', 'ANSIBLE0014']
34 version_added = 'historic'
35
36 expected_args = ['chdir', 'creates', 'executable', 'removes', 'stdin', 'warn',
37 'cmd', '__ansible_module__', '__ansible_arguments__',
38 LINE_NUMBER_KEY, FILENAME_KEY]
39
40 def matchtask(self, file, task):
41 if task["action"]["__ansible_module__"] in ['shell', 'command']:
42 first_cmd_arg = get_first_cmd_arg(task)
43 if not first_cmd_arg:
44 return
45
46 return any([arg not in self.expected_args for arg in task['action']] +
47 ["=" in first_cmd_arg])
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/ansiblelint/rules/EnvVarsInCommandRule.py b/lib/ansiblelint/rules/EnvVarsInCommandRule.py
--- a/lib/ansiblelint/rules/EnvVarsInCommandRule.py
+++ b/lib/ansiblelint/rules/EnvVarsInCommandRule.py
@@ -38,7 +38,7 @@
LINE_NUMBER_KEY, FILENAME_KEY]
def matchtask(self, file, task):
- if task["action"]["__ansible_module__"] in ['shell', 'command']:
+ if task["action"]["__ansible_module__"] in ['command']:
first_cmd_arg = get_first_cmd_arg(task)
if not first_cmd_arg:
return
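
The fix narrows rule 304 to the `command` module: `shell` tasks run through a real shell, where a leading `MYVAR=value` assignment is legitimate syntax, while `command` executes the program directly and would treat the assignment as an ordinary argument. A toy restatement of the intended behaviour, with made-up helper names rather than the real ansible-lint API:

```python
def should_flag(module: str, first_cmd_arg: str) -> bool:
    # Only `command` bypasses the shell, so an inline VAR=value prefix is a bug there;
    # for `shell` the interpreter handles the assignment and no warning is wanted.
    return module == "command" and "=" in first_cmd_arg

assert should_flag("command", "MYVAR=1")            # still reported
assert not should_flag("shell", 'MYVAR="$(date)"')  # no longer a false positive
```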
| {"golden_diff": "diff --git a/lib/ansiblelint/rules/EnvVarsInCommandRule.py b/lib/ansiblelint/rules/EnvVarsInCommandRule.py\n--- a/lib/ansiblelint/rules/EnvVarsInCommandRule.py\n+++ b/lib/ansiblelint/rules/EnvVarsInCommandRule.py\n@@ -38,7 +38,7 @@\n LINE_NUMBER_KEY, FILENAME_KEY]\n \n def matchtask(self, file, task):\n- if task[\"action\"][\"__ansible_module__\"] in ['shell', 'command']:\n+ if task[\"action\"][\"__ansible_module__\"] in ['command']:\n first_cmd_arg = get_first_cmd_arg(task)\n if not first_cmd_arg:\n return\n", "issue": "False positive EANSIBLE0014 also flags vars in shell task\n# Issue Type\r\n- Bug report\r\n\r\n# Ansible and Ansible Lint details\r\n```\r\nansible --version\r\nansible 2.3.0.0\r\nansible-lint --version\r\nansible-lint 3.4.13\r\n```\r\n\r\n- ansible installation method: pip\r\n- ansible-lint installation method: pip\r\n\r\n# Desired Behaviour\r\n\r\nEANSIBLE0014 should validate only command task, not shell.\r\n\r\n# Actual Behaviour (Bug report only)\r\n\r\nWhen ansible-lint validating playbook with shell tasks with env vars\r\n```\r\n- hosts: \"localhost\"\r\n gather_facts: no\r\n become: no\r\n tasks:\r\n - shell: 'MYVAR=\"$(date)\" env | grep MY'\r\n```\r\nit fails and complains about Env vars shouldn't be in command\r\n```\r\ntest-play.yaml:5: [EANSIBLE0014] Environment variables don't work as part of command\r\n```\r\n\n", "before_files": [{"content": "# Copyright (c) 2016 Will Thames <will@thames.id.au>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom ansiblelint import AnsibleLintRule\nfrom ansiblelint.utils import LINE_NUMBER_KEY, FILENAME_KEY, get_first_cmd_arg\n\n\nclass EnvVarsInCommandRule(AnsibleLintRule):\n id = '304'\n shortdesc = \"Environment variables don't work as part of command\"\n description = (\n 'Environment variables should be passed to ``shell`` or ``command`` '\n 'through environment argument'\n )\n severity = 'VERY_HIGH'\n tags = ['command-shell', 'bug', 'ANSIBLE0014']\n version_added = 'historic'\n\n expected_args = ['chdir', 'creates', 'executable', 'removes', 'stdin', 'warn',\n 'cmd', '__ansible_module__', '__ansible_arguments__',\n LINE_NUMBER_KEY, FILENAME_KEY]\n\n def matchtask(self, file, task):\n if task[\"action\"][\"__ansible_module__\"] in ['shell', 'command']:\n first_cmd_arg = get_first_cmd_arg(task)\n if not first_cmd_arg:\n return\n\n return any([arg not in self.expected_args for arg in task['action']] +\n [\"=\" in first_cmd_arg])\n", "path": "lib/ansiblelint/rules/EnvVarsInCommandRule.py"}], "after_files": [{"content": "# Copyright (c) 2016 Will Thames <will@thames.id.au>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom ansiblelint import AnsibleLintRule\nfrom ansiblelint.utils import LINE_NUMBER_KEY, FILENAME_KEY, get_first_cmd_arg\n\n\nclass EnvVarsInCommandRule(AnsibleLintRule):\n id = '304'\n shortdesc = \"Environment variables don't work as part of command\"\n description = (\n 'Environment variables should be passed to ``shell`` or ``command`` '\n 'through environment argument'\n )\n severity = 'VERY_HIGH'\n tags = ['command-shell', 'bug', 'ANSIBLE0014']\n version_added = 'historic'\n\n expected_args = ['chdir', 'creates', 'executable', 'removes', 'stdin', 'warn',\n 'cmd', '__ansible_module__', '__ansible_arguments__',\n LINE_NUMBER_KEY, FILENAME_KEY]\n\n def matchtask(self, file, task):\n if task[\"action\"][\"__ansible_module__\"] in ['command']:\n first_cmd_arg = get_first_cmd_arg(task)\n if not first_cmd_arg:\n return\n\n return any([arg not in self.expected_args for arg in task['action']] +\n [\"=\" in first_cmd_arg])\n", "path": "lib/ansiblelint/rules/EnvVarsInCommandRule.py"}]} | 1,051 | 148 |
gh_patches_debug_29604 | rasdani/github-patches | git_diff | sublimelsp__LSP-1310 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[LSP-elm] Documentation popup is no longer visible when "More" link is clicked
* OS and language server - Ubuntu 20.04, LSP-elm, ST 4085 
* How you installed LSP - git, latest st-4000-exploration
When clicking the `More` link in the AC popup,
I expect to see a documentation popup.
But I see nothing.
This behavior was introduced by commit 19df9e19afeb0f32064a8b7e3a11ebaa4254f63c.
If I check out the commit before 19df9e19afeb0f32064a8b7e3a11ebaa4254f63c, everything works as expected.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugin/completion.py`
Content:
```
1 import mdpopups
2 import sublime
3 import sublime_plugin
4 import webbrowser
5 from .core.css import css
6 from .core.logging import debug
7 from .core.edit import parse_text_edit
8 from .core.protocol import Request, InsertTextFormat, Range
9 from .core.registry import LspTextCommand
10 from .core.typing import Any, List, Dict, Optional, Generator, Union
11 from .core.views import FORMAT_STRING, FORMAT_MARKUP_CONTENT, minihtml
12 from .core.views import range_to_region
13
14
15 class LspResolveDocsCommand(LspTextCommand):
16
17 completions = [] # type: List[Dict[str, Any]]
18
19 def run(self, edit: sublime.Edit, index: int, event: Optional[dict] = None) -> None:
20 item = self.completions[index]
21 detail = self.format_documentation(item.get('detail') or "")
22 documentation = self.format_documentation(item.get("documentation") or "")
23 # don't show the detail in the cooperate AC popup if it is already shown in the AC details filed.
24 self.is_detail_shown = bool(detail)
25 if not detail or not documentation:
26 # To make sure that the detail or documentation fields doesn't exist we need to resove the completion item.
27 # If those fields appear after the item is resolved we show them in the popup.
28 self.do_resolve(item)
29 else:
30 minihtml_content = self.get_content(documentation, detail)
31 self.show_popup(minihtml_content)
32
33 def format_documentation(self, content: Union[str, Dict[str, str]]) -> str:
34 return minihtml(self.view, content, allowed_formats=FORMAT_STRING | FORMAT_MARKUP_CONTENT)
35
36 def get_content(self, documentation: str, detail: str) -> str:
37 content = ""
38 if detail and not self.is_detail_shown:
39 content += "<div class='highlight'>{}</div>".format(detail)
40 if documentation:
41 content += "<div>{}</div>".format(documentation)
42 return content
43
44 def show_popup(self, minihtml_content: str) -> None:
45 viewport_width = self.view.viewport_extent()[0]
46 mdpopups.show_popup(
47 self.view,
48 minihtml_content,
49 flags=sublime.COOPERATE_WITH_AUTO_COMPLETE,
50 css=css().popups,
51 wrapper_class=css().popups_classname,
52 max_width=viewport_width,
53 on_navigate=self.on_navigate
54 )
55
56 def on_navigate(self, url: str) -> None:
57 webbrowser.open(url)
58
59 def do_resolve(self, item: dict) -> None:
60 session = self.best_session('completionProvider.resolveProvider')
61 if session:
62 session.send_request(
63 Request.resolveCompletionItem(item),
64 lambda res: self.handle_resolve_response(res))
65
66 def handle_resolve_response(self, item: Optional[dict]) -> None:
67 detail = ""
68 documentation = ""
69 if item:
70 detail = self.format_documentation(item.get('detail') or "")
71 documentation = self.format_documentation(item.get("documentation") or "")
72 if not documentation:
73 documentation = self.format_documentation({"kind": "markdown", "value": "*No documentation available.*"})
74 minihtml_content = self.get_content(documentation, detail)
75 show = self.update_popup if self.view.is_popup_visible() else self.show_popup
76 # NOTE: Update/show popups from the main thread, or else the popup might make the AC widget disappear.
77 sublime.set_timeout(lambda: show(minihtml_content))
78
79 def update_popup(self, minihtml_content: str) -> None:
80 mdpopups.update_popup(
81 self.view,
82 minihtml_content,
83 css=css().popups,
84 wrapper_class=css().popups_classname,
85 )
86
87
88 class LspCompleteCommand(sublime_plugin.TextCommand):
89
90 def epilogue(self, item: Dict[str, Any]) -> None:
91 additional_edits = item.get('additionalTextEdits')
92 if additional_edits:
93 edits = [parse_text_edit(additional_edit) for additional_edit in additional_edits]
94 self.view.run_command("lsp_apply_document_edit", {'changes': edits})
95 command = item.get("command")
96 if command:
97 debug('Running server command "{}" for view {}'.format(command, self.view.id()))
98 self.view.run_command("lsp_execute", {"command_name": command})
99
100
101 class LspCompleteInsertTextCommand(LspCompleteCommand):
102
103 def run(self, edit: sublime.Edit, **item: Any) -> None:
104 insert_text = item.get("insertText") or item["label"]
105 if item.get("insertTextFormat", InsertTextFormat.PlainText) == InsertTextFormat.Snippet:
106 self.view.run_command("insert_snippet", {"contents": insert_text})
107 else:
108 self.view.run_command("insert", {"characters": insert_text})
109 self.epilogue(item)
110
111
112 class LspCompleteTextEditCommand(LspCompleteCommand):
113
114 def run(self, edit: sublime.Edit, **item: Any) -> None:
115 text_edit = item["textEdit"]
116 new_text = text_edit['newText']
117 edit_region = range_to_region(Range.from_lsp(text_edit['range']), self.view)
118 if item.get("insertTextFormat", InsertTextFormat.PlainText) == InsertTextFormat.Snippet:
119 for region in self.translated_regions(edit_region):
120 self.view.erase(edit, region)
121 self.view.run_command("insert_snippet", {"contents": new_text})
122 else:
123 for region in self.translated_regions(edit_region):
124 # NOTE: Cannot do .replace, because ST will select the replacement.
125 self.view.erase(edit, region)
126 self.view.insert(edit, region.a, new_text)
127 self.epilogue(item)
128
129 def translated_regions(self, edit_region: sublime.Region) -> Generator[sublime.Region, None, None]:
130 selection = self.view.sel()
131 primary_cursor_position = selection[0].b
132 for region in reversed(selection):
133 # For each selection region, apply the same removal as for the "primary" region.
134 # To do that, translate, or offset, the LSP edit region into the non-"primary" regions.
135 # The concept of "primary" is our own, and there is no mention of it in the LSP spec.
136 translation = region.b - primary_cursor_position
137 translated_edit_region = sublime.Region(edit_region.a + translation, edit_region.b + translation)
138 yield translated_edit_region
139
140
141 def resolve(completion_list: sublime.CompletionList, items: List[sublime.CompletionItem], flags: int = 0) -> None:
142 # Resolve the promise on the main thread to prevent any sort of data race for _set_target (see sublime_plugin.py).
143 sublime.set_timeout(lambda: completion_list.set_completions(items, flags))
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugin/completion.py b/plugin/completion.py
--- a/plugin/completion.py
+++ b/plugin/completion.py
@@ -25,10 +25,12 @@
if not detail or not documentation:
# To make sure that the detail or documentation fields doesn't exist we need to resove the completion item.
# If those fields appear after the item is resolved we show them in the popup.
- self.do_resolve(item)
- else:
- minihtml_content = self.get_content(documentation, detail)
- self.show_popup(minihtml_content)
+ session = self.best_session('completionProvider.resolveProvider')
+ if session:
+ session.send_request(Request.resolveCompletionItem(item), self.handle_resolve_response)
+ return
+ minihtml_content = self.get_content(documentation, detail)
+ self.show_popup(minihtml_content)
def format_documentation(self, content: Union[str, Dict[str, str]]) -> str:
return minihtml(self.view, content, allowed_formats=FORMAT_STRING | FORMAT_MARKUP_CONTENT)
@@ -56,13 +58,6 @@
def on_navigate(self, url: str) -> None:
webbrowser.open(url)
- def do_resolve(self, item: dict) -> None:
- session = self.best_session('completionProvider.resolveProvider')
- if session:
- session.send_request(
- Request.resolveCompletionItem(item),
- lambda res: self.handle_resolve_response(res))
-
def handle_resolve_response(self, item: Optional[dict]) -> None:
detail = ""
documentation = ""
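
Functionally, the patch folds `do_resolve` into `run`: when the item still lacks detail or documentation it sends `completionItem/resolve` and returns immediately, passing the bound `handle_resolve_response` method as the callback instead of wrapping it in a lambda, so the popup is only created once the resolved item arrives. A simplified restatement of the patched control flow (the `session` and `Request` objects are the plugin's own):

```python
def run(self, edit, index, event=None):
    item = self.completions[index]
    detail = self.format_documentation(item.get("detail") or "")
    documentation = self.format_documentation(item.get("documentation") or "")
    self.is_detail_shown = bool(detail)
    if not detail or not documentation:
        session = self.best_session("completionProvider.resolveProvider")
        if session:
            # Defer showing anything until handle_resolve_response fires.
            session.send_request(Request.resolveCompletionItem(item), self.handle_resolve_response)
            return
    self.show_popup(self.get_content(documentation, detail))
```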
| {"golden_diff": "diff --git a/plugin/completion.py b/plugin/completion.py\n--- a/plugin/completion.py\n+++ b/plugin/completion.py\n@@ -25,10 +25,12 @@\n if not detail or not documentation:\n # To make sure that the detail or documentation fields doesn't exist we need to resove the completion item.\n # If those fields appear after the item is resolved we show them in the popup.\n- self.do_resolve(item)\n- else:\n- minihtml_content = self.get_content(documentation, detail)\n- self.show_popup(minihtml_content)\n+ session = self.best_session('completionProvider.resolveProvider')\n+ if session:\n+ session.send_request(Request.resolveCompletionItem(item), self.handle_resolve_response)\n+ return\n+ minihtml_content = self.get_content(documentation, detail)\n+ self.show_popup(minihtml_content)\n \n def format_documentation(self, content: Union[str, Dict[str, str]]) -> str:\n return minihtml(self.view, content, allowed_formats=FORMAT_STRING | FORMAT_MARKUP_CONTENT)\n@@ -56,13 +58,6 @@\n def on_navigate(self, url: str) -> None:\n webbrowser.open(url)\n \n- def do_resolve(self, item: dict) -> None:\n- session = self.best_session('completionProvider.resolveProvider')\n- if session:\n- session.send_request(\n- Request.resolveCompletionItem(item),\n- lambda res: self.handle_resolve_response(res))\n-\n def handle_resolve_response(self, item: Optional[dict]) -> None:\n detail = \"\"\n documentation = \"\"\n", "issue": "[LSP-elm] Documentation popup is no logner visible when \"More\" link is clicked\n* OS and language server - Ubunutu 20.04, LSP-elm, ST 4085 \r\n* How you installed LSP - git, latest st-4000-exploration\r\n\r\nWhen clicking the `More` link in the AC popup,\r\nI expect to see a documentation popup.\r\nBut I see nothing. \r\n\r\nThis commit introduced this behavior 19df9e19afeb0f32064a8b7e3a11ebaa4254f63c\r\nIf I checkout the commit before 19df9e19afeb0f32064a8b7e3a11ebaa4254f63c, everything works as expected.\n", "before_files": [{"content": "import mdpopups\nimport sublime\nimport sublime_plugin\nimport webbrowser\nfrom .core.css import css\nfrom .core.logging import debug\nfrom .core.edit import parse_text_edit\nfrom .core.protocol import Request, InsertTextFormat, Range\nfrom .core.registry import LspTextCommand\nfrom .core.typing import Any, List, Dict, Optional, Generator, Union\nfrom .core.views import FORMAT_STRING, FORMAT_MARKUP_CONTENT, minihtml\nfrom .core.views import range_to_region\n\n\nclass LspResolveDocsCommand(LspTextCommand):\n\n completions = [] # type: List[Dict[str, Any]]\n\n def run(self, edit: sublime.Edit, index: int, event: Optional[dict] = None) -> None:\n item = self.completions[index]\n detail = self.format_documentation(item.get('detail') or \"\")\n documentation = self.format_documentation(item.get(\"documentation\") or \"\")\n # don't show the detail in the cooperate AC popup if it is already shown in the AC details filed.\n self.is_detail_shown = bool(detail)\n if not detail or not documentation:\n # To make sure that the detail or documentation fields doesn't exist we need to resove the completion item.\n # If those fields appear after the item is resolved we show them in the popup.\n self.do_resolve(item)\n else:\n minihtml_content = self.get_content(documentation, detail)\n self.show_popup(minihtml_content)\n\n def format_documentation(self, content: Union[str, Dict[str, str]]) -> str:\n return minihtml(self.view, content, allowed_formats=FORMAT_STRING | FORMAT_MARKUP_CONTENT)\n\n def get_content(self, documentation: str, detail: str) -> str:\n content = 
\"\"\n if detail and not self.is_detail_shown:\n content += \"<div class='highlight'>{}</div>\".format(detail)\n if documentation:\n content += \"<div>{}</div>\".format(documentation)\n return content\n\n def show_popup(self, minihtml_content: str) -> None:\n viewport_width = self.view.viewport_extent()[0]\n mdpopups.show_popup(\n self.view,\n minihtml_content,\n flags=sublime.COOPERATE_WITH_AUTO_COMPLETE,\n css=css().popups,\n wrapper_class=css().popups_classname,\n max_width=viewport_width,\n on_navigate=self.on_navigate\n )\n\n def on_navigate(self, url: str) -> None:\n webbrowser.open(url)\n\n def do_resolve(self, item: dict) -> None:\n session = self.best_session('completionProvider.resolveProvider')\n if session:\n session.send_request(\n Request.resolveCompletionItem(item),\n lambda res: self.handle_resolve_response(res))\n\n def handle_resolve_response(self, item: Optional[dict]) -> None:\n detail = \"\"\n documentation = \"\"\n if item:\n detail = self.format_documentation(item.get('detail') or \"\")\n documentation = self.format_documentation(item.get(\"documentation\") or \"\")\n if not documentation:\n documentation = self.format_documentation({\"kind\": \"markdown\", \"value\": \"*No documentation available.*\"})\n minihtml_content = self.get_content(documentation, detail)\n show = self.update_popup if self.view.is_popup_visible() else self.show_popup\n # NOTE: Update/show popups from the main thread, or else the popup might make the AC widget disappear.\n sublime.set_timeout(lambda: show(minihtml_content))\n\n def update_popup(self, minihtml_content: str) -> None:\n mdpopups.update_popup(\n self.view,\n minihtml_content,\n css=css().popups,\n wrapper_class=css().popups_classname,\n )\n\n\nclass LspCompleteCommand(sublime_plugin.TextCommand):\n\n def epilogue(self, item: Dict[str, Any]) -> None:\n additional_edits = item.get('additionalTextEdits')\n if additional_edits:\n edits = [parse_text_edit(additional_edit) for additional_edit in additional_edits]\n self.view.run_command(\"lsp_apply_document_edit\", {'changes': edits})\n command = item.get(\"command\")\n if command:\n debug('Running server command \"{}\" for view {}'.format(command, self.view.id()))\n self.view.run_command(\"lsp_execute\", {\"command_name\": command})\n\n\nclass LspCompleteInsertTextCommand(LspCompleteCommand):\n\n def run(self, edit: sublime.Edit, **item: Any) -> None:\n insert_text = item.get(\"insertText\") or item[\"label\"]\n if item.get(\"insertTextFormat\", InsertTextFormat.PlainText) == InsertTextFormat.Snippet:\n self.view.run_command(\"insert_snippet\", {\"contents\": insert_text})\n else:\n self.view.run_command(\"insert\", {\"characters\": insert_text})\n self.epilogue(item)\n\n\nclass LspCompleteTextEditCommand(LspCompleteCommand):\n\n def run(self, edit: sublime.Edit, **item: Any) -> None:\n text_edit = item[\"textEdit\"]\n new_text = text_edit['newText']\n edit_region = range_to_region(Range.from_lsp(text_edit['range']), self.view)\n if item.get(\"insertTextFormat\", InsertTextFormat.PlainText) == InsertTextFormat.Snippet:\n for region in self.translated_regions(edit_region):\n self.view.erase(edit, region)\n self.view.run_command(\"insert_snippet\", {\"contents\": new_text})\n else:\n for region in self.translated_regions(edit_region):\n # NOTE: Cannot do .replace, because ST will select the replacement.\n self.view.erase(edit, region)\n self.view.insert(edit, region.a, new_text)\n self.epilogue(item)\n\n def translated_regions(self, edit_region: sublime.Region) -> 
Generator[sublime.Region, None, None]:\n selection = self.view.sel()\n primary_cursor_position = selection[0].b\n for region in reversed(selection):\n # For each selection region, apply the same removal as for the \"primary\" region.\n # To do that, translate, or offset, the LSP edit region into the non-\"primary\" regions.\n # The concept of \"primary\" is our own, and there is no mention of it in the LSP spec.\n translation = region.b - primary_cursor_position\n translated_edit_region = sublime.Region(edit_region.a + translation, edit_region.b + translation)\n yield translated_edit_region\n\n\ndef resolve(completion_list: sublime.CompletionList, items: List[sublime.CompletionItem], flags: int = 0) -> None:\n # Resolve the promise on the main thread to prevent any sort of data race for _set_target (see sublime_plugin.py).\n sublime.set_timeout(lambda: completion_list.set_completions(items, flags))\n", "path": "plugin/completion.py"}], "after_files": [{"content": "import mdpopups\nimport sublime\nimport sublime_plugin\nimport webbrowser\nfrom .core.css import css\nfrom .core.logging import debug\nfrom .core.edit import parse_text_edit\nfrom .core.protocol import Request, InsertTextFormat, Range\nfrom .core.registry import LspTextCommand\nfrom .core.typing import Any, List, Dict, Optional, Generator, Union\nfrom .core.views import FORMAT_STRING, FORMAT_MARKUP_CONTENT, minihtml\nfrom .core.views import range_to_region\n\n\nclass LspResolveDocsCommand(LspTextCommand):\n\n completions = [] # type: List[Dict[str, Any]]\n\n def run(self, edit: sublime.Edit, index: int, event: Optional[dict] = None) -> None:\n item = self.completions[index]\n detail = self.format_documentation(item.get('detail') or \"\")\n documentation = self.format_documentation(item.get(\"documentation\") or \"\")\n # don't show the detail in the cooperate AC popup if it is already shown in the AC details filed.\n self.is_detail_shown = bool(detail)\n if not detail or not documentation:\n # To make sure that the detail or documentation fields doesn't exist we need to resove the completion item.\n # If those fields appear after the item is resolved we show them in the popup.\n session = self.best_session('completionProvider.resolveProvider')\n if session:\n session.send_request(Request.resolveCompletionItem(item), self.handle_resolve_response)\n return\n minihtml_content = self.get_content(documentation, detail)\n self.show_popup(minihtml_content)\n\n def format_documentation(self, content: Union[str, Dict[str, str]]) -> str:\n return minihtml(self.view, content, allowed_formats=FORMAT_STRING | FORMAT_MARKUP_CONTENT)\n\n def get_content(self, documentation: str, detail: str) -> str:\n content = \"\"\n if detail and not self.is_detail_shown:\n content += \"<div class='highlight'>{}</div>\".format(detail)\n if documentation:\n content += \"<div>{}</div>\".format(documentation)\n return content\n\n def show_popup(self, minihtml_content: str) -> None:\n viewport_width = self.view.viewport_extent()[0]\n mdpopups.show_popup(\n self.view,\n minihtml_content,\n flags=sublime.COOPERATE_WITH_AUTO_COMPLETE,\n css=css().popups,\n wrapper_class=css().popups_classname,\n max_width=viewport_width,\n on_navigate=self.on_navigate\n )\n\n def on_navigate(self, url: str) -> None:\n webbrowser.open(url)\n\n def handle_resolve_response(self, item: Optional[dict]) -> None:\n detail = \"\"\n documentation = \"\"\n if item:\n detail = self.format_documentation(item.get('detail') or \"\")\n documentation = 
self.format_documentation(item.get(\"documentation\") or \"\")\n if not documentation:\n documentation = self.format_documentation({\"kind\": \"markdown\", \"value\": \"*No documentation available.*\"})\n minihtml_content = self.get_content(documentation, detail)\n show = self.update_popup if self.view.is_popup_visible() else self.show_popup\n # NOTE: Update/show popups from the main thread, or else the popup might make the AC widget disappear.\n sublime.set_timeout(lambda: show(minihtml_content))\n\n def update_popup(self, minihtml_content: str) -> None:\n mdpopups.update_popup(\n self.view,\n minihtml_content,\n css=css().popups,\n wrapper_class=css().popups_classname,\n )\n\n\nclass LspCompleteCommand(sublime_plugin.TextCommand):\n\n def epilogue(self, item: Dict[str, Any]) -> None:\n additional_edits = item.get('additionalTextEdits')\n if additional_edits:\n edits = [parse_text_edit(additional_edit) for additional_edit in additional_edits]\n self.view.run_command(\"lsp_apply_document_edit\", {'changes': edits})\n command = item.get(\"command\")\n if command:\n debug('Running server command \"{}\" for view {}'.format(command, self.view.id()))\n self.view.run_command(\"lsp_execute\", {\"command_name\": command})\n\n\nclass LspCompleteInsertTextCommand(LspCompleteCommand):\n\n def run(self, edit: sublime.Edit, **item: Any) -> None:\n insert_text = item.get(\"insertText\") or item[\"label\"]\n if item.get(\"insertTextFormat\", InsertTextFormat.PlainText) == InsertTextFormat.Snippet:\n self.view.run_command(\"insert_snippet\", {\"contents\": insert_text})\n else:\n self.view.run_command(\"insert\", {\"characters\": insert_text})\n self.epilogue(item)\n\n\nclass LspCompleteTextEditCommand(LspCompleteCommand):\n\n def run(self, edit: sublime.Edit, **item: Any) -> None:\n text_edit = item[\"textEdit\"]\n new_text = text_edit['newText']\n edit_region = range_to_region(Range.from_lsp(text_edit['range']), self.view)\n if item.get(\"insertTextFormat\", InsertTextFormat.PlainText) == InsertTextFormat.Snippet:\n for region in self.translated_regions(edit_region):\n self.view.erase(edit, region)\n self.view.run_command(\"insert_snippet\", {\"contents\": new_text})\n else:\n for region in self.translated_regions(edit_region):\n # NOTE: Cannot do .replace, because ST will select the replacement.\n self.view.erase(edit, region)\n self.view.insert(edit, region.a, new_text)\n self.epilogue(item)\n\n def translated_regions(self, edit_region: sublime.Region) -> Generator[sublime.Region, None, None]:\n selection = self.view.sel()\n primary_cursor_position = selection[0].b\n for region in reversed(selection):\n # For each selection region, apply the same removal as for the \"primary\" region.\n # To do that, translate, or offset, the LSP edit region into the non-\"primary\" regions.\n # The concept of \"primary\" is our own, and there is no mention of it in the LSP spec.\n translation = region.b - primary_cursor_position\n translated_edit_region = sublime.Region(edit_region.a + translation, edit_region.b + translation)\n yield translated_edit_region\n\n\ndef resolve(completion_list: sublime.CompletionList, items: List[sublime.CompletionItem], flags: int = 0) -> None:\n # Resolve the promise on the main thread to prevent any sort of data race for _set_target (see sublime_plugin.py).\n sublime.set_timeout(lambda: completion_list.set_completions(items, flags))\n", "path": "plugin/completion.py"}]} | 2,207 | 345 |
gh_patches_debug_20627 | rasdani/github-patches | git_diff | ciudadanointeligente__votainteligente-portal-electoral-283 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Candidate has_answered is always false
How do we stop showing the Twitter link for candidates who have answered all the questions?
How do we change "pídele" to "pedile"?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elections/models.py`
Content:
```
1 # coding=utf-8
2 from django.db import models
3 from autoslug import AutoSlugField
4 from taggit.managers import TaggableManager
5 from django.core.urlresolvers import reverse
6 from popolo.models import Person, Area
7 from django.utils.translation import ugettext_lazy as _
8 from markdown_deux.templatetags.markdown_deux_tags import markdown_allowed
9 from candidator.models import Category, Topic as CanTopic
10 from picklefield.fields import PickledObjectField
11 from django.conf import settings
12 from django.utils.encoding import python_2_unicode_compatible
13 from django.contrib.flatpages.models import FlatPage
14 import copy
15
16
17 class ExtraInfoMixin(models.Model):
18 extra_info = PickledObjectField(default={})
19
20 class Meta:
21 abstract = True
22
23 def __init__(self, *args, **kwargs):
24 super(ExtraInfoMixin, self).__init__(*args, **kwargs)
25 default_extra_info = copy.copy(self.default_extra_info)
26 default_extra_info.update(self.extra_info)
27 self.extra_info = default_extra_info
28
29
30 class Candidate(Person, ExtraInfoMixin):
31 election = models.ForeignKey('Election', related_name='candidates', null=True)
32
33 default_extra_info = settings.DEFAULT_CANDIDATE_EXTRA_INFO
34
35 @property
36 def twitter(self):
37 links = self.contact_details.filter(contact_type="TWITTER")
38 if links:
39 return links.first()
40
41 class Meta:
42 verbose_name = _("Candidato")
43 verbose_name_plural = _("Candidatos")
44
45
46 class CandidateFlatPage(FlatPage):
47 candidate = models.ForeignKey(Candidate, related_name='flatpages')
48
49 class Meta:
50 verbose_name = _(u"Página estáticas por candidato")
51 verbose_name_plural = _(u"Páginas estáticas por candidato")
52
53 def get_absolute_url(self):
54 return reverse('candidate_flatpage', kwargs={'election_slug': self.candidate.election.slug,
55 'slug': self.candidate.id,
56 'url': self.url
57 }
58 )
59
60
61 class PersonalData(models.Model):
62 candidate = models.ForeignKey('Candidate', related_name="personal_datas")
63 label = models.CharField(max_length=512)
64 value = models.CharField(max_length=1024)
65
66
67 class Topic(CanTopic):
68 class Meta:
69 proxy = True
70 verbose_name = _(u"Pregunta")
71 verbose_name_plural = _(u"Preguntas")
72
73 @property
74 def election(self):
75 category = QuestionCategory.objects.get(category_ptr=self.category)
76 return category.election
77
78
79 @python_2_unicode_compatible
80 class QuestionCategory(Category):
81 election = models.ForeignKey('Election', related_name='categories', null=True)
82
83 def __str__(self):
84 return u'<%s> in <%s>' % (self.name, self.election.name)
85
86 class Meta:
87 verbose_name = _(u"Categoría de pregunta")
88 verbose_name_plural = _(u"Categorías de pregunta")
89
90
91 class Election(ExtraInfoMixin, models.Model):
92 name = models.CharField(max_length=255)
93 slug = AutoSlugField(populate_from='name', unique=True)
94 description = models.TextField(blank=True)
95 tags = TaggableManager(blank=True)
96 searchable = models.BooleanField(default=True)
97 highlighted = models.BooleanField(default=False)
98 extra_info_title = models.CharField(max_length=50, blank=True, null=True)
99 extra_info_content = models.TextField(max_length=3000, blank=True, null=True, help_text=_("Puedes usar Markdown. <br/> ")
100 + markdown_allowed())
101 uses_preguntales = models.BooleanField(default=True, help_text=_(u"Esta elección debe usar preguntales?"))
102 uses_ranking = models.BooleanField(default=True, help_text=_(u"Esta elección debe usar ranking"))
103 uses_face_to_face = models.BooleanField(default=True, help_text=_(u"Esta elección debe usar frente a frente"))
104 uses_soul_mate = models.BooleanField(default=True, help_text=_(u"Esta elección debe usar 1/2 naranja"))
105 uses_questionary = models.BooleanField(default=True, help_text=_(u"Esta elección debe usar cuestionario"))
106
107 default_extra_info = settings.DEFAULT_ELECTION_EXTRA_INFO
108 area = models.ForeignKey(Area, null=True, related_name="elections")
109
110 def __unicode__(self):
111 return self.name
112
113 def get_absolute_url(self):
114 return reverse('election_view', kwargs={'slug': self.slug})
115
116 def get_extra_info_url(self):
117 return reverse('election_extra_info', kwargs={'slug': self.slug})
118
119 class Meta:
120 verbose_name = _(u'Mi Elección')
121 verbose_name_plural = _(u'Mis Elecciones')
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elections/models.py b/elections/models.py
--- a/elections/models.py
+++ b/elections/models.py
@@ -6,7 +6,7 @@
from popolo.models import Person, Area
from django.utils.translation import ugettext_lazy as _
from markdown_deux.templatetags.markdown_deux_tags import markdown_allowed
-from candidator.models import Category, Topic as CanTopic
+from candidator.models import Category, Topic as CanTopic, TakenPosition
from picklefield.fields import PickledObjectField
from django.conf import settings
from django.utils.encoding import python_2_unicode_compatible
@@ -38,6 +38,11 @@
if links:
return links.first()
+ @property
+ def has_answered(self):
+ are_there_answers = TakenPosition.objects.filter(person=self, position__isnull=False).exists()
+ return are_there_answers
+
class Meta:
verbose_name = _("Candidato")
verbose_name_plural = _("Candidatos")
| {"golden_diff": "diff --git a/elections/models.py b/elections/models.py\n--- a/elections/models.py\n+++ b/elections/models.py\n@@ -6,7 +6,7 @@\n from popolo.models import Person, Area\n from django.utils.translation import ugettext_lazy as _\n from markdown_deux.templatetags.markdown_deux_tags import markdown_allowed\n-from candidator.models import Category, Topic as CanTopic\n+from candidator.models import Category, Topic as CanTopic, TakenPosition\n from picklefield.fields import PickledObjectField\n from django.conf import settings\n from django.utils.encoding import python_2_unicode_compatible\n@@ -38,6 +38,11 @@\n if links:\n return links.first()\n \n+ @property\n+ def has_answered(self):\n+ are_there_answers = TakenPosition.objects.filter(person=self, position__isnull=False).exists()\n+ return are_there_answers\n+\n class Meta:\n verbose_name = _(\"Candidato\")\n verbose_name_plural = _(\"Candidatos\")\n", "issue": "Candidate has_answered siempre en false\n\u00bfC\u00f3mo se hace para que deje de mostrar el enlace a twitter para candidatos que tienen todas las respuestas?\n\u00bfC\u00f3mo se hace para cambiar \"p\u00eddele\" por \"pedile\"?\n\n", "before_files": [{"content": "# coding=utf-8\nfrom django.db import models\nfrom autoslug import AutoSlugField\nfrom taggit.managers import TaggableManager\nfrom django.core.urlresolvers import reverse\nfrom popolo.models import Person, Area\nfrom django.utils.translation import ugettext_lazy as _\nfrom markdown_deux.templatetags.markdown_deux_tags import markdown_allowed\nfrom candidator.models import Category, Topic as CanTopic\nfrom picklefield.fields import PickledObjectField\nfrom django.conf import settings\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom django.contrib.flatpages.models import FlatPage\nimport copy\n\n\nclass ExtraInfoMixin(models.Model):\n extra_info = PickledObjectField(default={})\n\n class Meta:\n abstract = True\n\n def __init__(self, *args, **kwargs):\n super(ExtraInfoMixin, self).__init__(*args, **kwargs)\n default_extra_info = copy.copy(self.default_extra_info)\n default_extra_info.update(self.extra_info)\n self.extra_info = default_extra_info\n\n\nclass Candidate(Person, ExtraInfoMixin):\n election = models.ForeignKey('Election', related_name='candidates', null=True)\n\n default_extra_info = settings.DEFAULT_CANDIDATE_EXTRA_INFO\n\n @property\n def twitter(self):\n links = self.contact_details.filter(contact_type=\"TWITTER\")\n if links:\n return links.first()\n\n class Meta:\n verbose_name = _(\"Candidato\")\n verbose_name_plural = _(\"Candidatos\")\n\n\nclass CandidateFlatPage(FlatPage):\n candidate = models.ForeignKey(Candidate, related_name='flatpages')\n\n class Meta:\n verbose_name = _(u\"P\u00e1gina est\u00e1ticas por candidato\")\n verbose_name_plural = _(u\"P\u00e1ginas est\u00e1ticas por candidato\")\n\n def get_absolute_url(self):\n return reverse('candidate_flatpage', kwargs={'election_slug': self.candidate.election.slug,\n 'slug': self.candidate.id,\n 'url': self.url\n }\n )\n\n\nclass PersonalData(models.Model):\n candidate = models.ForeignKey('Candidate', related_name=\"personal_datas\")\n label = models.CharField(max_length=512)\n value = models.CharField(max_length=1024)\n\n\nclass Topic(CanTopic):\n class Meta:\n proxy = True\n verbose_name = _(u\"Pregunta\")\n verbose_name_plural = _(u\"Preguntas\")\n\n @property\n def election(self):\n category = QuestionCategory.objects.get(category_ptr=self.category)\n return category.election\n\n\n@python_2_unicode_compatible\nclass 
QuestionCategory(Category):\n election = models.ForeignKey('Election', related_name='categories', null=True)\n\n def __str__(self):\n return u'<%s> in <%s>' % (self.name, self.election.name)\n\n class Meta:\n verbose_name = _(u\"Categor\u00eda de pregunta\")\n verbose_name_plural = _(u\"Categor\u00edas de pregunta\")\n\n\nclass Election(ExtraInfoMixin, models.Model):\n name = models.CharField(max_length=255)\n slug = AutoSlugField(populate_from='name', unique=True)\n description = models.TextField(blank=True)\n tags = TaggableManager(blank=True)\n searchable = models.BooleanField(default=True)\n highlighted = models.BooleanField(default=False)\n extra_info_title = models.CharField(max_length=50, blank=True, null=True)\n extra_info_content = models.TextField(max_length=3000, blank=True, null=True, help_text=_(\"Puedes usar Markdown. <br/> \")\n + markdown_allowed())\n uses_preguntales = models.BooleanField(default=True, help_text=_(u\"Esta elecci\u00f3n debe usar preguntales?\"))\n uses_ranking = models.BooleanField(default=True, help_text=_(u\"Esta elecci\u00f3n debe usar ranking\"))\n uses_face_to_face = models.BooleanField(default=True, help_text=_(u\"Esta elecci\u00f3n debe usar frente a frente\"))\n uses_soul_mate = models.BooleanField(default=True, help_text=_(u\"Esta elecci\u00f3n debe usar 1/2 naranja\"))\n uses_questionary = models.BooleanField(default=True, help_text=_(u\"Esta elecci\u00f3n debe usar cuestionario\"))\n\n default_extra_info = settings.DEFAULT_ELECTION_EXTRA_INFO\n area = models.ForeignKey(Area, null=True, related_name=\"elections\")\n\n def __unicode__(self):\n return self.name\n\n def get_absolute_url(self):\n return reverse('election_view', kwargs={'slug': self.slug})\n\n def get_extra_info_url(self):\n return reverse('election_extra_info', kwargs={'slug': self.slug})\n\n class Meta:\n verbose_name = _(u'Mi Elecci\u00f3n')\n verbose_name_plural = _(u'Mis Elecciones')\n", "path": "elections/models.py"}], "after_files": [{"content": "# coding=utf-8\nfrom django.db import models\nfrom autoslug import AutoSlugField\nfrom taggit.managers import TaggableManager\nfrom django.core.urlresolvers import reverse\nfrom popolo.models import Person, Area\nfrom django.utils.translation import ugettext_lazy as _\nfrom markdown_deux.templatetags.markdown_deux_tags import markdown_allowed\nfrom candidator.models import Category, Topic as CanTopic, TakenPosition\nfrom picklefield.fields import PickledObjectField\nfrom django.conf import settings\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom django.contrib.flatpages.models import FlatPage\nimport copy\n\n\nclass ExtraInfoMixin(models.Model):\n extra_info = PickledObjectField(default={})\n\n class Meta:\n abstract = True\n\n def __init__(self, *args, **kwargs):\n super(ExtraInfoMixin, self).__init__(*args, **kwargs)\n default_extra_info = copy.copy(self.default_extra_info)\n default_extra_info.update(self.extra_info)\n self.extra_info = default_extra_info\n\n\nclass Candidate(Person, ExtraInfoMixin):\n election = models.ForeignKey('Election', related_name='candidates', null=True)\n\n default_extra_info = settings.DEFAULT_CANDIDATE_EXTRA_INFO\n\n @property\n def twitter(self):\n links = self.contact_details.filter(contact_type=\"TWITTER\")\n if links:\n return links.first()\n\n @property\n def has_answered(self):\n are_there_answers = TakenPosition.objects.filter(person=self, position__isnull=False).exists()\n return are_there_answers\n\n class Meta:\n verbose_name = _(\"Candidato\")\n verbose_name_plural = 
_(\"Candidatos\")\n\n\nclass CandidateFlatPage(FlatPage):\n candidate = models.ForeignKey(Candidate, related_name='flatpages')\n\n class Meta:\n verbose_name = _(u\"P\u00e1gina est\u00e1ticas por candidato\")\n verbose_name_plural = _(u\"P\u00e1ginas est\u00e1ticas por candidato\")\n\n def get_absolute_url(self):\n return reverse('candidate_flatpage', kwargs={'election_slug': self.candidate.election.slug,\n 'slug': self.candidate.id,\n 'url': self.url\n }\n )\n\n\nclass PersonalData(models.Model):\n candidate = models.ForeignKey('Candidate', related_name=\"personal_datas\")\n label = models.CharField(max_length=512)\n value = models.CharField(max_length=1024)\n\n\nclass Topic(CanTopic):\n class Meta:\n proxy = True\n verbose_name = _(u\"Pregunta\")\n verbose_name_plural = _(u\"Preguntas\")\n\n @property\n def election(self):\n category = QuestionCategory.objects.get(category_ptr=self.category)\n return category.election\n\n\n@python_2_unicode_compatible\nclass QuestionCategory(Category):\n election = models.ForeignKey('Election', related_name='categories', null=True)\n\n def __str__(self):\n return u'<%s> in <%s>' % (self.name, self.election.name)\n\n class Meta:\n verbose_name = _(u\"Categor\u00eda de pregunta\")\n verbose_name_plural = _(u\"Categor\u00edas de pregunta\")\n\n\nclass Election(ExtraInfoMixin, models.Model):\n name = models.CharField(max_length=255)\n slug = AutoSlugField(populate_from='name', unique=True)\n description = models.TextField(blank=True)\n tags = TaggableManager(blank=True)\n searchable = models.BooleanField(default=True)\n highlighted = models.BooleanField(default=False)\n extra_info_title = models.CharField(max_length=50, blank=True, null=True)\n extra_info_content = models.TextField(max_length=3000, blank=True, null=True, help_text=_(\"Puedes usar Markdown. <br/> \")\n + markdown_allowed())\n uses_preguntales = models.BooleanField(default=True, help_text=_(u\"Esta elecci\u00f3n debe usar preguntales?\"))\n uses_ranking = models.BooleanField(default=True, help_text=_(u\"Esta elecci\u00f3n debe usar ranking\"))\n uses_face_to_face = models.BooleanField(default=True, help_text=_(u\"Esta elecci\u00f3n debe usar frente a frente\"))\n uses_soul_mate = models.BooleanField(default=True, help_text=_(u\"Esta elecci\u00f3n debe usar 1/2 naranja\"))\n uses_questionary = models.BooleanField(default=True, help_text=_(u\"Esta elecci\u00f3n debe usar cuestionario\"))\n\n default_extra_info = settings.DEFAULT_ELECTION_EXTRA_INFO\n area = models.ForeignKey(Area, null=True, related_name=\"elections\")\n\n def __unicode__(self):\n return self.name\n\n def get_absolute_url(self):\n return reverse('election_view', kwargs={'slug': self.slug})\n\n def get_extra_info_url(self):\n return reverse('election_extra_info', kwargs={'slug': self.slug})\n\n class Meta:\n verbose_name = _(u'Mi Elecci\u00f3n')\n verbose_name_plural = _(u'Mis Elecciones')\n", "path": "elections/models.py"}]} | 1,574 | 221 |
gh_patches_debug_6647 | rasdani/github-patches | git_diff | elastic__apm-agent-python-1647 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[META 555] Add automated span type/subtype checking against shared spec
Spec PR: https://github.com/elastic/apm/pull/443
To start, we would just ensure that all span types/subtypes appear in the spec. In the future we will work on cross-agent alignment.
--- END ISSUE ---
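For context, the shared spec uses `postgresql` as the database span subtype, whereas the aiopg instrumentation below reports `postgres`, so the first alignment fix is a one-word change at the `async_capture_span` call site. A minimal sketch of the kind of automated check the issue asks for — the `SPEC_DB_SUBTYPES` set is a stand-in assumption here, not the real spec file:
```python
# Stand-in for the shared span spec; the real list lives in the elastic/apm spec repo.
SPEC_DB_SUBTYPES = {"postgresql", "mysql", "mssql", "elasticsearch", "redis"}


def test_aiopg_span_subtype_is_in_spec():
    # Subtype the aiopg instrumentation should emit once aligned with the spec.
    used_subtype = "postgresql"
    assert used_subtype in SPEC_DB_SUBTYPES
```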
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticapm/instrumentation/packages/asyncio/aiopg.py`
Content:
```
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 from elasticapm.contrib.asyncio.traces import async_capture_span
32 from elasticapm.instrumentation.packages.asyncio.base import AsyncAbstractInstrumentedModule
33 from elasticapm.instrumentation.packages.dbapi2 import extract_signature
34
35
36 class AioPGInstrumentation(AsyncAbstractInstrumentedModule):
37 name = "aiopg"
38
39 instrument_list = [
40 ("aiopg.cursor", "Cursor.execute"),
41 ("aiopg.cursor", "Cursor.callproc"),
42 ("aiopg.connection", "Cursor.execute"),
43 ("aiopg.connection", "Cursor.callproc"),
44 ]
45
46 async def call(self, module, method, wrapped, instance, args, kwargs):
47 if method == "Cursor.execute":
48 query = args[0] if len(args) else kwargs["operation"]
49 query = _bake_sql(instance.raw, query)
50 name = extract_signature(query)
51 context = {"db": {"type": "sql", "statement": query}}
52 action = "query"
53 elif method == "Cursor.callproc":
54 func = args[0] if len(args) else kwargs["procname"]
55 name = func + "()"
56 context = None
57 action = "exec"
58 else:
59 raise AssertionError("call from uninstrumented method")
60 async with async_capture_span(
61 name, leaf=True, span_type="db", span_subtype="postgres", span_action=action, extra=context
62 ):
63 return await wrapped(*args, **kwargs)
64
65
66 def _bake_sql(cursor, sql):
67 # if this is a Composable object, use its `as_string` method
68 # see http://initd.org/psycopg/docs/sql.html
69 if hasattr(sql, "as_string"):
70 return sql.as_string(cursor)
71 return sql
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticapm/instrumentation/packages/asyncio/aiopg.py b/elasticapm/instrumentation/packages/asyncio/aiopg.py
--- a/elasticapm/instrumentation/packages/asyncio/aiopg.py
+++ b/elasticapm/instrumentation/packages/asyncio/aiopg.py
@@ -58,7 +58,7 @@
else:
raise AssertionError("call from uninstrumented method")
async with async_capture_span(
- name, leaf=True, span_type="db", span_subtype="postgres", span_action=action, extra=context
+ name, leaf=True, span_type="db", span_subtype="postgresql", span_action=action, extra=context
):
return await wrapped(*args, **kwargs)
| {"golden_diff": "diff --git a/elasticapm/instrumentation/packages/asyncio/aiopg.py b/elasticapm/instrumentation/packages/asyncio/aiopg.py\n--- a/elasticapm/instrumentation/packages/asyncio/aiopg.py\n+++ b/elasticapm/instrumentation/packages/asyncio/aiopg.py\n@@ -58,7 +58,7 @@\n else:\n raise AssertionError(\"call from uninstrumented method\")\n async with async_capture_span(\n- name, leaf=True, span_type=\"db\", span_subtype=\"postgres\", span_action=action, extra=context\n+ name, leaf=True, span_type=\"db\", span_subtype=\"postgresql\", span_action=action, extra=context\n ):\n return await wrapped(*args, **kwargs)\n", "issue": "[META 555] Add automated span type/subtype checking against shared spec\nSpec PR: https://github.com/elastic/apm/pull/443\r\n\r\nTo start, we would just ensure that all span types/subtypes appear in the spec. In the future we will work on cross-agent alignment.\r\n\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom elasticapm.contrib.asyncio.traces import async_capture_span\nfrom elasticapm.instrumentation.packages.asyncio.base import AsyncAbstractInstrumentedModule\nfrom elasticapm.instrumentation.packages.dbapi2 import extract_signature\n\n\nclass AioPGInstrumentation(AsyncAbstractInstrumentedModule):\n name = \"aiopg\"\n\n instrument_list = [\n (\"aiopg.cursor\", \"Cursor.execute\"),\n (\"aiopg.cursor\", \"Cursor.callproc\"),\n (\"aiopg.connection\", \"Cursor.execute\"),\n (\"aiopg.connection\", \"Cursor.callproc\"),\n ]\n\n async def call(self, module, method, wrapped, instance, args, kwargs):\n if method == \"Cursor.execute\":\n query = args[0] if len(args) else kwargs[\"operation\"]\n query = _bake_sql(instance.raw, query)\n name = extract_signature(query)\n context = {\"db\": {\"type\": \"sql\", \"statement\": query}}\n action = \"query\"\n elif method == \"Cursor.callproc\":\n func = args[0] if len(args) else kwargs[\"procname\"]\n name = func + \"()\"\n context = None\n action = \"exec\"\n else:\n raise AssertionError(\"call from uninstrumented method\")\n async with async_capture_span(\n name, leaf=True, span_type=\"db\", span_subtype=\"postgres\", span_action=action, extra=context\n ):\n return await wrapped(*args, **kwargs)\n\n\ndef _bake_sql(cursor, sql):\n # if this is a Composable object, use its `as_string` method\n # see http://initd.org/psycopg/docs/sql.html\n if hasattr(sql, \"as_string\"):\n return sql.as_string(cursor)\n return sql\n", "path": "elasticapm/instrumentation/packages/asyncio/aiopg.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom elasticapm.contrib.asyncio.traces import async_capture_span\nfrom elasticapm.instrumentation.packages.asyncio.base import AsyncAbstractInstrumentedModule\nfrom elasticapm.instrumentation.packages.dbapi2 import extract_signature\n\n\nclass AioPGInstrumentation(AsyncAbstractInstrumentedModule):\n name = \"aiopg\"\n\n instrument_list = [\n (\"aiopg.cursor\", \"Cursor.execute\"),\n (\"aiopg.cursor\", \"Cursor.callproc\"),\n (\"aiopg.connection\", \"Cursor.execute\"),\n (\"aiopg.connection\", \"Cursor.callproc\"),\n ]\n\n async def call(self, module, method, wrapped, instance, args, kwargs):\n if method == \"Cursor.execute\":\n query = args[0] if len(args) else kwargs[\"operation\"]\n query = _bake_sql(instance.raw, query)\n name = extract_signature(query)\n context = {\"db\": {\"type\": \"sql\", \"statement\": query}}\n action = \"query\"\n elif method == \"Cursor.callproc\":\n func = args[0] if len(args) else kwargs[\"procname\"]\n name = func + \"()\"\n context = None\n action = \"exec\"\n else:\n raise AssertionError(\"call from uninstrumented method\")\n async with async_capture_span(\n name, leaf=True, span_type=\"db\", span_subtype=\"postgresql\", span_action=action, extra=context\n ):\n return await wrapped(*args, **kwargs)\n\n\ndef _bake_sql(cursor, sql):\n # if this is a Composable object, use its `as_string` method\n # see http://initd.org/psycopg/docs/sql.html\n if hasattr(sql, \"as_string\"):\n return sql.as_string(cursor)\n return sql\n", "path": "elasticapm/instrumentation/packages/asyncio/aiopg.py"}]} | 1,184 | 171 |
gh_patches_debug_18169 | rasdani/github-patches | git_diff | scoutapp__scout_apm_python-490 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ensure cutoff date is updated
figure out when the cutoff date for ambiguous timestamps needs updating and either calculate it dynamically or add a unit test that fails when it needs adjusting.
https://github.com/scoutapp/scout_apm_python/blob/cf2246e6ff0dc1b69ffff25e10cd83782895ee27/src/scout_apm/core/web_requests.py#L149-L173
--- END ISSUE ---
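The constants in `web_requests.py` below pin that boundary to June 2009, so the "just over 10 years ago" comment quietly goes stale as time passes. The dynamic variant the issue suggests is a small change; a minimal sketch, assuming the ten-year window from the existing comment is the intended policy:
```python
import datetime as dt
import time

# Derive the ambiguous-timestamp cutoff from the current year instead of a
# hard-coded 2009 epoch, so it never needs manual adjustment.
CUTOFF_EPOCH_S = time.mktime((dt.date.today().year - 10, 1, 1, 0, 0, 0, 0, 0, 0))
CUTOFF_EPOCH_MS = CUTOFF_EPOCH_S * 1000.0
CUTOFF_EPOCH_US = CUTOFF_EPOCH_S * 1000000.0
CUTOFF_EPOCH_NS = CUTOFF_EPOCH_S * 1000000000.0
```
The test-based alternative is equally small: assert that `CUTOFF_EPOCH_S` falls within roughly nine to eleven years of today, so the suite fails once a hard-coded value drifts out of range.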
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/scout_apm/core/web_requests.py`
Content:
```
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import time
5
6 from scout_apm.compat import datetime_to_timestamp, parse_qsl, urlencode
7 from scout_apm.core.config import scout_config
8
9 # Originally derived from:
10 # 1. Rails:
11 # https://github.com/rails/rails/blob/0196551e6039ca864d1eee1e01819fcae12c1dc9/railties/lib/rails/generators/rails/app/templates/config/initializers/filter_parameter_logging.rb.tt # noqa
12 # 2. Sentry server side scrubbing:
13 # https://docs.sentry.io/data-management/sensitive-data/#server-side-scrubbing
14 FILTER_PARAMETERS = frozenset(
15 [
16 "access",
17 "access_token",
18 "api_key",
19 "apikey",
20 "auth",
21 "auth_token",
22 "card[number]",
23 "certificate",
24 "credentials",
25 "crypt",
26 "key",
27 "mysql_pwd",
28 "otp",
29 "passwd",
30 "password",
31 "private",
32 "protected",
33 "salt",
34 "secret",
35 "ssn",
36 "stripetoken",
37 "token",
38 ]
39 )
40
41
42 def create_filtered_path(path, query_params):
43 if scout_config.value("uri_reporting") == "path":
44 return path
45 # Python 2 unicode compatibility: force all keys and values to bytes
46 filtered_params = sorted(
47 (
48 (
49 key.encode("utf-8"),
50 (
51 b"[FILTERED]"
52 if key.lower() in FILTER_PARAMETERS
53 else value.encode("utf-8")
54 ),
55 )
56 for key, value in query_params
57 )
58 )
59 if not filtered_params:
60 return path
61 return path + "?" + urlencode(filtered_params)
62
63
64 def ignore_path(path):
65 ignored_paths = scout_config.value("ignore")
66 for ignored in ignored_paths:
67 if path.startswith(ignored):
68 return True
69 return False
70
71
72 def track_request_queue_time(header_value, tracked_request):
73 if header_value.startswith("t="):
74 header_value = header_value[2:]
75
76 try:
77 first_char = header_value[0]
78 except IndexError:
79 return False
80
81 if not first_char.isdigit(): # filter out negatives, nan, inf, etc.
82 return False
83
84 try:
85 ambiguous_start_timestamp = float(header_value)
86 except ValueError:
87 return False
88
89 start_timestamp_ns = convert_ambiguous_timestamp_to_ns(ambiguous_start_timestamp)
90 if start_timestamp_ns == 0.0:
91 return False
92
93 tr_start_timestamp_ns = datetime_to_timestamp(tracked_request.start_time) * 1e9
94
95 # Ignore if in the future
96 if start_timestamp_ns > tr_start_timestamp_ns:
97 return False
98
99 queue_time_ns = int(tr_start_timestamp_ns - start_timestamp_ns)
100 tracked_request.tag("scout.queue_time_ns", queue_time_ns)
101 return True
102
103
104 def track_amazon_request_queue_time(header_value, tracked_request):
105 items = header_value.split(";")
106 found_item = None
107 for item in items:
108 if found_item is None and item.startswith("Root="):
109 found_item = item
110 elif item.startswith("Self="):
111 found_item = item
112
113 if found_item is None:
114 return False
115
116 pieces = found_item.split("-")
117 if len(pieces) != 3:
118 return False
119
120 timestamp_str = pieces[1]
121
122 try:
123 first_char = timestamp_str[0]
124 except IndexError:
125 return False
126
127 if not first_char.isdigit():
128 return False
129
130 try:
131 start_timestamp_ns = int(timestamp_str) * 1000000000.0
132 except ValueError:
133 return False
134
135 if start_timestamp_ns == 0:
136 return False
137
138 tr_start_timestamp_ns = datetime_to_timestamp(tracked_request.start_time) * 1e9
139
140 # Ignore if in the futuren
141 if start_timestamp_ns > tr_start_timestamp_ns:
142 return False
143
144 queue_time_ns = int(tr_start_timestamp_ns - start_timestamp_ns)
145 tracked_request.tag("scout.queue_time_ns", queue_time_ns)
146 return True
147
148
149 # Cutoff epoch is used for determining ambiguous timestamp boundaries, and is
150 # just over 10 years ago at time of writing
151 CUTOFF_EPOCH_S = time.mktime((2009, 6, 1, 0, 0, 0, 0, 0, 0))
152 CUTOFF_EPOCH_MS = CUTOFF_EPOCH_S * 1000.0
153 CUTOFF_EPOCH_US = CUTOFF_EPOCH_S * 1000000.0
154 CUTOFF_EPOCH_NS = CUTOFF_EPOCH_S * 1000000000.0
155
156
157 def convert_ambiguous_timestamp_to_ns(timestamp):
158 """
159 Convert an ambiguous float timestamp that could be in nanoseconds,
160 microseconds, milliseconds, or seconds to nanoseconds. Return 0.0 for
161 values in the more than 10 years ago.
162 """
163 if timestamp > CUTOFF_EPOCH_NS:
164 converted_timestamp = timestamp
165 elif timestamp > CUTOFF_EPOCH_US:
166 converted_timestamp = timestamp * 1000.0
167 elif timestamp > CUTOFF_EPOCH_MS:
168 converted_timestamp = timestamp * 1000000.0
169 elif timestamp > CUTOFF_EPOCH_S:
170 converted_timestamp = timestamp * 1000000000.0
171 else:
172 return 0.0
173 return converted_timestamp
174
175
176 def asgi_track_request_data(scope, tracked_request):
177 """
178 Track request data from an ASGI HTTP or Websocket scope.
179 """
180 path = scope.get("root_path", "") + scope["path"]
181 query_params = parse_qsl(scope.get("query_string", b"").decode("utf-8"))
182 tracked_request.tag("path", create_filtered_path(path, query_params))
183 if ignore_path(path):
184 tracked_request.tag("ignore_transaction", True)
185
186 # We only care about the last values of headers so don't care that we use
187 # a plain dict rather than a multi-value dict
188 headers = {k.lower(): v for k, v in scope.get("headers", ())}
189
190 user_ip = (
191 headers.get(b"x-forwarded-for", b"").decode("latin1").split(",")[0]
192 or headers.get(b"client-ip", b"").decode("latin1").split(",")[0]
193 or scope.get("client", ("",))[0]
194 )
195 tracked_request.tag("user_ip", user_ip)
196
197 queue_time = headers.get(b"x-queue-start", b"") or headers.get(
198 b"x-request-start", b""
199 )
200 tracked_queue_time = track_request_queue_time(
201 queue_time.decode("latin1"), tracked_request
202 )
203 if not tracked_queue_time:
204 amazon_queue_time = headers.get(b"x-amzn-trace-id", b"")
205 track_amazon_request_queue_time(
206 amazon_queue_time.decode("latin1"), tracked_request
207 )
208
209
210 def werkzeug_track_request_data(werkzeug_request, tracked_request):
211 """
212 Several integrations use Werkzeug requests, so share the code for
213 extracting common data here.
214 """
215 path = werkzeug_request.path
216 tracked_request.tag(
217 "path", create_filtered_path(path, werkzeug_request.args.items(multi=True))
218 )
219 if ignore_path(path):
220 tracked_request.tag("ignore_transaction", True)
221
222 # Determine a remote IP to associate with the request. The value is
223 # spoofable by the requester so this is not suitable to use in any
224 # security sensitive context.
225 user_ip = (
226 werkzeug_request.headers.get("x-forwarded-for", default="").split(",")[0]
227 or werkzeug_request.headers.get("client-ip", default="").split(",")[0]
228 or werkzeug_request.remote_addr
229 )
230 tracked_request.tag("user_ip", user_ip)
231
232 queue_time = werkzeug_request.headers.get(
233 "x-queue-start", default=""
234 ) or werkzeug_request.headers.get("x-request-start", default="")
235 tracked_queue_time = track_request_queue_time(queue_time, tracked_request)
236 if not tracked_queue_time:
237 amazon_queue_time = werkzeug_request.headers.get("x-amzn-trace-id", default="")
238 track_amazon_request_queue_time(amazon_queue_time, tracked_request)
239
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/scout_apm/core/web_requests.py b/src/scout_apm/core/web_requests.py
--- a/src/scout_apm/core/web_requests.py
+++ b/src/scout_apm/core/web_requests.py
@@ -1,6 +1,7 @@
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
+import datetime as dt
import time
from scout_apm.compat import datetime_to_timestamp, parse_qsl, urlencode
@@ -146,9 +147,8 @@
return True
-# Cutoff epoch is used for determining ambiguous timestamp boundaries, and is
-# just over 10 years ago at time of writing
-CUTOFF_EPOCH_S = time.mktime((2009, 6, 1, 0, 0, 0, 0, 0, 0))
+# Cutoff epoch is used for determining ambiguous timestamp boundaries
+CUTOFF_EPOCH_S = time.mktime((dt.date.today().year - 10, 1, 1, 0, 0, 0, 0, 0, 0))
CUTOFF_EPOCH_MS = CUTOFF_EPOCH_S * 1000.0
CUTOFF_EPOCH_US = CUTOFF_EPOCH_S * 1000000.0
CUTOFF_EPOCH_NS = CUTOFF_EPOCH_S * 1000000000.0
| {"golden_diff": "diff --git a/src/scout_apm/core/web_requests.py b/src/scout_apm/core/web_requests.py\n--- a/src/scout_apm/core/web_requests.py\n+++ b/src/scout_apm/core/web_requests.py\n@@ -1,6 +1,7 @@\n # coding=utf-8\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n+import datetime as dt\n import time\n \n from scout_apm.compat import datetime_to_timestamp, parse_qsl, urlencode\n@@ -146,9 +147,8 @@\n return True\n \n \n-# Cutoff epoch is used for determining ambiguous timestamp boundaries, and is\n-# just over 10 years ago at time of writing\n-CUTOFF_EPOCH_S = time.mktime((2009, 6, 1, 0, 0, 0, 0, 0, 0))\n+# Cutoff epoch is used for determining ambiguous timestamp boundaries\n+CUTOFF_EPOCH_S = time.mktime((dt.date.today().year - 10, 1, 1, 0, 0, 0, 0, 0, 0))\n CUTOFF_EPOCH_MS = CUTOFF_EPOCH_S * 1000.0\n CUTOFF_EPOCH_US = CUTOFF_EPOCH_S * 1000000.0\n CUTOFF_EPOCH_NS = CUTOFF_EPOCH_S * 1000000000.0\n", "issue": "Ensure cutoff date is updated\nfigure out when the cutoff date for ambiguous timestamps needs updating and either calculate it dynamically or add a unit test that fails when it needs adjusting.\r\n\r\nhttps://github.com/scoutapp/scout_apm_python/blob/cf2246e6ff0dc1b69ffff25e10cd83782895ee27/src/scout_apm/core/web_requests.py#L149-L173\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport time\n\nfrom scout_apm.compat import datetime_to_timestamp, parse_qsl, urlencode\nfrom scout_apm.core.config import scout_config\n\n# Originally derived from:\n# 1. Rails:\n# https://github.com/rails/rails/blob/0196551e6039ca864d1eee1e01819fcae12c1dc9/railties/lib/rails/generators/rails/app/templates/config/initializers/filter_parameter_logging.rb.tt # noqa\n# 2. 
Sentry server side scrubbing:\n# https://docs.sentry.io/data-management/sensitive-data/#server-side-scrubbing\nFILTER_PARAMETERS = frozenset(\n [\n \"access\",\n \"access_token\",\n \"api_key\",\n \"apikey\",\n \"auth\",\n \"auth_token\",\n \"card[number]\",\n \"certificate\",\n \"credentials\",\n \"crypt\",\n \"key\",\n \"mysql_pwd\",\n \"otp\",\n \"passwd\",\n \"password\",\n \"private\",\n \"protected\",\n \"salt\",\n \"secret\",\n \"ssn\",\n \"stripetoken\",\n \"token\",\n ]\n)\n\n\ndef create_filtered_path(path, query_params):\n if scout_config.value(\"uri_reporting\") == \"path\":\n return path\n # Python 2 unicode compatibility: force all keys and values to bytes\n filtered_params = sorted(\n (\n (\n key.encode(\"utf-8\"),\n (\n b\"[FILTERED]\"\n if key.lower() in FILTER_PARAMETERS\n else value.encode(\"utf-8\")\n ),\n )\n for key, value in query_params\n )\n )\n if not filtered_params:\n return path\n return path + \"?\" + urlencode(filtered_params)\n\n\ndef ignore_path(path):\n ignored_paths = scout_config.value(\"ignore\")\n for ignored in ignored_paths:\n if path.startswith(ignored):\n return True\n return False\n\n\ndef track_request_queue_time(header_value, tracked_request):\n if header_value.startswith(\"t=\"):\n header_value = header_value[2:]\n\n try:\n first_char = header_value[0]\n except IndexError:\n return False\n\n if not first_char.isdigit(): # filter out negatives, nan, inf, etc.\n return False\n\n try:\n ambiguous_start_timestamp = float(header_value)\n except ValueError:\n return False\n\n start_timestamp_ns = convert_ambiguous_timestamp_to_ns(ambiguous_start_timestamp)\n if start_timestamp_ns == 0.0:\n return False\n\n tr_start_timestamp_ns = datetime_to_timestamp(tracked_request.start_time) * 1e9\n\n # Ignore if in the future\n if start_timestamp_ns > tr_start_timestamp_ns:\n return False\n\n queue_time_ns = int(tr_start_timestamp_ns - start_timestamp_ns)\n tracked_request.tag(\"scout.queue_time_ns\", queue_time_ns)\n return True\n\n\ndef track_amazon_request_queue_time(header_value, tracked_request):\n items = header_value.split(\";\")\n found_item = None\n for item in items:\n if found_item is None and item.startswith(\"Root=\"):\n found_item = item\n elif item.startswith(\"Self=\"):\n found_item = item\n\n if found_item is None:\n return False\n\n pieces = found_item.split(\"-\")\n if len(pieces) != 3:\n return False\n\n timestamp_str = pieces[1]\n\n try:\n first_char = timestamp_str[0]\n except IndexError:\n return False\n\n if not first_char.isdigit():\n return False\n\n try:\n start_timestamp_ns = int(timestamp_str) * 1000000000.0\n except ValueError:\n return False\n\n if start_timestamp_ns == 0:\n return False\n\n tr_start_timestamp_ns = datetime_to_timestamp(tracked_request.start_time) * 1e9\n\n # Ignore if in the futuren\n if start_timestamp_ns > tr_start_timestamp_ns:\n return False\n\n queue_time_ns = int(tr_start_timestamp_ns - start_timestamp_ns)\n tracked_request.tag(\"scout.queue_time_ns\", queue_time_ns)\n return True\n\n\n# Cutoff epoch is used for determining ambiguous timestamp boundaries, and is\n# just over 10 years ago at time of writing\nCUTOFF_EPOCH_S = time.mktime((2009, 6, 1, 0, 0, 0, 0, 0, 0))\nCUTOFF_EPOCH_MS = CUTOFF_EPOCH_S * 1000.0\nCUTOFF_EPOCH_US = CUTOFF_EPOCH_S * 1000000.0\nCUTOFF_EPOCH_NS = CUTOFF_EPOCH_S * 1000000000.0\n\n\ndef convert_ambiguous_timestamp_to_ns(timestamp):\n \"\"\"\n Convert an ambiguous float timestamp that could be in nanoseconds,\n microseconds, milliseconds, or seconds to nanoseconds. 
Return 0.0 for\n values in the more than 10 years ago.\n \"\"\"\n if timestamp > CUTOFF_EPOCH_NS:\n converted_timestamp = timestamp\n elif timestamp > CUTOFF_EPOCH_US:\n converted_timestamp = timestamp * 1000.0\n elif timestamp > CUTOFF_EPOCH_MS:\n converted_timestamp = timestamp * 1000000.0\n elif timestamp > CUTOFF_EPOCH_S:\n converted_timestamp = timestamp * 1000000000.0\n else:\n return 0.0\n return converted_timestamp\n\n\ndef asgi_track_request_data(scope, tracked_request):\n \"\"\"\n Track request data from an ASGI HTTP or Websocket scope.\n \"\"\"\n path = scope.get(\"root_path\", \"\") + scope[\"path\"]\n query_params = parse_qsl(scope.get(\"query_string\", b\"\").decode(\"utf-8\"))\n tracked_request.tag(\"path\", create_filtered_path(path, query_params))\n if ignore_path(path):\n tracked_request.tag(\"ignore_transaction\", True)\n\n # We only care about the last values of headers so don't care that we use\n # a plain dict rather than a multi-value dict\n headers = {k.lower(): v for k, v in scope.get(\"headers\", ())}\n\n user_ip = (\n headers.get(b\"x-forwarded-for\", b\"\").decode(\"latin1\").split(\",\")[0]\n or headers.get(b\"client-ip\", b\"\").decode(\"latin1\").split(\",\")[0]\n or scope.get(\"client\", (\"\",))[0]\n )\n tracked_request.tag(\"user_ip\", user_ip)\n\n queue_time = headers.get(b\"x-queue-start\", b\"\") or headers.get(\n b\"x-request-start\", b\"\"\n )\n tracked_queue_time = track_request_queue_time(\n queue_time.decode(\"latin1\"), tracked_request\n )\n if not tracked_queue_time:\n amazon_queue_time = headers.get(b\"x-amzn-trace-id\", b\"\")\n track_amazon_request_queue_time(\n amazon_queue_time.decode(\"latin1\"), tracked_request\n )\n\n\ndef werkzeug_track_request_data(werkzeug_request, tracked_request):\n \"\"\"\n Several integrations use Werkzeug requests, so share the code for\n extracting common data here.\n \"\"\"\n path = werkzeug_request.path\n tracked_request.tag(\n \"path\", create_filtered_path(path, werkzeug_request.args.items(multi=True))\n )\n if ignore_path(path):\n tracked_request.tag(\"ignore_transaction\", True)\n\n # Determine a remote IP to associate with the request. The value is\n # spoofable by the requester so this is not suitable to use in any\n # security sensitive context.\n user_ip = (\n werkzeug_request.headers.get(\"x-forwarded-for\", default=\"\").split(\",\")[0]\n or werkzeug_request.headers.get(\"client-ip\", default=\"\").split(\",\")[0]\n or werkzeug_request.remote_addr\n )\n tracked_request.tag(\"user_ip\", user_ip)\n\n queue_time = werkzeug_request.headers.get(\n \"x-queue-start\", default=\"\"\n ) or werkzeug_request.headers.get(\"x-request-start\", default=\"\")\n tracked_queue_time = track_request_queue_time(queue_time, tracked_request)\n if not tracked_queue_time:\n amazon_queue_time = werkzeug_request.headers.get(\"x-amzn-trace-id\", default=\"\")\n track_amazon_request_queue_time(amazon_queue_time, tracked_request)\n", "path": "src/scout_apm/core/web_requests.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport datetime as dt\nimport time\n\nfrom scout_apm.compat import datetime_to_timestamp, parse_qsl, urlencode\nfrom scout_apm.core.config import scout_config\n\n# Originally derived from:\n# 1. Rails:\n# https://github.com/rails/rails/blob/0196551e6039ca864d1eee1e01819fcae12c1dc9/railties/lib/rails/generators/rails/app/templates/config/initializers/filter_parameter_logging.rb.tt # noqa\n# 2. 
Sentry server side scrubbing:\n# https://docs.sentry.io/data-management/sensitive-data/#server-side-scrubbing\nFILTER_PARAMETERS = frozenset(\n [\n \"access\",\n \"access_token\",\n \"api_key\",\n \"apikey\",\n \"auth\",\n \"auth_token\",\n \"card[number]\",\n \"certificate\",\n \"credentials\",\n \"crypt\",\n \"key\",\n \"mysql_pwd\",\n \"otp\",\n \"passwd\",\n \"password\",\n \"private\",\n \"protected\",\n \"salt\",\n \"secret\",\n \"ssn\",\n \"stripetoken\",\n \"token\",\n ]\n)\n\n\ndef create_filtered_path(path, query_params):\n if scout_config.value(\"uri_reporting\") == \"path\":\n return path\n # Python 2 unicode compatibility: force all keys and values to bytes\n filtered_params = sorted(\n (\n (\n key.encode(\"utf-8\"),\n (\n b\"[FILTERED]\"\n if key.lower() in FILTER_PARAMETERS\n else value.encode(\"utf-8\")\n ),\n )\n for key, value in query_params\n )\n )\n if not filtered_params:\n return path\n return path + \"?\" + urlencode(filtered_params)\n\n\ndef ignore_path(path):\n ignored_paths = scout_config.value(\"ignore\")\n for ignored in ignored_paths:\n if path.startswith(ignored):\n return True\n return False\n\n\ndef track_request_queue_time(header_value, tracked_request):\n if header_value.startswith(\"t=\"):\n header_value = header_value[2:]\n\n try:\n first_char = header_value[0]\n except IndexError:\n return False\n\n if not first_char.isdigit(): # filter out negatives, nan, inf, etc.\n return False\n\n try:\n ambiguous_start_timestamp = float(header_value)\n except ValueError:\n return False\n\n start_timestamp_ns = convert_ambiguous_timestamp_to_ns(ambiguous_start_timestamp)\n if start_timestamp_ns == 0.0:\n return False\n\n tr_start_timestamp_ns = datetime_to_timestamp(tracked_request.start_time) * 1e9\n\n # Ignore if in the future\n if start_timestamp_ns > tr_start_timestamp_ns:\n return False\n\n queue_time_ns = int(tr_start_timestamp_ns - start_timestamp_ns)\n tracked_request.tag(\"scout.queue_time_ns\", queue_time_ns)\n return True\n\n\ndef track_amazon_request_queue_time(header_value, tracked_request):\n items = header_value.split(\";\")\n found_item = None\n for item in items:\n if found_item is None and item.startswith(\"Root=\"):\n found_item = item\n elif item.startswith(\"Self=\"):\n found_item = item\n\n if found_item is None:\n return False\n\n pieces = found_item.split(\"-\")\n if len(pieces) != 3:\n return False\n\n timestamp_str = pieces[1]\n\n try:\n first_char = timestamp_str[0]\n except IndexError:\n return False\n\n if not first_char.isdigit():\n return False\n\n try:\n start_timestamp_ns = int(timestamp_str) * 1000000000.0\n except ValueError:\n return False\n\n if start_timestamp_ns == 0:\n return False\n\n tr_start_timestamp_ns = datetime_to_timestamp(tracked_request.start_time) * 1e9\n\n # Ignore if in the futuren\n if start_timestamp_ns > tr_start_timestamp_ns:\n return False\n\n queue_time_ns = int(tr_start_timestamp_ns - start_timestamp_ns)\n tracked_request.tag(\"scout.queue_time_ns\", queue_time_ns)\n return True\n\n\n# Cutoff epoch is used for determining ambiguous timestamp boundaries\nCUTOFF_EPOCH_S = time.mktime((dt.date.today().year - 10, 1, 1, 0, 0, 0, 0, 0, 0))\nCUTOFF_EPOCH_MS = CUTOFF_EPOCH_S * 1000.0\nCUTOFF_EPOCH_US = CUTOFF_EPOCH_S * 1000000.0\nCUTOFF_EPOCH_NS = CUTOFF_EPOCH_S * 1000000000.0\n\n\ndef convert_ambiguous_timestamp_to_ns(timestamp):\n \"\"\"\n Convert an ambiguous float timestamp that could be in nanoseconds,\n microseconds, milliseconds, or seconds to nanoseconds. 
Return 0.0 for\n values in the more than 10 years ago.\n \"\"\"\n if timestamp > CUTOFF_EPOCH_NS:\n converted_timestamp = timestamp\n elif timestamp > CUTOFF_EPOCH_US:\n converted_timestamp = timestamp * 1000.0\n elif timestamp > CUTOFF_EPOCH_MS:\n converted_timestamp = timestamp * 1000000.0\n elif timestamp > CUTOFF_EPOCH_S:\n converted_timestamp = timestamp * 1000000000.0\n else:\n return 0.0\n return converted_timestamp\n\n\ndef asgi_track_request_data(scope, tracked_request):\n \"\"\"\n Track request data from an ASGI HTTP or Websocket scope.\n \"\"\"\n path = scope.get(\"root_path\", \"\") + scope[\"path\"]\n query_params = parse_qsl(scope.get(\"query_string\", b\"\").decode(\"utf-8\"))\n tracked_request.tag(\"path\", create_filtered_path(path, query_params))\n if ignore_path(path):\n tracked_request.tag(\"ignore_transaction\", True)\n\n # We only care about the last values of headers so don't care that we use\n # a plain dict rather than a multi-value dict\n headers = {k.lower(): v for k, v in scope.get(\"headers\", ())}\n\n user_ip = (\n headers.get(b\"x-forwarded-for\", b\"\").decode(\"latin1\").split(\",\")[0]\n or headers.get(b\"client-ip\", b\"\").decode(\"latin1\").split(\",\")[0]\n or scope.get(\"client\", (\"\",))[0]\n )\n tracked_request.tag(\"user_ip\", user_ip)\n\n queue_time = headers.get(b\"x-queue-start\", b\"\") or headers.get(\n b\"x-request-start\", b\"\"\n )\n tracked_queue_time = track_request_queue_time(\n queue_time.decode(\"latin1\"), tracked_request\n )\n if not tracked_queue_time:\n amazon_queue_time = headers.get(b\"x-amzn-trace-id\", b\"\")\n track_amazon_request_queue_time(\n amazon_queue_time.decode(\"latin1\"), tracked_request\n )\n\n\ndef werkzeug_track_request_data(werkzeug_request, tracked_request):\n \"\"\"\n Several integrations use Werkzeug requests, so share the code for\n extracting common data here.\n \"\"\"\n path = werkzeug_request.path\n tracked_request.tag(\n \"path\", create_filtered_path(path, werkzeug_request.args.items(multi=True))\n )\n if ignore_path(path):\n tracked_request.tag(\"ignore_transaction\", True)\n\n # Determine a remote IP to associate with the request. The value is\n # spoofable by the requester so this is not suitable to use in any\n # security sensitive context.\n user_ip = (\n werkzeug_request.headers.get(\"x-forwarded-for\", default=\"\").split(\",\")[0]\n or werkzeug_request.headers.get(\"client-ip\", default=\"\").split(\",\")[0]\n or werkzeug_request.remote_addr\n )\n tracked_request.tag(\"user_ip\", user_ip)\n\n queue_time = werkzeug_request.headers.get(\n \"x-queue-start\", default=\"\"\n ) or werkzeug_request.headers.get(\"x-request-start\", default=\"\")\n tracked_queue_time = track_request_queue_time(queue_time, tracked_request)\n if not tracked_queue_time:\n amazon_queue_time = werkzeug_request.headers.get(\"x-amzn-trace-id\", default=\"\")\n track_amazon_request_queue_time(amazon_queue_time, tracked_request)\n", "path": "src/scout_apm/core/web_requests.py"}]} | 2,830 | 326 |
gh_patches_debug_30933 | rasdani/github-patches | git_diff | fossasia__open-event-server-4770 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Modules API gives 404 at localhost
**I'm submitting a ...** (check one with "x")
- [x] bug report
- [ ] feature request
- [ ] support request => Please do not submit support requests here, instead ask your query in our Gitter channel at https://gitter.im/fossasia/open-event-server
**Current behavior:**
<!-- Describe how the bug manifests. -->
Currently, neither GET nor PATCH is working in `/v1/modules`
**Expected behavior:**
<!-- Describe what the behavior would be without the bug. -->
It should work.
**Steps to reproduce:**
<!-- If you are able to illustrate the bug or feature request with an example, please provide steps to reproduce -->
**Related code:**
```
insert any relevant code here else remove this section
```
**Other information:**
<!-- List any other information that is relevant to your issue. Stack traces, related issues, suggestions on how to fix, Stack Overflow links, forum links, etc. -->
**System information:**
<!-- Add information about the system your facing this bug on. If you think this is irrelevant or if it's a UI bug or a feature request, please remove this section -->
```
Your operating system
```
```
output of `python --version`
```
**Wanna work on this issue**
--- END ISSUE ---
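The 404 is consistent with the detail view below hard-coding `kwargs['id'] = 1` while a fresh local database contains no `modules` row to match: `populate_db.py` seeds roles, services, permissions and settings, but never a `Module`. A minimal sketch of the missing seeding step, reusing the `get_or_create` helper that script already imports (`donation_include=False` is an assumed default for the model's boolean field):
```python
from app.api.helpers.db import get_or_create
from app.models.module import Module


def create_modules():
    # Seed the single Module row (id 1) that /v1/modules expects to find.
    get_or_create(Module, donation_include=False)
```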
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/modules.py`
Content:
```
1 from flask_rest_jsonapi import ResourceDetail
2
3 from app.api.bootstrap import api
4 from app.api.schema.modules import ModuleSchema
5 from app.models import db
6 from app.models.module import Module
7
8
9 class ModuleDetail(ResourceDetail):
10 """
11 module detail by id
12 """
13 def before_get(self, args, kwargs):
14 """
15 before get method to get the resource id for fetching details
16 :param args:
17 :param kwargs:
18 :return:
19 """
20 kwargs['id'] = 1
21
22 decorators = (api.has_permission('is_admin', methods="PATCH", id="1"),)
23 methods = ['GET', 'PATCH']
24 schema = ModuleSchema
25 data_layer = {'session': db.session,
26 'model': Module}
27
```
Path: `populate_db.py`
Content:
```
1 from app import current_app
2 from app.models import db
3 from app.api.helpers.db import get_or_create # , save_to_db
4
5 # Admin message settings
6 from app.api.helpers.system_mails import MAILS
7 from app.models.message_setting import MessageSettings
8
9 # Event Role-Service Permissions
10 from app.models.role import Role
11 from app.models.service import Service
12 from app.models.permission import Permission
13
14 from app.models.track import Track
15 from app.models.session import Session
16 from app.models.speaker import Speaker
17 from app.models.sponsor import Sponsor
18 from app.models.microlocation import Microlocation
19
20 from app.models.user import ORGANIZER, COORGANIZER, TRACK_ORGANIZER, MODERATOR, ATTENDEE, REGISTRAR
21
22 # Admin Panel Permissions
23 from app.models.panel_permission import PanelPermission
24 from app.models.custom_system_role import CustomSysRole
25
26 from app.models.setting import Setting
27
28 # User Permissions
29 from app.models.user_permission import UserPermission
30 SALES = 'sales'
31
32
33 def create_roles():
34 get_or_create(Role, name=ORGANIZER, title_name='Organizer')
35 get_or_create(Role, name=COORGANIZER, title_name='Co-organizer')
36 get_or_create(Role, name=TRACK_ORGANIZER, title_name='Track Organizer')
37 get_or_create(Role, name=MODERATOR, title_name='Moderator')
38 get_or_create(Role, name=ATTENDEE, title_name='Attendee')
39 get_or_create(Role, name=REGISTRAR, title_name='Registrar')
40
41
42 def create_services():
43 track = Track.get_service_name()
44 session = Session.get_service_name()
45 speaker = Speaker.get_service_name()
46 sponsor = Sponsor.get_service_name()
47 microlocation = Microlocation.get_service_name()
48
49 get_or_create(Service, name=track)
50 get_or_create(Service, name=session)
51 get_or_create(Service, name=speaker)
52 get_or_create(Service, name=sponsor)
53 get_or_create(Service, name=microlocation)
54
55
56 def create_settings():
57 get_or_create(Setting, app_name='Open Event')
58
59
60 def create_permissions():
61 orgr = Role.query.get(1)
62 coorgr = Role.query.get(2)
63 track_orgr = Role.query.get(3)
64 mod = Role.query.get(4)
65
66 track = Service.query.get(1)
67 session = Service.query.get(2)
68 speaker = Service.query.get(3)
69 sponsor = Service.query.get(4)
70 microlocation = Service.query.get(5)
71
72 # For ORGANIZER
73 # All four permissions set to True
74 get_or_create(Permission, role=orgr, service=track)
75 get_or_create(Permission, role=orgr, service=session)
76 get_or_create(Permission, role=orgr, service=speaker)
77 get_or_create(Permission, role=orgr, service=sponsor)
78 get_or_create(Permission, role=orgr, service=microlocation)
79
80 # For COORGANIZER
81 perm, _ = get_or_create(Permission, role=coorgr, service=track)
82 perm.can_create, perm.can_delete = False, False
83 db.session.add(perm)
84
85 perm, _ = get_or_create(Permission, role=coorgr, service=session)
86 perm.can_create, perm.can_delete = False, False
87 db.session.add(perm)
88
89 perm, _ = get_or_create(Permission, role=coorgr, service=speaker)
90 perm.can_create, perm.can_delete = False, False
91 db.session.add(perm)
92
93 perm, _ = get_or_create(Permission, role=coorgr, service=sponsor)
94 perm.can_create, perm.can_delete = False, False
95 db.session.add(perm)
96
97 perm, _ = get_or_create(Permission, role=coorgr, service=microlocation)
98 perm.can_create, perm.can_delete = False, False
99 db.session.add(perm)
100
101 # For TRACK_ORGANIZER
102 perm, _ = get_or_create(Permission, role=track_orgr, service=track)
103 db.session.add(perm)
104
105 # For MODERATOR
106 perm, _ = get_or_create(Permission, role=mod, service=track)
107 perm.can_create, perm.can_update, perm.can_delete = False, False, False
108 db.session.add(perm)
109
110
111 def create_custom_sys_roles():
112 role, _ = get_or_create(CustomSysRole, name='Sales Admin')
113 db.session.add(role)
114 role, _ = get_or_create(CustomSysRole, name='Marketer')
115 db.session.add(role)
116
117
118 def create_panel_permissions():
119 sales_admin = CustomSysRole.query.filter_by(name='Sales Admin').first()
120 perm, _ = get_or_create(PanelPermission, panel_name=SALES, role=sales_admin)
121 db.session.add(perm)
122 marketer = CustomSysRole.query.filter_by(name='Marketer').first()
123 perm, _ = get_or_create(PanelPermission, panel_name=SALES, role=marketer)
124 db.session.add(perm)
125
126
127 def create_user_permissions():
128 # Publish Event
129 user_perm, _ = get_or_create(UserPermission, name='publish_event',
130 description='Publish event (make event live)')
131 user_perm.verified_user = True
132 db.session.add(user_perm)
133
134 # Create Event
135 user_perm, _ = get_or_create(UserPermission, name='create_event',
136 description='Create event')
137 user_perm.verified_user, user_perm.unverified_user = True, True
138 db.session.add(user_perm)
139
140
141 def create_admin_message_settings():
142 default_mails = ["Next Event",
143 "Session Schedule Change",
144 "User email",
145 "Invitation For Papers",
146 "After Event",
147 "Ticket(s) Purchased",
148 "Session Accept or Reject",
149 "Event Published",
150 "Event Export Failed",
151 "Event Exported",
152 "Event Role Invitation",
153 "New Session Proposal"]
154 for mail in MAILS:
155 if mail in default_mails:
156 get_or_create(MessageSettings, action=mail, mail_status=1, notification_status=1, user_control_status=1)
157 else:
158 get_or_create(MessageSettings, action=mail, mail_status=0, notification_status=0, user_control_status=0)
159
160
161 def populate():
162 """
163 Create defined Roles, Services and Permissions.
164 """
165 print('Creating roles...')
166 create_roles()
167 print('Creating services...')
168 create_services()
169 print('Creating permissions...')
170 create_permissions()
171 print('Creating custom system roles...')
172 create_custom_sys_roles()
173 print('Creating admin panel permissions...')
174 create_panel_permissions()
175 print('Creating user permissions...')
176 create_user_permissions()
177 print('Creating settings...')
178 create_settings()
179 print('Creating admin message settings...')
180 create_admin_message_settings()
181
182
183 def populate_without_print():
184 """
185 Create defined Roles, Services and Permissions.
186 """
187 create_roles()
188 create_services()
189 create_permissions()
190 create_custom_sys_roles()
191 create_panel_permissions()
192 create_user_permissions()
193 create_admin_message_settings()
194
195 db.session.commit()
196
197
198 if __name__ == '__main__':
199 with current_app.app_context():
200 populate()
201
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/api/modules.py b/app/api/modules.py
--- a/app/api/modules.py
+++ b/app/api/modules.py
@@ -10,6 +10,7 @@
"""
module detail by id
"""
+
def before_get(self, args, kwargs):
"""
before get method to get the resource id for fetching details
@@ -19,7 +20,7 @@
"""
kwargs['id'] = 1
- decorators = (api.has_permission('is_admin', methods="PATCH", id="1"),)
+ decorators = (api.has_permission('is_admin', methods='PATCH', id='1'),)
methods = ['GET', 'PATCH']
schema = ModuleSchema
data_layer = {'session': db.session,
diff --git a/populate_db.py b/populate_db.py
--- a/populate_db.py
+++ b/populate_db.py
@@ -24,6 +24,7 @@
from app.models.custom_system_role import CustomSysRole
from app.models.setting import Setting
+from app.models.module import Module
# User Permissions
from app.models.user_permission import UserPermission
@@ -57,6 +58,10 @@
get_or_create(Setting, app_name='Open Event')
+def create_modules():
+ get_or_create(Module, donation_include=False)
+
+
def create_permissions():
orgr = Role.query.get(1)
coorgr = Role.query.get(2)
@@ -176,6 +181,8 @@
create_user_permissions()
print('Creating settings...')
create_settings()
+ print('Creating modules...')
+ create_modules()
print('Creating admin message settings...')
create_admin_message_settings()
| {"golden_diff": "diff --git a/app/api/modules.py b/app/api/modules.py\n--- a/app/api/modules.py\n+++ b/app/api/modules.py\n@@ -10,6 +10,7 @@\n \"\"\"\n module detail by id\n \"\"\"\n+\n def before_get(self, args, kwargs):\n \"\"\"\n before get method to get the resource id for fetching details\n@@ -19,7 +20,7 @@\n \"\"\"\n kwargs['id'] = 1\n \n- decorators = (api.has_permission('is_admin', methods=\"PATCH\", id=\"1\"),)\n+ decorators = (api.has_permission('is_admin', methods='PATCH', id='1'),)\n methods = ['GET', 'PATCH']\n schema = ModuleSchema\n data_layer = {'session': db.session,\ndiff --git a/populate_db.py b/populate_db.py\n--- a/populate_db.py\n+++ b/populate_db.py\n@@ -24,6 +24,7 @@\n from app.models.custom_system_role import CustomSysRole\n \n from app.models.setting import Setting\n+from app.models.module import Module\n \n # User Permissions\n from app.models.user_permission import UserPermission\n@@ -57,6 +58,10 @@\n get_or_create(Setting, app_name='Open Event')\n \n \n+def create_modules():\n+ get_or_create(Module, donation_include=False)\n+\n+\n def create_permissions():\n orgr = Role.query.get(1)\n coorgr = Role.query.get(2)\n@@ -176,6 +181,8 @@\n create_user_permissions()\n print('Creating settings...')\n create_settings()\n+ print('Creating modules...')\n+ create_modules()\n print('Creating admin message settings...')\n create_admin_message_settings()\n", "issue": "Modules API gives 404 at localhost\n**I'm submitting a ...** (check one with \"x\")\r\n- [x] bug report\r\n- [ ] feature request\r\n- [ ] support request => Please do not submit support requests here, instead ask your query in out Gitter channel at https://gitter.im/fossasia/open-event-server\r\n\r\n**Current behavior:**\r\n<!-- Describe how the bug manifests. -->\r\n\r\nCurrently, neither GET nor PATCH is working in `/v1/modules`\r\n**Expected behavior:**\r\n<!-- Describe what the behavior would be without the bug. -->\r\n\r\nIt should work.\r\n**Steps to reproduce:**\r\n<!-- If you are able to illustrate the bug or feature request with an example, please provide steps to reproduce -->\r\n\r\n**Related code:**\r\n\r\n```\r\ninsert any relevant code here else remove this section\r\n```\r\n\r\n**Other information:**\r\n<!-- List any other information that is relevant to your issue. Stack traces, related issues, suggestions on how to fix, Stack Overflow links, forum links, etc. -->\r\n\r\n**System information:** \r\n\r\n<!-- Add information about the system your facing this bug on. 
If you think this is irrelevant or if it's a UI bug or a feature request, please remove this section -->\r\n\r\n```\r\nYour operating system\r\n```\r\n\r\n```\r\noutput of `python --version`\r\n```\r\n**Wanna work on this issue**\n", "before_files": [{"content": "from flask_rest_jsonapi import ResourceDetail\n\nfrom app.api.bootstrap import api\nfrom app.api.schema.modules import ModuleSchema\nfrom app.models import db\nfrom app.models.module import Module\n\n\nclass ModuleDetail(ResourceDetail):\n \"\"\"\n module detail by id\n \"\"\"\n def before_get(self, args, kwargs):\n \"\"\"\n before get method to get the resource id for fetching details\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n kwargs['id'] = 1\n\n decorators = (api.has_permission('is_admin', methods=\"PATCH\", id=\"1\"),)\n methods = ['GET', 'PATCH']\n schema = ModuleSchema\n data_layer = {'session': db.session,\n 'model': Module}\n", "path": "app/api/modules.py"}, {"content": "from app import current_app\nfrom app.models import db\nfrom app.api.helpers.db import get_or_create # , save_to_db\n\n# Admin message settings\nfrom app.api.helpers.system_mails import MAILS\nfrom app.models.message_setting import MessageSettings\n\n# Event Role-Service Permissions\nfrom app.models.role import Role\nfrom app.models.service import Service\nfrom app.models.permission import Permission\n\nfrom app.models.track import Track\nfrom app.models.session import Session\nfrom app.models.speaker import Speaker\nfrom app.models.sponsor import Sponsor\nfrom app.models.microlocation import Microlocation\n\nfrom app.models.user import ORGANIZER, COORGANIZER, TRACK_ORGANIZER, MODERATOR, ATTENDEE, REGISTRAR\n\n# Admin Panel Permissions\nfrom app.models.panel_permission import PanelPermission\nfrom app.models.custom_system_role import CustomSysRole\n\nfrom app.models.setting import Setting\n\n# User Permissions\nfrom app.models.user_permission import UserPermission\nSALES = 'sales'\n\n\ndef create_roles():\n get_or_create(Role, name=ORGANIZER, title_name='Organizer')\n get_or_create(Role, name=COORGANIZER, title_name='Co-organizer')\n get_or_create(Role, name=TRACK_ORGANIZER, title_name='Track Organizer')\n get_or_create(Role, name=MODERATOR, title_name='Moderator')\n get_or_create(Role, name=ATTENDEE, title_name='Attendee')\n get_or_create(Role, name=REGISTRAR, title_name='Registrar')\n\n\ndef create_services():\n track = Track.get_service_name()\n session = Session.get_service_name()\n speaker = Speaker.get_service_name()\n sponsor = Sponsor.get_service_name()\n microlocation = Microlocation.get_service_name()\n\n get_or_create(Service, name=track)\n get_or_create(Service, name=session)\n get_or_create(Service, name=speaker)\n get_or_create(Service, name=sponsor)\n get_or_create(Service, name=microlocation)\n\n\ndef create_settings():\n get_or_create(Setting, app_name='Open Event')\n\n\ndef create_permissions():\n orgr = Role.query.get(1)\n coorgr = Role.query.get(2)\n track_orgr = Role.query.get(3)\n mod = Role.query.get(4)\n\n track = Service.query.get(1)\n session = Service.query.get(2)\n speaker = Service.query.get(3)\n sponsor = Service.query.get(4)\n microlocation = Service.query.get(5)\n\n # For ORGANIZER\n # All four permissions set to True\n get_or_create(Permission, role=orgr, service=track)\n get_or_create(Permission, role=orgr, service=session)\n get_or_create(Permission, role=orgr, service=speaker)\n get_or_create(Permission, role=orgr, service=sponsor)\n get_or_create(Permission, role=orgr, service=microlocation)\n\n # For 
COORGANIZER\n perm, _ = get_or_create(Permission, role=coorgr, service=track)\n perm.can_create, perm.can_delete = False, False\n db.session.add(perm)\n\n perm, _ = get_or_create(Permission, role=coorgr, service=session)\n perm.can_create, perm.can_delete = False, False\n db.session.add(perm)\n\n perm, _ = get_or_create(Permission, role=coorgr, service=speaker)\n perm.can_create, perm.can_delete = False, False\n db.session.add(perm)\n\n perm, _ = get_or_create(Permission, role=coorgr, service=sponsor)\n perm.can_create, perm.can_delete = False, False\n db.session.add(perm)\n\n perm, _ = get_or_create(Permission, role=coorgr, service=microlocation)\n perm.can_create, perm.can_delete = False, False\n db.session.add(perm)\n\n # For TRACK_ORGANIZER\n perm, _ = get_or_create(Permission, role=track_orgr, service=track)\n db.session.add(perm)\n\n # For MODERATOR\n perm, _ = get_or_create(Permission, role=mod, service=track)\n perm.can_create, perm.can_update, perm.can_delete = False, False, False\n db.session.add(perm)\n\n\ndef create_custom_sys_roles():\n role, _ = get_or_create(CustomSysRole, name='Sales Admin')\n db.session.add(role)\n role, _ = get_or_create(CustomSysRole, name='Marketer')\n db.session.add(role)\n\n\ndef create_panel_permissions():\n sales_admin = CustomSysRole.query.filter_by(name='Sales Admin').first()\n perm, _ = get_or_create(PanelPermission, panel_name=SALES, role=sales_admin)\n db.session.add(perm)\n marketer = CustomSysRole.query.filter_by(name='Marketer').first()\n perm, _ = get_or_create(PanelPermission, panel_name=SALES, role=marketer)\n db.session.add(perm)\n\n\ndef create_user_permissions():\n # Publish Event\n user_perm, _ = get_or_create(UserPermission, name='publish_event',\n description='Publish event (make event live)')\n user_perm.verified_user = True\n db.session.add(user_perm)\n\n # Create Event\n user_perm, _ = get_or_create(UserPermission, name='create_event',\n description='Create event')\n user_perm.verified_user, user_perm.unverified_user = True, True\n db.session.add(user_perm)\n\n\ndef create_admin_message_settings():\n default_mails = [\"Next Event\",\n \"Session Schedule Change\",\n \"User email\",\n \"Invitation For Papers\",\n \"After Event\",\n \"Ticket(s) Purchased\",\n \"Session Accept or Reject\",\n \"Event Published\",\n \"Event Export Failed\",\n \"Event Exported\",\n \"Event Role Invitation\",\n \"New Session Proposal\"]\n for mail in MAILS:\n if mail in default_mails:\n get_or_create(MessageSettings, action=mail, mail_status=1, notification_status=1, user_control_status=1)\n else:\n get_or_create(MessageSettings, action=mail, mail_status=0, notification_status=0, user_control_status=0)\n\n\ndef populate():\n \"\"\"\n Create defined Roles, Services and Permissions.\n \"\"\"\n print('Creating roles...')\n create_roles()\n print('Creating services...')\n create_services()\n print('Creating permissions...')\n create_permissions()\n print('Creating custom system roles...')\n create_custom_sys_roles()\n print('Creating admin panel permissions...')\n create_panel_permissions()\n print('Creating user permissions...')\n create_user_permissions()\n print('Creating settings...')\n create_settings()\n print('Creating admin message settings...')\n create_admin_message_settings()\n\n\ndef populate_without_print():\n \"\"\"\n Create defined Roles, Services and Permissions.\n \"\"\"\n create_roles()\n create_services()\n create_permissions()\n create_custom_sys_roles()\n create_panel_permissions()\n create_user_permissions()\n 
create_admin_message_settings()\n\n db.session.commit()\n\n\nif __name__ == '__main__':\n with current_app.app_context():\n populate()\n", "path": "populate_db.py"}], "after_files": [{"content": "from flask_rest_jsonapi import ResourceDetail\n\nfrom app.api.bootstrap import api\nfrom app.api.schema.modules import ModuleSchema\nfrom app.models import db\nfrom app.models.module import Module\n\n\nclass ModuleDetail(ResourceDetail):\n \"\"\"\n module detail by id\n \"\"\"\n\n def before_get(self, args, kwargs):\n \"\"\"\n before get method to get the resource id for fetching details\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n kwargs['id'] = 1\n\n decorators = (api.has_permission('is_admin', methods='PATCH', id='1'),)\n methods = ['GET', 'PATCH']\n schema = ModuleSchema\n data_layer = {'session': db.session,\n 'model': Module}\n", "path": "app/api/modules.py"}, {"content": "from app import current_app\nfrom app.models import db\nfrom app.api.helpers.db import get_or_create # , save_to_db\n\n# Admin message settings\nfrom app.api.helpers.system_mails import MAILS\nfrom app.models.message_setting import MessageSettings\n\n# Event Role-Service Permissions\nfrom app.models.role import Role\nfrom app.models.service import Service\nfrom app.models.permission import Permission\n\nfrom app.models.track import Track\nfrom app.models.session import Session\nfrom app.models.speaker import Speaker\nfrom app.models.sponsor import Sponsor\nfrom app.models.microlocation import Microlocation\n\nfrom app.models.user import ORGANIZER, COORGANIZER, TRACK_ORGANIZER, MODERATOR, ATTENDEE, REGISTRAR\n\n# Admin Panel Permissions\nfrom app.models.panel_permission import PanelPermission\nfrom app.models.custom_system_role import CustomSysRole\n\nfrom app.models.setting import Setting\nfrom app.models.module import Module\n\n# User Permissions\nfrom app.models.user_permission import UserPermission\nSALES = 'sales'\n\n\ndef create_roles():\n get_or_create(Role, name=ORGANIZER, title_name='Organizer')\n get_or_create(Role, name=COORGANIZER, title_name='Co-organizer')\n get_or_create(Role, name=TRACK_ORGANIZER, title_name='Track Organizer')\n get_or_create(Role, name=MODERATOR, title_name='Moderator')\n get_or_create(Role, name=ATTENDEE, title_name='Attendee')\n get_or_create(Role, name=REGISTRAR, title_name='Registrar')\n\n\ndef create_services():\n track = Track.get_service_name()\n session = Session.get_service_name()\n speaker = Speaker.get_service_name()\n sponsor = Sponsor.get_service_name()\n microlocation = Microlocation.get_service_name()\n\n get_or_create(Service, name=track)\n get_or_create(Service, name=session)\n get_or_create(Service, name=speaker)\n get_or_create(Service, name=sponsor)\n get_or_create(Service, name=microlocation)\n\n\ndef create_settings():\n get_or_create(Setting, app_name='Open Event')\n\n\ndef create_modules():\n get_or_create(Module, donation_include=False)\n\n\ndef create_permissions():\n orgr = Role.query.get(1)\n coorgr = Role.query.get(2)\n track_orgr = Role.query.get(3)\n mod = Role.query.get(4)\n\n track = Service.query.get(1)\n session = Service.query.get(2)\n speaker = Service.query.get(3)\n sponsor = Service.query.get(4)\n microlocation = Service.query.get(5)\n\n # For ORGANIZER\n # All four permissions set to True\n get_or_create(Permission, role=orgr, service=track)\n get_or_create(Permission, role=orgr, service=session)\n get_or_create(Permission, role=orgr, service=speaker)\n get_or_create(Permission, role=orgr, service=sponsor)\n get_or_create(Permission, 
role=orgr, service=microlocation)\n\n # For COORGANIZER\n perm, _ = get_or_create(Permission, role=coorgr, service=track)\n perm.can_create, perm.can_delete = False, False\n db.session.add(perm)\n\n perm, _ = get_or_create(Permission, role=coorgr, service=session)\n perm.can_create, perm.can_delete = False, False\n db.session.add(perm)\n\n perm, _ = get_or_create(Permission, role=coorgr, service=speaker)\n perm.can_create, perm.can_delete = False, False\n db.session.add(perm)\n\n perm, _ = get_or_create(Permission, role=coorgr, service=sponsor)\n perm.can_create, perm.can_delete = False, False\n db.session.add(perm)\n\n perm, _ = get_or_create(Permission, role=coorgr, service=microlocation)\n perm.can_create, perm.can_delete = False, False\n db.session.add(perm)\n\n # For TRACK_ORGANIZER\n perm, _ = get_or_create(Permission, role=track_orgr, service=track)\n db.session.add(perm)\n\n # For MODERATOR\n perm, _ = get_or_create(Permission, role=mod, service=track)\n perm.can_create, perm.can_update, perm.can_delete = False, False, False\n db.session.add(perm)\n\n\ndef create_custom_sys_roles():\n role, _ = get_or_create(CustomSysRole, name='Sales Admin')\n db.session.add(role)\n role, _ = get_or_create(CustomSysRole, name='Marketer')\n db.session.add(role)\n\n\ndef create_panel_permissions():\n sales_admin = CustomSysRole.query.filter_by(name='Sales Admin').first()\n perm, _ = get_or_create(PanelPermission, panel_name=SALES, role=sales_admin)\n db.session.add(perm)\n marketer = CustomSysRole.query.filter_by(name='Marketer').first()\n perm, _ = get_or_create(PanelPermission, panel_name=SALES, role=marketer)\n db.session.add(perm)\n\n\ndef create_user_permissions():\n # Publish Event\n user_perm, _ = get_or_create(UserPermission, name='publish_event',\n description='Publish event (make event live)')\n user_perm.verified_user = True\n db.session.add(user_perm)\n\n # Create Event\n user_perm, _ = get_or_create(UserPermission, name='create_event',\n description='Create event')\n user_perm.verified_user, user_perm.unverified_user = True, True\n db.session.add(user_perm)\n\n\ndef create_admin_message_settings():\n default_mails = [\"Next Event\",\n \"Session Schedule Change\",\n \"User email\",\n \"Invitation For Papers\",\n \"After Event\",\n \"Ticket(s) Purchased\",\n \"Session Accept or Reject\",\n \"Event Published\",\n \"Event Export Failed\",\n \"Event Exported\",\n \"Event Role Invitation\",\n \"New Session Proposal\"]\n for mail in MAILS:\n if mail in default_mails:\n get_or_create(MessageSettings, action=mail, mail_status=1, notification_status=1, user_control_status=1)\n else:\n get_or_create(MessageSettings, action=mail, mail_status=0, notification_status=0, user_control_status=0)\n\n\ndef populate():\n \"\"\"\n Create defined Roles, Services and Permissions.\n \"\"\"\n print('Creating roles...')\n create_roles()\n print('Creating services...')\n create_services()\n print('Creating permissions...')\n create_permissions()\n print('Creating custom system roles...')\n create_custom_sys_roles()\n print('Creating admin panel permissions...')\n create_panel_permissions()\n print('Creating user permissions...')\n create_user_permissions()\n print('Creating settings...')\n create_settings()\n print('Creating modules...')\n create_modules()\n print('Creating admin message settings...')\n create_admin_message_settings()\n\n\ndef populate_without_print():\n \"\"\"\n Create defined Roles, Services and Permissions.\n \"\"\"\n create_roles()\n create_services()\n create_permissions()\n 
create_custom_sys_roles()\n create_panel_permissions()\n create_user_permissions()\n create_admin_message_settings()\n\n db.session.commit()\n\n\nif __name__ == '__main__':\n with current_app.app_context():\n populate()\n", "path": "populate_db.py"}]} | 2,803 | 370 |
gh_patches_debug_28632 | rasdani/github-patches | git_diff | Parsl__parsl-1951 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bash wrapper does not close log file, resulting in accumulation of open files
**Describe the bug**
This code in the bash remote wrapper:
```
set_file_logger(filename='{0}/bashexec.{1}.log'.format(logbase, t), name=logname, level=logging.DEBUG, format_string=format_string)
```
opens a new log file per app. But it does not ever close that log file, so a worker running many bash apps will accumulate many open files.
This log file should be closed at the end of each bash app execution.
**To Reproduce**
Run two bash apps in one worker with a long delay. Use `lsof` to see which files are open for that worker.
**Expected behavior**
log file should be closed at end of bash app execution
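
For illustration, a minimal stdlib-`logging` sketch (not parsl's `set_file_logger`, and not the actual fix) of releasing a per-invocation file handler once the app finishes; without the `finally` block each call leaves one descriptor open:

```
import logging

def run_one_bash_app(i):
    # One logger and one FileHandler per invocation, mirroring the
    # unique-name-per-call pattern above.
    logger = logging.getLogger("bashexec.%s" % i)
    handler = logging.FileHandler("/tmp/bashexec.%s.log" % i)
    logger.addHandler(handler)
    try:
        logger.warning("app %s starting", i)
        # ... compose and run the command line ...
    finally:
        # Without these two lines every invocation leaks one open file.
        logger.removeHandler(handler)
        handler.close()
```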
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsl/app/bash.py`
Content:
```
1 from functools import update_wrapper
2 from functools import partial
3 from inspect import signature, Parameter
4
5 from parsl.app.errors import wrap_error
6 from parsl.app.app import AppBase
7 from parsl.dataflow.dflow import DataFlowKernelLoader
8
9
10 def remote_side_bash_executor(func, *args, **kwargs):
11 """Executes the supplied function with *args and **kwargs to get a
12 command-line to run, and then run that command-line using bash.
13 """
14 import os
15 import time
16 import subprocess
17 import logging
18 import parsl.app.errors as pe
19 from parsl import set_file_logger
20 from parsl.utils import get_std_fname_mode
21
22 logbase = "/tmp"
23 format_string = "%(asctime)s.%(msecs)03d %(name)s:%(lineno)d [%(levelname)s] %(message)s"
24
25 # make this name unique per invocation so that each invocation can
26 # log to its own file. It would be better to include the task_id here
27 # but that is awkward to wire through at the moment as apps do not
28 # have access to that execution context.
29 t = time.time()
30
31 logname = __name__ + "." + str(t)
32 logger = logging.getLogger(logname)
33 set_file_logger(filename='{0}/bashexec.{1}.log'.format(logbase, t), name=logname, level=logging.DEBUG, format_string=format_string)
34
35 func_name = func.__name__
36
37 executable = None
38
39 # Try to run the func to compose the commandline
40 try:
41 # Execute the func to get the commandline
42 executable = func(*args, **kwargs)
43
44 if not isinstance(executable, str):
45 raise ValueError(f"Expected a str for bash_app commandline, got {type(executable)}")
46
47 except AttributeError as e:
48 if executable is not None:
49 raise pe.AppBadFormatting("App formatting failed for app '{}' with AttributeError: {}".format(func_name, e))
50 else:
51 raise pe.BashAppNoReturn("Bash app '{}' did not return a value, or returned None - with this exception: {}".format(func_name, e))
52
53 except IndexError as e:
54 raise pe.AppBadFormatting("App formatting failed for app '{}' with IndexError: {}".format(func_name, e))
55 except Exception as e:
56 logger.error("Caught exception during formatting of app '{}': {}".format(func_name, e))
57 raise e
58
59 logger.debug("Executable: %s", executable)
60
61 # Updating stdout, stderr if values passed at call time.
62
63 def open_std_fd(fdname):
64 # fdname is 'stdout' or 'stderr'
65 stdfspec = kwargs.get(fdname) # spec is str name or tuple (name, mode)
66 if stdfspec is None:
67 return None
68
69 fname, mode = get_std_fname_mode(fdname, stdfspec)
70 try:
71 if os.path.dirname(fname):
72 os.makedirs(os.path.dirname(fname), exist_ok=True)
73 fd = open(fname, mode)
74 except Exception as e:
75 raise pe.BadStdStreamFile(fname, e)
76 return fd
77
78 std_out = open_std_fd('stdout')
79 std_err = open_std_fd('stderr')
80 timeout = kwargs.get('walltime')
81
82 if std_err is not None:
83 print('--> executable follows <--\n{}\n--> end executable <--'.format(executable), file=std_err, flush=True)
84
85 returncode = None
86 try:
87 proc = subprocess.Popen(executable, stdout=std_out, stderr=std_err, shell=True, executable='/bin/bash')
88 proc.wait(timeout=timeout)
89 returncode = proc.returncode
90
91 except subprocess.TimeoutExpired:
92 raise pe.AppTimeout("[{}] App exceeded walltime: {}".format(func_name, timeout))
93
94 except Exception as e:
95 raise pe.AppException("[{}] App caught exception with returncode: {}".format(func_name, returncode), e)
96
97 if returncode != 0:
98 raise pe.BashExitFailure(func_name, proc.returncode)
99
100 # TODO : Add support for globs here
101
102 missing = []
103 for outputfile in kwargs.get('outputs', []):
104 fpath = outputfile.filepath
105
106 if not os.path.exists(fpath):
107 missing.extend([outputfile])
108
109 if missing:
110 raise pe.MissingOutputs("[{}] Missing outputs".format(func_name), missing)
111
112 return returncode
113
114
115 class BashApp(AppBase):
116
117 def __init__(self, func, data_flow_kernel=None, cache=False, executors='all', ignore_for_cache=None):
118 super().__init__(func, data_flow_kernel=data_flow_kernel, executors=executors, cache=cache, ignore_for_cache=ignore_for_cache)
119 self.kwargs = {}
120
121 # We duplicate the extraction of parameter defaults
122 # to self.kwargs to ensure availability at point of
123 # command string format. Refer: #349
124 sig = signature(func)
125
126 for s in sig.parameters:
127 if sig.parameters[s].default is not Parameter.empty:
128 self.kwargs[s] = sig.parameters[s].default
129
130 # update_wrapper allows remote_side_bash_executor to masquerade as self.func
131 # partial is used to attach the first arg the "func" to the remote_side_bash_executor
132 # this is done to avoid passing a function type in the args which parsl.serializer
133 # doesn't support
134 remote_fn = partial(update_wrapper(remote_side_bash_executor, self.func), self.func)
135 remote_fn.__name__ = self.func.__name__
136 self.wrapped_remote_function = wrap_error(remote_fn)
137
138 def __call__(self, *args, **kwargs):
139 """Handle the call to a Bash app.
140
141 Args:
142 - Arbitrary
143
144 Kwargs:
145 - Arbitrary
146
147 Returns:
148 App_fut
149
150 """
151 invocation_kwargs = {}
152 invocation_kwargs.update(self.kwargs)
153 invocation_kwargs.update(kwargs)
154
155 if self.data_flow_kernel is None:
156 dfk = DataFlowKernelLoader.dfk()
157 else:
158 dfk = self.data_flow_kernel
159
160 app_fut = dfk.submit(self.wrapped_remote_function,
161 app_args=args,
162 executors=self.executors,
163 cache=self.cache,
164 ignore_for_cache=self.ignore_for_cache,
165 app_kwargs=invocation_kwargs)
166
167 return app_fut
168
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsl/app/bash.py b/parsl/app/bash.py
--- a/parsl/app/bash.py
+++ b/parsl/app/bash.py
@@ -12,26 +12,10 @@
command-line to run, and then run that command-line using bash.
"""
import os
- import time
import subprocess
- import logging
import parsl.app.errors as pe
- from parsl import set_file_logger
from parsl.utils import get_std_fname_mode
- logbase = "/tmp"
- format_string = "%(asctime)s.%(msecs)03d %(name)s:%(lineno)d [%(levelname)s] %(message)s"
-
- # make this name unique per invocation so that each invocation can
- # log to its own file. It would be better to include the task_id here
- # but that is awkward to wire through at the moment as apps do not
- # have access to that execution context.
- t = time.time()
-
- logname = __name__ + "." + str(t)
- logger = logging.getLogger(logname)
- set_file_logger(filename='{0}/bashexec.{1}.log'.format(logbase, t), name=logname, level=logging.DEBUG, format_string=format_string)
-
func_name = func.__name__
executable = None
@@ -53,11 +37,8 @@
except IndexError as e:
raise pe.AppBadFormatting("App formatting failed for app '{}' with IndexError: {}".format(func_name, e))
except Exception as e:
- logger.error("Caught exception during formatting of app '{}': {}".format(func_name, e))
raise e
- logger.debug("Executable: %s", executable)
-
# Updating stdout, stderr if values passed at call time.
def open_std_fd(fdname):
| {"golden_diff": "diff --git a/parsl/app/bash.py b/parsl/app/bash.py\n--- a/parsl/app/bash.py\n+++ b/parsl/app/bash.py\n@@ -12,26 +12,10 @@\n command-line to run, and then run that command-line using bash.\n \"\"\"\n import os\n- import time\n import subprocess\n- import logging\n import parsl.app.errors as pe\n- from parsl import set_file_logger\n from parsl.utils import get_std_fname_mode\n \n- logbase = \"/tmp\"\n- format_string = \"%(asctime)s.%(msecs)03d %(name)s:%(lineno)d [%(levelname)s] %(message)s\"\n-\n- # make this name unique per invocation so that each invocation can\n- # log to its own file. It would be better to include the task_id here\n- # but that is awkward to wire through at the moment as apps do not\n- # have access to that execution context.\n- t = time.time()\n-\n- logname = __name__ + \".\" + str(t)\n- logger = logging.getLogger(logname)\n- set_file_logger(filename='{0}/bashexec.{1}.log'.format(logbase, t), name=logname, level=logging.DEBUG, format_string=format_string)\n-\n func_name = func.__name__\n \n executable = None\n@@ -53,11 +37,8 @@\n except IndexError as e:\n raise pe.AppBadFormatting(\"App formatting failed for app '{}' with IndexError: {}\".format(func_name, e))\n except Exception as e:\n- logger.error(\"Caught exception during formatting of app '{}': {}\".format(func_name, e))\n raise e\n \n- logger.debug(\"Executable: %s\", executable)\n-\n # Updating stdout, stderr if values passed at call time.\n \n def open_std_fd(fdname):\n", "issue": "bash wrapper does not close log file, resulting in accumulation of open files\n**Describe the bug**\r\nThis code in the bash remote wrapper:\r\n```\r\n set_file_logger(filename='{0}/bashexec.{1}.log'.format(logbase, t), name=logname, level=logging.DEBUG, format_string=format_string)\r\n```\r\nopens a new log file per app. But it does not ever close that log file, so a worker running many bash apps will accumulate many open files.\r\n\r\nThis log file should be closed at the end of each bash app execution.\r\n\r\n**To Reproduce**\r\nRun two bash apps in one worker with a long delay. use `lsof` to see which files are open for that worker.\r\n\r\n**Expected behavior**\r\nlog file should be closed at end of bash app execution\r\n\n", "before_files": [{"content": "from functools import update_wrapper\nfrom functools import partial\nfrom inspect import signature, Parameter\n\nfrom parsl.app.errors import wrap_error\nfrom parsl.app.app import AppBase\nfrom parsl.dataflow.dflow import DataFlowKernelLoader\n\n\ndef remote_side_bash_executor(func, *args, **kwargs):\n \"\"\"Executes the supplied function with *args and **kwargs to get a\n command-line to run, and then run that command-line using bash.\n \"\"\"\n import os\n import time\n import subprocess\n import logging\n import parsl.app.errors as pe\n from parsl import set_file_logger\n from parsl.utils import get_std_fname_mode\n\n logbase = \"/tmp\"\n format_string = \"%(asctime)s.%(msecs)03d %(name)s:%(lineno)d [%(levelname)s] %(message)s\"\n\n # make this name unique per invocation so that each invocation can\n # log to its own file. 
It would be better to include the task_id here\n # but that is awkward to wire through at the moment as apps do not\n # have access to that execution context.\n t = time.time()\n\n logname = __name__ + \".\" + str(t)\n logger = logging.getLogger(logname)\n set_file_logger(filename='{0}/bashexec.{1}.log'.format(logbase, t), name=logname, level=logging.DEBUG, format_string=format_string)\n\n func_name = func.__name__\n\n executable = None\n\n # Try to run the func to compose the commandline\n try:\n # Execute the func to get the commandline\n executable = func(*args, **kwargs)\n\n if not isinstance(executable, str):\n raise ValueError(f\"Expected a str for bash_app commandline, got {type(executable)}\")\n\n except AttributeError as e:\n if executable is not None:\n raise pe.AppBadFormatting(\"App formatting failed for app '{}' with AttributeError: {}\".format(func_name, e))\n else:\n raise pe.BashAppNoReturn(\"Bash app '{}' did not return a value, or returned None - with this exception: {}\".format(func_name, e))\n\n except IndexError as e:\n raise pe.AppBadFormatting(\"App formatting failed for app '{}' with IndexError: {}\".format(func_name, e))\n except Exception as e:\n logger.error(\"Caught exception during formatting of app '{}': {}\".format(func_name, e))\n raise e\n\n logger.debug(\"Executable: %s\", executable)\n\n # Updating stdout, stderr if values passed at call time.\n\n def open_std_fd(fdname):\n # fdname is 'stdout' or 'stderr'\n stdfspec = kwargs.get(fdname) # spec is str name or tuple (name, mode)\n if stdfspec is None:\n return None\n\n fname, mode = get_std_fname_mode(fdname, stdfspec)\n try:\n if os.path.dirname(fname):\n os.makedirs(os.path.dirname(fname), exist_ok=True)\n fd = open(fname, mode)\n except Exception as e:\n raise pe.BadStdStreamFile(fname, e)\n return fd\n\n std_out = open_std_fd('stdout')\n std_err = open_std_fd('stderr')\n timeout = kwargs.get('walltime')\n\n if std_err is not None:\n print('--> executable follows <--\\n{}\\n--> end executable <--'.format(executable), file=std_err, flush=True)\n\n returncode = None\n try:\n proc = subprocess.Popen(executable, stdout=std_out, stderr=std_err, shell=True, executable='/bin/bash')\n proc.wait(timeout=timeout)\n returncode = proc.returncode\n\n except subprocess.TimeoutExpired:\n raise pe.AppTimeout(\"[{}] App exceeded walltime: {}\".format(func_name, timeout))\n\n except Exception as e:\n raise pe.AppException(\"[{}] App caught exception with returncode: {}\".format(func_name, returncode), e)\n\n if returncode != 0:\n raise pe.BashExitFailure(func_name, proc.returncode)\n\n # TODO : Add support for globs here\n\n missing = []\n for outputfile in kwargs.get('outputs', []):\n fpath = outputfile.filepath\n\n if not os.path.exists(fpath):\n missing.extend([outputfile])\n\n if missing:\n raise pe.MissingOutputs(\"[{}] Missing outputs\".format(func_name), missing)\n\n return returncode\n\n\nclass BashApp(AppBase):\n\n def __init__(self, func, data_flow_kernel=None, cache=False, executors='all', ignore_for_cache=None):\n super().__init__(func, data_flow_kernel=data_flow_kernel, executors=executors, cache=cache, ignore_for_cache=ignore_for_cache)\n self.kwargs = {}\n\n # We duplicate the extraction of parameter defaults\n # to self.kwargs to ensure availability at point of\n # command string format. 
Refer: #349\n sig = signature(func)\n\n for s in sig.parameters:\n if sig.parameters[s].default is not Parameter.empty:\n self.kwargs[s] = sig.parameters[s].default\n\n # update_wrapper allows remote_side_bash_executor to masquerade as self.func\n # partial is used to attach the first arg the \"func\" to the remote_side_bash_executor\n # this is done to avoid passing a function type in the args which parsl.serializer\n # doesn't support\n remote_fn = partial(update_wrapper(remote_side_bash_executor, self.func), self.func)\n remote_fn.__name__ = self.func.__name__\n self.wrapped_remote_function = wrap_error(remote_fn)\n\n def __call__(self, *args, **kwargs):\n \"\"\"Handle the call to a Bash app.\n\n Args:\n - Arbitrary\n\n Kwargs:\n - Arbitrary\n\n Returns:\n App_fut\n\n \"\"\"\n invocation_kwargs = {}\n invocation_kwargs.update(self.kwargs)\n invocation_kwargs.update(kwargs)\n\n if self.data_flow_kernel is None:\n dfk = DataFlowKernelLoader.dfk()\n else:\n dfk = self.data_flow_kernel\n\n app_fut = dfk.submit(self.wrapped_remote_function,\n app_args=args,\n executors=self.executors,\n cache=self.cache,\n ignore_for_cache=self.ignore_for_cache,\n app_kwargs=invocation_kwargs)\n\n return app_fut\n", "path": "parsl/app/bash.py"}], "after_files": [{"content": "from functools import update_wrapper\nfrom functools import partial\nfrom inspect import signature, Parameter\n\nfrom parsl.app.errors import wrap_error\nfrom parsl.app.app import AppBase\nfrom parsl.dataflow.dflow import DataFlowKernelLoader\n\n\ndef remote_side_bash_executor(func, *args, **kwargs):\n \"\"\"Executes the supplied function with *args and **kwargs to get a\n command-line to run, and then run that command-line using bash.\n \"\"\"\n import os\n import subprocess\n import parsl.app.errors as pe\n from parsl.utils import get_std_fname_mode\n\n func_name = func.__name__\n\n executable = None\n\n # Try to run the func to compose the commandline\n try:\n # Execute the func to get the commandline\n executable = func(*args, **kwargs)\n\n if not isinstance(executable, str):\n raise ValueError(f\"Expected a str for bash_app commandline, got {type(executable)}\")\n\n except AttributeError as e:\n if executable is not None:\n raise pe.AppBadFormatting(\"App formatting failed for app '{}' with AttributeError: {}\".format(func_name, e))\n else:\n raise pe.BashAppNoReturn(\"Bash app '{}' did not return a value, or returned None - with this exception: {}\".format(func_name, e))\n\n except IndexError as e:\n raise pe.AppBadFormatting(\"App formatting failed for app '{}' with IndexError: {}\".format(func_name, e))\n except Exception as e:\n raise e\n\n # Updating stdout, stderr if values passed at call time.\n\n def open_std_fd(fdname):\n # fdname is 'stdout' or 'stderr'\n stdfspec = kwargs.get(fdname) # spec is str name or tuple (name, mode)\n if stdfspec is None:\n return None\n\n fname, mode = get_std_fname_mode(fdname, stdfspec)\n try:\n if os.path.dirname(fname):\n os.makedirs(os.path.dirname(fname), exist_ok=True)\n fd = open(fname, mode)\n except Exception as e:\n raise pe.BadStdStreamFile(fname, e)\n return fd\n\n std_out = open_std_fd('stdout')\n std_err = open_std_fd('stderr')\n timeout = kwargs.get('walltime')\n\n if std_err is not None:\n print('--> executable follows <--\\n{}\\n--> end executable <--'.format(executable), file=std_err, flush=True)\n\n returncode = None\n try:\n proc = subprocess.Popen(executable, stdout=std_out, stderr=std_err, shell=True, executable='/bin/bash')\n proc.wait(timeout=timeout)\n returncode = 
proc.returncode\n\n except subprocess.TimeoutExpired:\n raise pe.AppTimeout(\"[{}] App exceeded walltime: {}\".format(func_name, timeout))\n\n except Exception as e:\n raise pe.AppException(\"[{}] App caught exception with returncode: {}\".format(func_name, returncode), e)\n\n if returncode != 0:\n raise pe.BashExitFailure(func_name, proc.returncode)\n\n # TODO : Add support for globs here\n\n missing = []\n for outputfile in kwargs.get('outputs', []):\n fpath = outputfile.filepath\n\n if not os.path.exists(fpath):\n missing.extend([outputfile])\n\n if missing:\n raise pe.MissingOutputs(\"[{}] Missing outputs\".format(func_name), missing)\n\n return returncode\n\n\nclass BashApp(AppBase):\n\n def __init__(self, func, data_flow_kernel=None, cache=False, executors='all', ignore_for_cache=None):\n super().__init__(func, data_flow_kernel=data_flow_kernel, executors=executors, cache=cache, ignore_for_cache=ignore_for_cache)\n self.kwargs = {}\n\n # We duplicate the extraction of parameter defaults\n # to self.kwargs to ensure availability at point of\n # command string format. Refer: #349\n sig = signature(func)\n\n for s in sig.parameters:\n if sig.parameters[s].default is not Parameter.empty:\n self.kwargs[s] = sig.parameters[s].default\n\n # update_wrapper allows remote_side_bash_executor to masquerade as self.func\n # partial is used to attach the first arg the \"func\" to the remote_side_bash_executor\n # this is done to avoid passing a function type in the args which parsl.serializer\n # doesn't support\n remote_fn = partial(update_wrapper(remote_side_bash_executor, self.func), self.func)\n remote_fn.__name__ = self.func.__name__\n self.wrapped_remote_function = wrap_error(remote_fn)\n\n def __call__(self, *args, **kwargs):\n \"\"\"Handle the call to a Bash app.\n\n Args:\n - Arbitrary\n\n Kwargs:\n - Arbitrary\n\n Returns:\n App_fut\n\n \"\"\"\n invocation_kwargs = {}\n invocation_kwargs.update(self.kwargs)\n invocation_kwargs.update(kwargs)\n\n if self.data_flow_kernel is None:\n dfk = DataFlowKernelLoader.dfk()\n else:\n dfk = self.data_flow_kernel\n\n app_fut = dfk.submit(self.wrapped_remote_function,\n app_args=args,\n executors=self.executors,\n cache=self.cache,\n ignore_for_cache=self.ignore_for_cache,\n app_kwargs=invocation_kwargs)\n\n return app_fut\n", "path": "parsl/app/bash.py"}]} | 2,189 | 409 |
gh_patches_debug_34381 | rasdani/github-patches | git_diff | facebookresearch__hydra-1560 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[callbacks] call on_*_end events in reverse order
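
A sketch of the requested dispatch order (illustrative only, not the actual Hydra implementation): `on_*_start` events keep registration order, while `on_*_end` events fire last-registered-first, like unwinding nested setup/teardown.

```
def _notify(callbacks, function_name, reverse=False, **kwargs):
    # reverse=True is meant for *_end events so teardown mirrors setup order.
    for c in (reversed(callbacks) if reverse else callbacks):
        getattr(c, function_name)(**kwargs)
```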
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hydra/core/callbacks.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import warnings
3 from typing import Any
4
5 from omegaconf import DictConfig
6
7 from hydra.core.utils import JobReturn
8 from hydra.utils import instantiate
9
10
11 class Callbacks:
12 def __init__(self, config: DictConfig) -> None:
13 self.callbacks = []
14 for params in config.hydra.callbacks.values():
15 self.callbacks.append(instantiate(params))
16
17 def _notify(self, function_name: str, **kwargs: Any) -> None:
18 for c in self.callbacks:
19 try:
20 getattr(c, function_name)(**kwargs)
21 except Exception as e:
22 warnings.warn(
23 f"Callback {type(c).__name__}.{function_name} raised {type(e).__name__}: {e}"
24 )
25
26 def on_run_start(self, config: DictConfig, **kwargs: Any) -> None:
27 self._notify(function_name="on_run_start", config=config, **kwargs)
28
29 def on_run_end(self, config: DictConfig, **kwargs: Any) -> None:
30 self._notify(function_name="on_run_end", config=config, **kwargs)
31
32 def on_multirun_start(self, config: DictConfig, **kwargs: Any) -> None:
33 self._notify(function_name="on_multirun_start", config=config, **kwargs)
34
35 def on_multirun_end(self, config: DictConfig, **kwargs: Any) -> None:
36 self._notify(function_name="on_multirun_end", config=config, **kwargs)
37
38 def on_job_start(self, config: DictConfig, **kwargs: Any) -> None:
39 self._notify(function_name="on_job_start", config=config, **kwargs)
40
41 def on_job_end(
42 self, config: DictConfig, job_return: JobReturn, **kwargs: Any
43 ) -> None:
44 self._notify(
45 function_name="on_job_end", config=config, job_return=job_return, **kwargs
46 )
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/hydra/core/callbacks.py b/hydra/core/callbacks.py
--- a/hydra/core/callbacks.py
+++ b/hydra/core/callbacks.py
@@ -14,8 +14,9 @@
for params in config.hydra.callbacks.values():
self.callbacks.append(instantiate(params))
- def _notify(self, function_name: str, **kwargs: Any) -> None:
- for c in self.callbacks:
+ def _notify(self, function_name: str, reverse: bool = False, **kwargs: Any) -> None:
+ callbacks = reversed(self.callbacks) if reverse else self.callbacks
+ for c in callbacks:
try:
getattr(c, function_name)(**kwargs)
except Exception as e:
@@ -27,13 +28,15 @@
self._notify(function_name="on_run_start", config=config, **kwargs)
def on_run_end(self, config: DictConfig, **kwargs: Any) -> None:
- self._notify(function_name="on_run_end", config=config, **kwargs)
+ self._notify(function_name="on_run_end", config=config, reverse=True, **kwargs)
def on_multirun_start(self, config: DictConfig, **kwargs: Any) -> None:
self._notify(function_name="on_multirun_start", config=config, **kwargs)
def on_multirun_end(self, config: DictConfig, **kwargs: Any) -> None:
- self._notify(function_name="on_multirun_end", config=config, **kwargs)
+ self._notify(
+ function_name="on_multirun_end", reverse=True, config=config, **kwargs
+ )
def on_job_start(self, config: DictConfig, **kwargs: Any) -> None:
self._notify(function_name="on_job_start", config=config, **kwargs)
@@ -42,5 +45,9 @@
self, config: DictConfig, job_return: JobReturn, **kwargs: Any
) -> None:
self._notify(
- function_name="on_job_end", config=config, job_return=job_return, **kwargs
+ function_name="on_job_end",
+ config=config,
+ job_return=job_return,
+ reverse=True,
+ **kwargs,
)
| {"golden_diff": "diff --git a/hydra/core/callbacks.py b/hydra/core/callbacks.py\n--- a/hydra/core/callbacks.py\n+++ b/hydra/core/callbacks.py\n@@ -14,8 +14,9 @@\n for params in config.hydra.callbacks.values():\n self.callbacks.append(instantiate(params))\n \n- def _notify(self, function_name: str, **kwargs: Any) -> None:\n- for c in self.callbacks:\n+ def _notify(self, function_name: str, reverse: bool = False, **kwargs: Any) -> None:\n+ callbacks = reversed(self.callbacks) if reverse else self.callbacks\n+ for c in callbacks:\n try:\n getattr(c, function_name)(**kwargs)\n except Exception as e:\n@@ -27,13 +28,15 @@\n self._notify(function_name=\"on_run_start\", config=config, **kwargs)\n \n def on_run_end(self, config: DictConfig, **kwargs: Any) -> None:\n- self._notify(function_name=\"on_run_end\", config=config, **kwargs)\n+ self._notify(function_name=\"on_run_end\", config=config, reverse=True, **kwargs)\n \n def on_multirun_start(self, config: DictConfig, **kwargs: Any) -> None:\n self._notify(function_name=\"on_multirun_start\", config=config, **kwargs)\n \n def on_multirun_end(self, config: DictConfig, **kwargs: Any) -> None:\n- self._notify(function_name=\"on_multirun_end\", config=config, **kwargs)\n+ self._notify(\n+ function_name=\"on_multirun_end\", reverse=True, config=config, **kwargs\n+ )\n \n def on_job_start(self, config: DictConfig, **kwargs: Any) -> None:\n self._notify(function_name=\"on_job_start\", config=config, **kwargs)\n@@ -42,5 +45,9 @@\n self, config: DictConfig, job_return: JobReturn, **kwargs: Any\n ) -> None:\n self._notify(\n- function_name=\"on_job_end\", config=config, job_return=job_return, **kwargs\n+ function_name=\"on_job_end\",\n+ config=config,\n+ job_return=job_return,\n+ reverse=True,\n+ **kwargs,\n )\n", "issue": "[callbacks] call on_*_end events in reverse order\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nimport warnings\nfrom typing import Any\n\nfrom omegaconf import DictConfig\n\nfrom hydra.core.utils import JobReturn\nfrom hydra.utils import instantiate\n\n\nclass Callbacks:\n def __init__(self, config: DictConfig) -> None:\n self.callbacks = []\n for params in config.hydra.callbacks.values():\n self.callbacks.append(instantiate(params))\n\n def _notify(self, function_name: str, **kwargs: Any) -> None:\n for c in self.callbacks:\n try:\n getattr(c, function_name)(**kwargs)\n except Exception as e:\n warnings.warn(\n f\"Callback {type(c).__name__}.{function_name} raised {type(e).__name__}: {e}\"\n )\n\n def on_run_start(self, config: DictConfig, **kwargs: Any) -> None:\n self._notify(function_name=\"on_run_start\", config=config, **kwargs)\n\n def on_run_end(self, config: DictConfig, **kwargs: Any) -> None:\n self._notify(function_name=\"on_run_end\", config=config, **kwargs)\n\n def on_multirun_start(self, config: DictConfig, **kwargs: Any) -> None:\n self._notify(function_name=\"on_multirun_start\", config=config, **kwargs)\n\n def on_multirun_end(self, config: DictConfig, **kwargs: Any) -> None:\n self._notify(function_name=\"on_multirun_end\", config=config, **kwargs)\n\n def on_job_start(self, config: DictConfig, **kwargs: Any) -> None:\n self._notify(function_name=\"on_job_start\", config=config, **kwargs)\n\n def on_job_end(\n self, config: DictConfig, job_return: JobReturn, **kwargs: Any\n ) -> None:\n self._notify(\n function_name=\"on_job_end\", config=config, job_return=job_return, **kwargs\n )\n", "path": "hydra/core/callbacks.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport warnings\nfrom typing import Any\n\nfrom omegaconf import DictConfig\n\nfrom hydra.core.utils import JobReturn\nfrom hydra.utils import instantiate\n\n\nclass Callbacks:\n def __init__(self, config: DictConfig) -> None:\n self.callbacks = []\n for params in config.hydra.callbacks.values():\n self.callbacks.append(instantiate(params))\n\n def _notify(self, function_name: str, reverse: bool = False, **kwargs: Any) -> None:\n callbacks = reversed(self.callbacks) if reverse else self.callbacks\n for c in callbacks:\n try:\n getattr(c, function_name)(**kwargs)\n except Exception as e:\n warnings.warn(\n f\"Callback {type(c).__name__}.{function_name} raised {type(e).__name__}: {e}\"\n )\n\n def on_run_start(self, config: DictConfig, **kwargs: Any) -> None:\n self._notify(function_name=\"on_run_start\", config=config, **kwargs)\n\n def on_run_end(self, config: DictConfig, **kwargs: Any) -> None:\n self._notify(function_name=\"on_run_end\", config=config, reverse=True, **kwargs)\n\n def on_multirun_start(self, config: DictConfig, **kwargs: Any) -> None:\n self._notify(function_name=\"on_multirun_start\", config=config, **kwargs)\n\n def on_multirun_end(self, config: DictConfig, **kwargs: Any) -> None:\n self._notify(\n function_name=\"on_multirun_end\", reverse=True, config=config, **kwargs\n )\n\n def on_job_start(self, config: DictConfig, **kwargs: Any) -> None:\n self._notify(function_name=\"on_job_start\", config=config, **kwargs)\n\n def on_job_end(\n self, config: DictConfig, job_return: JobReturn, **kwargs: Any\n ) -> None:\n self._notify(\n function_name=\"on_job_end\",\n config=config,\n job_return=job_return,\n reverse=True,\n **kwargs,\n )\n", "path": "hydra/core/callbacks.py"}]} | 786 | 504 |
gh_patches_debug_6037 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-2346 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MI failing since at least 2018-05-12
MI has been failing since 2018-05-12
Based on automated runs it appears that MI has not run successfully in 2 days (2018-05-12).
```
02:47:18 INFO scrapelib: GET - http://legislature.mi.gov/doc.aspx?2018-HJR-GG
02:47:19 INFO pupa: save bill HJR GG in 2017-2018 as bill_dcf34e60-5681-11e8-a8aa-029b97b45e2a.json
02:47:19 INFO scrapelib: GET - http://legislature.mi.gov/doc.aspx?2017-HJR-HH
02:47:20 INFO scrapelib: GET - http://legislature.mi.gov/doc.aspx?2018-HJR-HH
02:47:21 INFO pupa: save bill HJR HH in 2017-2018 as bill_de254248-5681-11e8-a8aa-029b97b45e2a.json
02:47:21 INFO scrapelib: GET - http://legislature.mi.gov/doc.aspx?2017-HJR-II
02:47:22 INFO scrapelib: GET - http://legislature.mi.gov/doc.aspx?2018-HJR-II
02:47:23 INFO pupa: save bill HJR II in 2017-2018 as bill_df57e738-5681-11e8-a8aa-029b97b45e2a.json
02:47:23 INFO scrapelib: GET - http://legislature.mi.gov/doc.aspx?2017-HJR-JJ
02:47:24 INFO scrapelib: GET - http://legislature.mi.gov/doc.aspx?2018-HJR-JJ
02:47:25 WARNING pupa: Cannot open bill page for HJR JJ; skipping
02:47:25 INFO scrapelib: GET - http://www.senate.michigan.gov/senatorinfo_list.html
02:47:25 INFO scrapelib: HEAD - http://www.senate.michigan.gov/_images/Booher.png
02:47:26 INFO scrapelib: GET - http://www.SenatorDarwinBooher.com/contact/
02:47:27 INFO pupa: save person Booher, Darwin L as person_e21c0558-5681-11e8-a8aa-029b97b45e2a.json
loaded Open States pupa settings...
mi (scrape, import)
bills: {}
people: {}
committees: {}
Traceback (most recent call last):
File "/opt/openstates/venv-pupa//bin/pupa", line 11, in <module>
load_entry_point('pupa', 'console_scripts', 'pupa')()
File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/__main__.py", line 68, in main
subcommands[args.subcommand].handle(args, other)
File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py", line 260, in handle
return self.do_handle(args, other, juris)
File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py", line 305, in do_handle
report['scrape'] = self.do_scrape(juris, args, scrapers)
File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py", line 173, in do_scrape
report[scraper_name] = scraper.do_scrape(**scrape_args)
File "/opt/openstates/venv-pupa/src/pupa/pupa/scrape/base.py", line 116, in do_scrape
self.save_object(obj)
File "/opt/openstates/venv-pupa/src/pupa/pupa/scrape/base.py", line 99, in save_object
raise ve
File "/opt/openstates/venv-pupa/src/pupa/pupa/scrape/base.py", line 96, in save_object
obj.validate()
File "/opt/openstates/venv-pupa/src/pupa/pupa/scrape/base.py", line 191, in validate
self.__class__.__name__, self._id, '\n\t'+'\n\t'.join(errors)
pupa.exceptions.ScrapeValueError: validation of Person e21c0558-5681-11e8-a8aa-029b97b45e2a failed:
'/booher' is not a 'uri'
Failed validating 'format' in schema['properties']['links']['items']['properties']['url']:
{'format': 'uri', 'type': 'string'}
On instance['links'][0]['url']:
'/booher'
'/booher' is not a 'uri'
Failed validating 'format' in schema['properties']['sources']['items']['properties']['url']:
{'format': 'uri', 'type': 'string'}
On instance['sources'][0]['url']:
'/booher'
```
Visit http://bobsled.openstates.org for more info.
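
The failing values are root-relative hrefs (`/booher`) taken straight from the senator listing page and stored as link/source URLs. A tiny lxml sketch (stand-in markup, not the real page) of resolving such hrefs against the page URL so they validate as URIs:

```
import lxml.html

row = lxml.html.fromstring('<div><a href="/booher">Booher, Darwin L</a></div>')
print(row.xpath('.//a/@href')[0])   # '/booher'  -> rejected by the 'uri' format check

row.make_links_absolute('http://www.senate.michigan.gov/senatorinfo_list.html')
print(row.xpath('.//a/@href')[0])   # 'http://www.senate.michigan.gov/booher'
```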
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openstates/mi/people.py`
Content:
```
1 import re
2 import requests
3
4 import lxml.html
5 import scrapelib
6 from pupa.scrape import Person, Scraper
7
8
9 abbr = {'D': 'Democratic', 'R': 'Republican'}
10
11
12 class MIPersonScraper(Scraper):
13 def scrape(self, chamber=None, session=None):
14 if chamber == 'upper':
15 yield from self.scrape_upper(chamber)
16 elif chamber == 'lower':
17 yield from self.scrape_lower(chamber)
18 else:
19 yield from self.scrape_upper(chamber)
20 yield from self.scrape_lower(chamber)
21
22 def scrape_lower(self, chamber):
23 url = 'http://www.house.mi.gov/mhrpublic/frmRepList.aspx'
24 table = [
25 "website",
26 "district",
27 "name",
28 "party",
29 "location",
30 "phone",
31 "email"
32 ]
33
34 data = self.get(url).text
35 doc = lxml.html.fromstring(data)
36
37 # skip two rows at top
38 for row in doc.xpath('//table[@id="grvRepInfo"]/*'):
39 tds = row.xpath('.//td')
40 if len(tds) == 0:
41 continue
42 metainf = {}
43 for i in range(0, len(table)):
44 metainf[table[i]] = tds[i]
45 district = str(int(metainf['district'].text_content().strip()))
46 party = metainf['party'].text_content().strip()
47 phone = metainf['phone'].text_content().strip()
48 email = metainf['email'].text_content().strip()
49 name = metainf['name'].text_content().strip()
50 if name == 'Vacant' or re.match(r'^District \d{1,3}$', name):
51 self.warning('District {} appears vacant, and will be skipped'.format(district))
52 continue
53 leg_url = metainf['website'].xpath("./a")[0].attrib['href']
54
55 office = metainf['location'].text_content().strip()
56 office = re.sub(
57 ' HOB',
58 ' Anderson House Office Building\n124 North Capitol Avenue\nLansing, MI 48933',
59 office
60 )
61 office = re.sub(
62 ' CB',
63 ' State Capitol Building\nLansing, MI 48909',
64 office
65 )
66
67 try:
68 photo_url = self.get_photo_url(leg_url)[0]
69 except (scrapelib.HTTPError, IndexError):
70 photo_url = ''
71 self.warning('no photo url for %s', name)
72
73 person = Person(name=name, district=district, party=abbr[party],
74 primary_org='lower', image=photo_url)
75
76 person.add_link(leg_url)
77 person.add_source(leg_url)
78
79 person.add_contact_detail(type='address', value=office, note='Capitol Office')
80 person.add_contact_detail(type='voice', value=phone, note='Capitol Office')
81 person.add_contact_detail(type='email', value=email, note='Capitol Office')
82
83 yield person
84
85 def scrape_upper(self, chamber):
86 url = 'http://www.senate.michigan.gov/senatorinfo_list.html'
87 url_to_append = 'http://www.senate.michigan.gov/_images/'
88 data = self.get(url).text
89 doc = lxml.html.fromstring(data)
90 for row in doc.xpath('//table[not(@class="calendar")]//tr')[3:]:
91 if len(row) != 7:
92 continue
93
94 # party, dist, member, office_phone, office_fax, office_loc
95 party, dist, member, contact, phone, fax, loc = row.getchildren()
96 if (party.text_content().strip() == "" or
97 'Lieutenant Governor' in member.text_content()):
98 continue
99
100 party = abbr[party.text]
101 district = dist.text_content().strip()
102 name = member.text_content().strip()
103 name = re.sub(r'\s+', " ", name)
104 surname = re.split(', | ', name)
105 surname[0] = re.sub('[\']', '', surname[0])
106 try:
107 self.head(url_to_append + surname[0] + '.png')
108 photo_url = url_to_append + surname[0] + '.png'
109 except scrapelib.HTTPError:
110 try:
111 self.head(url_to_append + surname[0] + '.jpg')
112 photo_url = url_to_append + surname[0] + '.jpg'
113 except scrapelib.HTTPError:
114 photo_url = None
115
116 if name == 'Vacant':
117 self.info('district %s is vacant', district)
118 continue
119
120 leg_url = member.xpath('a/@href')[0]
121 office_phone = phone.text
122 office_fax = fax.text
123
124 office_loc = loc.text
125 office_loc = re.sub(
126 ' Farnum Bldg',
127 ' Farnum Office Building\n125 West Allegan Street\nLansing, MI 48933',
128 office_loc
129 )
130 office_loc = re.sub(
131 ' Capitol Bldg',
132 ' State Capitol Building\nLansing, MI 48909',
133 office_loc
134 )
135
136 # email addresses aren't on the list page anymore but they
137 # are on the page linked off "Contact Me"
138
139 # data has a typo in a row
140 email = None
141 contact_url = [
142 a for a in row.xpath(".//a")
143 if a.text in ('Contact Me', 'Conact Me')][0].get('href')
144 try:
145 contact_html = self.get(contact_url).text
146 contact_doc = lxml.html.fromstring(contact_html)
147
148 header_email = contact_doc.xpath("//a[@class='header_email']")
149 if header_email:
150 email = header_email[0].text
151 else:
152 # not using the most common template, but maybe they
153 # dropped their email on the page somewhere
154 links = contact_doc.xpath('//a') or []
155 text_email = [a for a in links
156 if 'mailto:' in (a.get('href') or '')]
157 if text_email:
158 email = text_email[0].text
159 except requests.exceptions.TooManyRedirects:
160 self.warning("Contact Link Not Working for %s" % name)
161 person = Person(name=name, district=district, party=party,
162 primary_org='upper', image=photo_url)
163
164 person.add_link(leg_url)
165 person.add_source(leg_url)
166
167 person.add_contact_detail(type='address', value=office_loc, note='Capitol Office')
168 person.add_contact_detail(type='voice', value=office_phone, note='Capitol Office')
169 person.add_contact_detail(type='fax', value=office_fax, note='Capitol Office')
170 if email:
171 person.add_contact_detail(type='email', value=email, note='Capitol Office')
172
173 yield person
174
175 def get_photo_url(self, url):
176 data = self.get(url).text
177 doc = lxml.html.fromstring(data)
178 doc.make_links_absolute(url)
179 return (
180 doc.xpath('//div[contains(@class, "headshotTop")]//img/@src') + # housedems.com
181 doc.xpath('//div[contains(@class, "widget_sp_image")]//img/@src') # gophouse.org
182 )
183
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/openstates/mi/people.py b/openstates/mi/people.py
--- a/openstates/mi/people.py
+++ b/openstates/mi/people.py
@@ -87,6 +87,7 @@
url_to_append = 'http://www.senate.michigan.gov/_images/'
data = self.get(url).text
doc = lxml.html.fromstring(data)
+ doc.make_links_absolute(url)
for row in doc.xpath('//table[not(@class="calendar")]//tr')[3:]:
if len(row) != 7:
continue
| {"golden_diff": "diff --git a/openstates/mi/people.py b/openstates/mi/people.py\n--- a/openstates/mi/people.py\n+++ b/openstates/mi/people.py\n@@ -87,6 +87,7 @@\n url_to_append = 'http://www.senate.michigan.gov/_images/'\n data = self.get(url).text\n doc = lxml.html.fromstring(data)\n+ doc.make_links_absolute(url)\n for row in doc.xpath('//table[not(@class=\"calendar\")]//tr')[3:]:\n if len(row) != 7:\n continue\n", "issue": "MI failing since at least 2018-05-12\nMI has been failing since 2018-05-12\n\nBased on automated runs it appears that MI has not run successfully in 2 days (2018-05-12).\n\n\n```\n 02:47:18 INFO scrapelib: GET - http://legislature.mi.gov/doc.aspx?2018-HJR-GG\n02:47:19 INFO pupa: save bill HJR GG in 2017-2018 as bill_dcf34e60-5681-11e8-a8aa-029b97b45e2a.json\n02:47:19 INFO scrapelib: GET - http://legislature.mi.gov/doc.aspx?2017-HJR-HH\n02:47:20 INFO scrapelib: GET - http://legislature.mi.gov/doc.aspx?2018-HJR-HH\n02:47:21 INFO pupa: save bill HJR HH in 2017-2018 as bill_de254248-5681-11e8-a8aa-029b97b45e2a.json\n02:47:21 INFO scrapelib: GET - http://legislature.mi.gov/doc.aspx?2017-HJR-II\n02:47:22 INFO scrapelib: GET - http://legislature.mi.gov/doc.aspx?2018-HJR-II\n02:47:23 INFO pupa: save bill HJR II in 2017-2018 as bill_df57e738-5681-11e8-a8aa-029b97b45e2a.json\n02:47:23 INFO scrapelib: GET - http://legislature.mi.gov/doc.aspx?2017-HJR-JJ\n02:47:24 INFO scrapelib: GET - http://legislature.mi.gov/doc.aspx?2018-HJR-JJ\n02:47:25 WARNING pupa: Cannot open bill page for HJR JJ; skipping\n02:47:25 INFO scrapelib: GET - http://www.senate.michigan.gov/senatorinfo_list.html\n02:47:25 INFO scrapelib: HEAD - http://www.senate.michigan.gov/_images/Booher.png\n02:47:26 INFO scrapelib: GET - http://www.SenatorDarwinBooher.com/contact/\n02:47:27 INFO pupa: save person Booher, Darwin L as person_e21c0558-5681-11e8-a8aa-029b97b45e2a.json\nloaded Open States pupa settings...\nmi (scrape, import)\n bills: {}\n people: {}\n committees: {}\nTraceback (most recent call last):\n File \"/opt/openstates/venv-pupa//bin/pupa\", line 11, in <module>\n load_entry_point('pupa', 'console_scripts', 'pupa')()\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/cli/__main__.py\", line 68, in main\n subcommands[args.subcommand].handle(args, other)\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py\", line 260, in handle\n return self.do_handle(args, other, juris)\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py\", line 305, in do_handle\n report['scrape'] = self.do_scrape(juris, args, scrapers)\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py\", line 173, in do_scrape\n report[scraper_name] = scraper.do_scrape(**scrape_args)\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/scrape/base.py\", line 116, in do_scrape\n self.save_object(obj)\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/scrape/base.py\", line 99, in save_object\n raise ve\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/scrape/base.py\", line 96, in save_object\n obj.validate()\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/scrape/base.py\", line 191, in validate\n self.__class__.__name__, self._id, '\\n\\t'+'\\n\\t'.join(errors)\npupa.exceptions.ScrapeValueError: validation of Person e21c0558-5681-11e8-a8aa-029b97b45e2a failed: \n\t'/booher' is not a 'uri'\nFailed validating 'format' in schema['properties']['links']['items']['properties']['url']:\n {'format': 'uri', 'type': 'string'}\nOn instance['links'][0]['url']:\n '/booher'\n\t'/booher' is not a 'uri'\nFailed 
validating 'format' in schema['properties']['sources']['items']['properties']['url']:\n {'format': 'uri', 'type': 'string'}\nOn instance['sources'][0]['url']:\n '/booher'\n```\n\nVisit http://bobsled.openstates.org for more info.\n\n", "before_files": [{"content": "import re\nimport requests\n\nimport lxml.html\nimport scrapelib\nfrom pupa.scrape import Person, Scraper\n\n\nabbr = {'D': 'Democratic', 'R': 'Republican'}\n\n\nclass MIPersonScraper(Scraper):\n def scrape(self, chamber=None, session=None):\n if chamber == 'upper':\n yield from self.scrape_upper(chamber)\n elif chamber == 'lower':\n yield from self.scrape_lower(chamber)\n else:\n yield from self.scrape_upper(chamber)\n yield from self.scrape_lower(chamber)\n\n def scrape_lower(self, chamber):\n url = 'http://www.house.mi.gov/mhrpublic/frmRepList.aspx'\n table = [\n \"website\",\n \"district\",\n \"name\",\n \"party\",\n \"location\",\n \"phone\",\n \"email\"\n ]\n\n data = self.get(url).text\n doc = lxml.html.fromstring(data)\n\n # skip two rows at top\n for row in doc.xpath('//table[@id=\"grvRepInfo\"]/*'):\n tds = row.xpath('.//td')\n if len(tds) == 0:\n continue\n metainf = {}\n for i in range(0, len(table)):\n metainf[table[i]] = tds[i]\n district = str(int(metainf['district'].text_content().strip()))\n party = metainf['party'].text_content().strip()\n phone = metainf['phone'].text_content().strip()\n email = metainf['email'].text_content().strip()\n name = metainf['name'].text_content().strip()\n if name == 'Vacant' or re.match(r'^District \\d{1,3}$', name):\n self.warning('District {} appears vacant, and will be skipped'.format(district))\n continue\n leg_url = metainf['website'].xpath(\"./a\")[0].attrib['href']\n\n office = metainf['location'].text_content().strip()\n office = re.sub(\n ' HOB',\n ' Anderson House Office Building\\n124 North Capitol Avenue\\nLansing, MI 48933',\n office\n )\n office = re.sub(\n ' CB',\n ' State Capitol Building\\nLansing, MI 48909',\n office\n )\n\n try:\n photo_url = self.get_photo_url(leg_url)[0]\n except (scrapelib.HTTPError, IndexError):\n photo_url = ''\n self.warning('no photo url for %s', name)\n\n person = Person(name=name, district=district, party=abbr[party],\n primary_org='lower', image=photo_url)\n\n person.add_link(leg_url)\n person.add_source(leg_url)\n\n person.add_contact_detail(type='address', value=office, note='Capitol Office')\n person.add_contact_detail(type='voice', value=phone, note='Capitol Office')\n person.add_contact_detail(type='email', value=email, note='Capitol Office')\n\n yield person\n\n def scrape_upper(self, chamber):\n url = 'http://www.senate.michigan.gov/senatorinfo_list.html'\n url_to_append = 'http://www.senate.michigan.gov/_images/'\n data = self.get(url).text\n doc = lxml.html.fromstring(data)\n for row in doc.xpath('//table[not(@class=\"calendar\")]//tr')[3:]:\n if len(row) != 7:\n continue\n\n # party, dist, member, office_phone, office_fax, office_loc\n party, dist, member, contact, phone, fax, loc = row.getchildren()\n if (party.text_content().strip() == \"\" or\n 'Lieutenant Governor' in member.text_content()):\n continue\n\n party = abbr[party.text]\n district = dist.text_content().strip()\n name = member.text_content().strip()\n name = re.sub(r'\\s+', \" \", name)\n surname = re.split(', | ', name)\n surname[0] = re.sub('[\\']', '', surname[0])\n try:\n self.head(url_to_append + surname[0] + '.png')\n photo_url = url_to_append + surname[0] + '.png'\n except scrapelib.HTTPError:\n try:\n self.head(url_to_append + surname[0] + '.jpg')\n 
photo_url = url_to_append + surname[0] + '.jpg'\n except scrapelib.HTTPError:\n photo_url = None\n\n if name == 'Vacant':\n self.info('district %s is vacant', district)\n continue\n\n leg_url = member.xpath('a/@href')[0]\n office_phone = phone.text\n office_fax = fax.text\n\n office_loc = loc.text\n office_loc = re.sub(\n ' Farnum Bldg',\n ' Farnum Office Building\\n125 West Allegan Street\\nLansing, MI 48933',\n office_loc\n )\n office_loc = re.sub(\n ' Capitol Bldg',\n ' State Capitol Building\\nLansing, MI 48909',\n office_loc\n )\n\n # email addresses aren't on the list page anymore but they\n # are on the page linked off \"Contact Me\"\n\n # data has a typo in a row\n email = None\n contact_url = [\n a for a in row.xpath(\".//a\")\n if a.text in ('Contact Me', 'Conact Me')][0].get('href')\n try:\n contact_html = self.get(contact_url).text\n contact_doc = lxml.html.fromstring(contact_html)\n\n header_email = contact_doc.xpath(\"//a[@class='header_email']\")\n if header_email:\n email = header_email[0].text\n else:\n # not using the most common template, but maybe they\n # dropped their email on the page somewhere\n links = contact_doc.xpath('//a') or []\n text_email = [a for a in links\n if 'mailto:' in (a.get('href') or '')]\n if text_email:\n email = text_email[0].text\n except requests.exceptions.TooManyRedirects:\n self.warning(\"Contact Link Not Working for %s\" % name)\n person = Person(name=name, district=district, party=party,\n primary_org='upper', image=photo_url)\n\n person.add_link(leg_url)\n person.add_source(leg_url)\n\n person.add_contact_detail(type='address', value=office_loc, note='Capitol Office')\n person.add_contact_detail(type='voice', value=office_phone, note='Capitol Office')\n person.add_contact_detail(type='fax', value=office_fax, note='Capitol Office')\n if email:\n person.add_contact_detail(type='email', value=email, note='Capitol Office')\n\n yield person\n\n def get_photo_url(self, url):\n data = self.get(url).text\n doc = lxml.html.fromstring(data)\n doc.make_links_absolute(url)\n return (\n doc.xpath('//div[contains(@class, \"headshotTop\")]//img/@src') + # housedems.com\n doc.xpath('//div[contains(@class, \"widget_sp_image\")]//img/@src') # gophouse.org\n )\n", "path": "openstates/mi/people.py"}], "after_files": [{"content": "import re\nimport requests\n\nimport lxml.html\nimport scrapelib\nfrom pupa.scrape import Person, Scraper\n\n\nabbr = {'D': 'Democratic', 'R': 'Republican'}\n\n\nclass MIPersonScraper(Scraper):\n def scrape(self, chamber=None, session=None):\n if chamber == 'upper':\n yield from self.scrape_upper(chamber)\n elif chamber == 'lower':\n yield from self.scrape_lower(chamber)\n else:\n yield from self.scrape_upper(chamber)\n yield from self.scrape_lower(chamber)\n\n def scrape_lower(self, chamber):\n url = 'http://www.house.mi.gov/mhrpublic/frmRepList.aspx'\n table = [\n \"website\",\n \"district\",\n \"name\",\n \"party\",\n \"location\",\n \"phone\",\n \"email\"\n ]\n\n data = self.get(url).text\n doc = lxml.html.fromstring(data)\n\n # skip two rows at top\n for row in doc.xpath('//table[@id=\"grvRepInfo\"]/*'):\n tds = row.xpath('.//td')\n if len(tds) == 0:\n continue\n metainf = {}\n for i in range(0, len(table)):\n metainf[table[i]] = tds[i]\n district = str(int(metainf['district'].text_content().strip()))\n party = metainf['party'].text_content().strip()\n phone = metainf['phone'].text_content().strip()\n email = metainf['email'].text_content().strip()\n name = metainf['name'].text_content().strip()\n if name == 'Vacant' or 
re.match(r'^District \\d{1,3}$', name):\n self.warning('District {} appears vacant, and will be skipped'.format(district))\n continue\n leg_url = metainf['website'].xpath(\"./a\")[0].attrib['href']\n\n office = metainf['location'].text_content().strip()\n office = re.sub(\n ' HOB',\n ' Anderson House Office Building\\n124 North Capitol Avenue\\nLansing, MI 48933',\n office\n )\n office = re.sub(\n ' CB',\n ' State Capitol Building\\nLansing, MI 48909',\n office\n )\n\n try:\n photo_url = self.get_photo_url(leg_url)[0]\n except (scrapelib.HTTPError, IndexError):\n photo_url = ''\n self.warning('no photo url for %s', name)\n\n person = Person(name=name, district=district, party=abbr[party],\n primary_org='lower', image=photo_url)\n\n person.add_link(leg_url)\n person.add_source(leg_url)\n\n person.add_contact_detail(type='address', value=office, note='Capitol Office')\n person.add_contact_detail(type='voice', value=phone, note='Capitol Office')\n person.add_contact_detail(type='email', value=email, note='Capitol Office')\n\n yield person\n\n def scrape_upper(self, chamber):\n url = 'http://www.senate.michigan.gov/senatorinfo_list.html'\n url_to_append = 'http://www.senate.michigan.gov/_images/'\n data = self.get(url).text\n doc = lxml.html.fromstring(data)\n doc.make_links_absolute(url)\n for row in doc.xpath('//table[not(@class=\"calendar\")]//tr')[3:]:\n if len(row) != 7:\n continue\n\n # party, dist, member, office_phone, office_fax, office_loc\n party, dist, member, contact, phone, fax, loc = row.getchildren()\n if (party.text_content().strip() == \"\" or\n 'Lieutenant Governor' in member.text_content()):\n continue\n\n party = abbr[party.text]\n district = dist.text_content().strip()\n name = member.text_content().strip()\n name = re.sub(r'\\s+', \" \", name)\n surname = re.split(', | ', name)\n surname[0] = re.sub('[\\']', '', surname[0])\n try:\n self.head(url_to_append + surname[0] + '.png')\n photo_url = url_to_append + surname[0] + '.png'\n except scrapelib.HTTPError:\n try:\n self.head(url_to_append + surname[0] + '.jpg')\n photo_url = url_to_append + surname[0] + '.jpg'\n except scrapelib.HTTPError:\n photo_url = None\n\n if name == 'Vacant':\n self.info('district %s is vacant', district)\n continue\n\n leg_url = member.xpath('a/@href')[0]\n office_phone = phone.text\n office_fax = fax.text\n\n office_loc = loc.text\n office_loc = re.sub(\n ' Farnum Bldg',\n ' Farnum Office Building\\n125 West Allegan Street\\nLansing, MI 48933',\n office_loc\n )\n office_loc = re.sub(\n ' Capitol Bldg',\n ' State Capitol Building\\nLansing, MI 48909',\n office_loc\n )\n\n # email addresses aren't on the list page anymore but they\n # are on the page linked off \"Contact Me\"\n\n # data has a typo in a row\n email = None\n contact_url = [\n a for a in row.xpath(\".//a\")\n if a.text in ('Contact Me', 'Conact Me')][0].get('href')\n try:\n contact_html = self.get(contact_url).text\n contact_doc = lxml.html.fromstring(contact_html)\n\n header_email = contact_doc.xpath(\"//a[@class='header_email']\")\n if header_email:\n email = header_email[0].text\n else:\n # not using the most common template, but maybe they\n # dropped their email on the page somewhere\n links = contact_doc.xpath('//a') or []\n text_email = [a for a in links\n if 'mailto:' in (a.get('href') or '')]\n if text_email:\n email = text_email[0].text\n except requests.exceptions.TooManyRedirects:\n self.warning(\"Contact Link Not Working for %s\" % name)\n person = Person(name=name, district=district, party=party,\n primary_org='upper', 
image=photo_url)\n\n person.add_link(leg_url)\n person.add_source(leg_url)\n\n person.add_contact_detail(type='address', value=office_loc, note='Capitol Office')\n person.add_contact_detail(type='voice', value=office_phone, note='Capitol Office')\n person.add_contact_detail(type='fax', value=office_fax, note='Capitol Office')\n if email:\n person.add_contact_detail(type='email', value=email, note='Capitol Office')\n\n yield person\n\n def get_photo_url(self, url):\n data = self.get(url).text\n doc = lxml.html.fromstring(data)\n doc.make_links_absolute(url)\n return (\n doc.xpath('//div[contains(@class, \"headshotTop\")]//img/@src') + # housedems.com\n doc.xpath('//div[contains(@class, \"widget_sp_image\")]//img/@src') # gophouse.org\n )\n", "path": "openstates/mi/people.py"}]} | 3,551 | 124 |
gh_patches_debug_23761 | rasdani/github-patches | git_diff | fossasia__open-event-server-5139 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add custom form for attendees
**Is your feature request related to a problem? Please describe.**
<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->
Add custom form for attendees
**Describe the solution you'd like**
<!-- A clear and concise description of what you want to happen. -->
**Describe alternatives you've considered**
<!-- A clear and concise description of any alternative solutions or features you've considered. -->
**Additional context**
<!-- Add any other context or screenshots about the feature request here. -->
**Working on it**
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/models/custom_form.py`
Content:
```
1 import json
2 from sqlalchemy.schema import UniqueConstraint
3
4 from app.models import db
5 from app.models.base import SoftDeletionModel
6
7 SESSION_FORM = {
8 "title": {"include": 1, "require": 1},
9 "subtitle": {"include": 0, "require": 0},
10 "short_abstract": {"include": 1, "require": 0},
11 "long_abstract": {"include": 0, "require": 0},
12 "comments": {"include": 1, "require": 0},
13 "track": {"include": 0, "require": 0},
14 "session_type": {"include": 0, "require": 0},
15 "language": {"include": 0, "require": 0},
16 "slides": {"include": 1, "require": 0},
17 "video": {"include": 0, "require": 0},
18 "audio": {"include": 0, "require": 0}
19 }
20
21 SPEAKER_FORM = {
22 "name": {"include": 1, "require": 1},
23 "email": {"include": 1, "require": 1},
24 "photo": {"include": 1, "require": 0},
25 "organisation": {"include": 1, "require": 0},
26 "position": {"include": 1, "require": 0},
27 "country": {"include": 1, "require": 0},
28 "short_biography": {"include": 1, "require": 0},
29 "long_biography": {"include": 0, "require": 0},
30 "mobile": {"include": 0, "require": 0},
31 "website": {"include": 1, "require": 0},
32 "facebook": {"include": 0, "require": 0},
33 "twitter": {"include": 1, "require": 0},
34 "github": {"include": 0, "require": 0},
35 "linkedin": {"include": 0, "require": 0}
36 }
37
38 session_form_str = json.dumps(SESSION_FORM, separators=(',', ':'))
39 speaker_form_str = json.dumps(SPEAKER_FORM, separators=(',', ':'))
40
41
42 class CustomForms(SoftDeletionModel):
43 """custom form model class"""
44 __tablename__ = 'custom_forms'
45 __table_args__ = (UniqueConstraint('event_id', 'field_identifier', 'form', name='custom_form_identifier'), )
46 id = db.Column(db.Integer, primary_key=True)
47 field_identifier = db.Column(db.String, nullable=False)
48 form = db.Column(db.String, nullable=False)
49 type = db.Column(db.String, nullable=False)
50 is_required = db.Column(db.Boolean)
51 is_included = db.Column(db.Boolean)
52 is_fixed = db.Column(db.Boolean)
53 event_id = db.Column(db.Integer, db.ForeignKey('events.id', ondelete='CASCADE'))
54
55 def __init__(self,
56 event_id=None,
57 field_identifier=None,
58 form=None,
59 type=None,
60 is_required=None,
61 is_included=None,
62 is_fixed=None,
63 deleted_at=None):
64 self.event_id = event_id
65 self.field_identifier = field_identifier,
66 self.form = form,
67 self.type = type,
68 self.is_required = is_required,
69 self.is_included = is_included,
70 self.is_fixed = is_fixed
71 self.deleted_at = deleted_at
72
73 def __repr__(self):
74 return '<CustomForm %r>' % self.id
75
76 def __str__(self):
77 return self.__repr__()
78
79 @property
80 def serialize(self):
81 """Return object data in easily serializable format"""
82
83 return {
84 'id': self.id,
85 'field_identifier': self.field_identifier,
86 'form': self.form,
87 'type': self.type,
88 'is_required': self.is_required,
89 'is_included': self.is_included,
90 'is_fixed': self.is_fixed
91 }
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/models/custom_form.py b/app/models/custom_form.py
--- a/app/models/custom_form.py
+++ b/app/models/custom_form.py
@@ -35,8 +35,34 @@
"linkedin": {"include": 0, "require": 0}
}
+ATTENDEE_FORM = {
+ "firstname": {"include": 1, "require": 1},
+ "lastname": {"include": 1, "require": 1},
+ "email": {"include": 1, "require": 0},
+ "address": {"include": 1, "require": 0},
+ "city": {"include": 1, "require": 0},
+ "state": {"include": 1, "require": 0},
+ "country": {"include": 1, "require": 0},
+ "job_title": {"include": 1, "require": 0},
+ "phone": {"include": 1, "require": 0},
+ "tax_business_info": {"include": 0, "require": 0},
+ "billing_address": {"include": 0, "require": 0},
+ "home_address": {"include": 0, "require": 0},
+ "shipping_address": {"include": 0, "require": 0},
+ "company": {"include": 0, "require": 0},
+ "work_address": {"include": 0, "require": 0},
+ "work_phone": {"include": 0, "require": 0},
+ "website": {"include": 1, "require": 0},
+ "blog": {"include": 0, "require": 0},
+ "twitter": {"include": 1, "require": 0},
+ "facebook": {"include": 0, "require": 0},
+ "github": {"include": 1, "require": 0},
+ "gender": {"include": 0, "require": 0},
+}
+
session_form_str = json.dumps(SESSION_FORM, separators=(',', ':'))
speaker_form_str = json.dumps(SPEAKER_FORM, separators=(',', ':'))
+attendee_form_str = json.dumps(ATTENDEE_FORM, separators=(',', ':'))
class CustomForms(SoftDeletionModel):
| {"golden_diff": "diff --git a/app/models/custom_form.py b/app/models/custom_form.py\n--- a/app/models/custom_form.py\n+++ b/app/models/custom_form.py\n@@ -35,8 +35,34 @@\n \"linkedin\": {\"include\": 0, \"require\": 0}\n }\n \n+ATTENDEE_FORM = {\n+ \"firstname\": {\"include\": 1, \"require\": 1},\n+ \"lastname\": {\"include\": 1, \"require\": 1},\n+ \"email\": {\"include\": 1, \"require\": 0},\n+ \"address\": {\"include\": 1, \"require\": 0},\n+ \"city\": {\"include\": 1, \"require\": 0},\n+ \"state\": {\"include\": 1, \"require\": 0},\n+ \"country\": {\"include\": 1, \"require\": 0},\n+ \"job_title\": {\"include\": 1, \"require\": 0},\n+ \"phone\": {\"include\": 1, \"require\": 0},\n+ \"tax_business_info\": {\"include\": 0, \"require\": 0},\n+ \"billing_address\": {\"include\": 0, \"require\": 0},\n+ \"home_address\": {\"include\": 0, \"require\": 0},\n+ \"shipping_address\": {\"include\": 0, \"require\": 0},\n+ \"company\": {\"include\": 0, \"require\": 0},\n+ \"work_address\": {\"include\": 0, \"require\": 0},\n+ \"work_phone\": {\"include\": 0, \"require\": 0},\n+ \"website\": {\"include\": 1, \"require\": 0},\n+ \"blog\": {\"include\": 0, \"require\": 0},\n+ \"twitter\": {\"include\": 1, \"require\": 0},\n+ \"facebook\": {\"include\": 0, \"require\": 0},\n+ \"github\": {\"include\": 1, \"require\": 0},\n+ \"gender\": {\"include\": 0, \"require\": 0},\n+}\n+\n session_form_str = json.dumps(SESSION_FORM, separators=(',', ':'))\n speaker_form_str = json.dumps(SPEAKER_FORM, separators=(',', ':'))\n+attendee_form_str = json.dumps(ATTENDEE_FORM, separators=(',', ':'))\n \n \n class CustomForms(SoftDeletionModel):\n", "issue": "Add custom form for attendees\n**Is your feature request related to a problem? Please describe.**\r\n<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->\r\n\r\nAdd custom form for attendees\r\n\r\n**Describe the solution you'd like**\r\n<!-- A clear and concise description of what you want to happen. -->\r\n\r\n**Describe alternatives you've considered**\r\n<!-- A clear and concise description of any alternative solutions or features you've considered. -->\r\n\r\n**Additional context**\r\n<!-- Add any other context or screenshots about the feature request here. 
-->\r\n\r\n**Working on it**\n", "before_files": [{"content": "import json\nfrom sqlalchemy.schema import UniqueConstraint\n\nfrom app.models import db\nfrom app.models.base import SoftDeletionModel\n\nSESSION_FORM = {\n \"title\": {\"include\": 1, \"require\": 1},\n \"subtitle\": {\"include\": 0, \"require\": 0},\n \"short_abstract\": {\"include\": 1, \"require\": 0},\n \"long_abstract\": {\"include\": 0, \"require\": 0},\n \"comments\": {\"include\": 1, \"require\": 0},\n \"track\": {\"include\": 0, \"require\": 0},\n \"session_type\": {\"include\": 0, \"require\": 0},\n \"language\": {\"include\": 0, \"require\": 0},\n \"slides\": {\"include\": 1, \"require\": 0},\n \"video\": {\"include\": 0, \"require\": 0},\n \"audio\": {\"include\": 0, \"require\": 0}\n}\n\nSPEAKER_FORM = {\n \"name\": {\"include\": 1, \"require\": 1},\n \"email\": {\"include\": 1, \"require\": 1},\n \"photo\": {\"include\": 1, \"require\": 0},\n \"organisation\": {\"include\": 1, \"require\": 0},\n \"position\": {\"include\": 1, \"require\": 0},\n \"country\": {\"include\": 1, \"require\": 0},\n \"short_biography\": {\"include\": 1, \"require\": 0},\n \"long_biography\": {\"include\": 0, \"require\": 0},\n \"mobile\": {\"include\": 0, \"require\": 0},\n \"website\": {\"include\": 1, \"require\": 0},\n \"facebook\": {\"include\": 0, \"require\": 0},\n \"twitter\": {\"include\": 1, \"require\": 0},\n \"github\": {\"include\": 0, \"require\": 0},\n \"linkedin\": {\"include\": 0, \"require\": 0}\n}\n\nsession_form_str = json.dumps(SESSION_FORM, separators=(',', ':'))\nspeaker_form_str = json.dumps(SPEAKER_FORM, separators=(',', ':'))\n\n\nclass CustomForms(SoftDeletionModel):\n \"\"\"custom form model class\"\"\"\n __tablename__ = 'custom_forms'\n __table_args__ = (UniqueConstraint('event_id', 'field_identifier', 'form', name='custom_form_identifier'), )\n id = db.Column(db.Integer, primary_key=True)\n field_identifier = db.Column(db.String, nullable=False)\n form = db.Column(db.String, nullable=False)\n type = db.Column(db.String, nullable=False)\n is_required = db.Column(db.Boolean)\n is_included = db.Column(db.Boolean)\n is_fixed = db.Column(db.Boolean)\n event_id = db.Column(db.Integer, db.ForeignKey('events.id', ondelete='CASCADE'))\n\n def __init__(self,\n event_id=None,\n field_identifier=None,\n form=None,\n type=None,\n is_required=None,\n is_included=None,\n is_fixed=None,\n deleted_at=None):\n self.event_id = event_id\n self.field_identifier = field_identifier,\n self.form = form,\n self.type = type,\n self.is_required = is_required,\n self.is_included = is_included,\n self.is_fixed = is_fixed\n self.deleted_at = deleted_at\n\n def __repr__(self):\n return '<CustomForm %r>' % self.id\n\n def __str__(self):\n return self.__repr__()\n\n @property\n def serialize(self):\n \"\"\"Return object data in easily serializable format\"\"\"\n\n return {\n 'id': self.id,\n 'field_identifier': self.field_identifier,\n 'form': self.form,\n 'type': self.type,\n 'is_required': self.is_required,\n 'is_included': self.is_included,\n 'is_fixed': self.is_fixed\n }\n", "path": "app/models/custom_form.py"}], "after_files": [{"content": "import json\nfrom sqlalchemy.schema import UniqueConstraint\n\nfrom app.models import db\nfrom app.models.base import SoftDeletionModel\n\nSESSION_FORM = {\n \"title\": {\"include\": 1, \"require\": 1},\n \"subtitle\": {\"include\": 0, \"require\": 0},\n \"short_abstract\": {\"include\": 1, \"require\": 0},\n \"long_abstract\": {\"include\": 0, \"require\": 0},\n \"comments\": {\"include\": 1, 
\"require\": 0},\n \"track\": {\"include\": 0, \"require\": 0},\n \"session_type\": {\"include\": 0, \"require\": 0},\n \"language\": {\"include\": 0, \"require\": 0},\n \"slides\": {\"include\": 1, \"require\": 0},\n \"video\": {\"include\": 0, \"require\": 0},\n \"audio\": {\"include\": 0, \"require\": 0}\n}\n\nSPEAKER_FORM = {\n \"name\": {\"include\": 1, \"require\": 1},\n \"email\": {\"include\": 1, \"require\": 1},\n \"photo\": {\"include\": 1, \"require\": 0},\n \"organisation\": {\"include\": 1, \"require\": 0},\n \"position\": {\"include\": 1, \"require\": 0},\n \"country\": {\"include\": 1, \"require\": 0},\n \"short_biography\": {\"include\": 1, \"require\": 0},\n \"long_biography\": {\"include\": 0, \"require\": 0},\n \"mobile\": {\"include\": 0, \"require\": 0},\n \"website\": {\"include\": 1, \"require\": 0},\n \"facebook\": {\"include\": 0, \"require\": 0},\n \"twitter\": {\"include\": 1, \"require\": 0},\n \"github\": {\"include\": 0, \"require\": 0},\n \"linkedin\": {\"include\": 0, \"require\": 0}\n}\n\nATTENDEE_FORM = {\n \"firstname\": {\"include\": 1, \"require\": 1},\n \"lastname\": {\"include\": 1, \"require\": 1},\n \"email\": {\"include\": 1, \"require\": 0},\n \"address\": {\"include\": 1, \"require\": 0},\n \"city\": {\"include\": 1, \"require\": 0},\n \"state\": {\"include\": 1, \"require\": 0},\n \"country\": {\"include\": 1, \"require\": 0},\n \"job_title\": {\"include\": 1, \"require\": 0},\n \"phone\": {\"include\": 1, \"require\": 0},\n \"tax_business_info\": {\"include\": 0, \"require\": 0},\n \"billing_address\": {\"include\": 0, \"require\": 0},\n \"home_address\": {\"include\": 0, \"require\": 0},\n \"shipping_address\": {\"include\": 0, \"require\": 0},\n \"company\": {\"include\": 0, \"require\": 0},\n \"work_address\": {\"include\": 0, \"require\": 0},\n \"work_phone\": {\"include\": 0, \"require\": 0},\n \"website\": {\"include\": 1, \"require\": 0},\n \"blog\": {\"include\": 0, \"require\": 0},\n \"twitter\": {\"include\": 1, \"require\": 0},\n \"facebook\": {\"include\": 0, \"require\": 0},\n \"github\": {\"include\": 1, \"require\": 0},\n \"gender\": {\"include\": 0, \"require\": 0},\n}\n\nsession_form_str = json.dumps(SESSION_FORM, separators=(',', ':'))\nspeaker_form_str = json.dumps(SPEAKER_FORM, separators=(',', ':'))\nattendee_form_str = json.dumps(ATTENDEE_FORM, separators=(',', ':'))\n\n\nclass CustomForms(SoftDeletionModel):\n \"\"\"custom form model class\"\"\"\n __tablename__ = 'custom_forms'\n __table_args__ = (UniqueConstraint('event_id', 'field_identifier', 'form', name='custom_form_identifier'), )\n id = db.Column(db.Integer, primary_key=True)\n field_identifier = db.Column(db.String, nullable=False)\n form = db.Column(db.String, nullable=False)\n type = db.Column(db.String, nullable=False)\n is_required = db.Column(db.Boolean)\n is_included = db.Column(db.Boolean)\n is_fixed = db.Column(db.Boolean)\n event_id = db.Column(db.Integer, db.ForeignKey('events.id', ondelete='CASCADE'))\n\n def __init__(self,\n event_id=None,\n field_identifier=None,\n form=None,\n type=None,\n is_required=None,\n is_included=None,\n is_fixed=None,\n deleted_at=None):\n self.event_id = event_id\n self.field_identifier = field_identifier,\n self.form = form,\n self.type = type,\n self.is_required = is_required,\n self.is_included = is_included,\n self.is_fixed = is_fixed\n self.deleted_at = deleted_at\n\n def __repr__(self):\n return '<CustomForm %r>' % self.id\n\n def __str__(self):\n return self.__repr__()\n\n @property\n def serialize(self):\n 
\"\"\"Return object data in easily serializable format\"\"\"\n\n return {\n 'id': self.id,\n 'field_identifier': self.field_identifier,\n 'form': self.form,\n 'type': self.type,\n 'is_required': self.is_required,\n 'is_included': self.is_included,\n 'is_fixed': self.is_fixed\n }\n", "path": "app/models/custom_form.py"}]} | 1,406 | 517 |
gh_patches_debug_33913 | rasdani/github-patches | git_diff | cocotb__cocotb-1881 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
XGMII monitor crashes with AttributeError
`self._pkt` is initialized to `b""` (immutable), but we are using `.append()` to add payload data to it. This won't work. Maybe instead use a `bytearray()`?
_Originally posted by @LeChuck42 in https://github.com/cocotb/cocotb/pull/1545#issuecomment-635394899_
These lines exemplify the issue:
https://github.com/cocotb/cocotb/blob/924f35a3b7d39543118b7bfaed77dd4808e6612b/cocotb/monitors/xgmii.py#L107-L121
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cocotb/monitors/xgmii.py`
Content:
```
1 # Copyright (c) 2013 Potential Ventures Ltd
2 # All rights reserved.
3 #
4 # Redistribution and use in source and binary forms, with or without
5 # modification, are permitted provided that the following conditions are met:
6 # * Redistributions of source code must retain the above copyright
7 # notice, this list of conditions and the following disclaimer.
8 # * Redistributions in binary form must reproduce the above copyright
9 # notice, this list of conditions and the following disclaimer in the
10 # documentation and/or other materials provided with the distribution.
11 # * Neither the name of Potential Ventures Ltd nor the names of its
12 # contributors may be used to endorse or promote products derived from this
13 # software without specific prior written permission.
14 #
15 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
16 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 # DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
19 # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20 # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22 # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
26 """Monitor for XGMII (10 Gigabit Media Independent Interface)."""
27
28 # By default cast to scapy packets, otherwise we pass the string of bytes
29 try:
30 from scapy.all import Ether
31 _have_scapy = True
32 except ImportError:
33 _have_scapy = False
34
35 import struct
36 import zlib
37
38 import cocotb
39 from cocotb.utils import hexdump
40 from cocotb.monitors import Monitor
41 from cocotb.triggers import RisingEdge
42
43 _XGMII_IDLE = 0x07 # noqa
44 _XGMII_START = 0xFB # noqa
45 _XGMII_TERMINATE = 0xFD # noqa
46
47 _PREAMBLE_SFD = b"\x55\x55\x55\x55\x55\x55\xD5"
48
49
50 class XGMII(Monitor):
51 """XGMII (10 Gigabit Media Independent Interface) Monitor.
52
53 Assumes a single vector, either 4 or 8 bytes plus control bit for each byte.
54
55 If interleaved is ``True`` then the control bits are adjacent to the bytes.
56
57 .. versionchanged:: 1.4.0
58 This now emits packets of type :class:`bytes` rather than :class:`str`,
59 which matches the behavior of :class:`cocotb.drivers.xgmii.XGMII`.
60 """
61
62 def __init__(self, signal, clock, interleaved=True, callback=None,
63 event=None):
64 """Args:
65 signal (SimHandle): The XGMII data bus.
66 clock (SimHandle): The associated clock (assumed to be
67 driven by another coroutine).
68 interleaved (bool, optional): Whether control bits are interleaved
69 with the data bytes or not.
70
71 If interleaved the bus is
72 byte0, byte0_control, byte1, byte1_control, ...
73
74 Otherwise expect
75 byte0, byte1, ..., byte0_control, byte1_control, ...
76 """
77 self.log = signal._log
78 self.clock = clock
79 self.signal = signal
80 self.bytes = len(self.signal) // 9
81 self.interleaved = interleaved
82 Monitor.__init__(self, callback=callback, event=event)
83
84 def _get_bytes(self):
85 """Take a value and extract the individual bytes and control bits.
86
87 Returns a tuple of lists.
88 """
89 value = self.signal.value.integer
90 bytes = []
91 ctrls = []
92 byte_shift = 8
93 ctrl_base = 8 * self.bytes
94 ctrl_inc = 1
95 if self.interleaved:
96 byte_shift += 1
97 ctrl_base = 8
98 ctrl_inc = 9
99
100 for i in range(self.bytes):
101 bytes.append((value >> (i * byte_shift)) & 0xff)
102 ctrls.append(bool(value & (1 << ctrl_base)))
103 ctrl_base += ctrl_inc
104
105 return ctrls, bytes
106
107 def _add_payload(self, ctrl, bytes):
108 """Take the payload and return true if more to come"""
109 for index, byte in enumerate(bytes):
110 if ctrl[index]:
111 if byte != _XGMII_TERMINATE:
112 self.log.error("Got control character in XGMII payload")
113 self.log.info("data = :" +
114 " ".join(["%02X" % b for b in bytes]))
115 self.log.info("ctrl = :" +
116 " ".join(["%s" % str(c) for c in ctrl]))
117 self._pkt = b""
118 return False
119
120 self._pkt.append(byte)
121 return True
122
123 @cocotb.coroutine
124 def _monitor_recv(self):
125 clk = RisingEdge(self.clock)
126 self._pkt = b""
127
128 while True:
129 yield clk
130 ctrl, bytes = self._get_bytes()
131
132 if ctrl[0] and bytes[0] == _XGMII_START:
133
134 ctrl, bytes = ctrl[1:], bytes[1:]
135
136 while self._add_payload(ctrl, bytes):
137 yield clk
138 ctrl, bytes = self._get_bytes()
139
140 elif self.bytes == 8 :
141 if ctrl[4] and bytes[4] == _XGMII_START:
142
143 ctrl, bytes = ctrl[5:], bytes[5:]
144
145 while self._add_payload(ctrl, bytes):
146 yield clk
147 ctrl, bytes = self._get_bytes()
148
149 if self._pkt:
150
151 self.log.debug("Received:\n%s" % (hexdump(self._pkt)))
152
153 if len(self._pkt) < 64 + 7:
154 self.log.error("Received a runt frame!")
155 if len(self._pkt) < 12:
156 self.log.error("No data to extract")
157 self._pkt = b""
158 continue
159
160 preamble_sfd = self._pkt[0:7]
161 crc32 = self._pkt[-4:]
162 payload = self._pkt[7:-4]
163
164 if preamble_sfd != _PREAMBLE_SFD:
165 self.log.error("Got a frame with unknown preamble/SFD")
166 self.log.error(hexdump(preamble_sfd))
167 self._pkt = b""
168 continue
169
170 expected_crc = struct.pack("<I",
171 (zlib.crc32(payload) & 0xFFFFFFFF))
172
173 if crc32 != expected_crc:
174 self.log.error("Incorrect CRC on received packet")
175 self.log.info("Expected: %s" % (hexdump(expected_crc)))
176 self.log.info("Received: %s" % (hexdump(crc32)))
177
178 # Use scapy to decode the packet
179 if _have_scapy:
180 p = Ether(payload)
181 self.log.debug("Received decoded packet:\n%s" % p.show2())
182 else:
183 p = payload
184
185 self._recv(p)
186 self._pkt = b""
187
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cocotb/monitors/xgmii.py b/cocotb/monitors/xgmii.py
--- a/cocotb/monitors/xgmii.py
+++ b/cocotb/monitors/xgmii.py
@@ -114,7 +114,7 @@
" ".join(["%02X" % b for b in bytes]))
self.log.info("ctrl = :" +
" ".join(["%s" % str(c) for c in ctrl]))
- self._pkt = b""
+ self._pkt = bytearray()
return False
self._pkt.append(byte)
@@ -123,7 +123,7 @@
@cocotb.coroutine
def _monitor_recv(self):
clk = RisingEdge(self.clock)
- self._pkt = b""
+ self._pkt = bytearray()
while True:
yield clk
@@ -154,7 +154,7 @@
self.log.error("Received a runt frame!")
if len(self._pkt) < 12:
self.log.error("No data to extract")
- self._pkt = b""
+ self._pkt = bytearray()
continue
preamble_sfd = self._pkt[0:7]
@@ -164,7 +164,7 @@
if preamble_sfd != _PREAMBLE_SFD:
self.log.error("Got a frame with unknown preamble/SFD")
self.log.error(hexdump(preamble_sfd))
- self._pkt = b""
+ self._pkt = bytearray()
continue
expected_crc = struct.pack("<I",
@@ -183,4 +183,4 @@
p = payload
self._recv(p)
- self._pkt = b""
+ self._pkt = bytearray()
| {"golden_diff": "diff --git a/cocotb/monitors/xgmii.py b/cocotb/monitors/xgmii.py\n--- a/cocotb/monitors/xgmii.py\n+++ b/cocotb/monitors/xgmii.py\n@@ -114,7 +114,7 @@\n \" \".join([\"%02X\" % b for b in bytes]))\n self.log.info(\"ctrl = :\" +\n \" \".join([\"%s\" % str(c) for c in ctrl]))\n- self._pkt = b\"\"\n+ self._pkt = bytearray()\n return False\n \n self._pkt.append(byte)\n@@ -123,7 +123,7 @@\n @cocotb.coroutine\n def _monitor_recv(self):\n clk = RisingEdge(self.clock)\n- self._pkt = b\"\"\n+ self._pkt = bytearray()\n \n while True:\n yield clk\n@@ -154,7 +154,7 @@\n self.log.error(\"Received a runt frame!\")\n if len(self._pkt) < 12:\n self.log.error(\"No data to extract\")\n- self._pkt = b\"\"\n+ self._pkt = bytearray()\n continue\n \n preamble_sfd = self._pkt[0:7]\n@@ -164,7 +164,7 @@\n if preamble_sfd != _PREAMBLE_SFD:\n self.log.error(\"Got a frame with unknown preamble/SFD\")\n self.log.error(hexdump(preamble_sfd))\n- self._pkt = b\"\"\n+ self._pkt = bytearray()\n continue\n \n expected_crc = struct.pack(\"<I\",\n@@ -183,4 +183,4 @@\n p = payload\n \n self._recv(p)\n- self._pkt = b\"\"\n+ self._pkt = bytearray()\n", "issue": "XGMII monitor crashes with AttributeError\n`self._pkt` is initialized to `b\"\"` (immutable), but we are using `.append()` to add payload data to it. This won't work. Maybe instead use a `bytearray()`?\r\n\r\n_Originally posted by @LeChuck42 in https://github.com/cocotb/cocotb/pull/1545#issuecomment-635394899_\r\n\r\nThese lines exemplify the issue:\r\n\r\nhttps://github.com/cocotb/cocotb/blob/924f35a3b7d39543118b7bfaed77dd4808e6612b/cocotb/monitors/xgmii.py#L107-L121\n", "before_files": [{"content": "# Copyright (c) 2013 Potential Ventures Ltd\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd nor the names of its\n# contributors may be used to endorse or promote products derived from this\n# software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"Monitor for XGMII (10 Gigabit Media Independent Interface).\"\"\"\n\n# By default cast to scapy packets, otherwise we pass the string of bytes\ntry:\n from scapy.all import Ether\n _have_scapy = True\nexcept ImportError:\n _have_scapy = False\n\nimport struct\nimport zlib\n\nimport cocotb\nfrom cocotb.utils import hexdump\nfrom cocotb.monitors import Monitor\nfrom cocotb.triggers import RisingEdge\n\n_XGMII_IDLE = 0x07 # noqa\n_XGMII_START = 0xFB # noqa\n_XGMII_TERMINATE = 0xFD # noqa\n\n_PREAMBLE_SFD = b\"\\x55\\x55\\x55\\x55\\x55\\x55\\xD5\"\n\n\nclass XGMII(Monitor):\n \"\"\"XGMII (10 Gigabit Media Independent Interface) Monitor.\n\n Assumes a single vector, either 4 or 8 bytes plus control bit for each byte.\n\n If interleaved is ``True`` then the control bits are adjacent to the bytes.\n\n .. versionchanged:: 1.4.0\n This now emits packets of type :class:`bytes` rather than :class:`str`,\n which matches the behavior of :class:`cocotb.drivers.xgmii.XGMII`.\n \"\"\"\n\n def __init__(self, signal, clock, interleaved=True, callback=None,\n event=None):\n \"\"\"Args:\n signal (SimHandle): The XGMII data bus.\n clock (SimHandle): The associated clock (assumed to be\n driven by another coroutine).\n interleaved (bool, optional): Whether control bits are interleaved\n with the data bytes or not.\n\n If interleaved the bus is\n byte0, byte0_control, byte1, byte1_control, ...\n\n Otherwise expect\n byte0, byte1, ..., byte0_control, byte1_control, ...\n \"\"\"\n self.log = signal._log\n self.clock = clock\n self.signal = signal\n self.bytes = len(self.signal) // 9\n self.interleaved = interleaved\n Monitor.__init__(self, callback=callback, event=event)\n\n def _get_bytes(self):\n \"\"\"Take a value and extract the individual bytes and control bits.\n\n Returns a tuple of lists.\n \"\"\"\n value = self.signal.value.integer\n bytes = []\n ctrls = []\n byte_shift = 8\n ctrl_base = 8 * self.bytes\n ctrl_inc = 1\n if self.interleaved:\n byte_shift += 1\n ctrl_base = 8\n ctrl_inc = 9\n\n for i in range(self.bytes):\n bytes.append((value >> (i * byte_shift)) & 0xff)\n ctrls.append(bool(value & (1 << ctrl_base)))\n ctrl_base += ctrl_inc\n\n return ctrls, bytes\n\n def _add_payload(self, ctrl, bytes):\n \"\"\"Take the payload and return true if more to come\"\"\"\n for index, byte in enumerate(bytes):\n if ctrl[index]:\n if byte != _XGMII_TERMINATE:\n self.log.error(\"Got control character in XGMII payload\")\n self.log.info(\"data = :\" +\n \" \".join([\"%02X\" % b for b in bytes]))\n self.log.info(\"ctrl = :\" +\n \" \".join([\"%s\" % str(c) for c in ctrl]))\n self._pkt = b\"\"\n return False\n\n self._pkt.append(byte)\n return True\n\n @cocotb.coroutine\n def _monitor_recv(self):\n clk = RisingEdge(self.clock)\n self._pkt = b\"\"\n\n while True:\n yield clk\n ctrl, bytes = self._get_bytes()\n\n if ctrl[0] and bytes[0] == _XGMII_START:\n\n ctrl, bytes = ctrl[1:], bytes[1:]\n\n while self._add_payload(ctrl, bytes):\n yield clk\n ctrl, bytes = self._get_bytes()\n\n elif self.bytes == 8 
:\n if ctrl[4] and bytes[4] == _XGMII_START:\n\n ctrl, bytes = ctrl[5:], bytes[5:]\n\n while self._add_payload(ctrl, bytes):\n yield clk\n ctrl, bytes = self._get_bytes()\n\n if self._pkt:\n\n self.log.debug(\"Received:\\n%s\" % (hexdump(self._pkt)))\n\n if len(self._pkt) < 64 + 7:\n self.log.error(\"Received a runt frame!\")\n if len(self._pkt) < 12:\n self.log.error(\"No data to extract\")\n self._pkt = b\"\"\n continue\n\n preamble_sfd = self._pkt[0:7]\n crc32 = self._pkt[-4:]\n payload = self._pkt[7:-4]\n\n if preamble_sfd != _PREAMBLE_SFD:\n self.log.error(\"Got a frame with unknown preamble/SFD\")\n self.log.error(hexdump(preamble_sfd))\n self._pkt = b\"\"\n continue\n\n expected_crc = struct.pack(\"<I\",\n (zlib.crc32(payload) & 0xFFFFFFFF))\n\n if crc32 != expected_crc:\n self.log.error(\"Incorrect CRC on received packet\")\n self.log.info(\"Expected: %s\" % (hexdump(expected_crc)))\n self.log.info(\"Received: %s\" % (hexdump(crc32)))\n\n # Use scapy to decode the packet\n if _have_scapy:\n p = Ether(payload)\n self.log.debug(\"Received decoded packet:\\n%s\" % p.show2())\n else:\n p = payload\n\n self._recv(p)\n self._pkt = b\"\"\n", "path": "cocotb/monitors/xgmii.py"}], "after_files": [{"content": "# Copyright (c) 2013 Potential Ventures Ltd\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd nor the names of its\n# contributors may be used to endorse or promote products derived from this\n# software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"Monitor for XGMII (10 Gigabit Media Independent Interface).\"\"\"\n\n# By default cast to scapy packets, otherwise we pass the string of bytes\ntry:\n from scapy.all import Ether\n _have_scapy = True\nexcept ImportError:\n _have_scapy = False\n\nimport struct\nimport zlib\n\nimport cocotb\nfrom cocotb.utils import hexdump\nfrom cocotb.monitors import Monitor\nfrom cocotb.triggers import RisingEdge\n\n_XGMII_IDLE = 0x07 # noqa\n_XGMII_START = 0xFB # noqa\n_XGMII_TERMINATE = 0xFD # noqa\n\n_PREAMBLE_SFD = b\"\\x55\\x55\\x55\\x55\\x55\\x55\\xD5\"\n\n\nclass XGMII(Monitor):\n \"\"\"XGMII (10 Gigabit Media Independent Interface) Monitor.\n\n Assumes a single vector, either 4 or 8 bytes plus control bit for each byte.\n\n If interleaved is ``True`` then the control bits are adjacent to the bytes.\n\n .. versionchanged:: 1.4.0\n This now emits packets of type :class:`bytes` rather than :class:`str`,\n which matches the behavior of :class:`cocotb.drivers.xgmii.XGMII`.\n \"\"\"\n\n def __init__(self, signal, clock, interleaved=True, callback=None,\n event=None):\n \"\"\"Args:\n signal (SimHandle): The XGMII data bus.\n clock (SimHandle): The associated clock (assumed to be\n driven by another coroutine).\n interleaved (bool, optional): Whether control bits are interleaved\n with the data bytes or not.\n\n If interleaved the bus is\n byte0, byte0_control, byte1, byte1_control, ...\n\n Otherwise expect\n byte0, byte1, ..., byte0_control, byte1_control, ...\n \"\"\"\n self.log = signal._log\n self.clock = clock\n self.signal = signal\n self.bytes = len(self.signal) // 9\n self.interleaved = interleaved\n Monitor.__init__(self, callback=callback, event=event)\n\n def _get_bytes(self):\n \"\"\"Take a value and extract the individual bytes and control bits.\n\n Returns a tuple of lists.\n \"\"\"\n value = self.signal.value.integer\n bytes = []\n ctrls = []\n byte_shift = 8\n ctrl_base = 8 * self.bytes\n ctrl_inc = 1\n if self.interleaved:\n byte_shift += 1\n ctrl_base = 8\n ctrl_inc = 9\n\n for i in range(self.bytes):\n bytes.append((value >> (i * byte_shift)) & 0xff)\n ctrls.append(bool(value & (1 << ctrl_base)))\n ctrl_base += ctrl_inc\n\n return ctrls, bytes\n\n def _add_payload(self, ctrl, bytes):\n \"\"\"Take the payload and return true if more to come\"\"\"\n for index, byte in enumerate(bytes):\n if ctrl[index]:\n if byte != _XGMII_TERMINATE:\n self.log.error(\"Got control character in XGMII payload\")\n self.log.info(\"data = :\" +\n \" \".join([\"%02X\" % b for b in bytes]))\n self.log.info(\"ctrl = :\" +\n \" \".join([\"%s\" % str(c) for c in ctrl]))\n self._pkt = bytearray()\n return False\n\n self._pkt.append(byte)\n return True\n\n @cocotb.coroutine\n def _monitor_recv(self):\n clk = RisingEdge(self.clock)\n self._pkt = bytearray()\n\n while True:\n yield clk\n ctrl, bytes = self._get_bytes()\n\n if ctrl[0] and bytes[0] == _XGMII_START:\n\n ctrl, bytes = ctrl[1:], bytes[1:]\n\n while self._add_payload(ctrl, bytes):\n yield clk\n ctrl, bytes = self._get_bytes()\n\n elif 
self.bytes == 8 :\n if ctrl[4] and bytes[4] == _XGMII_START:\n\n ctrl, bytes = ctrl[5:], bytes[5:]\n\n while self._add_payload(ctrl, bytes):\n yield clk\n ctrl, bytes = self._get_bytes()\n\n if self._pkt:\n\n self.log.debug(\"Received:\\n%s\" % (hexdump(self._pkt)))\n\n if len(self._pkt) < 64 + 7:\n self.log.error(\"Received a runt frame!\")\n if len(self._pkt) < 12:\n self.log.error(\"No data to extract\")\n self._pkt = bytearray()\n continue\n\n preamble_sfd = self._pkt[0:7]\n crc32 = self._pkt[-4:]\n payload = self._pkt[7:-4]\n\n if preamble_sfd != _PREAMBLE_SFD:\n self.log.error(\"Got a frame with unknown preamble/SFD\")\n self.log.error(hexdump(preamble_sfd))\n self._pkt = bytearray()\n continue\n\n expected_crc = struct.pack(\"<I\",\n (zlib.crc32(payload) & 0xFFFFFFFF))\n\n if crc32 != expected_crc:\n self.log.error(\"Incorrect CRC on received packet\")\n self.log.info(\"Expected: %s\" % (hexdump(expected_crc)))\n self.log.info(\"Received: %s\" % (hexdump(crc32)))\n\n # Use scapy to decode the packet\n if _have_scapy:\n p = Ether(payload)\n self.log.debug(\"Received decoded packet:\\n%s\" % p.show2())\n else:\n p = payload\n\n self._recv(p)\n self._pkt = bytearray()\n", "path": "cocotb/monitors/xgmii.py"}]} | 2,479 | 402 |
gh_patches_debug_9114 | rasdani/github-patches | git_diff | UTNkar__moore-183 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Centre drive embeds
<!-- Do you want to ask a question? Are you looking for support? The system administrator can help you: admin@utn.se -->
### Description
Centre drive embeds. Currently it looks a bit weird.

### Steps to Reproduce
1. [First Step]
2. [Second Step]
3. [and so on...]
<!-- Please select the appropriate "topic category"/blue and "issue type"/yellow label -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/google/models.py`
Content:
```
1 from datetime import date
2
3 from django.db import models
4 from django.utils.translation import ugettext_lazy as _
5 from wagtail.wagtailadmin.edit_handlers import FieldPanel, StreamFieldPanel, \
6 TabbedInterface, ObjectList
7 from wagtail.wagtailcore import blocks
8 from wagtail.wagtailcore.fields import StreamField, RichTextField
9 from wagtail.wagtailcore.models import Page
10 from wagtail.wagtailsearch import index
11
12 from blocks.models import WAGTAIL_STATIC_BLOCKTYPES
13 from utils.translation import TranslatedField
14
15
16 class GoogleFormBlock(blocks.StructBlock):
17 form_id = blocks.CharBlock()
18 height = blocks.IntegerBlock()
19
20 class Meta:
21 label = _('Google Form')
22 icon = 'fa-check-square-o'
23 template = 'google/blocks/form.html'
24 group = _('Meta')
25
26
27 class GoogleFormIndex(Page):
28 title_sv = models.CharField(max_length=255)
29 translated_title = TranslatedField('title', 'title_sv')
30
31 description_en = RichTextField(
32 verbose_name=_('English description'),
33 blank=True,
34 )
35 description_sv = RichTextField(
36 verbose_name=_('Swedish description'),
37 blank=True,
38 )
39 description = TranslatedField('description_en', 'description_sv')
40
41 # Editor panels configuration
42 content_panels = Page.content_panels + [
43 FieldPanel('title_sv', classname="full title"),
44 FieldPanel('description_en'),
45 FieldPanel('description_sv'),
46 ]
47
48 # Sub-page type rules
49 subpage_types = ['google.GoogleFormPage']
50
51 def get_context(self, request, **kwargs):
52 context = super(GoogleFormIndex, self).get_context(request, **kwargs)
53
54 # Add extra variables and return the updated context
55 context['google_forms'] = GoogleFormPage.objects.child_of(self).live()\
56 .order_by('-deadline')
57 return context
58
59
60 class GoogleFormPage(Page):
61 title_sv = models.CharField(max_length=255)
62 translated_title = TranslatedField('title', 'title_sv')
63
64 # TODO: Limit to one form!
65 form_en = StreamField([('google_form', GoogleFormBlock())])
66 form_sv = StreamField([('google_form', GoogleFormBlock())])
67 form = TranslatedField('form_en', 'form_sv')
68
69 deadline = models.DateField(verbose_name=_('Form deadline'))
70
71 results_en = StreamField(
72 WAGTAIL_STATIC_BLOCKTYPES,
73 blank=True,
74 )
75 results_sv = StreamField(
76 WAGTAIL_STATIC_BLOCKTYPES,
77 blank=True,
78 )
79 results = TranslatedField('results_en', 'results_sv')
80
81 @property
82 def is_past_due(self) -> bool:
83 return date.today() > self.deadline
84
85 # Editor panels configuration
86 content_panels = Page.content_panels + [
87 FieldPanel('title_sv', classname="full title"),
88 FieldPanel('deadline'),
89 StreamFieldPanel('form_en'),
90 StreamFieldPanel('form_sv'),
91 ]
92
93 edit_handler = TabbedInterface([
94 ObjectList(content_panels, heading=_('Common')),
95 ObjectList([StreamFieldPanel('results_en')], heading=_('English')),
96 ObjectList([StreamFieldPanel('results_sv')], heading=_('Swedish')),
97 ObjectList(
98 Page.promote_panels + Page.settings_panels, heading=_('Settings')
99 ),
100 ])
101
102 # Search index configuration
103 search_fields = Page.search_fields + [
104 index.SearchField('title_sv'),
105 index.FilterField('results_en'),
106 index.FilterField('results_sv'),
107 index.FilterField('deadline'),
108 ]
109
110 # Parent page / subpage type rules
111 parent_page_types = ['google.GoogleFormIndex']
112 subpage_types = []
113
114
115 class GoogleDriveBlock(blocks.StructBlock):
116 folder_id = blocks.CharBlock()
117 view = blocks.ChoiceBlock(choices=[
118 ('list', _('List')),
119 ('grid', _('Grid')),
120 ])
121 height = blocks.IntegerBlock()
122
123 class Meta:
124 label = _('Google Drive')
125 icon = 'fa-folder-open'
126 template = 'google/blocks/drive.html'
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/google/models.py b/src/google/models.py
--- a/src/google/models.py
+++ b/src/google/models.py
@@ -21,7 +21,7 @@
label = _('Google Form')
icon = 'fa-check-square-o'
template = 'google/blocks/form.html'
- group = _('Meta')
+ group = _('Embed')
class GoogleFormIndex(Page):
@@ -124,3 +124,4 @@
label = _('Google Drive')
icon = 'fa-folder-open'
template = 'google/blocks/drive.html'
+ group = _('Embed')
| {"golden_diff": "diff --git a/src/google/models.py b/src/google/models.py\n--- a/src/google/models.py\n+++ b/src/google/models.py\n@@ -21,7 +21,7 @@\n label = _('Google Form')\n icon = 'fa-check-square-o'\n template = 'google/blocks/form.html'\n- group = _('Meta')\n+ group = _('Embed')\n \n \n class GoogleFormIndex(Page):\n@@ -124,3 +124,4 @@\n label = _('Google Drive')\n icon = 'fa-folder-open'\n template = 'google/blocks/drive.html'\n+ group = _('Embed')\n", "issue": "Centre drive embeds\n<!-- Do you want to ask a question? Are you looking for support? The system administrator can help you: admin@utn.se -->\r\n\r\n### Description\r\n\r\nCenter drive embeds. Currently it looks a bit weird.\r\n\r\n\r\n\r\n\r\n### Steps to Reproduce\r\n\r\n1. [First Step]\r\n2. [Second Step]\r\n3. [and so on...]\r\n\r\n<!-- Please select the appropriate \"topic category\"/blue and \"issue type\"/yellow label -->\r\n\n", "before_files": [{"content": "from datetime import date\n\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom wagtail.wagtailadmin.edit_handlers import FieldPanel, StreamFieldPanel, \\\n TabbedInterface, ObjectList\nfrom wagtail.wagtailcore import blocks\nfrom wagtail.wagtailcore.fields import StreamField, RichTextField\nfrom wagtail.wagtailcore.models import Page\nfrom wagtail.wagtailsearch import index\n\nfrom blocks.models import WAGTAIL_STATIC_BLOCKTYPES\nfrom utils.translation import TranslatedField\n\n\nclass GoogleFormBlock(blocks.StructBlock):\n form_id = blocks.CharBlock()\n height = blocks.IntegerBlock()\n\n class Meta:\n label = _('Google Form')\n icon = 'fa-check-square-o'\n template = 'google/blocks/form.html'\n group = _('Meta')\n\n\nclass GoogleFormIndex(Page):\n title_sv = models.CharField(max_length=255)\n translated_title = TranslatedField('title', 'title_sv')\n\n description_en = RichTextField(\n verbose_name=_('English description'),\n blank=True,\n )\n description_sv = RichTextField(\n verbose_name=_('Swedish description'),\n blank=True,\n )\n description = TranslatedField('description_en', 'description_sv')\n\n # Editor panels configuration\n content_panels = Page.content_panels + [\n FieldPanel('title_sv', classname=\"full title\"),\n FieldPanel('description_en'),\n FieldPanel('description_sv'),\n ]\n\n # Sub-page type rules\n subpage_types = ['google.GoogleFormPage']\n\n def get_context(self, request, **kwargs):\n context = super(GoogleFormIndex, self).get_context(request, **kwargs)\n\n # Add extra variables and return the updated context\n context['google_forms'] = GoogleFormPage.objects.child_of(self).live()\\\n .order_by('-deadline')\n return context\n\n\nclass GoogleFormPage(Page):\n title_sv = models.CharField(max_length=255)\n translated_title = TranslatedField('title', 'title_sv')\n\n # TODO: Limit to one form!\n form_en = StreamField([('google_form', GoogleFormBlock())])\n form_sv = StreamField([('google_form', GoogleFormBlock())])\n form = TranslatedField('form_en', 'form_sv')\n\n deadline = models.DateField(verbose_name=_('Form deadline'))\n\n results_en = StreamField(\n WAGTAIL_STATIC_BLOCKTYPES,\n blank=True,\n )\n results_sv = StreamField(\n WAGTAIL_STATIC_BLOCKTYPES,\n blank=True,\n )\n results = TranslatedField('results_en', 'results_sv')\n\n @property\n def is_past_due(self) -> bool:\n return date.today() > self.deadline\n\n # Editor panels configuration\n content_panels = Page.content_panels + [\n FieldPanel('title_sv', classname=\"full title\"),\n FieldPanel('deadline'),\n StreamFieldPanel('form_en'),\n 
StreamFieldPanel('form_sv'),\n ]\n\n edit_handler = TabbedInterface([\n ObjectList(content_panels, heading=_('Common')),\n ObjectList([StreamFieldPanel('results_en')], heading=_('English')),\n ObjectList([StreamFieldPanel('results_sv')], heading=_('Swedish')),\n ObjectList(\n Page.promote_panels + Page.settings_panels, heading=_('Settings')\n ),\n ])\n\n # Search index configuration\n search_fields = Page.search_fields + [\n index.SearchField('title_sv'),\n index.FilterField('results_en'),\n index.FilterField('results_sv'),\n index.FilterField('deadline'),\n ]\n\n # Parent page / subpage type rules\n parent_page_types = ['google.GoogleFormIndex']\n subpage_types = []\n\n\nclass GoogleDriveBlock(blocks.StructBlock):\n folder_id = blocks.CharBlock()\n view = blocks.ChoiceBlock(choices=[\n ('list', _('List')),\n ('grid', _('Grid')),\n ])\n height = blocks.IntegerBlock()\n\n class Meta:\n label = _('Google Drive')\n icon = 'fa-folder-open'\n template = 'google/blocks/drive.html'\n", "path": "src/google/models.py"}], "after_files": [{"content": "from datetime import date\n\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom wagtail.wagtailadmin.edit_handlers import FieldPanel, StreamFieldPanel, \\\n TabbedInterface, ObjectList\nfrom wagtail.wagtailcore import blocks\nfrom wagtail.wagtailcore.fields import StreamField, RichTextField\nfrom wagtail.wagtailcore.models import Page\nfrom wagtail.wagtailsearch import index\n\nfrom blocks.models import WAGTAIL_STATIC_BLOCKTYPES\nfrom utils.translation import TranslatedField\n\n\nclass GoogleFormBlock(blocks.StructBlock):\n form_id = blocks.CharBlock()\n height = blocks.IntegerBlock()\n\n class Meta:\n label = _('Google Form')\n icon = 'fa-check-square-o'\n template = 'google/blocks/form.html'\n group = _('Embed')\n\n\nclass GoogleFormIndex(Page):\n title_sv = models.CharField(max_length=255)\n translated_title = TranslatedField('title', 'title_sv')\n\n description_en = RichTextField(\n verbose_name=_('English description'),\n blank=True,\n )\n description_sv = RichTextField(\n verbose_name=_('Swedish description'),\n blank=True,\n )\n description = TranslatedField('description_en', 'description_sv')\n\n # Editor panels configuration\n content_panels = Page.content_panels + [\n FieldPanel('title_sv', classname=\"full title\"),\n FieldPanel('description_en'),\n FieldPanel('description_sv'),\n ]\n\n # Sub-page type rules\n subpage_types = ['google.GoogleFormPage']\n\n def get_context(self, request, **kwargs):\n context = super(GoogleFormIndex, self).get_context(request, **kwargs)\n\n # Add extra variables and return the updated context\n context['google_forms'] = GoogleFormPage.objects.child_of(self).live()\\\n .order_by('-deadline')\n return context\n\n\nclass GoogleFormPage(Page):\n title_sv = models.CharField(max_length=255)\n translated_title = TranslatedField('title', 'title_sv')\n\n # TODO: Limit to one form!\n form_en = StreamField([('google_form', GoogleFormBlock())])\n form_sv = StreamField([('google_form', GoogleFormBlock())])\n form = TranslatedField('form_en', 'form_sv')\n\n deadline = models.DateField(verbose_name=_('Form deadline'))\n\n results_en = StreamField(\n WAGTAIL_STATIC_BLOCKTYPES,\n blank=True,\n )\n results_sv = StreamField(\n WAGTAIL_STATIC_BLOCKTYPES,\n blank=True,\n )\n results = TranslatedField('results_en', 'results_sv')\n\n @property\n def is_past_due(self) -> bool:\n return date.today() > self.deadline\n\n # Editor panels configuration\n content_panels = Page.content_panels + [\n 
FieldPanel('title_sv', classname=\"full title\"),\n FieldPanel('deadline'),\n StreamFieldPanel('form_en'),\n StreamFieldPanel('form_sv'),\n ]\n\n edit_handler = TabbedInterface([\n ObjectList(content_panels, heading=_('Common')),\n ObjectList([StreamFieldPanel('results_en')], heading=_('English')),\n ObjectList([StreamFieldPanel('results_sv')], heading=_('Swedish')),\n ObjectList(\n Page.promote_panels + Page.settings_panels, heading=_('Settings')\n ),\n ])\n\n # Search index configuration\n search_fields = Page.search_fields + [\n index.SearchField('title_sv'),\n index.FilterField('results_en'),\n index.FilterField('results_sv'),\n index.FilterField('deadline'),\n ]\n\n # Parent page / subpage type rules\n parent_page_types = ['google.GoogleFormIndex']\n subpage_types = []\n\n\nclass GoogleDriveBlock(blocks.StructBlock):\n folder_id = blocks.CharBlock()\n view = blocks.ChoiceBlock(choices=[\n ('list', _('List')),\n ('grid', _('Grid')),\n ])\n height = blocks.IntegerBlock()\n\n class Meta:\n label = _('Google Drive')\n icon = 'fa-folder-open'\n template = 'google/blocks/drive.html'\n group = _('Embed')\n", "path": "src/google/models.py"}]} | 1,559 | 134 |
gh_patches_debug_41589 | rasdani/github-patches | git_diff | getsentry__sentry-python-851 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Django 3.1 async views do not work
When using sentry versions greater than 0.16.3 (tested on 0.18.0), Django 3.1 async views do not work.
```
log.py 224 ERROR Internal Server Error: /async_ok
Traceback (most recent call last):
File "/Users/williamchu/dev/sentry-python/.tox/py3.8-django-3.1/lib/python3.8/site-packages/django/core/handlers/exception.py", line 47, in inner
response = get_response(request)
File "/Users/williamchu/dev/sentry-python/.tox/py3.8-django-3.1/lib/python3.8/site-packages/django/core/handlers/base.py", line 186, in _get_response
self.check_response(response, callback)
File "/Users/williamchu/dev/sentry-python/.tox/py3.8-django-3.1/lib/python3.8/site-packages/django/core/handlers/base.py", line 312, in check_response
raise ValueError(
ValueError: The view tests.integrations.django.myapp.views.async_ok didn't return an HttpResponse object. It returned an unawaited coroutine instead. You may need to add an 'await' into your view.
```
I have made a branch with a test case to demonstrate this: https://github.com/uptickmetachu/sentry-python/tree/django3.1-test-async-view
--- END ISSUE ---
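For context, here is a minimal, self-contained sketch (hypothetical names, not Sentry's or Django's actual code) of the failure mode the traceback describes: a plain synchronous wrapper around a coroutine function returns an unawaited coroutine object, whereas an async-aware wrapper awaits it and returns the real result.

```
import asyncio
import functools


async def async_view(request):
    return f"response for {request}"


def sync_wrap(callback):
    # Mirrors a wrapper that calls the view without awaiting it.
    @functools.wraps(callback)
    def wrapper(request, *args, **kwargs):
        return callback(request, *args, **kwargs)  # coroutine object, never awaited
    return wrapper


def async_wrap(callback):
    # An async-aware wrapper awaits the view and returns the actual response.
    @functools.wraps(callback)
    async def wrapper(request, *args, **kwargs):
        return await callback(request, *args, **kwargs)
    return wrapper


broken = sync_wrap(async_view)("req")
print(asyncio.iscoroutine(broken))  # True -> Django complains about an unawaited coroutine
broken.close()  # close the coroutine so the sketch runs without warnings

print(asyncio.run(async_wrap(async_view)("req")))  # response for req
```

This is why the wrapping code needs to check `asyncio.iscoroutinefunction(callback)` and use an async wrapper for coroutine views.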
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sentry_sdk/integrations/django/views.py`
Content:
```
1 from sentry_sdk.hub import Hub
2 from sentry_sdk._types import MYPY
3 from sentry_sdk import _functools
4
5 if MYPY:
6 from typing import Any
7
8
9 def patch_views():
10 # type: () -> None
11
12 from django.core.handlers.base import BaseHandler
13 from sentry_sdk.integrations.django import DjangoIntegration
14
15 old_make_view_atomic = BaseHandler.make_view_atomic
16
17 @_functools.wraps(old_make_view_atomic)
18 def sentry_patched_make_view_atomic(self, *args, **kwargs):
19 # type: (Any, *Any, **Any) -> Any
20 callback = old_make_view_atomic(self, *args, **kwargs)
21
22 # XXX: The wrapper function is created for every request. Find more
23 # efficient way to wrap views (or build a cache?)
24
25 hub = Hub.current
26 integration = hub.get_integration(DjangoIntegration)
27
28 if integration is not None and integration.middleware_spans:
29
30 @_functools.wraps(callback)
31 def sentry_wrapped_callback(request, *args, **kwargs):
32 # type: (Any, *Any, **Any) -> Any
33 with hub.start_span(
34 op="django.view", description=request.resolver_match.view_name
35 ):
36 return callback(request, *args, **kwargs)
37
38 else:
39 sentry_wrapped_callback = callback
40
41 return sentry_wrapped_callback
42
43 BaseHandler.make_view_atomic = sentry_patched_make_view_atomic
44
```
Path: `sentry_sdk/integrations/django/asgi.py`
Content:
```
1 """
2 Instrumentation for Django 3.0
3
4 Since this file contains `async def` it is conditionally imported in
5 `sentry_sdk.integrations.django` (depending on the existence of
6 `django.core.handlers.asgi`.
7 """
8
9 from sentry_sdk import Hub
10 from sentry_sdk._types import MYPY
11
12 from sentry_sdk.integrations.django import DjangoIntegration
13 from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
14
15 if MYPY:
16 from typing import Any
17 from typing import Union
18
19 from django.http.response import HttpResponse
20
21
22 def patch_django_asgi_handler_impl(cls):
23 # type: (Any) -> None
24 old_app = cls.__call__
25
26 async def sentry_patched_asgi_handler(self, scope, receive, send):
27 # type: (Any, Any, Any, Any) -> Any
28 if Hub.current.get_integration(DjangoIntegration) is None:
29 return await old_app(self, scope, receive, send)
30
31 middleware = SentryAsgiMiddleware(
32 old_app.__get__(self, cls), unsafe_context_data=True
33 )._run_asgi3
34 return await middleware(scope, receive, send)
35
36 cls.__call__ = sentry_patched_asgi_handler
37
38
39 def patch_get_response_async(cls, _before_get_response):
40 # type: (Any, Any) -> None
41 old_get_response_async = cls.get_response_async
42
43 async def sentry_patched_get_response_async(self, request):
44 # type: (Any, Any) -> Union[HttpResponse, BaseException]
45 _before_get_response(request)
46 return await old_get_response_async(self, request)
47
48 cls.get_response_async = sentry_patched_get_response_async
49
50
51 def patch_channels_asgi_handler_impl(cls):
52 # type: (Any) -> None
53 old_app = cls.__call__
54
55 async def sentry_patched_asgi_handler(self, receive, send):
56 # type: (Any, Any, Any) -> Any
57 if Hub.current.get_integration(DjangoIntegration) is None:
58 return await old_app(self, receive, send)
59
60 middleware = SentryAsgiMiddleware(
61 lambda _scope: old_app.__get__(self, cls), unsafe_context_data=True
62 )
63
64 return await middleware(self.scope)(receive, send)
65
66 cls.__call__ = sentry_patched_asgi_handler
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sentry_sdk/integrations/django/asgi.py b/sentry_sdk/integrations/django/asgi.py
--- a/sentry_sdk/integrations/django/asgi.py
+++ b/sentry_sdk/integrations/django/asgi.py
@@ -6,10 +6,9 @@
`django.core.handlers.asgi`.
"""
-from sentry_sdk import Hub
+from sentry_sdk import Hub, _functools
from sentry_sdk._types import MYPY
-from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
if MYPY:
@@ -21,6 +20,9 @@
def patch_django_asgi_handler_impl(cls):
# type: (Any) -> None
+
+ from sentry_sdk.integrations.django import DjangoIntegration
+
old_app = cls.__call__
async def sentry_patched_asgi_handler(self, scope, receive, send):
@@ -50,6 +52,9 @@
def patch_channels_asgi_handler_impl(cls):
# type: (Any) -> None
+
+ from sentry_sdk.integrations.django import DjangoIntegration
+
old_app = cls.__call__
async def sentry_patched_asgi_handler(self, receive, send):
@@ -64,3 +69,17 @@
return await middleware(self.scope)(receive, send)
cls.__call__ = sentry_patched_asgi_handler
+
+
+def wrap_async_view(hub, callback):
+ # type: (Hub, Any) -> Any
+ @_functools.wraps(callback)
+ async def sentry_wrapped_callback(request, *args, **kwargs):
+ # type: (Any, *Any, **Any) -> Any
+
+ with hub.start_span(
+ op="django.view", description=request.resolver_match.view_name
+ ):
+ return await callback(request, *args, **kwargs)
+
+ return sentry_wrapped_callback
diff --git a/sentry_sdk/integrations/django/views.py b/sentry_sdk/integrations/django/views.py
--- a/sentry_sdk/integrations/django/views.py
+++ b/sentry_sdk/integrations/django/views.py
@@ -6,6 +6,18 @@
from typing import Any
+try:
+ from asyncio import iscoroutinefunction
+except ImportError:
+ iscoroutinefunction = None # type: ignore
+
+
+try:
+ from sentry_sdk.integrations.django.asgi import wrap_async_view
+except (ImportError, SyntaxError):
+ wrap_async_view = None # type: ignore
+
+
def patch_views():
# type: () -> None
@@ -27,13 +39,14 @@
if integration is not None and integration.middleware_spans:
- @_functools.wraps(callback)
- def sentry_wrapped_callback(request, *args, **kwargs):
- # type: (Any, *Any, **Any) -> Any
- with hub.start_span(
- op="django.view", description=request.resolver_match.view_name
- ):
- return callback(request, *args, **kwargs)
+ if (
+ iscoroutinefunction is not None
+ and wrap_async_view is not None
+ and iscoroutinefunction(callback)
+ ):
+ sentry_wrapped_callback = wrap_async_view(hub, callback)
+ else:
+ sentry_wrapped_callback = _wrap_sync_view(hub, callback)
else:
sentry_wrapped_callback = callback
@@ -41,3 +54,16 @@
return sentry_wrapped_callback
BaseHandler.make_view_atomic = sentry_patched_make_view_atomic
+
+
+def _wrap_sync_view(hub, callback):
+ # type: (Hub, Any) -> Any
+ @_functools.wraps(callback)
+ def sentry_wrapped_callback(request, *args, **kwargs):
+ # type: (Any, *Any, **Any) -> Any
+ with hub.start_span(
+ op="django.view", description=request.resolver_match.view_name
+ ):
+ return callback(request, *args, **kwargs)
+
+ return sentry_wrapped_callback
| {"golden_diff": "diff --git a/sentry_sdk/integrations/django/asgi.py b/sentry_sdk/integrations/django/asgi.py\n--- a/sentry_sdk/integrations/django/asgi.py\n+++ b/sentry_sdk/integrations/django/asgi.py\n@@ -6,10 +6,9 @@\n `django.core.handlers.asgi`.\n \"\"\"\n \n-from sentry_sdk import Hub\n+from sentry_sdk import Hub, _functools\n from sentry_sdk._types import MYPY\n \n-from sentry_sdk.integrations.django import DjangoIntegration\n from sentry_sdk.integrations.asgi import SentryAsgiMiddleware\n \n if MYPY:\n@@ -21,6 +20,9 @@\n \n def patch_django_asgi_handler_impl(cls):\n # type: (Any) -> None\n+\n+ from sentry_sdk.integrations.django import DjangoIntegration\n+\n old_app = cls.__call__\n \n async def sentry_patched_asgi_handler(self, scope, receive, send):\n@@ -50,6 +52,9 @@\n \n def patch_channels_asgi_handler_impl(cls):\n # type: (Any) -> None\n+\n+ from sentry_sdk.integrations.django import DjangoIntegration\n+\n old_app = cls.__call__\n \n async def sentry_patched_asgi_handler(self, receive, send):\n@@ -64,3 +69,17 @@\n return await middleware(self.scope)(receive, send)\n \n cls.__call__ = sentry_patched_asgi_handler\n+\n+\n+def wrap_async_view(hub, callback):\n+ # type: (Hub, Any) -> Any\n+ @_functools.wraps(callback)\n+ async def sentry_wrapped_callback(request, *args, **kwargs):\n+ # type: (Any, *Any, **Any) -> Any\n+\n+ with hub.start_span(\n+ op=\"django.view\", description=request.resolver_match.view_name\n+ ):\n+ return await callback(request, *args, **kwargs)\n+\n+ return sentry_wrapped_callback\ndiff --git a/sentry_sdk/integrations/django/views.py b/sentry_sdk/integrations/django/views.py\n--- a/sentry_sdk/integrations/django/views.py\n+++ b/sentry_sdk/integrations/django/views.py\n@@ -6,6 +6,18 @@\n from typing import Any\n \n \n+try:\n+ from asyncio import iscoroutinefunction\n+except ImportError:\n+ iscoroutinefunction = None # type: ignore\n+\n+\n+try:\n+ from sentry_sdk.integrations.django.asgi import wrap_async_view\n+except (ImportError, SyntaxError):\n+ wrap_async_view = None # type: ignore\n+\n+\n def patch_views():\n # type: () -> None\n \n@@ -27,13 +39,14 @@\n \n if integration is not None and integration.middleware_spans:\n \n- @_functools.wraps(callback)\n- def sentry_wrapped_callback(request, *args, **kwargs):\n- # type: (Any, *Any, **Any) -> Any\n- with hub.start_span(\n- op=\"django.view\", description=request.resolver_match.view_name\n- ):\n- return callback(request, *args, **kwargs)\n+ if (\n+ iscoroutinefunction is not None\n+ and wrap_async_view is not None\n+ and iscoroutinefunction(callback)\n+ ):\n+ sentry_wrapped_callback = wrap_async_view(hub, callback)\n+ else:\n+ sentry_wrapped_callback = _wrap_sync_view(hub, callback)\n \n else:\n sentry_wrapped_callback = callback\n@@ -41,3 +54,16 @@\n return sentry_wrapped_callback\n \n BaseHandler.make_view_atomic = sentry_patched_make_view_atomic\n+\n+\n+def _wrap_sync_view(hub, callback):\n+ # type: (Hub, Any) -> Any\n+ @_functools.wraps(callback)\n+ def sentry_wrapped_callback(request, *args, **kwargs):\n+ # type: (Any, *Any, **Any) -> Any\n+ with hub.start_span(\n+ op=\"django.view\", description=request.resolver_match.view_name\n+ ):\n+ return callback(request, *args, **kwargs)\n+\n+ return sentry_wrapped_callback\n", "issue": "Django 3.1 async views do not work\nWhen using sentry versions greater than 0.16.3, (tested on 0.18.0), Django 3.1 aysnc views do not work.\r\n\r\n```\r\nlog.py 224 ERROR Internal Server Error: /async_ok\r\nTraceback (most recent call last):\r\n File 
\"/Users/williamchu/dev/sentry-python/.tox/py3.8-django-3.1/lib/python3.8/site-packages/django/core/handlers/exception.py\", line 47, in inner\r\n response = get_response(request)\r\n File \"/Users/williamchu/dev/sentry-python/.tox/py3.8-django-3.1/lib/python3.8/site-packages/django/core/handlers/base.py\", line 186, in _get_response\r\n self.check_response(response, callback)\r\n File \"/Users/williamchu/dev/sentry-python/.tox/py3.8-django-3.1/lib/python3.8/site-packages/django/core/handlers/base.py\", line 312, in check_response\r\n raise ValueError(\r\nValueError: The view tests.integrations.django.myapp.views.async_ok didn't return an HttpResponse object. It returned an unawaited coroutine instead. You may need to add an 'await' into your view.\r\n```\r\n\r\nI have made a branch with a test case to demonstrate this: https://github.com/uptickmetachu/sentry-python/tree/django3.1-test-async-view\r\n\r\n\n", "before_files": [{"content": "from sentry_sdk.hub import Hub\nfrom sentry_sdk._types import MYPY\nfrom sentry_sdk import _functools\n\nif MYPY:\n from typing import Any\n\n\ndef patch_views():\n # type: () -> None\n\n from django.core.handlers.base import BaseHandler\n from sentry_sdk.integrations.django import DjangoIntegration\n\n old_make_view_atomic = BaseHandler.make_view_atomic\n\n @_functools.wraps(old_make_view_atomic)\n def sentry_patched_make_view_atomic(self, *args, **kwargs):\n # type: (Any, *Any, **Any) -> Any\n callback = old_make_view_atomic(self, *args, **kwargs)\n\n # XXX: The wrapper function is created for every request. Find more\n # efficient way to wrap views (or build a cache?)\n\n hub = Hub.current\n integration = hub.get_integration(DjangoIntegration)\n\n if integration is not None and integration.middleware_spans:\n\n @_functools.wraps(callback)\n def sentry_wrapped_callback(request, *args, **kwargs):\n # type: (Any, *Any, **Any) -> Any\n with hub.start_span(\n op=\"django.view\", description=request.resolver_match.view_name\n ):\n return callback(request, *args, **kwargs)\n\n else:\n sentry_wrapped_callback = callback\n\n return sentry_wrapped_callback\n\n BaseHandler.make_view_atomic = sentry_patched_make_view_atomic\n", "path": "sentry_sdk/integrations/django/views.py"}, {"content": "\"\"\"\nInstrumentation for Django 3.0\n\nSince this file contains `async def` it is conditionally imported in\n`sentry_sdk.integrations.django` (depending on the existence of\n`django.core.handlers.asgi`.\n\"\"\"\n\nfrom sentry_sdk import Hub\nfrom sentry_sdk._types import MYPY\n\nfrom sentry_sdk.integrations.django import DjangoIntegration\nfrom sentry_sdk.integrations.asgi import SentryAsgiMiddleware\n\nif MYPY:\n from typing import Any\n from typing import Union\n\n from django.http.response import HttpResponse\n\n\ndef patch_django_asgi_handler_impl(cls):\n # type: (Any) -> None\n old_app = cls.__call__\n\n async def sentry_patched_asgi_handler(self, scope, receive, send):\n # type: (Any, Any, Any, Any) -> Any\n if Hub.current.get_integration(DjangoIntegration) is None:\n return await old_app(self, scope, receive, send)\n\n middleware = SentryAsgiMiddleware(\n old_app.__get__(self, cls), unsafe_context_data=True\n )._run_asgi3\n return await middleware(scope, receive, send)\n\n cls.__call__ = sentry_patched_asgi_handler\n\n\ndef patch_get_response_async(cls, _before_get_response):\n # type: (Any, Any) -> None\n old_get_response_async = cls.get_response_async\n\n async def sentry_patched_get_response_async(self, request):\n # type: (Any, Any) -> Union[HttpResponse, 
BaseException]\n _before_get_response(request)\n return await old_get_response_async(self, request)\n\n cls.get_response_async = sentry_patched_get_response_async\n\n\ndef patch_channels_asgi_handler_impl(cls):\n # type: (Any) -> None\n old_app = cls.__call__\n\n async def sentry_patched_asgi_handler(self, receive, send):\n # type: (Any, Any, Any) -> Any\n if Hub.current.get_integration(DjangoIntegration) is None:\n return await old_app(self, receive, send)\n\n middleware = SentryAsgiMiddleware(\n lambda _scope: old_app.__get__(self, cls), unsafe_context_data=True\n )\n\n return await middleware(self.scope)(receive, send)\n\n cls.__call__ = sentry_patched_asgi_handler\n", "path": "sentry_sdk/integrations/django/asgi.py"}], "after_files": [{"content": "from sentry_sdk.hub import Hub\nfrom sentry_sdk._types import MYPY\nfrom sentry_sdk import _functools\n\nif MYPY:\n from typing import Any\n\n\ntry:\n from asyncio import iscoroutinefunction\nexcept ImportError:\n iscoroutinefunction = None # type: ignore\n\n\ntry:\n from sentry_sdk.integrations.django.asgi import wrap_async_view\nexcept (ImportError, SyntaxError):\n wrap_async_view = None # type: ignore\n\n\ndef patch_views():\n # type: () -> None\n\n from django.core.handlers.base import BaseHandler\n from sentry_sdk.integrations.django import DjangoIntegration\n\n old_make_view_atomic = BaseHandler.make_view_atomic\n\n @_functools.wraps(old_make_view_atomic)\n def sentry_patched_make_view_atomic(self, *args, **kwargs):\n # type: (Any, *Any, **Any) -> Any\n callback = old_make_view_atomic(self, *args, **kwargs)\n\n # XXX: The wrapper function is created for every request. Find more\n # efficient way to wrap views (or build a cache?)\n\n hub = Hub.current\n integration = hub.get_integration(DjangoIntegration)\n\n if integration is not None and integration.middleware_spans:\n\n if (\n iscoroutinefunction is not None\n and wrap_async_view is not None\n and iscoroutinefunction(callback)\n ):\n sentry_wrapped_callback = wrap_async_view(hub, callback)\n else:\n sentry_wrapped_callback = _wrap_sync_view(hub, callback)\n\n else:\n sentry_wrapped_callback = callback\n\n return sentry_wrapped_callback\n\n BaseHandler.make_view_atomic = sentry_patched_make_view_atomic\n\n\ndef _wrap_sync_view(hub, callback):\n # type: (Hub, Any) -> Any\n @_functools.wraps(callback)\n def sentry_wrapped_callback(request, *args, **kwargs):\n # type: (Any, *Any, **Any) -> Any\n with hub.start_span(\n op=\"django.view\", description=request.resolver_match.view_name\n ):\n return callback(request, *args, **kwargs)\n\n return sentry_wrapped_callback\n", "path": "sentry_sdk/integrations/django/views.py"}, {"content": "\"\"\"\nInstrumentation for Django 3.0\n\nSince this file contains `async def` it is conditionally imported in\n`sentry_sdk.integrations.django` (depending on the existence of\n`django.core.handlers.asgi`.\n\"\"\"\n\nfrom sentry_sdk import Hub, _functools\nfrom sentry_sdk._types import MYPY\n\nfrom sentry_sdk.integrations.asgi import SentryAsgiMiddleware\n\nif MYPY:\n from typing import Any\n from typing import Union\n\n from django.http.response import HttpResponse\n\n\ndef patch_django_asgi_handler_impl(cls):\n # type: (Any) -> None\n\n from sentry_sdk.integrations.django import DjangoIntegration\n\n old_app = cls.__call__\n\n async def sentry_patched_asgi_handler(self, scope, receive, send):\n # type: (Any, Any, Any, Any) -> Any\n if Hub.current.get_integration(DjangoIntegration) is None:\n return await old_app(self, scope, receive, send)\n\n middleware = 
SentryAsgiMiddleware(\n old_app.__get__(self, cls), unsafe_context_data=True\n )._run_asgi3\n return await middleware(scope, receive, send)\n\n cls.__call__ = sentry_patched_asgi_handler\n\n\ndef patch_get_response_async(cls, _before_get_response):\n # type: (Any, Any) -> None\n old_get_response_async = cls.get_response_async\n\n async def sentry_patched_get_response_async(self, request):\n # type: (Any, Any) -> Union[HttpResponse, BaseException]\n _before_get_response(request)\n return await old_get_response_async(self, request)\n\n cls.get_response_async = sentry_patched_get_response_async\n\n\ndef patch_channels_asgi_handler_impl(cls):\n # type: (Any) -> None\n\n from sentry_sdk.integrations.django import DjangoIntegration\n\n old_app = cls.__call__\n\n async def sentry_patched_asgi_handler(self, receive, send):\n # type: (Any, Any, Any) -> Any\n if Hub.current.get_integration(DjangoIntegration) is None:\n return await old_app(self, receive, send)\n\n middleware = SentryAsgiMiddleware(\n lambda _scope: old_app.__get__(self, cls), unsafe_context_data=True\n )\n\n return await middleware(self.scope)(receive, send)\n\n cls.__call__ = sentry_patched_asgi_handler\n\n\ndef wrap_async_view(hub, callback):\n # type: (Hub, Any) -> Any\n @_functools.wraps(callback)\n async def sentry_wrapped_callback(request, *args, **kwargs):\n # type: (Any, *Any, **Any) -> Any\n\n with hub.start_span(\n op=\"django.view\", description=request.resolver_match.view_name\n ):\n return await callback(request, *args, **kwargs)\n\n return sentry_wrapped_callback\n", "path": "sentry_sdk/integrations/django/asgi.py"}]} | 1,669 | 954 |
gh_patches_debug_13321 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-1587 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PearsonCorrCoef UnboundLocalError using ddp
I want to use PearsonCorrCoef in my PyTorch Lightning code with the ddp strategy. My num_outputs > 1.
In x_step_end, I call CorrCoef.update(preds, targets).
In x_epoch_end, I call CorrCoef.compute().mean().
I always receive:
```
File "/home/xx/Documents/anconda3/envs/xx/lib/python3.8/site-packages/torchmetrics/metric.py", line 531, in wrapped_func
value = compute(*args, **kwargs)
File "/home/xx/Documents/anconda3/envs/xx/lib/python3.8/site-packages/torchmetrics/regression/pearson.py", line 152, in compute
_, _, var_x, var_y, corr_xy, n_total = _final_aggregation(
File "/home/xx/Documents/anconda3/envs/xx/lib/python3.8/site-packages/torchmetrics/regression/pearson.py", line 63, in _final_aggregation
return mean_x, mean_y, var_x, var_y, corr_xy, nb
UnboundLocalError: local variable 'mean_x' referenced before assignment
```
But when I change to dp strategy, it's fine.
--- END ISSUE ---
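The traceback points at the aggregation loop. A reduced sketch of that control flow (one statistic instead of six, illustrative only) shows why a state with a single entry triggers the UnboundLocalError: the result variables are only bound inside the loop over additional devices, and that loop never runs when there is just one entry.

```
import torch


def final_aggregation(means_x, nbs):
    # Same control flow as _final_aggregation, reduced to a single statistic.
    mx1, n1 = means_x[0], nbs[0]
    for i in range(1, len(means_x)):
        mx2, n2 = means_x[i], nbs[i]
        nb = n1 + n2
        mean_x = (n1 * mx1 + n2 * mx2) / nb
        mx1, n1 = mean_x, nb
    return mean_x, nb  # only bound if the loop body ran at least once


print(final_aggregation(torch.ones(2, 3), torch.full((2,), 4.0)))  # two "device" rows: fine

try:
    final_aggregation(torch.ones(1, 3), torch.full((1,), 4.0))  # single row
except UnboundLocalError as err:
    print(err)  # same error as in the issue traceback
```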
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/torchmetrics/regression/pearson.py`
Content:
```
1 # Copyright The Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Any, List, Tuple
15
16 import torch
17 from torch import Tensor
18
19 from torchmetrics.functional.regression.pearson import _pearson_corrcoef_compute, _pearson_corrcoef_update
20 from torchmetrics.metric import Metric
21
22
23 def _final_aggregation(
24 means_x: Tensor,
25 means_y: Tensor,
26 vars_x: Tensor,
27 vars_y: Tensor,
28 corrs_xy: Tensor,
29 nbs: Tensor,
30 ) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:
31 """Aggregate the statistics from multiple devices.
32
33 Formula taken from here: `Aggregate the statistics from multiple devices`_
34 """
35 # assert len(means_x) > 1 and len(means_y) > 1 and len(vars_x) > 1 and len(vars_y) > 1 and len(corrs_xy) > 1
36 mx1, my1, vx1, vy1, cxy1, n1 = means_x[0], means_y[0], vars_x[0], vars_y[0], corrs_xy[0], nbs[0]
37 for i in range(1, len(means_x)):
38 mx2, my2, vx2, vy2, cxy2, n2 = means_x[i], means_y[i], vars_x[i], vars_y[i], corrs_xy[i], nbs[i]
39 nb = n1 + n2
40 mean_x = (n1 * mx1 + n2 * mx2) / nb
41 mean_y = (n1 * my1 + n2 * my2) / nb
42
43 # var_x
44 element_x1 = (n1 + 1) * mean_x - n1 * mx1
45 vx1 += (element_x1 - mx1) * (element_x1 - mean_x) - (element_x1 - mean_x) ** 2
46 element_x2 = (n2 + 1) * mean_x - n2 * mx2
47 vx2 += (element_x2 - mx2) * (element_x2 - mean_x) - (element_x2 - mean_x) ** 2
48 var_x = vx1 + vx2
49
50 # var_y
51 element_y1 = (n1 + 1) * mean_y - n1 * my1
52 vy1 += (element_y1 - my1) * (element_y1 - mean_y) - (element_y1 - mean_y) ** 2
53 element_y2 = (n2 + 1) * mean_y - n2 * my2
54 vy2 += (element_y2 - my2) * (element_y2 - mean_y) - (element_y2 - mean_y) ** 2
55 var_y = vy1 + vy2
56
57 # corr
58 cxy1 += (element_x1 - mx1) * (element_y1 - mean_y) - (element_x1 - mean_x) * (element_y1 - mean_y)
59 cxy2 += (element_x2 - mx2) * (element_y2 - mean_y) - (element_x2 - mean_x) * (element_y2 - mean_y)
60 corr_xy = cxy1 + cxy2
61
62 mx1, my1, vx1, vy1, cxy1, n1 = mean_x, mean_y, var_x, var_y, corr_xy, nb
63 return mean_x, mean_y, var_x, var_y, corr_xy, nb
64
65
66 class PearsonCorrCoef(Metric):
67 r"""Compute `Pearson Correlation Coefficient`_.
68
69 .. math::
70 P_{corr}(x,y) = \frac{cov(x,y)}{\sigma_x \sigma_y}
71
72 Where :math:`y` is a tensor of target values, and :math:`x` is a tensor of predictions.
73
74 As input to ``forward`` and ``update`` the metric accepts the following input:
75
76 - ``preds`` (:class:`~torch.Tensor`): either single output float tensor with shape ``(N,)``
77 or multioutput float tensor of shape ``(N,d)``
78 - ``target`` (:class:`~torch.Tensor`): either single output tensor with shape ``(N,)``
79 or multioutput tensor of shape ``(N,d)``
80
81 As output of ``forward`` and ``compute`` the metric returns the following output:
82
83 - ``pearson`` (:class:`~torch.Tensor`): A tensor with the Pearson Correlation Coefficient
84
85 Args:
86 num_outputs: Number of outputs in multioutput setting
87 kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
88
89 Example (single output regression):
90 >>> from torchmetrics import PearsonCorrCoef
91 >>> target = torch.tensor([3, -0.5, 2, 7])
92 >>> preds = torch.tensor([2.5, 0.0, 2, 8])
93 >>> pearson = PearsonCorrCoef()
94 >>> pearson(preds, target)
95 tensor(0.9849)
96
97 Example (multi output regression):
98 >>> from torchmetrics import PearsonCorrCoef
99 >>> target = torch.tensor([[3, -0.5], [2, 7]])
100 >>> preds = torch.tensor([[2.5, 0.0], [2, 8]])
101 >>> pearson = PearsonCorrCoef(num_outputs=2)
102 >>> pearson(preds, target)
103 tensor([1., 1.])
104 """
105 is_differentiable = True
106 higher_is_better = None # both -1 and 1 are optimal
107 full_state_update: bool = True
108 preds: List[Tensor]
109 target: List[Tensor]
110 mean_x: Tensor
111 mean_y: Tensor
112 var_x: Tensor
113 var_y: Tensor
114 corr_xy: Tensor
115 n_total: Tensor
116
117 def __init__(
118 self,
119 num_outputs: int = 1,
120 **kwargs: Any,
121 ) -> None:
122 super().__init__(**kwargs)
123 if not isinstance(num_outputs, int) and num_outputs < 1:
124 raise ValueError("Expected argument `num_outputs` to be an int larger than 0, but got {num_outputs}")
125 self.num_outputs = num_outputs
126
127 self.add_state("mean_x", default=torch.zeros(self.num_outputs), dist_reduce_fx=None)
128 self.add_state("mean_y", default=torch.zeros(self.num_outputs), dist_reduce_fx=None)
129 self.add_state("var_x", default=torch.zeros(self.num_outputs), dist_reduce_fx=None)
130 self.add_state("var_y", default=torch.zeros(self.num_outputs), dist_reduce_fx=None)
131 self.add_state("corr_xy", default=torch.zeros(self.num_outputs), dist_reduce_fx=None)
132 self.add_state("n_total", default=torch.zeros(self.num_outputs), dist_reduce_fx=None)
133
134 def update(self, preds: Tensor, target: Tensor) -> None:
135 """Update state with predictions and targets."""
136 self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total = _pearson_corrcoef_update(
137 preds,
138 target,
139 self.mean_x,
140 self.mean_y,
141 self.var_x,
142 self.var_y,
143 self.corr_xy,
144 self.n_total,
145 self.num_outputs,
146 )
147
148 def compute(self) -> Tensor:
149 """Compute pearson correlation coefficient over state."""
150 if (self.num_outputs == 1 and self.mean_x.numel() > 1) or (self.num_outputs > 1 and self.mean_x.ndim > 1):
151 # multiple devices, need further reduction
152 _, _, var_x, var_y, corr_xy, n_total = _final_aggregation(
153 self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total
154 )
155 else:
156 var_x = self.var_x
157 var_y = self.var_y
158 corr_xy = self.corr_xy
159 n_total = self.n_total
160 return _pearson_corrcoef_compute(var_x, var_y, corr_xy, n_total)
161
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/torchmetrics/regression/pearson.py b/src/torchmetrics/regression/pearson.py
--- a/src/torchmetrics/regression/pearson.py
+++ b/src/torchmetrics/regression/pearson.py
@@ -32,7 +32,8 @@
Formula taken from here: `Aggregate the statistics from multiple devices`_
"""
- # assert len(means_x) > 1 and len(means_y) > 1 and len(vars_x) > 1 and len(vars_y) > 1 and len(corrs_xy) > 1
+ if len(means_x) == 1:
+ return means_x[0], means_y[0], vars_x[0], vars_y[0], corrs_xy[0], nbs[0]
mx1, my1, vx1, vy1, cxy1, n1 = means_x[0], means_y[0], vars_x[0], vars_y[0], corrs_xy[0], nbs[0]
for i in range(1, len(means_x)):
mx2, my2, vx2, vy2, cxy2, n2 = means_x[i], means_y[i], vars_x[i], vars_y[i], corrs_xy[i], nbs[i]
| {"golden_diff": "diff --git a/src/torchmetrics/regression/pearson.py b/src/torchmetrics/regression/pearson.py\n--- a/src/torchmetrics/regression/pearson.py\n+++ b/src/torchmetrics/regression/pearson.py\n@@ -32,7 +32,8 @@\n \n Formula taken from here: `Aggregate the statistics from multiple devices`_\n \"\"\"\n- # assert len(means_x) > 1 and len(means_y) > 1 and len(vars_x) > 1 and len(vars_y) > 1 and len(corrs_xy) > 1\n+ if len(means_x) == 1:\n+ return means_x[0], means_y[0], vars_x[0], vars_y[0], corrs_xy[0], nbs[0]\n mx1, my1, vx1, vy1, cxy1, n1 = means_x[0], means_y[0], vars_x[0], vars_y[0], corrs_xy[0], nbs[0]\n for i in range(1, len(means_x)):\n mx2, my2, vx2, vy2, cxy2, n2 = means_x[i], means_y[i], vars_x[i], vars_y[i], corrs_xy[i], nbs[i]\n", "issue": "PearsonCorrCoef UnboundLocalError using ddp\nI want to use PearsonCorrCoef in my pytorch lightning code with ddp strategy.\r\nMy num_outputs > 1\r\nIn x_step_end, I use CorrCoef.update(preds, targets)\r\nIn x_epoch_end, I use CorrCoef.compute().mean()\r\nAlways received\r\n\r\n```\r\n File \"/home/xx/Documents/anconda3/envs/xx/lib/python3.8/site-packages/torchmetrics/metric.py\", line 531, in wrapped_func\r\n value = compute(*args, **kwargs)\r\n File \"/home/xx/Documents/anconda3/envs/xx/lib/python3.8/site-packages/torchmetrics/regression/pearson.py\", line 152, in compute\r\n _, _, var_x, var_y, corr_xy, n_total = _final_aggregation(\r\n File \"/home/xx/Documents/anconda3/envs/xx/lib/python3.8/site-packages/torchmetrics/regression/pearson.py\", line 63, in _final_aggregation\r\n return mean_x, mean_y, var_x, var_y, corr_xy, nb\r\nUnboundLocalError: local variable 'mean_x' referenced before assignment\r\n```\r\n\r\nBut when I change to dp strategy, it's fine.\r\n\n", "before_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, List, Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.functional.regression.pearson import _pearson_corrcoef_compute, _pearson_corrcoef_update\nfrom torchmetrics.metric import Metric\n\n\ndef _final_aggregation(\n means_x: Tensor,\n means_y: Tensor,\n vars_x: Tensor,\n vars_y: Tensor,\n corrs_xy: Tensor,\n nbs: Tensor,\n) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:\n \"\"\"Aggregate the statistics from multiple devices.\n\n Formula taken from here: `Aggregate the statistics from multiple devices`_\n \"\"\"\n # assert len(means_x) > 1 and len(means_y) > 1 and len(vars_x) > 1 and len(vars_y) > 1 and len(corrs_xy) > 1\n mx1, my1, vx1, vy1, cxy1, n1 = means_x[0], means_y[0], vars_x[0], vars_y[0], corrs_xy[0], nbs[0]\n for i in range(1, len(means_x)):\n mx2, my2, vx2, vy2, cxy2, n2 = means_x[i], means_y[i], vars_x[i], vars_y[i], corrs_xy[i], nbs[i]\n nb = n1 + n2\n mean_x = (n1 * mx1 + n2 * mx2) / nb\n mean_y = (n1 * my1 + n2 * my2) / nb\n\n # var_x\n element_x1 = (n1 + 1) * mean_x - n1 * mx1\n vx1 += (element_x1 - mx1) * (element_x1 - mean_x) - (element_x1 - mean_x) ** 2\n element_x2 = (n2 + 1) * mean_x - 
n2 * mx2\n vx2 += (element_x2 - mx2) * (element_x2 - mean_x) - (element_x2 - mean_x) ** 2\n var_x = vx1 + vx2\n\n # var_y\n element_y1 = (n1 + 1) * mean_y - n1 * my1\n vy1 += (element_y1 - my1) * (element_y1 - mean_y) - (element_y1 - mean_y) ** 2\n element_y2 = (n2 + 1) * mean_y - n2 * my2\n vy2 += (element_y2 - my2) * (element_y2 - mean_y) - (element_y2 - mean_y) ** 2\n var_y = vy1 + vy2\n\n # corr\n cxy1 += (element_x1 - mx1) * (element_y1 - mean_y) - (element_x1 - mean_x) * (element_y1 - mean_y)\n cxy2 += (element_x2 - mx2) * (element_y2 - mean_y) - (element_x2 - mean_x) * (element_y2 - mean_y)\n corr_xy = cxy1 + cxy2\n\n mx1, my1, vx1, vy1, cxy1, n1 = mean_x, mean_y, var_x, var_y, corr_xy, nb\n return mean_x, mean_y, var_x, var_y, corr_xy, nb\n\n\nclass PearsonCorrCoef(Metric):\n r\"\"\"Compute `Pearson Correlation Coefficient`_.\n\n .. math::\n P_{corr}(x,y) = \\frac{cov(x,y)}{\\sigma_x \\sigma_y}\n\n Where :math:`y` is a tensor of target values, and :math:`x` is a tensor of predictions.\n\n As input to ``forward`` and ``update`` the metric accepts the following input:\n\n - ``preds`` (:class:`~torch.Tensor`): either single output float tensor with shape ``(N,)``\n or multioutput float tensor of shape ``(N,d)``\n - ``target`` (:class:`~torch.Tensor`): either single output tensor with shape ``(N,)``\n or multioutput tensor of shape ``(N,d)``\n\n As output of ``forward`` and ``compute`` the metric returns the following output:\n\n - ``pearson`` (:class:`~torch.Tensor`): A tensor with the Pearson Correlation Coefficient\n\n Args:\n num_outputs: Number of outputs in multioutput setting\n kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.\n\n Example (single output regression):\n >>> from torchmetrics import PearsonCorrCoef\n >>> target = torch.tensor([3, -0.5, 2, 7])\n >>> preds = torch.tensor([2.5, 0.0, 2, 8])\n >>> pearson = PearsonCorrCoef()\n >>> pearson(preds, target)\n tensor(0.9849)\n\n Example (multi output regression):\n >>> from torchmetrics import PearsonCorrCoef\n >>> target = torch.tensor([[3, -0.5], [2, 7]])\n >>> preds = torch.tensor([[2.5, 0.0], [2, 8]])\n >>> pearson = PearsonCorrCoef(num_outputs=2)\n >>> pearson(preds, target)\n tensor([1., 1.])\n \"\"\"\n is_differentiable = True\n higher_is_better = None # both -1 and 1 are optimal\n full_state_update: bool = True\n preds: List[Tensor]\n target: List[Tensor]\n mean_x: Tensor\n mean_y: Tensor\n var_x: Tensor\n var_y: Tensor\n corr_xy: Tensor\n n_total: Tensor\n\n def __init__(\n self,\n num_outputs: int = 1,\n **kwargs: Any,\n ) -> None:\n super().__init__(**kwargs)\n if not isinstance(num_outputs, int) and num_outputs < 1:\n raise ValueError(\"Expected argument `num_outputs` to be an int larger than 0, but got {num_outputs}\")\n self.num_outputs = num_outputs\n\n self.add_state(\"mean_x\", default=torch.zeros(self.num_outputs), dist_reduce_fx=None)\n self.add_state(\"mean_y\", default=torch.zeros(self.num_outputs), dist_reduce_fx=None)\n self.add_state(\"var_x\", default=torch.zeros(self.num_outputs), dist_reduce_fx=None)\n self.add_state(\"var_y\", default=torch.zeros(self.num_outputs), dist_reduce_fx=None)\n self.add_state(\"corr_xy\", default=torch.zeros(self.num_outputs), dist_reduce_fx=None)\n self.add_state(\"n_total\", default=torch.zeros(self.num_outputs), dist_reduce_fx=None)\n\n def update(self, preds: Tensor, target: Tensor) -> None:\n \"\"\"Update state with predictions and targets.\"\"\"\n self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total = 
_pearson_corrcoef_update(\n preds,\n target,\n self.mean_x,\n self.mean_y,\n self.var_x,\n self.var_y,\n self.corr_xy,\n self.n_total,\n self.num_outputs,\n )\n\n def compute(self) -> Tensor:\n \"\"\"Compute pearson correlation coefficient over state.\"\"\"\n if (self.num_outputs == 1 and self.mean_x.numel() > 1) or (self.num_outputs > 1 and self.mean_x.ndim > 1):\n # multiple devices, need further reduction\n _, _, var_x, var_y, corr_xy, n_total = _final_aggregation(\n self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total\n )\n else:\n var_x = self.var_x\n var_y = self.var_y\n corr_xy = self.corr_xy\n n_total = self.n_total\n return _pearson_corrcoef_compute(var_x, var_y, corr_xy, n_total)\n", "path": "src/torchmetrics/regression/pearson.py"}], "after_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, List, Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.functional.regression.pearson import _pearson_corrcoef_compute, _pearson_corrcoef_update\nfrom torchmetrics.metric import Metric\n\n\ndef _final_aggregation(\n means_x: Tensor,\n means_y: Tensor,\n vars_x: Tensor,\n vars_y: Tensor,\n corrs_xy: Tensor,\n nbs: Tensor,\n) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:\n \"\"\"Aggregate the statistics from multiple devices.\n\n Formula taken from here: `Aggregate the statistics from multiple devices`_\n \"\"\"\n if len(means_x) == 1:\n return means_x[0], means_y[0], vars_x[0], vars_y[0], corrs_xy[0], nbs[0]\n mx1, my1, vx1, vy1, cxy1, n1 = means_x[0], means_y[0], vars_x[0], vars_y[0], corrs_xy[0], nbs[0]\n for i in range(1, len(means_x)):\n mx2, my2, vx2, vy2, cxy2, n2 = means_x[i], means_y[i], vars_x[i], vars_y[i], corrs_xy[i], nbs[i]\n nb = n1 + n2\n mean_x = (n1 * mx1 + n2 * mx2) / nb\n mean_y = (n1 * my1 + n2 * my2) / nb\n\n # var_x\n element_x1 = (n1 + 1) * mean_x - n1 * mx1\n vx1 += (element_x1 - mx1) * (element_x1 - mean_x) - (element_x1 - mean_x) ** 2\n element_x2 = (n2 + 1) * mean_x - n2 * mx2\n vx2 += (element_x2 - mx2) * (element_x2 - mean_x) - (element_x2 - mean_x) ** 2\n var_x = vx1 + vx2\n\n # var_y\n element_y1 = (n1 + 1) * mean_y - n1 * my1\n vy1 += (element_y1 - my1) * (element_y1 - mean_y) - (element_y1 - mean_y) ** 2\n element_y2 = (n2 + 1) * mean_y - n2 * my2\n vy2 += (element_y2 - my2) * (element_y2 - mean_y) - (element_y2 - mean_y) ** 2\n var_y = vy1 + vy2\n\n # corr\n cxy1 += (element_x1 - mx1) * (element_y1 - mean_y) - (element_x1 - mean_x) * (element_y1 - mean_y)\n cxy2 += (element_x2 - mx2) * (element_y2 - mean_y) - (element_x2 - mean_x) * (element_y2 - mean_y)\n corr_xy = cxy1 + cxy2\n\n mx1, my1, vx1, vy1, cxy1, n1 = mean_x, mean_y, var_x, var_y, corr_xy, nb\n return mean_x, mean_y, var_x, var_y, corr_xy, nb\n\n\nclass PearsonCorrCoef(Metric):\n r\"\"\"Compute `Pearson Correlation Coefficient`_.\n\n .. 
math::\n P_{corr}(x,y) = \\frac{cov(x,y)}{\\sigma_x \\sigma_y}\n\n Where :math:`y` is a tensor of target values, and :math:`x` is a tensor of predictions.\n\n As input to ``forward`` and ``update`` the metric accepts the following input:\n\n - ``preds`` (:class:`~torch.Tensor`): either single output float tensor with shape ``(N,)``\n or multioutput float tensor of shape ``(N,d)``\n - ``target`` (:class:`~torch.Tensor`): either single output tensor with shape ``(N,)``\n or multioutput tensor of shape ``(N,d)``\n\n As output of ``forward`` and ``compute`` the metric returns the following output:\n\n - ``pearson`` (:class:`~torch.Tensor`): A tensor with the Pearson Correlation Coefficient\n\n Args:\n num_outputs: Number of outputs in multioutput setting\n kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.\n\n Example (single output regression):\n >>> from torchmetrics import PearsonCorrCoef\n >>> target = torch.tensor([3, -0.5, 2, 7])\n >>> preds = torch.tensor([2.5, 0.0, 2, 8])\n >>> pearson = PearsonCorrCoef()\n >>> pearson(preds, target)\n tensor(0.9849)\n\n Example (multi output regression):\n >>> from torchmetrics import PearsonCorrCoef\n >>> target = torch.tensor([[3, -0.5], [2, 7]])\n >>> preds = torch.tensor([[2.5, 0.0], [2, 8]])\n >>> pearson = PearsonCorrCoef(num_outputs=2)\n >>> pearson(preds, target)\n tensor([1., 1.])\n \"\"\"\n is_differentiable = True\n higher_is_better = None # both -1 and 1 are optimal\n full_state_update: bool = True\n preds: List[Tensor]\n target: List[Tensor]\n mean_x: Tensor\n mean_y: Tensor\n var_x: Tensor\n var_y: Tensor\n corr_xy: Tensor\n n_total: Tensor\n\n def __init__(\n self,\n num_outputs: int = 1,\n **kwargs: Any,\n ) -> None:\n super().__init__(**kwargs)\n if not isinstance(num_outputs, int) and num_outputs < 1:\n raise ValueError(\"Expected argument `num_outputs` to be an int larger than 0, but got {num_outputs}\")\n self.num_outputs = num_outputs\n\n self.add_state(\"mean_x\", default=torch.zeros(self.num_outputs), dist_reduce_fx=None)\n self.add_state(\"mean_y\", default=torch.zeros(self.num_outputs), dist_reduce_fx=None)\n self.add_state(\"var_x\", default=torch.zeros(self.num_outputs), dist_reduce_fx=None)\n self.add_state(\"var_y\", default=torch.zeros(self.num_outputs), dist_reduce_fx=None)\n self.add_state(\"corr_xy\", default=torch.zeros(self.num_outputs), dist_reduce_fx=None)\n self.add_state(\"n_total\", default=torch.zeros(self.num_outputs), dist_reduce_fx=None)\n\n def update(self, preds: Tensor, target: Tensor) -> None:\n \"\"\"Update state with predictions and targets.\"\"\"\n self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total = _pearson_corrcoef_update(\n preds,\n target,\n self.mean_x,\n self.mean_y,\n self.var_x,\n self.var_y,\n self.corr_xy,\n self.n_total,\n self.num_outputs,\n )\n\n def compute(self) -> Tensor:\n \"\"\"Compute pearson correlation coefficient over state.\"\"\"\n if (self.num_outputs == 1 and self.mean_x.numel() > 1) or (self.num_outputs > 1 and self.mean_x.ndim > 1):\n # multiple devices, need further reduction\n _, _, var_x, var_y, corr_xy, n_total = _final_aggregation(\n self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total\n )\n else:\n var_x = self.var_x\n var_y = self.var_y\n corr_xy = self.corr_xy\n n_total = self.n_total\n return _pearson_corrcoef_compute(var_x, var_y, corr_xy, n_total)\n", "path": "src/torchmetrics/regression/pearson.py"}]} | 2,795 | 284 |
gh_patches_debug_34853 | rasdani/github-patches | git_diff | scrapy__scrapy-5790 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OpenSSL unsafe legacy renegotiation disabled error
<!--
Thanks for taking an interest in Scrapy!
If you have a question that starts with "How to...", please see the Scrapy Community page: https://scrapy.org/community/.
The GitHub issue tracker's purpose is to deal with bug reports and feature requests for the project itself.
Keep in mind that by filing an issue, you are expected to comply with Scrapy's Code of Conduct, including treating everyone with respect: https://github.com/scrapy/scrapy/blob/master/CODE_OF_CONDUCT.md
The following is a suggested template to structure your issue, you can find more guidelines at https://doc.scrapy.org/en/latest/contributing.html#reporting-bugs
-->
### Description
I get an SSL issue on a working [site](https://dorotheum.com)
`twisted.web._newclient.ResponseNeverReceived: [<twisted.python.failure.Failure OpenSSL.SSL.Error: [('SSL routines', '', 'unsafe legacy renegotiation disabled')]>]`
### Steps to Reproduce
1. `scrapy shell https://dorotheum.com`
**Expected behavior:** HTML page
**Actual behavior:** the error above
**Reproduces how often:** 100%
### Versions
```
Scrapy : 2.6.1
lxml : 4.8.0.0
libxml2 : 2.9.4
cssselect : 1.1.0
parsel : 1.6.0
w3lib : 1.22.0
Twisted : 22.4.0
Python : 3.9.12 (main, Mar 26 2022, 15:44:31) - [Clang 13.1.6 (clang-1316.0.21.2)]
pyOpenSSL : 22.0.0 (OpenSSL 3.0.3 3 May 2022)
cryptography : 37.0.2
Platform : macOS-12.2.1-arm64-arm-64bit
```
--- END ISSUE ---
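For context on what the error above means in practice: OpenSSL 3.x disables legacy (insecure) renegotiation by default, so servers that still require it fail the handshake with `unsafe legacy renegotiation disabled`. The sketch below is purely illustrative and is not part of the original report — the class name and module path are hypothetical — but it shows the pyOpenSSL-level switch (OpenSSL's `SSL_OP_LEGACY_SERVER_CONNECT` option bit, `0x4`) that a client-side workaround would have to set on the TLS context.

```python
from OpenSSL import SSL
from scrapy.core.downloader.contextfactory import ScrapyClientContextFactory

# OpenSSL's SSL_OP_LEGACY_SERVER_CONNECT is the bit 0x4; some pyOpenSSL
# releases do not expose it by name, so fall back to the raw value.
OP_LEGACY_SERVER_CONNECT = getattr(SSL, "OP_LEGACY_SERVER_CONNECT", 0x4)


class LegacyRenegotiationContextFactory(ScrapyClientContextFactory):
    """Hypothetical context factory that re-enables legacy server connect."""

    def getContext(self, hostname=None, port=None):
        # Reuse Scrapy's default context, then flip the legacy-renegotiation bit.
        ctx = super().getContext(hostname, port)
        ctx.set_options(OP_LEGACY_SERVER_CONNECT)
        return ctx
```

A project could then point the existing `DOWNLOADER_CLIENTCONTEXTFACTORY` setting at such a class (e.g. `"myproject.tls.LegacyRenegotiationContextFactory"`, a hypothetical path) so the option applies to every HTTPS request.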
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/utils/ssl.py`
Content:
```
1 import OpenSSL
2 import OpenSSL._util as pyOpenSSLutil
3
4 from scrapy.utils.python import to_unicode
5
6
7 # The OpenSSL symbol is present since 1.1.1 but it's not currently supported in any version of pyOpenSSL.
8 # Using the binding directly, as this code does, requires cryptography 2.4.
9 SSL_OP_NO_TLSv1_3 = getattr(pyOpenSSLutil.lib, 'SSL_OP_NO_TLSv1_3', 0)
10
11
12 def ffi_buf_to_string(buf):
13 return to_unicode(pyOpenSSLutil.ffi.string(buf))
14
15
16 def x509name_to_string(x509name):
17 # from OpenSSL.crypto.X509Name.__repr__
18 result_buffer = pyOpenSSLutil.ffi.new("char[]", 512)
19 pyOpenSSLutil.lib.X509_NAME_oneline(x509name._name, result_buffer, len(result_buffer))
20
21 return ffi_buf_to_string(result_buffer)
22
23
24 def get_temp_key_info(ssl_object):
25 if not hasattr(pyOpenSSLutil.lib, 'SSL_get_server_tmp_key'): # requires OpenSSL 1.0.2
26 return None
27
28 # adapted from OpenSSL apps/s_cb.c::ssl_print_tmp_key()
29 temp_key_p = pyOpenSSLutil.ffi.new("EVP_PKEY **")
30 if not pyOpenSSLutil.lib.SSL_get_server_tmp_key(ssl_object, temp_key_p):
31 return None
32 temp_key = temp_key_p[0]
33 if temp_key == pyOpenSSLutil.ffi.NULL:
34 return None
35 temp_key = pyOpenSSLutil.ffi.gc(temp_key, pyOpenSSLutil.lib.EVP_PKEY_free)
36 key_info = []
37 key_type = pyOpenSSLutil.lib.EVP_PKEY_id(temp_key)
38 if key_type == pyOpenSSLutil.lib.EVP_PKEY_RSA:
39 key_info.append('RSA')
40 elif key_type == pyOpenSSLutil.lib.EVP_PKEY_DH:
41 key_info.append('DH')
42 elif key_type == pyOpenSSLutil.lib.EVP_PKEY_EC:
43 key_info.append('ECDH')
44 ec_key = pyOpenSSLutil.lib.EVP_PKEY_get1_EC_KEY(temp_key)
45 ec_key = pyOpenSSLutil.ffi.gc(ec_key, pyOpenSSLutil.lib.EC_KEY_free)
46 nid = pyOpenSSLutil.lib.EC_GROUP_get_curve_name(pyOpenSSLutil.lib.EC_KEY_get0_group(ec_key))
47 cname = pyOpenSSLutil.lib.EC_curve_nid2nist(nid)
48 if cname == pyOpenSSLutil.ffi.NULL:
49 cname = pyOpenSSLutil.lib.OBJ_nid2sn(nid)
50 key_info.append(ffi_buf_to_string(cname))
51 else:
52 key_info.append(ffi_buf_to_string(pyOpenSSLutil.lib.OBJ_nid2sn(key_type)))
53 key_info.append(f'{pyOpenSSLutil.lib.EVP_PKEY_bits(temp_key)} bits')
54 return ', '.join(key_info)
55
56
57 def get_openssl_version():
58 system_openssl = OpenSSL.SSL.SSLeay_version(
59 OpenSSL.SSL.SSLEAY_VERSION
60 ).decode('ascii', errors='replace')
61 return f'{OpenSSL.version.__version__} ({system_openssl})'
62
```
Path: `scrapy/core/downloader/tls.py`
Content:
```
1 import logging
2
3 from OpenSSL import SSL
4 from service_identity.exceptions import CertificateError
5 from twisted.internet._sslverify import ClientTLSOptions, verifyHostname, VerificationError
6 from twisted.internet.ssl import AcceptableCiphers
7
8 from scrapy.utils.ssl import x509name_to_string, get_temp_key_info
9
10 logger = logging.getLogger(__name__)
11
12
13 METHOD_TLS = 'TLS'
14 METHOD_TLSv10 = 'TLSv1.0'
15 METHOD_TLSv11 = 'TLSv1.1'
16 METHOD_TLSv12 = 'TLSv1.2'
17
18
19 openssl_methods = {
20 METHOD_TLS: SSL.SSLv23_METHOD, # protocol negotiation (recommended)
21 METHOD_TLSv10: SSL.TLSv1_METHOD, # TLS 1.0 only
22 METHOD_TLSv11: getattr(SSL, 'TLSv1_1_METHOD', 5), # TLS 1.1 only
23 METHOD_TLSv12: getattr(SSL, 'TLSv1_2_METHOD', 6), # TLS 1.2 only
24 }
25
26
27 class ScrapyClientTLSOptions(ClientTLSOptions):
28 """
29 SSL Client connection creator ignoring certificate verification errors
30 (for genuinely invalid certificates or bugs in verification code).
31
32 Same as Twisted's private _sslverify.ClientTLSOptions,
33 except that VerificationError, CertificateError and ValueError
34 exceptions are caught, so that the connection is not closed, only
35 logging warnings. Also, HTTPS connection parameters logging is added.
36 """
37
38 def __init__(self, hostname, ctx, verbose_logging=False):
39 super().__init__(hostname, ctx)
40 self.verbose_logging = verbose_logging
41
42 def _identityVerifyingInfoCallback(self, connection, where, ret):
43 if where & SSL.SSL_CB_HANDSHAKE_START:
44 connection.set_tlsext_host_name(self._hostnameBytes)
45 elif where & SSL.SSL_CB_HANDSHAKE_DONE:
46 if self.verbose_logging:
47 logger.debug('SSL connection to %s using protocol %s, cipher %s',
48 self._hostnameASCII,
49 connection.get_protocol_version_name(),
50 connection.get_cipher_name(),
51 )
52 server_cert = connection.get_peer_certificate()
53 logger.debug('SSL connection certificate: issuer "%s", subject "%s"',
54 x509name_to_string(server_cert.get_issuer()),
55 x509name_to_string(server_cert.get_subject()),
56 )
57 key_info = get_temp_key_info(connection._ssl)
58 if key_info:
59 logger.debug('SSL temp key: %s', key_info)
60
61 try:
62 verifyHostname(connection, self._hostnameASCII)
63 except (CertificateError, VerificationError) as e:
64 logger.warning(
65 'Remote certificate is not valid for hostname "%s"; %s',
66 self._hostnameASCII, e)
67
68 except ValueError as e:
69 logger.warning(
70 'Ignoring error while verifying certificate '
71 'from host "%s" (exception: %r)',
72 self._hostnameASCII, e)
73
74
75 DEFAULT_CIPHERS = AcceptableCiphers.fromOpenSSLCipherString('DEFAULT')
76
```
Path: `scrapy/core/downloader/contextfactory.py`
Content:
```
1 import warnings
2
3 from OpenSSL import SSL
4 from twisted.internet._sslverify import _setAcceptableProtocols
5 from twisted.internet.ssl import optionsForClientTLS, CertificateOptions, platformTrust, AcceptableCiphers
6 from twisted.web.client import BrowserLikePolicyForHTTPS
7 from twisted.web.iweb import IPolicyForHTTPS
8 from zope.interface.declarations import implementer
9 from zope.interface.verify import verifyObject
10
11 from scrapy.core.downloader.tls import DEFAULT_CIPHERS, openssl_methods, ScrapyClientTLSOptions
12 from scrapy.utils.misc import create_instance, load_object
13
14
15 @implementer(IPolicyForHTTPS)
16 class ScrapyClientContextFactory(BrowserLikePolicyForHTTPS):
17 """
18 Non-peer-certificate verifying HTTPS context factory
19
20 Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)
21 which allows TLS protocol negotiation
22
23 'A TLS/SSL connection established with [this method] may
24 understand the TLSv1, TLSv1.1 and TLSv1.2 protocols.'
25 """
26
27 def __init__(self, method=SSL.SSLv23_METHOD, tls_verbose_logging=False, tls_ciphers=None, *args, **kwargs):
28 super().__init__(*args, **kwargs)
29 self._ssl_method = method
30 self.tls_verbose_logging = tls_verbose_logging
31 if tls_ciphers:
32 self.tls_ciphers = AcceptableCiphers.fromOpenSSLCipherString(tls_ciphers)
33 else:
34 self.tls_ciphers = DEFAULT_CIPHERS
35
36 @classmethod
37 def from_settings(cls, settings, method=SSL.SSLv23_METHOD, *args, **kwargs):
38 tls_verbose_logging = settings.getbool('DOWNLOADER_CLIENT_TLS_VERBOSE_LOGGING')
39 tls_ciphers = settings['DOWNLOADER_CLIENT_TLS_CIPHERS']
40 return cls(method=method, tls_verbose_logging=tls_verbose_logging, tls_ciphers=tls_ciphers, *args, **kwargs)
41
42 def getCertificateOptions(self):
43 # setting verify=True will require you to provide CAs
44 # to verify against; in other words: it's not that simple
45
46 # backward-compatible SSL/TLS method:
47 #
48 # * this will respect `method` attribute in often recommended
49 # `ScrapyClientContextFactory` subclass
50 # (https://github.com/scrapy/scrapy/issues/1429#issuecomment-131782133)
51 #
52 # * getattr() for `_ssl_method` attribute for context factories
53 # not calling super().__init__
54 return CertificateOptions(
55 verify=False,
56 method=getattr(self, 'method', getattr(self, '_ssl_method', None)),
57 fixBrokenPeers=True,
58 acceptableCiphers=self.tls_ciphers,
59 )
60
61 # kept for old-style HTTP/1.0 downloader context twisted calls,
62 # e.g. connectSSL()
63 def getContext(self, hostname=None, port=None):
64 return self.getCertificateOptions().getContext()
65
66 def creatorForNetloc(self, hostname, port):
67 return ScrapyClientTLSOptions(hostname.decode("ascii"), self.getContext(),
68 verbose_logging=self.tls_verbose_logging)
69
70
71 @implementer(IPolicyForHTTPS)
72 class BrowserLikeContextFactory(ScrapyClientContextFactory):
73 """
74 Twisted-recommended context factory for web clients.
75
76 Quoting the documentation of the :class:`~twisted.web.client.Agent` class:
77
78 The default is to use a
79 :class:`~twisted.web.client.BrowserLikePolicyForHTTPS`, so unless you
80 have special requirements you can leave this as-is.
81
82 :meth:`creatorForNetloc` is the same as
83 :class:`~twisted.web.client.BrowserLikePolicyForHTTPS` except this context
84 factory allows setting the TLS/SSL method to use.
85
86 The default OpenSSL method is ``TLS_METHOD`` (also called
87 ``SSLv23_METHOD``) which allows TLS protocol negotiation.
88 """
89
90 def creatorForNetloc(self, hostname, port):
91 # trustRoot set to platformTrust() will use the platform's root CAs.
92 #
93 # This means that a website like https://www.cacert.org will be rejected
94 # by default, since CAcert.org CA certificate is seldom shipped.
95 return optionsForClientTLS(
96 hostname=hostname.decode("ascii"),
97 trustRoot=platformTrust(),
98 extraCertificateOptions={'method': self._ssl_method},
99 )
100
101
102 @implementer(IPolicyForHTTPS)
103 class AcceptableProtocolsContextFactory:
104 """Context factory to used to override the acceptable protocols
105 to set up the [OpenSSL.SSL.Context] for doing NPN and/or ALPN
106 negotiation.
107 """
108
109 def __init__(self, context_factory, acceptable_protocols):
110 verifyObject(IPolicyForHTTPS, context_factory)
111 self._wrapped_context_factory = context_factory
112 self._acceptable_protocols = acceptable_protocols
113
114 def creatorForNetloc(self, hostname, port):
115 options = self._wrapped_context_factory.creatorForNetloc(hostname, port)
116 _setAcceptableProtocols(options._ctx, self._acceptable_protocols)
117 return options
118
119
120 def load_context_factory_from_settings(settings, crawler):
121 ssl_method = openssl_methods[settings.get('DOWNLOADER_CLIENT_TLS_METHOD')]
122 context_factory_cls = load_object(settings['DOWNLOADER_CLIENTCONTEXTFACTORY'])
123 # try method-aware context factory
124 try:
125 context_factory = create_instance(
126 objcls=context_factory_cls,
127 settings=settings,
128 crawler=crawler,
129 method=ssl_method,
130 )
131 except TypeError:
132 # use context factory defaults
133 context_factory = create_instance(
134 objcls=context_factory_cls,
135 settings=settings,
136 crawler=crawler,
137 )
138 msg = (
139 f"{settings['DOWNLOADER_CLIENTCONTEXTFACTORY']} does not accept "
140 "a `method` argument (type OpenSSL.SSL method, e.g. "
141 "OpenSSL.SSL.SSLv23_METHOD) and/or a `tls_verbose_logging` "
142 "argument and/or a `tls_ciphers` argument. Please, upgrade your "
143 "context factory class to handle them or ignore them."
144 )
145 warnings.warn(msg)
146
147 return context_factory
148
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scrapy/core/downloader/contextfactory.py b/scrapy/core/downloader/contextfactory.py
--- a/scrapy/core/downloader/contextfactory.py
+++ b/scrapy/core/downloader/contextfactory.py
@@ -61,7 +61,9 @@
# kept for old-style HTTP/1.0 downloader context twisted calls,
# e.g. connectSSL()
def getContext(self, hostname=None, port=None):
- return self.getCertificateOptions().getContext()
+ ctx = self.getCertificateOptions().getContext()
+ ctx.set_options(0x4) # OP_LEGACY_SERVER_CONNECT
+ return ctx
def creatorForNetloc(self, hostname, port):
return ScrapyClientTLSOptions(hostname.decode("ascii"), self.getContext(),
diff --git a/scrapy/core/downloader/tls.py b/scrapy/core/downloader/tls.py
--- a/scrapy/core/downloader/tls.py
+++ b/scrapy/core/downloader/tls.py
@@ -17,10 +17,10 @@
openssl_methods = {
- METHOD_TLS: SSL.SSLv23_METHOD, # protocol negotiation (recommended)
- METHOD_TLSv10: SSL.TLSv1_METHOD, # TLS 1.0 only
- METHOD_TLSv11: getattr(SSL, 'TLSv1_1_METHOD', 5), # TLS 1.1 only
- METHOD_TLSv12: getattr(SSL, 'TLSv1_2_METHOD', 6), # TLS 1.2 only
+ METHOD_TLS: SSL.SSLv23_METHOD, # protocol negotiation (recommended)
+ METHOD_TLSv10: SSL.TLSv1_METHOD, # TLS 1.0 only
+ METHOD_TLSv11: SSL.TLSv1_1_METHOD, # TLS 1.1 only
+ METHOD_TLSv12: SSL.TLSv1_2_METHOD, # TLS 1.2 only
}
diff --git a/scrapy/utils/ssl.py b/scrapy/utils/ssl.py
--- a/scrapy/utils/ssl.py
+++ b/scrapy/utils/ssl.py
@@ -1,14 +1,9 @@
-import OpenSSL
+import OpenSSL.SSL
import OpenSSL._util as pyOpenSSLutil
from scrapy.utils.python import to_unicode
-# The OpenSSL symbol is present since 1.1.1 but it's not currently supported in any version of pyOpenSSL.
-# Using the binding directly, as this code does, requires cryptography 2.4.
-SSL_OP_NO_TLSv1_3 = getattr(pyOpenSSLutil.lib, 'SSL_OP_NO_TLSv1_3', 0)
-
-
def ffi_buf_to_string(buf):
return to_unicode(pyOpenSSLutil.ffi.string(buf))
@@ -22,9 +17,6 @@
def get_temp_key_info(ssl_object):
- if not hasattr(pyOpenSSLutil.lib, 'SSL_get_server_tmp_key'): # requires OpenSSL 1.0.2
- return None
-
# adapted from OpenSSL apps/s_cb.c::ssl_print_tmp_key()
temp_key_p = pyOpenSSLutil.ffi.new("EVP_PKEY **")
if not pyOpenSSLutil.lib.SSL_get_server_tmp_key(ssl_object, temp_key_p):
| {"golden_diff": "diff --git a/scrapy/core/downloader/contextfactory.py b/scrapy/core/downloader/contextfactory.py\n--- a/scrapy/core/downloader/contextfactory.py\n+++ b/scrapy/core/downloader/contextfactory.py\n@@ -61,7 +61,9 @@\n # kept for old-style HTTP/1.0 downloader context twisted calls,\n # e.g. connectSSL()\n def getContext(self, hostname=None, port=None):\n- return self.getCertificateOptions().getContext()\n+ ctx = self.getCertificateOptions().getContext()\n+ ctx.set_options(0x4) # OP_LEGACY_SERVER_CONNECT\n+ return ctx\n \n def creatorForNetloc(self, hostname, port):\n return ScrapyClientTLSOptions(hostname.decode(\"ascii\"), self.getContext(),\ndiff --git a/scrapy/core/downloader/tls.py b/scrapy/core/downloader/tls.py\n--- a/scrapy/core/downloader/tls.py\n+++ b/scrapy/core/downloader/tls.py\n@@ -17,10 +17,10 @@\n \n \n openssl_methods = {\n- METHOD_TLS: SSL.SSLv23_METHOD, # protocol negotiation (recommended)\n- METHOD_TLSv10: SSL.TLSv1_METHOD, # TLS 1.0 only\n- METHOD_TLSv11: getattr(SSL, 'TLSv1_1_METHOD', 5), # TLS 1.1 only\n- METHOD_TLSv12: getattr(SSL, 'TLSv1_2_METHOD', 6), # TLS 1.2 only\n+ METHOD_TLS: SSL.SSLv23_METHOD, # protocol negotiation (recommended)\n+ METHOD_TLSv10: SSL.TLSv1_METHOD, # TLS 1.0 only\n+ METHOD_TLSv11: SSL.TLSv1_1_METHOD, # TLS 1.1 only\n+ METHOD_TLSv12: SSL.TLSv1_2_METHOD, # TLS 1.2 only\n }\n \n \ndiff --git a/scrapy/utils/ssl.py b/scrapy/utils/ssl.py\n--- a/scrapy/utils/ssl.py\n+++ b/scrapy/utils/ssl.py\n@@ -1,14 +1,9 @@\n-import OpenSSL\n+import OpenSSL.SSL\n import OpenSSL._util as pyOpenSSLutil\n \n from scrapy.utils.python import to_unicode\n \n \n-# The OpenSSL symbol is present since 1.1.1 but it's not currently supported in any version of pyOpenSSL.\n-# Using the binding directly, as this code does, requires cryptography 2.4.\n-SSL_OP_NO_TLSv1_3 = getattr(pyOpenSSLutil.lib, 'SSL_OP_NO_TLSv1_3', 0)\n-\n-\n def ffi_buf_to_string(buf):\n return to_unicode(pyOpenSSLutil.ffi.string(buf))\n \n@@ -22,9 +17,6 @@\n \n \n def get_temp_key_info(ssl_object):\n- if not hasattr(pyOpenSSLutil.lib, 'SSL_get_server_tmp_key'): # requires OpenSSL 1.0.2\n- return None\n-\n # adapted from OpenSSL apps/s_cb.c::ssl_print_tmp_key()\n temp_key_p = pyOpenSSLutil.ffi.new(\"EVP_PKEY **\")\n if not pyOpenSSLutil.lib.SSL_get_server_tmp_key(ssl_object, temp_key_p):\n", "issue": "OpenSSL unsafe legacy renegotiation disabled error\n<!--\r\n\r\nThanks for taking an interest in Scrapy!\r\n\r\nIf you have a question that starts with \"How to...\", please see the Scrapy Community page: https://scrapy.org/community/.\r\nThe GitHub issue tracker's purpose is to deal with bug reports and feature requests for the project itself.\r\n\r\nKeep in mind that by filing an issue, you are expected to comply with Scrapy's Code of Conduct, including treating everyone with respect: https://github.com/scrapy/scrapy/blob/master/CODE_OF_CONDUCT.md\r\n\r\nThe following is a suggested template to structure your issue, you can find more guidelines at https://doc.scrapy.org/en/latest/contributing.html#reporting-bugs\r\n\r\n-->\r\n\r\n### Description\r\n\r\nI get an SSL issue on a working [site ](https://dorotheum.com) \r\n`twisted.web._newclient.ResponseNeverReceived: [<twisted.python.failure.Failure OpenSSL.SSL.Error: [('SSL routines', '', 'unsafe legacy renegotiation disabled')]>]`\r\n\r\n### Steps to Reproduce\r\n\r\n1. 
`scrapy shell https://dorotheum.com`\r\n\r\n**Expected behavior:** HTML page\r\n\r\n**Actual behavior:** the error above\r\n\r\n**Reproduces how often:** 100%\r\n\r\n### Versions\r\n\r\n```\r\nScrapy : 2.6.1\r\nlxml : 4.8.0.0\r\nlibxml2 : 2.9.4\r\ncssselect : 1.1.0\r\nparsel : 1.6.0\r\nw3lib : 1.22.0\r\nTwisted : 22.4.0\r\nPython : 3.9.12 (main, Mar 26 2022, 15:44:31) - [Clang 13.1.6 (clang-1316.0.21.2)]\r\npyOpenSSL : 22.0.0 (OpenSSL 3.0.3 3 May 2022)\r\ncryptography : 37.0.2\r\nPlatform : macOS-12.2.1-arm64-arm-64bit\r\n```\r\n\n", "before_files": [{"content": "import OpenSSL\nimport OpenSSL._util as pyOpenSSLutil\n\nfrom scrapy.utils.python import to_unicode\n\n\n# The OpenSSL symbol is present since 1.1.1 but it's not currently supported in any version of pyOpenSSL.\n# Using the binding directly, as this code does, requires cryptography 2.4.\nSSL_OP_NO_TLSv1_3 = getattr(pyOpenSSLutil.lib, 'SSL_OP_NO_TLSv1_3', 0)\n\n\ndef ffi_buf_to_string(buf):\n return to_unicode(pyOpenSSLutil.ffi.string(buf))\n\n\ndef x509name_to_string(x509name):\n # from OpenSSL.crypto.X509Name.__repr__\n result_buffer = pyOpenSSLutil.ffi.new(\"char[]\", 512)\n pyOpenSSLutil.lib.X509_NAME_oneline(x509name._name, result_buffer, len(result_buffer))\n\n return ffi_buf_to_string(result_buffer)\n\n\ndef get_temp_key_info(ssl_object):\n if not hasattr(pyOpenSSLutil.lib, 'SSL_get_server_tmp_key'): # requires OpenSSL 1.0.2\n return None\n\n # adapted from OpenSSL apps/s_cb.c::ssl_print_tmp_key()\n temp_key_p = pyOpenSSLutil.ffi.new(\"EVP_PKEY **\")\n if not pyOpenSSLutil.lib.SSL_get_server_tmp_key(ssl_object, temp_key_p):\n return None\n temp_key = temp_key_p[0]\n if temp_key == pyOpenSSLutil.ffi.NULL:\n return None\n temp_key = pyOpenSSLutil.ffi.gc(temp_key, pyOpenSSLutil.lib.EVP_PKEY_free)\n key_info = []\n key_type = pyOpenSSLutil.lib.EVP_PKEY_id(temp_key)\n if key_type == pyOpenSSLutil.lib.EVP_PKEY_RSA:\n key_info.append('RSA')\n elif key_type == pyOpenSSLutil.lib.EVP_PKEY_DH:\n key_info.append('DH')\n elif key_type == pyOpenSSLutil.lib.EVP_PKEY_EC:\n key_info.append('ECDH')\n ec_key = pyOpenSSLutil.lib.EVP_PKEY_get1_EC_KEY(temp_key)\n ec_key = pyOpenSSLutil.ffi.gc(ec_key, pyOpenSSLutil.lib.EC_KEY_free)\n nid = pyOpenSSLutil.lib.EC_GROUP_get_curve_name(pyOpenSSLutil.lib.EC_KEY_get0_group(ec_key))\n cname = pyOpenSSLutil.lib.EC_curve_nid2nist(nid)\n if cname == pyOpenSSLutil.ffi.NULL:\n cname = pyOpenSSLutil.lib.OBJ_nid2sn(nid)\n key_info.append(ffi_buf_to_string(cname))\n else:\n key_info.append(ffi_buf_to_string(pyOpenSSLutil.lib.OBJ_nid2sn(key_type)))\n key_info.append(f'{pyOpenSSLutil.lib.EVP_PKEY_bits(temp_key)} bits')\n return ', '.join(key_info)\n\n\ndef get_openssl_version():\n system_openssl = OpenSSL.SSL.SSLeay_version(\n OpenSSL.SSL.SSLEAY_VERSION\n ).decode('ascii', errors='replace')\n return f'{OpenSSL.version.__version__} ({system_openssl})'\n", "path": "scrapy/utils/ssl.py"}, {"content": "import logging\n\nfrom OpenSSL import SSL\nfrom service_identity.exceptions import CertificateError\nfrom twisted.internet._sslverify import ClientTLSOptions, verifyHostname, VerificationError\nfrom twisted.internet.ssl import AcceptableCiphers\n\nfrom scrapy.utils.ssl import x509name_to_string, get_temp_key_info\n\nlogger = logging.getLogger(__name__)\n\n\nMETHOD_TLS = 'TLS'\nMETHOD_TLSv10 = 'TLSv1.0'\nMETHOD_TLSv11 = 'TLSv1.1'\nMETHOD_TLSv12 = 'TLSv1.2'\n\n\nopenssl_methods = {\n METHOD_TLS: SSL.SSLv23_METHOD, # protocol negotiation (recommended)\n METHOD_TLSv10: SSL.TLSv1_METHOD, # TLS 1.0 only\n METHOD_TLSv11: 
getattr(SSL, 'TLSv1_1_METHOD', 5), # TLS 1.1 only\n METHOD_TLSv12: getattr(SSL, 'TLSv1_2_METHOD', 6), # TLS 1.2 only\n}\n\n\nclass ScrapyClientTLSOptions(ClientTLSOptions):\n \"\"\"\n SSL Client connection creator ignoring certificate verification errors\n (for genuinely invalid certificates or bugs in verification code).\n\n Same as Twisted's private _sslverify.ClientTLSOptions,\n except that VerificationError, CertificateError and ValueError\n exceptions are caught, so that the connection is not closed, only\n logging warnings. Also, HTTPS connection parameters logging is added.\n \"\"\"\n\n def __init__(self, hostname, ctx, verbose_logging=False):\n super().__init__(hostname, ctx)\n self.verbose_logging = verbose_logging\n\n def _identityVerifyingInfoCallback(self, connection, where, ret):\n if where & SSL.SSL_CB_HANDSHAKE_START:\n connection.set_tlsext_host_name(self._hostnameBytes)\n elif where & SSL.SSL_CB_HANDSHAKE_DONE:\n if self.verbose_logging:\n logger.debug('SSL connection to %s using protocol %s, cipher %s',\n self._hostnameASCII,\n connection.get_protocol_version_name(),\n connection.get_cipher_name(),\n )\n server_cert = connection.get_peer_certificate()\n logger.debug('SSL connection certificate: issuer \"%s\", subject \"%s\"',\n x509name_to_string(server_cert.get_issuer()),\n x509name_to_string(server_cert.get_subject()),\n )\n key_info = get_temp_key_info(connection._ssl)\n if key_info:\n logger.debug('SSL temp key: %s', key_info)\n\n try:\n verifyHostname(connection, self._hostnameASCII)\n except (CertificateError, VerificationError) as e:\n logger.warning(\n 'Remote certificate is not valid for hostname \"%s\"; %s',\n self._hostnameASCII, e)\n\n except ValueError as e:\n logger.warning(\n 'Ignoring error while verifying certificate '\n 'from host \"%s\" (exception: %r)',\n self._hostnameASCII, e)\n\n\nDEFAULT_CIPHERS = AcceptableCiphers.fromOpenSSLCipherString('DEFAULT')\n", "path": "scrapy/core/downloader/tls.py"}, {"content": "import warnings\n\nfrom OpenSSL import SSL\nfrom twisted.internet._sslverify import _setAcceptableProtocols\nfrom twisted.internet.ssl import optionsForClientTLS, CertificateOptions, platformTrust, AcceptableCiphers\nfrom twisted.web.client import BrowserLikePolicyForHTTPS\nfrom twisted.web.iweb import IPolicyForHTTPS\nfrom zope.interface.declarations import implementer\nfrom zope.interface.verify import verifyObject\n\nfrom scrapy.core.downloader.tls import DEFAULT_CIPHERS, openssl_methods, ScrapyClientTLSOptions\nfrom scrapy.utils.misc import create_instance, load_object\n\n\n@implementer(IPolicyForHTTPS)\nclass ScrapyClientContextFactory(BrowserLikePolicyForHTTPS):\n \"\"\"\n Non-peer-certificate verifying HTTPS context factory\n\n Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)\n which allows TLS protocol negotiation\n\n 'A TLS/SSL connection established with [this method] may\n understand the TLSv1, TLSv1.1 and TLSv1.2 protocols.'\n \"\"\"\n\n def __init__(self, method=SSL.SSLv23_METHOD, tls_verbose_logging=False, tls_ciphers=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._ssl_method = method\n self.tls_verbose_logging = tls_verbose_logging\n if tls_ciphers:\n self.tls_ciphers = AcceptableCiphers.fromOpenSSLCipherString(tls_ciphers)\n else:\n self.tls_ciphers = DEFAULT_CIPHERS\n\n @classmethod\n def from_settings(cls, settings, method=SSL.SSLv23_METHOD, *args, **kwargs):\n tls_verbose_logging = settings.getbool('DOWNLOADER_CLIENT_TLS_VERBOSE_LOGGING')\n tls_ciphers = 
settings['DOWNLOADER_CLIENT_TLS_CIPHERS']\n return cls(method=method, tls_verbose_logging=tls_verbose_logging, tls_ciphers=tls_ciphers, *args, **kwargs)\n\n def getCertificateOptions(self):\n # setting verify=True will require you to provide CAs\n # to verify against; in other words: it's not that simple\n\n # backward-compatible SSL/TLS method:\n #\n # * this will respect `method` attribute in often recommended\n # `ScrapyClientContextFactory` subclass\n # (https://github.com/scrapy/scrapy/issues/1429#issuecomment-131782133)\n #\n # * getattr() for `_ssl_method` attribute for context factories\n # not calling super().__init__\n return CertificateOptions(\n verify=False,\n method=getattr(self, 'method', getattr(self, '_ssl_method', None)),\n fixBrokenPeers=True,\n acceptableCiphers=self.tls_ciphers,\n )\n\n # kept for old-style HTTP/1.0 downloader context twisted calls,\n # e.g. connectSSL()\n def getContext(self, hostname=None, port=None):\n return self.getCertificateOptions().getContext()\n\n def creatorForNetloc(self, hostname, port):\n return ScrapyClientTLSOptions(hostname.decode(\"ascii\"), self.getContext(),\n verbose_logging=self.tls_verbose_logging)\n\n\n@implementer(IPolicyForHTTPS)\nclass BrowserLikeContextFactory(ScrapyClientContextFactory):\n \"\"\"\n Twisted-recommended context factory for web clients.\n\n Quoting the documentation of the :class:`~twisted.web.client.Agent` class:\n\n The default is to use a\n :class:`~twisted.web.client.BrowserLikePolicyForHTTPS`, so unless you\n have special requirements you can leave this as-is.\n\n :meth:`creatorForNetloc` is the same as\n :class:`~twisted.web.client.BrowserLikePolicyForHTTPS` except this context\n factory allows setting the TLS/SSL method to use.\n\n The default OpenSSL method is ``TLS_METHOD`` (also called\n ``SSLv23_METHOD``) which allows TLS protocol negotiation.\n \"\"\"\n\n def creatorForNetloc(self, hostname, port):\n # trustRoot set to platformTrust() will use the platform's root CAs.\n #\n # This means that a website like https://www.cacert.org will be rejected\n # by default, since CAcert.org CA certificate is seldom shipped.\n return optionsForClientTLS(\n hostname=hostname.decode(\"ascii\"),\n trustRoot=platformTrust(),\n extraCertificateOptions={'method': self._ssl_method},\n )\n\n\n@implementer(IPolicyForHTTPS)\nclass AcceptableProtocolsContextFactory:\n \"\"\"Context factory to used to override the acceptable protocols\n to set up the [OpenSSL.SSL.Context] for doing NPN and/or ALPN\n negotiation.\n \"\"\"\n\n def __init__(self, context_factory, acceptable_protocols):\n verifyObject(IPolicyForHTTPS, context_factory)\n self._wrapped_context_factory = context_factory\n self._acceptable_protocols = acceptable_protocols\n\n def creatorForNetloc(self, hostname, port):\n options = self._wrapped_context_factory.creatorForNetloc(hostname, port)\n _setAcceptableProtocols(options._ctx, self._acceptable_protocols)\n return options\n\n\ndef load_context_factory_from_settings(settings, crawler):\n ssl_method = openssl_methods[settings.get('DOWNLOADER_CLIENT_TLS_METHOD')]\n context_factory_cls = load_object(settings['DOWNLOADER_CLIENTCONTEXTFACTORY'])\n # try method-aware context factory\n try:\n context_factory = create_instance(\n objcls=context_factory_cls,\n settings=settings,\n crawler=crawler,\n method=ssl_method,\n )\n except TypeError:\n # use context factory defaults\n context_factory = create_instance(\n objcls=context_factory_cls,\n settings=settings,\n crawler=crawler,\n )\n msg = (\n 
f\"{settings['DOWNLOADER_CLIENTCONTEXTFACTORY']} does not accept \"\n \"a `method` argument (type OpenSSL.SSL method, e.g. \"\n \"OpenSSL.SSL.SSLv23_METHOD) and/or a `tls_verbose_logging` \"\n \"argument and/or a `tls_ciphers` argument. Please, upgrade your \"\n \"context factory class to handle them or ignore them.\"\n )\n warnings.warn(msg)\n\n return context_factory\n", "path": "scrapy/core/downloader/contextfactory.py"}], "after_files": [{"content": "import OpenSSL.SSL\nimport OpenSSL._util as pyOpenSSLutil\n\nfrom scrapy.utils.python import to_unicode\n\n\ndef ffi_buf_to_string(buf):\n return to_unicode(pyOpenSSLutil.ffi.string(buf))\n\n\ndef x509name_to_string(x509name):\n # from OpenSSL.crypto.X509Name.__repr__\n result_buffer = pyOpenSSLutil.ffi.new(\"char[]\", 512)\n pyOpenSSLutil.lib.X509_NAME_oneline(x509name._name, result_buffer, len(result_buffer))\n\n return ffi_buf_to_string(result_buffer)\n\n\ndef get_temp_key_info(ssl_object):\n # adapted from OpenSSL apps/s_cb.c::ssl_print_tmp_key()\n temp_key_p = pyOpenSSLutil.ffi.new(\"EVP_PKEY **\")\n if not pyOpenSSLutil.lib.SSL_get_server_tmp_key(ssl_object, temp_key_p):\n return None\n temp_key = temp_key_p[0]\n if temp_key == pyOpenSSLutil.ffi.NULL:\n return None\n temp_key = pyOpenSSLutil.ffi.gc(temp_key, pyOpenSSLutil.lib.EVP_PKEY_free)\n key_info = []\n key_type = pyOpenSSLutil.lib.EVP_PKEY_id(temp_key)\n if key_type == pyOpenSSLutil.lib.EVP_PKEY_RSA:\n key_info.append('RSA')\n elif key_type == pyOpenSSLutil.lib.EVP_PKEY_DH:\n key_info.append('DH')\n elif key_type == pyOpenSSLutil.lib.EVP_PKEY_EC:\n key_info.append('ECDH')\n ec_key = pyOpenSSLutil.lib.EVP_PKEY_get1_EC_KEY(temp_key)\n ec_key = pyOpenSSLutil.ffi.gc(ec_key, pyOpenSSLutil.lib.EC_KEY_free)\n nid = pyOpenSSLutil.lib.EC_GROUP_get_curve_name(pyOpenSSLutil.lib.EC_KEY_get0_group(ec_key))\n cname = pyOpenSSLutil.lib.EC_curve_nid2nist(nid)\n if cname == pyOpenSSLutil.ffi.NULL:\n cname = pyOpenSSLutil.lib.OBJ_nid2sn(nid)\n key_info.append(ffi_buf_to_string(cname))\n else:\n key_info.append(ffi_buf_to_string(pyOpenSSLutil.lib.OBJ_nid2sn(key_type)))\n key_info.append(f'{pyOpenSSLutil.lib.EVP_PKEY_bits(temp_key)} bits')\n return ', '.join(key_info)\n\n\ndef get_openssl_version():\n system_openssl = OpenSSL.SSL.SSLeay_version(\n OpenSSL.SSL.SSLEAY_VERSION\n ).decode('ascii', errors='replace')\n return f'{OpenSSL.version.__version__} ({system_openssl})'\n", "path": "scrapy/utils/ssl.py"}, {"content": "import logging\n\nfrom OpenSSL import SSL\nfrom service_identity.exceptions import CertificateError\nfrom twisted.internet._sslverify import ClientTLSOptions, verifyHostname, VerificationError\nfrom twisted.internet.ssl import AcceptableCiphers\n\nfrom scrapy.utils.ssl import x509name_to_string, get_temp_key_info\n\nlogger = logging.getLogger(__name__)\n\n\nMETHOD_TLS = 'TLS'\nMETHOD_TLSv10 = 'TLSv1.0'\nMETHOD_TLSv11 = 'TLSv1.1'\nMETHOD_TLSv12 = 'TLSv1.2'\n\n\nopenssl_methods = {\n METHOD_TLS: SSL.SSLv23_METHOD, # protocol negotiation (recommended)\n METHOD_TLSv10: SSL.TLSv1_METHOD, # TLS 1.0 only\n METHOD_TLSv11: SSL.TLSv1_1_METHOD, # TLS 1.1 only\n METHOD_TLSv12: SSL.TLSv1_2_METHOD, # TLS 1.2 only\n}\n\n\nclass ScrapyClientTLSOptions(ClientTLSOptions):\n \"\"\"\n SSL Client connection creator ignoring certificate verification errors\n (for genuinely invalid certificates or bugs in verification code).\n\n Same as Twisted's private _sslverify.ClientTLSOptions,\n except that VerificationError, CertificateError and ValueError\n exceptions are caught, so that the connection is not 
closed, only\n logging warnings. Also, HTTPS connection parameters logging is added.\n \"\"\"\n\n def __init__(self, hostname, ctx, verbose_logging=False):\n super().__init__(hostname, ctx)\n self.verbose_logging = verbose_logging\n\n def _identityVerifyingInfoCallback(self, connection, where, ret):\n if where & SSL.SSL_CB_HANDSHAKE_START:\n connection.set_tlsext_host_name(self._hostnameBytes)\n elif where & SSL.SSL_CB_HANDSHAKE_DONE:\n if self.verbose_logging:\n logger.debug('SSL connection to %s using protocol %s, cipher %s',\n self._hostnameASCII,\n connection.get_protocol_version_name(),\n connection.get_cipher_name(),\n )\n server_cert = connection.get_peer_certificate()\n logger.debug('SSL connection certificate: issuer \"%s\", subject \"%s\"',\n x509name_to_string(server_cert.get_issuer()),\n x509name_to_string(server_cert.get_subject()),\n )\n key_info = get_temp_key_info(connection._ssl)\n if key_info:\n logger.debug('SSL temp key: %s', key_info)\n\n try:\n verifyHostname(connection, self._hostnameASCII)\n except (CertificateError, VerificationError) as e:\n logger.warning(\n 'Remote certificate is not valid for hostname \"%s\"; %s',\n self._hostnameASCII, e)\n\n except ValueError as e:\n logger.warning(\n 'Ignoring error while verifying certificate '\n 'from host \"%s\" (exception: %r)',\n self._hostnameASCII, e)\n\n\nDEFAULT_CIPHERS = AcceptableCiphers.fromOpenSSLCipherString('DEFAULT')\n", "path": "scrapy/core/downloader/tls.py"}, {"content": "import warnings\n\nfrom OpenSSL import SSL\nfrom twisted.internet._sslverify import _setAcceptableProtocols\nfrom twisted.internet.ssl import optionsForClientTLS, CertificateOptions, platformTrust, AcceptableCiphers\nfrom twisted.web.client import BrowserLikePolicyForHTTPS\nfrom twisted.web.iweb import IPolicyForHTTPS\nfrom zope.interface.declarations import implementer\nfrom zope.interface.verify import verifyObject\n\nfrom scrapy.core.downloader.tls import DEFAULT_CIPHERS, openssl_methods, ScrapyClientTLSOptions\nfrom scrapy.utils.misc import create_instance, load_object\n\n\n@implementer(IPolicyForHTTPS)\nclass ScrapyClientContextFactory(BrowserLikePolicyForHTTPS):\n \"\"\"\n Non-peer-certificate verifying HTTPS context factory\n\n Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)\n which allows TLS protocol negotiation\n\n 'A TLS/SSL connection established with [this method] may\n understand the TLSv1, TLSv1.1 and TLSv1.2 protocols.'\n \"\"\"\n\n def __init__(self, method=SSL.SSLv23_METHOD, tls_verbose_logging=False, tls_ciphers=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._ssl_method = method\n self.tls_verbose_logging = tls_verbose_logging\n if tls_ciphers:\n self.tls_ciphers = AcceptableCiphers.fromOpenSSLCipherString(tls_ciphers)\n else:\n self.tls_ciphers = DEFAULT_CIPHERS\n\n @classmethod\n def from_settings(cls, settings, method=SSL.SSLv23_METHOD, *args, **kwargs):\n tls_verbose_logging = settings.getbool('DOWNLOADER_CLIENT_TLS_VERBOSE_LOGGING')\n tls_ciphers = settings['DOWNLOADER_CLIENT_TLS_CIPHERS']\n return cls(method=method, tls_verbose_logging=tls_verbose_logging, tls_ciphers=tls_ciphers, *args, **kwargs)\n\n def getCertificateOptions(self):\n # setting verify=True will require you to provide CAs\n # to verify against; in other words: it's not that simple\n\n # backward-compatible SSL/TLS method:\n #\n # * this will respect `method` attribute in often recommended\n # `ScrapyClientContextFactory` subclass\n # (https://github.com/scrapy/scrapy/issues/1429#issuecomment-131782133)\n #\n # 
* getattr() for `_ssl_method` attribute for context factories\n # not calling super().__init__\n return CertificateOptions(\n verify=False,\n method=getattr(self, 'method', getattr(self, '_ssl_method', None)),\n fixBrokenPeers=True,\n acceptableCiphers=self.tls_ciphers,\n )\n\n # kept for old-style HTTP/1.0 downloader context twisted calls,\n # e.g. connectSSL()\n def getContext(self, hostname=None, port=None):\n ctx = self.getCertificateOptions().getContext()\n ctx.set_options(0x4) # OP_LEGACY_SERVER_CONNECT\n return ctx\n\n def creatorForNetloc(self, hostname, port):\n return ScrapyClientTLSOptions(hostname.decode(\"ascii\"), self.getContext(),\n verbose_logging=self.tls_verbose_logging)\n\n\n@implementer(IPolicyForHTTPS)\nclass BrowserLikeContextFactory(ScrapyClientContextFactory):\n \"\"\"\n Twisted-recommended context factory for web clients.\n\n Quoting the documentation of the :class:`~twisted.web.client.Agent` class:\n\n The default is to use a\n :class:`~twisted.web.client.BrowserLikePolicyForHTTPS`, so unless you\n have special requirements you can leave this as-is.\n\n :meth:`creatorForNetloc` is the same as\n :class:`~twisted.web.client.BrowserLikePolicyForHTTPS` except this context\n factory allows setting the TLS/SSL method to use.\n\n The default OpenSSL method is ``TLS_METHOD`` (also called\n ``SSLv23_METHOD``) which allows TLS protocol negotiation.\n \"\"\"\n\n def creatorForNetloc(self, hostname, port):\n # trustRoot set to platformTrust() will use the platform's root CAs.\n #\n # This means that a website like https://www.cacert.org will be rejected\n # by default, since CAcert.org CA certificate is seldom shipped.\n return optionsForClientTLS(\n hostname=hostname.decode(\"ascii\"),\n trustRoot=platformTrust(),\n extraCertificateOptions={'method': self._ssl_method},\n )\n\n\n@implementer(IPolicyForHTTPS)\nclass AcceptableProtocolsContextFactory:\n \"\"\"Context factory to used to override the acceptable protocols\n to set up the [OpenSSL.SSL.Context] for doing NPN and/or ALPN\n negotiation.\n \"\"\"\n\n def __init__(self, context_factory, acceptable_protocols):\n verifyObject(IPolicyForHTTPS, context_factory)\n self._wrapped_context_factory = context_factory\n self._acceptable_protocols = acceptable_protocols\n\n def creatorForNetloc(self, hostname, port):\n options = self._wrapped_context_factory.creatorForNetloc(hostname, port)\n _setAcceptableProtocols(options._ctx, self._acceptable_protocols)\n return options\n\n\ndef load_context_factory_from_settings(settings, crawler):\n ssl_method = openssl_methods[settings.get('DOWNLOADER_CLIENT_TLS_METHOD')]\n context_factory_cls = load_object(settings['DOWNLOADER_CLIENTCONTEXTFACTORY'])\n # try method-aware context factory\n try:\n context_factory = create_instance(\n objcls=context_factory_cls,\n settings=settings,\n crawler=crawler,\n method=ssl_method,\n )\n except TypeError:\n # use context factory defaults\n context_factory = create_instance(\n objcls=context_factory_cls,\n settings=settings,\n crawler=crawler,\n )\n msg = (\n f\"{settings['DOWNLOADER_CLIENTCONTEXTFACTORY']} does not accept \"\n \"a `method` argument (type OpenSSL.SSL method, e.g. \"\n \"OpenSSL.SSL.SSLv23_METHOD) and/or a `tls_verbose_logging` \"\n \"argument and/or a `tls_ciphers` argument. Please, upgrade your \"\n \"context factory class to handle them or ignore them.\"\n )\n warnings.warn(msg)\n\n return context_factory\n", "path": "scrapy/core/downloader/contextfactory.py"}]} | 4,048 | 719 |
gh_patches_debug_35141 | rasdani/github-patches | git_diff | modin-project__modin-1137 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Parallel agents for TeamCity
Right now, TeamCity is taking >5 hours to complete. We should create parallel agents for non-dependent builds to run concurrently. The breakdown should be as follows:
1. `MODIN_ENGINE=ray`, `MODIN_ENGINE=dask`, `MODIN_ENGINE=python`
a. `python -m pytest modin/pandas/test/test_dataframe.py::TestDataFrameReduction_A`
b. `python -m pytest modin/pandas/test/test_dataframe.py::TestDataFrameReduction_B`
c. `python -m pytest modin/pandas/test/test_dataframe.py::TestDataFrameBinary`
d. `python -m pytest modin/pandas/test/test_dataframe.py::TestDataFrameMapMetadata`
e. `python -m pytest modin/pandas/test/test_dataframe.py::TestDataFrameUDF`
f. `python -m pytest modin/pandas/test/test_dataframe.py::TestDataFrameDefault`
g. `python -m pytest modin/pandas/test/test_dataframe.py::TestDataFrameWindow`
h. `python -m pytest modin/pandas/test/test_dataframe.py::TestDataFrameIndexing`
i. `python -m pytest modin/pandas/test/test_dataframe.py::TestDataFrameIter`
j. `python -m pytest modin/pandas/test/test_dataframe.py::TestDataFrameJoinSort`
k. `python -m pytest modin/pandas/test/test_groupby.py`
l. `python -m pytest modin/pandas/test/test_series.py modin/pandas/test/test_concat.py modin/pandas/test/test_reshape.py modin/pandas/test/test_general.py modin/pandas/test/test_io.py modin/pandas/test/test_io_exp.py`
2. `MODIN_ENGINE=ray MODIN_EXPERIMENTAL=True MODIN_BACKEND=pyarrow python -m pytest modin/pandas/test/test_io.py::test_from_csv`
In total, 37 agents. This does not include agents we will need for Windows and MacOS.
--- END ISSUE ---
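To make the intended split concrete, the following is a minimal local approximation of the agent breakdown listed in the issue above. It is only a sketch: the worker count and the subset of shards are arbitrary choices for illustration, not something prescribed by the issue or by Modin's CI configuration.

```python
import os
import subprocess
from concurrent.futures import ThreadPoolExecutor
from itertools import product

# A few of the shards listed above; the full matrix would cover all of a-l.
ENGINES = ["ray", "dask", "python"]
SHARDS = [
    "modin/pandas/test/test_dataframe.py::TestDataFrameReduction_A",
    "modin/pandas/test/test_dataframe.py::TestDataFrameBinary",
    "modin/pandas/test/test_groupby.py",
]


def run_shard(engine, shard):
    # Each "agent" is simulated as a pytest subprocess with its own engine.
    env = dict(os.environ, MODIN_ENGINE=engine)
    return subprocess.call(["python", "-m", "pytest", shard], env=env)


with ThreadPoolExecutor(max_workers=4) as pool:
    exit_codes = list(pool.map(lambda job: run_shard(*job), product(ENGINES, SHARDS)))
print("failed shards:", sum(code != 0 for code in exit_codes))
```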
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `modin/pandas/__init__.py`
Content:
```
1 import pandas
2
3 __pandas_version__ = "1.0.1"
4
5 if pandas.__version__ != __pandas_version__:
6 import warnings
7
8 warnings.warn(
9 "The pandas version installed does not match the required pandas version in "
10 "Modin. This may cause undesired side effects!".format(__pandas_version__)
11 )
12
13 from pandas import (
14 eval,
15 unique,
16 value_counts,
17 cut,
18 to_numeric,
19 factorize,
20 test,
21 qcut,
22 date_range,
23 period_range,
24 Index,
25 MultiIndex,
26 CategoricalIndex,
27 bdate_range,
28 DatetimeIndex,
29 Timedelta,
30 Timestamp,
31 to_timedelta,
32 set_eng_float_format,
33 options,
34 set_option,
35 NaT,
36 PeriodIndex,
37 Categorical,
38 Interval,
39 UInt8Dtype,
40 UInt16Dtype,
41 UInt32Dtype,
42 UInt64Dtype,
43 SparseDtype,
44 Int8Dtype,
45 Int16Dtype,
46 Int32Dtype,
47 Int64Dtype,
48 StringDtype,
49 BooleanDtype,
50 CategoricalDtype,
51 DatetimeTZDtype,
52 IntervalDtype,
53 PeriodDtype,
54 RangeIndex,
55 Int64Index,
56 UInt64Index,
57 Float64Index,
58 TimedeltaIndex,
59 IntervalIndex,
60 IndexSlice,
61 Grouper,
62 array,
63 Period,
64 show_versions,
65 DateOffset,
66 timedelta_range,
67 infer_freq,
68 interval_range,
69 ExcelWriter,
70 datetime,
71 NamedAgg,
72 NA,
73 )
74 import threading
75 import os
76 import types
77 import sys
78
79 from .. import __version__
80 from .concat import concat
81 from .dataframe import DataFrame
82 from .datetimes import to_datetime
83 from .io import (
84 read_csv,
85 read_parquet,
86 read_json,
87 read_html,
88 read_clipboard,
89 read_excel,
90 read_hdf,
91 read_feather,
92 read_stata,
93 read_sas,
94 read_pickle,
95 read_sql,
96 read_gbq,
97 read_table,
98 read_fwf,
99 read_sql_table,
100 read_sql_query,
101 read_spss,
102 ExcelFile,
103 to_pickle,
104 HDFStore,
105 json_normalize,
106 read_orc,
107 )
108 from .reshape import get_dummies, melt, crosstab, lreshape, wide_to_long
109 from .series import Series
110 from .general import (
111 isna,
112 isnull,
113 merge,
114 merge_asof,
115 merge_ordered,
116 pivot_table,
117 notnull,
118 notna,
119 pivot,
120 )
121 from .plotting import Plotting as plotting
122 from .. import __execution_engine__ as execution_engine
123
124 # Set this so that Pandas doesn't try to multithread by itself
125 os.environ["OMP_NUM_THREADS"] = "1"
126 num_cpus = 1
127
128
129 def initialize_ray():
130 import ray
131
132 """Initializes ray based on environment variables and internal defaults."""
133 if threading.current_thread().name == "MainThread":
134 import secrets
135
136 plasma_directory = None
137 cluster = os.environ.get("MODIN_RAY_CLUSTER", None)
138 redis_address = os.environ.get("MODIN_REDIS_ADDRESS", None)
139 redis_password = secrets.token_hex(16)
140 if cluster == "True" and redis_address is not None:
141 # We only start ray in a cluster setting for the head node.
142 ray.init(
143 include_webui=False,
144 ignore_reinit_error=True,
145 redis_address=redis_address,
146 redis_password=redis_password,
147 logging_level=100,
148 )
149 elif cluster is None:
150 object_store_memory = os.environ.get("MODIN_MEMORY", None)
151 if os.environ.get("MODIN_OUT_OF_CORE", "False").title() == "True":
152 from tempfile import gettempdir
153
154 plasma_directory = gettempdir()
155 # We may have already set the memory from the environment variable, we don't
156 # want to overwrite that value if we have.
157 if object_store_memory is None:
158 # Round down to the nearest Gigabyte.
159 mem_bytes = ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9
160 # Default to 8x memory for out of core
161 object_store_memory = 8 * mem_bytes
162 # In case anything failed above, we can still improve the memory for Modin.
163 if object_store_memory is None:
164 # Round down to the nearest Gigabyte.
165 object_store_memory = int(
166 0.6 * ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9
167 )
168 # If the memory pool is smaller than 2GB, just use the default in ray.
169 if object_store_memory == 0:
170 object_store_memory = None
171 else:
172 object_store_memory = int(object_store_memory)
173 ray.init(
174 include_webui=False,
175 ignore_reinit_error=True,
176 plasma_directory=plasma_directory,
177 object_store_memory=object_store_memory,
178 redis_address=redis_address,
179 redis_password=redis_password,
180 logging_level=100,
181 memory=object_store_memory,
182 )
183 # Register custom serializer for method objects to avoid warning message.
184 # We serialize `MethodType` objects when we use AxisPartition operations.
185 ray.register_custom_serializer(types.MethodType, use_pickle=True)
186
187 # Register a fix import function to run on all_workers including the driver.
188 # This is a hack solution to fix #647, #746
189 def move_stdlib_ahead_of_site_packages(*args):
190 site_packages_path = None
191 site_packages_path_index = -1
192 for i, path in enumerate(sys.path):
193 if sys.exec_prefix in path and path.endswith("site-packages"):
194 site_packages_path = path
195 site_packages_path_index = i
196 # break on first found
197 break
198
199 if site_packages_path is not None:
200 # stdlib packages layout as follows:
201 # - python3.x
202 # - typing.py
203 # - site-packages/
204 # - pandas
205 # So extracting the dirname of the site_packages can point us
206 # to the directory containing standard libraries.
207 sys.path.insert(
208 site_packages_path_index, os.path.dirname(site_packages_path)
209 )
210
211 move_stdlib_ahead_of_site_packages()
212 ray.worker.global_worker.run_function_on_all_workers(
213 move_stdlib_ahead_of_site_packages
214 )
215
216
217 if execution_engine == "Ray":
218 import ray
219
220 initialize_ray()
221 num_cpus = ray.cluster_resources()["CPU"]
222 elif execution_engine == "Dask": # pragma: no cover
223 from distributed.client import get_client
224 import warnings
225
226 if threading.current_thread().name == "MainThread":
227 warnings.warn("The Dask Engine for Modin is experimental.")
228 try:
229 client = get_client()
230 except ValueError:
231 from distributed import Client
232 import multiprocessing
233
234 num_cpus = multiprocessing.cpu_count()
235 client = Client(n_workers=num_cpus)
236 elif execution_engine != "Python":
237 raise ImportError("Unrecognized execution engine: {}.".format(execution_engine))
238
239 DEFAULT_NPARTITIONS = max(4, int(num_cpus))
240
241 __all__ = [
242 "DataFrame",
243 "Series",
244 "read_csv",
245 "read_parquet",
246 "read_json",
247 "read_html",
248 "read_clipboard",
249 "read_excel",
250 "read_hdf",
251 "read_feather",
252 "read_stata",
253 "read_sas",
254 "read_pickle",
255 "read_sql",
256 "read_gbq",
257 "read_table",
258 "read_spss",
259 "read_orc",
260 "json_normalize",
261 "concat",
262 "eval",
263 "unique",
264 "value_counts",
265 "cut",
266 "to_numeric",
267 "factorize",
268 "test",
269 "qcut",
270 "to_datetime",
271 "get_dummies",
272 "isna",
273 "isnull",
274 "merge",
275 "pivot_table",
276 "date_range",
277 "Index",
278 "MultiIndex",
279 "Series",
280 "bdate_range",
281 "period_range",
282 "DatetimeIndex",
283 "to_timedelta",
284 "set_eng_float_format",
285 "options",
286 "set_option",
287 "CategoricalIndex",
288 "Timedelta",
289 "Timestamp",
290 "NaT",
291 "PeriodIndex",
292 "Categorical",
293 "__version__",
294 "melt",
295 "crosstab",
296 "plotting",
297 "Interval",
298 "UInt8Dtype",
299 "UInt16Dtype",
300 "UInt32Dtype",
301 "UInt64Dtype",
302 "SparseDtype",
303 "Int8Dtype",
304 "Int16Dtype",
305 "Int32Dtype",
306 "Int64Dtype",
307 "CategoricalDtype",
308 "DatetimeTZDtype",
309 "IntervalDtype",
310 "PeriodDtype",
311 "BooleanDtype",
312 "StringDtype",
313 "NA",
314 "RangeIndex",
315 "Int64Index",
316 "UInt64Index",
317 "Float64Index",
318 "TimedeltaIndex",
319 "IntervalIndex",
320 "IndexSlice",
321 "Grouper",
322 "array",
323 "Period",
324 "show_versions",
325 "DateOffset",
326 "timedelta_range",
327 "infer_freq",
328 "interval_range",
329 "ExcelWriter",
330 "read_fwf",
331 "read_sql_table",
332 "read_sql_query",
333 "ExcelFile",
334 "to_pickle",
335 "HDFStore",
336 "lreshape",
337 "wide_to_long",
338 "merge_asof",
339 "merge_ordered",
340 "notnull",
341 "notna",
342 "pivot",
343 "datetime",
344 "NamedAgg",
345 "DEFAULT_NPARTITIONS",
346 ]
347
348 del pandas
349
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/modin/pandas/__init__.py b/modin/pandas/__init__.py
--- a/modin/pandas/__init__.py
+++ b/modin/pandas/__init__.py
@@ -75,6 +75,7 @@
import os
import types
import sys
+import multiprocessing
from .. import __version__
from .concat import concat
@@ -134,12 +135,14 @@
import secrets
plasma_directory = None
+ num_cpus = os.environ.get("MODIN_CPUS", None) or multiprocessing.cpu_count()
cluster = os.environ.get("MODIN_RAY_CLUSTER", None)
redis_address = os.environ.get("MODIN_REDIS_ADDRESS", None)
redis_password = secrets.token_hex(16)
if cluster == "True" and redis_address is not None:
# We only start ray in a cluster setting for the head node.
ray.init(
+ num_cpus=int(num_cpus),
include_webui=False,
ignore_reinit_error=True,
redis_address=redis_address,
@@ -171,6 +174,7 @@
else:
object_store_memory = int(object_store_memory)
ray.init(
+ num_cpus=int(num_cpus),
include_webui=False,
ignore_reinit_error=True,
plasma_directory=plasma_directory,
@@ -229,10 +233,9 @@
client = get_client()
except ValueError:
from distributed import Client
- import multiprocessing
- num_cpus = multiprocessing.cpu_count()
- client = Client(n_workers=num_cpus)
+ num_cpus = os.environ.get("MODIN_CPUS", None) or multiprocessing.cpu_count()
+ client = Client(n_workers=int(num_cpus))
elif execution_engine != "Python":
raise ImportError("Unrecognized execution engine: {}.".format(execution_engine))
| {"golden_diff": "diff --git a/modin/pandas/__init__.py b/modin/pandas/__init__.py\n--- a/modin/pandas/__init__.py\n+++ b/modin/pandas/__init__.py\n@@ -75,6 +75,7 @@\n import os\n import types\n import sys\n+import multiprocessing\n \n from .. import __version__\n from .concat import concat\n@@ -134,12 +135,14 @@\n import secrets\n \n plasma_directory = None\n+ num_cpus = os.environ.get(\"MODIN_CPUS\", None) or multiprocessing.cpu_count()\n cluster = os.environ.get(\"MODIN_RAY_CLUSTER\", None)\n redis_address = os.environ.get(\"MODIN_REDIS_ADDRESS\", None)\n redis_password = secrets.token_hex(16)\n if cluster == \"True\" and redis_address is not None:\n # We only start ray in a cluster setting for the head node.\n ray.init(\n+ num_cpus=int(num_cpus),\n include_webui=False,\n ignore_reinit_error=True,\n redis_address=redis_address,\n@@ -171,6 +174,7 @@\n else:\n object_store_memory = int(object_store_memory)\n ray.init(\n+ num_cpus=int(num_cpus),\n include_webui=False,\n ignore_reinit_error=True,\n plasma_directory=plasma_directory,\n@@ -229,10 +233,9 @@\n client = get_client()\n except ValueError:\n from distributed import Client\n- import multiprocessing\n \n- num_cpus = multiprocessing.cpu_count()\n- client = Client(n_workers=num_cpus)\n+ num_cpus = os.environ.get(\"MODIN_CPUS\", None) or multiprocessing.cpu_count()\n+ client = Client(n_workers=int(num_cpus))\n elif execution_engine != \"Python\":\n raise ImportError(\"Unrecognized execution engine: {}.\".format(execution_engine))\n", "issue": "Parallel agents for TeamCity\nRight now, TeamCity is taking >5 hours to complete. We should create parallel agents for non-dependent builds to run concurrently. The breakdown should be as follows:\r\n\r\n1.\t`MODIN_ENGINE=ray`, `MODIN_ENGINE=dask`, `MODIN_ENGINE=python`\r\na.\t`python -m pytest modin/pandas/test/test_dataframe.py::TestDataFrameReduction_A`\r\nb.\t`python -m pytest modin/pandas/test/test_dataframe.py::TestDataFrameReduction_B`\r\nc.\t`python -m pytest modin/pandas/test/test_dataframe.py::TestDataFrameBinary`\r\nd.\t`python -m pytest modin/pandas/test/test_dataframe.py::TestDataFrameMapMetadata`\r\ne.\t`python -m pytest modin/pandas/test/test_dataframe.py::TestDataFrameUDF`\r\nf.\t`python -m pytest modin/pandas/test/test_dataframe.py::TestDataFrameDefault`\r\ng.\t`python -m pytest modin/pandas/test/test_dataframe.py::TestDataFrameWindow`\r\nh.\t`python -m pytest modin/pandas/test/test_dataframe.py::TestDataFrameIndexing`\r\ni.\t`python -m pytest modin/pandas/test/test_dataframe.py::TestDataFrameIter`\r\nj.\t`python -m pytest modin/pandas/test/test_dataframe.py::TestDataFrameJoinSort`\r\nk.\t`python -m pytest modin/pandas/test/test_groupby.py`\r\nl.\t`python -m pytest modin/pandas/test/test_series.py modin/pandas/test/test_concat.py modin/pandas/test/test_reshape.py modin/pandas/test/test_general.py modin/pandas/test/test_io.py modin/pandas/test/test_io_exp.py`\r\n2.\t`MODIN_ENGINE=ray MODIN_EXPERIMENTAL=True MODIN_BACKEND=pyarrow python -m pytest modin/pandas/test/test_io.py::test_from_csv`\r\n\r\nIn total, 37 agents. This does not include agents we will need for Windows and MacOS. \r\n\n", "before_files": [{"content": "import pandas\n\n__pandas_version__ = \"1.0.1\"\n\nif pandas.__version__ != __pandas_version__:\n import warnings\n\n warnings.warn(\n \"The pandas version installed does not match the required pandas version in \"\n \"Modin. 
This may cause undesired side effects!\".format(__pandas_version__)\n )\n\nfrom pandas import (\n eval,\n unique,\n value_counts,\n cut,\n to_numeric,\n factorize,\n test,\n qcut,\n date_range,\n period_range,\n Index,\n MultiIndex,\n CategoricalIndex,\n bdate_range,\n DatetimeIndex,\n Timedelta,\n Timestamp,\n to_timedelta,\n set_eng_float_format,\n options,\n set_option,\n NaT,\n PeriodIndex,\n Categorical,\n Interval,\n UInt8Dtype,\n UInt16Dtype,\n UInt32Dtype,\n UInt64Dtype,\n SparseDtype,\n Int8Dtype,\n Int16Dtype,\n Int32Dtype,\n Int64Dtype,\n StringDtype,\n BooleanDtype,\n CategoricalDtype,\n DatetimeTZDtype,\n IntervalDtype,\n PeriodDtype,\n RangeIndex,\n Int64Index,\n UInt64Index,\n Float64Index,\n TimedeltaIndex,\n IntervalIndex,\n IndexSlice,\n Grouper,\n array,\n Period,\n show_versions,\n DateOffset,\n timedelta_range,\n infer_freq,\n interval_range,\n ExcelWriter,\n datetime,\n NamedAgg,\n NA,\n)\nimport threading\nimport os\nimport types\nimport sys\n\nfrom .. import __version__\nfrom .concat import concat\nfrom .dataframe import DataFrame\nfrom .datetimes import to_datetime\nfrom .io import (\n read_csv,\n read_parquet,\n read_json,\n read_html,\n read_clipboard,\n read_excel,\n read_hdf,\n read_feather,\n read_stata,\n read_sas,\n read_pickle,\n read_sql,\n read_gbq,\n read_table,\n read_fwf,\n read_sql_table,\n read_sql_query,\n read_spss,\n ExcelFile,\n to_pickle,\n HDFStore,\n json_normalize,\n read_orc,\n)\nfrom .reshape import get_dummies, melt, crosstab, lreshape, wide_to_long\nfrom .series import Series\nfrom .general import (\n isna,\n isnull,\n merge,\n merge_asof,\n merge_ordered,\n pivot_table,\n notnull,\n notna,\n pivot,\n)\nfrom .plotting import Plotting as plotting\nfrom .. import __execution_engine__ as execution_engine\n\n# Set this so that Pandas doesn't try to multithread by itself\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\nnum_cpus = 1\n\n\ndef initialize_ray():\n import ray\n\n \"\"\"Initializes ray based on environment variables and internal defaults.\"\"\"\n if threading.current_thread().name == \"MainThread\":\n import secrets\n\n plasma_directory = None\n cluster = os.environ.get(\"MODIN_RAY_CLUSTER\", None)\n redis_address = os.environ.get(\"MODIN_REDIS_ADDRESS\", None)\n redis_password = secrets.token_hex(16)\n if cluster == \"True\" and redis_address is not None:\n # We only start ray in a cluster setting for the head node.\n ray.init(\n include_webui=False,\n ignore_reinit_error=True,\n redis_address=redis_address,\n redis_password=redis_password,\n logging_level=100,\n )\n elif cluster is None:\n object_store_memory = os.environ.get(\"MODIN_MEMORY\", None)\n if os.environ.get(\"MODIN_OUT_OF_CORE\", \"False\").title() == \"True\":\n from tempfile import gettempdir\n\n plasma_directory = gettempdir()\n # We may have already set the memory from the environment variable, we don't\n # want to overwrite that value if we have.\n if object_store_memory is None:\n # Round down to the nearest Gigabyte.\n mem_bytes = ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9\n # Default to 8x memory for out of core\n object_store_memory = 8 * mem_bytes\n # In case anything failed above, we can still improve the memory for Modin.\n if object_store_memory is None:\n # Round down to the nearest Gigabyte.\n object_store_memory = int(\n 0.6 * ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9\n )\n # If the memory pool is smaller than 2GB, just use the default in ray.\n if object_store_memory == 0:\n object_store_memory = None\n else:\n object_store_memory = 
int(object_store_memory)\n ray.init(\n include_webui=False,\n ignore_reinit_error=True,\n plasma_directory=plasma_directory,\n object_store_memory=object_store_memory,\n redis_address=redis_address,\n redis_password=redis_password,\n logging_level=100,\n memory=object_store_memory,\n )\n # Register custom serializer for method objects to avoid warning message.\n # We serialize `MethodType` objects when we use AxisPartition operations.\n ray.register_custom_serializer(types.MethodType, use_pickle=True)\n\n # Register a fix import function to run on all_workers including the driver.\n # This is a hack solution to fix #647, #746\n def move_stdlib_ahead_of_site_packages(*args):\n site_packages_path = None\n site_packages_path_index = -1\n for i, path in enumerate(sys.path):\n if sys.exec_prefix in path and path.endswith(\"site-packages\"):\n site_packages_path = path\n site_packages_path_index = i\n # break on first found\n break\n\n if site_packages_path is not None:\n # stdlib packages layout as follows:\n # - python3.x\n # - typing.py\n # - site-packages/\n # - pandas\n # So extracting the dirname of the site_packages can point us\n # to the directory containing standard libraries.\n sys.path.insert(\n site_packages_path_index, os.path.dirname(site_packages_path)\n )\n\n move_stdlib_ahead_of_site_packages()\n ray.worker.global_worker.run_function_on_all_workers(\n move_stdlib_ahead_of_site_packages\n )\n\n\nif execution_engine == \"Ray\":\n import ray\n\n initialize_ray()\n num_cpus = ray.cluster_resources()[\"CPU\"]\nelif execution_engine == \"Dask\": # pragma: no cover\n from distributed.client import get_client\n import warnings\n\n if threading.current_thread().name == \"MainThread\":\n warnings.warn(\"The Dask Engine for Modin is experimental.\")\n try:\n client = get_client()\n except ValueError:\n from distributed import Client\n import multiprocessing\n\n num_cpus = multiprocessing.cpu_count()\n client = Client(n_workers=num_cpus)\nelif execution_engine != \"Python\":\n raise ImportError(\"Unrecognized execution engine: {}.\".format(execution_engine))\n\nDEFAULT_NPARTITIONS = max(4, int(num_cpus))\n\n__all__ = [\n \"DataFrame\",\n \"Series\",\n \"read_csv\",\n \"read_parquet\",\n \"read_json\",\n \"read_html\",\n \"read_clipboard\",\n \"read_excel\",\n \"read_hdf\",\n \"read_feather\",\n \"read_stata\",\n \"read_sas\",\n \"read_pickle\",\n \"read_sql\",\n \"read_gbq\",\n \"read_table\",\n \"read_spss\",\n \"read_orc\",\n \"json_normalize\",\n \"concat\",\n \"eval\",\n \"unique\",\n \"value_counts\",\n \"cut\",\n \"to_numeric\",\n \"factorize\",\n \"test\",\n \"qcut\",\n \"to_datetime\",\n \"get_dummies\",\n \"isna\",\n \"isnull\",\n \"merge\",\n \"pivot_table\",\n \"date_range\",\n \"Index\",\n \"MultiIndex\",\n \"Series\",\n \"bdate_range\",\n \"period_range\",\n \"DatetimeIndex\",\n \"to_timedelta\",\n \"set_eng_float_format\",\n \"options\",\n \"set_option\",\n \"CategoricalIndex\",\n \"Timedelta\",\n \"Timestamp\",\n \"NaT\",\n \"PeriodIndex\",\n \"Categorical\",\n \"__version__\",\n \"melt\",\n \"crosstab\",\n \"plotting\",\n \"Interval\",\n \"UInt8Dtype\",\n \"UInt16Dtype\",\n \"UInt32Dtype\",\n \"UInt64Dtype\",\n \"SparseDtype\",\n \"Int8Dtype\",\n \"Int16Dtype\",\n \"Int32Dtype\",\n \"Int64Dtype\",\n \"CategoricalDtype\",\n \"DatetimeTZDtype\",\n \"IntervalDtype\",\n \"PeriodDtype\",\n \"BooleanDtype\",\n \"StringDtype\",\n \"NA\",\n \"RangeIndex\",\n \"Int64Index\",\n \"UInt64Index\",\n \"Float64Index\",\n \"TimedeltaIndex\",\n \"IntervalIndex\",\n \"IndexSlice\",\n 
\"Grouper\",\n \"array\",\n \"Period\",\n \"show_versions\",\n \"DateOffset\",\n \"timedelta_range\",\n \"infer_freq\",\n \"interval_range\",\n \"ExcelWriter\",\n \"read_fwf\",\n \"read_sql_table\",\n \"read_sql_query\",\n \"ExcelFile\",\n \"to_pickle\",\n \"HDFStore\",\n \"lreshape\",\n \"wide_to_long\",\n \"merge_asof\",\n \"merge_ordered\",\n \"notnull\",\n \"notna\",\n \"pivot\",\n \"datetime\",\n \"NamedAgg\",\n \"DEFAULT_NPARTITIONS\",\n]\n\ndel pandas\n", "path": "modin/pandas/__init__.py"}], "after_files": [{"content": "import pandas\n\n__pandas_version__ = \"1.0.1\"\n\nif pandas.__version__ != __pandas_version__:\n import warnings\n\n warnings.warn(\n \"The pandas version installed does not match the required pandas version in \"\n \"Modin. This may cause undesired side effects!\".format(__pandas_version__)\n )\n\nfrom pandas import (\n eval,\n unique,\n value_counts,\n cut,\n to_numeric,\n factorize,\n test,\n qcut,\n date_range,\n period_range,\n Index,\n MultiIndex,\n CategoricalIndex,\n bdate_range,\n DatetimeIndex,\n Timedelta,\n Timestamp,\n to_timedelta,\n set_eng_float_format,\n options,\n set_option,\n NaT,\n PeriodIndex,\n Categorical,\n Interval,\n UInt8Dtype,\n UInt16Dtype,\n UInt32Dtype,\n UInt64Dtype,\n SparseDtype,\n Int8Dtype,\n Int16Dtype,\n Int32Dtype,\n Int64Dtype,\n StringDtype,\n BooleanDtype,\n CategoricalDtype,\n DatetimeTZDtype,\n IntervalDtype,\n PeriodDtype,\n RangeIndex,\n Int64Index,\n UInt64Index,\n Float64Index,\n TimedeltaIndex,\n IntervalIndex,\n IndexSlice,\n Grouper,\n array,\n Period,\n show_versions,\n DateOffset,\n timedelta_range,\n infer_freq,\n interval_range,\n ExcelWriter,\n datetime,\n NamedAgg,\n NA,\n)\nimport threading\nimport os\nimport types\nimport sys\nimport multiprocessing\n\nfrom .. import __version__\nfrom .concat import concat\nfrom .dataframe import DataFrame\nfrom .datetimes import to_datetime\nfrom .io import (\n read_csv,\n read_parquet,\n read_json,\n read_html,\n read_clipboard,\n read_excel,\n read_hdf,\n read_feather,\n read_stata,\n read_sas,\n read_pickle,\n read_sql,\n read_gbq,\n read_table,\n read_fwf,\n read_sql_table,\n read_sql_query,\n read_spss,\n ExcelFile,\n to_pickle,\n HDFStore,\n json_normalize,\n read_orc,\n)\nfrom .reshape import get_dummies, melt, crosstab, lreshape, wide_to_long\nfrom .series import Series\nfrom .general import (\n isna,\n isnull,\n merge,\n merge_asof,\n merge_ordered,\n pivot_table,\n notnull,\n notna,\n pivot,\n)\nfrom .plotting import Plotting as plotting\nfrom .. 
import __execution_engine__ as execution_engine\n\n# Set this so that Pandas doesn't try to multithread by itself\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\nnum_cpus = 1\n\n\ndef initialize_ray():\n import ray\n\n \"\"\"Initializes ray based on environment variables and internal defaults.\"\"\"\n if threading.current_thread().name == \"MainThread\":\n import secrets\n\n plasma_directory = None\n num_cpus = os.environ.get(\"MODIN_CPUS\", None) or multiprocessing.cpu_count()\n cluster = os.environ.get(\"MODIN_RAY_CLUSTER\", None)\n redis_address = os.environ.get(\"MODIN_REDIS_ADDRESS\", None)\n redis_password = secrets.token_hex(16)\n if cluster == \"True\" and redis_address is not None:\n # We only start ray in a cluster setting for the head node.\n ray.init(\n num_cpus=int(num_cpus),\n include_webui=False,\n ignore_reinit_error=True,\n redis_address=redis_address,\n redis_password=redis_password,\n logging_level=100,\n )\n elif cluster is None:\n object_store_memory = os.environ.get(\"MODIN_MEMORY\", None)\n if os.environ.get(\"MODIN_OUT_OF_CORE\", \"False\").title() == \"True\":\n from tempfile import gettempdir\n\n plasma_directory = gettempdir()\n # We may have already set the memory from the environment variable, we don't\n # want to overwrite that value if we have.\n if object_store_memory is None:\n # Round down to the nearest Gigabyte.\n mem_bytes = ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9\n # Default to 8x memory for out of core\n object_store_memory = 8 * mem_bytes\n # In case anything failed above, we can still improve the memory for Modin.\n if object_store_memory is None:\n # Round down to the nearest Gigabyte.\n object_store_memory = int(\n 0.6 * ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9\n )\n # If the memory pool is smaller than 2GB, just use the default in ray.\n if object_store_memory == 0:\n object_store_memory = None\n else:\n object_store_memory = int(object_store_memory)\n ray.init(\n num_cpus=int(num_cpus),\n include_webui=False,\n ignore_reinit_error=True,\n plasma_directory=plasma_directory,\n object_store_memory=object_store_memory,\n redis_address=redis_address,\n redis_password=redis_password,\n logging_level=100,\n memory=object_store_memory,\n )\n # Register custom serializer for method objects to avoid warning message.\n # We serialize `MethodType` objects when we use AxisPartition operations.\n ray.register_custom_serializer(types.MethodType, use_pickle=True)\n\n # Register a fix import function to run on all_workers including the driver.\n # This is a hack solution to fix #647, #746\n def move_stdlib_ahead_of_site_packages(*args):\n site_packages_path = None\n site_packages_path_index = -1\n for i, path in enumerate(sys.path):\n if sys.exec_prefix in path and path.endswith(\"site-packages\"):\n site_packages_path = path\n site_packages_path_index = i\n # break on first found\n break\n\n if site_packages_path is not None:\n # stdlib packages layout as follows:\n # - python3.x\n # - typing.py\n # - site-packages/\n # - pandas\n # So extracting the dirname of the site_packages can point us\n # to the directory containing standard libraries.\n sys.path.insert(\n site_packages_path_index, os.path.dirname(site_packages_path)\n )\n\n move_stdlib_ahead_of_site_packages()\n ray.worker.global_worker.run_function_on_all_workers(\n move_stdlib_ahead_of_site_packages\n )\n\n\nif execution_engine == \"Ray\":\n import ray\n\n initialize_ray()\n num_cpus = ray.cluster_resources()[\"CPU\"]\nelif execution_engine == \"Dask\": # pragma: no cover\n from 
distributed.client import get_client\n import warnings\n\n if threading.current_thread().name == \"MainThread\":\n warnings.warn(\"The Dask Engine for Modin is experimental.\")\n try:\n client = get_client()\n except ValueError:\n from distributed import Client\n\n num_cpus = os.environ.get(\"MODIN_CPUS\", None) or multiprocessing.cpu_count()\n client = Client(n_workers=int(num_cpus))\nelif execution_engine != \"Python\":\n raise ImportError(\"Unrecognized execution engine: {}.\".format(execution_engine))\n\nDEFAULT_NPARTITIONS = max(4, int(num_cpus))\n\n__all__ = [\n \"DataFrame\",\n \"Series\",\n \"read_csv\",\n \"read_parquet\",\n \"read_json\",\n \"read_html\",\n \"read_clipboard\",\n \"read_excel\",\n \"read_hdf\",\n \"read_feather\",\n \"read_stata\",\n \"read_sas\",\n \"read_pickle\",\n \"read_sql\",\n \"read_gbq\",\n \"read_table\",\n \"read_spss\",\n \"read_orc\",\n \"json_normalize\",\n \"concat\",\n \"eval\",\n \"unique\",\n \"value_counts\",\n \"cut\",\n \"to_numeric\",\n \"factorize\",\n \"test\",\n \"qcut\",\n \"to_datetime\",\n \"get_dummies\",\n \"isna\",\n \"isnull\",\n \"merge\",\n \"pivot_table\",\n \"date_range\",\n \"Index\",\n \"MultiIndex\",\n \"Series\",\n \"bdate_range\",\n \"period_range\",\n \"DatetimeIndex\",\n \"to_timedelta\",\n \"set_eng_float_format\",\n \"options\",\n \"set_option\",\n \"CategoricalIndex\",\n \"Timedelta\",\n \"Timestamp\",\n \"NaT\",\n \"PeriodIndex\",\n \"Categorical\",\n \"__version__\",\n \"melt\",\n \"crosstab\",\n \"plotting\",\n \"Interval\",\n \"UInt8Dtype\",\n \"UInt16Dtype\",\n \"UInt32Dtype\",\n \"UInt64Dtype\",\n \"SparseDtype\",\n \"Int8Dtype\",\n \"Int16Dtype\",\n \"Int32Dtype\",\n \"Int64Dtype\",\n \"CategoricalDtype\",\n \"DatetimeTZDtype\",\n \"IntervalDtype\",\n \"PeriodDtype\",\n \"BooleanDtype\",\n \"StringDtype\",\n \"NA\",\n \"RangeIndex\",\n \"Int64Index\",\n \"UInt64Index\",\n \"Float64Index\",\n \"TimedeltaIndex\",\n \"IntervalIndex\",\n \"IndexSlice\",\n \"Grouper\",\n \"array\",\n \"Period\",\n \"show_versions\",\n \"DateOffset\",\n \"timedelta_range\",\n \"infer_freq\",\n \"interval_range\",\n \"ExcelWriter\",\n \"read_fwf\",\n \"read_sql_table\",\n \"read_sql_query\",\n \"ExcelFile\",\n \"to_pickle\",\n \"HDFStore\",\n \"lreshape\",\n \"wide_to_long\",\n \"merge_asof\",\n \"merge_ordered\",\n \"notnull\",\n \"notna\",\n \"pivot\",\n \"datetime\",\n \"NamedAgg\",\n \"DEFAULT_NPARTITIONS\",\n]\n\ndel pandas\n", "path": "modin/pandas/__init__.py"}]} | 3,764 | 401 |
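A short usage sketch for the record above: the patch reads a `MODIN_CPUS` environment variable before starting the Ray or Dask engine, so the variable has to be set before the first `modin.pandas` import. The engine name and CPU count below are arbitrary example values, not part of the original record.

```python
# Set before importing modin.pandas, since the module above calls
# initialize_ray() (or creates the Dask Client) at import time.
import os

os.environ["MODIN_CPUS"] = "4"      # read by initialize_ray() / the Dask branch
os.environ["MODIN_ENGINE"] = "ray"  # or "dask"

import modin.pandas as pd

df = pd.DataFrame({"a": range(8)})
print(df.sum())
```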
gh_patches_debug_3080 | rasdani/github-patches | git_diff | google__turbinia-1099 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plaso VSS option incorrect
https://github.com/log2timeline/plaso/blob/9cc50c972b257d6cbbea38fa8b39f0bf027e0960/plaso/cli/storage_media_tool.py#L581
^ option should be --no_vss in the location below
https://github.com/google/turbinia/blob/86158a95a0b134978628c1680d0997667ec7c935/turbinia/workers/plaso.py#L43
Please check how this will work if recipes pass in the --vss_stores option
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `turbinia/workers/binary_extractor.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2015 Google Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Task to extract binary files from an evidence object provided."""
16
17 from __future__ import unicode_literals
18
19 import logging
20 import json
21 import os
22 import textwrap
23
24 from turbinia import TurbiniaException
25 from turbinia import config
26 from turbinia.evidence import EvidenceState as state
27 from turbinia.workers import TurbiniaTask
28 from turbinia.evidence import BinaryExtraction
29
30
31 class BinaryExtractorTask(TurbiniaTask):
32 """Extract binaries out of evidence and provide JSON file with hashes.
33
34 Attributes:
35 json_path(str): path to output JSON file.
36 binary_extraction_dir(str): path to extraction directory.
37 """
38
39 REQUIRED_STATES = [state.ATTACHED]
40
41 TASK_CONFIG = {
42 # This is an arbitrary path that will be put into a custom artifact
43 # definition so that the files at this path are extracted. See the path
44 # specification format in the ForensicArtifacts documentation:
45 # https://artifacts.readthedocs.io/en/latest/sources/Format-specification.html
46 'binary_extraction_path': None
47 }
48
49 def __init__(self, *args, **kwargs):
50 """Initializes BinaryExtractorTask."""
51 super(BinaryExtractorTask, self).__init__(*args, **kwargs)
52 self.json_path = None
53 self.binary_extraction_dir = None
54
55 def check_extraction(self):
56 """Checks counts for extracted binaries and hashes.
57
58 Returns:
59 Tuple(
60 binary_cnt(int): Number of extracted binaries.
61 hash_cnt(int): Number of extracted hashes.
62 )
63 """
64
65 # Check if hashes.json file was generated.
66 if not os.path.exists(self.json_path):
67 raise TurbiniaException(
68 'The file {0:s} was not found. Please ensure you '
69 'have Plaso version 20191203 or greater deployed'.format(
70 self.json_path))
71
72 with open(self.json_path) as json_file:
73 hashes = json.load(json_file)
74
75 binary_cnt = sum(
76 len(files) for _, _, files in os.walk(self.binary_extraction_dir)) - 1
77 hash_cnt = len(hashes)
78
79 return (binary_cnt, hash_cnt)
80
81 def run(self, evidence, result):
82 """Task that extracts binaries with image_export.py.
83
84 Args:
85 evidence (Evidence object): The evidence we will process.
86 result (TurbiniaTaskResult): The object to place task results into.
87
88 Returns:
89 TurbiniaTaskResult object.
90 """
91
92 config.LoadConfig()
93 binary_extraction_evidence = BinaryExtraction()
94
95 binary_extraction_evidence.local_path = self.output_dir
96 binary_extraction_evidence.uncompressed_directory = self.output_dir
97 image_export_log = os.path.join(self.output_dir, 'binary_extraction.log')
98 self.binary_extraction_dir = os.path.join(
99 self.output_dir, 'extracted_binaries')
100 self.json_path = os.path.join(self.binary_extraction_dir, 'hashes.json')
101
102 cmd = [
103 'image_export.py', '--partitions', 'all', '--volumes', 'all',
104 '--no_vss', '--unattended', '--logfile', image_export_log
105 ]
106
107 if self.task_config.get('binary_extraction_path'):
108 artifact_dir = os.path.join(self.tmp_dir, 'artifacts')
109 artifact_file = os.path.join(artifact_dir, 'artifacts.yaml')
110 os.mkdir(artifact_dir)
111 binary_extraction_path = self.task_config.get('binary_extraction_path')
112 result.log(
113 'Using custom artifact path {0:s}'.format(binary_extraction_path))
114
115 artifact_text = textwrap.dedent(
116 """
117 name: TurbiniaCustomArtifact
118 doc: Ad hoc artifact created for file extraction.
119 sources:
120 - type: FILE
121 attributes:
122 paths: ['{0:s}']
123 """)
124 artifact_text = artifact_text.format(binary_extraction_path)
125
126 with open(artifact_file, 'wb') as artifact:
127 artifact.write(artifact_text.encode('utf-8'))
128 cmd.extend([
129 '--custom_artifact_definitions', artifact_file, '--artifact_filters',
130 'TurbiniaCustomArtifact'
131 ])
132 else:
133 cmd.extend(['--signatures', 'elf,exe_mz'])
134
135 if evidence.credentials:
136 for credential_type, credential_data in evidence.credentials:
137 cmd.extend([
138 '--credential', '{0:s}:{1:s}'.format(
139 credential_type, credential_data)
140 ])
141
142 if config.DEBUG_TASKS or self.task_config.get('debug_tasks'):
143 cmd.append('-d')
144 cmd.extend(['-w', self.binary_extraction_dir, evidence.local_path])
145
146 result.log('Running image_export as [{0:s}]'.format(' '.join(cmd)))
147 self.execute(
148 cmd, result, log_files=[image_export_log, self.json_path],
149 new_evidence=[binary_extraction_evidence])
150
151 try:
152 binary_cnt, hash_cnt = self.check_extraction()
153 except TurbiniaException as exception:
154 message = 'File extraction failed: {0!s}'.format(exception)
155 result.close(self, success=False, status=message)
156 return result
157
158 status = (
159 'Extracted {0:d} hashes and {1:d} files from the '
160 'evidence.'.format(hash_cnt, binary_cnt))
161
162 if hash_cnt != binary_cnt:
163 result.log(
164 'Number of extracted binaries is not equal to the number '
165 'of extracted hashes. This might indicate issues with '
166 'image_export.py. Check binary_extraction.log for more '
167 'details.', logging.WARNING)
168
169 binary_extraction_evidence.compress()
170 result.close(self, success=True, status=status)
171
172 return result
173
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/turbinia/workers/binary_extractor.py b/turbinia/workers/binary_extractor.py
--- a/turbinia/workers/binary_extractor.py
+++ b/turbinia/workers/binary_extractor.py
@@ -101,7 +101,7 @@
cmd = [
'image_export.py', '--partitions', 'all', '--volumes', 'all',
- '--no_vss', '--unattended', '--logfile', image_export_log
+ '--vss_stores', 'none', '--unattended', '--logfile', image_export_log
]
if self.task_config.get('binary_extraction_path'):
| {"golden_diff": "diff --git a/turbinia/workers/binary_extractor.py b/turbinia/workers/binary_extractor.py\n--- a/turbinia/workers/binary_extractor.py\n+++ b/turbinia/workers/binary_extractor.py\n@@ -101,7 +101,7 @@\n \n cmd = [\n 'image_export.py', '--partitions', 'all', '--volumes', 'all',\n- '--no_vss', '--unattended', '--logfile', image_export_log\n+ '--vss_stores', 'none', '--unattended', '--logfile', image_export_log\n ]\n \n if self.task_config.get('binary_extraction_path'):\n", "issue": "plaso VSS option incorrect\nhttps://github.com/log2timeline/plaso/blob/9cc50c972b257d6cbbea38fa8b39f0bf027e0960/plaso/cli/storage_media_tool.py#L581\r\n\r\n^ option should be --no_vss in below location\r\nhttps://github.com/google/turbinia/blob/86158a95a0b134978628c1680d0997667ec7c935/turbinia/workers/plaso.py#L43\r\n\r\nPlease check how this will work if recipes pass in the --vss_stores option\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Task to extract binary files from an evidence object provided.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport json\nimport os\nimport textwrap\n\nfrom turbinia import TurbiniaException\nfrom turbinia import config\nfrom turbinia.evidence import EvidenceState as state\nfrom turbinia.workers import TurbiniaTask\nfrom turbinia.evidence import BinaryExtraction\n\n\nclass BinaryExtractorTask(TurbiniaTask):\n \"\"\"Extract binaries out of evidence and provide JSON file with hashes.\n\n Attributes:\n json_path(str): path to output JSON file.\n binary_extraction_dir(str): path to extraction directory.\n \"\"\"\n\n REQUIRED_STATES = [state.ATTACHED]\n\n TASK_CONFIG = {\n # This is an arbitrary path that will be put into a custom artifact\n # definition so that the files at this path are extracted. See the path\n # specification format in the ForensicArtifacts documentation:\n # https://artifacts.readthedocs.io/en/latest/sources/Format-specification.html\n 'binary_extraction_path': None\n }\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initializes BinaryExtractorTask.\"\"\"\n super(BinaryExtractorTask, self).__init__(*args, **kwargs)\n self.json_path = None\n self.binary_extraction_dir = None\n\n def check_extraction(self):\n \"\"\"Checks counts for extracted binaries and hashes.\n\n Returns:\n Tuple(\n binary_cnt(int): Number of extracted binaries.\n hash_cnt(int): Number of extracted hashes.\n )\n \"\"\"\n\n # Check if hashes.json file was generated.\n if not os.path.exists(self.json_path):\n raise TurbiniaException(\n 'The file {0:s} was not found. 
Please ensure you '\n 'have Plaso version 20191203 or greater deployed'.format(\n self.json_path))\n\n with open(self.json_path) as json_file:\n hashes = json.load(json_file)\n\n binary_cnt = sum(\n len(files) for _, _, files in os.walk(self.binary_extraction_dir)) - 1\n hash_cnt = len(hashes)\n\n return (binary_cnt, hash_cnt)\n\n def run(self, evidence, result):\n \"\"\"Task that extracts binaries with image_export.py.\n\n Args:\n evidence (Evidence object): The evidence we will process.\n result (TurbiniaTaskResult): The object to place task results into.\n\n Returns:\n TurbiniaTaskResult object.\n \"\"\"\n\n config.LoadConfig()\n binary_extraction_evidence = BinaryExtraction()\n\n binary_extraction_evidence.local_path = self.output_dir\n binary_extraction_evidence.uncompressed_directory = self.output_dir\n image_export_log = os.path.join(self.output_dir, 'binary_extraction.log')\n self.binary_extraction_dir = os.path.join(\n self.output_dir, 'extracted_binaries')\n self.json_path = os.path.join(self.binary_extraction_dir, 'hashes.json')\n\n cmd = [\n 'image_export.py', '--partitions', 'all', '--volumes', 'all',\n '--no_vss', '--unattended', '--logfile', image_export_log\n ]\n\n if self.task_config.get('binary_extraction_path'):\n artifact_dir = os.path.join(self.tmp_dir, 'artifacts')\n artifact_file = os.path.join(artifact_dir, 'artifacts.yaml')\n os.mkdir(artifact_dir)\n binary_extraction_path = self.task_config.get('binary_extraction_path')\n result.log(\n 'Using custom artifact path {0:s}'.format(binary_extraction_path))\n\n artifact_text = textwrap.dedent(\n \"\"\"\n name: TurbiniaCustomArtifact\n doc: Ad hoc artifact created for file extraction.\n sources:\n - type: FILE\n attributes:\n paths: ['{0:s}']\n \"\"\")\n artifact_text = artifact_text.format(binary_extraction_path)\n\n with open(artifact_file, 'wb') as artifact:\n artifact.write(artifact_text.encode('utf-8'))\n cmd.extend([\n '--custom_artifact_definitions', artifact_file, '--artifact_filters',\n 'TurbiniaCustomArtifact'\n ])\n else:\n cmd.extend(['--signatures', 'elf,exe_mz'])\n\n if evidence.credentials:\n for credential_type, credential_data in evidence.credentials:\n cmd.extend([\n '--credential', '{0:s}:{1:s}'.format(\n credential_type, credential_data)\n ])\n\n if config.DEBUG_TASKS or self.task_config.get('debug_tasks'):\n cmd.append('-d')\n cmd.extend(['-w', self.binary_extraction_dir, evidence.local_path])\n\n result.log('Running image_export as [{0:s}]'.format(' '.join(cmd)))\n self.execute(\n cmd, result, log_files=[image_export_log, self.json_path],\n new_evidence=[binary_extraction_evidence])\n\n try:\n binary_cnt, hash_cnt = self.check_extraction()\n except TurbiniaException as exception:\n message = 'File extraction failed: {0!s}'.format(exception)\n result.close(self, success=False, status=message)\n return result\n\n status = (\n 'Extracted {0:d} hashes and {1:d} files from the '\n 'evidence.'.format(hash_cnt, binary_cnt))\n\n if hash_cnt != binary_cnt:\n result.log(\n 'Number of extracted binaries is not equal to the number '\n 'of extracted hashes. This might indicate issues with '\n 'image_export.py. 
Check binary_extraction.log for more '\n 'details.', logging.WARNING)\n\n binary_extraction_evidence.compress()\n result.close(self, success=True, status=status)\n\n return result\n", "path": "turbinia/workers/binary_extractor.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Task to extract binary files from an evidence object provided.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport json\nimport os\nimport textwrap\n\nfrom turbinia import TurbiniaException\nfrom turbinia import config\nfrom turbinia.evidence import EvidenceState as state\nfrom turbinia.workers import TurbiniaTask\nfrom turbinia.evidence import BinaryExtraction\n\n\nclass BinaryExtractorTask(TurbiniaTask):\n \"\"\"Extract binaries out of evidence and provide JSON file with hashes.\n\n Attributes:\n json_path(str): path to output JSON file.\n binary_extraction_dir(str): path to extraction directory.\n \"\"\"\n\n REQUIRED_STATES = [state.ATTACHED]\n\n TASK_CONFIG = {\n # This is an arbitrary path that will be put into a custom artifact\n # definition so that the files at this path are extracted. See the path\n # specification format in the ForensicArtifacts documentation:\n # https://artifacts.readthedocs.io/en/latest/sources/Format-specification.html\n 'binary_extraction_path': None\n }\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initializes BinaryExtractorTask.\"\"\"\n super(BinaryExtractorTask, self).__init__(*args, **kwargs)\n self.json_path = None\n self.binary_extraction_dir = None\n\n def check_extraction(self):\n \"\"\"Checks counts for extracted binaries and hashes.\n\n Returns:\n Tuple(\n binary_cnt(int): Number of extracted binaries.\n hash_cnt(int): Number of extracted hashes.\n )\n \"\"\"\n\n # Check if hashes.json file was generated.\n if not os.path.exists(self.json_path):\n raise TurbiniaException(\n 'The file {0:s} was not found. 
Please ensure you '\n 'have Plaso version 20191203 or greater deployed'.format(\n self.json_path))\n\n with open(self.json_path) as json_file:\n hashes = json.load(json_file)\n\n binary_cnt = sum(\n len(files) for _, _, files in os.walk(self.binary_extraction_dir)) - 1\n hash_cnt = len(hashes)\n\n return (binary_cnt, hash_cnt)\n\n def run(self, evidence, result):\n \"\"\"Task that extracts binaries with image_export.py.\n\n Args:\n evidence (Evidence object): The evidence we will process.\n result (TurbiniaTaskResult): The object to place task results into.\n\n Returns:\n TurbiniaTaskResult object.\n \"\"\"\n\n config.LoadConfig()\n binary_extraction_evidence = BinaryExtraction()\n\n binary_extraction_evidence.local_path = self.output_dir\n binary_extraction_evidence.uncompressed_directory = self.output_dir\n image_export_log = os.path.join(self.output_dir, 'binary_extraction.log')\n self.binary_extraction_dir = os.path.join(\n self.output_dir, 'extracted_binaries')\n self.json_path = os.path.join(self.binary_extraction_dir, 'hashes.json')\n\n cmd = [\n 'image_export.py', '--partitions', 'all', '--volumes', 'all',\n '--vss_stores', 'none', '--unattended', '--logfile', image_export_log\n ]\n\n if self.task_config.get('binary_extraction_path'):\n artifact_dir = os.path.join(self.tmp_dir, 'artifacts')\n artifact_file = os.path.join(artifact_dir, 'artifacts.yaml')\n os.mkdir(artifact_dir)\n binary_extraction_path = self.task_config.get('binary_extraction_path')\n result.log(\n 'Using custom artifact path {0:s}'.format(binary_extraction_path))\n\n artifact_text = textwrap.dedent(\n \"\"\"\n name: TurbiniaCustomArtifact\n doc: Ad hoc artifact created for file extraction.\n sources:\n - type: FILE\n attributes:\n paths: ['{0:s}']\n \"\"\")\n artifact_text = artifact_text.format(binary_extraction_path)\n\n with open(artifact_file, 'wb') as artifact:\n artifact.write(artifact_text.encode('utf-8'))\n cmd.extend([\n '--custom_artifact_definitions', artifact_file, '--artifact_filters',\n 'TurbiniaCustomArtifact'\n ])\n else:\n cmd.extend(['--signatures', 'elf,exe_mz'])\n\n if evidence.credentials:\n for credential_type, credential_data in evidence.credentials:\n cmd.extend([\n '--credential', '{0:s}:{1:s}'.format(\n credential_type, credential_data)\n ])\n\n if config.DEBUG_TASKS or self.task_config.get('debug_tasks'):\n cmd.append('-d')\n cmd.extend(['-w', self.binary_extraction_dir, evidence.local_path])\n\n result.log('Running image_export as [{0:s}]'.format(' '.join(cmd)))\n self.execute(\n cmd, result, log_files=[image_export_log, self.json_path],\n new_evidence=[binary_extraction_evidence])\n\n try:\n binary_cnt, hash_cnt = self.check_extraction()\n except TurbiniaException as exception:\n message = 'File extraction failed: {0!s}'.format(exception)\n result.close(self, success=False, status=message)\n return result\n\n status = (\n 'Extracted {0:d} hashes and {1:d} files from the '\n 'evidence.'.format(hash_cnt, binary_cnt))\n\n if hash_cnt != binary_cnt:\n result.log(\n 'Number of extracted binaries is not equal to the number '\n 'of extracted hashes. This might indicate issues with '\n 'image_export.py. Check binary_extraction.log for more '\n 'details.', logging.WARNING)\n\n binary_extraction_evidence.compress()\n result.close(self, success=True, status=status)\n\n return result\n", "path": "turbinia/workers/binary_extractor.py"}]} | 2,178 | 145 |
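For readers who want to reproduce the corrected plaso invocation outside Turbinia, a minimal sketch follows. The flag spelling mirrors the golden diff above (`--vss_stores none` replacing `--no_vss` for `image_export.py`); the evidence, log, and output paths are placeholders, not values from the record.

```python
# Illustrative only: mirrors the patched command built in
# turbinia/workers/binary_extractor.py, with placeholder paths.
import subprocess

cmd = [
    'image_export.py', '--partitions', 'all', '--volumes', 'all',
    '--vss_stores', 'none', '--unattended',
    '--logfile', '/tmp/binary_extraction.log',
    '--signatures', 'elf,exe_mz',
    '-w', '/tmp/extracted_binaries',
    '/evidence/disk.raw',
]
subprocess.run(cmd, check=True)
```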
gh_patches_debug_797 | rasdani/github-patches | git_diff | pre-commit__pre-commit-167 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
npmrc causes npm to install to home directory instead of nodeenv
Here is what happened when I tried to get eslint installed:
```
$ pre-commit run --all-files
eslint..............................................................................................................................................................................................................................................................................................................Failed
hookid: eslint
xargs: eslint: No such file or directory
```
Moving .npmrc to nope.npmrc fixed the issue.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import find_packages
2 from setuptools import setup
3
4
5 setup(
6 name='pre_commit',
7 description=(
8 'A framework for managing and maintaining multi-language pre-commit '
9 'hooks.'
10 ),
11 url='https://github.com/pre-commit/pre-commit',
12 version='0.2.9',
13
14 author='Anthony Sottile',
15 author_email='asottile@umich.edu',
16
17 platforms='linux',
18 classifiers=[
19 'License :: OSI Approved :: MIT License',
20 'Programming Language :: Python :: 2',
21 'Programming Language :: Python :: 2.6',
22 'Programming Language :: Python :: 2.7',
23 'Programming Language :: Python :: 3',
24 'Programming Language :: Python :: 3.3',
25 'Programming Language :: Python :: 3.4',
26 'Programming Language :: Python :: Implementation :: CPython',
27 'Programming Language :: Python :: Implementation :: PyPy',
28 ],
29
30 packages=find_packages('.', exclude=('tests*', 'testing*')),
31 package_data={
32 'pre_commit': [
33 'resources/pre-commit-hook',
34 'resources/rbenv.tar.gz',
35 'resources/ruby-build.tar.gz',
36 'resources/ruby-download.tar.gz',
37 ]
38 },
39 install_requires=[
40 'argparse',
41 'aspy.yaml',
42 'cached-property',
43 'jsonschema',
44 'nodeenv>=0.9.4',
45 'ordereddict',
46 'plumbum',
47 'pyyaml',
48 'simplejson',
49 'virtualenv',
50 ],
51 entry_points={
52 'console_scripts': [
53 'pre-commit = pre_commit.main:main',
54 'validate-config = pre_commit.clientlib.validate_config:run',
55 'validate-manifest = pre_commit.clientlib.validate_manifest:run',
56 ],
57 },
58 )
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -41,7 +41,7 @@
'aspy.yaml',
'cached-property',
'jsonschema',
- 'nodeenv>=0.9.4',
+ 'nodeenv>=0.11.1',
'ordereddict',
'plumbum',
'pyyaml',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -41,7 +41,7 @@\n 'aspy.yaml',\n 'cached-property',\n 'jsonschema',\n- 'nodeenv>=0.9.4',\n+ 'nodeenv>=0.11.1',\n 'ordereddict',\n 'plumbum',\n 'pyyaml',\n", "issue": "npmrc causes npm to install to home directory instead of nodeenv\nHere is what happened when I tried to get eslint installed: \n\n```\n$ pre-commit run --all-files\neslint..............................................................................................................................................................................................................................................................................................................Failed\nhookid: eslint\n\nxargs: eslint: No such file or directory\n```\n\nMoving .npmrc to nope.npmrc fixed the issue.\n\n", "before_files": [{"content": "from setuptools import find_packages\nfrom setuptools import setup\n\n\nsetup(\n name='pre_commit',\n description=(\n 'A framework for managing and maintaining multi-language pre-commit '\n 'hooks.'\n ),\n url='https://github.com/pre-commit/pre-commit',\n version='0.2.9',\n\n author='Anthony Sottile',\n author_email='asottile@umich.edu',\n\n platforms='linux',\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n ],\n\n packages=find_packages('.', exclude=('tests*', 'testing*')),\n package_data={\n 'pre_commit': [\n 'resources/pre-commit-hook',\n 'resources/rbenv.tar.gz',\n 'resources/ruby-build.tar.gz',\n 'resources/ruby-download.tar.gz',\n ]\n },\n install_requires=[\n 'argparse',\n 'aspy.yaml',\n 'cached-property',\n 'jsonschema',\n 'nodeenv>=0.9.4',\n 'ordereddict',\n 'plumbum',\n 'pyyaml',\n 'simplejson',\n 'virtualenv',\n ],\n entry_points={\n 'console_scripts': [\n 'pre-commit = pre_commit.main:main',\n 'validate-config = pre_commit.clientlib.validate_config:run',\n 'validate-manifest = pre_commit.clientlib.validate_manifest:run',\n ],\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import find_packages\nfrom setuptools import setup\n\n\nsetup(\n name='pre_commit',\n description=(\n 'A framework for managing and maintaining multi-language pre-commit '\n 'hooks.'\n ),\n url='https://github.com/pre-commit/pre-commit',\n version='0.2.9',\n\n author='Anthony Sottile',\n author_email='asottile@umich.edu',\n\n platforms='linux',\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n ],\n\n packages=find_packages('.', exclude=('tests*', 'testing*')),\n package_data={\n 'pre_commit': [\n 'resources/pre-commit-hook',\n 'resources/rbenv.tar.gz',\n 'resources/ruby-build.tar.gz',\n 'resources/ruby-download.tar.gz',\n ]\n },\n install_requires=[\n 'argparse',\n 'aspy.yaml',\n 'cached-property',\n 'jsonschema',\n 'nodeenv>=0.11.1',\n 
'ordereddict',\n 'plumbum',\n 'pyyaml',\n 'simplejson',\n 'virtualenv',\n ],\n entry_points={\n 'console_scripts': [\n 'pre-commit = pre_commit.main:main',\n 'validate-config = pre_commit.clientlib.validate_config:run',\n 'validate-manifest = pre_commit.clientlib.validate_manifest:run',\n ],\n },\n)\n", "path": "setup.py"}]} | 823 | 89 |
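As a quick diagnostic companion to the record above (illustrative only, not part of the dataset): the reported symptom is npm installing eslint into the home directory when a user-level `.npmrc` is present, and the patch addresses it by requiring `nodeenv>=0.11.1`. The snippet below simply surfaces any user-level npm overrides, for example a `prefix` setting, that could redirect installs away from the nodeenv.

```python
# Print non-comment settings from ~/.npmrc, if the file exists.
import os

npmrc = os.path.expanduser("~/.npmrc")
if os.path.exists(npmrc):
    with open(npmrc) as f:
        overrides = [line.strip() for line in f
                     if line.strip() and not line.lstrip().startswith("#")]
    print("user .npmrc settings:", overrides)
else:
    print("no user .npmrc found")
```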
gh_patches_debug_20336 | rasdani/github-patches | git_diff | numpy__numpy-12268 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
integrate content/images on broadcasting in docs
https://scipy.github.io/old-wiki/pages/EricsBroadcastingDoc explains broadcasting well (including useful diagrams) and is linked to from https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html. It is the only link to https://scipy.github.io/old-wiki left.
This content should be integrated in the user guide. There's also useful code to draw such diagrams at https://jakevdp.github.io/PythonDataScienceHandbook/02.05-computation-on-arrays-broadcasting.html
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `numpy/doc/broadcasting.py`
Content:
```
1 """
2 ========================
3 Broadcasting over arrays
4 ========================
5
6 The term broadcasting describes how numpy treats arrays with different
7 shapes during arithmetic operations. Subject to certain constraints,
8 the smaller array is "broadcast" across the larger array so that they
9 have compatible shapes. Broadcasting provides a means of vectorizing
10 array operations so that looping occurs in C instead of Python. It does
11 this without making needless copies of data and usually leads to
12 efficient algorithm implementations. There are, however, cases where
13 broadcasting is a bad idea because it leads to inefficient use of memory
14 that slows computation.
15
16 NumPy operations are usually done on pairs of arrays on an
17 element-by-element basis. In the simplest case, the two arrays must
18 have exactly the same shape, as in the following example:
19
20 >>> a = np.array([1.0, 2.0, 3.0])
21 >>> b = np.array([2.0, 2.0, 2.0])
22 >>> a * b
23 array([ 2., 4., 6.])
24
25 NumPy's broadcasting rule relaxes this constraint when the arrays'
26 shapes meet certain constraints. The simplest broadcasting example occurs
27 when an array and a scalar value are combined in an operation:
28
29 >>> a = np.array([1.0, 2.0, 3.0])
30 >>> b = 2.0
31 >>> a * b
32 array([ 2., 4., 6.])
33
34 The result is equivalent to the previous example where ``b`` was an array.
35 We can think of the scalar ``b`` being *stretched* during the arithmetic
36 operation into an array with the same shape as ``a``. The new elements in
37 ``b`` are simply copies of the original scalar. The stretching analogy is
38 only conceptual. NumPy is smart enough to use the original scalar value
39 without actually making copies, so that broadcasting operations are as
40 memory and computationally efficient as possible.
41
42 The code in the second example is more efficient than that in the first
43 because broadcasting moves less memory around during the multiplication
44 (``b`` is a scalar rather than an array).
45
46 General Broadcasting Rules
47 ==========================
48 When operating on two arrays, NumPy compares their shapes element-wise.
49 It starts with the trailing dimensions, and works its way forward. Two
50 dimensions are compatible when
51
52 1) they are equal, or
53 2) one of them is 1
54
55 If these conditions are not met, a
56 ``ValueError: operands could not be broadcast together`` exception is
57 thrown, indicating that the arrays have incompatible shapes. The size of
58 the resulting array is the maximum size along each dimension of the input
59 arrays.
60
61 Arrays do not need to have the same *number* of dimensions. For example,
62 if you have a ``256x256x3`` array of RGB values, and you want to scale
63 each color in the image by a different value, you can multiply the image
64 by a one-dimensional array with 3 values. Lining up the sizes of the
65 trailing axes of these arrays according to the broadcast rules, shows that
66 they are compatible::
67
68 Image (3d array): 256 x 256 x 3
69 Scale (1d array): 3
70 Result (3d array): 256 x 256 x 3
71
72 When either of the dimensions compared is one, the other is
73 used. In other words, dimensions with size 1 are stretched or "copied"
74 to match the other.
75
76 In the following example, both the ``A`` and ``B`` arrays have axes with
77 length one that are expanded to a larger size during the broadcast
78 operation::
79
80 A (4d array): 8 x 1 x 6 x 1
81 B (3d array): 7 x 1 x 5
82 Result (4d array): 8 x 7 x 6 x 5
83
84 Here are some more examples::
85
86 A (2d array): 5 x 4
87 B (1d array): 1
88 Result (2d array): 5 x 4
89
90 A (2d array): 5 x 4
91 B (1d array): 4
92 Result (2d array): 5 x 4
93
94 A (3d array): 15 x 3 x 5
95 B (3d array): 15 x 1 x 5
96 Result (3d array): 15 x 3 x 5
97
98 A (3d array): 15 x 3 x 5
99 B (2d array): 3 x 5
100 Result (3d array): 15 x 3 x 5
101
102 A (3d array): 15 x 3 x 5
103 B (2d array): 3 x 1
104 Result (3d array): 15 x 3 x 5
105
106 Here are examples of shapes that do not broadcast::
107
108 A (1d array): 3
109 B (1d array): 4 # trailing dimensions do not match
110
111 A (2d array): 2 x 1
112 B (3d array): 8 x 4 x 3 # second from last dimensions mismatched
113
114 An example of broadcasting in practice::
115
116 >>> x = np.arange(4)
117 >>> xx = x.reshape(4,1)
118 >>> y = np.ones(5)
119 >>> z = np.ones((3,4))
120
121 >>> x.shape
122 (4,)
123
124 >>> y.shape
125 (5,)
126
127 >>> x + y
128 ValueError: operands could not be broadcast together with shapes (4,) (5,)
129
130 >>> xx.shape
131 (4, 1)
132
133 >>> y.shape
134 (5,)
135
136 >>> (xx + y).shape
137 (4, 5)
138
139 >>> xx + y
140 array([[ 1., 1., 1., 1., 1.],
141 [ 2., 2., 2., 2., 2.],
142 [ 3., 3., 3., 3., 3.],
143 [ 4., 4., 4., 4., 4.]])
144
145 >>> x.shape
146 (4,)
147
148 >>> z.shape
149 (3, 4)
150
151 >>> (x + z).shape
152 (3, 4)
153
154 >>> x + z
155 array([[ 1., 2., 3., 4.],
156 [ 1., 2., 3., 4.],
157 [ 1., 2., 3., 4.]])
158
159 Broadcasting provides a convenient way of taking the outer product (or
160 any other outer operation) of two arrays. The following example shows an
161 outer addition operation of two 1-d arrays::
162
163 >>> a = np.array([0.0, 10.0, 20.0, 30.0])
164 >>> b = np.array([1.0, 2.0, 3.0])
165 >>> a[:, np.newaxis] + b
166 array([[ 1., 2., 3.],
167 [ 11., 12., 13.],
168 [ 21., 22., 23.],
169 [ 31., 32., 33.]])
170
171 Here the ``newaxis`` index operator inserts a new axis into ``a``,
172 making it a two-dimensional ``4x1`` array. Combining the ``4x1`` array
173 with ``b``, which has shape ``(3,)``, yields a ``4x3`` array.
174
175 See `this article <https://scipy.github.io/old-wiki/pages/EricsBroadcastingDoc>`_
176 for illustrations of broadcasting concepts.
177
178 """
179 from __future__ import division, absolute_import, print_function
180
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/numpy/doc/broadcasting.py b/numpy/doc/broadcasting.py
--- a/numpy/doc/broadcasting.py
+++ b/numpy/doc/broadcasting.py
@@ -3,6 +3,12 @@
Broadcasting over arrays
========================
+.. note::
+ See `this article
+ <https://numpy.org/devdocs/user/theory.broadcasting.html>`_
+ for illustrations of broadcasting concepts.
+
+
The term broadcasting describes how numpy treats arrays with different
shapes during arithmetic operations. Subject to certain constraints,
the smaller array is "broadcast" across the larger array so that they
@@ -172,8 +178,5 @@
making it a two-dimensional ``4x1`` array. Combining the ``4x1`` array
with ``b``, which has shape ``(3,)``, yields a ``4x3`` array.
-See `this article <https://scipy.github.io/old-wiki/pages/EricsBroadcastingDoc>`_
-for illustrations of broadcasting concepts.
-
"""
from __future__ import division, absolute_import, print_function
| {"golden_diff": "diff --git a/numpy/doc/broadcasting.py b/numpy/doc/broadcasting.py\n--- a/numpy/doc/broadcasting.py\n+++ b/numpy/doc/broadcasting.py\n@@ -3,6 +3,12 @@\n Broadcasting over arrays\n ========================\n \n+.. note::\n+ See `this article\n+ <https://numpy.org/devdocs/user/theory.broadcasting.html>`_\n+ for illustrations of broadcasting concepts.\n+\n+\n The term broadcasting describes how numpy treats arrays with different\n shapes during arithmetic operations. Subject to certain constraints,\n the smaller array is \"broadcast\" across the larger array so that they\n@@ -172,8 +178,5 @@\n making it a two-dimensional ``4x1`` array. Combining the ``4x1`` array\n with ``b``, which has shape ``(3,)``, yields a ``4x3`` array.\n \n-See `this article <https://scipy.github.io/old-wiki/pages/EricsBroadcastingDoc>`_\n-for illustrations of broadcasting concepts.\n-\n \"\"\"\n from __future__ import division, absolute_import, print_function\n", "issue": "integrate content/images on broadcasting in docs\nhttps://scipy.github.io/old-wiki/pages/EricsBroadcastingDoc explains broadcasting well (including useful diagrams) and is linked to from https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html. It is the only link to https://scipy.github.io/old-wiki left.\r\n\r\nThis content should be integrated in the user guide. There's also useful code to draw such diagrams at https://jakevdp.github.io/PythonDataScienceHandbook/02.05-computation-on-arrays-broadcasting.html\n", "before_files": [{"content": "\"\"\"\n========================\nBroadcasting over arrays\n========================\n\nThe term broadcasting describes how numpy treats arrays with different\nshapes during arithmetic operations. Subject to certain constraints,\nthe smaller array is \"broadcast\" across the larger array so that they\nhave compatible shapes. Broadcasting provides a means of vectorizing\narray operations so that looping occurs in C instead of Python. It does\nthis without making needless copies of data and usually leads to\nefficient algorithm implementations. There are, however, cases where\nbroadcasting is a bad idea because it leads to inefficient use of memory\nthat slows computation.\n\nNumPy operations are usually done on pairs of arrays on an\nelement-by-element basis. In the simplest case, the two arrays must\nhave exactly the same shape, as in the following example:\n\n >>> a = np.array([1.0, 2.0, 3.0])\n >>> b = np.array([2.0, 2.0, 2.0])\n >>> a * b\n array([ 2., 4., 6.])\n\nNumPy's broadcasting rule relaxes this constraint when the arrays'\nshapes meet certain constraints. The simplest broadcasting example occurs\nwhen an array and a scalar value are combined in an operation:\n\n>>> a = np.array([1.0, 2.0, 3.0])\n>>> b = 2.0\n>>> a * b\narray([ 2., 4., 6.])\n\nThe result is equivalent to the previous example where ``b`` was an array.\nWe can think of the scalar ``b`` being *stretched* during the arithmetic\noperation into an array with the same shape as ``a``. The new elements in\n``b`` are simply copies of the original scalar. The stretching analogy is\nonly conceptual. 
NumPy is smart enough to use the original scalar value\nwithout actually making copies, so that broadcasting operations are as\nmemory and computationally efficient as possible.\n\nThe code in the second example is more efficient than that in the first\nbecause broadcasting moves less memory around during the multiplication\n(``b`` is a scalar rather than an array).\n\nGeneral Broadcasting Rules\n==========================\nWhen operating on two arrays, NumPy compares their shapes element-wise.\nIt starts with the trailing dimensions, and works its way forward. Two\ndimensions are compatible when\n\n1) they are equal, or\n2) one of them is 1\n\nIf these conditions are not met, a\n``ValueError: operands could not be broadcast together`` exception is \nthrown, indicating that the arrays have incompatible shapes. The size of \nthe resulting array is the maximum size along each dimension of the input \narrays.\n\nArrays do not need to have the same *number* of dimensions. For example,\nif you have a ``256x256x3`` array of RGB values, and you want to scale\neach color in the image by a different value, you can multiply the image\nby a one-dimensional array with 3 values. Lining up the sizes of the\ntrailing axes of these arrays according to the broadcast rules, shows that\nthey are compatible::\n\n Image (3d array): 256 x 256 x 3\n Scale (1d array): 3\n Result (3d array): 256 x 256 x 3\n\nWhen either of the dimensions compared is one, the other is\nused. In other words, dimensions with size 1 are stretched or \"copied\"\nto match the other.\n\nIn the following example, both the ``A`` and ``B`` arrays have axes with\nlength one that are expanded to a larger size during the broadcast\noperation::\n\n A (4d array): 8 x 1 x 6 x 1\n B (3d array): 7 x 1 x 5\n Result (4d array): 8 x 7 x 6 x 5\n\nHere are some more examples::\n\n A (2d array): 5 x 4\n B (1d array): 1\n Result (2d array): 5 x 4\n\n A (2d array): 5 x 4\n B (1d array): 4\n Result (2d array): 5 x 4\n\n A (3d array): 15 x 3 x 5\n B (3d array): 15 x 1 x 5\n Result (3d array): 15 x 3 x 5\n\n A (3d array): 15 x 3 x 5\n B (2d array): 3 x 5\n Result (3d array): 15 x 3 x 5\n\n A (3d array): 15 x 3 x 5\n B (2d array): 3 x 1\n Result (3d array): 15 x 3 x 5\n\nHere are examples of shapes that do not broadcast::\n\n A (1d array): 3\n B (1d array): 4 # trailing dimensions do not match\n\n A (2d array): 2 x 1\n B (3d array): 8 x 4 x 3 # second from last dimensions mismatched\n\nAn example of broadcasting in practice::\n\n >>> x = np.arange(4)\n >>> xx = x.reshape(4,1)\n >>> y = np.ones(5)\n >>> z = np.ones((3,4))\n\n >>> x.shape\n (4,)\n\n >>> y.shape\n (5,)\n\n >>> x + y\n ValueError: operands could not be broadcast together with shapes (4,) (5,)\n\n >>> xx.shape\n (4, 1)\n\n >>> y.shape\n (5,)\n\n >>> (xx + y).shape\n (4, 5)\n\n >>> xx + y\n array([[ 1., 1., 1., 1., 1.],\n [ 2., 2., 2., 2., 2.],\n [ 3., 3., 3., 3., 3.],\n [ 4., 4., 4., 4., 4.]])\n\n >>> x.shape\n (4,)\n\n >>> z.shape\n (3, 4)\n\n >>> (x + z).shape\n (3, 4)\n\n >>> x + z\n array([[ 1., 2., 3., 4.],\n [ 1., 2., 3., 4.],\n [ 1., 2., 3., 4.]])\n\nBroadcasting provides a convenient way of taking the outer product (or\nany other outer operation) of two arrays. 
The following example shows an\nouter addition operation of two 1-d arrays::\n\n >>> a = np.array([0.0, 10.0, 20.0, 30.0])\n >>> b = np.array([1.0, 2.0, 3.0])\n >>> a[:, np.newaxis] + b\n array([[ 1., 2., 3.],\n [ 11., 12., 13.],\n [ 21., 22., 23.],\n [ 31., 32., 33.]])\n\nHere the ``newaxis`` index operator inserts a new axis into ``a``,\nmaking it a two-dimensional ``4x1`` array. Combining the ``4x1`` array\nwith ``b``, which has shape ``(3,)``, yields a ``4x3`` array.\n\nSee `this article <https://scipy.github.io/old-wiki/pages/EricsBroadcastingDoc>`_\nfor illustrations of broadcasting concepts.\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n", "path": "numpy/doc/broadcasting.py"}], "after_files": [{"content": "\"\"\"\n========================\nBroadcasting over arrays\n========================\n\n.. note::\n See `this article\n <https://numpy.org/devdocs/user/theory.broadcasting.html>`_\n for illustrations of broadcasting concepts.\n\n\nThe term broadcasting describes how numpy treats arrays with different\nshapes during arithmetic operations. Subject to certain constraints,\nthe smaller array is \"broadcast\" across the larger array so that they\nhave compatible shapes. Broadcasting provides a means of vectorizing\narray operations so that looping occurs in C instead of Python. It does\nthis without making needless copies of data and usually leads to\nefficient algorithm implementations. There are, however, cases where\nbroadcasting is a bad idea because it leads to inefficient use of memory\nthat slows computation.\n\nNumPy operations are usually done on pairs of arrays on an\nelement-by-element basis. In the simplest case, the two arrays must\nhave exactly the same shape, as in the following example:\n\n >>> a = np.array([1.0, 2.0, 3.0])\n >>> b = np.array([2.0, 2.0, 2.0])\n >>> a * b\n array([ 2., 4., 6.])\n\nNumPy's broadcasting rule relaxes this constraint when the arrays'\nshapes meet certain constraints. The simplest broadcasting example occurs\nwhen an array and a scalar value are combined in an operation:\n\n>>> a = np.array([1.0, 2.0, 3.0])\n>>> b = 2.0\n>>> a * b\narray([ 2., 4., 6.])\n\nThe result is equivalent to the previous example where ``b`` was an array.\nWe can think of the scalar ``b`` being *stretched* during the arithmetic\noperation into an array with the same shape as ``a``. The new elements in\n``b`` are simply copies of the original scalar. The stretching analogy is\nonly conceptual. NumPy is smart enough to use the original scalar value\nwithout actually making copies, so that broadcasting operations are as\nmemory and computationally efficient as possible.\n\nThe code in the second example is more efficient than that in the first\nbecause broadcasting moves less memory around during the multiplication\n(``b`` is a scalar rather than an array).\n\nGeneral Broadcasting Rules\n==========================\nWhen operating on two arrays, NumPy compares their shapes element-wise.\nIt starts with the trailing dimensions, and works its way forward. Two\ndimensions are compatible when\n\n1) they are equal, or\n2) one of them is 1\n\nIf these conditions are not met, a\n``ValueError: operands could not be broadcast together`` exception is \nthrown, indicating that the arrays have incompatible shapes. The size of \nthe resulting array is the maximum size along each dimension of the input \narrays.\n\nArrays do not need to have the same *number* of dimensions. 
For example,\nif you have a ``256x256x3`` array of RGB values, and you want to scale\neach color in the image by a different value, you can multiply the image\nby a one-dimensional array with 3 values. Lining up the sizes of the\ntrailing axes of these arrays according to the broadcast rules, shows that\nthey are compatible::\n\n Image (3d array): 256 x 256 x 3\n Scale (1d array): 3\n Result (3d array): 256 x 256 x 3\n\nWhen either of the dimensions compared is one, the other is\nused. In other words, dimensions with size 1 are stretched or \"copied\"\nto match the other.\n\nIn the following example, both the ``A`` and ``B`` arrays have axes with\nlength one that are expanded to a larger size during the broadcast\noperation::\n\n A (4d array): 8 x 1 x 6 x 1\n B (3d array): 7 x 1 x 5\n Result (4d array): 8 x 7 x 6 x 5\n\nHere are some more examples::\n\n A (2d array): 5 x 4\n B (1d array): 1\n Result (2d array): 5 x 4\n\n A (2d array): 5 x 4\n B (1d array): 4\n Result (2d array): 5 x 4\n\n A (3d array): 15 x 3 x 5\n B (3d array): 15 x 1 x 5\n Result (3d array): 15 x 3 x 5\n\n A (3d array): 15 x 3 x 5\n B (2d array): 3 x 5\n Result (3d array): 15 x 3 x 5\n\n A (3d array): 15 x 3 x 5\n B (2d array): 3 x 1\n Result (3d array): 15 x 3 x 5\n\nHere are examples of shapes that do not broadcast::\n\n A (1d array): 3\n B (1d array): 4 # trailing dimensions do not match\n\n A (2d array): 2 x 1\n B (3d array): 8 x 4 x 3 # second from last dimensions mismatched\n\nAn example of broadcasting in practice::\n\n >>> x = np.arange(4)\n >>> xx = x.reshape(4,1)\n >>> y = np.ones(5)\n >>> z = np.ones((3,4))\n\n >>> x.shape\n (4,)\n\n >>> y.shape\n (5,)\n\n >>> x + y\n ValueError: operands could not be broadcast together with shapes (4,) (5,)\n\n >>> xx.shape\n (4, 1)\n\n >>> y.shape\n (5,)\n\n >>> (xx + y).shape\n (4, 5)\n\n >>> xx + y\n array([[ 1., 1., 1., 1., 1.],\n [ 2., 2., 2., 2., 2.],\n [ 3., 3., 3., 3., 3.],\n [ 4., 4., 4., 4., 4.]])\n\n >>> x.shape\n (4,)\n\n >>> z.shape\n (3, 4)\n\n >>> (x + z).shape\n (3, 4)\n\n >>> x + z\n array([[ 1., 2., 3., 4.],\n [ 1., 2., 3., 4.],\n [ 1., 2., 3., 4.]])\n\nBroadcasting provides a convenient way of taking the outer product (or\nany other outer operation) of two arrays. The following example shows an\nouter addition operation of two 1-d arrays::\n\n >>> a = np.array([0.0, 10.0, 20.0, 30.0])\n >>> b = np.array([1.0, 2.0, 3.0])\n >>> a[:, np.newaxis] + b\n array([[ 1., 2., 3.],\n [ 11., 12., 13.],\n [ 21., 22., 23.],\n [ 31., 32., 33.]])\n\nHere the ``newaxis`` index operator inserts a new axis into ``a``,\nmaking it a two-dimensional ``4x1`` array. Combining the ``4x1`` array\nwith ``b``, which has shape ``(3,)``, yields a ``4x3`` array.\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n", "path": "numpy/doc/broadcasting.py"}]} | 2,605 | 235 |
gh_patches_debug_20356 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-5615 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
tools/c7n-org - azure subscription generation includes disabled subscriptions
per report on gitter.
ngibbondaimler - We used azuresubs.py from c7n-org to generate a list of our subscriptions; however, it's picking up disabled subscriptions, and c7n-org throws an exception when it tries to read from a disabled sub to apply policy. Is there a suggested workaround for this?
Stefan Gordon -
I believe the return from the subscription API list call includes a state attribute, something like "state": "Enabled" - So for your scenario perhaps you can just add a check on that value at https://github.com/cloud-custodian/cloud-custodian/blob/master/tools/c7n_org/scripts/azuresubs.py#L34
Additionally if you can file an issue with the error you are getting in c7n-org I would say that we should update it to handle this error properly. Generating a list without those is an easy workaround but it shouldn't fail on them.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/c7n_org/scripts/azuresubs.py`
Content:
```
1 # Copyright 2018 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import click
16 from c7n_azure.session import Session
17 from c7n.utils import yaml_dump
18 from azure.mgmt.resource.subscriptions import SubscriptionClient
19
20
21 @click.command()
22 @click.option(
23 '-f', '--output', type=click.File('w'),
24 help="File to store the generated config (default stdout)")
25 def main(output):
26 """
27 Generate a c7n-org subscriptions config file
28 """
29
30 client = SubscriptionClient(Session().get_credentials())
31 subs = [sub.serialize(True) for sub in client.subscriptions.list()]
32 results = []
33 for sub in subs:
34 sub_info = {
35 'subscription_id': sub['subscriptionId'],
36 'name': sub['displayName']
37 }
38 results.append(sub_info)
39
40 print(yaml_dump({'subscriptions': results}), file=output)
41
42
43 if __name__ == '__main__':
44 main()
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tools/c7n_org/scripts/azuresubs.py b/tools/c7n_org/scripts/azuresubs.py
--- a/tools/c7n_org/scripts/azuresubs.py
+++ b/tools/c7n_org/scripts/azuresubs.py
@@ -22,7 +22,12 @@
@click.option(
'-f', '--output', type=click.File('w'),
help="File to store the generated config (default stdout)")
-def main(output):
+@click.option(
+ '-s', '--state', multiple=True, type=click.Choice(
+ ['Enabled', 'Warned', 'PastDue', 'Disabled', 'Deleted']),
+ default=('Enabled',),
+ help="File to store the generated config (default stdout)")
+def main(output, state):
"""
Generate a c7n-org subscriptions config file
"""
@@ -31,6 +36,8 @@
subs = [sub.serialize(True) for sub in client.subscriptions.list()]
results = []
for sub in subs:
+ if state and sub['state'] not in state:
+ continue
sub_info = {
'subscription_id': sub['subscriptionId'],
'name': sub['displayName']
| {"golden_diff": "diff --git a/tools/c7n_org/scripts/azuresubs.py b/tools/c7n_org/scripts/azuresubs.py\n--- a/tools/c7n_org/scripts/azuresubs.py\n+++ b/tools/c7n_org/scripts/azuresubs.py\n@@ -22,7 +22,12 @@\n @click.option(\n '-f', '--output', type=click.File('w'),\n help=\"File to store the generated config (default stdout)\")\n-def main(output):\n+@click.option(\n+ '-s', '--state', multiple=True, type=click.Choice(\n+ ['Enabled', 'Warned', 'PastDue', 'Disabled', 'Deleted']),\n+ default=('Enabled',),\n+ help=\"File to store the generated config (default stdout)\")\n+def main(output, state):\n \"\"\"\n Generate a c7n-org subscriptions config file\n \"\"\"\n@@ -31,6 +36,8 @@\n subs = [sub.serialize(True) for sub in client.subscriptions.list()]\n results = []\n for sub in subs:\n+ if state and sub['state'] not in state:\n+ continue\n sub_info = {\n 'subscription_id': sub['subscriptionId'],\n 'name': sub['displayName']\n", "issue": "tools/c7n-org - azure subscription generation includes disabled subscriptions\n\r\nper report on gitter.\r\n\r\nngibbondaimler - We used azuresubs.py from c7n-org to generate a list of our subscriptions, however it's picking up disabled subscriptions and c7n-org throws an exception when it tries to read from a disabled sub to apply policy. Is there a suggested workaround for this?\r\n\r\n\r\nStefan Gordon -\r\nI believe the return from the subscription API list call includes a state attribute, something like \"state\": \"Enabled\" - So for your scenario perhaps you can just add a check on that value at https://github.com/cloud-custodian/cloud-custodian/blob/master/tools/c7n_org/scripts/azuresubs.py#L34\r\nAdditionally if you can file an issue with the error you are getting in c7n-org I would say that we should update it to handle this error properly. 
Generating a list without those is an easy workaround but it shouldn't fail on them.\r\n\n", "before_files": [{"content": "# Copyright 2018 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport click\nfrom c7n_azure.session import Session\nfrom c7n.utils import yaml_dump\nfrom azure.mgmt.resource.subscriptions import SubscriptionClient\n\n\n@click.command()\n@click.option(\n '-f', '--output', type=click.File('w'),\n help=\"File to store the generated config (default stdout)\")\ndef main(output):\n \"\"\"\n Generate a c7n-org subscriptions config file\n \"\"\"\n\n client = SubscriptionClient(Session().get_credentials())\n subs = [sub.serialize(True) for sub in client.subscriptions.list()]\n results = []\n for sub in subs:\n sub_info = {\n 'subscription_id': sub['subscriptionId'],\n 'name': sub['displayName']\n }\n results.append(sub_info)\n\n print(yaml_dump({'subscriptions': results}), file=output)\n\n\nif __name__ == '__main__':\n main()\n", "path": "tools/c7n_org/scripts/azuresubs.py"}], "after_files": [{"content": "# Copyright 2018 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport click\nfrom c7n_azure.session import Session\nfrom c7n.utils import yaml_dump\nfrom azure.mgmt.resource.subscriptions import SubscriptionClient\n\n\n@click.command()\n@click.option(\n '-f', '--output', type=click.File('w'),\n help=\"File to store the generated config (default stdout)\")\n@click.option(\n '-s', '--state', multiple=True, type=click.Choice(\n ['Enabled', 'Warned', 'PastDue', 'Disabled', 'Deleted']),\n default=('Enabled',),\n help=\"File to store the generated config (default stdout)\")\ndef main(output, state):\n \"\"\"\n Generate a c7n-org subscriptions config file\n \"\"\"\n\n client = SubscriptionClient(Session().get_credentials())\n subs = [sub.serialize(True) for sub in client.subscriptions.list()]\n results = []\n for sub in subs:\n if state and sub['state'] not in state:\n continue\n sub_info = {\n 'subscription_id': sub['subscriptionId'],\n 'name': sub['displayName']\n }\n results.append(sub_info)\n\n print(yaml_dump({'subscriptions': results}), file=output)\n\n\nif __name__ == '__main__':\n main()\n", "path": "tools/c7n_org/scripts/azuresubs.py"}]} | 869 | 265 |
gh_patches_debug_1667 | rasdani/github-patches | git_diff | learningequality__kolibri-1464 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
hide not-recent learners on 'coach - recent activity' tab
See similar issue for channels: https://github.com/learningequality/kolibri/pull/1406
Now we need to do the same thing for when you drill deeper and reach the learners list. For example here, we're showing all learners regardless of whether or not they've had recent activity:

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/plugins/coach/serializers.py`
Content:
```
1 from dateutil.parser import parse
2
3 from django.db.models import Case, Count, F, IntegerField, Sum, Value as V, When
4 from django.db.models.functions import Coalesce
5 from kolibri.auth.models import FacilityUser
6 from kolibri.content.models import ContentNode
7 from kolibri.logger.models import ContentSummaryLog
8 from le_utils.constants import content_kinds
9 from rest_framework import serializers
10
11 from .utils.return_users import get_members_or_user
12
13
14 class UserReportSerializer(serializers.ModelSerializer):
15 progress = serializers.SerializerMethodField()
16 last_active = serializers.SerializerMethodField()
17
18 class Meta:
19 model = FacilityUser
20 fields = (
21 'pk', 'full_name', 'progress', 'last_active',
22 )
23
24 def get_progress(self, target_user):
25 content_node = ContentNode.objects.get(pk=self.context['view'].kwargs['content_node_id'])
26 # progress details for a topic node and everything under it
27 if content_node.kind == content_kinds.TOPIC:
28 kind_counts = content_node.get_descendant_kind_counts()
29 topic_details = ContentSummaryLog.objects \
30 .filter_by_topic(content_node) \
31 .filter(user=target_user) \
32 .values('kind') \
33 .annotate(total_progress=Sum('progress')) \
34 .annotate(log_count_total=Count('pk')) \
35 .annotate(log_count_complete=Sum(Case(When(progress=1, then=1), default=0, output_field=IntegerField())))
36 # evaluate queryset so we can add data for kinds that do not have logs
37 topic_details = list(topic_details)
38 for kind in topic_details:
39 del kind_counts[kind['kind']]
40 for key in kind_counts:
41 topic_details.append({'kind': key, 'total_progress': 0, 'log_count_total': 0, 'log_count_complete': 0})
42 return topic_details
43 else:
44 # progress details for a leaf node (exercise, video, etc.)
45 leaf_details = ContentSummaryLog.objects \
46 .filter(user=target_user) \
47 .filter(content_id=content_node.content_id) \
48 .annotate(total_progress=F('progress')) \
49 .values('kind', 'time_spent', 'total_progress')
50 return leaf_details if leaf_details else [{'kind': content_node.kind, 'time_spent': 0, 'total_progress': 0}]
51
52 def get_last_active(self, target_user):
53 content_node = ContentNode.objects.get(pk=self.context['view'].kwargs['content_node_id'])
54 try:
55 if content_node.kind == content_kinds.TOPIC:
56 return ContentSummaryLog.objects \
57 .filter_by_topic(content_node) \
58 .filter(user=target_user) \
59 .latest('end_timestamp').end_timestamp
60 else:
61 return ContentSummaryLog.objects \
62 .filter(user=target_user) \
63 .get(content_id=content_node.content_id).end_timestamp
64 except ContentSummaryLog.DoesNotExist:
65 return None
66
67
68 class ContentReportSerializer(serializers.ModelSerializer):
69 progress = serializers.SerializerMethodField()
70 last_active = serializers.SerializerMethodField()
71 parent = serializers.SerializerMethodField()
72
73 class Meta:
74 model = ContentNode
75 fields = (
76 'pk', 'content_id', 'title', 'progress', 'kind', 'last_active', 'parent',
77 )
78
79 def get_progress(self, target_node):
80 kwargs = self.context['view'].kwargs
81 if target_node.kind == content_kinds.TOPIC:
82 kind_counts = target_node.get_descendant_kind_counts()
83 # filter logs by each kind under target node, and sum progress over logs
84 progress_query = ContentSummaryLog.objects \
85 .filter_by_topic(target_node) \
86 .filter(user__in=get_members_or_user(kwargs['collection_kind'], kwargs['collection_id']))
87 if kwargs.get('last_active_time'):
88 progress_query.filter(end_timestamp__gte=parse(kwargs.get('last_active_time')))
89 progress = progress_query.values('kind') \
90 .annotate(total_progress=Sum('progress'))
91 # add kind counts under this node to progress dict
92 for kind in progress:
93 kind['node_count'] = kind_counts[kind['kind']]
94 del kind_counts[kind['kind']]
95 # evaluate queryset so we can add data for kinds that do not have logs
96 progress = list(progress)
97 for key in kind_counts:
98 progress.append({'kind': key, 'node_count': kind_counts[key], 'total_progress': 0})
99 return progress
100 else:
101 # filter logs by a specific leaf node and compute stats over queryset
102 leaf_node_stats_query = ContentSummaryLog.objects \
103 .filter(content_id=target_node.content_id) \
104 .filter(user__in=get_members_or_user(kwargs['collection_kind'], kwargs['collection_id']))
105 if kwargs.get('last_active_time'):
106 leaf_node_stats_query.filter(end_timestamp__gte=parse(kwargs.get('last_active_time')))
107 leaf_node_stats = leaf_node_stats_query.aggregate(
108 total_progress=Coalesce(Sum('progress'), V(0)),
109 log_count_total=Coalesce(Count('pk'), V(0)),
110 log_count_complete=Coalesce(Sum(Case(When(progress=1, then=1), default=0, output_field=IntegerField())), V(0)))
111 return [leaf_node_stats] # return as array for consistency in api
112
113 def get_last_active(self, target_node):
114 kwargs = self.context['view'].kwargs
115 try:
116 if target_node.kind == content_kinds.TOPIC:
117 return ContentSummaryLog.objects \
118 .filter_by_topic(target_node) \
119 .filter(user__in=get_members_or_user(kwargs['collection_kind'], kwargs['collection_id'])) \
120 .latest('end_timestamp').end_timestamp
121 else:
122 return ContentSummaryLog.objects \
123 .filter(content_id=target_node.content_id) \
124 .filter(user__in=get_members_or_user(kwargs['collection_kind'], kwargs['collection_id'])) \
125 .latest('end_timestamp').end_timestamp
126 except ContentSummaryLog.DoesNotExist:
127 return None
128
129 def get_parent(self, target_node):
130 # returns immediate parent
131 return target_node.get_ancestors().values('pk', 'title').last()
132
133
134 class ContentSummarySerializer(ContentReportSerializer):
135 ancestors = serializers.SerializerMethodField()
136 num_users = serializers.SerializerMethodField()
137
138 class Meta:
139 model = ContentNode
140 fields = (
141 'pk', 'content_id', 'title', 'progress', 'kind', 'last_active', 'ancestors', 'num_users',
142 )
143
144 def get_ancestors(self, target_node):
145 """
146 in descending order (root ancestor first, immediate parent last)
147 """
148 return target_node.get_ancestors().values('pk', 'title')
149
150 def get_num_users(self, target_node):
151 kwargs = self.context['view'].kwargs
152 return len(get_members_or_user(kwargs['collection_kind'], kwargs['collection_id']))
153
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kolibri/plugins/coach/serializers.py b/kolibri/plugins/coach/serializers.py
--- a/kolibri/plugins/coach/serializers.py
+++ b/kolibri/plugins/coach/serializers.py
@@ -18,7 +18,7 @@
class Meta:
model = FacilityUser
fields = (
- 'pk', 'full_name', 'progress', 'last_active',
+ 'pk', 'username', 'full_name', 'progress', 'last_active',
)
def get_progress(self, target_user):
| {"golden_diff": "diff --git a/kolibri/plugins/coach/serializers.py b/kolibri/plugins/coach/serializers.py\n--- a/kolibri/plugins/coach/serializers.py\n+++ b/kolibri/plugins/coach/serializers.py\n@@ -18,7 +18,7 @@\n class Meta:\n model = FacilityUser\n fields = (\n- 'pk', 'full_name', 'progress', 'last_active',\n+ 'pk', 'username', 'full_name', 'progress', 'last_active',\n )\n \n def get_progress(self, target_user):\n", "issue": "hide not-recent learners on 'coach - recent activity' tab\nSee similar issue for channels: https://github.com/learningequality/kolibri/pull/1406\r\n\r\nNow we need to do the same thing for when you drill deeper and reach the learners list. For example here, we're showing all learners regardless of whether or not they've had recent activity:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from dateutil.parser import parse\n\nfrom django.db.models import Case, Count, F, IntegerField, Sum, Value as V, When\nfrom django.db.models.functions import Coalesce\nfrom kolibri.auth.models import FacilityUser\nfrom kolibri.content.models import ContentNode\nfrom kolibri.logger.models import ContentSummaryLog\nfrom le_utils.constants import content_kinds\nfrom rest_framework import serializers\n\nfrom .utils.return_users import get_members_or_user\n\n\nclass UserReportSerializer(serializers.ModelSerializer):\n progress = serializers.SerializerMethodField()\n last_active = serializers.SerializerMethodField()\n\n class Meta:\n model = FacilityUser\n fields = (\n 'pk', 'full_name', 'progress', 'last_active',\n )\n\n def get_progress(self, target_user):\n content_node = ContentNode.objects.get(pk=self.context['view'].kwargs['content_node_id'])\n # progress details for a topic node and everything under it\n if content_node.kind == content_kinds.TOPIC:\n kind_counts = content_node.get_descendant_kind_counts()\n topic_details = ContentSummaryLog.objects \\\n .filter_by_topic(content_node) \\\n .filter(user=target_user) \\\n .values('kind') \\\n .annotate(total_progress=Sum('progress')) \\\n .annotate(log_count_total=Count('pk')) \\\n .annotate(log_count_complete=Sum(Case(When(progress=1, then=1), default=0, output_field=IntegerField())))\n # evaluate queryset so we can add data for kinds that do not have logs\n topic_details = list(topic_details)\n for kind in topic_details:\n del kind_counts[kind['kind']]\n for key in kind_counts:\n topic_details.append({'kind': key, 'total_progress': 0, 'log_count_total': 0, 'log_count_complete': 0})\n return topic_details\n else:\n # progress details for a leaf node (exercise, video, etc.)\n leaf_details = ContentSummaryLog.objects \\\n .filter(user=target_user) \\\n .filter(content_id=content_node.content_id) \\\n .annotate(total_progress=F('progress')) \\\n .values('kind', 'time_spent', 'total_progress')\n return leaf_details if leaf_details else [{'kind': content_node.kind, 'time_spent': 0, 'total_progress': 0}]\n\n def get_last_active(self, target_user):\n content_node = ContentNode.objects.get(pk=self.context['view'].kwargs['content_node_id'])\n try:\n if content_node.kind == content_kinds.TOPIC:\n return ContentSummaryLog.objects \\\n .filter_by_topic(content_node) \\\n .filter(user=target_user) \\\n .latest('end_timestamp').end_timestamp\n else:\n return ContentSummaryLog.objects \\\n .filter(user=target_user) \\\n .get(content_id=content_node.content_id).end_timestamp\n except ContentSummaryLog.DoesNotExist:\n return None\n\n\nclass ContentReportSerializer(serializers.ModelSerializer):\n progress = 
serializers.SerializerMethodField()\n last_active = serializers.SerializerMethodField()\n parent = serializers.SerializerMethodField()\n\n class Meta:\n model = ContentNode\n fields = (\n 'pk', 'content_id', 'title', 'progress', 'kind', 'last_active', 'parent',\n )\n\n def get_progress(self, target_node):\n kwargs = self.context['view'].kwargs\n if target_node.kind == content_kinds.TOPIC:\n kind_counts = target_node.get_descendant_kind_counts()\n # filter logs by each kind under target node, and sum progress over logs\n progress_query = ContentSummaryLog.objects \\\n .filter_by_topic(target_node) \\\n .filter(user__in=get_members_or_user(kwargs['collection_kind'], kwargs['collection_id']))\n if kwargs.get('last_active_time'):\n progress_query.filter(end_timestamp__gte=parse(kwargs.get('last_active_time')))\n progress = progress_query.values('kind') \\\n .annotate(total_progress=Sum('progress'))\n # add kind counts under this node to progress dict\n for kind in progress:\n kind['node_count'] = kind_counts[kind['kind']]\n del kind_counts[kind['kind']]\n # evaluate queryset so we can add data for kinds that do not have logs\n progress = list(progress)\n for key in kind_counts:\n progress.append({'kind': key, 'node_count': kind_counts[key], 'total_progress': 0})\n return progress\n else:\n # filter logs by a specific leaf node and compute stats over queryset\n leaf_node_stats_query = ContentSummaryLog.objects \\\n .filter(content_id=target_node.content_id) \\\n .filter(user__in=get_members_or_user(kwargs['collection_kind'], kwargs['collection_id']))\n if kwargs.get('last_active_time'):\n leaf_node_stats_query.filter(end_timestamp__gte=parse(kwargs.get('last_active_time')))\n leaf_node_stats = leaf_node_stats_query.aggregate(\n total_progress=Coalesce(Sum('progress'), V(0)),\n log_count_total=Coalesce(Count('pk'), V(0)),\n log_count_complete=Coalesce(Sum(Case(When(progress=1, then=1), default=0, output_field=IntegerField())), V(0)))\n return [leaf_node_stats] # return as array for consistency in api\n\n def get_last_active(self, target_node):\n kwargs = self.context['view'].kwargs\n try:\n if target_node.kind == content_kinds.TOPIC:\n return ContentSummaryLog.objects \\\n .filter_by_topic(target_node) \\\n .filter(user__in=get_members_or_user(kwargs['collection_kind'], kwargs['collection_id'])) \\\n .latest('end_timestamp').end_timestamp\n else:\n return ContentSummaryLog.objects \\\n .filter(content_id=target_node.content_id) \\\n .filter(user__in=get_members_or_user(kwargs['collection_kind'], kwargs['collection_id'])) \\\n .latest('end_timestamp').end_timestamp\n except ContentSummaryLog.DoesNotExist:\n return None\n\n def get_parent(self, target_node):\n # returns immediate parent\n return target_node.get_ancestors().values('pk', 'title').last()\n\n\nclass ContentSummarySerializer(ContentReportSerializer):\n ancestors = serializers.SerializerMethodField()\n num_users = serializers.SerializerMethodField()\n\n class Meta:\n model = ContentNode\n fields = (\n 'pk', 'content_id', 'title', 'progress', 'kind', 'last_active', 'ancestors', 'num_users',\n )\n\n def get_ancestors(self, target_node):\n \"\"\"\n in descending order (root ancestor first, immediate parent last)\n \"\"\"\n return target_node.get_ancestors().values('pk', 'title')\n\n def get_num_users(self, target_node):\n kwargs = self.context['view'].kwargs\n return len(get_members_or_user(kwargs['collection_kind'], kwargs['collection_id']))\n", "path": "kolibri/plugins/coach/serializers.py"}], "after_files": [{"content": "from 
dateutil.parser import parse\n\nfrom django.db.models import Case, Count, F, IntegerField, Sum, Value as V, When\nfrom django.db.models.functions import Coalesce\nfrom kolibri.auth.models import FacilityUser\nfrom kolibri.content.models import ContentNode\nfrom kolibri.logger.models import ContentSummaryLog\nfrom le_utils.constants import content_kinds\nfrom rest_framework import serializers\n\nfrom .utils.return_users import get_members_or_user\n\n\nclass UserReportSerializer(serializers.ModelSerializer):\n progress = serializers.SerializerMethodField()\n last_active = serializers.SerializerMethodField()\n\n class Meta:\n model = FacilityUser\n fields = (\n 'pk', 'username', 'full_name', 'progress', 'last_active',\n )\n\n def get_progress(self, target_user):\n content_node = ContentNode.objects.get(pk=self.context['view'].kwargs['content_node_id'])\n # progress details for a topic node and everything under it\n if content_node.kind == content_kinds.TOPIC:\n kind_counts = content_node.get_descendant_kind_counts()\n topic_details = ContentSummaryLog.objects \\\n .filter_by_topic(content_node) \\\n .filter(user=target_user) \\\n .values('kind') \\\n .annotate(total_progress=Sum('progress')) \\\n .annotate(log_count_total=Count('pk')) \\\n .annotate(log_count_complete=Sum(Case(When(progress=1, then=1), default=0, output_field=IntegerField())))\n # evaluate queryset so we can add data for kinds that do not have logs\n topic_details = list(topic_details)\n for kind in topic_details:\n del kind_counts[kind['kind']]\n for key in kind_counts:\n topic_details.append({'kind': key, 'total_progress': 0, 'log_count_total': 0, 'log_count_complete': 0})\n return topic_details\n else:\n # progress details for a leaf node (exercise, video, etc.)\n leaf_details = ContentSummaryLog.objects \\\n .filter(user=target_user) \\\n .filter(content_id=content_node.content_id) \\\n .annotate(total_progress=F('progress')) \\\n .values('kind', 'time_spent', 'total_progress')\n return leaf_details if leaf_details else [{'kind': content_node.kind, 'time_spent': 0, 'total_progress': 0}]\n\n def get_last_active(self, target_user):\n content_node = ContentNode.objects.get(pk=self.context['view'].kwargs['content_node_id'])\n try:\n if content_node.kind == content_kinds.TOPIC:\n return ContentSummaryLog.objects \\\n .filter_by_topic(content_node) \\\n .filter(user=target_user) \\\n .latest('end_timestamp').end_timestamp\n else:\n return ContentSummaryLog.objects \\\n .filter(user=target_user) \\\n .get(content_id=content_node.content_id).end_timestamp\n except ContentSummaryLog.DoesNotExist:\n return None\n\n\nclass ContentReportSerializer(serializers.ModelSerializer):\n progress = serializers.SerializerMethodField()\n last_active = serializers.SerializerMethodField()\n parent = serializers.SerializerMethodField()\n\n class Meta:\n model = ContentNode\n fields = (\n 'pk', 'content_id', 'title', 'progress', 'kind', 'last_active', 'parent',\n )\n\n def get_progress(self, target_node):\n kwargs = self.context['view'].kwargs\n if target_node.kind == content_kinds.TOPIC:\n kind_counts = target_node.get_descendant_kind_counts()\n # filter logs by each kind under target node, and sum progress over logs\n progress_query = ContentSummaryLog.objects \\\n .filter_by_topic(target_node) \\\n .filter(user__in=get_members_or_user(kwargs['collection_kind'], kwargs['collection_id']))\n if kwargs.get('last_active_time'):\n progress_query.filter(end_timestamp__gte=parse(kwargs.get('last_active_time')))\n progress = progress_query.values('kind') 
\\\n .annotate(total_progress=Sum('progress'))\n # add kind counts under this node to progress dict\n for kind in progress:\n kind['node_count'] = kind_counts[kind['kind']]\n del kind_counts[kind['kind']]\n # evaluate queryset so we can add data for kinds that do not have logs\n progress = list(progress)\n for key in kind_counts:\n progress.append({'kind': key, 'node_count': kind_counts[key], 'total_progress': 0})\n return progress\n else:\n # filter logs by a specific leaf node and compute stats over queryset\n leaf_node_stats_query = ContentSummaryLog.objects \\\n .filter(content_id=target_node.content_id) \\\n .filter(user__in=get_members_or_user(kwargs['collection_kind'], kwargs['collection_id']))\n if kwargs.get('last_active_time'):\n leaf_node_stats_query.filter(end_timestamp__gte=parse(kwargs.get('last_active_time')))\n leaf_node_stats = leaf_node_stats_query.aggregate(\n total_progress=Coalesce(Sum('progress'), V(0)),\n log_count_total=Coalesce(Count('pk'), V(0)),\n log_count_complete=Coalesce(Sum(Case(When(progress=1, then=1), default=0, output_field=IntegerField())), V(0)))\n return [leaf_node_stats] # return as array for consistency in api\n\n def get_last_active(self, target_node):\n kwargs = self.context['view'].kwargs\n try:\n if target_node.kind == content_kinds.TOPIC:\n return ContentSummaryLog.objects \\\n .filter_by_topic(target_node) \\\n .filter(user__in=get_members_or_user(kwargs['collection_kind'], kwargs['collection_id'])) \\\n .latest('end_timestamp').end_timestamp\n else:\n return ContentSummaryLog.objects \\\n .filter(content_id=target_node.content_id) \\\n .filter(user__in=get_members_or_user(kwargs['collection_kind'], kwargs['collection_id'])) \\\n .latest('end_timestamp').end_timestamp\n except ContentSummaryLog.DoesNotExist:\n return None\n\n def get_parent(self, target_node):\n # returns immediate parent\n return target_node.get_ancestors().values('pk', 'title').last()\n\n\nclass ContentSummarySerializer(ContentReportSerializer):\n ancestors = serializers.SerializerMethodField()\n num_users = serializers.SerializerMethodField()\n\n class Meta:\n model = ContentNode\n fields = (\n 'pk', 'content_id', 'title', 'progress', 'kind', 'last_active', 'ancestors', 'num_users',\n )\n\n def get_ancestors(self, target_node):\n \"\"\"\n in descending order (root ancestor first, immediate parent last)\n \"\"\"\n return target_node.get_ancestors().values('pk', 'title')\n\n def get_num_users(self, target_node):\n kwargs = self.context['view'].kwargs\n return len(get_members_or_user(kwargs['collection_kind'], kwargs['collection_id']))\n", "path": "kolibri/plugins/coach/serializers.py"}]} | 2,210 | 125 |
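For illustration only, and separate from the recorded patch above (which only adds `username` to the serializer fields): one possible way to restrict a learner list to users with recent activity, using the models and helpers already imported in serializers.py. `last_active_time` is assumed to arrive via the view kwargs, as in the report serializers shown.

```python
# Hypothetical sketch: keep only learners who have a summary log entry
# ending after the cut-off time.
from dateutil.parser import parse
from kolibri.auth.models import FacilityUser
from kolibri.logger.models import ContentSummaryLog


def recent_learners(last_active_time):
    recent_user_ids = ContentSummaryLog.objects \
        .filter(end_timestamp__gte=parse(last_active_time)) \
        .values_list('user_id', flat=True)
    return FacilityUser.objects.filter(pk__in=recent_user_ids)
```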
gh_patches_debug_13047 | rasdani/github-patches | git_diff | doccano__doccano-1558 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Multi-label text classification export issues: same classes but in different orders
How to reproduce the behaviour
---------
<!-- Before submitting an issue, make sure to check the docs and closed issues and FAQ to see if any of the solutions work for you. https://github.com/doccano/doccano/wiki/Frequently-Asked-Questions -->
We are two annotators on a multi-label classification project. When I export the annotations, for some examples, me and my co-annotator have put the same labels, but on the exported CSV, they do not appear in the same order:
Annotator 1:
| text | labels |
| --- | --- |
| example 1 | label1#label2#label3 |
Annotator 2:
| text | labels |
| --- | --- |
| example 1 | label2#label3#label1 |
As I try to use these CSVs for comparing our annotations, this brings more difficulty.
<!-- Include a code example or the steps that led to the problem. Please try to be as specific as possible. -->
Your Environment
---------
<!-- Include details of your environment.-->
* Operating System: Debian
* Python Version Used: Don't know, I pulled the latest version from Docker Hub
* When you install doccano: 3 days ago
* How did you install doccano (Heroku button etc): Docker
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/api/views/download/writer.py`
Content:
```
1 import abc
2 import csv
3 import itertools
4 import json
5 import os
6 import uuid
7 import zipfile
8 from collections import defaultdict
9 from typing import Dict, Iterable, Iterator, List
10
11 from .data import Record
12
13
14 class BaseWriter:
15
16 def __init__(self, tmpdir: str):
17 self.tmpdir = tmpdir
18
19 @abc.abstractmethod
20 def write(self, records: Iterator[Record]) -> str:
21 raise NotImplementedError()
22
23 def write_zip(self, filenames: Iterable):
24 save_file = '{}.zip'.format(os.path.join(self.tmpdir, str(uuid.uuid4())))
25 with zipfile.ZipFile(save_file, 'w', compression=zipfile.ZIP_DEFLATED) as zf:
26 for file in filenames:
27 zf.write(filename=file, arcname=os.path.basename(file))
28 return save_file
29
30
31 class LineWriter(BaseWriter):
32 extension = 'txt'
33
34 def write(self, records: Iterator[Record]) -> str:
35 files = {}
36 for record in records:
37 filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')
38 if filename not in files:
39 f = open(filename, mode='a')
40 files[filename] = f
41 f = files[filename]
42 line = self.create_line(record)
43 f.write(f'{line}\n')
44 for f in files.values():
45 f.close()
46 save_file = self.write_zip(files)
47 for file in files:
48 os.remove(file)
49 return save_file
50
51 @abc.abstractmethod
52 def create_line(self, record) -> str:
53 raise NotImplementedError()
54
55
56 class CsvWriter(BaseWriter):
57 extension = 'csv'
58
59 def write(self, records: Iterator[Record]) -> str:
60 writers = {}
61 file_handlers = set()
62 records = list(records)
63 header = self.create_header(records)
64 for record in records:
65 filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')
66 if filename not in writers:
67 f = open(filename, mode='a', encoding='utf-8')
68 writer = csv.DictWriter(f, header)
69 writer.writeheader()
70 writers[filename] = writer
71 file_handlers.add(f)
72 writer = writers[filename]
73 line = self.create_line(record)
74 writer.writerow(line)
75
76 for f in file_handlers:
77 f.close()
78 save_file = self.write_zip(writers)
79 for file in writers:
80 os.remove(file)
81 return save_file
82
83 def create_line(self, record) -> Dict:
84 return {
85 'id': record.id,
86 'data': record.data,
87 'label': '#'.join(record.label),
88 **record.metadata
89 }
90
91 def create_header(self, records: List[Record]) -> Iterable[str]:
92 header = ['id', 'data', 'label']
93 header += sorted(set(itertools.chain(*[r.metadata.keys() for r in records])))
94 return header
95
96
97 class JSONWriter(BaseWriter):
98 extension = 'json'
99
100 def write(self, records: Iterator[Record]) -> str:
101 writers = {}
102 contents = defaultdict(list)
103 for record in records:
104 filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')
105 if filename not in writers:
106 f = open(filename, mode='a', encoding='utf-8')
107 writers[filename] = f
108 line = self.create_line(record)
109 contents[filename].append(line)
110
111 for filename, f in writers.items():
112 content = contents[filename]
113 json.dump(content, f, ensure_ascii=False)
114 f.close()
115
116 save_file = self.write_zip(writers)
117 for file in writers:
118 os.remove(file)
119 return save_file
120
121 def create_line(self, record) -> Dict:
122 return {
123 'id': record.id,
124 'data': record.data,
125 'label': record.label,
126 **record.metadata
127 }
128
129
130 class JSONLWriter(LineWriter):
131 extension = 'jsonl'
132
133 def create_line(self, record):
134 return json.dumps({
135 'id': record.id,
136 'data': record.data,
137 'label': record.label,
138 **record.metadata
139 }, ensure_ascii=False)
140
141
142 class FastTextWriter(LineWriter):
143 extension = 'txt'
144
145 def create_line(self, record):
146 line = [f'__label__{label}' for label in record.label]
147 line.append(record.data)
148 line = ' '.join(line)
149 return line
150
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backend/api/views/download/writer.py b/backend/api/views/download/writer.py
--- a/backend/api/views/download/writer.py
+++ b/backend/api/views/download/writer.py
@@ -84,7 +84,7 @@
return {
'id': record.id,
'data': record.data,
- 'label': '#'.join(record.label),
+ 'label': '#'.join(sorted(record.label)),
**record.metadata
}
@@ -144,6 +144,7 @@
def create_line(self, record):
line = [f'__label__{label}' for label in record.label]
+ line.sort()
line.append(record.data)
line = ' '.join(line)
return line
| {"golden_diff": "diff --git a/backend/api/views/download/writer.py b/backend/api/views/download/writer.py\n--- a/backend/api/views/download/writer.py\n+++ b/backend/api/views/download/writer.py\n@@ -84,7 +84,7 @@\n return {\n 'id': record.id,\n 'data': record.data,\n- 'label': '#'.join(record.label),\n+ 'label': '#'.join(sorted(record.label)),\n **record.metadata\n }\n \n@@ -144,6 +144,7 @@\n \n def create_line(self, record):\n line = [f'__label__{label}' for label in record.label]\n+ line.sort()\n line.append(record.data)\n line = ' '.join(line)\n return line\n", "issue": "Mutli-label text classification export issues: same classes but in different orders\nHow to reproduce the behaviour\r\n---------\r\n<!-- Before submitting an issue, make sure to check the docs and closed issues and FAQ to see if any of the solutions work for you. https://github.com/doccano/doccano/wiki/Frequently-Asked-Questions -->\r\nWe are two annotators on a multi-label classification project. When I export the annotations, for some examples, me and my co-annotator have put the same labels, but on the exported CSV, they do not appear in the same order:\r\n\r\nAnnotator 1:\r\n\r\n| text | labels |\r\n| example 1 | label1#label2#label3 |\r\n\r\nAnnotator 2:\r\n\r\n| text | labels |\r\n| example 1 | label2#label3#label1 |\r\n\r\nAs I try to use these CSVs for comparing our annotations, this brings more difficulty.\r\n\r\n<!-- Include a code example or the steps that led to the problem. Please try to be as specific as possible. -->\r\n\r\nYour Environment\r\n---------\r\n<!-- Include details of your environment.-->\r\n* Operating System: Debian\r\n* Python Version Used: Don't know, I pulled the latest version from Docker Hub\r\n* When you install doccano: 3 days ago\r\n* How did you install doccano (Heroku button etc): Docker\r\n\n", "before_files": [{"content": "import abc\nimport csv\nimport itertools\nimport json\nimport os\nimport uuid\nimport zipfile\nfrom collections import defaultdict\nfrom typing import Dict, Iterable, Iterator, List\n\nfrom .data import Record\n\n\nclass BaseWriter:\n\n def __init__(self, tmpdir: str):\n self.tmpdir = tmpdir\n\n @abc.abstractmethod\n def write(self, records: Iterator[Record]) -> str:\n raise NotImplementedError()\n\n def write_zip(self, filenames: Iterable):\n save_file = '{}.zip'.format(os.path.join(self.tmpdir, str(uuid.uuid4())))\n with zipfile.ZipFile(save_file, 'w', compression=zipfile.ZIP_DEFLATED) as zf:\n for file in filenames:\n zf.write(filename=file, arcname=os.path.basename(file))\n return save_file\n\n\nclass LineWriter(BaseWriter):\n extension = 'txt'\n\n def write(self, records: Iterator[Record]) -> str:\n files = {}\n for record in records:\n filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')\n if filename not in files:\n f = open(filename, mode='a')\n files[filename] = f\n f = files[filename]\n line = self.create_line(record)\n f.write(f'{line}\\n')\n for f in files.values():\n f.close()\n save_file = self.write_zip(files)\n for file in files:\n os.remove(file)\n return save_file\n\n @abc.abstractmethod\n def create_line(self, record) -> str:\n raise NotImplementedError()\n\n\nclass CsvWriter(BaseWriter):\n extension = 'csv'\n\n def write(self, records: Iterator[Record]) -> str:\n writers = {}\n file_handlers = set()\n records = list(records)\n header = self.create_header(records)\n for record in records:\n filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')\n if filename not in writers:\n f = open(filename, mode='a', 
encoding='utf-8')\n writer = csv.DictWriter(f, header)\n writer.writeheader()\n writers[filename] = writer\n file_handlers.add(f)\n writer = writers[filename]\n line = self.create_line(record)\n writer.writerow(line)\n\n for f in file_handlers:\n f.close()\n save_file = self.write_zip(writers)\n for file in writers:\n os.remove(file)\n return save_file\n\n def create_line(self, record) -> Dict:\n return {\n 'id': record.id,\n 'data': record.data,\n 'label': '#'.join(record.label),\n **record.metadata\n }\n\n def create_header(self, records: List[Record]) -> Iterable[str]:\n header = ['id', 'data', 'label']\n header += sorted(set(itertools.chain(*[r.metadata.keys() for r in records])))\n return header\n\n\nclass JSONWriter(BaseWriter):\n extension = 'json'\n\n def write(self, records: Iterator[Record]) -> str:\n writers = {}\n contents = defaultdict(list)\n for record in records:\n filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')\n if filename not in writers:\n f = open(filename, mode='a', encoding='utf-8')\n writers[filename] = f\n line = self.create_line(record)\n contents[filename].append(line)\n\n for filename, f in writers.items():\n content = contents[filename]\n json.dump(content, f, ensure_ascii=False)\n f.close()\n\n save_file = self.write_zip(writers)\n for file in writers:\n os.remove(file)\n return save_file\n\n def create_line(self, record) -> Dict:\n return {\n 'id': record.id,\n 'data': record.data,\n 'label': record.label,\n **record.metadata\n }\n\n\nclass JSONLWriter(LineWriter):\n extension = 'jsonl'\n\n def create_line(self, record):\n return json.dumps({\n 'id': record.id,\n 'data': record.data,\n 'label': record.label,\n **record.metadata\n }, ensure_ascii=False)\n\n\nclass FastTextWriter(LineWriter):\n extension = 'txt'\n\n def create_line(self, record):\n line = [f'__label__{label}' for label in record.label]\n line.append(record.data)\n line = ' '.join(line)\n return line\n", "path": "backend/api/views/download/writer.py"}], "after_files": [{"content": "import abc\nimport csv\nimport itertools\nimport json\nimport os\nimport uuid\nimport zipfile\nfrom collections import defaultdict\nfrom typing import Dict, Iterable, Iterator, List\n\nfrom .data import Record\n\n\nclass BaseWriter:\n\n def __init__(self, tmpdir: str):\n self.tmpdir = tmpdir\n\n @abc.abstractmethod\n def write(self, records: Iterator[Record]) -> str:\n raise NotImplementedError()\n\n def write_zip(self, filenames: Iterable):\n save_file = '{}.zip'.format(os.path.join(self.tmpdir, str(uuid.uuid4())))\n with zipfile.ZipFile(save_file, 'w', compression=zipfile.ZIP_DEFLATED) as zf:\n for file in filenames:\n zf.write(filename=file, arcname=os.path.basename(file))\n return save_file\n\n\nclass LineWriter(BaseWriter):\n extension = 'txt'\n\n def write(self, records: Iterator[Record]) -> str:\n files = {}\n for record in records:\n filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')\n if filename not in files:\n f = open(filename, mode='a')\n files[filename] = f\n f = files[filename]\n line = self.create_line(record)\n f.write(f'{line}\\n')\n for f in files.values():\n f.close()\n save_file = self.write_zip(files)\n for file in files:\n os.remove(file)\n return save_file\n\n @abc.abstractmethod\n def create_line(self, record) -> str:\n raise NotImplementedError()\n\n\nclass CsvWriter(BaseWriter):\n extension = 'csv'\n\n def write(self, records: Iterator[Record]) -> str:\n writers = {}\n file_handlers = set()\n records = list(records)\n header = 
self.create_header(records)\n for record in records:\n filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')\n if filename not in writers:\n f = open(filename, mode='a', encoding='utf-8')\n writer = csv.DictWriter(f, header)\n writer.writeheader()\n writers[filename] = writer\n file_handlers.add(f)\n writer = writers[filename]\n line = self.create_line(record)\n writer.writerow(line)\n\n for f in file_handlers:\n f.close()\n save_file = self.write_zip(writers)\n for file in writers:\n os.remove(file)\n return save_file\n\n def create_line(self, record) -> Dict:\n return {\n 'id': record.id,\n 'data': record.data,\n 'label': '#'.join(sorted(record.label)),\n **record.metadata\n }\n\n def create_header(self, records: List[Record]) -> Iterable[str]:\n header = ['id', 'data', 'label']\n header += sorted(set(itertools.chain(*[r.metadata.keys() for r in records])))\n return header\n\n\nclass JSONWriter(BaseWriter):\n extension = 'json'\n\n def write(self, records: Iterator[Record]) -> str:\n writers = {}\n contents = defaultdict(list)\n for record in records:\n filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')\n if filename not in writers:\n f = open(filename, mode='a', encoding='utf-8')\n writers[filename] = f\n line = self.create_line(record)\n contents[filename].append(line)\n\n for filename, f in writers.items():\n content = contents[filename]\n json.dump(content, f, ensure_ascii=False)\n f.close()\n\n save_file = self.write_zip(writers)\n for file in writers:\n os.remove(file)\n return save_file\n\n def create_line(self, record) -> Dict:\n return {\n 'id': record.id,\n 'data': record.data,\n 'label': record.label,\n **record.metadata\n }\n\n\nclass JSONLWriter(LineWriter):\n extension = 'jsonl'\n\n def create_line(self, record):\n return json.dumps({\n 'id': record.id,\n 'data': record.data,\n 'label': record.label,\n **record.metadata\n }, ensure_ascii=False)\n\n\nclass FastTextWriter(LineWriter):\n extension = 'txt'\n\n def create_line(self, record):\n line = [f'__label__{label}' for label in record.label]\n line.sort()\n line.append(record.data)\n line = ' '.join(line)\n return line\n", "path": "backend/api/views/download/writer.py"}]} | 1,841 | 164 |
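For illustration only: the essence of the fix recorded above is to sort labels before joining, so two annotators with the same label set export identical lines regardless of the order in which they applied the labels. A minimal sketch (the function names are made up for the example):

```python
# Hypothetical sketch: deterministic label ordering for CSV and fastText export.
def csv_label_field(labels):
    return '#'.join(sorted(labels))            # e.g. "label1#label2#label3"


def fasttext_labels(labels):
    return ' '.join(sorted(f'__label__{label}' for label in labels))
```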
gh_patches_debug_33390 | rasdani/github-patches | git_diff | kivy__kivy-1947 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TextInput crashes while using clipboard bubble
After opening the clipboard bubble, the keyboard doesn't close anymore.
Moreover, when closing the application, it gives:
```
I/python ( 4932): [INFO ] [Clipboard ] Provider: dummy(['clipboard_android'] ignored)
I/python ( 4932): [INFO ] [Base ] Leaving application in progress...
I/python ( 4932): Python for android ended.
W/dalvikvm( 4932): threadid=10: thread exiting with uncaught exception (group=0x4001d560)
E/AndroidRuntime( 4932): FATAL EXCEPTION: Thread-11
E/AndroidRuntime( 4932): java.lang.NoClassDefFoundError: android.content.ClipData
E/AndroidRuntime( 4932): at org.renpy.android.SDLSurfaceView.nativeInit(Native Method)
E/AndroidRuntime( 4932): at org.renpy.android.SDLSurfaceView.run(SDLSurfaceView.java:725)
E/AndroidRuntime( 4932): at java.lang.Thread.run(Thread.java:1019)
E/AndroidRuntime( 4932): Caused by: java.lang.ClassNotFoundException: android.content.ClipData in loader dalvik.system.PathClassLoader[/data/app/org.emanuele.LyricsDL-2.apk]
E/AndroidRuntime( 4932): at dalvik.system.PathClassLoader.findClass(PathClassLoader.java:240)
E/AndroidRuntime( 4932): at java.lang.ClassLoader.loadClass(ClassLoader.java:551)
E/AndroidRuntime( 4932): at java.lang.ClassLoader.loadClass(ClassLoader.java:511)
E/AndroidRuntime( 4932): ... 3 more
```
If specifying "use_bubble: False", it works correctly, but the clipboard is obviously disabled.
android sdk 14
kivy 1.8.0
## <bountysource-plugin>
Want to back this issue? **[Post a bounty on it!](https://www.bountysource.com/issues/1436926-textinput-crashes-while-using-clipboard-bubble?utm_campaign=plugin&utm_content=tracker%2F42681&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F42681&utm_medium=issues&utm_source=github).
</bountysource-plugin>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kivy/core/clipboard/clipboard_android.py`
Content:
```
1 '''
2 Clipboard Android
3 =================
4
5 Android implementation of Clipboard provider, using Pyjnius.
6 '''
7
8 __all__ = ('ClipboardAndroid', )
9
10 from kivy.core.clipboard import ClipboardBase
11 from jnius import autoclass
12 from android.runnable import run_on_ui_thread
13
14 AndroidString = autoclass('java.lang.String')
15 PythonActivity = autoclass('org.renpy.android.PythonActivity')
16 Context = autoclass('android.content.Context')
17 ClipData = autoclass('android.content.ClipData')
18 ClipDescription = autoclass('android.content.ClipDescription')
19
20
21 class ClipboardAndroid(ClipboardBase):
22
23 def __init__(self):
24 super(ClipboardAndroid, self).__init__()
25 self._clipboard = None
26 self._data = dict()
27 self._data['text/plain'] = None
28 self._data['application/data'] = None
29 PythonActivity._clipboard = None
30
31 def get(self, mimetype='text/plain'):
32 return self._get(mimetype)
33
34 def put(self, data, mimetype='text/plain'):
35 self._set(data, mimetype)
36
37 def get_types(self):
38 return list(self._data.keys())
39
40 @run_on_ui_thread
41 def _initialize_clipboard(self):
42 PythonActivity._clipboard = PythonActivity.getSystemService(
43 Context.CLIPBOARD_SERVICE)
44
45 def _get_clipboard(f):
46 def called(*args, **kargs):
47 self = args[0]
48 if not PythonActivity._clipboard:
49 self._initialize_clipboard()
50 import time
51 while not PythonActivity._clipboard:
52 time.sleep(.01)
53 return f(*args, **kargs)
54 return called
55
56 @_get_clipboard
57 def _get(self, mimetype='text/plain'):
58 clippy = PythonActivity._clipboard
59 primary_clip = clippy.getPrimaryClip()
60 if primary_clip and clippy.getPrimaryClipDescription().hasMimeType(
61 ClipDescription.MIMETYPE_TEXT_PLAIN):
62 data = primary_clip.getItemAt(0).getText().toString()
63 else:
64 # TODO: non text data types Not yet implemented
65 data = ''
66 return data
67
68 @_get_clipboard
69 def _set(self, data, mimetype):
70 clippy = PythonActivity._clipboard
71 new_clip = ClipData.newPlainText(AndroidString(""),
72 AndroidString(data))
73 # put text data onto clipboard
74 clippy.setPrimaryClip(new_clip)
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kivy/core/clipboard/clipboard_android.py b/kivy/core/clipboard/clipboard_android.py
--- a/kivy/core/clipboard/clipboard_android.py
+++ b/kivy/core/clipboard/clipboard_android.py
@@ -14,8 +14,8 @@
AndroidString = autoclass('java.lang.String')
PythonActivity = autoclass('org.renpy.android.PythonActivity')
Context = autoclass('android.content.Context')
-ClipData = autoclass('android.content.ClipData')
-ClipDescription = autoclass('android.content.ClipDescription')
+VER = autoclass('android.os.Build$VERSION')
+sdk = VER.SDK_INT
class ClipboardAndroid(ClipboardBase):
@@ -56,19 +56,29 @@
@_get_clipboard
def _get(self, mimetype='text/plain'):
clippy = PythonActivity._clipboard
- primary_clip = clippy.getPrimaryClip()
- if primary_clip and clippy.getPrimaryClipDescription().hasMimeType(
- ClipDescription.MIMETYPE_TEXT_PLAIN):
- data = primary_clip.getItemAt(0).getText().toString()
+ if sdk < 11:
+ data = clippy.getText().toString()
else:
- # TODO: non text data types Not yet implemented
- data = ''
+ ClipDescription = autoclass('android.content.ClipDescription')
+ primary_clip = clippy.getPrimaryClip()
+ if primary_clip and clippy.getPrimaryClipDescription().hasMimeType(
+ ClipDescription.MIMETYPE_TEXT_PLAIN):
+ data = primary_clip.getItemAt(0).getText().toString()
+ else:
+ # TODO: non text data types Not yet implemented
+ data = ''
return data
@_get_clipboard
def _set(self, data, mimetype):
clippy = PythonActivity._clipboard
- new_clip = ClipData.newPlainText(AndroidString(""),
+
+ if sdk < 11:
+ #versions previous to honeycomb
+ clippy.setText(AndroidString(data))
+ else:
+ ClipData = autoclass('android.content.ClipData')
+ new_clip = ClipData.newPlainText(AndroidString(""),
AndroidString(data))
- # put text data onto clipboard
- clippy.setPrimaryClip(new_clip)
+ # put text data onto clipboard
+ clippy.setPrimaryClip(new_clip)
| {"golden_diff": "diff --git a/kivy/core/clipboard/clipboard_android.py b/kivy/core/clipboard/clipboard_android.py\n--- a/kivy/core/clipboard/clipboard_android.py\n+++ b/kivy/core/clipboard/clipboard_android.py\n@@ -14,8 +14,8 @@\n AndroidString = autoclass('java.lang.String')\n PythonActivity = autoclass('org.renpy.android.PythonActivity')\n Context = autoclass('android.content.Context')\n-ClipData = autoclass('android.content.ClipData')\n-ClipDescription = autoclass('android.content.ClipDescription')\n+VER = autoclass('android.os.Build$VERSION')\n+sdk = VER.SDK_INT\n \n \n class ClipboardAndroid(ClipboardBase):\n@@ -56,19 +56,29 @@\n @_get_clipboard\n def _get(self, mimetype='text/plain'):\n clippy = PythonActivity._clipboard\n- primary_clip = clippy.getPrimaryClip()\n- if primary_clip and clippy.getPrimaryClipDescription().hasMimeType(\n- ClipDescription.MIMETYPE_TEXT_PLAIN):\n- data = primary_clip.getItemAt(0).getText().toString()\n+ if sdk < 11:\n+ data = clippy.getText().toString()\n else:\n- # TODO: non text data types Not yet implemented\n- data = ''\n+ ClipDescription = autoclass('android.content.ClipDescription')\n+ primary_clip = clippy.getPrimaryClip()\n+ if primary_clip and clippy.getPrimaryClipDescription().hasMimeType(\n+ ClipDescription.MIMETYPE_TEXT_PLAIN):\n+ data = primary_clip.getItemAt(0).getText().toString()\n+ else:\n+ # TODO: non text data types Not yet implemented\n+ data = ''\n return data\n \n @_get_clipboard\n def _set(self, data, mimetype):\n clippy = PythonActivity._clipboard\n- new_clip = ClipData.newPlainText(AndroidString(\"\"),\n+\n+ if sdk < 11:\n+ #versions previous to honeycomb\n+ clippy.setText(AndroidString(data))\n+ else:\n+ ClipData = autoclass('android.content.ClipData')\n+ new_clip = ClipData.newPlainText(AndroidString(\"\"),\n AndroidString(data))\n- # put text data onto clipboard\n- clippy.setPrimaryClip(new_clip)\n+ # put text data onto clipboard\n+ clippy.setPrimaryClip(new_clip)\n", "issue": "TextInput crashes while using clipboard bubble\nAfter opening clipboard bubble, keyboard doesn't close anymore.\nMoreover, when closing application, it gives:\n\n```\nI/python ( 4932): [INFO ] [Clipboard ] Provider: dummy(['clipboard_android'] ignored)\nI/python ( 4932): [INFO ] [Base ] Leaving application in progress...\nI/python ( 4932): Python for android ended.\nW/dalvikvm( 4932): threadid=10: thread exiting with uncaught exception (group=0x4001d560)\nE/AndroidRuntime( 4932): FATAL EXCEPTION: Thread-11\nE/AndroidRuntime( 4932): java.lang.NoClassDefFoundError: android.content.ClipData\nE/AndroidRuntime( 4932): at org.renpy.android.SDLSurfaceView.nativeInit(Native Method)\nE/AndroidRuntime( 4932): at org.renpy.android.SDLSurfaceView.run(SDLSurfaceView.java:725)\nE/AndroidRuntime( 4932): at java.lang.Thread.run(Thread.java:1019)\nE/AndroidRuntime( 4932): Caused by: java.lang.ClassNotFoundException: android.content.ClipData in loader dalvik.system.PathClassLoader[/data/app/org.emanuele.LyricsDL-2.apk]\nE/AndroidRuntime( 4932): at dalvik.system.PathClassLoader.findClass(PathClassLoader.java:240)\nE/AndroidRuntime( 4932): at java.lang.ClassLoader.loadClass(ClassLoader.java:551)\nE/AndroidRuntime( 4932): at java.lang.ClassLoader.loadClass(ClassLoader.java:511)\nE/AndroidRuntime( 4932): ... 3 more\n```\n\nIf specifing \"use_bubble: False\" it works correctly, but clipboard is obviously disabled.\n\nandroid sdk 14\nkivy 1.8.0\n## <bountysource-plugin>\n\nWant to back this issue? 
**[Post a bounty on it!](https://www.bountysource.com/issues/1436926-textinput-crashes-while-using-clipboard-bubble?utm_campaign=plugin&utm_content=tracker%2F42681&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F42681&utm_medium=issues&utm_source=github).\n</bountysource-plugin>\n\n", "before_files": [{"content": "'''\nClipboard Android\n=================\n\nAndroid implementation of Clipboard provider, using Pyjnius.\n'''\n\n__all__ = ('ClipboardAndroid', )\n\nfrom kivy.core.clipboard import ClipboardBase\nfrom jnius import autoclass\nfrom android.runnable import run_on_ui_thread\n\nAndroidString = autoclass('java.lang.String')\nPythonActivity = autoclass('org.renpy.android.PythonActivity')\nContext = autoclass('android.content.Context')\nClipData = autoclass('android.content.ClipData')\nClipDescription = autoclass('android.content.ClipDescription')\n\n\nclass ClipboardAndroid(ClipboardBase):\n\n def __init__(self):\n super(ClipboardAndroid, self).__init__()\n self._clipboard = None\n self._data = dict()\n self._data['text/plain'] = None\n self._data['application/data'] = None\n PythonActivity._clipboard = None\n\n def get(self, mimetype='text/plain'):\n return self._get(mimetype)\n\n def put(self, data, mimetype='text/plain'):\n self._set(data, mimetype)\n\n def get_types(self):\n return list(self._data.keys())\n\n @run_on_ui_thread\n def _initialize_clipboard(self):\n PythonActivity._clipboard = PythonActivity.getSystemService(\n Context.CLIPBOARD_SERVICE)\n\n def _get_clipboard(f):\n def called(*args, **kargs):\n self = args[0]\n if not PythonActivity._clipboard:\n self._initialize_clipboard()\n import time\n while not PythonActivity._clipboard:\n time.sleep(.01)\n return f(*args, **kargs)\n return called\n\n @_get_clipboard\n def _get(self, mimetype='text/plain'):\n clippy = PythonActivity._clipboard\n primary_clip = clippy.getPrimaryClip()\n if primary_clip and clippy.getPrimaryClipDescription().hasMimeType(\n ClipDescription.MIMETYPE_TEXT_PLAIN):\n data = primary_clip.getItemAt(0).getText().toString()\n else:\n # TODO: non text data types Not yet implemented\n data = ''\n return data\n\n @_get_clipboard\n def _set(self, data, mimetype):\n clippy = PythonActivity._clipboard\n new_clip = ClipData.newPlainText(AndroidString(\"\"),\n AndroidString(data))\n # put text data onto clipboard\n clippy.setPrimaryClip(new_clip)\n", "path": "kivy/core/clipboard/clipboard_android.py"}], "after_files": [{"content": "'''\nClipboard Android\n=================\n\nAndroid implementation of Clipboard provider, using Pyjnius.\n'''\n\n__all__ = ('ClipboardAndroid', )\n\nfrom kivy.core.clipboard import ClipboardBase\nfrom jnius import autoclass\nfrom android.runnable import run_on_ui_thread\n\nAndroidString = autoclass('java.lang.String')\nPythonActivity = autoclass('org.renpy.android.PythonActivity')\nContext = autoclass('android.content.Context')\nVER = autoclass('android.os.Build$VERSION')\nsdk = VER.SDK_INT\n\n\nclass ClipboardAndroid(ClipboardBase):\n\n def __init__(self):\n super(ClipboardAndroid, self).__init__()\n self._clipboard = None\n self._data = dict()\n self._data['text/plain'] = None\n self._data['application/data'] = None\n PythonActivity._clipboard = None\n\n def get(self, mimetype='text/plain'):\n return self._get(mimetype)\n\n def put(self, data, mimetype='text/plain'):\n self._set(data, mimetype)\n\n def get_types(self):\n return list(self._data.keys())\n\n @run_on_ui_thread\n def 
_initialize_clipboard(self):\n PythonActivity._clipboard = PythonActivity.getSystemService(\n Context.CLIPBOARD_SERVICE)\n\n def _get_clipboard(f):\n def called(*args, **kargs):\n self = args[0]\n if not PythonActivity._clipboard:\n self._initialize_clipboard()\n import time\n while not PythonActivity._clipboard:\n time.sleep(.01)\n return f(*args, **kargs)\n return called\n\n @_get_clipboard\n def _get(self, mimetype='text/plain'):\n clippy = PythonActivity._clipboard\n if sdk < 11:\n data = clippy.getText().toString()\n else:\n ClipDescription = autoclass('android.content.ClipDescription')\n primary_clip = clippy.getPrimaryClip()\n if primary_clip and clippy.getPrimaryClipDescription().hasMimeType(\n ClipDescription.MIMETYPE_TEXT_PLAIN):\n data = primary_clip.getItemAt(0).getText().toString()\n else:\n # TODO: non text data types Not yet implemented\n data = ''\n return data\n\n @_get_clipboard\n def _set(self, data, mimetype):\n clippy = PythonActivity._clipboard\n\n if sdk < 11:\n #versions previous to honeycomb\n clippy.setText(AndroidString(data))\n else:\n ClipData = autoclass('android.content.ClipData')\n new_clip = ClipData.newPlainText(AndroidString(\"\"),\n AndroidString(data))\n # put text data onto clipboard\n clippy.setPrimaryClip(new_clip)\n", "path": "kivy/core/clipboard/clipboard_android.py"}]} | 1,496 | 514 |
gh_patches_debug_9553 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-3064 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Not really possible to override settings
## Description
I want to override the mathesar settings in order to allow LDAP login using django-auth-ldap. I changed the `config/settings/production.py` file that mentions: `# Override default settings ` and added the needed configuration.
This worked fine; however, that file is under version control, so if it is changed on the origin I'll get a conflict and would need to also merge my changes. The usual way to implement this functionality is to add a *non-tracked* `local.py` file that would contain any extra configuration for each environment (either dev or production) and import *that* file from the corresponding file. I.e., the production.py would be changed to:
```python
# Override default settings
try:
from .local import *
except ImportError:
pass
```
This way, if the local.py file is there it will be used to override the config, but if it isn't there it will be ignored.
## Expected behavior
Being able to override django settings for my environment *without* keeping a fork.
## To Reproduce
Change the production.py file and you'll see that it's version controlled so it can't be easily changed!
## Environment
Not needed
## Additional context
I'd be happy to provide a PR implementing the functionality described here, i.e. allow an untracked local.py file to override django settings for each *user/environment*.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `config/settings/development.py`
Content:
```
1 from config.settings.common_settings import * # noqa
2
3 # Override default settings
4
```
Path: `config/settings/production.py`
Content:
```
1 from config.settings.common_settings import * # noqa
2
3 # Override default settings
4
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/config/settings/development.py b/config/settings/development.py
--- a/config/settings/development.py
+++ b/config/settings/development.py
@@ -1,3 +1,10 @@
from config.settings.common_settings import * # noqa
# Override default settings
+
+
+# Use a local.py module for settings that shouldn't be version tracked
+try:
+ from .local import * # noqa
+except ImportError:
+ pass
diff --git a/config/settings/production.py b/config/settings/production.py
--- a/config/settings/production.py
+++ b/config/settings/production.py
@@ -1,3 +1,10 @@
from config.settings.common_settings import * # noqa
# Override default settings
+
+
+# Use a local.py module for settings that shouldn't be version tracked
+try:
+ from .local import * # noqa
+except ImportError:
+ pass
| {"golden_diff": "diff --git a/config/settings/development.py b/config/settings/development.py\n--- a/config/settings/development.py\n+++ b/config/settings/development.py\n@@ -1,3 +1,10 @@\n from config.settings.common_settings import * # noqa\n \n # Override default settings\n+\n+\n+# Use a local.py module for settings that shouldn't be version tracked\n+try:\n+ from .local import * # noqa\n+except ImportError:\n+ pass\ndiff --git a/config/settings/production.py b/config/settings/production.py\n--- a/config/settings/production.py\n+++ b/config/settings/production.py\n@@ -1,3 +1,10 @@\n from config.settings.common_settings import * # noqa\n \n # Override default settings\n+\n+\n+# Use a local.py module for settings that shouldn't be version tracked\n+try:\n+ from .local import * # noqa \n+except ImportError:\n+ pass\n", "issue": "Not really possible to override settings\n## Description\r\nI want to override the mathsar settings in order to allow LDAP login using django-auth-ldap. I changed the `config/settings/production.py` file that mentions: `# Override default settings ` and added the needed configuration. \r\n\r\nThis worked fine however that file is under version control so if it is changed on the origin I'll get a conflict and would need to also merge my changes. The usual way to implement this functionality is to add a *non tracked* `local.py` file that would contain any extra configuration for each environment (either dev or production) and import *that* file from the corresponding file. I.e the production.py would be changed to:\r\n\r\n```python\r\n# Override default settings \r\n\r\ntry:\r\n from .local import *\r\nexcept ImportError:\r\n pass\r\n```\r\n\r\nThis way, if the local.py file is there it will be used to override the config but if it isnt' there it will be ignored. \r\n\r\n## Expected behavior\r\nBeing able to override django settings for my environment *without* keeping a fork.\r\n\r\n## To Reproduce\r\nChange the production.py file and you'll see that it's version controlled so it can't be easily changed!\r\n\r\n## Environment\r\nNot needed\r\n\r\n## Additional context\r\nI'd be happy to provide a PR implementing the functionality described here, i.e allow an untracked local.py file to override django settings for each *user/environment*.\n", "before_files": [{"content": "from config.settings.common_settings import * # noqa\n\n# Override default settings\n", "path": "config/settings/development.py"}, {"content": "from config.settings.common_settings import * # noqa\n\n# Override default settings\n", "path": "config/settings/production.py"}], "after_files": [{"content": "from config.settings.common_settings import * # noqa\n\n# Override default settings\n\n\n# Use a local.py module for settings that shouldn't be version tracked\ntry:\n from .local import * # noqa\nexcept ImportError:\n pass\n", "path": "config/settings/development.py"}, {"content": "from config.settings.common_settings import * # noqa\n\n# Override default settings\n\n\n# Use a local.py module for settings that shouldn't be version tracked\ntry:\n from .local import * # noqa \nexcept ImportError:\n pass\n", "path": "config/settings/production.py"}]} | 608 | 192 |
gh_patches_debug_4675 | rasdani/github-patches | git_diff | pypa__pip-5931 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pip uses deprecated SafeConfigParser
* Pip version: 9.0.1
* Python version: 3.6.1
* Operating system: Mac OS X 10.12.4
### Description:
With `error::DeprecationWarning` in `PYTHONWARNINGS`:
```
pip uninstall -y faker
/Users/davidchudzicki/.cache/hypothesis-build-runtimes/.tox/py36-full/lib/python3.6/site-packages/pip/pep425tags.py:260: DeprecationWarning: the imp module is deprecated in favour of importlib; see the module's documentation for alternative uses
import imp
Exception:
Traceback (most recent call last):
File "/Users/davidchudzicki/.cache/hypothesis-build-runtimes/.tox/py36-full/lib/python3.6/site-packages/pip/basecommand.py", line 215, in main
status = self.run(options, args)
File "/Users/davidchudzicki/.cache/hypothesis-build-runtimes/.tox/py36-full/lib/python3.6/site-packages/pip/commands/uninstall.py", line 76, in run
requirement_set.uninstall(auto_confirm=options.yes)
File "/Users/davidchudzicki/.cache/hypothesis-build-runtimes/.tox/py36-full/lib/python3.6/site-packages/pip/req/req_set.py", line 346, in uninstall
req.uninstall(auto_confirm=auto_confirm)
File "/Users/davidchudzicki/.cache/hypothesis-build-runtimes/.tox/py36-full/lib/python3.6/site-packages/pip/req/req_install.py", line 732, in uninstall
config = configparser.SafeConfigParser(**options)
File "/Users/davidchudzicki/.cache/hypothesis-build-runtimes/versions/python3.6/lib/python3.6/configparser.py", line 1212, in __init__
DeprecationWarning, stacklevel=2
DeprecationWarning: The SafeConfigParser class has been renamed to ConfigParser in Python 3.2. This alias will be removed in future versions. Use ConfigParser directly instead.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pip/_internal/vcs/mercurial.py`
Content:
```
1 from __future__ import absolute_import
2
3 import logging
4 import os
5
6 from pip._vendor.six.moves import configparser
7
8 from pip._internal.download import path_to_url
9 from pip._internal.utils.misc import display_path, make_vcs_requirement_url
10 from pip._internal.utils.temp_dir import TempDirectory
11 from pip._internal.vcs import VersionControl, vcs
12
13 logger = logging.getLogger(__name__)
14
15
16 class Mercurial(VersionControl):
17 name = 'hg'
18 dirname = '.hg'
19 repo_name = 'clone'
20 schemes = ('hg', 'hg+http', 'hg+https', 'hg+ssh', 'hg+static-http')
21
22 def get_base_rev_args(self, rev):
23 return [rev]
24
25 def export(self, location):
26 """Export the Hg repository at the url to the destination location"""
27 with TempDirectory(kind="export") as temp_dir:
28 self.unpack(temp_dir.path)
29
30 self.run_command(
31 ['archive', location], show_stdout=False, cwd=temp_dir.path
32 )
33
34 def fetch_new(self, dest, url, rev_options):
35 rev_display = rev_options.to_display()
36 logger.info(
37 'Cloning hg %s%s to %s',
38 url,
39 rev_display,
40 display_path(dest),
41 )
42 self.run_command(['clone', '--noupdate', '-q', url, dest])
43 cmd_args = ['update', '-q'] + rev_options.to_args()
44 self.run_command(cmd_args, cwd=dest)
45
46 def switch(self, dest, url, rev_options):
47 repo_config = os.path.join(dest, self.dirname, 'hgrc')
48 config = configparser.SafeConfigParser()
49 try:
50 config.read(repo_config)
51 config.set('paths', 'default', url)
52 with open(repo_config, 'w') as config_file:
53 config.write(config_file)
54 except (OSError, configparser.NoSectionError) as exc:
55 logger.warning(
56 'Could not switch Mercurial repository to %s: %s', url, exc,
57 )
58 else:
59 cmd_args = ['update', '-q'] + rev_options.to_args()
60 self.run_command(cmd_args, cwd=dest)
61
62 def update(self, dest, url, rev_options):
63 self.run_command(['pull', '-q'], cwd=dest)
64 cmd_args = ['update', '-q'] + rev_options.to_args()
65 self.run_command(cmd_args, cwd=dest)
66
67 def get_url(self, location):
68 url = self.run_command(
69 ['showconfig', 'paths.default'],
70 show_stdout=False, cwd=location).strip()
71 if self._is_local_repository(url):
72 url = path_to_url(url)
73 return url.strip()
74
75 def get_revision(self, location):
76 current_revision = self.run_command(
77 ['parents', '--template={rev}'],
78 show_stdout=False, cwd=location).strip()
79 return current_revision
80
81 def get_revision_hash(self, location):
82 current_rev_hash = self.run_command(
83 ['parents', '--template={node}'],
84 show_stdout=False, cwd=location).strip()
85 return current_rev_hash
86
87 def get_src_requirement(self, dist, location):
88 repo = self.get_url(location)
89 if not repo.lower().startswith('hg:'):
90 repo = 'hg+' + repo
91 current_rev_hash = self.get_revision_hash(location)
92 egg_project_name = dist.egg_name().split('-', 1)[0]
93 return make_vcs_requirement_url(repo, current_rev_hash,
94 egg_project_name)
95
96 def is_commit_id_equal(self, dest, name):
97 """Always assume the versions don't match"""
98 return False
99
100
101 vcs.register(Mercurial)
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pip/_internal/vcs/mercurial.py b/src/pip/_internal/vcs/mercurial.py
--- a/src/pip/_internal/vcs/mercurial.py
+++ b/src/pip/_internal/vcs/mercurial.py
@@ -45,7 +45,7 @@
def switch(self, dest, url, rev_options):
repo_config = os.path.join(dest, self.dirname, 'hgrc')
- config = configparser.SafeConfigParser()
+ config = configparser.RawConfigParser()
try:
config.read(repo_config)
config.set('paths', 'default', url)
| {"golden_diff": "diff --git a/src/pip/_internal/vcs/mercurial.py b/src/pip/_internal/vcs/mercurial.py\n--- a/src/pip/_internal/vcs/mercurial.py\n+++ b/src/pip/_internal/vcs/mercurial.py\n@@ -45,7 +45,7 @@\n \n def switch(self, dest, url, rev_options):\n repo_config = os.path.join(dest, self.dirname, 'hgrc')\n- config = configparser.SafeConfigParser()\n+ config = configparser.RawConfigParser()\n try:\n config.read(repo_config)\n config.set('paths', 'default', url)\n", "issue": "pip uses deprecated SafeConfigParser\n* Pip version: 9.0.1\r\n* Python version: 3.6.1\r\n* Operating system: Mac OS X 10.12.4\r\n\r\n### Description:\r\n\r\nWith `error::DeprecationWarning` in `PYTHONWARNINGS`:\r\n\r\n```\r\npip uninstall -y faker\r\n/Users/davidchudzicki/.cache/hypothesis-build-runtimes/.tox/py36-full/lib/python3.6/site-packages/pip/pep425tags.py:260: DeprecationWarning: the imp module is deprecated in favour of importlib; see the module's documentation for alternative uses\r\n import imp\r\nException:\r\nTraceback (most recent call last):\r\n File \"/Users/davidchudzicki/.cache/hypothesis-build-runtimes/.tox/py36-full/lib/python3.6/site-packages/pip/basecommand.py\", line 215, in main\r\n status = self.run(options, args)\r\n File \"/Users/davidchudzicki/.cache/hypothesis-build-runtimes/.tox/py36-full/lib/python3.6/site-packages/pip/commands/uninstall.py\", line 76, in run\r\n requirement_set.uninstall(auto_confirm=options.yes)\r\n File \"/Users/davidchudzicki/.cache/hypothesis-build-runtimes/.tox/py36-full/lib/python3.6/site-packages/pip/req/req_set.py\", line 346, in uninstall\r\n req.uninstall(auto_confirm=auto_confirm)\r\n File \"/Users/davidchudzicki/.cache/hypothesis-build-runtimes/.tox/py36-full/lib/python3.6/site-packages/pip/req/req_install.py\", line 732, in uninstall\r\n config = configparser.SafeConfigParser(**options)\r\n File \"/Users/davidchudzicki/.cache/hypothesis-build-runtimes/versions/python3.6/lib/python3.6/configparser.py\", line 1212, in __init__\r\n DeprecationWarning, stacklevel=2\r\nDeprecationWarning: The SafeConfigParser class has been renamed to ConfigParser in Python 3.2. This alias will be removed in future versions. 
Use ConfigParser directly instead.\r\n```\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport os\n\nfrom pip._vendor.six.moves import configparser\n\nfrom pip._internal.download import path_to_url\nfrom pip._internal.utils.misc import display_path, make_vcs_requirement_url\nfrom pip._internal.utils.temp_dir import TempDirectory\nfrom pip._internal.vcs import VersionControl, vcs\n\nlogger = logging.getLogger(__name__)\n\n\nclass Mercurial(VersionControl):\n name = 'hg'\n dirname = '.hg'\n repo_name = 'clone'\n schemes = ('hg', 'hg+http', 'hg+https', 'hg+ssh', 'hg+static-http')\n\n def get_base_rev_args(self, rev):\n return [rev]\n\n def export(self, location):\n \"\"\"Export the Hg repository at the url to the destination location\"\"\"\n with TempDirectory(kind=\"export\") as temp_dir:\n self.unpack(temp_dir.path)\n\n self.run_command(\n ['archive', location], show_stdout=False, cwd=temp_dir.path\n )\n\n def fetch_new(self, dest, url, rev_options):\n rev_display = rev_options.to_display()\n logger.info(\n 'Cloning hg %s%s to %s',\n url,\n rev_display,\n display_path(dest),\n )\n self.run_command(['clone', '--noupdate', '-q', url, dest])\n cmd_args = ['update', '-q'] + rev_options.to_args()\n self.run_command(cmd_args, cwd=dest)\n\n def switch(self, dest, url, rev_options):\n repo_config = os.path.join(dest, self.dirname, 'hgrc')\n config = configparser.SafeConfigParser()\n try:\n config.read(repo_config)\n config.set('paths', 'default', url)\n with open(repo_config, 'w') as config_file:\n config.write(config_file)\n except (OSError, configparser.NoSectionError) as exc:\n logger.warning(\n 'Could not switch Mercurial repository to %s: %s', url, exc,\n )\n else:\n cmd_args = ['update', '-q'] + rev_options.to_args()\n self.run_command(cmd_args, cwd=dest)\n\n def update(self, dest, url, rev_options):\n self.run_command(['pull', '-q'], cwd=dest)\n cmd_args = ['update', '-q'] + rev_options.to_args()\n self.run_command(cmd_args, cwd=dest)\n\n def get_url(self, location):\n url = self.run_command(\n ['showconfig', 'paths.default'],\n show_stdout=False, cwd=location).strip()\n if self._is_local_repository(url):\n url = path_to_url(url)\n return url.strip()\n\n def get_revision(self, location):\n current_revision = self.run_command(\n ['parents', '--template={rev}'],\n show_stdout=False, cwd=location).strip()\n return current_revision\n\n def get_revision_hash(self, location):\n current_rev_hash = self.run_command(\n ['parents', '--template={node}'],\n show_stdout=False, cwd=location).strip()\n return current_rev_hash\n\n def get_src_requirement(self, dist, location):\n repo = self.get_url(location)\n if not repo.lower().startswith('hg:'):\n repo = 'hg+' + repo\n current_rev_hash = self.get_revision_hash(location)\n egg_project_name = dist.egg_name().split('-', 1)[0]\n return make_vcs_requirement_url(repo, current_rev_hash,\n egg_project_name)\n\n def is_commit_id_equal(self, dest, name):\n \"\"\"Always assume the versions don't match\"\"\"\n return False\n\n\nvcs.register(Mercurial)\n", "path": "src/pip/_internal/vcs/mercurial.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport os\n\nfrom pip._vendor.six.moves import configparser\n\nfrom pip._internal.download import path_to_url\nfrom pip._internal.utils.misc import display_path, make_vcs_requirement_url\nfrom pip._internal.utils.temp_dir import TempDirectory\nfrom pip._internal.vcs import VersionControl, vcs\n\nlogger = 
logging.getLogger(__name__)\n\n\nclass Mercurial(VersionControl):\n name = 'hg'\n dirname = '.hg'\n repo_name = 'clone'\n schemes = ('hg', 'hg+http', 'hg+https', 'hg+ssh', 'hg+static-http')\n\n def get_base_rev_args(self, rev):\n return [rev]\n\n def export(self, location):\n \"\"\"Export the Hg repository at the url to the destination location\"\"\"\n with TempDirectory(kind=\"export\") as temp_dir:\n self.unpack(temp_dir.path)\n\n self.run_command(\n ['archive', location], show_stdout=False, cwd=temp_dir.path\n )\n\n def fetch_new(self, dest, url, rev_options):\n rev_display = rev_options.to_display()\n logger.info(\n 'Cloning hg %s%s to %s',\n url,\n rev_display,\n display_path(dest),\n )\n self.run_command(['clone', '--noupdate', '-q', url, dest])\n cmd_args = ['update', '-q'] + rev_options.to_args()\n self.run_command(cmd_args, cwd=dest)\n\n def switch(self, dest, url, rev_options):\n repo_config = os.path.join(dest, self.dirname, 'hgrc')\n config = configparser.RawConfigParser()\n try:\n config.read(repo_config)\n config.set('paths', 'default', url)\n with open(repo_config, 'w') as config_file:\n config.write(config_file)\n except (OSError, configparser.NoSectionError) as exc:\n logger.warning(\n 'Could not switch Mercurial repository to %s: %s', url, exc,\n )\n else:\n cmd_args = ['update', '-q'] + rev_options.to_args()\n self.run_command(cmd_args, cwd=dest)\n\n def update(self, dest, url, rev_options):\n self.run_command(['pull', '-q'], cwd=dest)\n cmd_args = ['update', '-q'] + rev_options.to_args()\n self.run_command(cmd_args, cwd=dest)\n\n def get_url(self, location):\n url = self.run_command(\n ['showconfig', 'paths.default'],\n show_stdout=False, cwd=location).strip()\n if self._is_local_repository(url):\n url = path_to_url(url)\n return url.strip()\n\n def get_revision(self, location):\n current_revision = self.run_command(\n ['parents', '--template={rev}'],\n show_stdout=False, cwd=location).strip()\n return current_revision\n\n def get_revision_hash(self, location):\n current_rev_hash = self.run_command(\n ['parents', '--template={node}'],\n show_stdout=False, cwd=location).strip()\n return current_rev_hash\n\n def get_src_requirement(self, dist, location):\n repo = self.get_url(location)\n if not repo.lower().startswith('hg:'):\n repo = 'hg+' + repo\n current_rev_hash = self.get_revision_hash(location)\n egg_project_name = dist.egg_name().split('-', 1)[0]\n return make_vcs_requirement_url(repo, current_rev_hash,\n egg_project_name)\n\n def is_commit_id_equal(self, dest, name):\n \"\"\"Always assume the versions don't match\"\"\"\n return False\n\n\nvcs.register(Mercurial)\n", "path": "src/pip/_internal/vcs/mercurial.py"}]} | 1,756 | 140 |
gh_patches_debug_7285 | rasdani/github-patches | git_diff | spotify__luigi-700 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug on luigi.contrib.ftp.AtomicFtpfile
on luigi.contrib.ftp.AtomicFtpfile, self.path is accessed before being set.
```
luigi/contrib/ftp.py Line 170 Character 26
167: """
168:
169: def __init__(self, fs, path):
170: self.__tmp_path = self.path + '-luigi-tmp-%09d' % random.randrange(0, 1e10)
Access to member 'path' before its definition line 172
171: self._fs = fs
172: self.path = path
173: super(AtomicFtpfile, self).__init__(self.__tmp_path, 'w')
```
captured by [landscape.io](https://landscape.io/github/steenzout/luigi/4/messages/error)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `luigi/contrib/ftp.py`
Content:
```
1 """
2 This library is a wrapper of ftplib.
3 It is convenient to move data from/to FTP.
4
5 There is an example on how to use it (example/ftp_experiment_outputs.py)
6
7 You can also find unittest for each class.
8
9 Be aware that normal ftp do not provide secure communication.
10 """
11 import datetime
12 import os
13 import random
14 import ftplib
15 import luigi
16 import luigi.target
17 import luigi.format
18 from luigi.format import FileWrapper
19
20
21 class RemoteFileSystem(luigi.target.FileSystem):
22
23 def __init__(self, host, username=None, password=None, port=21, tls=False):
24 self.host = host
25 self.username = username
26 self.password = password
27 self.port = port
28 self.tls = tls
29
30 def _connect(self):
31 """
32 Log in to ftp.
33 """
34 if self.tls:
35 self.ftpcon = ftplib.FTP_TLS()
36 else:
37 self.ftpcon = ftplib.FTP()
38 self.ftpcon.connect(self.host, self.port)
39 self.ftpcon.login(self.username, self.password)
40 if self.tls:
41 self.ftpcon.prot_p()
42
43 def exists(self, path, mtime=None):
44 """
45 Return `True` if file or directory at `path` exist, False otherwise.
46
47 Additional check on modified time when mtime is passed in.
48
49 Return False if the file's modified time is older mtime.
50 """
51 self._connect()
52 files = self.ftpcon.nlst(path)
53
54 result = False
55 if files:
56 if mtime:
57 mdtm = self.ftpcon.sendcmd('MDTM ' + path)
58 modified = datetime.datetime.strptime(mdtm[4:], "%Y%m%d%H%M%S")
59 result = modified > mtime
60 else:
61 result = True
62
63 self.ftpcon.quit()
64
65 return result
66
67 def _rm_recursive(self, ftp, path):
68 """
69 Recursively delete a directory tree on a remote server.
70
71 Source: https://gist.github.com/artlogic/2632647
72 """
73 wd = ftp.pwd()
74
75 try:
76 names = ftp.nlst(path)
77 except ftplib.all_errors as e:
78 # some FTP servers complain when you try and list non-existent paths
79 return
80
81 for name in names:
82 if os.path.split(name)[1] in ('.', '..'):
83 continue
84
85 try:
86 ftp.cwd(name) # if we can cwd to it, it's a folder
87 ftp.cwd(wd) # don't try a nuke a folder we're in
88 self._rm_recursive(ftp, name)
89 except ftplib.all_errors:
90 ftp.delete(name)
91
92 try:
93 ftp.rmd(path)
94 except ftplib.all_errors as e:
95 print('_rm_recursive: Could not remove {0}: {1}'.format(path, e))
96
97 def remove(self, path, recursive=True):
98 """
99 Remove file or directory at location ``path``.
100
101 :param path: a path within the FileSystem to remove.
102 :type path: str
103 :param recursive: if the path is a directory, recursively remove the directory and
104 all of its descendants. Defaults to ``True``.
105 :type recursive: bool
106 """
107 self._connect()
108
109 if recursive:
110 self._rm_recursive(self.ftpcon, path)
111 else:
112 try:
113 # try delete file
114 self.ftpcon.delete(path)
115 except ftplib.all_errors:
116 # it is a folder, delete it
117 self.ftpcon.rmd(path)
118
119 self.ftpcon.quit()
120
121 def put(self, local_path, path):
122 # create parent folder if not exists
123 self._connect()
124
125 normpath = os.path.normpath(path)
126 folder = os.path.dirname(normpath)
127
128 # create paths if do not exists
129 for subfolder in folder.split(os.sep):
130 if subfolder and subfolder not in self.ftpcon.nlst():
131 self.ftpcon.mkd(subfolder)
132
133 self.ftpcon.cwd(subfolder)
134
135 # go back to ftp root folder
136 self.ftpcon.cwd("/")
137
138 # random file name
139 tmp_path = folder + os.sep + 'luigi-tmp-%09d' % random.randrange(0, 1e10)
140
141 self.ftpcon.storbinary('STOR %s' % tmp_path, open(local_path, 'rb'))
142 self.ftpcon.rename(tmp_path, normpath)
143
144 self.ftpcon.quit()
145
146 def get(self, path, local_path):
147 # Create folder if it does not exist
148 normpath = os.path.normpath(local_path)
149 folder = os.path.dirname(normpath)
150 if folder and not os.path.exists(folder):
151 os.makedirs(folder)
152
153 tmp_local_path = local_path + '-luigi-tmp-%09d' % random.randrange(0, 1e10)
154 # download file
155 self._connect()
156 self.ftpcon.retrbinary('RETR %s' % path, open(tmp_local_path, 'wb').write)
157 self.ftpcon.quit()
158
159 os.rename(tmp_local_path, local_path)
160
161
162 class AtomicFtpfile(file):
163 """
164 Simple class that writes to a temp file and upload to ftp on close().
165
166 Also cleans up the temp file if close is not invoked.
167 """
168
169 def __init__(self, fs, path):
170 self.__tmp_path = self.path + '-luigi-tmp-%09d' % random.randrange(0, 1e10)
171 self._fs = fs
172 self.path = path
173 super(AtomicFtpfile, self).__init__(self.__tmp_path, 'w')
174
175 def close(self):
176 # close and upload file to ftp
177 super(AtomicFtpfile, self).close()
178 self._fs.put(self.__tmp_path, self.path)
179 os.remove(self.__tmp_path)
180
181 def __del__(self):
182 if os.path.exists(self.__tmp_path):
183 os.remove(self.__tmp_path)
184
185 @property
186 def tmp_path(self):
187 return self.__tmp_path
188
189 @property
190 def fs(self):
191 return self._fs
192
193 def __exit__(self, exc_type, exc, traceback):
194 """
195 Close/commit the file if there are no exception
196
197 Upload file to ftp
198 """
199 if exc_type:
200 return
201 return file.__exit__(self, exc_type, exc, traceback)
202
203
204 class RemoteTarget(luigi.target.FileSystemTarget):
205 """
206 Target used for reading from remote files.
207
208 The target is implemented using ssh commands streaming data over the network.
209 """
210
211 def __init__(self, path, host, format=None, username=None, password=None, port=21, mtime=None, tls=False):
212 self.path = path
213 self.mtime = mtime
214 self.format = format
215 self.tls = tls
216 self._fs = RemoteFileSystem(host, username, password, port, tls)
217
218 @property
219 def fs(self):
220 return self._fs
221
222 def open(self, mode):
223 """
224 Open the FileSystem target.
225
226 This method returns a file-like object which can either be read from or written to depending
227 on the specified mode.
228
229 :param mode: the mode `r` opens the FileSystemTarget in read-only mode, whereas `w` will
230 open the FileSystemTarget in write mode. Subclasses can implement
231 additional options.
232 :type mode: str
233 """
234 if mode == 'w':
235 if self.format:
236 return self.format.pipe_writer(AtomicFtpfile(self._fs, self.path))
237 else:
238 return AtomicFtpfile(self._fs, self.path)
239
240 elif mode == 'r':
241 self.__tmp_path = self.path + '-luigi-tmp-%09d' % random.randrange(0, 1e10)
242 # download file to local
243 self._fs.get(self.path, self.__tmp_path)
244
245 # manage tmp file
246 fileobj = FileWrapper(open(self.__tmp_path, 'r'))
247 if self.format:
248 return self.format.pipe_reader(fileobj)
249 return fileobj
250 else:
251 raise Exception('mode must be r/w')
252
253 def exists(self):
254 return self.fs.exists(self.path, self.mtime)
255
256 def put(self, local_path):
257 self.fs.put(local_path, self.path)
258
259 def get(self, local_path):
260 self.fs.get(self.path, local_path)
261
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/luigi/contrib/ftp.py b/luigi/contrib/ftp.py
--- a/luigi/contrib/ftp.py
+++ b/luigi/contrib/ftp.py
@@ -167,7 +167,14 @@
"""
def __init__(self, fs, path):
- self.__tmp_path = self.path + '-luigi-tmp-%09d' % random.randrange(0, 1e10)
+ """
+ Initializes an AtomicFtpfile instance.
+
+ :param fs:
+ :param path:
+ :type path: str
+ """
+ self.__tmp_path = '%s-luigi-tmp-%09d' % (path, random.randrange(0, 1e10))
self._fs = fs
self.path = path
super(AtomicFtpfile, self).__init__(self.__tmp_path, 'w')
| {"golden_diff": "diff --git a/luigi/contrib/ftp.py b/luigi/contrib/ftp.py\n--- a/luigi/contrib/ftp.py\n+++ b/luigi/contrib/ftp.py\n@@ -167,7 +167,14 @@\n \"\"\"\n \n def __init__(self, fs, path):\n- self.__tmp_path = self.path + '-luigi-tmp-%09d' % random.randrange(0, 1e10)\n+ \"\"\"\n+ Initializes an AtomicFtpfile instance.\n+\n+ :param fs:\n+ :param path:\n+ :type path: str\n+ \"\"\"\n+ self.__tmp_path = '%s-luigi-tmp-%09d' % (path, random.randrange(0, 1e10))\n self._fs = fs\n self.path = path\n super(AtomicFtpfile, self).__init__(self.__tmp_path, 'w')\n", "issue": "bug on luigi.contrib.ftp.AtomicFtpfile\non luigi.contrib.ftp.AtomicFtpfile, self.path is accessed before being set.\n\n```\nluigi/contrib/ftp.py Line 170 Character 26\n167: \"\"\"\n168: \n169: def __init__(self, fs, path):\n170: self.__tmp_path = self.path + '-luigi-tmp-%09d' % random.randrange(0, 1e10)\n Access to member 'path' before its definition line 172\n171: self._fs = fs\n172: self.path = path\n173: super(AtomicFtpfile, self).__init__(self.__tmp_path, 'w')\n```\n\ncaptured by [landscape.io](https://landscape.io/github/steenzout/luigi/4/messages/error)\n\n", "before_files": [{"content": "\"\"\"\nThis library is a wrapper of ftplib.\nIt is convenient to move data from/to FTP.\n\nThere is an example on how to use it (example/ftp_experiment_outputs.py)\n\nYou can also find unittest for each class.\n\nBe aware that normal ftp do not provide secure communication.\n\"\"\"\nimport datetime\nimport os\nimport random\nimport ftplib\nimport luigi\nimport luigi.target\nimport luigi.format\nfrom luigi.format import FileWrapper\n\n\nclass RemoteFileSystem(luigi.target.FileSystem):\n\n def __init__(self, host, username=None, password=None, port=21, tls=False):\n self.host = host\n self.username = username\n self.password = password\n self.port = port\n self.tls = tls\n\n def _connect(self):\n \"\"\"\n Log in to ftp.\n \"\"\"\n if self.tls:\n self.ftpcon = ftplib.FTP_TLS()\n else:\n self.ftpcon = ftplib.FTP()\n self.ftpcon.connect(self.host, self.port)\n self.ftpcon.login(self.username, self.password)\n if self.tls:\n self.ftpcon.prot_p()\n\n def exists(self, path, mtime=None):\n \"\"\"\n Return `True` if file or directory at `path` exist, False otherwise.\n\n Additional check on modified time when mtime is passed in.\n\n Return False if the file's modified time is older mtime.\n \"\"\"\n self._connect()\n files = self.ftpcon.nlst(path)\n\n result = False\n if files:\n if mtime:\n mdtm = self.ftpcon.sendcmd('MDTM ' + path)\n modified = datetime.datetime.strptime(mdtm[4:], \"%Y%m%d%H%M%S\")\n result = modified > mtime\n else:\n result = True\n\n self.ftpcon.quit()\n\n return result\n\n def _rm_recursive(self, ftp, path):\n \"\"\"\n Recursively delete a directory tree on a remote server.\n\n Source: https://gist.github.com/artlogic/2632647\n \"\"\"\n wd = ftp.pwd()\n\n try:\n names = ftp.nlst(path)\n except ftplib.all_errors as e:\n # some FTP servers complain when you try and list non-existent paths\n return\n\n for name in names:\n if os.path.split(name)[1] in ('.', '..'):\n continue\n\n try:\n ftp.cwd(name) # if we can cwd to it, it's a folder\n ftp.cwd(wd) # don't try a nuke a folder we're in\n self._rm_recursive(ftp, name)\n except ftplib.all_errors:\n ftp.delete(name)\n\n try:\n ftp.rmd(path)\n except ftplib.all_errors as e:\n print('_rm_recursive: Could not remove {0}: {1}'.format(path, e))\n\n def remove(self, path, recursive=True):\n \"\"\"\n Remove file or directory at location ``path``.\n\n :param path: a path within the 
FileSystem to remove.\n :type path: str\n :param recursive: if the path is a directory, recursively remove the directory and\n all of its descendants. Defaults to ``True``.\n :type recursive: bool\n \"\"\"\n self._connect()\n\n if recursive:\n self._rm_recursive(self.ftpcon, path)\n else:\n try:\n # try delete file\n self.ftpcon.delete(path)\n except ftplib.all_errors:\n # it is a folder, delete it\n self.ftpcon.rmd(path)\n\n self.ftpcon.quit()\n\n def put(self, local_path, path):\n # create parent folder if not exists\n self._connect()\n\n normpath = os.path.normpath(path)\n folder = os.path.dirname(normpath)\n\n # create paths if do not exists\n for subfolder in folder.split(os.sep):\n if subfolder and subfolder not in self.ftpcon.nlst():\n self.ftpcon.mkd(subfolder)\n\n self.ftpcon.cwd(subfolder)\n\n # go back to ftp root folder\n self.ftpcon.cwd(\"/\")\n\n # random file name\n tmp_path = folder + os.sep + 'luigi-tmp-%09d' % random.randrange(0, 1e10)\n\n self.ftpcon.storbinary('STOR %s' % tmp_path, open(local_path, 'rb'))\n self.ftpcon.rename(tmp_path, normpath)\n\n self.ftpcon.quit()\n\n def get(self, path, local_path):\n # Create folder if it does not exist\n normpath = os.path.normpath(local_path)\n folder = os.path.dirname(normpath)\n if folder and not os.path.exists(folder):\n os.makedirs(folder)\n\n tmp_local_path = local_path + '-luigi-tmp-%09d' % random.randrange(0, 1e10)\n # download file\n self._connect()\n self.ftpcon.retrbinary('RETR %s' % path, open(tmp_local_path, 'wb').write)\n self.ftpcon.quit()\n\n os.rename(tmp_local_path, local_path)\n\n\nclass AtomicFtpfile(file):\n \"\"\"\n Simple class that writes to a temp file and upload to ftp on close().\n\n Also cleans up the temp file if close is not invoked.\n \"\"\"\n\n def __init__(self, fs, path):\n self.__tmp_path = self.path + '-luigi-tmp-%09d' % random.randrange(0, 1e10)\n self._fs = fs\n self.path = path\n super(AtomicFtpfile, self).__init__(self.__tmp_path, 'w')\n\n def close(self):\n # close and upload file to ftp\n super(AtomicFtpfile, self).close()\n self._fs.put(self.__tmp_path, self.path)\n os.remove(self.__tmp_path)\n\n def __del__(self):\n if os.path.exists(self.__tmp_path):\n os.remove(self.__tmp_path)\n\n @property\n def tmp_path(self):\n return self.__tmp_path\n\n @property\n def fs(self):\n return self._fs\n\n def __exit__(self, exc_type, exc, traceback):\n \"\"\"\n Close/commit the file if there are no exception\n\n Upload file to ftp\n \"\"\"\n if exc_type:\n return\n return file.__exit__(self, exc_type, exc, traceback)\n\n\nclass RemoteTarget(luigi.target.FileSystemTarget):\n \"\"\"\n Target used for reading from remote files.\n\n The target is implemented using ssh commands streaming data over the network.\n \"\"\"\n\n def __init__(self, path, host, format=None, username=None, password=None, port=21, mtime=None, tls=False):\n self.path = path\n self.mtime = mtime\n self.format = format\n self.tls = tls\n self._fs = RemoteFileSystem(host, username, password, port, tls)\n\n @property\n def fs(self):\n return self._fs\n\n def open(self, mode):\n \"\"\"\n Open the FileSystem target.\n\n This method returns a file-like object which can either be read from or written to depending\n on the specified mode.\n\n :param mode: the mode `r` opens the FileSystemTarget in read-only mode, whereas `w` will\n open the FileSystemTarget in write mode. 
Subclasses can implement\n additional options.\n :type mode: str\n \"\"\"\n if mode == 'w':\n if self.format:\n return self.format.pipe_writer(AtomicFtpfile(self._fs, self.path))\n else:\n return AtomicFtpfile(self._fs, self.path)\n\n elif mode == 'r':\n self.__tmp_path = self.path + '-luigi-tmp-%09d' % random.randrange(0, 1e10)\n # download file to local\n self._fs.get(self.path, self.__tmp_path)\n\n # manage tmp file\n fileobj = FileWrapper(open(self.__tmp_path, 'r'))\n if self.format:\n return self.format.pipe_reader(fileobj)\n return fileobj\n else:\n raise Exception('mode must be r/w')\n\n def exists(self):\n return self.fs.exists(self.path, self.mtime)\n\n def put(self, local_path):\n self.fs.put(local_path, self.path)\n\n def get(self, local_path):\n self.fs.get(self.path, local_path)\n", "path": "luigi/contrib/ftp.py"}], "after_files": [{"content": "\"\"\"\nThis library is a wrapper of ftplib.\nIt is convenient to move data from/to FTP.\n\nThere is an example on how to use it (example/ftp_experiment_outputs.py)\n\nYou can also find unittest for each class.\n\nBe aware that normal ftp do not provide secure communication.\n\"\"\"\nimport datetime\nimport os\nimport random\nimport ftplib\nimport luigi\nimport luigi.target\nimport luigi.format\nfrom luigi.format import FileWrapper\n\n\nclass RemoteFileSystem(luigi.target.FileSystem):\n\n def __init__(self, host, username=None, password=None, port=21, tls=False):\n self.host = host\n self.username = username\n self.password = password\n self.port = port\n self.tls = tls\n\n def _connect(self):\n \"\"\"\n Log in to ftp.\n \"\"\"\n if self.tls:\n self.ftpcon = ftplib.FTP_TLS()\n else:\n self.ftpcon = ftplib.FTP()\n self.ftpcon.connect(self.host, self.port)\n self.ftpcon.login(self.username, self.password)\n if self.tls:\n self.ftpcon.prot_p()\n\n def exists(self, path, mtime=None):\n \"\"\"\n Return `True` if file or directory at `path` exist, False otherwise.\n\n Additional check on modified time when mtime is passed in.\n\n Return False if the file's modified time is older mtime.\n \"\"\"\n self._connect()\n files = self.ftpcon.nlst(path)\n\n result = False\n if files:\n if mtime:\n mdtm = self.ftpcon.sendcmd('MDTM ' + path)\n modified = datetime.datetime.strptime(mdtm[4:], \"%Y%m%d%H%M%S\")\n result = modified > mtime\n else:\n result = True\n\n self.ftpcon.quit()\n\n return result\n\n def _rm_recursive(self, ftp, path):\n \"\"\"\n Recursively delete a directory tree on a remote server.\n\n Source: https://gist.github.com/artlogic/2632647\n \"\"\"\n wd = ftp.pwd()\n\n try:\n names = ftp.nlst(path)\n except ftplib.all_errors as e:\n # some FTP servers complain when you try and list non-existent paths\n return\n\n for name in names:\n if os.path.split(name)[1] in ('.', '..'):\n continue\n\n try:\n ftp.cwd(name) # if we can cwd to it, it's a folder\n ftp.cwd(wd) # don't try a nuke a folder we're in\n self._rm_recursive(ftp, name)\n except ftplib.all_errors:\n ftp.delete(name)\n\n try:\n ftp.rmd(path)\n except ftplib.all_errors as e:\n print('_rm_recursive: Could not remove {0}: {1}'.format(path, e))\n\n def remove(self, path, recursive=True):\n \"\"\"\n Remove file or directory at location ``path``.\n\n :param path: a path within the FileSystem to remove.\n :type path: str\n :param recursive: if the path is a directory, recursively remove the directory and\n all of its descendants. 
Defaults to ``True``.\n :type recursive: bool\n \"\"\"\n self._connect()\n\n if recursive:\n self._rm_recursive(self.ftpcon, path)\n else:\n try:\n # try delete file\n self.ftpcon.delete(path)\n except ftplib.all_errors:\n # it is a folder, delete it\n self.ftpcon.rmd(path)\n\n self.ftpcon.quit()\n\n def put(self, local_path, path):\n # create parent folder if not exists\n self._connect()\n\n normpath = os.path.normpath(path)\n folder = os.path.dirname(normpath)\n\n # create paths if do not exists\n for subfolder in folder.split(os.sep):\n if subfolder and subfolder not in self.ftpcon.nlst():\n self.ftpcon.mkd(subfolder)\n\n self.ftpcon.cwd(subfolder)\n\n # go back to ftp root folder\n self.ftpcon.cwd(\"/\")\n\n # random file name\n tmp_path = folder + os.sep + 'luigi-tmp-%09d' % random.randrange(0, 1e10)\n\n self.ftpcon.storbinary('STOR %s' % tmp_path, open(local_path, 'rb'))\n self.ftpcon.rename(tmp_path, normpath)\n\n self.ftpcon.quit()\n\n def get(self, path, local_path):\n # Create folder if it does not exist\n normpath = os.path.normpath(local_path)\n folder = os.path.dirname(normpath)\n if folder and not os.path.exists(folder):\n os.makedirs(folder)\n\n tmp_local_path = local_path + '-luigi-tmp-%09d' % random.randrange(0, 1e10)\n # download file\n self._connect()\n self.ftpcon.retrbinary('RETR %s' % path, open(tmp_local_path, 'wb').write)\n self.ftpcon.quit()\n\n os.rename(tmp_local_path, local_path)\n\n\nclass AtomicFtpfile(file):\n \"\"\"\n Simple class that writes to a temp file and upload to ftp on close().\n\n Also cleans up the temp file if close is not invoked.\n \"\"\"\n\n def __init__(self, fs, path):\n \"\"\"\n Initializes an AtomicFtpfile instance.\n\n :param fs:\n :param path:\n :type path: str\n \"\"\"\n self.__tmp_path = '%s-luigi-tmp-%09d' % (path, random.randrange(0, 1e10))\n self._fs = fs\n self.path = path\n super(AtomicFtpfile, self).__init__(self.__tmp_path, 'w')\n\n def close(self):\n # close and upload file to ftp\n super(AtomicFtpfile, self).close()\n self._fs.put(self.__tmp_path, self.path)\n os.remove(self.__tmp_path)\n\n def __del__(self):\n if os.path.exists(self.__tmp_path):\n os.remove(self.__tmp_path)\n\n @property\n def tmp_path(self):\n return self.__tmp_path\n\n @property\n def fs(self):\n return self._fs\n\n def __exit__(self, exc_type, exc, traceback):\n \"\"\"\n Close/commit the file if there are no exception\n\n Upload file to ftp\n \"\"\"\n if exc_type:\n return\n return file.__exit__(self, exc_type, exc, traceback)\n\n\nclass RemoteTarget(luigi.target.FileSystemTarget):\n \"\"\"\n Target used for reading from remote files.\n\n The target is implemented using ssh commands streaming data over the network.\n \"\"\"\n\n def __init__(self, path, host, format=None, username=None, password=None, port=21, mtime=None, tls=False):\n self.path = path\n self.mtime = mtime\n self.format = format\n self.tls = tls\n self._fs = RemoteFileSystem(host, username, password, port, tls)\n\n @property\n def fs(self):\n return self._fs\n\n def open(self, mode):\n \"\"\"\n Open the FileSystem target.\n\n This method returns a file-like object which can either be read from or written to depending\n on the specified mode.\n\n :param mode: the mode `r` opens the FileSystemTarget in read-only mode, whereas `w` will\n open the FileSystemTarget in write mode. 
Subclasses can implement\n additional options.\n :type mode: str\n \"\"\"\n if mode == 'w':\n if self.format:\n return self.format.pipe_writer(AtomicFtpfile(self._fs, self.path))\n else:\n return AtomicFtpfile(self._fs, self.path)\n\n elif mode == 'r':\n self.__tmp_path = self.path + '-luigi-tmp-%09d' % random.randrange(0, 1e10)\n # download file to local\n self._fs.get(self.path, self.__tmp_path)\n\n # manage tmp file\n fileobj = FileWrapper(open(self.__tmp_path, 'r'))\n if self.format:\n return self.format.pipe_reader(fileobj)\n return fileobj\n else:\n raise Exception('mode must be r/w')\n\n def exists(self):\n return self.fs.exists(self.path, self.mtime)\n\n def put(self, local_path):\n self.fs.put(local_path, self.path)\n\n def get(self, local_path):\n self.fs.get(self.path, local_path)\n", "path": "luigi/contrib/ftp.py"}]} | 3,001 | 209 |
gh_patches_debug_3922 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-773 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Hide TensorBoard service REST call logs
Currently these logs appear in the master pod's log, which is not necessary at the user level:
```
I0624 15:50:54.834580 140272641951488 _internal.py:122] ::ffff:10.138.0.35 - - [24/Jun/2019 15:50:54] "GET /data/plugin/scalars/tags HTTP/1.1" 200 -
W0624 15:50:55.150964 140556554397440 servicer.py:195] Task result for outdated version 30 dropped
I0624 15:50:57.245738 140272633558784 _internal.py:122] ::ffff:10.36.1.1 - - [24/Jun/2019 15:50:57] "GET /data/environment HTTP/1.1" 200 -
I0624 15:50:57.250612 140271752836864 _internal.py:122] ::ffff:10.138.0.35 - - [24/Jun/2019 15:50:57] "GET /data/experiments HTTP/1.1" 200 -
I0624 15:50:57.252254 140272105903872 _internal.py:122] ::ffff:10.36.1.1 - - [24/Jun/2019 15:50:57] "GET /data/runs HTTP/1.1" 200 -
I0624 15:50:57.255696 140272641951488 _internal.py:122] ::ffff:10.138.0.35 - - [24/Jun/2019 15:50:57] "GET /data/plugins_listing HTTP/1.1" 200 -
I0624 15:50:57.430979 140272641951488 _internal.py:122] ::ffff:10.138.0.35 - - [24/Jun/2019 15:50:57] "GET /data/plugin/scalars/tags HTTP/1.1" 200 -
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl/python/elasticdl/master/tensorboard_service.py`
Content:
```
1 import datetime
2 import subprocess
3 import time
4
5 import tensorflow as tf
6
7 import numpy as np
8
9
10 class TensorboardService(object):
11 """Tensorboard Service implementation"""
12
13 def __init__(self, tensorboard_log_dir):
14 """
15 Arguments:
16 tensorboard_log_dir: The log directory for Tensorboard.
17 """
18 _current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
19 self._tensorboard_log_dir = tensorboard_log_dir + _current_time
20 self._initialize_summary_writer()
21 self.tb_process = None
22
23 def _initialize_summary_writer(self):
24 self.summary_writer = tf.summary.create_file_writer(
25 self._tensorboard_log_dir
26 )
27
28 def write_dict_to_summary(self, dictionary, version):
29 with self.summary_writer.as_default():
30 for k, v in dictionary.items():
31 if isinstance(v, np.ndarray) and len(v) == 1:
32 v = v[0]
33 tf.summary.scalar(k, v, step=version)
34
35 def start(self):
36 self.tb_process = subprocess.Popen(
37 ["tensorboard --logdir " + self._tensorboard_log_dir], shell=True
38 )
39
40 def keep_running(self):
41 while self.tb_process.poll() is None:
42 time.sleep(10)
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticdl/python/elasticdl/master/tensorboard_service.py b/elasticdl/python/elasticdl/master/tensorboard_service.py
--- a/elasticdl/python/elasticdl/master/tensorboard_service.py
+++ b/elasticdl/python/elasticdl/master/tensorboard_service.py
@@ -34,7 +34,10 @@
def start(self):
self.tb_process = subprocess.Popen(
- ["tensorboard --logdir " + self._tensorboard_log_dir], shell=True
+ ["tensorboard --logdir " + self._tensorboard_log_dir],
+ shell=True,
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.STDOUT,
)
def keep_running(self):
| {"golden_diff": "diff --git a/elasticdl/python/elasticdl/master/tensorboard_service.py b/elasticdl/python/elasticdl/master/tensorboard_service.py\n--- a/elasticdl/python/elasticdl/master/tensorboard_service.py\n+++ b/elasticdl/python/elasticdl/master/tensorboard_service.py\n@@ -34,7 +34,10 @@\n \n def start(self):\n self.tb_process = subprocess.Popen(\n- [\"tensorboard --logdir \" + self._tensorboard_log_dir], shell=True\n+ [\"tensorboard --logdir \" + self._tensorboard_log_dir],\n+ shell=True,\n+ stdout=subprocess.DEVNULL,\n+ stderr=subprocess.STDOUT,\n )\n \n def keep_running(self):\n", "issue": "Hide TensorBoard service REST call logs\nCurrently these logs appear on master pod's log which is not necessary at user level:\r\n```\r\nI0624 15:50:54.834580 140272641951488 _internal.py:122] ::ffff:10.138.0.35 - - [24/Jun/2019 15:50:54] \"GET /data/plugin/scalars/tags HTTP/1.1\" 200 -\r\nW0624 15:50:55.150964 140556554397440 servicer.py:195] Task result for outdated version 30 dropped\r\nI0624 15:50:57.245738 140272633558784 _internal.py:122] ::ffff:10.36.1.1 - - [24/Jun/2019 15:50:57] \"GET /data/environment HTTP/1.1\" 200 -\r\nI0624 15:50:57.250612 140271752836864 _internal.py:122] ::ffff:10.138.0.35 - - [24/Jun/2019 15:50:57] \"GET /data/experiments HTTP/1.1\" 200 -\r\nI0624 15:50:57.252254 140272105903872 _internal.py:122] ::ffff:10.36.1.1 - - [24/Jun/2019 15:50:57] \"GET /data/runs HTTP/1.1\" 200 -\r\nI0624 15:50:57.255696 140272641951488 _internal.py:122] ::ffff:10.138.0.35 - - [24/Jun/2019 15:50:57] \"GET /data/plugins_listing HTTP/1.1\" 200 -\r\nI0624 15:50:57.430979 140272641951488 _internal.py:122] ::ffff:10.138.0.35 - - [24/Jun/2019 15:50:57] \"GET /data/plugin/scalars/tags HTTP/1.1\" 200 -\r\n```\n", "before_files": [{"content": "import datetime\nimport subprocess\nimport time\n\nimport tensorflow as tf\n\nimport numpy as np\n\n\nclass TensorboardService(object):\n \"\"\"Tensorboard Service implementation\"\"\"\n\n def __init__(self, tensorboard_log_dir):\n \"\"\"\n Arguments:\n tensorboard_log_dir: The log directory for Tensorboard.\n \"\"\"\n _current_time = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n self._tensorboard_log_dir = tensorboard_log_dir + _current_time\n self._initialize_summary_writer()\n self.tb_process = None\n\n def _initialize_summary_writer(self):\n self.summary_writer = tf.summary.create_file_writer(\n self._tensorboard_log_dir\n )\n\n def write_dict_to_summary(self, dictionary, version):\n with self.summary_writer.as_default():\n for k, v in dictionary.items():\n if isinstance(v, np.ndarray) and len(v) == 1:\n v = v[0]\n tf.summary.scalar(k, v, step=version)\n\n def start(self):\n self.tb_process = subprocess.Popen(\n [\"tensorboard --logdir \" + self._tensorboard_log_dir], shell=True\n )\n\n def keep_running(self):\n while self.tb_process.poll() is None:\n time.sleep(10)\n", "path": "elasticdl/python/elasticdl/master/tensorboard_service.py"}], "after_files": [{"content": "import datetime\nimport subprocess\nimport time\n\nimport tensorflow as tf\n\nimport numpy as np\n\n\nclass TensorboardService(object):\n \"\"\"Tensorboard Service implementation\"\"\"\n\n def __init__(self, tensorboard_log_dir):\n \"\"\"\n Arguments:\n tensorboard_log_dir: The log directory for Tensorboard.\n \"\"\"\n _current_time = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n self._tensorboard_log_dir = tensorboard_log_dir + _current_time\n self._initialize_summary_writer()\n self.tb_process = None\n\n def _initialize_summary_writer(self):\n self.summary_writer = 
tf.summary.create_file_writer(\n self._tensorboard_log_dir\n )\n\n def write_dict_to_summary(self, dictionary, version):\n with self.summary_writer.as_default():\n for k, v in dictionary.items():\n if isinstance(v, np.ndarray) and len(v) == 1:\n v = v[0]\n tf.summary.scalar(k, v, step=version)\n\n def start(self):\n self.tb_process = subprocess.Popen(\n [\"tensorboard --logdir \" + self._tensorboard_log_dir],\n shell=True,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.STDOUT,\n )\n\n def keep_running(self):\n while self.tb_process.poll() is None:\n time.sleep(10)\n", "path": "elasticdl/python/elasticdl/master/tensorboard_service.py"}]} | 1,293 | 158 |
gh_patches_debug_23947 | rasdani/github-patches | git_diff | encode__uvicorn-881 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TypeError when connection is closed in connect()
I have the same problem then #185 but I think, I can reproduce it.
When you close a connection in the connect() method of a WebsocketConsumer, it works fine with daphne but uvicorn raises a TypeError
```
Traceback (most recent call last):
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/uvicorn/protocols/websockets/websockets_impl.py", line 140, in run_asgi
result = await asgi(self.asgi_receive, self.asgi_send)
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/sessions.py", line 179, in __call__
return await self.inner(receive, self.send)
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/middleware.py", line 41, in coroutine_call
await inner_instance(receive, send)
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/consumer.py", line 59, in __call__
[receive, self.channel_receive], self.dispatch
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/utils.py", line 59, in await_many_dispatch
await task
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/utils.py", line 51, in await_many_dispatch
result = task.result()
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/uvicorn/protocols/websockets/websockets_impl.py", line 222, in asgi_receive
data = await self.recv()
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/websockets/protocol.py", line 419, in recv
return_when=asyncio.FIRST_COMPLETED,
File "/usr/lib/python3.7/asyncio/tasks.py", line 361, in wait
fs = {ensure_future(f, loop=loop) for f in set(fs)}
File "/usr/lib/python3.7/asyncio/tasks.py", line 361, in <setcomp>
fs = {ensure_future(f, loop=loop) for f in set(fs)}
File "/usr/lib/python3.7/asyncio/tasks.py", line 592, in ensure_future
raise TypeError('An asyncio.Future, a coroutine or an awaitable is '
TypeError: An asyncio.Future, a coroutine or an awaitable is required
```
I created this branch on the django-channels-example repository: https://github.com/ostcar/channels-examples/tree/uvicorn-test
All I changed was that the websocket connection is always closed: https://github.com/andrewgodwin/channels-examples/compare/master...ostcar:uvicorn-test
I think the problem is that you call `websockets.WebSocketServerProtocol.recv()` before the websocket connection is open, so `websockets.WebSocketServerProtocol.connection_open()` has not been called. In this case the attribute `transfer_data_task` is still None. So when `websockets.WebSocketServerProtocol.recv()` calls
```python
yield from asyncio.wait(
[pop_message_waiter, self.transfer_data_task],
loop=self.loop,
return_when=asyncio.FIRST_COMPLETED,
)
```
it tries to await `None`, which causes the exception.
I don't know if this is a bug in the websockets package or if you should check `self.closed` before calling `self.recv()`.
--- END ISSUE ---
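To make the failure mode described in the report concrete, the following is a minimal, hypothetical sketch (independent of uvicorn and channels, with illustrative names only) of what happens when `asyncio.wait` is handed a set that still contains `None`, which is effectively what `recv()` does while `transfer_data_task` is unset. The exact error wording below matches Python 3.7:
```python
import asyncio


async def main():
    loop = asyncio.get_running_loop()
    pop_message_waiter = loop.create_future()  # stands in for websockets' internal waiter
    transfer_data_task = None                  # never set because connection_open() was not called

    # Mirrors the call inside WebSocketCommonProtocol.recv(): one member is None,
    # so ensure_future(None) raises before anything is actually awaited.
    await asyncio.wait(
        [pop_message_waiter, transfer_data_task],
        return_when=asyncio.FIRST_COMPLETED,
    )


# TypeError: An asyncio.Future, a coroutine or an awaitable is required
asyncio.run(main())
```
The direction suggested at the end of the report, checking that the connection is open (or already closed) before calling `self.recv()`, avoids reaching this call while the task is still unset.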
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `uvicorn/protocols/websockets/websockets_impl.py`
Content:
```
1 import asyncio
2 import http
3 import logging
4 from urllib.parse import unquote
5
6 import websockets
7
8 from uvicorn.protocols.utils import get_local_addr, get_remote_addr, is_ssl
9
10
11 class Server:
12 closing = False
13
14 def register(self, ws):
15 pass
16
17 def unregister(self, ws):
18 pass
19
20 def is_serving(self):
21 return not self.closing
22
23
24 class WebSocketProtocol(websockets.WebSocketServerProtocol):
25 def __init__(self, config, server_state, _loop=None):
26 if not config.loaded:
27 config.load()
28
29 self.config = config
30 self.app = config.loaded_app
31 self.loop = _loop or asyncio.get_event_loop()
32 self.logger = logging.getLogger("uvicorn.error")
33 self.root_path = config.root_path
34
35 # Shared server state
36 self.connections = server_state.connections
37 self.tasks = server_state.tasks
38
39 # Connection state
40 self.transport = None
41 self.server = None
42 self.client = None
43 self.scheme = None
44
45 # Connection events
46 self.scope = None
47 self.handshake_started_event = asyncio.Event()
48 self.handshake_completed_event = asyncio.Event()
49 self.closed_event = asyncio.Event()
50 self.initial_response = None
51 self.connect_sent = False
52 self.accepted_subprotocol = None
53 self.transfer_data_task = None
54
55 self.ws_server = Server()
56
57 super().__init__(ws_handler=self.ws_handler, ws_server=self.ws_server)
58
59 def connection_made(self, transport):
60 self.connections.add(self)
61 self.transport = transport
62 self.server = get_local_addr(transport)
63 self.client = get_remote_addr(transport)
64 self.scheme = "wss" if is_ssl(transport) else "ws"
65 super().connection_made(transport)
66
67 def connection_lost(self, exc):
68 self.connections.remove(self)
69 self.handshake_completed_event.set()
70 super().connection_lost(exc)
71
72 def shutdown(self):
73 self.ws_server.closing = True
74 self.transport.close()
75
76 def on_task_complete(self, task):
77 self.tasks.discard(task)
78
79 async def process_request(self, path, headers):
80 """
81 This hook is called to determine if the websocket should return
82 an HTTP response and close.
83
84 Our behavior here is to start the ASGI application, and then wait
85 for either `accept` or `close` in order to determine if we should
86 close the connection.
87 """
88 path_portion, _, query_string = path.partition("?")
89
90 websockets.handshake.check_request(headers)
91
92 subprotocols = []
93 for header in headers.get_all("Sec-WebSocket-Protocol"):
94 subprotocols.extend([token.strip() for token in header.split(",")])
95
96 asgi_headers = [
97 (name.encode("ascii"), value.encode("ascii"))
98 for name, value in headers.raw_items()
99 ]
100
101 self.scope = {
102 "type": "websocket",
103 "asgi": {"version": self.config.asgi_version, "spec_version": "2.1"},
104 "scheme": self.scheme,
105 "server": self.server,
106 "client": self.client,
107 "root_path": self.root_path,
108 "path": unquote(path_portion),
109 "raw_path": path_portion,
110 "query_string": query_string.encode("ascii"),
111 "headers": asgi_headers,
112 "subprotocols": subprotocols,
113 }
114 task = self.loop.create_task(self.run_asgi())
115 task.add_done_callback(self.on_task_complete)
116 self.tasks.add(task)
117 await self.handshake_started_event.wait()
118 return self.initial_response
119
120 def process_subprotocol(self, headers, available_subprotocols):
121 """
122 We override the standard 'process_subprotocol' behavior here so that
123 we return whatever subprotocol is sent in the 'accept' message.
124 """
125 return self.accepted_subprotocol
126
127 def send_500_response(self):
128 msg = b"Internal Server Error"
129 content = [
130 b"HTTP/1.1 500 Internal Server Error\r\n"
131 b"content-type: text/plain; charset=utf-8\r\n",
132 b"content-length: " + str(len(msg)).encode("ascii") + b"\r\n",
133 b"connection: close\r\n",
134 b"\r\n",
135 msg,
136 ]
137 self.transport.write(b"".join(content))
138
139 async def ws_handler(self, protocol, path):
140 """
141 This is the main handler function for the 'websockets' implementation
142 to call into. We just wait for close then return, and instead allow
143 'send' and 'receive' events to drive the flow.
144 """
145 self.handshake_completed_event.set()
146 await self.closed_event.wait()
147
148 async def run_asgi(self):
149 """
150 Wrapper around the ASGI callable, handling exceptions and unexpected
151 termination states.
152 """
153 try:
154 result = await self.app(self.scope, self.asgi_receive, self.asgi_send)
155 except BaseException as exc:
156 self.closed_event.set()
157 msg = "Exception in ASGI application\n"
158 self.logger.error(msg, exc_info=exc)
159 if not self.handshake_started_event.is_set():
160 self.send_500_response()
161 else:
162 await self.handshake_completed_event.wait()
163 self.transport.close()
164 else:
165 self.closed_event.set()
166 if not self.handshake_started_event.is_set():
167 msg = "ASGI callable returned without sending handshake."
168 self.logger.error(msg)
169 self.send_500_response()
170 self.transport.close()
171 elif result is not None:
172 msg = "ASGI callable should return None, but returned '%s'."
173 self.logger.error(msg, result)
174 await self.handshake_completed_event.wait()
175 self.transport.close()
176
177 async def asgi_send(self, message):
178 message_type = message["type"]
179
180 if not self.handshake_started_event.is_set():
181 if message_type == "websocket.accept":
182 self.logger.info(
183 '%s - "WebSocket %s" [accepted]',
184 self.scope["client"],
185 self.scope["root_path"] + self.scope["path"],
186 )
187 self.initial_response = None
188 self.accepted_subprotocol = message.get("subprotocol")
189 self.handshake_started_event.set()
190
191 elif message_type == "websocket.close":
192 self.logger.info(
193 '%s - "WebSocket %s" 403',
194 self.scope["client"],
195 self.scope["root_path"] + self.scope["path"],
196 )
197 self.initial_response = (http.HTTPStatus.FORBIDDEN, [], b"")
198 self.handshake_started_event.set()
199 self.closed_event.set()
200
201 else:
202 msg = (
203 "Expected ASGI message 'websocket.accept' or 'websocket.close', "
204 "but got '%s'."
205 )
206 raise RuntimeError(msg % message_type)
207
208 elif not self.closed_event.is_set():
209 await self.handshake_completed_event.wait()
210
211 if message_type == "websocket.send":
212 bytes_data = message.get("bytes")
213 text_data = message.get("text")
214 data = text_data if bytes_data is None else bytes_data
215 await self.send(data)
216
217 elif message_type == "websocket.close":
218 code = message.get("code", 1000)
219 self.close_code = code # for WebSocketServerProtocol
220 await self.close(code)
221 self.closed_event.set()
222
223 else:
224 msg = (
225 "Expected ASGI message 'websocket.send' or 'websocket.close',"
226 " but got '%s'."
227 )
228 raise RuntimeError(msg % message_type)
229
230 else:
231 msg = "Unexpected ASGI message '%s', after sending 'websocket.close'."
232 raise RuntimeError(msg % message_type)
233
234 async def asgi_receive(self):
235 if not self.connect_sent:
236 self.connect_sent = True
237 return {"type": "websocket.connect"}
238
239 await self.handshake_completed_event.wait()
240
241 if self.closed_event.is_set():
242 # If the client disconnected: WebSocketServerProtocol set self.close_code.
243 # If the handshake failed or the app closed before handshake completion,
244 # use 1006 Abnormal Closure.
245 code = getattr(self, "close_code", 1006)
246 return {"type": "websocket.disconnect", "code": code}
247
248 try:
249 data = await self.recv()
250 except websockets.ConnectionClosed as exc:
251 return {"type": "websocket.disconnect", "code": exc.code}
252
253 msg = {"type": "websocket.receive"}
254
255 if isinstance(data, str):
256 msg["text"] = data
257 else:
258 msg["bytes"] = data
259
260 return msg
261
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/uvicorn/protocols/websockets/websockets_impl.py b/uvicorn/protocols/websockets/websockets_impl.py
--- a/uvicorn/protocols/websockets/websockets_impl.py
+++ b/uvicorn/protocols/websockets/websockets_impl.py
@@ -216,7 +216,6 @@
elif message_type == "websocket.close":
code = message.get("code", 1000)
- self.close_code = code # for WebSocketServerProtocol
await self.close(code)
self.closed_event.set()
@@ -237,15 +236,8 @@
return {"type": "websocket.connect"}
await self.handshake_completed_event.wait()
-
- if self.closed_event.is_set():
- # If the client disconnected: WebSocketServerProtocol set self.close_code.
- # If the handshake failed or the app closed before handshake completion,
- # use 1006 Abnormal Closure.
- code = getattr(self, "close_code", 1006)
- return {"type": "websocket.disconnect", "code": code}
-
try:
+ await self.ensure_open()
data = await self.recv()
except websockets.ConnectionClosed as exc:
return {"type": "websocket.disconnect", "code": exc.code}
| {"golden_diff": "diff --git a/uvicorn/protocols/websockets/websockets_impl.py b/uvicorn/protocols/websockets/websockets_impl.py\n--- a/uvicorn/protocols/websockets/websockets_impl.py\n+++ b/uvicorn/protocols/websockets/websockets_impl.py\n@@ -216,7 +216,6 @@\n \n elif message_type == \"websocket.close\":\n code = message.get(\"code\", 1000)\n- self.close_code = code # for WebSocketServerProtocol\n await self.close(code)\n self.closed_event.set()\n \n@@ -237,15 +236,8 @@\n return {\"type\": \"websocket.connect\"}\n \n await self.handshake_completed_event.wait()\n-\n- if self.closed_event.is_set():\n- # If the client disconnected: WebSocketServerProtocol set self.close_code.\n- # If the handshake failed or the app closed before handshake completion,\n- # use 1006 Abnormal Closure.\n- code = getattr(self, \"close_code\", 1006)\n- return {\"type\": \"websocket.disconnect\", \"code\": code}\n-\n try:\n+ await self.ensure_open()\n data = await self.recv()\n except websockets.ConnectionClosed as exc:\n return {\"type\": \"websocket.disconnect\", \"code\": exc.code}\n", "issue": "TypeError when connection is closed in connect()\nI have the same problem then #185 but I think, I can reproduce it.\r\n\r\nWhen you close a connection in the connect() method of a WebsocketConsumer, it works fine with daphne but uvicorn raises a TypeError \r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/uvicorn/protocols/websockets/websockets_impl.py\", line 140, in run_asgi\r\n result = await asgi(self.asgi_receive, self.asgi_send)\r\n File \"/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/sessions.py\", line 179, in __call__\r\n return await self.inner(receive, self.send)\r\n File \"/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/middleware.py\", line 41, in coroutine_call\r\n await inner_instance(receive, send)\r\n File \"/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/consumer.py\", line 59, in __call__\r\n [receive, self.channel_receive], self.dispatch\r\n File \"/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/utils.py\", line 59, in await_many_dispatch\r\n await task\r\n File \"/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/utils.py\", line 51, in await_many_dispatch\r\n result = task.result()\r\n File \"/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/uvicorn/protocols/websockets/websockets_impl.py\", line 222, in asgi_receive\r\n data = await self.recv()\r\n File \"/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/websockets/protocol.py\", line 419, in recv\r\n return_when=asyncio.FIRST_COMPLETED,\r\n File \"/usr/lib/python3.7/asyncio/tasks.py\", line 361, in wait\r\n fs = {ensure_future(f, loop=loop) for f in set(fs)}\r\n File \"/usr/lib/python3.7/asyncio/tasks.py\", line 361, in <setcomp>\r\n fs = {ensure_future(f, loop=loop) for f in set(fs)}\r\n File \"/usr/lib/python3.7/asyncio/tasks.py\", line 592, in ensure_future\r\n raise TypeError('An asyncio.Future, a coroutine or an awaitable is '\r\nTypeError: An asyncio.Future, a coroutine or an awaitable is required\r\n```\r\n\r\nI created this branch on the django-channels-example repository: https://github.com/ostcar/channels-examples/tree/uvicorn-test\r\n\r\nAll I changed was, that the websocket connection is always closed: 
https://github.com/andrewgodwin/channels-examples/compare/master...ostcar:uvicorn-test\r\n\r\nI think the problem is, that you call `websockets.WebSocketServerProtocol.recv()` before the websocket connection is open so `websockets.WebSocketServerProtocol.connection_open()` was not called. In this case the attribute `transfer_data_task` is still None. So when `websockets.WebSocketServerProtocol.recv()` calls \r\n\r\n```python\r\nyield from asyncio.wait(\r\n [pop_message_waiter, self.transfer_data_task],\r\n loop=self.loop,\r\n return_when=asyncio.FIRST_COMPLETED,\r\n )\r\n```\r\nit tries to await `None` what causes the exception.\r\n\r\nI don't know, if this is a but in the websockets package or if you should check `self.closed` before calling `self.recv()`\r\n\n", "before_files": [{"content": "import asyncio\nimport http\nimport logging\nfrom urllib.parse import unquote\n\nimport websockets\n\nfrom uvicorn.protocols.utils import get_local_addr, get_remote_addr, is_ssl\n\n\nclass Server:\n closing = False\n\n def register(self, ws):\n pass\n\n def unregister(self, ws):\n pass\n\n def is_serving(self):\n return not self.closing\n\n\nclass WebSocketProtocol(websockets.WebSocketServerProtocol):\n def __init__(self, config, server_state, _loop=None):\n if not config.loaded:\n config.load()\n\n self.config = config\n self.app = config.loaded_app\n self.loop = _loop or asyncio.get_event_loop()\n self.logger = logging.getLogger(\"uvicorn.error\")\n self.root_path = config.root_path\n\n # Shared server state\n self.connections = server_state.connections\n self.tasks = server_state.tasks\n\n # Connection state\n self.transport = None\n self.server = None\n self.client = None\n self.scheme = None\n\n # Connection events\n self.scope = None\n self.handshake_started_event = asyncio.Event()\n self.handshake_completed_event = asyncio.Event()\n self.closed_event = asyncio.Event()\n self.initial_response = None\n self.connect_sent = False\n self.accepted_subprotocol = None\n self.transfer_data_task = None\n\n self.ws_server = Server()\n\n super().__init__(ws_handler=self.ws_handler, ws_server=self.ws_server)\n\n def connection_made(self, transport):\n self.connections.add(self)\n self.transport = transport\n self.server = get_local_addr(transport)\n self.client = get_remote_addr(transport)\n self.scheme = \"wss\" if is_ssl(transport) else \"ws\"\n super().connection_made(transport)\n\n def connection_lost(self, exc):\n self.connections.remove(self)\n self.handshake_completed_event.set()\n super().connection_lost(exc)\n\n def shutdown(self):\n self.ws_server.closing = True\n self.transport.close()\n\n def on_task_complete(self, task):\n self.tasks.discard(task)\n\n async def process_request(self, path, headers):\n \"\"\"\n This hook is called to determine if the websocket should return\n an HTTP response and close.\n\n Our behavior here is to start the ASGI application, and then wait\n for either `accept` or `close` in order to determine if we should\n close the connection.\n \"\"\"\n path_portion, _, query_string = path.partition(\"?\")\n\n websockets.handshake.check_request(headers)\n\n subprotocols = []\n for header in headers.get_all(\"Sec-WebSocket-Protocol\"):\n subprotocols.extend([token.strip() for token in header.split(\",\")])\n\n asgi_headers = [\n (name.encode(\"ascii\"), value.encode(\"ascii\"))\n for name, value in headers.raw_items()\n ]\n\n self.scope = {\n \"type\": \"websocket\",\n \"asgi\": {\"version\": self.config.asgi_version, \"spec_version\": \"2.1\"},\n \"scheme\": self.scheme,\n 
\"server\": self.server,\n \"client\": self.client,\n \"root_path\": self.root_path,\n \"path\": unquote(path_portion),\n \"raw_path\": path_portion,\n \"query_string\": query_string.encode(\"ascii\"),\n \"headers\": asgi_headers,\n \"subprotocols\": subprotocols,\n }\n task = self.loop.create_task(self.run_asgi())\n task.add_done_callback(self.on_task_complete)\n self.tasks.add(task)\n await self.handshake_started_event.wait()\n return self.initial_response\n\n def process_subprotocol(self, headers, available_subprotocols):\n \"\"\"\n We override the standard 'process_subprotocol' behavior here so that\n we return whatever subprotocol is sent in the 'accept' message.\n \"\"\"\n return self.accepted_subprotocol\n\n def send_500_response(self):\n msg = b\"Internal Server Error\"\n content = [\n b\"HTTP/1.1 500 Internal Server Error\\r\\n\"\n b\"content-type: text/plain; charset=utf-8\\r\\n\",\n b\"content-length: \" + str(len(msg)).encode(\"ascii\") + b\"\\r\\n\",\n b\"connection: close\\r\\n\",\n b\"\\r\\n\",\n msg,\n ]\n self.transport.write(b\"\".join(content))\n\n async def ws_handler(self, protocol, path):\n \"\"\"\n This is the main handler function for the 'websockets' implementation\n to call into. We just wait for close then return, and instead allow\n 'send' and 'receive' events to drive the flow.\n \"\"\"\n self.handshake_completed_event.set()\n await self.closed_event.wait()\n\n async def run_asgi(self):\n \"\"\"\n Wrapper around the ASGI callable, handling exceptions and unexpected\n termination states.\n \"\"\"\n try:\n result = await self.app(self.scope, self.asgi_receive, self.asgi_send)\n except BaseException as exc:\n self.closed_event.set()\n msg = \"Exception in ASGI application\\n\"\n self.logger.error(msg, exc_info=exc)\n if not self.handshake_started_event.is_set():\n self.send_500_response()\n else:\n await self.handshake_completed_event.wait()\n self.transport.close()\n else:\n self.closed_event.set()\n if not self.handshake_started_event.is_set():\n msg = \"ASGI callable returned without sending handshake.\"\n self.logger.error(msg)\n self.send_500_response()\n self.transport.close()\n elif result is not None:\n msg = \"ASGI callable should return None, but returned '%s'.\"\n self.logger.error(msg, result)\n await self.handshake_completed_event.wait()\n self.transport.close()\n\n async def asgi_send(self, message):\n message_type = message[\"type\"]\n\n if not self.handshake_started_event.is_set():\n if message_type == \"websocket.accept\":\n self.logger.info(\n '%s - \"WebSocket %s\" [accepted]',\n self.scope[\"client\"],\n self.scope[\"root_path\"] + self.scope[\"path\"],\n )\n self.initial_response = None\n self.accepted_subprotocol = message.get(\"subprotocol\")\n self.handshake_started_event.set()\n\n elif message_type == \"websocket.close\":\n self.logger.info(\n '%s - \"WebSocket %s\" 403',\n self.scope[\"client\"],\n self.scope[\"root_path\"] + self.scope[\"path\"],\n )\n self.initial_response = (http.HTTPStatus.FORBIDDEN, [], b\"\")\n self.handshake_started_event.set()\n self.closed_event.set()\n\n else:\n msg = (\n \"Expected ASGI message 'websocket.accept' or 'websocket.close', \"\n \"but got '%s'.\"\n )\n raise RuntimeError(msg % message_type)\n\n elif not self.closed_event.is_set():\n await self.handshake_completed_event.wait()\n\n if message_type == \"websocket.send\":\n bytes_data = message.get(\"bytes\")\n text_data = message.get(\"text\")\n data = text_data if bytes_data is None else bytes_data\n await self.send(data)\n\n elif message_type == 
\"websocket.close\":\n code = message.get(\"code\", 1000)\n self.close_code = code # for WebSocketServerProtocol\n await self.close(code)\n self.closed_event.set()\n\n else:\n msg = (\n \"Expected ASGI message 'websocket.send' or 'websocket.close',\"\n \" but got '%s'.\"\n )\n raise RuntimeError(msg % message_type)\n\n else:\n msg = \"Unexpected ASGI message '%s', after sending 'websocket.close'.\"\n raise RuntimeError(msg % message_type)\n\n async def asgi_receive(self):\n if not self.connect_sent:\n self.connect_sent = True\n return {\"type\": \"websocket.connect\"}\n\n await self.handshake_completed_event.wait()\n\n if self.closed_event.is_set():\n # If the client disconnected: WebSocketServerProtocol set self.close_code.\n # If the handshake failed or the app closed before handshake completion,\n # use 1006 Abnormal Closure.\n code = getattr(self, \"close_code\", 1006)\n return {\"type\": \"websocket.disconnect\", \"code\": code}\n\n try:\n data = await self.recv()\n except websockets.ConnectionClosed as exc:\n return {\"type\": \"websocket.disconnect\", \"code\": exc.code}\n\n msg = {\"type\": \"websocket.receive\"}\n\n if isinstance(data, str):\n msg[\"text\"] = data\n else:\n msg[\"bytes\"] = data\n\n return msg\n", "path": "uvicorn/protocols/websockets/websockets_impl.py"}], "after_files": [{"content": "import asyncio\nimport http\nimport logging\nfrom urllib.parse import unquote\n\nimport websockets\n\nfrom uvicorn.protocols.utils import get_local_addr, get_remote_addr, is_ssl\n\n\nclass Server:\n closing = False\n\n def register(self, ws):\n pass\n\n def unregister(self, ws):\n pass\n\n def is_serving(self):\n return not self.closing\n\n\nclass WebSocketProtocol(websockets.WebSocketServerProtocol):\n def __init__(self, config, server_state, _loop=None):\n if not config.loaded:\n config.load()\n\n self.config = config\n self.app = config.loaded_app\n self.loop = _loop or asyncio.get_event_loop()\n self.logger = logging.getLogger(\"uvicorn.error\")\n self.root_path = config.root_path\n\n # Shared server state\n self.connections = server_state.connections\n self.tasks = server_state.tasks\n\n # Connection state\n self.transport = None\n self.server = None\n self.client = None\n self.scheme = None\n\n # Connection events\n self.scope = None\n self.handshake_started_event = asyncio.Event()\n self.handshake_completed_event = asyncio.Event()\n self.closed_event = asyncio.Event()\n self.initial_response = None\n self.connect_sent = False\n self.accepted_subprotocol = None\n self.transfer_data_task = None\n\n self.ws_server = Server()\n\n super().__init__(ws_handler=self.ws_handler, ws_server=self.ws_server)\n\n def connection_made(self, transport):\n self.connections.add(self)\n self.transport = transport\n self.server = get_local_addr(transport)\n self.client = get_remote_addr(transport)\n self.scheme = \"wss\" if is_ssl(transport) else \"ws\"\n super().connection_made(transport)\n\n def connection_lost(self, exc):\n self.connections.remove(self)\n self.handshake_completed_event.set()\n super().connection_lost(exc)\n\n def shutdown(self):\n self.ws_server.closing = True\n self.transport.close()\n\n def on_task_complete(self, task):\n self.tasks.discard(task)\n\n async def process_request(self, path, headers):\n \"\"\"\n This hook is called to determine if the websocket should return\n an HTTP response and close.\n\n Our behavior here is to start the ASGI application, and then wait\n for either `accept` or `close` in order to determine if we should\n close the connection.\n \"\"\"\n 
path_portion, _, query_string = path.partition(\"?\")\n\n websockets.handshake.check_request(headers)\n\n subprotocols = []\n for header in headers.get_all(\"Sec-WebSocket-Protocol\"):\n subprotocols.extend([token.strip() for token in header.split(\",\")])\n\n asgi_headers = [\n (name.encode(\"ascii\"), value.encode(\"ascii\"))\n for name, value in headers.raw_items()\n ]\n\n self.scope = {\n \"type\": \"websocket\",\n \"asgi\": {\"version\": self.config.asgi_version, \"spec_version\": \"2.1\"},\n \"scheme\": self.scheme,\n \"server\": self.server,\n \"client\": self.client,\n \"root_path\": self.root_path,\n \"path\": unquote(path_portion),\n \"raw_path\": path_portion,\n \"query_string\": query_string.encode(\"ascii\"),\n \"headers\": asgi_headers,\n \"subprotocols\": subprotocols,\n }\n task = self.loop.create_task(self.run_asgi())\n task.add_done_callback(self.on_task_complete)\n self.tasks.add(task)\n await self.handshake_started_event.wait()\n return self.initial_response\n\n def process_subprotocol(self, headers, available_subprotocols):\n \"\"\"\n We override the standard 'process_subprotocol' behavior here so that\n we return whatever subprotocol is sent in the 'accept' message.\n \"\"\"\n return self.accepted_subprotocol\n\n def send_500_response(self):\n msg = b\"Internal Server Error\"\n content = [\n b\"HTTP/1.1 500 Internal Server Error\\r\\n\"\n b\"content-type: text/plain; charset=utf-8\\r\\n\",\n b\"content-length: \" + str(len(msg)).encode(\"ascii\") + b\"\\r\\n\",\n b\"connection: close\\r\\n\",\n b\"\\r\\n\",\n msg,\n ]\n self.transport.write(b\"\".join(content))\n\n async def ws_handler(self, protocol, path):\n \"\"\"\n This is the main handler function for the 'websockets' implementation\n to call into. We just wait for close then return, and instead allow\n 'send' and 'receive' events to drive the flow.\n \"\"\"\n self.handshake_completed_event.set()\n await self.closed_event.wait()\n\n async def run_asgi(self):\n \"\"\"\n Wrapper around the ASGI callable, handling exceptions and unexpected\n termination states.\n \"\"\"\n try:\n result = await self.app(self.scope, self.asgi_receive, self.asgi_send)\n except BaseException as exc:\n self.closed_event.set()\n msg = \"Exception in ASGI application\\n\"\n self.logger.error(msg, exc_info=exc)\n if not self.handshake_started_event.is_set():\n self.send_500_response()\n else:\n await self.handshake_completed_event.wait()\n self.transport.close()\n else:\n self.closed_event.set()\n if not self.handshake_started_event.is_set():\n msg = \"ASGI callable returned without sending handshake.\"\n self.logger.error(msg)\n self.send_500_response()\n self.transport.close()\n elif result is not None:\n msg = \"ASGI callable should return None, but returned '%s'.\"\n self.logger.error(msg, result)\n await self.handshake_completed_event.wait()\n self.transport.close()\n\n async def asgi_send(self, message):\n message_type = message[\"type\"]\n\n if not self.handshake_started_event.is_set():\n if message_type == \"websocket.accept\":\n self.logger.info(\n '%s - \"WebSocket %s\" [accepted]',\n self.scope[\"client\"],\n self.scope[\"root_path\"] + self.scope[\"path\"],\n )\n self.initial_response = None\n self.accepted_subprotocol = message.get(\"subprotocol\")\n self.handshake_started_event.set()\n\n elif message_type == \"websocket.close\":\n self.logger.info(\n '%s - \"WebSocket %s\" 403',\n self.scope[\"client\"],\n self.scope[\"root_path\"] + self.scope[\"path\"],\n )\n self.initial_response = (http.HTTPStatus.FORBIDDEN, [], b\"\")\n 
self.handshake_started_event.set()\n self.closed_event.set()\n\n else:\n msg = (\n \"Expected ASGI message 'websocket.accept' or 'websocket.close', \"\n \"but got '%s'.\"\n )\n raise RuntimeError(msg % message_type)\n\n elif not self.closed_event.is_set():\n await self.handshake_completed_event.wait()\n\n if message_type == \"websocket.send\":\n bytes_data = message.get(\"bytes\")\n text_data = message.get(\"text\")\n data = text_data if bytes_data is None else bytes_data\n await self.send(data)\n\n elif message_type == \"websocket.close\":\n code = message.get(\"code\", 1000)\n await self.close(code)\n self.closed_event.set()\n\n else:\n msg = (\n \"Expected ASGI message 'websocket.send' or 'websocket.close',\"\n \" but got '%s'.\"\n )\n raise RuntimeError(msg % message_type)\n\n else:\n msg = \"Unexpected ASGI message '%s', after sending 'websocket.close'.\"\n raise RuntimeError(msg % message_type)\n\n async def asgi_receive(self):\n if not self.connect_sent:\n self.connect_sent = True\n return {\"type\": \"websocket.connect\"}\n\n await self.handshake_completed_event.wait()\n try:\n await self.ensure_open()\n data = await self.recv()\n except websockets.ConnectionClosed as exc:\n return {\"type\": \"websocket.disconnect\", \"code\": exc.code}\n\n msg = {\"type\": \"websocket.receive\"}\n\n if isinstance(data, str):\n msg[\"text\"] = data\n else:\n msg[\"bytes\"] = data\n\n return msg\n", "path": "uvicorn/protocols/websockets/websockets_impl.py"}]} | 3,614 | 283 |
gh_patches_debug_29815 | rasdani/github-patches | git_diff | streamlink__streamlink-4553 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plugins.nicolive: Niconico login broken (404)
### Checklist
- [X] This is a plugin issue and not a different kind of issue
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [ ] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Latest build from the master branch
### Description
The Niconico login function is broken. This error is shown:
404 Client Error: Not Found for url: https://account.nicovideo.jp/
The plugin seems to be pointing to https://account.nicovideo.jp/login/redirector which does not work anymore.
Pointing to https://account.nicovideo.jp/login instead should solve the issue.
### Debug log
```text
Used syntax:
streamlink -o "output.ts" https://live.nicovideo.jp/watch/[video-ID] best --niconico-email [mailaddress] --niconico-password [PW]
```
--- END ISSUE ---
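As a quick, hypothetical way to confirm the 404 described above, one could POST directly to the redirector endpoint with the same query parameters and form fields the plugin uses (shown in the plugin source below); the credentials are placeholders and the snippet assumes the `requests` package is available:
```python
import requests

# Placeholder credentials only; this is a connectivity probe, not a real login.
resp = requests.post(
    "https://account.nicovideo.jp/login/redirector",
    params={"show_button_twitter": 1, "show_button_facebook": 1, "next_url": "/"},
    data={"mail_tel": "user@example.com", "password": "placeholder"},
    allow_redirects=False,
)
print(resp.status_code)  # the report above ran into a 404 Client Error in this flow
```
Inspecting the status code and any redirect target this way helps narrow down whether the redirector endpoint itself is gone or only the page it forwards to has changed.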
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/nicolive.py`
Content:
```
1 """
2 $description Japanese live streaming and video hosting social platform.
3 $url live.nicovideo.jp
4 $type live, vod
5 $account Required by some streams
6 $notes Timeshift is supported
7 """
8
9 import logging
10 import re
11 from threading import Event
12 from urllib.parse import urljoin
13
14 from streamlink.exceptions import FatalPluginError
15 from streamlink.plugin import Plugin, PluginArgument, PluginArguments, PluginError, pluginmatcher
16 from streamlink.plugin.api import useragents, validate
17 from streamlink.plugin.api.websocket import WebsocketClient
18 from streamlink.stream.hls import HLSStream, HLSStreamReader
19 from streamlink.utils.parse import parse_json
20 from streamlink.utils.times import hours_minutes_seconds
21 from streamlink.utils.url import update_qsd
22
23 log = logging.getLogger(__name__)
24
25
26 class NicoLiveWsClient(WebsocketClient):
27 STREAM_OPENED_TIMEOUT = 6
28
29 ready: Event
30 opened: Event
31 hls_stream_url: str
32
33 def __init__(self, *args, **kwargs):
34 super().__init__(*args, **kwargs)
35 self.opened = Event()
36 self.ready = Event()
37
38 def on_open(self, wsapp):
39 super().on_open(wsapp)
40 self.send_playerversion()
41 self.send_getpermit()
42
43 def on_message(self, wsapp, data: str):
44 log.debug(f"Received: {data}")
45 message = parse_json(data)
46 msgtype = message.get("type")
47 msgdata = message.get("data", {})
48
49 if msgtype == "ping":
50 self.send_pong()
51
52 elif msgtype == "stream" and msgdata.get("protocol") == "hls" and msgdata.get("uri"):
53 self.hls_stream_url = msgdata.get("uri")
54 self.ready.set()
55 if self.opened.wait(self.STREAM_OPENED_TIMEOUT):
56 log.debug("Stream opened, keeping websocket connection alive")
57 else:
58 log.info("Closing websocket connection")
59 self.close()
60
61 elif msgtype == "disconnect":
62 reason = msgdata.get("reason", "Unknown reason")
63 log.info(f"Received disconnect message: {reason}")
64 self.close()
65
66 def send_playerversion(self):
67 self.send_json({
68 "type": "startWatching",
69 "data": {
70 "stream": {
71 "quality": "abr",
72 "protocol": "hls",
73 "latency": "high",
74 "chasePlay": False
75 },
76 "room": {
77 "protocol": "webSocket",
78 "commentable": True
79 },
80 "reconnect": False
81 }
82 })
83
84 def send_getpermit(self):
85 self.send_json({
86 "type": "getAkashic",
87 "data": {
88 "chasePlay": False
89 }
90 })
91
92 def send_pong(self):
93 self.send_json({"type": "pong"})
94 self.send_json({"type": "keepSeat"})
95
96
97 class NicoLiveHLSStreamReader(HLSStreamReader):
98 stream: "NicoLiveHLSStream"
99
100 def open(self):
101 self.stream.wsclient.opened.set()
102 super().open()
103
104 def close(self):
105 super().close()
106 self.stream.wsclient.close()
107
108
109 class NicoLiveHLSStream(HLSStream):
110 __reader__ = NicoLiveHLSStreamReader
111 wsclient: NicoLiveWsClient
112
113 def set_wsclient(self, wsclient: NicoLiveWsClient):
114 self.wsclient = wsclient
115
116
117 @pluginmatcher(re.compile(
118 r"https?://(?P<domain>live\d*\.nicovideo\.jp)/watch/(lv|co)\d+"
119 ))
120 class NicoLive(Plugin):
121 arguments = PluginArguments(
122 PluginArgument(
123 "email",
124 argument_name="niconico-email",
125 sensitive=True,
126 metavar="EMAIL",
127 help="The email or phone number associated with your Niconico account"
128 ),
129 PluginArgument(
130 "password",
131 argument_name="niconico-password",
132 sensitive=True,
133 metavar="PASSWORD",
134 help="The password of your Niconico account"
135 ),
136 PluginArgument(
137 "user-session",
138 argument_name="niconico-user-session",
139 sensitive=True,
140 metavar="VALUE",
141 help="Value of the user-session token \n(can be used in "
142 "case you do not want to put your password here)"
143 ),
144 PluginArgument(
145 "purge-credentials",
146 argument_name="niconico-purge-credentials",
147 action="store_true",
148 help="Purge cached Niconico credentials to initiate a new session and reauthenticate."
149 ),
150 PluginArgument(
151 "timeshift-offset",
152 type=hours_minutes_seconds,
153 argument_name="niconico-timeshift-offset",
154 metavar="[HH:]MM:SS",
155 default=None,
156 help="Amount of time to skip from the beginning of a stream. Default is 00:00:00."
157 )
158 )
159
160 STREAM_READY_TIMEOUT = 6
161 LOGIN_URL = "https://account.nicovideo.jp/login/redirector"
162 LOGIN_URL_PARAMS = {
163 "show_button_twitter": 1,
164 "show_button_facebook": 1,
165 "next_url": "/",
166 }
167
168 wsclient: NicoLiveWsClient
169
170 def _get_streams(self):
171 if self.get_option("purge_credentials"):
172 self.clear_cookies()
173 log.info("All credentials were successfully removed")
174
175 self.session.http.headers.update({
176 "User-Agent": useragents.CHROME,
177 })
178
179 self.niconico_web_login()
180
181 wss_api_url = self.get_wss_api_url()
182 if not wss_api_url:
183 log.error(
184 "Failed to get wss_api_url. "
185 "Please check if the URL is correct, "
186 "and make sure your account has access to the video."
187 )
188 return
189
190 self.wsclient = NicoLiveWsClient(self.session, wss_api_url)
191 self.wsclient.start()
192
193 hls_stream_url = self._get_hls_stream_url()
194 if not hls_stream_url:
195 return
196
197 offset = self.get_option("timeshift-offset")
198 if offset and "timeshift" in wss_api_url:
199 hls_stream_url = update_qsd(hls_stream_url, {"start": offset})
200
201 for quality, stream in NicoLiveHLSStream.parse_variant_playlist(self.session, hls_stream_url).items():
202 stream.set_wsclient(self.wsclient)
203 yield quality, stream
204
205 def _get_hls_stream_url(self):
206 log.debug(f"Waiting for permit (for at most {self.STREAM_READY_TIMEOUT} seconds)...")
207 if not self.wsclient.ready.wait(self.STREAM_READY_TIMEOUT) or not self.wsclient.is_alive():
208 log.error("Waiting for permit timed out.")
209 self.wsclient.close()
210 return
211
212 return self.wsclient.hls_stream_url
213
214 def get_wss_api_url(self):
215 try:
216 data = self.session.http.get(self.url, schema=validate.Schema(
217 validate.parse_html(),
218 validate.xml_find(".//script[@id='embedded-data'][@data-props]"),
219 validate.get("data-props"),
220 validate.parse_json(),
221 {"site": {
222 "relive": {
223 "webSocketUrl": validate.url(scheme="wss")
224 },
225 validate.optional("frontendId"): int
226 }},
227 validate.get("site"),
228 validate.union_get(("relive", "webSocketUrl"), "frontendId")
229 ))
230 except PluginError:
231 return
232
233 wss_api_url, frontend_id = data
234 if frontend_id is not None:
235 wss_api_url = update_qsd(wss_api_url, {"frontend_id": frontend_id})
236
237 return wss_api_url
238
239 def niconico_web_login(self):
240 user_session = self.get_option("user-session")
241 email = self.get_option("email")
242 password = self.get_option("password")
243
244 if user_session is not None:
245 log.info("Logging in via provided user session cookie")
246 self.session.http.cookies.set(
247 "user_session",
248 user_session,
249 path="/",
250 domain="nicovideo.jp"
251 )
252 self.save_cookies()
253
254 elif self.session.http.cookies.get("user_session"):
255 log.info("Logging in via cached user session cookie")
256
257 elif email is not None and password is not None:
258 log.info("Logging in via provided email and password")
259 root = self.session.http.post(
260 self.LOGIN_URL,
261 data={"mail_tel": email, "password": password},
262 params=self.LOGIN_URL_PARAMS,
263 schema=validate.Schema(validate.parse_html()))
264
265 input_with_value = {}
266 for elem in root.xpath(".//input"):
267 if elem.attrib.get("value"):
268 input_with_value[elem.attrib.get("name")] = elem.attrib.get("value")
269 else:
270 if elem.attrib.get("id") == "oneTimePw":
271 maxlength = int(elem.attrib.get("maxlength"))
272 try:
273 oneTimePw = self.input_ask("Enter the 6 digit number included in email")
274 if len(oneTimePw) > maxlength:
275 log.error("invalid user input")
276 return
277 except FatalPluginError:
278 return
279 input_with_value[elem.attrib.get("name")] = oneTimePw
280 else:
281 log.debug(f"unknown input: {elem.attrib.get('name')}")
282
283 root = self.session.http.post(
284 urljoin("https://account.nicovideo.jp", root.xpath("string(.//form[@action]/@action)")),
285 data=input_with_value,
286 schema=validate.Schema(validate.parse_html()))
287 log.debug(f"Cookies: {self.session.http.cookies.get_dict()}")
288 if self.session.http.cookies.get("user_session") is None:
289 error = root.xpath("string(//div[@class='formError']/div/text())")
290 log.warning(f"Login failed: {error or 'unknown reason'}")
291 else:
292 log.info("Logged in.")
293 self.save_cookies()
294
295
296 __plugin__ = NicoLive
297
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/nicolive.py b/src/streamlink/plugins/nicolive.py
--- a/src/streamlink/plugins/nicolive.py
+++ b/src/streamlink/plugins/nicolive.py
@@ -260,10 +260,16 @@
self.LOGIN_URL,
data={"mail_tel": email, "password": password},
params=self.LOGIN_URL_PARAMS,
- schema=validate.Schema(validate.parse_html()))
+ schema=validate.Schema(validate.parse_html()),
+ )
+
+ if self.session.http.cookies.get("user_session"):
+ log.info("Logged in.")
+ self.save_cookies()
+ return
input_with_value = {}
- for elem in root.xpath(".//input"):
+ for elem in root.xpath(".//form[@action]//input"):
if elem.attrib.get("value"):
input_with_value[elem.attrib.get("name")] = elem.attrib.get("value")
else:
@@ -283,7 +289,8 @@
root = self.session.http.post(
urljoin("https://account.nicovideo.jp", root.xpath("string(.//form[@action]/@action)")),
data=input_with_value,
- schema=validate.Schema(validate.parse_html()))
+ schema=validate.Schema(validate.parse_html()),
+ )
log.debug(f"Cookies: {self.session.http.cookies.get_dict()}")
if self.session.http.cookies.get("user_session") is None:
error = root.xpath("string(//div[@class='formError']/div/text())")
| {"golden_diff": "diff --git a/src/streamlink/plugins/nicolive.py b/src/streamlink/plugins/nicolive.py\n--- a/src/streamlink/plugins/nicolive.py\n+++ b/src/streamlink/plugins/nicolive.py\n@@ -260,10 +260,16 @@\n self.LOGIN_URL,\n data={\"mail_tel\": email, \"password\": password},\n params=self.LOGIN_URL_PARAMS,\n- schema=validate.Schema(validate.parse_html()))\n+ schema=validate.Schema(validate.parse_html()),\n+ )\n+\n+ if self.session.http.cookies.get(\"user_session\"):\n+ log.info(\"Logged in.\")\n+ self.save_cookies()\n+ return\n \n input_with_value = {}\n- for elem in root.xpath(\".//input\"):\n+ for elem in root.xpath(\".//form[@action]//input\"):\n if elem.attrib.get(\"value\"):\n input_with_value[elem.attrib.get(\"name\")] = elem.attrib.get(\"value\")\n else:\n@@ -283,7 +289,8 @@\n root = self.session.http.post(\n urljoin(\"https://account.nicovideo.jp\", root.xpath(\"string(.//form[@action]/@action)\")),\n data=input_with_value,\n- schema=validate.Schema(validate.parse_html()))\n+ schema=validate.Schema(validate.parse_html()),\n+ )\n log.debug(f\"Cookies: {self.session.http.cookies.get_dict()}\")\n if self.session.http.cookies.get(\"user_session\") is None:\n error = root.xpath(\"string(//div[@class='formError']/div/text())\")\n", "issue": "plugins.nicolive: Niconico login broken (404)\n### Checklist\n\n- [X] This is a plugin issue and not a different kind of issue\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [ ] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nLatest build from the master branch\n\n### Description\n\nThe niconico function using login is broken. 
This error is shown:\r\n404 Client Error: Not Found for url: https://account.nicovideo.jp/\r\n\r\nThe plugin seems to be pointing to https://account.nicovideo.jp/login/redirector which does not work anymore.\r\nPointing to https://account.nicovideo.jp/login instead should solve the issue.\n\n### Debug log\n\n```text\nUsed syntax:\r\nstreamlink -o \"output.ts\" https://live.nicovideo.jp/watch/[video-ID] best --niconico-email [mailaddress] --niconico-password [PW]\n```\n\n", "before_files": [{"content": "\"\"\"\n$description Japanese live streaming and video hosting social platform.\n$url live.nicovideo.jp\n$type live, vod\n$account Required by some streams\n$notes Timeshift is supported\n\"\"\"\n\nimport logging\nimport re\nfrom threading import Event\nfrom urllib.parse import urljoin\n\nfrom streamlink.exceptions import FatalPluginError\nfrom streamlink.plugin import Plugin, PluginArgument, PluginArguments, PluginError, pluginmatcher\nfrom streamlink.plugin.api import useragents, validate\nfrom streamlink.plugin.api.websocket import WebsocketClient\nfrom streamlink.stream.hls import HLSStream, HLSStreamReader\nfrom streamlink.utils.parse import parse_json\nfrom streamlink.utils.times import hours_minutes_seconds\nfrom streamlink.utils.url import update_qsd\n\nlog = logging.getLogger(__name__)\n\n\nclass NicoLiveWsClient(WebsocketClient):\n STREAM_OPENED_TIMEOUT = 6\n\n ready: Event\n opened: Event\n hls_stream_url: str\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.opened = Event()\n self.ready = Event()\n\n def on_open(self, wsapp):\n super().on_open(wsapp)\n self.send_playerversion()\n self.send_getpermit()\n\n def on_message(self, wsapp, data: str):\n log.debug(f\"Received: {data}\")\n message = parse_json(data)\n msgtype = message.get(\"type\")\n msgdata = message.get(\"data\", {})\n\n if msgtype == \"ping\":\n self.send_pong()\n\n elif msgtype == \"stream\" and msgdata.get(\"protocol\") == \"hls\" and msgdata.get(\"uri\"):\n self.hls_stream_url = msgdata.get(\"uri\")\n self.ready.set()\n if self.opened.wait(self.STREAM_OPENED_TIMEOUT):\n log.debug(\"Stream opened, keeping websocket connection alive\")\n else:\n log.info(\"Closing websocket connection\")\n self.close()\n\n elif msgtype == \"disconnect\":\n reason = msgdata.get(\"reason\", \"Unknown reason\")\n log.info(f\"Received disconnect message: {reason}\")\n self.close()\n\n def send_playerversion(self):\n self.send_json({\n \"type\": \"startWatching\",\n \"data\": {\n \"stream\": {\n \"quality\": \"abr\",\n \"protocol\": \"hls\",\n \"latency\": \"high\",\n \"chasePlay\": False\n },\n \"room\": {\n \"protocol\": \"webSocket\",\n \"commentable\": True\n },\n \"reconnect\": False\n }\n })\n\n def send_getpermit(self):\n self.send_json({\n \"type\": \"getAkashic\",\n \"data\": {\n \"chasePlay\": False\n }\n })\n\n def send_pong(self):\n self.send_json({\"type\": \"pong\"})\n self.send_json({\"type\": \"keepSeat\"})\n\n\nclass NicoLiveHLSStreamReader(HLSStreamReader):\n stream: \"NicoLiveHLSStream\"\n\n def open(self):\n self.stream.wsclient.opened.set()\n super().open()\n\n def close(self):\n super().close()\n self.stream.wsclient.close()\n\n\nclass NicoLiveHLSStream(HLSStream):\n __reader__ = NicoLiveHLSStreamReader\n wsclient: NicoLiveWsClient\n\n def set_wsclient(self, wsclient: NicoLiveWsClient):\n self.wsclient = wsclient\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?P<domain>live\\d*\\.nicovideo\\.jp)/watch/(lv|co)\\d+\"\n))\nclass NicoLive(Plugin):\n arguments = PluginArguments(\n 
PluginArgument(\n \"email\",\n argument_name=\"niconico-email\",\n sensitive=True,\n metavar=\"EMAIL\",\n help=\"The email or phone number associated with your Niconico account\"\n ),\n PluginArgument(\n \"password\",\n argument_name=\"niconico-password\",\n sensitive=True,\n metavar=\"PASSWORD\",\n help=\"The password of your Niconico account\"\n ),\n PluginArgument(\n \"user-session\",\n argument_name=\"niconico-user-session\",\n sensitive=True,\n metavar=\"VALUE\",\n help=\"Value of the user-session token \\n(can be used in \"\n \"case you do not want to put your password here)\"\n ),\n PluginArgument(\n \"purge-credentials\",\n argument_name=\"niconico-purge-credentials\",\n action=\"store_true\",\n help=\"Purge cached Niconico credentials to initiate a new session and reauthenticate.\"\n ),\n PluginArgument(\n \"timeshift-offset\",\n type=hours_minutes_seconds,\n argument_name=\"niconico-timeshift-offset\",\n metavar=\"[HH:]MM:SS\",\n default=None,\n help=\"Amount of time to skip from the beginning of a stream. Default is 00:00:00.\"\n )\n )\n\n STREAM_READY_TIMEOUT = 6\n LOGIN_URL = \"https://account.nicovideo.jp/login/redirector\"\n LOGIN_URL_PARAMS = {\n \"show_button_twitter\": 1,\n \"show_button_facebook\": 1,\n \"next_url\": \"/\",\n }\n\n wsclient: NicoLiveWsClient\n\n def _get_streams(self):\n if self.get_option(\"purge_credentials\"):\n self.clear_cookies()\n log.info(\"All credentials were successfully removed\")\n\n self.session.http.headers.update({\n \"User-Agent\": useragents.CHROME,\n })\n\n self.niconico_web_login()\n\n wss_api_url = self.get_wss_api_url()\n if not wss_api_url:\n log.error(\n \"Failed to get wss_api_url. \"\n \"Please check if the URL is correct, \"\n \"and make sure your account has access to the video.\"\n )\n return\n\n self.wsclient = NicoLiveWsClient(self.session, wss_api_url)\n self.wsclient.start()\n\n hls_stream_url = self._get_hls_stream_url()\n if not hls_stream_url:\n return\n\n offset = self.get_option(\"timeshift-offset\")\n if offset and \"timeshift\" in wss_api_url:\n hls_stream_url = update_qsd(hls_stream_url, {\"start\": offset})\n\n for quality, stream in NicoLiveHLSStream.parse_variant_playlist(self.session, hls_stream_url).items():\n stream.set_wsclient(self.wsclient)\n yield quality, stream\n\n def _get_hls_stream_url(self):\n log.debug(f\"Waiting for permit (for at most {self.STREAM_READY_TIMEOUT} seconds)...\")\n if not self.wsclient.ready.wait(self.STREAM_READY_TIMEOUT) or not self.wsclient.is_alive():\n log.error(\"Waiting for permit timed out.\")\n self.wsclient.close()\n return\n\n return self.wsclient.hls_stream_url\n\n def get_wss_api_url(self):\n try:\n data = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_find(\".//script[@id='embedded-data'][@data-props]\"),\n validate.get(\"data-props\"),\n validate.parse_json(),\n {\"site\": {\n \"relive\": {\n \"webSocketUrl\": validate.url(scheme=\"wss\")\n },\n validate.optional(\"frontendId\"): int\n }},\n validate.get(\"site\"),\n validate.union_get((\"relive\", \"webSocketUrl\"), \"frontendId\")\n ))\n except PluginError:\n return\n\n wss_api_url, frontend_id = data\n if frontend_id is not None:\n wss_api_url = update_qsd(wss_api_url, {\"frontend_id\": frontend_id})\n\n return wss_api_url\n\n def niconico_web_login(self):\n user_session = self.get_option(\"user-session\")\n email = self.get_option(\"email\")\n password = self.get_option(\"password\")\n\n if user_session is not None:\n log.info(\"Logging in via provided user session 
cookie\")\n self.session.http.cookies.set(\n \"user_session\",\n user_session,\n path=\"/\",\n domain=\"nicovideo.jp\"\n )\n self.save_cookies()\n\n elif self.session.http.cookies.get(\"user_session\"):\n log.info(\"Logging in via cached user session cookie\")\n\n elif email is not None and password is not None:\n log.info(\"Logging in via provided email and password\")\n root = self.session.http.post(\n self.LOGIN_URL,\n data={\"mail_tel\": email, \"password\": password},\n params=self.LOGIN_URL_PARAMS,\n schema=validate.Schema(validate.parse_html()))\n\n input_with_value = {}\n for elem in root.xpath(\".//input\"):\n if elem.attrib.get(\"value\"):\n input_with_value[elem.attrib.get(\"name\")] = elem.attrib.get(\"value\")\n else:\n if elem.attrib.get(\"id\") == \"oneTimePw\":\n maxlength = int(elem.attrib.get(\"maxlength\"))\n try:\n oneTimePw = self.input_ask(\"Enter the 6 digit number included in email\")\n if len(oneTimePw) > maxlength:\n log.error(\"invalid user input\")\n return\n except FatalPluginError:\n return\n input_with_value[elem.attrib.get(\"name\")] = oneTimePw\n else:\n log.debug(f\"unknown input: {elem.attrib.get('name')}\")\n\n root = self.session.http.post(\n urljoin(\"https://account.nicovideo.jp\", root.xpath(\"string(.//form[@action]/@action)\")),\n data=input_with_value,\n schema=validate.Schema(validate.parse_html()))\n log.debug(f\"Cookies: {self.session.http.cookies.get_dict()}\")\n if self.session.http.cookies.get(\"user_session\") is None:\n error = root.xpath(\"string(//div[@class='formError']/div/text())\")\n log.warning(f\"Login failed: {error or 'unknown reason'}\")\n else:\n log.info(\"Logged in.\")\n self.save_cookies()\n\n\n__plugin__ = NicoLive\n", "path": "src/streamlink/plugins/nicolive.py"}], "after_files": [{"content": "\"\"\"\n$description Japanese live streaming and video hosting social platform.\n$url live.nicovideo.jp\n$type live, vod\n$account Required by some streams\n$notes Timeshift is supported\n\"\"\"\n\nimport logging\nimport re\nfrom threading import Event\nfrom urllib.parse import urljoin\n\nfrom streamlink.exceptions import FatalPluginError\nfrom streamlink.plugin import Plugin, PluginArgument, PluginArguments, PluginError, pluginmatcher\nfrom streamlink.plugin.api import useragents, validate\nfrom streamlink.plugin.api.websocket import WebsocketClient\nfrom streamlink.stream.hls import HLSStream, HLSStreamReader\nfrom streamlink.utils.parse import parse_json\nfrom streamlink.utils.times import hours_minutes_seconds\nfrom streamlink.utils.url import update_qsd\n\nlog = logging.getLogger(__name__)\n\n\nclass NicoLiveWsClient(WebsocketClient):\n STREAM_OPENED_TIMEOUT = 6\n\n ready: Event\n opened: Event\n hls_stream_url: str\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.opened = Event()\n self.ready = Event()\n\n def on_open(self, wsapp):\n super().on_open(wsapp)\n self.send_playerversion()\n self.send_getpermit()\n\n def on_message(self, wsapp, data: str):\n log.debug(f\"Received: {data}\")\n message = parse_json(data)\n msgtype = message.get(\"type\")\n msgdata = message.get(\"data\", {})\n\n if msgtype == \"ping\":\n self.send_pong()\n\n elif msgtype == \"stream\" and msgdata.get(\"protocol\") == \"hls\" and msgdata.get(\"uri\"):\n self.hls_stream_url = msgdata.get(\"uri\")\n self.ready.set()\n if self.opened.wait(self.STREAM_OPENED_TIMEOUT):\n log.debug(\"Stream opened, keeping websocket connection alive\")\n else:\n log.info(\"Closing websocket connection\")\n self.close()\n\n elif msgtype == 
\"disconnect\":\n reason = msgdata.get(\"reason\", \"Unknown reason\")\n log.info(f\"Received disconnect message: {reason}\")\n self.close()\n\n def send_playerversion(self):\n self.send_json({\n \"type\": \"startWatching\",\n \"data\": {\n \"stream\": {\n \"quality\": \"abr\",\n \"protocol\": \"hls\",\n \"latency\": \"high\",\n \"chasePlay\": False\n },\n \"room\": {\n \"protocol\": \"webSocket\",\n \"commentable\": True\n },\n \"reconnect\": False\n }\n })\n\n def send_getpermit(self):\n self.send_json({\n \"type\": \"getAkashic\",\n \"data\": {\n \"chasePlay\": False\n }\n })\n\n def send_pong(self):\n self.send_json({\"type\": \"pong\"})\n self.send_json({\"type\": \"keepSeat\"})\n\n\nclass NicoLiveHLSStreamReader(HLSStreamReader):\n stream: \"NicoLiveHLSStream\"\n\n def open(self):\n self.stream.wsclient.opened.set()\n super().open()\n\n def close(self):\n super().close()\n self.stream.wsclient.close()\n\n\nclass NicoLiveHLSStream(HLSStream):\n __reader__ = NicoLiveHLSStreamReader\n wsclient: NicoLiveWsClient\n\n def set_wsclient(self, wsclient: NicoLiveWsClient):\n self.wsclient = wsclient\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?P<domain>live\\d*\\.nicovideo\\.jp)/watch/(lv|co)\\d+\"\n))\nclass NicoLive(Plugin):\n arguments = PluginArguments(\n PluginArgument(\n \"email\",\n argument_name=\"niconico-email\",\n sensitive=True,\n metavar=\"EMAIL\",\n help=\"The email or phone number associated with your Niconico account\"\n ),\n PluginArgument(\n \"password\",\n argument_name=\"niconico-password\",\n sensitive=True,\n metavar=\"PASSWORD\",\n help=\"The password of your Niconico account\"\n ),\n PluginArgument(\n \"user-session\",\n argument_name=\"niconico-user-session\",\n sensitive=True,\n metavar=\"VALUE\",\n help=\"Value of the user-session token \\n(can be used in \"\n \"case you do not want to put your password here)\"\n ),\n PluginArgument(\n \"purge-credentials\",\n argument_name=\"niconico-purge-credentials\",\n action=\"store_true\",\n help=\"Purge cached Niconico credentials to initiate a new session and reauthenticate.\"\n ),\n PluginArgument(\n \"timeshift-offset\",\n type=hours_minutes_seconds,\n argument_name=\"niconico-timeshift-offset\",\n metavar=\"[HH:]MM:SS\",\n default=None,\n help=\"Amount of time to skip from the beginning of a stream. Default is 00:00:00.\"\n )\n )\n\n STREAM_READY_TIMEOUT = 6\n LOGIN_URL = \"https://account.nicovideo.jp/login/redirector\"\n LOGIN_URL_PARAMS = {\n \"show_button_twitter\": 1,\n \"show_button_facebook\": 1,\n \"next_url\": \"/\",\n }\n\n wsclient: NicoLiveWsClient\n\n def _get_streams(self):\n if self.get_option(\"purge_credentials\"):\n self.clear_cookies()\n log.info(\"All credentials were successfully removed\")\n\n self.session.http.headers.update({\n \"User-Agent\": useragents.CHROME,\n })\n\n self.niconico_web_login()\n\n wss_api_url = self.get_wss_api_url()\n if not wss_api_url:\n log.error(\n \"Failed to get wss_api_url. 
\"\n \"Please check if the URL is correct, \"\n \"and make sure your account has access to the video.\"\n )\n return\n\n self.wsclient = NicoLiveWsClient(self.session, wss_api_url)\n self.wsclient.start()\n\n hls_stream_url = self._get_hls_stream_url()\n if not hls_stream_url:\n return\n\n offset = self.get_option(\"timeshift-offset\")\n if offset and \"timeshift\" in wss_api_url:\n hls_stream_url = update_qsd(hls_stream_url, {\"start\": offset})\n\n for quality, stream in NicoLiveHLSStream.parse_variant_playlist(self.session, hls_stream_url).items():\n stream.set_wsclient(self.wsclient)\n yield quality, stream\n\n def _get_hls_stream_url(self):\n log.debug(f\"Waiting for permit (for at most {self.STREAM_READY_TIMEOUT} seconds)...\")\n if not self.wsclient.ready.wait(self.STREAM_READY_TIMEOUT) or not self.wsclient.is_alive():\n log.error(\"Waiting for permit timed out.\")\n self.wsclient.close()\n return\n\n return self.wsclient.hls_stream_url\n\n def get_wss_api_url(self):\n try:\n data = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_find(\".//script[@id='embedded-data'][@data-props]\"),\n validate.get(\"data-props\"),\n validate.parse_json(),\n {\"site\": {\n \"relive\": {\n \"webSocketUrl\": validate.url(scheme=\"wss\")\n },\n validate.optional(\"frontendId\"): int\n }},\n validate.get(\"site\"),\n validate.union_get((\"relive\", \"webSocketUrl\"), \"frontendId\")\n ))\n except PluginError:\n return\n\n wss_api_url, frontend_id = data\n if frontend_id is not None:\n wss_api_url = update_qsd(wss_api_url, {\"frontend_id\": frontend_id})\n\n return wss_api_url\n\n def niconico_web_login(self):\n user_session = self.get_option(\"user-session\")\n email = self.get_option(\"email\")\n password = self.get_option(\"password\")\n\n if user_session is not None:\n log.info(\"Logging in via provided user session cookie\")\n self.session.http.cookies.set(\n \"user_session\",\n user_session,\n path=\"/\",\n domain=\"nicovideo.jp\"\n )\n self.save_cookies()\n\n elif self.session.http.cookies.get(\"user_session\"):\n log.info(\"Logging in via cached user session cookie\")\n\n elif email is not None and password is not None:\n log.info(\"Logging in via provided email and password\")\n root = self.session.http.post(\n self.LOGIN_URL,\n data={\"mail_tel\": email, \"password\": password},\n params=self.LOGIN_URL_PARAMS,\n schema=validate.Schema(validate.parse_html()),\n )\n\n if self.session.http.cookies.get(\"user_session\"):\n log.info(\"Logged in.\")\n self.save_cookies()\n return\n\n input_with_value = {}\n for elem in root.xpath(\".//form[@action]//input\"):\n if elem.attrib.get(\"value\"):\n input_with_value[elem.attrib.get(\"name\")] = elem.attrib.get(\"value\")\n else:\n if elem.attrib.get(\"id\") == \"oneTimePw\":\n maxlength = int(elem.attrib.get(\"maxlength\"))\n try:\n oneTimePw = self.input_ask(\"Enter the 6 digit number included in email\")\n if len(oneTimePw) > maxlength:\n log.error(\"invalid user input\")\n return\n except FatalPluginError:\n return\n input_with_value[elem.attrib.get(\"name\")] = oneTimePw\n else:\n log.debug(f\"unknown input: {elem.attrib.get('name')}\")\n\n root = self.session.http.post(\n urljoin(\"https://account.nicovideo.jp\", root.xpath(\"string(.//form[@action]/@action)\")),\n data=input_with_value,\n schema=validate.Schema(validate.parse_html()),\n )\n log.debug(f\"Cookies: {self.session.http.cookies.get_dict()}\")\n if self.session.http.cookies.get(\"user_session\") is None:\n error = 
root.xpath(\"string(//div[@class='formError']/div/text())\")\n log.warning(f\"Login failed: {error or 'unknown reason'}\")\n else:\n log.info(\"Logged in.\")\n self.save_cookies()\n\n\n__plugin__ = NicoLive\n", "path": "src/streamlink/plugins/nicolive.py"}]} | 3,484 | 329 |
gh_patches_debug_24722 | rasdani/github-patches | git_diff | bids-standard__pybids-329 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
validator.get_path_values raises KeyError: 'regexp'
What a great tutorial - it keeps bearing fruit... Continuing from #267, I was exploring new features of the validator and found `get_path_values`, which seems to want a path. So, echoing `is_file`, I pointed it at /sub-02/ses-01/anat/sub-02_ses-01_T2w.json, and this is what I got:
```
--------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-71-00eb4b2b179c> in <module>()
----> 1 validator.get_path_values('/sub-02/ses-01/anat/sub-02_ses-01_T2w.json')
/home/yoh/proj/bids/pybids/bids/layout/validation.pyc in get_path_values(self, path)
164 values = {}
165
--> 166 regexps = self.get_regular_expressions('path.json')
167
168 # capture subject
/home/yoh/proj/bids/pybids/bids/layout/validation.pyc in get_regular_expressions(self, filename)
144 rule = rules[key]
145
--> 146 regexp = rule["regexp"]
147
148 if "tokens" in rule:
KeyError: 'regexp'
```
--- END ISSUE ---
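For readers reproducing this locally: the traceback points at `get_regular_expressions`, which assumes every rule in the JSON rule file carries a `regexp` key. A minimal sketch of the failure mode is below; the contents of `path.json` are not shown in the issue, so the rule structure here is an assumption used purely for illustration.

```python
# Hypothetical shape of config/validator/path.json (assumed, not the real file):
rules = {
    "sub": {"tokens": {"@@_sub_@@": ["sub-[a-zA-Z0-9]+"]}},  # no "regexp" key
    "ses": {"tokens": {"@@_ses_@@": ["ses-[a-zA-Z0-9]+"]}},  # no "regexp" key
}

for key in rules:
    rule = rules[key]
    regexp = rule["regexp"]  # raises KeyError: 'regexp', matching the traceback above
```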
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bids/layout/validation.py`
Content:
```
1 """Tools for validating BIDS projects."""
2
3 import re
4 import json
5 from os.path import join, abspath, dirname
6
7
8 __all__ = ['BIDSValidator']
9
10
11 class BIDSValidator():
12 """An object for BIDS (Brain Imaging Data Structure) verification in a data.
13
14 The main method of this class is `is_bids()`. You should use it for
15 checking whether a file path compatible with BIDS.
16
17 Parameters
18 ----------
19 index_associated : bool, default: True
20 Specifies if an associated data should be checked. If it is true then
21 any file paths in directories `code/`, `derivatives/`, `sourcedata/`
22 and `stimuli/` will pass the validation, else they won't.
23
24 Examples
25 --------
26 >>> from bids.layout import BIDSValidator
27 >>> validator = BIDSValidator()
28 >>> filepaths = ["/sub-01/anat/sub-01_rec-CSD_T1w.nii.gz",
29 >>> "/sub-01/anat/sub-01_acq-23_rec-CSD_T1w.exe", #wrong extension
30 >>> "/participants.tsv"]
31 >>> for filepath in filepaths:
32 >>> print( validator.is_bids(filepath) )
33 True
34 False
35 True
36 """
37
38 def __init__(self, index_associated=True):
39 self.rule_dir = join(dirname(abspath(__file__)),'config', 'validator')
40 self.index_associated = index_associated
41
42 def is_bids(self, path):
43 """Check if a file path appropriate for BIDS.
44
45 Main method of the validator. uses other class methods for checking
46 different aspects of the file path.
47
48 Parameters
49 ----------
50 path: string
51 A path of a file you want to check.
52
53 Examples
54 --------
55 >>> from bids.layout import BIDSValidator
56 >>> validator = BIDSValidator()
57 >>> validator.is_bids("/sub-01/ses-test/anat/sub-01_ses-test_rec-CSD_run-23_T1w.nii.gz")
58 True
59 >>> validator.is_bids("/sub-01/ses-test/sub-01_run-01_dwi.bvec") # missed session in the filename
60 False
61 """
62 conditions = []
63
64 conditions.append(self.is_top_level(path))
65 conditions.append(self.is_associated_data(path))
66 conditions.append(self.is_session_level(path))
67 conditions.append(self.is_subject_level(path))
68 conditions.append(self.is_phenotypic(path))
69 conditions.append(self.is_file(path))
70
71 return (any(conditions))
72
73 def is_top_level(self, path):
74 """Check if the file has appropriate name for a top-level file."""
75 with open(join(self.rule_dir, 'fixed_top_level_names.json'), 'r') as f:
76 fixed_top_level_json = json.load(f)
77 fixed_top_level_names = fixed_top_level_json['fixed_top_level_names']
78
79 regexps = self.get_regular_expressions('top_level_rules.json')
80
81 conditions = [False if re.compile(x).search(path) is None else True
82 for x in regexps]
83
84 conditions.append(path in fixed_top_level_names)
85
86 return (any(conditions))
87
88 def is_associated_data(self, path):
89 """Check if file is appropriate associated data."""
90 if not self.index_associated:
91 return False
92
93 regexps = self.get_regular_expressions('associated_data_rules.json')
94
95 conditions = [(re.compile(x).search(path) is not None)
96 for x in regexps]
97
98 return any(conditions)
99
100 def is_session_level(self, path):
101 """Check if the file has appropriate name for a session level."""
102 regexps = self.get_regular_expressions('session_level_rules.json')
103
104 conditions = [self.conditional_match(x, path) for x in regexps]
105
106 return (any(conditions))
107
108 def is_subject_level(self, path):
109 """Check if the file has appropriate name for a subject level."""
110 regexps = self.get_regular_expressions('subject_level_rules.json')
111
112 conditions = [(re.compile(x).search(path) is not None)
113 for x in regexps]
114
115 return (any(conditions))
116
117 def is_phenotypic(self, path):
118 """Check if file is phenotypic data."""
119 regexps = self.get_regular_expressions('phenotypic_rules.json')
120
121 conditions = [(re.compile(x).search(path) is not None)
122 for x in regexps]
123
124 return (any(conditions))
125
126 def is_file(self, path):
127 """Check if file is phenotypic data."""
128 regexps = self.get_regular_expressions('file_level_rules.json')
129
130 conditions = [(re.compile(x).search(path) is not None)
131 for x in regexps]
132
133 return (any(conditions))
134
135 def get_regular_expressions(self, filename):
136 """Get regular expressions from file."""
137 regexps = []
138
139 filename = join(self.rule_dir, filename)
140
141 with open(filename, 'r') as f:
142 rules = json.load(f)
143
144 for key in list(rules.keys()):
145 rule = rules[key]
146
147 regexp = rule["regexp"]
148
149 if "tokens" in rule:
150 tokens = rule["tokens"]
151
152 for token in list(tokens):
153 regexp = regexp.replace(token, "|".join(tokens[token]))
154
155 regexps.append(regexp)
156
157 return regexps
158
159 def get_path_values(self, path):
160 """Takes a file path and returns values found for the following path
161 keys:
162 sub-
163 ses-
164 """
165 values = {}
166
167 regexps = self.get_regular_expressions('path.json')
168
169 # capture subject
170 for paths in ['sub', 'ses']:
171 match = re.compile(regexps[paths]).findall(path)
172 values[paths] = match[1] if match & match[1] else None
173
174 return values
175
176 def conditional_match(self, expression, path):
177 match = re.compile(expression).findall(path)
178 match = match[0] if len(match) >= 1 else False
179 # adapted from JS code and JS does not support conditional groups
180 if (match):
181 return ((match[1] == match[2][1:]) | (not match[1]))
182 else:
183 return False
184
185 def validate_sequences(layout, config):
186 """Checks files in BIDS project match user defined expectations.
187
188 This method is a wrapper for the check_duplicate_files() and
189 check_expected_files() methods. Use it to check whether there are
190 files with duplicate content within the BIDS data set and to check
191 the number of data set files against a user customized configuration
192 file. Returns a named tuple of three data frames: duplicates, summary, and problems.
193
194
195 Parameters
196 ----------
197 layout: BIDSLayout class
198 A BIDSLayout path of a data set.
199
200 config: string
201 Path to customized configuration file. Requires `runs` as an input.
202 See the sample config for an example (bids/layout/tests/data/sample_validation_config.json).
203
204
205 Examples
206 --------
207 >>> layout = bids.grabbids.BIDSLayout('/path_to/sample_project_root')
208 >>> dfs = validate_sequences(layout, 'pybids/bids/layout/tests/data/sample_validation_config.json')
209 >>> dfs.duplicates
210 # Put example output here
211 >>> df.summary
212 # Put example output here
213 >>> df.problems
214 # Put example output here
215 """
216
217 duplicate_file_df = check_duplicate_files(layout)
218 summary_df, problem_df = check_expected_files(layout, config)
219 output = namedtuple('output', ['duplicates', 'summary', 'problems'])
220 return output(duplicate_file_df, summary_df, problem_df)
221
222
223 def check_duplicate_files(layout):
224 """Checks images in BIDS project are not duplicated.
225
226 Check whether any files have duplicate content within the
227 BIDS data set. Returns a data frame: duplicate_file_df.
228
229
230 Parameters
231 ----------
232 layout: BIDSLayout class
233 A BIDSLayout path of a data set.
234
235
236 Examples
237 --------
238 >>> layout = bids.grabbids.BIDSLayout('/path_to/sample_project_root')
239 >>> duplicate_file_df = check_duplicate_files(layout)
240 >>> duplicate_file_df
241 # Put example output here
242
243
244 Notes
245 ------
246 Returns a data frame in which the first column is the file
247 identifier and the second column is the path to the file.
248 Files with matching identifiers have the same content.
249 """
250
251 import pandas as pd
252 def md5(fname):
253 hash_md5 = hashlib.md5()
254 with open(fname, "rb") as f:
255 for chunk in iter(lambda: f.read(4096), b""):
256 hash_md5.update(chunk)
257 return hash_md5.hexdigest()
258 hash_map = {}
259 all_niftis = layout.get(return_type="file", extensions='.nii.gz')
260 for nifti_file in all_niftis:
261 md5sum = md5(nifti_file)
262 if md5sum in hash_map:
263 hash_map[md5sum].append(nifti_file)
264 else:
265 hash_map[md5sum] = [nifti_file]
266 df = pd.DataFrame.from_dict(hash_map, orient='index')
267 pruned_df = df.stack().reset_index().drop(columns='level_1')
268 out_df = pruned_df.rename(columns={'level_0': 'hash', 0: 'filename'})
269 return out_df
270
271
272 def check_expected_files(layout, config):
273 """Checks files in BIDS project match user defined expectations.
274
275 This method checks the number of data set files against a user customized
276 configuration file. Returns two data frames: summary_df, problem_df.
277
278
279 Parameters
280 ----------
281 layout: BIDSLayout class
282 A BIDSLayout path of a data set.
283
284 config: string
285 Path to customized configuration file.
286
287
288 Examples
289 --------
290 >>> layout = bids.grabbids.BIDSLayout('/path_to/sample_project_root')
291 >>> summary_df, problem_df = check_expected_files(layout, 'pybids/bids/layout/tests/data/sample_validation_config.json')
292 >>> summary_df
293 # Put example output here
294 >>> problem_df
295 # Put example output here
296
297
298 Notes
299 --------
300
301 `runs` is a mandatory field in the config file.
302
303 The configuration file can take any keys that are valid arguments for
304 pybids `layout.get()` Values shoud match those in the BIDS file names.
305 See the sample config for an example (bids/layout/tests/data/sample_validation_config.json).
306 The more specific keys are provided, the more informative the output will be.
307
308 """
309
310 import pandas as pd
311 dictlist = []
312 with open(config) as f:
313 json_data = json.load(f)
314 subjects = layout.get_subjects()
315 for sub in subjects:
316 for scan_params_d in json_data['sequences']:
317 scan_params = scan_params_d.copy()
318 seq_params = {i: scan_params[i] for i in scan_params if i != 'runs'}
319 actual_runs = layout.get(return_type='obj', subject=sub, extensions='.nii.gz', **seq_params)
320 scan_params['subject'] = sub
321 scan_params['runs_found'] = len(actual_runs)
322 scan_params['problem'] = len(actual_runs) != scan_params['runs']
323 dictlist.append(scan_params)
324 summary_df = pd.DataFrame(dictlist)
325 problem_df = summary_df.loc[summary_df['problem'] == True]
326 return summary_df, problem_df
```
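Two further observations about `get_path_values`, grounded in the listing above: `get_regular_expressions` returns a plain list, yet `get_path_values` indexes it with string keys, and the expression `match & match[1]` applies a bitwise operator where a boolean `and` was almost certainly intended. The module also uses `namedtuple` and `hashlib` without importing them. A short illustration of the indexing mismatch follows; the regex strings are placeholders, not the real rule contents.

```python
# What get_regular_expressions actually returns: a list of pattern strings.
regexps = ["sub-([a-zA-Z0-9]+)", "ses-([a-zA-Z0-9]+)"]  # placeholder patterns

# get_path_values then effectively does:
regexps["sub"]  # TypeError: list indices must be integers or slices, not str
```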
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bids/layout/validation.py b/bids/layout/validation.py
--- a/bids/layout/validation.py
+++ b/bids/layout/validation.py
@@ -3,6 +3,7 @@
import re
import json
from os.path import join, abspath, dirname
+from collections import namedtuple
__all__ = ['BIDSValidator']
@@ -156,23 +157,6 @@
return regexps
- def get_path_values(self, path):
- """Takes a file path and returns values found for the following path
- keys:
- sub-
- ses-
- """
- values = {}
-
- regexps = self.get_regular_expressions('path.json')
-
- # capture subject
- for paths in ['sub', 'ses']:
- match = re.compile(regexps[paths]).findall(path)
- values[paths] = match[1] if match & match[1] else None
-
- return values
-
def conditional_match(self, expression, path):
match = re.compile(expression).findall(path)
match = match[0] if len(match) >= 1 else False
@@ -249,6 +233,7 @@
"""
import pandas as pd
+ import hashlib
def md5(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
| {"golden_diff": "diff --git a/bids/layout/validation.py b/bids/layout/validation.py\n--- a/bids/layout/validation.py\n+++ b/bids/layout/validation.py\n@@ -3,6 +3,7 @@\n import re\n import json\n from os.path import join, abspath, dirname\n+from collections import namedtuple\n \n \n __all__ = ['BIDSValidator']\n@@ -156,23 +157,6 @@\n \n return regexps\n \n- def get_path_values(self, path):\n- \"\"\"Takes a file path and returns values found for the following path\n- keys:\n- sub-\n- ses-\n- \"\"\"\n- values = {}\n-\n- regexps = self.get_regular_expressions('path.json')\n-\n- # capture subject\n- for paths in ['sub', 'ses']:\n- match = re.compile(regexps[paths]).findall(path)\n- values[paths] = match[1] if match & match[1] else None\n-\n- return values\n-\n def conditional_match(self, expression, path):\n match = re.compile(expression).findall(path)\n match = match[0] if len(match) >= 1 else False\n@@ -249,6 +233,7 @@\n \"\"\"\n \n import pandas as pd\n+ import hashlib\n def md5(fname):\n hash_md5 = hashlib.md5()\n with open(fname, \"rb\") as f:\n", "issue": "validator.get_path_values raises KeyError: 'regexp'\nWhat a great tutorial - keeps bearing fruits... Continuing to #267 I was exploring new features of the validator, and found `get_path_values` which seems want ta path, so echoing `is_file` I pointed it to /sub-02/ses-01/anat/sub-02_ses-01_T2w.json to just get \r\n```\r\n--------------------------------------------------------------------------\r\nKeyError Traceback (most recent call last)\r\n<ipython-input-71-00eb4b2b179c> in <module>()\r\n----> 1 validator.get_path_values('/sub-02/ses-01/anat/sub-02_ses-01_T2w.json')\r\n\r\n/home/yoh/proj/bids/pybids/bids/layout/validation.pyc in get_path_values(self, path)\r\n 164 values = {}\r\n 165 \r\n--> 166 regexps = self.get_regular_expressions('path.json')\r\n 167 \r\n 168 # capture subject\r\n\r\n/home/yoh/proj/bids/pybids/bids/layout/validation.pyc in get_regular_expressions(self, filename)\r\n 144 rule = rules[key]\r\n 145 \r\n--> 146 regexp = rule[\"regexp\"]\r\n 147 \r\n 148 if \"tokens\" in rule:\r\n\r\nKeyError: 'regexp'\r\n```\n", "before_files": [{"content": "\"\"\"Tools for validating BIDS projects.\"\"\"\n\nimport re\nimport json\nfrom os.path import join, abspath, dirname\n\n\n__all__ = ['BIDSValidator']\n\n\nclass BIDSValidator():\n \"\"\"An object for BIDS (Brain Imaging Data Structure) verification in a data.\n\n The main method of this class is `is_bids()`. You should use it for\n checking whether a file path compatible with BIDS.\n\n Parameters\n ----------\n index_associated : bool, default: True\n Specifies if an associated data should be checked. If it is true then\n any file paths in directories `code/`, `derivatives/`, `sourcedata/`\n and `stimuli/` will pass the validation, else they won't.\n\n Examples\n --------\n >>> from bids.layout import BIDSValidator\n >>> validator = BIDSValidator()\n >>> filepaths = [\"/sub-01/anat/sub-01_rec-CSD_T1w.nii.gz\",\n >>> \"/sub-01/anat/sub-01_acq-23_rec-CSD_T1w.exe\", #wrong extension\n >>> \"/participants.tsv\"]\n >>> for filepath in filepaths:\n >>> print( validator.is_bids(filepath) )\n True\n False\n True\n \"\"\"\n\n def __init__(self, index_associated=True):\n self.rule_dir = join(dirname(abspath(__file__)),'config', 'validator')\n self.index_associated = index_associated\n\n def is_bids(self, path):\n \"\"\"Check if a file path appropriate for BIDS.\n\n Main method of the validator. 
uses other class methods for checking\n different aspects of the file path.\n\n Parameters\n ----------\n path: string\n A path of a file you want to check.\n\n Examples\n --------\n >>> from bids.layout import BIDSValidator\n >>> validator = BIDSValidator()\n >>> validator.is_bids(\"/sub-01/ses-test/anat/sub-01_ses-test_rec-CSD_run-23_T1w.nii.gz\")\n True\n >>> validator.is_bids(\"/sub-01/ses-test/sub-01_run-01_dwi.bvec\") # missed session in the filename\n False\n \"\"\"\n conditions = []\n\n conditions.append(self.is_top_level(path))\n conditions.append(self.is_associated_data(path))\n conditions.append(self.is_session_level(path))\n conditions.append(self.is_subject_level(path))\n conditions.append(self.is_phenotypic(path))\n conditions.append(self.is_file(path))\n\n return (any(conditions))\n\n def is_top_level(self, path):\n \"\"\"Check if the file has appropriate name for a top-level file.\"\"\"\n with open(join(self.rule_dir, 'fixed_top_level_names.json'), 'r') as f:\n fixed_top_level_json = json.load(f)\n fixed_top_level_names = fixed_top_level_json['fixed_top_level_names']\n\n regexps = self.get_regular_expressions('top_level_rules.json')\n\n conditions = [False if re.compile(x).search(path) is None else True\n for x in regexps]\n\n conditions.append(path in fixed_top_level_names)\n\n return (any(conditions))\n\n def is_associated_data(self, path):\n \"\"\"Check if file is appropriate associated data.\"\"\"\n if not self.index_associated:\n return False\n\n regexps = self.get_regular_expressions('associated_data_rules.json')\n\n conditions = [(re.compile(x).search(path) is not None)\n for x in regexps]\n\n return any(conditions)\n\n def is_session_level(self, path):\n \"\"\"Check if the file has appropriate name for a session level.\"\"\"\n regexps = self.get_regular_expressions('session_level_rules.json')\n\n conditions = [self.conditional_match(x, path) for x in regexps]\n\n return (any(conditions))\n\n def is_subject_level(self, path):\n \"\"\"Check if the file has appropriate name for a subject level.\"\"\"\n regexps = self.get_regular_expressions('subject_level_rules.json')\n\n conditions = [(re.compile(x).search(path) is not None)\n for x in regexps]\n\n return (any(conditions))\n\n def is_phenotypic(self, path):\n \"\"\"Check if file is phenotypic data.\"\"\"\n regexps = self.get_regular_expressions('phenotypic_rules.json')\n\n conditions = [(re.compile(x).search(path) is not None)\n for x in regexps]\n\n return (any(conditions))\n\n def is_file(self, path):\n \"\"\"Check if file is phenotypic data.\"\"\"\n regexps = self.get_regular_expressions('file_level_rules.json')\n\n conditions = [(re.compile(x).search(path) is not None)\n for x in regexps]\n\n return (any(conditions))\n\n def get_regular_expressions(self, filename):\n \"\"\"Get regular expressions from file.\"\"\"\n regexps = []\n\n filename = join(self.rule_dir, filename)\n\n with open(filename, 'r') as f:\n rules = json.load(f)\n\n for key in list(rules.keys()):\n rule = rules[key]\n\n regexp = rule[\"regexp\"]\n\n if \"tokens\" in rule:\n tokens = rule[\"tokens\"]\n\n for token in list(tokens):\n regexp = regexp.replace(token, \"|\".join(tokens[token]))\n\n regexps.append(regexp)\n\n return regexps\n\n def get_path_values(self, path):\n \"\"\"Takes a file path and returns values found for the following path\n keys:\n sub-\n ses-\n \"\"\"\n values = {}\n\n regexps = self.get_regular_expressions('path.json')\n\n # capture subject\n for paths in ['sub', 'ses']:\n match = re.compile(regexps[paths]).findall(path)\n 
values[paths] = match[1] if match & match[1] else None\n\n return values\n\n def conditional_match(self, expression, path):\n match = re.compile(expression).findall(path)\n match = match[0] if len(match) >= 1 else False\n # adapted from JS code and JS does not support conditional groups\n if (match):\n return ((match[1] == match[2][1:]) | (not match[1]))\n else:\n return False\n\ndef validate_sequences(layout, config):\n \"\"\"Checks files in BIDS project match user defined expectations.\n\n This method is a wrapper for the check_duplicate_files() and \n check_expected_files() methods. Use it to check whether there are\n files with duplicate content within the BIDS data set and to check\n the number of data set files against a user customized configuration\n file. Returns a named tuple of three data frames: duplicates, summary, and problems.\n\n\n Parameters\n ----------\n layout: BIDSLayout class\n A BIDSLayout path of a data set.\n\n config: string\n Path to customized configuration file. Requires `runs` as an input.\n See the sample config for an example (bids/layout/tests/data/sample_validation_config.json).\n\n\n Examples\n --------\n >>> layout = bids.grabbids.BIDSLayout('/path_to/sample_project_root')\n >>> dfs = validate_sequences(layout, 'pybids/bids/layout/tests/data/sample_validation_config.json')\n >>> dfs.duplicates\n # Put example output here\n >>> df.summary\n # Put example output here\n >>> df.problems\n # Put example output here\n \"\"\"\n \n duplicate_file_df = check_duplicate_files(layout)\n summary_df, problem_df = check_expected_files(layout, config)\n output = namedtuple('output', ['duplicates', 'summary', 'problems'])\n return output(duplicate_file_df, summary_df, problem_df)\n \n \ndef check_duplicate_files(layout):\n \"\"\"Checks images in BIDS project are not duplicated.\n\n Check whether any files have duplicate content within the \n BIDS data set. Returns a data frame: duplicate_file_df.\n\n\n Parameters\n ----------\n layout: BIDSLayout class\n A BIDSLayout path of a data set.\n\n\n Examples\n --------\n >>> layout = bids.grabbids.BIDSLayout('/path_to/sample_project_root')\n >>> duplicate_file_df = check_duplicate_files(layout)\n >>> duplicate_file_df\n # Put example output here\n\n\n Notes\n ------\n Returns a data frame in which the first column is the file\n identifier and the second column is the path to the file.\n Files with matching identifiers have the same content.\n \"\"\"\n \n import pandas as pd\n def md5(fname):\n hash_md5 = hashlib.md5()\n with open(fname, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()\n hash_map = {}\n all_niftis = layout.get(return_type=\"file\", extensions='.nii.gz')\n for nifti_file in all_niftis:\n md5sum = md5(nifti_file)\n if md5sum in hash_map:\n hash_map[md5sum].append(nifti_file)\n else:\n hash_map[md5sum] = [nifti_file]\n df = pd.DataFrame.from_dict(hash_map, orient='index') \n pruned_df = df.stack().reset_index().drop(columns='level_1')\n out_df = pruned_df.rename(columns={'level_0': 'hash', 0: 'filename'})\n return out_df\n \n \ndef check_expected_files(layout, config):\n \"\"\"Checks files in BIDS project match user defined expectations.\n\n This method checks the number of data set files against a user customized \n configuration file. 
Returns two data frames: summary_df, problem_df.\n\n\n Parameters\n ----------\n layout: BIDSLayout class\n A BIDSLayout path of a data set.\n\n config: string\n Path to customized configuration file.\n\n\n Examples\n --------\n >>> layout = bids.grabbids.BIDSLayout('/path_to/sample_project_root')\n >>> summary_df, problem_df = check_expected_files(layout, 'pybids/bids/layout/tests/data/sample_validation_config.json')\n >>> summary_df\n # Put example output here\n >>> problem_df\n # Put example output here\n\n\n Notes\n --------\n\n `runs` is a mandatory field in the config file.\n \n The configuration file can take any keys that are valid arguments for\n pybids `layout.get()` Values shoud match those in the BIDS file names. \n See the sample config for an example (bids/layout/tests/data/sample_validation_config.json).\n The more specific keys are provided, the more informative the output will be.\n\n \"\"\"\n\n import pandas as pd\n dictlist = []\n with open(config) as f:\n json_data = json.load(f)\n subjects = layout.get_subjects()\n for sub in subjects: \n for scan_params_d in json_data['sequences']:\n scan_params = scan_params_d.copy()\n seq_params = {i: scan_params[i] for i in scan_params if i != 'runs'}\n actual_runs = layout.get(return_type='obj', subject=sub, extensions='.nii.gz', **seq_params)\n scan_params['subject'] = sub\n scan_params['runs_found'] = len(actual_runs)\n scan_params['problem'] = len(actual_runs) != scan_params['runs']\n dictlist.append(scan_params)\n summary_df = pd.DataFrame(dictlist)\n problem_df = summary_df.loc[summary_df['problem'] == True]\n return summary_df, problem_df", "path": "bids/layout/validation.py"}], "after_files": [{"content": "\"\"\"Tools for validating BIDS projects.\"\"\"\n\nimport re\nimport json\nfrom os.path import join, abspath, dirname\nfrom collections import namedtuple\n\n\n__all__ = ['BIDSValidator']\n\n\nclass BIDSValidator():\n \"\"\"An object for BIDS (Brain Imaging Data Structure) verification in a data.\n\n The main method of this class is `is_bids()`. You should use it for\n checking whether a file path compatible with BIDS.\n\n Parameters\n ----------\n index_associated : bool, default: True\n Specifies if an associated data should be checked. If it is true then\n any file paths in directories `code/`, `derivatives/`, `sourcedata/`\n and `stimuli/` will pass the validation, else they won't.\n\n Examples\n --------\n >>> from bids.layout import BIDSValidator\n >>> validator = BIDSValidator()\n >>> filepaths = [\"/sub-01/anat/sub-01_rec-CSD_T1w.nii.gz\",\n >>> \"/sub-01/anat/sub-01_acq-23_rec-CSD_T1w.exe\", #wrong extension\n >>> \"/participants.tsv\"]\n >>> for filepath in filepaths:\n >>> print( validator.is_bids(filepath) )\n True\n False\n True\n \"\"\"\n\n def __init__(self, index_associated=True):\n self.rule_dir = join(dirname(abspath(__file__)),'config', 'validator')\n self.index_associated = index_associated\n\n def is_bids(self, path):\n \"\"\"Check if a file path appropriate for BIDS.\n\n Main method of the validator. 
uses other class methods for checking\n different aspects of the file path.\n\n Parameters\n ----------\n path: string\n A path of a file you want to check.\n\n Examples\n --------\n >>> from bids.layout import BIDSValidator\n >>> validator = BIDSValidator()\n >>> validator.is_bids(\"/sub-01/ses-test/anat/sub-01_ses-test_rec-CSD_run-23_T1w.nii.gz\")\n True\n >>> validator.is_bids(\"/sub-01/ses-test/sub-01_run-01_dwi.bvec\") # missed session in the filename\n False\n \"\"\"\n conditions = []\n\n conditions.append(self.is_top_level(path))\n conditions.append(self.is_associated_data(path))\n conditions.append(self.is_session_level(path))\n conditions.append(self.is_subject_level(path))\n conditions.append(self.is_phenotypic(path))\n conditions.append(self.is_file(path))\n\n return (any(conditions))\n\n def is_top_level(self, path):\n \"\"\"Check if the file has appropriate name for a top-level file.\"\"\"\n with open(join(self.rule_dir, 'fixed_top_level_names.json'), 'r') as f:\n fixed_top_level_json = json.load(f)\n fixed_top_level_names = fixed_top_level_json['fixed_top_level_names']\n\n regexps = self.get_regular_expressions('top_level_rules.json')\n\n conditions = [False if re.compile(x).search(path) is None else True\n for x in regexps]\n\n conditions.append(path in fixed_top_level_names)\n\n return (any(conditions))\n\n def is_associated_data(self, path):\n \"\"\"Check if file is appropriate associated data.\"\"\"\n if not self.index_associated:\n return False\n\n regexps = self.get_regular_expressions('associated_data_rules.json')\n\n conditions = [(re.compile(x).search(path) is not None)\n for x in regexps]\n\n return any(conditions)\n\n def is_session_level(self, path):\n \"\"\"Check if the file has appropriate name for a session level.\"\"\"\n regexps = self.get_regular_expressions('session_level_rules.json')\n\n conditions = [self.conditional_match(x, path) for x in regexps]\n\n return (any(conditions))\n\n def is_subject_level(self, path):\n \"\"\"Check if the file has appropriate name for a subject level.\"\"\"\n regexps = self.get_regular_expressions('subject_level_rules.json')\n\n conditions = [(re.compile(x).search(path) is not None)\n for x in regexps]\n\n return (any(conditions))\n\n def is_phenotypic(self, path):\n \"\"\"Check if file is phenotypic data.\"\"\"\n regexps = self.get_regular_expressions('phenotypic_rules.json')\n\n conditions = [(re.compile(x).search(path) is not None)\n for x in regexps]\n\n return (any(conditions))\n\n def is_file(self, path):\n \"\"\"Check if file is phenotypic data.\"\"\"\n regexps = self.get_regular_expressions('file_level_rules.json')\n\n conditions = [(re.compile(x).search(path) is not None)\n for x in regexps]\n\n return (any(conditions))\n\n def get_regular_expressions(self, filename):\n \"\"\"Get regular expressions from file.\"\"\"\n regexps = []\n\n filename = join(self.rule_dir, filename)\n\n with open(filename, 'r') as f:\n rules = json.load(f)\n\n for key in list(rules.keys()):\n rule = rules[key]\n\n regexp = rule[\"regexp\"]\n\n if \"tokens\" in rule:\n tokens = rule[\"tokens\"]\n\n for token in list(tokens):\n regexp = regexp.replace(token, \"|\".join(tokens[token]))\n\n regexps.append(regexp)\n\n return regexps\n\n def conditional_match(self, expression, path):\n match = re.compile(expression).findall(path)\n match = match[0] if len(match) >= 1 else False\n # adapted from JS code and JS does not support conditional groups\n if (match):\n return ((match[1] == match[2][1:]) | (not match[1]))\n else:\n return False\n\ndef 
validate_sequences(layout, config):\n \"\"\"Checks files in BIDS project match user defined expectations.\n\n This method is a wrapper for the check_duplicate_files() and \n check_expected_files() methods. Use it to check whether there are\n files with duplicate content within the BIDS data set and to check\n the number of data set files against a user customized configuration\n file. Returns a named tuple of three data frames: duplicates, summary, and problems.\n\n\n Parameters\n ----------\n layout: BIDSLayout class\n A BIDSLayout path of a data set.\n\n config: string\n Path to customized configuration file. Requires `runs` as an input.\n See the sample config for an example (bids/layout/tests/data/sample_validation_config.json).\n\n\n Examples\n --------\n >>> layout = bids.grabbids.BIDSLayout('/path_to/sample_project_root')\n >>> dfs = validate_sequences(layout, 'pybids/bids/layout/tests/data/sample_validation_config.json')\n >>> dfs.duplicates\n # Put example output here\n >>> df.summary\n # Put example output here\n >>> df.problems\n # Put example output here\n \"\"\"\n \n duplicate_file_df = check_duplicate_files(layout)\n summary_df, problem_df = check_expected_files(layout, config)\n output = namedtuple('output', ['duplicates', 'summary', 'problems'])\n return output(duplicate_file_df, summary_df, problem_df)\n \n \ndef check_duplicate_files(layout):\n \"\"\"Checks images in BIDS project are not duplicated.\n\n Check whether any files have duplicate content within the \n BIDS data set. Returns a data frame: duplicate_file_df.\n\n\n Parameters\n ----------\n layout: BIDSLayout class\n A BIDSLayout path of a data set.\n\n\n Examples\n --------\n >>> layout = bids.grabbids.BIDSLayout('/path_to/sample_project_root')\n >>> duplicate_file_df = check_duplicate_files(layout)\n >>> duplicate_file_df\n # Put example output here\n\n\n Notes\n ------\n Returns a data frame in which the first column is the file\n identifier and the second column is the path to the file.\n Files with matching identifiers have the same content.\n \"\"\"\n \n import pandas as pd\n import hashlib\n def md5(fname):\n hash_md5 = hashlib.md5()\n with open(fname, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()\n hash_map = {}\n all_niftis = layout.get(return_type=\"file\", extensions='.nii.gz')\n for nifti_file in all_niftis:\n md5sum = md5(nifti_file)\n if md5sum in hash_map:\n hash_map[md5sum].append(nifti_file)\n else:\n hash_map[md5sum] = [nifti_file]\n df = pd.DataFrame.from_dict(hash_map, orient='index') \n pruned_df = df.stack().reset_index().drop(columns='level_1')\n out_df = pruned_df.rename(columns={'level_0': 'hash', 0: 'filename'})\n return out_df\n \n \ndef check_expected_files(layout, config):\n \"\"\"Checks files in BIDS project match user defined expectations.\n\n This method checks the number of data set files against a user customized \n configuration file. 
Returns two data frames: summary_df, problem_df.\n\n\n Parameters\n ----------\n layout: BIDSLayout class\n A BIDSLayout path of a data set.\n\n config: string\n Path to customized configuration file.\n\n\n Examples\n --------\n >>> layout = bids.grabbids.BIDSLayout('/path_to/sample_project_root')\n >>> summary_df, problem_df = check_expected_files(layout, 'pybids/bids/layout/tests/data/sample_validation_config.json')\n >>> summary_df\n # Put example output here\n >>> problem_df\n # Put example output here\n\n\n Notes\n --------\n\n `runs` is a mandatory field in the config file.\n \n The configuration file can take any keys that are valid arguments for\n pybids `layout.get()` Values shoud match those in the BIDS file names. \n See the sample config for an example (bids/layout/tests/data/sample_validation_config.json).\n The more specific keys are provided, the more informative the output will be.\n\n \"\"\"\n\n import pandas as pd\n dictlist = []\n with open(config) as f:\n json_data = json.load(f)\n subjects = layout.get_subjects()\n for sub in subjects: \n for scan_params_d in json_data['sequences']:\n scan_params = scan_params_d.copy()\n seq_params = {i: scan_params[i] for i in scan_params if i != 'runs'}\n actual_runs = layout.get(return_type='obj', subject=sub, extensions='.nii.gz', **seq_params)\n scan_params['subject'] = sub\n scan_params['runs_found'] = len(actual_runs)\n scan_params['problem'] = len(actual_runs) != scan_params['runs']\n dictlist.append(scan_params)\n summary_df = pd.DataFrame(dictlist)\n problem_df = summary_df.loc[summary_df['problem'] == True]\n return summary_df, problem_df", "path": "bids/layout/validation.py"}]} | 3,955 | 311 |
gh_patches_debug_38013 | rasdani/github-patches | git_diff | beeware__toga-1113 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Asking how to use "file_types" in open_file_dialog
When I pass a list of file types like the example below:
    def action_open_file_dialog(self, multi_var, widget=None):
        filenames = self.main_window.open_file_dialog(
            title="Open file with Toga",
            multiselect=multi_var,
            file_types=['gif', 'tiff', 'jpeg', 'bmp', 'png', 'webp']
        )
The result is a file-type dropdown with one entry per extension, which prevents the dialog from displaying all of the listed image formats at once.
How can the dialog show every listed format together, instead of having to step through the types one by one or fall back to "All files"? Something similar to this HTML example:
<input type="file"
       id="avatar" name="avatar"
       accept="image/png, image/jpeg">
https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input/file
--- END ISSUE ---
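For background on the Windows side: WinForms file dialogs express the equivalent of HTML's `accept` attribute through the dialog's `Filter` property, where a single dropdown entry may list several patterns separated by semicolons. The snippet below is illustrative only; `dialog` stands for a `WinForms.OpenFileDialog` instance, and the string is not taken from the Toga source.

```python
# One combined entry that matches every listed image type at once,
# plus a narrower entry and an "All files" fallback (illustrative).
dialog.Filter = (
    "All matching files (*.gif;*.tiff;*.jpeg;*.bmp;*.png;*.webp)|"
    "*.gif;*.tiff;*.jpeg;*.bmp;*.png;*.webp|"
    "PNG files (*.png)|*.png|"
    "All files (*.*)|*.*"
)
```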
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/winforms/toga_winforms/window.py`
Content:
```
1 from toga import GROUP_BREAK, SECTION_BREAK
2
3 from .libs import Size, WinForms
4
5
6 class WinFormsViewport:
7 def __init__(self, native, frame):
8 self.native = native
9 self.frame = frame
10 self.baseline_dpi = 96
11
12 @property
13 def width(self):
14 # Treat `native=None` as a 0x0 viewport
15 if self.native is None:
16 return 0
17 return self.native.ClientSize.Width
18
19 @property
20 def height(self):
21 if self.native is None:
22 return 0
23 # Subtract any vertical shift of the frame. This is to allow
24 # for toolbars, or any other viewport-level decoration.
25 return self.native.ClientSize.Height - self.frame.vertical_shift
26
27 @property
28 def dpi(self):
29 if self.native is None:
30 return self.baseline_dpi
31 return self.native.CreateGraphics().DpiX
32
33
34 class Window:
35 def __init__(self, interface):
36 self.interface = interface
37 self.interface._impl = self
38 self.create()
39
40 def create(self):
41 self.native = WinForms.Form(self)
42 self.native.ClientSize = Size(*self.interface._size)
43 self.native.interface = self.interface
44 self.native.Resize += self.winforms_resize
45 self.toolbar_native = None
46 self.toolbar_items = None
47
48 def create_toolbar(self):
49 self.toolbar_native = WinForms.ToolStrip()
50 for cmd in self.interface.toolbar:
51 if cmd == GROUP_BREAK:
52 item = WinForms.ToolStripSeparator()
53 elif cmd == SECTION_BREAK:
54 item = WinForms.ToolStripSeparator()
55 else:
56 if cmd.icon is not None:
57 native_icon = cmd.icon._impl.native
58 item = WinForms.ToolStripMenuItem(cmd.label, native_icon.ToBitmap())
59 else:
60 item = WinForms.ToolStripMenuItem(cmd.label)
61 item.Click += cmd._impl.as_handler()
62 cmd._impl.native.append(item)
63 self.toolbar_native.Items.Add(item)
64
65 def set_position(self, position):
66 pass
67
68 def set_size(self, size):
69 self.native.ClientSize = Size(*self.interface._size)
70
71 def set_app(self, app):
72 if app is None:
73 return
74 icon_impl = app.interface.icon._impl
75 if icon_impl is None:
76 return
77 self.native.Icon = icon_impl.native
78
79 @property
80 def vertical_shift(self):
81 # vertical shift is the toolbar height or 0
82 result = 0
83 try:
84 result += self.native.interface._impl.toolbar_native.Height
85 except AttributeError:
86 pass
87 try:
88 result += self.native.interface._impl.native.MainMenuStrip.Height
89 except AttributeError:
90 pass
91 return result
92
93 def set_content(self, widget):
94 if self.toolbar_native:
95 self.native.Controls.Add(self.toolbar_native)
96 # Create the lookup table of menu items,
97 # then force the creation of the menus.
98 self.native.Controls.Add(widget.native)
99
100 # Set the widget's viewport to be based on the window's content.
101 widget.viewport = WinFormsViewport(native=self.native, frame=self)
102 widget.frame = self
103
104 # Add all children to the content widget.
105 for child in widget.interface.children:
106 child._impl.container = widget
107
108 def set_title(self, title):
109 self.native.Text = title
110
111 def show(self):
112 # The first render of the content will establish the
113 # minimum possible content size; use that to enforce
114 # a minimum window size.
115 TITLEBAR_HEIGHT = WinForms.SystemInformation.CaptionHeight
116 # Now that the content is visible, we can do our initial hinting,
117 # and use that as the basis for setting the minimum window size.
118 self.interface.content._impl.rehint()
119 self.interface.content.style.layout(
120 self.interface.content,
121 WinFormsViewport(native=None, frame=None),
122 )
123 self.native.MinimumSize = Size(
124 int(self.interface.content.layout.width),
125 int(self.interface.content.layout.height) + TITLEBAR_HEIGHT
126 )
127 self.interface.content.refresh()
128
129 self.native.Show()
130
131 def winforms_FormClosing(self, event, handler):
132 if self.interface.app.on_exit:
133 self.interface.app.on_exit(self.interface.app)
134
135 def set_full_screen(self, is_full_screen):
136 self.interface.factory.not_implemented('Window.set_full_screen()')
137
138 def on_close(self):
139 pass
140
141 def close(self):
142 self.native.Close()
143
144 def winforms_resize(self, sender, args):
145 if self.interface.content:
146 # Re-layout the content
147 self.interface.content.refresh()
148
149 def info_dialog(self, title, message):
150 return WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OK)
151
152 def question_dialog(self, title, message):
153 result = WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.YesNo)
154 return result
155
156 def confirm_dialog(self, title, message):
157 result = WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OKCancel)
158 # this returns 1 (DialogResult.OK enum) for OK and 2 for Cancel
159 return True if result == WinForms.DialogResult.OK else False
160
161 def error_dialog(self, title, message):
162 return WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OK,
163 WinForms.MessageBoxIcon.Error)
164
165 def stack_trace_dialog(self, title, message, content, retry=False):
166 pass
167
168 def save_file_dialog(self, title, suggested_filename, file_types):
169 dialog = WinForms.SaveFileDialog()
170 dialog.Title = title
171 if suggested_filename is not None:
172 dialog.FileName = suggested_filename
173 if file_types is not None:
174 dialog.Filter = self.build_filter(file_types)
175 if dialog.ShowDialog() == WinForms.DialogResult.OK:
176 return dialog.FileName
177 else:
178 raise ValueError("No filename provided in the save file dialog")
179
180 def open_file_dialog(self, title, initial_directory, file_types, multiselect):
181 dialog = WinForms.OpenFileDialog()
182 dialog.Title = title
183 if initial_directory is not None:
184 dialog.InitialDirectory = initial_directory
185 if file_types is not None:
186 dialog.Filter = self.build_filter(file_types)
187 if multiselect:
188 dialog.Multiselect = True
189 if dialog.ShowDialog() == WinForms.DialogResult.OK:
190 return dialog.FileNames if multiselect else dialog.FileName
191 else:
192 raise ValueError("No filename provided in the open file dialog")
193
194 def select_folder_dialog(self, title, initial_directory, multiselect):
195 dialog = WinForms.FolderBrowserDialog()
196 dialog.Title = title
197 if initial_directory is not None:
198 dialog.InitialDirectory = initial_directory
199
200 if dialog.ShowDialog() == WinForms.DialogResult.OK:
201 return [dialog.SelectedPath]
202 else:
203 raise ValueError("No folder provided in the select folder dialog")
204
205 def build_filter(self, file_types):
206 file_string = "{0} files (*.{0})|*.{0}"
207 return '|'.join([file_string.format(ext) for ext in file_types]) + \
208 "|All files (*.*)|*.*"
209
```
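As written, `build_filter` above emits one filter entry per extension, so the dialog can only show files of one requested type at a time. One possible variant, sketched below, additionally prepends a combined entry so every requested extension is visible at once; the entry label and its position are illustrative choices, not part of the existing implementation.

```python
def build_filter(file_types):
    # Per-type entries plus the "All files" fallback, as in the current code.
    filters = ["{0} files (*.{0})|*.{0}".format(ext) for ext in file_types]
    filters.append("All files (*.*)|*.*")

    # A combined entry listing every extension with semicolon-separated patterns.
    if len(file_types) > 1:
        patterns = ";".join("*.{0}".format(ext) for ext in file_types)
        filters.insert(0, "All matching files ({0})|{0}".format(patterns))

    return "|".join(filters)
```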
Path: `examples/dialogs/dialogs/app.py`
Content:
```
1 import toga
2 from toga.constants import COLUMN
3 from toga.style import Pack
4
5
6 class ExampledialogsApp(toga.App):
7 # Button callback functions
8 def do_clear(self, widget, **kwargs):
9 self.label.text = "Ready."
10
11 def action_info_dialog(self, widget):
12 self.main_window.info_dialog('Toga', 'THIS! IS! TOGA!!')
13 self.label.text = 'Information was provided.'
14
15 def action_question_dialog(self, widget):
16 if self.main_window.question_dialog('Toga', 'Is this cool or what?'):
17 self.label.text = 'User said yes!'
18 self.main_window.info_dialog('Happiness', 'I know, right! :-)')
19 else:
20 self.label.text = 'User says no...'
21 self.main_window.info_dialog('Shucks...', "Well aren't you a spoilsport... :-(")
22
23 def action_confirm_dialog(self, widget):
24 if self.main_window.question_dialog('Toga', 'Are you sure you want to?'):
25 self.label.text = 'Lets do it!'
26 else:
27 self.label.text = "Left it as it was."
28
29 def action_error_dialog(self, widget):
30 self.main_window.error_dialog('Toga', "Well that didn't work... or did it?")
31 self.label.text = 'Oh noes...'
32
33 def action_open_file_dialog(self, widget):
34 try:
35 fname = self.main_window.open_file_dialog(
36 title="Open file with Toga",
37 multiselect=False
38 )
39 if fname is not None:
40 self.label.text = "File to open:" + fname
41 else:
42 self.label.text = "No file selected!"
43 except ValueError:
44 self.label.text = "Open file dialog was canceled"
45
46 def action_open_file_dialog_multi(self, widget):
47 try:
48 filenames = self.main_window.open_file_dialog(
49 title="Open file with Toga",
50 multiselect=True
51 )
52 if filenames is not None:
53 msg = "Files to open: {}".format(', '.join(filenames))
54 self.label.text = msg
55 else:
56 self.label.text = "No files selected!"
57
58 except ValueError:
59 self.label.text = "Open file dialog was canceled"
60
61 def action_select_folder_dialog(self, widget):
62 try:
63 path_names = self.main_window.select_folder_dialog(
64 title="Select folder with Toga"
65 )
66 self.label.text = "Folder selected:" + ','.join([path for path in path_names])
67 except ValueError:
68 self.label.text = "Folder select dialog was canceled"
69
70 def action_select_folder_dialog_multi(self, widget):
71 try:
72 path_names = self.main_window.select_folder_dialog(
73 title="Select multiple folders with Toga",
74 multiselect=True
75 )
76 self.label.text = "Folders selected:" + ','.join([path for path in path_names])
77 except ValueError:
78 self.label.text = "Folders select dialog was canceled"
79
80 def action_save_file_dialog(self, widget):
81 fname = 'Toga_file.txt'
82 try:
83 save_path = self.main_window.save_file_dialog(
84 "Save file with Toga",
85 suggested_filename=fname)
86 if save_path is not None:
87 self.label.text = "File saved with Toga:" + save_path
88 else:
89 self.label.text = "Save file dialog was canceled"
90 except ValueError:
91 self.label.text = "Save file dialog was canceled"
92
93 def startup(self):
94 # Set up main window
95 self.main_window = toga.MainWindow(title=self.name)
96
97 # Label to show responses.
98 self.label = toga.Label('Ready.', style=Pack(padding_top=20))
99
100 # Buttons
101 btn_style = Pack(flex=1)
102 btn_info = toga.Button('Info', on_press=self.action_info_dialog, style=btn_style)
103 btn_question = toga.Button('Question', on_press=self.action_question_dialog, style=btn_style)
104 btn_confirm = toga.Button('Confirm', on_press=self.action_confirm_dialog, style=btn_style)
105 btn_error = toga.Button('Error', on_press=self.action_error_dialog, style=btn_style)
106 btn_open = toga.Button('Open File', on_press=self.action_open_file_dialog, style=btn_style)
107 btn_open_multi = toga.Button(
108 'Open File (Multiple)',
109 on_press=self.action_open_file_dialog_multi,
110 style=btn_style
111 )
112 btn_save = toga.Button('Save File', on_press=self.action_save_file_dialog, style=btn_style)
113 btn_select = toga.Button('Select Folder', on_press=self.action_select_folder_dialog, style=btn_style)
114 btn_select_multi = toga.Button(
115 'Select Folders',
116 on_press=self.action_select_folder_dialog_multi,
117 style=btn_style
118 )
119
120 btn_clear = toga.Button('Clear', on_press=self.do_clear, style=btn_style)
121
122 # Outermost box
123 box = toga.Box(
124 children=[
125 btn_info,
126 btn_question,
127 btn_confirm,
128 btn_error,
129 btn_open,
130 btn_save,
131 btn_select,
132 btn_select_multi,
133 btn_open_multi,
134 btn_clear,
135 self.label
136 ],
137 style=Pack(
138 flex=1,
139 direction=COLUMN,
140 padding=10
141 )
142 )
143
144 # Add the content on the main window
145 self.main_window.content = box
146
147 # Show the main window
148 self.main_window.show()
149
150
151 def main():
152 return ExampledialogsApp('Dialogs', 'org.beeware.widgets.dialogs')
153
154
155 if __name__ == '__main__':
156 app = main()
157 app.main_loop()
158
```
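If the example app wanted to exercise `file_types` end to end, a handler along the following lines could be wired to a button; the handler name and the extension list are illustrative additions, not part of the shipped example.

```python
    def action_open_image_dialog(self, widget):
        # Illustrative handler, mirroring action_open_file_dialog above.
        try:
            fname = self.main_window.open_file_dialog(
                title="Open image with Toga",
                multiselect=False,
                file_types=['gif', 'tiff', 'jpeg', 'bmp', 'png', 'webp'],
            )
            if fname is not None:
                self.label.text = "File to open:" + fname
            else:
                self.label.text = "No file selected!"
        except ValueError:
            self.label.text = "Open file dialog was canceled"
```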
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/dialogs/dialogs/app.py b/examples/dialogs/dialogs/app.py
--- a/examples/dialogs/dialogs/app.py
+++ b/examples/dialogs/dialogs/app.py
@@ -43,6 +43,20 @@
except ValueError:
self.label.text = "Open file dialog was canceled"
+ def action_open_file_filtered_dialog(self, widget):
+ try:
+ fname = self.main_window.open_file_dialog(
+ title="Open file with Toga",
+ multiselect=False,
+ file_types=['doc', 'txt'],
+ )
+ if fname is not None:
+ self.label.text = "File to open:" + fname
+ else:
+ self.label.text = "No file selected!"
+ except ValueError:
+ self.label.text = "Open file dialog was canceled"
+
def action_open_file_dialog_multi(self, widget):
try:
filenames = self.main_window.open_file_dialog(
@@ -104,6 +118,11 @@
btn_confirm = toga.Button('Confirm', on_press=self.action_confirm_dialog, style=btn_style)
btn_error = toga.Button('Error', on_press=self.action_error_dialog, style=btn_style)
btn_open = toga.Button('Open File', on_press=self.action_open_file_dialog, style=btn_style)
+ btn_open_filtered = toga.Button(
+ 'Open File (Filtered)',
+ on_press=self.action_open_file_filtered_dialog,
+ style=btn_style
+ )
btn_open_multi = toga.Button(
'Open File (Multiple)',
on_press=self.action_open_file_dialog_multi,
@@ -127,6 +146,7 @@
btn_confirm,
btn_error,
btn_open,
+ btn_open_filtered,
btn_save,
btn_select,
btn_select_multi,
diff --git a/src/winforms/toga_winforms/window.py b/src/winforms/toga_winforms/window.py
--- a/src/winforms/toga_winforms/window.py
+++ b/src/winforms/toga_winforms/window.py
@@ -203,6 +203,18 @@
raise ValueError("No folder provided in the select folder dialog")
def build_filter(self, file_types):
- file_string = "{0} files (*.{0})|*.{0}"
- return '|'.join([file_string.format(ext) for ext in file_types]) + \
- "|All files (*.*)|*.*"
+ filters = [
+ "{0} files (*.{0})|*.{0}".format(ext)
+ for ext in file_types
+ ] + [
+ "All files (*.*)|*.*"
+ ]
+
+ if len(file_types) > 1:
+ filters.insert(0, "All matching files ({0})|{0}".format(
+ ';'.join([
+ '*.{0}'.format(ext)
+ for ext in file_types
+ ])
+ ))
+ return '|'.join(filters)
| {"golden_diff": "diff --git a/examples/dialogs/dialogs/app.py b/examples/dialogs/dialogs/app.py\n--- a/examples/dialogs/dialogs/app.py\n+++ b/examples/dialogs/dialogs/app.py\n@@ -43,6 +43,20 @@\n except ValueError:\n self.label.text = \"Open file dialog was canceled\"\n \n+ def action_open_file_filtered_dialog(self, widget):\n+ try:\n+ fname = self.main_window.open_file_dialog(\n+ title=\"Open file with Toga\",\n+ multiselect=False,\n+ file_types=['doc', 'txt'],\n+ )\n+ if fname is not None:\n+ self.label.text = \"File to open:\" + fname\n+ else:\n+ self.label.text = \"No file selected!\"\n+ except ValueError:\n+ self.label.text = \"Open file dialog was canceled\"\n+\n def action_open_file_dialog_multi(self, widget):\n try:\n filenames = self.main_window.open_file_dialog(\n@@ -104,6 +118,11 @@\n btn_confirm = toga.Button('Confirm', on_press=self.action_confirm_dialog, style=btn_style)\n btn_error = toga.Button('Error', on_press=self.action_error_dialog, style=btn_style)\n btn_open = toga.Button('Open File', on_press=self.action_open_file_dialog, style=btn_style)\n+ btn_open_filtered = toga.Button(\n+ 'Open File (Filtered)',\n+ on_press=self.action_open_file_filtered_dialog,\n+ style=btn_style\n+ )\n btn_open_multi = toga.Button(\n 'Open File (Multiple)',\n on_press=self.action_open_file_dialog_multi,\n@@ -127,6 +146,7 @@\n btn_confirm,\n btn_error,\n btn_open,\n+ btn_open_filtered,\n btn_save,\n btn_select,\n btn_select_multi,\ndiff --git a/src/winforms/toga_winforms/window.py b/src/winforms/toga_winforms/window.py\n--- a/src/winforms/toga_winforms/window.py\n+++ b/src/winforms/toga_winforms/window.py\n@@ -203,6 +203,18 @@\n raise ValueError(\"No folder provided in the select folder dialog\")\n \n def build_filter(self, file_types):\n- file_string = \"{0} files (*.{0})|*.{0}\"\n- return '|'.join([file_string.format(ext) for ext in file_types]) + \\\n- \"|All files (*.*)|*.*\"\n+ filters = [\n+ \"{0} files (*.{0})|*.{0}\".format(ext)\n+ for ext in file_types\n+ ] + [\n+ \"All files (*.*)|*.*\"\n+ ]\n+\n+ if len(file_types) > 1:\n+ filters.insert(0, \"All matching files ({0})|{0}\".format(\n+ ';'.join([\n+ '*.{0}'.format(ext)\n+ for ext in file_types\n+ ])\n+ ))\n+ return '|'.join(filters)\n", "issue": "Asking how to use \"file_types\" in open_file_dialog\nWhen I bookmark a list like the example below\r\n` def action_open_file_dialog(self, multi_var, widget=None):\r\n filenames = self.main_window.open_file_dialog(\r\n title=\"Open file with Toga\",\r\n multiselect=multi_var,\r\n file_types=['gif', 'tiff', 'jpeg', 'bmp', 'png', 'webp']\r\n )`\r\nThe result is a column list, which will prevent the window from displaying all images in the correct format in the list.\r\nHow does the window show all the listed formats, instead of having to select files one by one or All files. Similar to the example in HTML.\r\ninput type=\"file\"\r\n id=\"avatar\" name=\"avatar\"\r\n accept=\"image/png, image/jpeg\"\r\nhttps://developer.mozilla.org/en-US/docs/Web/HTML/Element/input/file\n", "before_files": [{"content": "from toga import GROUP_BREAK, SECTION_BREAK\n\nfrom .libs import Size, WinForms\n\n\nclass WinFormsViewport:\n def __init__(self, native, frame):\n self.native = native\n self.frame = frame\n self.baseline_dpi = 96\n\n @property\n def width(self):\n # Treat `native=None` as a 0x0 viewport\n if self.native is None:\n return 0\n return self.native.ClientSize.Width\n\n @property\n def height(self):\n if self.native is None:\n return 0\n # Subtract any vertical shift of the frame. 
This is to allow\n # for toolbars, or any other viewport-level decoration.\n return self.native.ClientSize.Height - self.frame.vertical_shift\n\n @property\n def dpi(self):\n if self.native is None:\n return self.baseline_dpi\n return self.native.CreateGraphics().DpiX\n\n\nclass Window:\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n self.create()\n\n def create(self):\n self.native = WinForms.Form(self)\n self.native.ClientSize = Size(*self.interface._size)\n self.native.interface = self.interface\n self.native.Resize += self.winforms_resize\n self.toolbar_native = None\n self.toolbar_items = None\n\n def create_toolbar(self):\n self.toolbar_native = WinForms.ToolStrip()\n for cmd in self.interface.toolbar:\n if cmd == GROUP_BREAK:\n item = WinForms.ToolStripSeparator()\n elif cmd == SECTION_BREAK:\n item = WinForms.ToolStripSeparator()\n else:\n if cmd.icon is not None:\n native_icon = cmd.icon._impl.native\n item = WinForms.ToolStripMenuItem(cmd.label, native_icon.ToBitmap())\n else:\n item = WinForms.ToolStripMenuItem(cmd.label)\n item.Click += cmd._impl.as_handler()\n cmd._impl.native.append(item)\n self.toolbar_native.Items.Add(item)\n\n def set_position(self, position):\n pass\n\n def set_size(self, size):\n self.native.ClientSize = Size(*self.interface._size)\n\n def set_app(self, app):\n if app is None:\n return\n icon_impl = app.interface.icon._impl\n if icon_impl is None:\n return\n self.native.Icon = icon_impl.native\n\n @property\n def vertical_shift(self):\n # vertical shift is the toolbar height or 0\n result = 0\n try:\n result += self.native.interface._impl.toolbar_native.Height\n except AttributeError:\n pass\n try:\n result += self.native.interface._impl.native.MainMenuStrip.Height\n except AttributeError:\n pass\n return result\n\n def set_content(self, widget):\n if self.toolbar_native:\n self.native.Controls.Add(self.toolbar_native)\n # Create the lookup table of menu items,\n # then force the creation of the menus.\n self.native.Controls.Add(widget.native)\n\n # Set the widget's viewport to be based on the window's content.\n widget.viewport = WinFormsViewport(native=self.native, frame=self)\n widget.frame = self\n\n # Add all children to the content widget.\n for child in widget.interface.children:\n child._impl.container = widget\n\n def set_title(self, title):\n self.native.Text = title\n\n def show(self):\n # The first render of the content will establish the\n # minimum possible content size; use that to enforce\n # a minimum window size.\n TITLEBAR_HEIGHT = WinForms.SystemInformation.CaptionHeight\n # Now that the content is visible, we can do our initial hinting,\n # and use that as the basis for setting the minimum window size.\n self.interface.content._impl.rehint()\n self.interface.content.style.layout(\n self.interface.content,\n WinFormsViewport(native=None, frame=None),\n )\n self.native.MinimumSize = Size(\n int(self.interface.content.layout.width),\n int(self.interface.content.layout.height) + TITLEBAR_HEIGHT\n )\n self.interface.content.refresh()\n\n self.native.Show()\n\n def winforms_FormClosing(self, event, handler):\n if self.interface.app.on_exit:\n self.interface.app.on_exit(self.interface.app)\n\n def set_full_screen(self, is_full_screen):\n self.interface.factory.not_implemented('Window.set_full_screen()')\n\n def on_close(self):\n pass\n\n def close(self):\n self.native.Close()\n\n def winforms_resize(self, sender, args):\n if self.interface.content:\n # Re-layout the content\n 
self.interface.content.refresh()\n\n def info_dialog(self, title, message):\n return WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OK)\n\n def question_dialog(self, title, message):\n result = WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.YesNo)\n return result\n\n def confirm_dialog(self, title, message):\n result = WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OKCancel)\n # this returns 1 (DialogResult.OK enum) for OK and 2 for Cancel\n return True if result == WinForms.DialogResult.OK else False\n\n def error_dialog(self, title, message):\n return WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OK,\n WinForms.MessageBoxIcon.Error)\n\n def stack_trace_dialog(self, title, message, content, retry=False):\n pass\n\n def save_file_dialog(self, title, suggested_filename, file_types):\n dialog = WinForms.SaveFileDialog()\n dialog.Title = title\n if suggested_filename is not None:\n dialog.FileName = suggested_filename\n if file_types is not None:\n dialog.Filter = self.build_filter(file_types)\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\n return dialog.FileName\n else:\n raise ValueError(\"No filename provided in the save file dialog\")\n\n def open_file_dialog(self, title, initial_directory, file_types, multiselect):\n dialog = WinForms.OpenFileDialog()\n dialog.Title = title\n if initial_directory is not None:\n dialog.InitialDirectory = initial_directory\n if file_types is not None:\n dialog.Filter = self.build_filter(file_types)\n if multiselect:\n dialog.Multiselect = True\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\n return dialog.FileNames if multiselect else dialog.FileName\n else:\n raise ValueError(\"No filename provided in the open file dialog\")\n\n def select_folder_dialog(self, title, initial_directory, multiselect):\n dialog = WinForms.FolderBrowserDialog()\n dialog.Title = title\n if initial_directory is not None:\n dialog.InitialDirectory = initial_directory\n\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\n return [dialog.SelectedPath]\n else:\n raise ValueError(\"No folder provided in the select folder dialog\")\n\n def build_filter(self, file_types):\n file_string = \"{0} files (*.{0})|*.{0}\"\n return '|'.join([file_string.format(ext) for ext in file_types]) + \\\n \"|All files (*.*)|*.*\"\n", "path": "src/winforms/toga_winforms/window.py"}, {"content": "import toga\nfrom toga.constants import COLUMN\nfrom toga.style import Pack\n\n\nclass ExampledialogsApp(toga.App):\n # Button callback functions\n def do_clear(self, widget, **kwargs):\n self.label.text = \"Ready.\"\n\n def action_info_dialog(self, widget):\n self.main_window.info_dialog('Toga', 'THIS! IS! TOGA!!')\n self.label.text = 'Information was provided.'\n\n def action_question_dialog(self, widget):\n if self.main_window.question_dialog('Toga', 'Is this cool or what?'):\n self.label.text = 'User said yes!'\n self.main_window.info_dialog('Happiness', 'I know, right! :-)')\n else:\n self.label.text = 'User says no...'\n self.main_window.info_dialog('Shucks...', \"Well aren't you a spoilsport... :-(\")\n\n def action_confirm_dialog(self, widget):\n if self.main_window.question_dialog('Toga', 'Are you sure you want to?'):\n self.label.text = 'Lets do it!'\n else:\n self.label.text = \"Left it as it was.\"\n\n def action_error_dialog(self, widget):\n self.main_window.error_dialog('Toga', \"Well that didn't work... 
or did it?\")\n self.label.text = 'Oh noes...'\n\n def action_open_file_dialog(self, widget):\n try:\n fname = self.main_window.open_file_dialog(\n title=\"Open file with Toga\",\n multiselect=False\n )\n if fname is not None:\n self.label.text = \"File to open:\" + fname\n else:\n self.label.text = \"No file selected!\"\n except ValueError:\n self.label.text = \"Open file dialog was canceled\"\n\n def action_open_file_dialog_multi(self, widget):\n try:\n filenames = self.main_window.open_file_dialog(\n title=\"Open file with Toga\",\n multiselect=True\n )\n if filenames is not None:\n msg = \"Files to open: {}\".format(', '.join(filenames))\n self.label.text = msg\n else:\n self.label.text = \"No files selected!\"\n\n except ValueError:\n self.label.text = \"Open file dialog was canceled\"\n\n def action_select_folder_dialog(self, widget):\n try:\n path_names = self.main_window.select_folder_dialog(\n title=\"Select folder with Toga\"\n )\n self.label.text = \"Folder selected:\" + ','.join([path for path in path_names])\n except ValueError:\n self.label.text = \"Folder select dialog was canceled\"\n\n def action_select_folder_dialog_multi(self, widget):\n try:\n path_names = self.main_window.select_folder_dialog(\n title=\"Select multiple folders with Toga\",\n multiselect=True\n )\n self.label.text = \"Folders selected:\" + ','.join([path for path in path_names])\n except ValueError:\n self.label.text = \"Folders select dialog was canceled\"\n\n def action_save_file_dialog(self, widget):\n fname = 'Toga_file.txt'\n try:\n save_path = self.main_window.save_file_dialog(\n \"Save file with Toga\",\n suggested_filename=fname)\n if save_path is not None:\n self.label.text = \"File saved with Toga:\" + save_path\n else:\n self.label.text = \"Save file dialog was canceled\"\n except ValueError:\n self.label.text = \"Save file dialog was canceled\"\n\n def startup(self):\n # Set up main window\n self.main_window = toga.MainWindow(title=self.name)\n\n # Label to show responses.\n self.label = toga.Label('Ready.', style=Pack(padding_top=20))\n\n # Buttons\n btn_style = Pack(flex=1)\n btn_info = toga.Button('Info', on_press=self.action_info_dialog, style=btn_style)\n btn_question = toga.Button('Question', on_press=self.action_question_dialog, style=btn_style)\n btn_confirm = toga.Button('Confirm', on_press=self.action_confirm_dialog, style=btn_style)\n btn_error = toga.Button('Error', on_press=self.action_error_dialog, style=btn_style)\n btn_open = toga.Button('Open File', on_press=self.action_open_file_dialog, style=btn_style)\n btn_open_multi = toga.Button(\n 'Open File (Multiple)',\n on_press=self.action_open_file_dialog_multi,\n style=btn_style\n )\n btn_save = toga.Button('Save File', on_press=self.action_save_file_dialog, style=btn_style)\n btn_select = toga.Button('Select Folder', on_press=self.action_select_folder_dialog, style=btn_style)\n btn_select_multi = toga.Button(\n 'Select Folders',\n on_press=self.action_select_folder_dialog_multi,\n style=btn_style\n )\n\n btn_clear = toga.Button('Clear', on_press=self.do_clear, style=btn_style)\n\n # Outermost box\n box = toga.Box(\n children=[\n btn_info,\n btn_question,\n btn_confirm,\n btn_error,\n btn_open,\n btn_save,\n btn_select,\n btn_select_multi,\n btn_open_multi,\n btn_clear,\n self.label\n ],\n style=Pack(\n flex=1,\n direction=COLUMN,\n padding=10\n )\n )\n\n # Add the content on the main window\n self.main_window.content = box\n\n # Show the main window\n self.main_window.show()\n\n\ndef main():\n return 
ExampledialogsApp('Dialogs', 'org.beeware.widgets.dialogs')\n\n\nif __name__ == '__main__':\n app = main()\n app.main_loop()\n", "path": "examples/dialogs/dialogs/app.py"}], "after_files": [{"content": "from toga import GROUP_BREAK, SECTION_BREAK\n\nfrom .libs import Size, WinForms\n\n\nclass WinFormsViewport:\n def __init__(self, native, frame):\n self.native = native\n self.frame = frame\n self.baseline_dpi = 96\n\n @property\n def width(self):\n # Treat `native=None` as a 0x0 viewport\n if self.native is None:\n return 0\n return self.native.ClientSize.Width\n\n @property\n def height(self):\n if self.native is None:\n return 0\n # Subtract any vertical shift of the frame. This is to allow\n # for toolbars, or any other viewport-level decoration.\n return self.native.ClientSize.Height - self.frame.vertical_shift\n\n @property\n def dpi(self):\n if self.native is None:\n return self.baseline_dpi\n return self.native.CreateGraphics().DpiX\n\n\nclass Window:\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n self.create()\n\n def create(self):\n self.native = WinForms.Form(self)\n self.native.ClientSize = Size(*self.interface._size)\n self.native.interface = self.interface\n self.native.Resize += self.winforms_resize\n self.toolbar_native = None\n self.toolbar_items = None\n\n def create_toolbar(self):\n self.toolbar_native = WinForms.ToolStrip()\n for cmd in self.interface.toolbar:\n if cmd == GROUP_BREAK:\n item = WinForms.ToolStripSeparator()\n elif cmd == SECTION_BREAK:\n item = WinForms.ToolStripSeparator()\n else:\n if cmd.icon is not None:\n native_icon = cmd.icon._impl.native\n item = WinForms.ToolStripMenuItem(cmd.label, native_icon.ToBitmap())\n else:\n item = WinForms.ToolStripMenuItem(cmd.label)\n item.Click += cmd._impl.as_handler()\n cmd._impl.native.append(item)\n self.toolbar_native.Items.Add(item)\n\n def set_position(self, position):\n pass\n\n def set_size(self, size):\n self.native.ClientSize = Size(*self.interface._size)\n\n def set_app(self, app):\n if app is None:\n return\n icon_impl = app.interface.icon._impl\n if icon_impl is None:\n return\n self.native.Icon = icon_impl.native\n\n @property\n def vertical_shift(self):\n # vertical shift is the toolbar height or 0\n result = 0\n try:\n result += self.native.interface._impl.toolbar_native.Height\n except AttributeError:\n pass\n try:\n result += self.native.interface._impl.native.MainMenuStrip.Height\n except AttributeError:\n pass\n return result\n\n def set_content(self, widget):\n if self.toolbar_native:\n self.native.Controls.Add(self.toolbar_native)\n # Create the lookup table of menu items,\n # then force the creation of the menus.\n self.native.Controls.Add(widget.native)\n\n # Set the widget's viewport to be based on the window's content.\n widget.viewport = WinFormsViewport(native=self.native, frame=self)\n widget.frame = self\n\n # Add all children to the content widget.\n for child in widget.interface.children:\n child._impl.container = widget\n\n def set_title(self, title):\n self.native.Text = title\n\n def show(self):\n # The first render of the content will establish the\n # minimum possible content size; use that to enforce\n # a minimum window size.\n TITLEBAR_HEIGHT = WinForms.SystemInformation.CaptionHeight\n # Now that the content is visible, we can do our initial hinting,\n # and use that as the basis for setting the minimum window size.\n self.interface.content._impl.rehint()\n self.interface.content.style.layout(\n self.interface.content,\n 
WinFormsViewport(native=None, frame=None),\n )\n self.native.MinimumSize = Size(\n int(self.interface.content.layout.width),\n int(self.interface.content.layout.height) + TITLEBAR_HEIGHT\n )\n self.interface.content.refresh()\n\n self.native.Show()\n\n def winforms_FormClosing(self, event, handler):\n if self.interface.app.on_exit:\n self.interface.app.on_exit(self.interface.app)\n\n def set_full_screen(self, is_full_screen):\n self.interface.factory.not_implemented('Window.set_full_screen()')\n\n def on_close(self):\n pass\n\n def close(self):\n self.native.Close()\n\n def winforms_resize(self, sender, args):\n if self.interface.content:\n # Re-layout the content\n self.interface.content.refresh()\n\n def info_dialog(self, title, message):\n return WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OK)\n\n def question_dialog(self, title, message):\n result = WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.YesNo)\n return result\n\n def confirm_dialog(self, title, message):\n result = WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OKCancel)\n # this returns 1 (DialogResult.OK enum) for OK and 2 for Cancel\n return True if result == WinForms.DialogResult.OK else False\n\n def error_dialog(self, title, message):\n return WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OK,\n WinForms.MessageBoxIcon.Error)\n\n def stack_trace_dialog(self, title, message, content, retry=False):\n pass\n\n def save_file_dialog(self, title, suggested_filename, file_types):\n dialog = WinForms.SaveFileDialog()\n dialog.Title = title\n if suggested_filename is not None:\n dialog.FileName = suggested_filename\n if file_types is not None:\n dialog.Filter = self.build_filter(file_types)\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\n return dialog.FileName\n else:\n raise ValueError(\"No filename provided in the save file dialog\")\n\n def open_file_dialog(self, title, initial_directory, file_types, multiselect):\n dialog = WinForms.OpenFileDialog()\n dialog.Title = title\n if initial_directory is not None:\n dialog.InitialDirectory = initial_directory\n if file_types is not None:\n dialog.Filter = self.build_filter(file_types)\n if multiselect:\n dialog.Multiselect = True\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\n return dialog.FileNames if multiselect else dialog.FileName\n else:\n raise ValueError(\"No filename provided in the open file dialog\")\n\n def select_folder_dialog(self, title, initial_directory, multiselect):\n dialog = WinForms.FolderBrowserDialog()\n dialog.Title = title\n if initial_directory is not None:\n dialog.InitialDirectory = initial_directory\n\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\n return [dialog.SelectedPath]\n else:\n raise ValueError(\"No folder provided in the select folder dialog\")\n\n def build_filter(self, file_types):\n filters = [\n \"{0} files (*.{0})|*.{0}\".format(ext)\n for ext in file_types\n ] + [\n \"All files (*.*)|*.*\"\n ]\n\n if len(file_types) > 1:\n filters.insert(0, \"All matching files ({0})|{0}\".format(\n ';'.join([\n '*.{0}'.format(ext)\n for ext in file_types\n ])\n ))\n return '|'.join(filters)\n", "path": "src/winforms/toga_winforms/window.py"}, {"content": "import toga\nfrom toga.constants import COLUMN\nfrom toga.style import Pack\n\n\nclass ExampledialogsApp(toga.App):\n # Button callback functions\n def do_clear(self, widget, **kwargs):\n self.label.text = \"Ready.\"\n\n def action_info_dialog(self, widget):\n 
self.main_window.info_dialog('Toga', 'THIS! IS! TOGA!!')\n self.label.text = 'Information was provided.'\n\n def action_question_dialog(self, widget):\n if self.main_window.question_dialog('Toga', 'Is this cool or what?'):\n self.label.text = 'User said yes!'\n self.main_window.info_dialog('Happiness', 'I know, right! :-)')\n else:\n self.label.text = 'User says no...'\n self.main_window.info_dialog('Shucks...', \"Well aren't you a spoilsport... :-(\")\n\n def action_confirm_dialog(self, widget):\n if self.main_window.question_dialog('Toga', 'Are you sure you want to?'):\n self.label.text = 'Lets do it!'\n else:\n self.label.text = \"Left it as it was.\"\n\n def action_error_dialog(self, widget):\n self.main_window.error_dialog('Toga', \"Well that didn't work... or did it?\")\n self.label.text = 'Oh noes...'\n\n def action_open_file_dialog(self, widget):\n try:\n fname = self.main_window.open_file_dialog(\n title=\"Open file with Toga\",\n multiselect=False\n )\n if fname is not None:\n self.label.text = \"File to open:\" + fname\n else:\n self.label.text = \"No file selected!\"\n except ValueError:\n self.label.text = \"Open file dialog was canceled\"\n\n def action_open_file_filtered_dialog(self, widget):\n try:\n fname = self.main_window.open_file_dialog(\n title=\"Open file with Toga\",\n multiselect=False,\n file_types=['doc', 'txt'],\n )\n if fname is not None:\n self.label.text = \"File to open:\" + fname\n else:\n self.label.text = \"No file selected!\"\n except ValueError:\n self.label.text = \"Open file dialog was canceled\"\n\n def action_open_file_dialog_multi(self, widget):\n try:\n filenames = self.main_window.open_file_dialog(\n title=\"Open file with Toga\",\n multiselect=True\n )\n if filenames is not None:\n msg = \"Files to open: {}\".format(', '.join(filenames))\n self.label.text = msg\n else:\n self.label.text = \"No files selected!\"\n\n except ValueError:\n self.label.text = \"Open file dialog was canceled\"\n\n def action_select_folder_dialog(self, widget):\n try:\n path_names = self.main_window.select_folder_dialog(\n title=\"Select folder with Toga\"\n )\n self.label.text = \"Folder selected:\" + ','.join([path for path in path_names])\n except ValueError:\n self.label.text = \"Folder select dialog was canceled\"\n\n def action_select_folder_dialog_multi(self, widget):\n try:\n path_names = self.main_window.select_folder_dialog(\n title=\"Select multiple folders with Toga\",\n multiselect=True\n )\n self.label.text = \"Folders selected:\" + ','.join([path for path in path_names])\n except ValueError:\n self.label.text = \"Folders select dialog was canceled\"\n\n def action_save_file_dialog(self, widget):\n fname = 'Toga_file.txt'\n try:\n save_path = self.main_window.save_file_dialog(\n \"Save file with Toga\",\n suggested_filename=fname)\n if save_path is not None:\n self.label.text = \"File saved with Toga:\" + save_path\n else:\n self.label.text = \"Save file dialog was canceled\"\n except ValueError:\n self.label.text = \"Save file dialog was canceled\"\n\n def startup(self):\n # Set up main window\n self.main_window = toga.MainWindow(title=self.name)\n\n # Label to show responses.\n self.label = toga.Label('Ready.', style=Pack(padding_top=20))\n\n # Buttons\n btn_style = Pack(flex=1)\n btn_info = toga.Button('Info', on_press=self.action_info_dialog, style=btn_style)\n btn_question = toga.Button('Question', on_press=self.action_question_dialog, style=btn_style)\n btn_confirm = toga.Button('Confirm', on_press=self.action_confirm_dialog, style=btn_style)\n 
btn_error = toga.Button('Error', on_press=self.action_error_dialog, style=btn_style)\n btn_open = toga.Button('Open File', on_press=self.action_open_file_dialog, style=btn_style)\n btn_open_filtered = toga.Button(\n 'Open File (Filtered)',\n on_press=self.action_open_file_filtered_dialog,\n style=btn_style\n )\n btn_open_multi = toga.Button(\n 'Open File (Multiple)',\n on_press=self.action_open_file_dialog_multi,\n style=btn_style\n )\n btn_save = toga.Button('Save File', on_press=self.action_save_file_dialog, style=btn_style)\n btn_select = toga.Button('Select Folder', on_press=self.action_select_folder_dialog, style=btn_style)\n btn_select_multi = toga.Button(\n 'Select Folders',\n on_press=self.action_select_folder_dialog_multi,\n style=btn_style\n )\n\n btn_clear = toga.Button('Clear', on_press=self.do_clear, style=btn_style)\n\n # Outermost box\n box = toga.Box(\n children=[\n btn_info,\n btn_question,\n btn_confirm,\n btn_error,\n btn_open,\n btn_open_filtered,\n btn_save,\n btn_select,\n btn_select_multi,\n btn_open_multi,\n btn_clear,\n self.label\n ],\n style=Pack(\n flex=1,\n direction=COLUMN,\n padding=10\n )\n )\n\n # Add the content on the main window\n self.main_window.content = box\n\n # Show the main window\n self.main_window.show()\n\n\ndef main():\n return ExampledialogsApp('Dialogs', 'org.beeware.widgets.dialogs')\n\n\nif __name__ == '__main__':\n app = main()\n app.main_loop()\n", "path": "examples/dialogs/dialogs/app.py"}]} | 4,061 | 653 |
gh_patches_debug_7411 | rasdani/github-patches | git_diff | holoviz__holoviews-1983 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bokeh server warning message
The code `server = renderer.app(dmap, show=True, new_window=True)`
results in the following warning message:
WARNING:root:BokehServerWidgets24371: Setting non-parameter attribute display_options={'fps': 20} using a mechanism intended only for parameters
--- END ISSUE ---
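For context, the warning comes from `param` itself: passing a constructor keyword that is not a declared Parameter makes `Parameterized` fall back to plain attribute assignment and log exactly this message. A standalone sketch of the pattern (illustrative only, not HoloViews code):

```python
import param

class WidgetsLike(param.Parameterized):
    # Declaring the name as a Parameter is what avoids the warning; without
    # this line, WidgetsLike(display_options={'fps': 20}) logs "Setting
    # non-parameter attribute display_options=... using a mechanism intended
    # only for parameters".
    display_options = param.Dict(default={}, doc="Extra display options.")

w = WidgetsLike(display_options={'fps': 20})
print(w.display_options)  # {'fps': 20}, with no warning emitted
```

Declaring `display_options` as a `param.Dict` on the widget class, as the golden diff further below does, is the corresponding fix.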
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `holoviews/plotting/bokeh/widgets.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import math
4 import json
5 from functools import partial
6
7 import param
8 import numpy as np
9 from bokeh.models.widgets import Select, Slider, AutocompleteInput, TextInput, Div
10 from bokeh.layouts import widgetbox, row, column
11
12 from ...core import Store, NdMapping, OrderedDict
13 from ...core.util import (drop_streams, unique_array, isnumeric,
14 wrap_tuple_streams, unicode)
15 from ..widgets import NdWidget, SelectionWidget, ScrubberWidget
16 from .util import serialize_json
17
18
19
20 class BokehServerWidgets(param.Parameterized):
21 """
22 BokehServerWidgets create bokeh widgets corresponding to all the
23 key dimensions found on a BokehPlot instance. It currently supports
24 to types of widgets sliders (which may be discrete or continuous)
25 and dropdown widgets letting you select non-numeric values.
26 """
27
28 editable = param.Boolean(default=False, doc="""
29 Whether the slider text fields should be editable. Disabled
30 by default for a more compact widget layout.""")
31
32 position = param.ObjectSelector(default='right',
33 objects=['right', 'left', 'above', 'below'])
34
35 sizing_mode = param.ObjectSelector(default='fixed',
36 objects=['fixed', 'stretch_both', 'scale_width',
37 'scale_height', 'scale_both'])
38
39 width = param.Integer(default=250, doc="""
40 Width of the widget box in pixels""")
41
42 basejs = param.String(default=None, precedence=-1, doc="""
43 Defines the local CSS file to be loaded for this widget.""")
44
45 extensionjs = param.String(default=None, precedence=-1, doc="""
46 Optional javascript extension file for a particular backend.""")
47
48 css = param.String(default=None, precedence=-1, doc="""
49 Defines the local CSS file to be loaded for this widget.""")
50
51 def __init__(self, plot, renderer=None, **params):
52 super(BokehServerWidgets, self).__init__(**params)
53 self.plot = plot
54 streams = []
55 for stream in plot.streams:
56 if any(k in plot.dimensions for k in stream.contents):
57 streams.append(stream)
58 self.dimensions, self.keys = drop_streams(streams,
59 plot.dimensions,
60 plot.keys)
61 if renderer is None:
62 backend = Store.current_backend
63 self.renderer = Store.renderers[backend]
64 else:
65 self.renderer = renderer
66 # Create mock NdMapping to hold the common dimensions and keys
67 self.mock_obj = NdMapping([(k, None) for k in self.keys],
68 kdims=self.dimensions)
69 self.widgets, self.lookups = self.get_widgets()
70 self.reverse_lookups = {d: {v: k for k, v in item.items()}
71 for d, item in self.lookups.items()}
72 self.subplots = {}
73 if self.plot.renderer.mode == 'default':
74 self.attach_callbacks()
75 self.state = self.init_layout()
76 self._queue = []
77
78
79 @classmethod
80 def create_widget(self, dim, holomap=None, editable=False):
81 """"
82 Given a Dimension creates bokeh widgets to select along that
83 dimension. For numeric data a slider widget is created which
84 may be either discrete, if a holomap is supplied or the
85 Dimension.values are set, or a continuous widget for
86 DynamicMaps. If the slider is discrete the returned mapping
87 defines a mapping between values and labels making it possible
88 sync the two slider and label widgets. For non-numeric data
89 a simple dropdown selection widget is generated.
90 """
91 label, mapping = None, None
92 if holomap is None:
93 if dim.values:
94 if all(isnumeric(v) for v in dim.values):
95 values = dim.values
96 labels = [unicode(dim.pprint_value(v)) for v in dim.values]
97 if editable:
98 label = AutocompleteInput(value=labels[0], completions=labels,
99 title=dim.pprint_label)
100 else:
101 label = Div(text='<b>%s</b>' % dim.pprint_value_string(labels[0]))
102 widget = Slider(value=0, start=0, end=len(dim.values)-1, title=None, step=1)
103 mapping = list(zip(values, labels))
104 else:
105 values = [(v, dim.pprint_value(v)) for v in dim.values]
106 widget = Select(title=dim.pprint_label, value=values[0][0],
107 options=values)
108 else:
109 start = dim.soft_range[0] if dim.soft_range[0] else dim.range[0]
110 end = dim.soft_range[1] if dim.soft_range[1] else dim.range[1]
111 dim_range = end - start
112 int_type = isinstance(dim.type, type) and issubclass(dim.type, int)
113 if isinstance(dim_range, int) or int_type:
114 step = 1
115 elif dim.step is not None:
116 step = dim.step
117 else:
118 step = 10**((round(math.log10(dim_range))-3))
119 if editable:
120 label = TextInput(value=str(start), title=dim.pprint_label)
121 else:
122 label = Div(text='<b>%s</b>' % dim.pprint_value_string(start))
123 widget = Slider(value=start, start=start,
124 end=end, step=step, title=None)
125 else:
126 values = (dim.values if dim.values else
127 list(unique_array(holomap.dimension_values(dim.name))))
128 labels = [dim.pprint_value(v) for v in values]
129 if isinstance(values[0], np.datetime64) or isnumeric(values[0]):
130 if editable:
131 label = AutocompleteInput(value=labels[0], completions=labels,
132 title=dim.pprint_label)
133 else:
134 label = Div(text='<b>%s</b>' % (dim.pprint_value_string(labels[0])))
135 widget = Slider(value=0, start=0, end=len(values)-1, title=None, step=1)
136 else:
137 widget = Select(title=dim.pprint_label, value=values[0],
138 options=list(zip(values, labels)))
139 mapping = list(zip(values, labels))
140 return widget, label, mapping
141
142
143 def get_widgets(self):
144 """
145 Creates a set of widgets representing the dimensions on the
146 plot object used to instantiate the widgets class.
147 """
148 widgets = OrderedDict()
149 mappings = {}
150 for dim in self.mock_obj.kdims:
151 holomap = None if self.plot.dynamic else self.mock_obj
152 widget, label, mapping = self.create_widget(dim, holomap, self.editable)
153 if label is not None and not isinstance(label, Div):
154 label.on_change('value', partial(self.on_change, dim, 'label'))
155 widget.on_change('value', partial(self.on_change, dim, 'widget'))
156 widgets[dim.pprint_label] = (label, widget)
157 if mapping:
158 mappings[dim.pprint_label] = OrderedDict(mapping)
159 return widgets, mappings
160
161
162 def init_layout(self):
163 widgets = [widget for d in self.widgets.values()
164 for widget in d if widget]
165 wbox = widgetbox(widgets, width=self.width)
166 if self.position in ['right', 'below']:
167 plots = [self.plot.state, wbox]
168 else:
169 plots = [wbox, self.plot.state]
170 layout_fn = row if self.position in ['left', 'right'] else column
171 layout = layout_fn(plots, sizing_mode=self.sizing_mode)
172 return layout
173
174
175 def attach_callbacks(self):
176 """
177 Attach callbacks to interact with Comms.
178 """
179 pass
180
181
182 def on_change(self, dim, widget_type, attr, old, new):
183 self._queue.append((dim, widget_type, attr, old, new))
184 if self.update not in self.plot.document._session_callbacks:
185 self.plot.document.add_timeout_callback(self.update, 50)
186
187
188 def update(self):
189 """
190 Handle update events on bokeh server.
191 """
192 if not self._queue:
193 return
194 dim, widget_type, attr, old, new = self._queue[-1]
195 dim_label = dim.pprint_label
196
197 label, widget = self.widgets[dim_label]
198 if widget_type == 'label':
199 if isinstance(label, AutocompleteInput):
200 value = [new]
201 widget.value = value
202 else:
203 widget.value = float(new)
204 elif label:
205 lookups = self.lookups.get(dim_label)
206 if not self.editable:
207 if lookups:
208 new = list(lookups.keys())[widget.value]
209 label.text = '<b>%s</b>' % dim.pprint_value_string(new)
210 elif isinstance(label, AutocompleteInput):
211 text = lookups[new]
212 label.value = text
213 else:
214 label.value = dim.pprint_value(new)
215
216 key = []
217 for dim, (label, widget) in self.widgets.items():
218 lookups = self.lookups.get(dim)
219 if label and lookups:
220 val = list(lookups.keys())[widget.value]
221 else:
222 val = widget.value
223 key.append(val)
224 key = wrap_tuple_streams(tuple(key), self.plot.dimensions,
225 self.plot.streams)
226 self.plot.update(key)
227
228
229
230 class BokehWidget(NdWidget):
231
232 css = param.String(default='bokehwidgets.css', doc="""
233 Defines the local CSS file to be loaded for this widget.""")
234
235 extensionjs = param.String(default='bokehwidgets.js', doc="""
236 Optional javascript extension file for a particular backend.""")
237
238 def _get_data(self):
239 # Get initial frame to draw immediately
240 init_frame = self._plot_figure(0, fig_format='html')
241 data = super(BokehWidget, self)._get_data()
242 return dict(data, init_frame=init_frame)
243
244 def encode_frames(self, frames):
245 if self.export_json:
246 self.save_json(frames)
247 frames = {}
248 else:
249 frames = json.dumps(frames).replace('</', r'<\/')
250 return frames
251
252 def _plot_figure(self, idx, fig_format='json'):
253 """
254 Returns the figure in html format on the
255 first call and
256 """
257 self.plot.update(idx)
258 if self.embed or fig_format == 'html':
259 if fig_format == 'html':
260 msg = self.renderer.html(self.plot, fig_format)
261 else:
262 json_patch = self.renderer.diff(self.plot, serialize=False)
263 msg = dict(patch=json_patch, root=self.plot.state._id)
264 msg = serialize_json(msg)
265 return msg
266
267
268 class BokehSelectionWidget(BokehWidget, SelectionWidget):
269 pass
270
271
272 class BokehScrubberWidget(BokehWidget, ScrubberWidget):
273 pass
274
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/holoviews/plotting/bokeh/widgets.py b/holoviews/plotting/bokeh/widgets.py
--- a/holoviews/plotting/bokeh/widgets.py
+++ b/holoviews/plotting/bokeh/widgets.py
@@ -25,6 +25,9 @@
and dropdown widgets letting you select non-numeric values.
"""
+ display_options = param.Dict(default={}, doc="""
+ Additional options controlling display options of the widgets.""")
+
editable = param.Boolean(default=False, doc="""
Whether the slider text fields should be editable. Disabled
by default for a more compact widget layout.""")
| {"golden_diff": "diff --git a/holoviews/plotting/bokeh/widgets.py b/holoviews/plotting/bokeh/widgets.py\n--- a/holoviews/plotting/bokeh/widgets.py\n+++ b/holoviews/plotting/bokeh/widgets.py\n@@ -25,6 +25,9 @@\n and dropdown widgets letting you select non-numeric values.\n \"\"\"\n \n+ display_options = param.Dict(default={}, doc=\"\"\"\n+ Additional options controlling display options of the widgets.\"\"\")\n+\n editable = param.Boolean(default=False, doc=\"\"\"\n Whether the slider text fields should be editable. Disabled\n by default for a more compact widget layout.\"\"\")\n", "issue": "Bokeh server warning message\nthe code server = renderer.app(dmap, show=True, new_window=True)\r\nresults in a warning message\r\nWARNING:root:BokehServerWidgets24371: Setting non-parameter attribute display_options={'fps': 20} using a mechanism intended only for parameters\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport math\nimport json\nfrom functools import partial\n\nimport param\nimport numpy as np\nfrom bokeh.models.widgets import Select, Slider, AutocompleteInput, TextInput, Div\nfrom bokeh.layouts import widgetbox, row, column\n\nfrom ...core import Store, NdMapping, OrderedDict\nfrom ...core.util import (drop_streams, unique_array, isnumeric,\n wrap_tuple_streams, unicode)\nfrom ..widgets import NdWidget, SelectionWidget, ScrubberWidget\nfrom .util import serialize_json\n\n\n\nclass BokehServerWidgets(param.Parameterized):\n \"\"\"\n BokehServerWidgets create bokeh widgets corresponding to all the\n key dimensions found on a BokehPlot instance. It currently supports\n to types of widgets sliders (which may be discrete or continuous)\n and dropdown widgets letting you select non-numeric values.\n \"\"\"\n\n editable = param.Boolean(default=False, doc=\"\"\"\n Whether the slider text fields should be editable. 
Disabled\n by default for a more compact widget layout.\"\"\")\n\n position = param.ObjectSelector(default='right',\n objects=['right', 'left', 'above', 'below'])\n\n sizing_mode = param.ObjectSelector(default='fixed',\n objects=['fixed', 'stretch_both', 'scale_width',\n 'scale_height', 'scale_both'])\n\n width = param.Integer(default=250, doc=\"\"\"\n Width of the widget box in pixels\"\"\")\n\n basejs = param.String(default=None, precedence=-1, doc=\"\"\"\n Defines the local CSS file to be loaded for this widget.\"\"\")\n\n extensionjs = param.String(default=None, precedence=-1, doc=\"\"\"\n Optional javascript extension file for a particular backend.\"\"\")\n\n css = param.String(default=None, precedence=-1, doc=\"\"\"\n Defines the local CSS file to be loaded for this widget.\"\"\")\n\n def __init__(self, plot, renderer=None, **params):\n super(BokehServerWidgets, self).__init__(**params)\n self.plot = plot\n streams = []\n for stream in plot.streams:\n if any(k in plot.dimensions for k in stream.contents):\n streams.append(stream)\n self.dimensions, self.keys = drop_streams(streams,\n plot.dimensions,\n plot.keys)\n if renderer is None:\n backend = Store.current_backend\n self.renderer = Store.renderers[backend]\n else:\n self.renderer = renderer\n # Create mock NdMapping to hold the common dimensions and keys\n self.mock_obj = NdMapping([(k, None) for k in self.keys],\n kdims=self.dimensions)\n self.widgets, self.lookups = self.get_widgets()\n self.reverse_lookups = {d: {v: k for k, v in item.items()}\n for d, item in self.lookups.items()}\n self.subplots = {}\n if self.plot.renderer.mode == 'default':\n self.attach_callbacks()\n self.state = self.init_layout()\n self._queue = []\n\n\n @classmethod\n def create_widget(self, dim, holomap=None, editable=False):\n \"\"\"\"\n Given a Dimension creates bokeh widgets to select along that\n dimension. For numeric data a slider widget is created which\n may be either discrete, if a holomap is supplied or the\n Dimension.values are set, or a continuous widget for\n DynamicMaps. If the slider is discrete the returned mapping\n defines a mapping between values and labels making it possible\n sync the two slider and label widgets. 
For non-numeric data\n a simple dropdown selection widget is generated.\n \"\"\"\n label, mapping = None, None\n if holomap is None:\n if dim.values:\n if all(isnumeric(v) for v in dim.values):\n values = dim.values\n labels = [unicode(dim.pprint_value(v)) for v in dim.values]\n if editable:\n label = AutocompleteInput(value=labels[0], completions=labels,\n title=dim.pprint_label)\n else:\n label = Div(text='<b>%s</b>' % dim.pprint_value_string(labels[0]))\n widget = Slider(value=0, start=0, end=len(dim.values)-1, title=None, step=1)\n mapping = list(zip(values, labels))\n else:\n values = [(v, dim.pprint_value(v)) for v in dim.values]\n widget = Select(title=dim.pprint_label, value=values[0][0],\n options=values)\n else:\n start = dim.soft_range[0] if dim.soft_range[0] else dim.range[0]\n end = dim.soft_range[1] if dim.soft_range[1] else dim.range[1]\n dim_range = end - start\n int_type = isinstance(dim.type, type) and issubclass(dim.type, int)\n if isinstance(dim_range, int) or int_type:\n step = 1\n elif dim.step is not None:\n step = dim.step\n else:\n step = 10**((round(math.log10(dim_range))-3))\n if editable:\n label = TextInput(value=str(start), title=dim.pprint_label)\n else:\n label = Div(text='<b>%s</b>' % dim.pprint_value_string(start))\n widget = Slider(value=start, start=start,\n end=end, step=step, title=None)\n else:\n values = (dim.values if dim.values else\n list(unique_array(holomap.dimension_values(dim.name))))\n labels = [dim.pprint_value(v) for v in values]\n if isinstance(values[0], np.datetime64) or isnumeric(values[0]):\n if editable:\n label = AutocompleteInput(value=labels[0], completions=labels,\n title=dim.pprint_label)\n else:\n label = Div(text='<b>%s</b>' % (dim.pprint_value_string(labels[0])))\n widget = Slider(value=0, start=0, end=len(values)-1, title=None, step=1)\n else:\n widget = Select(title=dim.pprint_label, value=values[0],\n options=list(zip(values, labels)))\n mapping = list(zip(values, labels))\n return widget, label, mapping\n\n\n def get_widgets(self):\n \"\"\"\n Creates a set of widgets representing the dimensions on the\n plot object used to instantiate the widgets class.\n \"\"\"\n widgets = OrderedDict()\n mappings = {}\n for dim in self.mock_obj.kdims:\n holomap = None if self.plot.dynamic else self.mock_obj\n widget, label, mapping = self.create_widget(dim, holomap, self.editable)\n if label is not None and not isinstance(label, Div):\n label.on_change('value', partial(self.on_change, dim, 'label'))\n widget.on_change('value', partial(self.on_change, dim, 'widget'))\n widgets[dim.pprint_label] = (label, widget)\n if mapping:\n mappings[dim.pprint_label] = OrderedDict(mapping)\n return widgets, mappings\n\n\n def init_layout(self):\n widgets = [widget for d in self.widgets.values()\n for widget in d if widget]\n wbox = widgetbox(widgets, width=self.width)\n if self.position in ['right', 'below']:\n plots = [self.plot.state, wbox]\n else:\n plots = [wbox, self.plot.state]\n layout_fn = row if self.position in ['left', 'right'] else column\n layout = layout_fn(plots, sizing_mode=self.sizing_mode)\n return layout\n\n\n def attach_callbacks(self):\n \"\"\"\n Attach callbacks to interact with Comms.\n \"\"\"\n pass\n\n\n def on_change(self, dim, widget_type, attr, old, new):\n self._queue.append((dim, widget_type, attr, old, new))\n if self.update not in self.plot.document._session_callbacks:\n self.plot.document.add_timeout_callback(self.update, 50)\n\n\n def update(self):\n \"\"\"\n Handle update events on bokeh server.\n \"\"\"\n if not 
self._queue:\n return\n dim, widget_type, attr, old, new = self._queue[-1]\n dim_label = dim.pprint_label\n\n label, widget = self.widgets[dim_label]\n if widget_type == 'label':\n if isinstance(label, AutocompleteInput):\n value = [new]\n widget.value = value\n else:\n widget.value = float(new)\n elif label:\n lookups = self.lookups.get(dim_label)\n if not self.editable:\n if lookups:\n new = list(lookups.keys())[widget.value]\n label.text = '<b>%s</b>' % dim.pprint_value_string(new)\n elif isinstance(label, AutocompleteInput):\n text = lookups[new]\n label.value = text\n else:\n label.value = dim.pprint_value(new)\n\n key = []\n for dim, (label, widget) in self.widgets.items():\n lookups = self.lookups.get(dim)\n if label and lookups:\n val = list(lookups.keys())[widget.value]\n else:\n val = widget.value\n key.append(val)\n key = wrap_tuple_streams(tuple(key), self.plot.dimensions,\n self.plot.streams)\n self.plot.update(key)\n\n\n\nclass BokehWidget(NdWidget):\n\n css = param.String(default='bokehwidgets.css', doc=\"\"\"\n Defines the local CSS file to be loaded for this widget.\"\"\")\n\n extensionjs = param.String(default='bokehwidgets.js', doc=\"\"\"\n Optional javascript extension file for a particular backend.\"\"\")\n\n def _get_data(self):\n # Get initial frame to draw immediately\n init_frame = self._plot_figure(0, fig_format='html')\n data = super(BokehWidget, self)._get_data()\n return dict(data, init_frame=init_frame)\n\n def encode_frames(self, frames):\n if self.export_json:\n self.save_json(frames)\n frames = {}\n else:\n frames = json.dumps(frames).replace('</', r'<\\/')\n return frames\n\n def _plot_figure(self, idx, fig_format='json'):\n \"\"\"\n Returns the figure in html format on the\n first call and\n \"\"\"\n self.plot.update(idx)\n if self.embed or fig_format == 'html':\n if fig_format == 'html':\n msg = self.renderer.html(self.plot, fig_format)\n else:\n json_patch = self.renderer.diff(self.plot, serialize=False)\n msg = dict(patch=json_patch, root=self.plot.state._id)\n msg = serialize_json(msg)\n return msg\n\n\nclass BokehSelectionWidget(BokehWidget, SelectionWidget):\n pass\n\n\nclass BokehScrubberWidget(BokehWidget, ScrubberWidget):\n pass\n", "path": "holoviews/plotting/bokeh/widgets.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport math\nimport json\nfrom functools import partial\n\nimport param\nimport numpy as np\nfrom bokeh.models.widgets import Select, Slider, AutocompleteInput, TextInput, Div\nfrom bokeh.layouts import widgetbox, row, column\n\nfrom ...core import Store, NdMapping, OrderedDict\nfrom ...core.util import (drop_streams, unique_array, isnumeric,\n wrap_tuple_streams, unicode)\nfrom ..widgets import NdWidget, SelectionWidget, ScrubberWidget\nfrom .util import serialize_json\n\n\n\nclass BokehServerWidgets(param.Parameterized):\n \"\"\"\n BokehServerWidgets create bokeh widgets corresponding to all the\n key dimensions found on a BokehPlot instance. It currently supports\n to types of widgets sliders (which may be discrete or continuous)\n and dropdown widgets letting you select non-numeric values.\n \"\"\"\n\n display_options = param.Dict(default={}, doc=\"\"\"\n Additional options controlling display options of the widgets.\"\"\")\n\n editable = param.Boolean(default=False, doc=\"\"\"\n Whether the slider text fields should be editable. 
Disabled\n by default for a more compact widget layout.\"\"\")\n\n position = param.ObjectSelector(default='right',\n objects=['right', 'left', 'above', 'below'])\n\n sizing_mode = param.ObjectSelector(default='fixed',\n objects=['fixed', 'stretch_both', 'scale_width',\n 'scale_height', 'scale_both'])\n\n width = param.Integer(default=250, doc=\"\"\"\n Width of the widget box in pixels\"\"\")\n\n basejs = param.String(default=None, precedence=-1, doc=\"\"\"\n Defines the local CSS file to be loaded for this widget.\"\"\")\n\n extensionjs = param.String(default=None, precedence=-1, doc=\"\"\"\n Optional javascript extension file for a particular backend.\"\"\")\n\n css = param.String(default=None, precedence=-1, doc=\"\"\"\n Defines the local CSS file to be loaded for this widget.\"\"\")\n\n def __init__(self, plot, renderer=None, **params):\n super(BokehServerWidgets, self).__init__(**params)\n self.plot = plot\n streams = []\n for stream in plot.streams:\n if any(k in plot.dimensions for k in stream.contents):\n streams.append(stream)\n self.dimensions, self.keys = drop_streams(streams,\n plot.dimensions,\n plot.keys)\n if renderer is None:\n backend = Store.current_backend\n self.renderer = Store.renderers[backend]\n else:\n self.renderer = renderer\n # Create mock NdMapping to hold the common dimensions and keys\n self.mock_obj = NdMapping([(k, None) for k in self.keys],\n kdims=self.dimensions)\n self.widgets, self.lookups = self.get_widgets()\n self.reverse_lookups = {d: {v: k for k, v in item.items()}\n for d, item in self.lookups.items()}\n self.subplots = {}\n if self.plot.renderer.mode == 'default':\n self.attach_callbacks()\n self.state = self.init_layout()\n self._queue = []\n\n\n @classmethod\n def create_widget(self, dim, holomap=None, editable=False):\n \"\"\"\"\n Given a Dimension creates bokeh widgets to select along that\n dimension. For numeric data a slider widget is created which\n may be either discrete, if a holomap is supplied or the\n Dimension.values are set, or a continuous widget for\n DynamicMaps. If the slider is discrete the returned mapping\n defines a mapping between values and labels making it possible\n sync the two slider and label widgets. 
For non-numeric data\n a simple dropdown selection widget is generated.\n \"\"\"\n label, mapping = None, None\n if holomap is None:\n if dim.values:\n if all(isnumeric(v) for v in dim.values):\n values = dim.values\n labels = [unicode(dim.pprint_value(v)) for v in dim.values]\n if editable:\n label = AutocompleteInput(value=labels[0], completions=labels,\n title=dim.pprint_label)\n else:\n label = Div(text='<b>%s</b>' % dim.pprint_value_string(labels[0]))\n widget = Slider(value=0, start=0, end=len(dim.values)-1, title=None, step=1)\n mapping = list(zip(values, labels))\n else:\n values = [(v, dim.pprint_value(v)) for v in dim.values]\n widget = Select(title=dim.pprint_label, value=values[0][0],\n options=values)\n else:\n start = dim.soft_range[0] if dim.soft_range[0] else dim.range[0]\n end = dim.soft_range[1] if dim.soft_range[1] else dim.range[1]\n dim_range = end - start\n int_type = isinstance(dim.type, type) and issubclass(dim.type, int)\n if isinstance(dim_range, int) or int_type:\n step = 1\n elif dim.step is not None:\n step = dim.step\n else:\n step = 10**((round(math.log10(dim_range))-3))\n if editable:\n label = TextInput(value=str(start), title=dim.pprint_label)\n else:\n label = Div(text='<b>%s</b>' % dim.pprint_value_string(start))\n widget = Slider(value=start, start=start,\n end=end, step=step, title=None)\n else:\n values = (dim.values if dim.values else\n list(unique_array(holomap.dimension_values(dim.name))))\n labels = [dim.pprint_value(v) for v in values]\n if isinstance(values[0], np.datetime64) or isnumeric(values[0]):\n if editable:\n label = AutocompleteInput(value=labels[0], completions=labels,\n title=dim.pprint_label)\n else:\n label = Div(text='<b>%s</b>' % (dim.pprint_value_string(labels[0])))\n widget = Slider(value=0, start=0, end=len(values)-1, title=None, step=1)\n else:\n widget = Select(title=dim.pprint_label, value=values[0],\n options=list(zip(values, labels)))\n mapping = list(zip(values, labels))\n return widget, label, mapping\n\n\n def get_widgets(self):\n \"\"\"\n Creates a set of widgets representing the dimensions on the\n plot object used to instantiate the widgets class.\n \"\"\"\n widgets = OrderedDict()\n mappings = {}\n for dim in self.mock_obj.kdims:\n holomap = None if self.plot.dynamic else self.mock_obj\n widget, label, mapping = self.create_widget(dim, holomap, self.editable)\n if label is not None and not isinstance(label, Div):\n label.on_change('value', partial(self.on_change, dim, 'label'))\n widget.on_change('value', partial(self.on_change, dim, 'widget'))\n widgets[dim.pprint_label] = (label, widget)\n if mapping:\n mappings[dim.pprint_label] = OrderedDict(mapping)\n return widgets, mappings\n\n\n def init_layout(self):\n widgets = [widget for d in self.widgets.values()\n for widget in d if widget]\n wbox = widgetbox(widgets, width=self.width)\n if self.position in ['right', 'below']:\n plots = [self.plot.state, wbox]\n else:\n plots = [wbox, self.plot.state]\n layout_fn = row if self.position in ['left', 'right'] else column\n layout = layout_fn(plots, sizing_mode=self.sizing_mode)\n return layout\n\n\n def attach_callbacks(self):\n \"\"\"\n Attach callbacks to interact with Comms.\n \"\"\"\n pass\n\n\n def on_change(self, dim, widget_type, attr, old, new):\n self._queue.append((dim, widget_type, attr, old, new))\n if self.update not in self.plot.document._session_callbacks:\n self.plot.document.add_timeout_callback(self.update, 50)\n\n\n def update(self):\n \"\"\"\n Handle update events on bokeh server.\n \"\"\"\n if not 
self._queue:\n return\n dim, widget_type, attr, old, new = self._queue[-1]\n dim_label = dim.pprint_label\n\n label, widget = self.widgets[dim_label]\n if widget_type == 'label':\n if isinstance(label, AutocompleteInput):\n value = [new]\n widget.value = value\n else:\n widget.value = float(new)\n elif label:\n lookups = self.lookups.get(dim_label)\n if not self.editable:\n if lookups:\n new = list(lookups.keys())[widget.value]\n label.text = '<b>%s</b>' % dim.pprint_value_string(new)\n elif isinstance(label, AutocompleteInput):\n text = lookups[new]\n label.value = text\n else:\n label.value = dim.pprint_value(new)\n\n key = []\n for dim, (label, widget) in self.widgets.items():\n lookups = self.lookups.get(dim)\n if label and lookups:\n val = list(lookups.keys())[widget.value]\n else:\n val = widget.value\n key.append(val)\n key = wrap_tuple_streams(tuple(key), self.plot.dimensions,\n self.plot.streams)\n self.plot.update(key)\n\n\n\nclass BokehWidget(NdWidget):\n\n css = param.String(default='bokehwidgets.css', doc=\"\"\"\n Defines the local CSS file to be loaded for this widget.\"\"\")\n\n extensionjs = param.String(default='bokehwidgets.js', doc=\"\"\"\n Optional javascript extension file for a particular backend.\"\"\")\n\n def _get_data(self):\n # Get initial frame to draw immediately\n init_frame = self._plot_figure(0, fig_format='html')\n data = super(BokehWidget, self)._get_data()\n return dict(data, init_frame=init_frame)\n\n def encode_frames(self, frames):\n if self.export_json:\n self.save_json(frames)\n frames = {}\n else:\n frames = json.dumps(frames).replace('</', r'<\\/')\n return frames\n\n def _plot_figure(self, idx, fig_format='json'):\n \"\"\"\n Returns the figure in html format on the\n first call and\n \"\"\"\n self.plot.update(idx)\n if self.embed or fig_format == 'html':\n if fig_format == 'html':\n msg = self.renderer.html(self.plot, fig_format)\n else:\n json_patch = self.renderer.diff(self.plot, serialize=False)\n msg = dict(patch=json_patch, root=self.plot.state._id)\n msg = serialize_json(msg)\n return msg\n\n\nclass BokehSelectionWidget(BokehWidget, SelectionWidget):\n pass\n\n\nclass BokehScrubberWidget(BokehWidget, ScrubberWidget):\n pass\n", "path": "holoviews/plotting/bokeh/widgets.py"}]} | 3,343 | 150 |
gh_patches_debug_24017 | rasdani/github-patches | git_diff | kubeflow__pipelines-4292 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
kfp component ml_engine deploy version param is incorrectly typed
### What steps did you take:
I tried to specify the `version` param to the gcp ml_engine deploy component via:
```
mlengine_deploy_op = comp.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/1.0.0/components/gcp/ml_engine/deploy/component.yaml')
. . .
upload_task = mlengine_deploy_op(
model_id=model_id,
project_id=project_id,
python_version=python_version,
runtime_version=runtime_version,
version_id=version_id,
model_uri=model_uri,
model={
'onlinePredictionLogging': True,
'onlinePredictionConsoleLogging': True,
},
version={
'packageUris': package_uris,
'predictionClass': prediction_class,
},
)
```
### What happened:
Received a type error that str object (version) does not support assignment
```
Traceback (most recent call last):
File "/usr/local/lib/python2.7/runpy.py", line 174, in _run_module_as_main
"__main__", fname, loader, pkg_name)
File "/usr/local/lib/python2.7/runpy.py", line 72, in _run_code
exec code in run_globals
File "/ml/kfp_component/launcher/__main__.py", line 34, in <module>
main()
File "/ml/kfp_component/launcher/__main__.py", line 31, in main
launch(args.file_or_module, args.args)
File "kfp_component/launcher/launcher.py", line 45, in launch
return fire.Fire(module, command=args, name=module.__name__)
File "/usr/local/lib/python2.7/site-packages/fire/core.py", line 127, in Fire
component_trace = _Fire(component, args, context, name)
File "/usr/local/lib/python2.7/site-packages/fire/core.py", line 366, in _Fire
component, remaining_args)
File "/usr/local/lib/python2.7/site-packages/fire/core.py", line 542, in _CallCallable
result = fn(*varargs, **kwargs)
File "kfp_component/google/ml_engine/_deploy.py", line 68, in deploy
wait_interval)
File "kfp_component/google/ml_engine/_create_version.py", line 55, in create_version
version['deploymentUri'] = deployemnt_uri
TypeError: 'str' object does not support item assignment
```
### What did you expect to happen:
I expected to be able to pass additional version parameters as a dictionary as described in https://cloud.google.com/ml-engine/reference/rest/v1/projects.models.versions and https://github.com/kubeflow/pipelines/blob/master/components/gcp/ml_engine/deploy/component.yaml#L64:L69
However, this type is being declared as a str when it should be a dict: https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_create_version.py#L47
This is ultimately based on the assumption that fire is using the docstring to do type hinting.
### Environment:
How did you deploy Kubeflow Pipelines (KFP)?
Deployed via AI-Platform Pipelines
KFP version:
```
Build commit: 9c16e12
```
KFP SDK version:
```
kfp 0.5.1
kfp-server-api 0.5.0
```
/kind bug
/area sdk
flexible pipeline service (host) path in client SDK
when creating an SDK `Client()` the path to `ml-pipeline` API service is loaded from a hard coded value (`ml-pipeline.kubeflow.svc.cluster.local:8888`) which indicate a specific k8s namespace. it can be valuable to load that default value from an env variable, i.e. changing the line in `_client.py` from:
`config.host = host if host else Client.IN_CLUSTER_DNS_NAME`
to:
`config.host = host or os.environ.get('ML_PIPELINE_DNS_NAME',Client.IN_CLUSTER_DNS_NAME)`
also note that when a user provide the `host` parameter, the ipython output points to the API server and not to the UI service (see the logic in `_get_url_prefix()`), it seems like a potential bug
if its acceptable i can submit a PR for the line change above
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_create_version.py`
Content:
```
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import json
16 import logging
17 import time
18 import re
19
20 from googleapiclient import errors
21 from fire import decorators
22
23 from kfp_component.core import KfpExecutionContext, display
24 from ._client import MLEngineClient
25 from .. import common as gcp_common
26 from ._common_ops import wait_existing_version, wait_for_operation_done
27
28 @decorators.SetParseFns(python_version=str, runtime_version=str)
29 def create_version(model_name, deployemnt_uri=None, version_id=None,
30 runtime_version=None, python_version=None, version=None,
31 replace_existing=False, wait_interval=30):
32 """Creates a MLEngine version and wait for the operation to be done.
33
34 Args:
35 model_name (str): required, the name of the parent model.
36 deployment_uri (str): optional, the Google Cloud Storage location of
37 the trained model used to create the version.
38 version_id (str): optional, the user provided short name of
39 the version. If it is not provided, the operation uses a random name.
40 runtime_version (str): optinal, the Cloud ML Engine runtime version
41 to use for this deployment. If not set, Cloud ML Engine uses
42 the default stable version, 1.0.
43 python_version (str): optinal, the version of Python used in prediction.
44 If not set, the default version is '2.7'. Python '3.5' is available
45 when runtimeVersion is set to '1.4' and above. Python '2.7' works
46 with all supported runtime versions.
47 version (str): optional, the payload of the new version.
48 replace_existing (boolean): boolean flag indicates whether to replace
49 existing version in case of conflict.
50 wait_interval (int): the interval to wait for a long running operation.
51 """
52 if not version:
53 version = {}
54 if deployemnt_uri:
55 version['deploymentUri'] = deployemnt_uri
56 if version_id:
57 version['name'] = version_id
58 if runtime_version:
59 version['runtimeVersion'] = runtime_version
60 if python_version:
61 version['pythonVersion'] = python_version
62
63 return CreateVersionOp(model_name, version,
64 replace_existing, wait_interval).execute_and_wait()
65
66 class CreateVersionOp:
67 def __init__(self, model_name, version,
68 replace_existing, wait_interval):
69 self._ml = MLEngineClient()
70 self._model_name = model_name
71 self._project_id, self._model_id = self._parse_model_name(model_name)
72 # The name of the version resource, which is in the format
73 # of projects/*/models/*/versions/*
74 self._version_name = None
75 # The user provide short name of the version.
76 self._version_id = None
77 # The full payload of the version resource.
78 self._version = version
79 self._replace_existing = replace_existing
80 self._wait_interval = wait_interval
81 self._create_operation_name = None
82 self._delete_operation_name = None
83
84 def execute_and_wait(self):
85 with KfpExecutionContext(on_cancel=self._cancel) as ctx:
86 self._set_version_name(ctx.context_id())
87 self._dump_metadata()
88 existing_version = wait_existing_version(self._ml,
89 self._version_name,
90 self._wait_interval)
91 if existing_version and self._is_dup_version(existing_version):
92 return self._handle_completed_version(existing_version)
93
94 if existing_version and self._replace_existing:
95 logging.info('Deleting existing version...')
96 self._delete_version_and_wait()
97 elif existing_version:
98 raise RuntimeError(
99 'Existing version conflicts with the name of the new version.')
100
101 created_version = self._create_version_and_wait()
102 return self._handle_completed_version(created_version)
103
104 def _parse_model_name(self, model_name):
105 match = re.search(r'^projects/([^/]+)/models/([^/]+)$', model_name)
106 if not match:
107 raise ValueError('model name "{}" is not in desired format.'.format(model_name))
108 return (match.group(1), match.group(2))
109
110 def _set_version_name(self, context_id):
111 name = self._version.get('name', None)
112 if not name:
113 name = 'ver_' + context_id
114 name = gcp_common.normalize_name(name)
115 self._version_id = name
116 self._version['name'] = name
117 self._version_name = '{}/versions/{}'.format(self._model_name, name)
118
119 def _cancel(self):
120 if self._delete_operation_name:
121 self._ml.cancel_operation(self._delete_operation_name)
122
123 if self._create_operation_name:
124 self._ml.cancel_operation(self._create_operation_name)
125
126 def _create_version_and_wait(self):
127 operation = self._ml.create_version(self._model_name, self._version)
128 # Cache operation name for cancellation.
129 self._create_operation_name = operation.get('name')
130 try:
131 operation = wait_for_operation_done(
132 self._ml,
133 self._create_operation_name,
134 'create version',
135 self._wait_interval)
136 finally:
137 self._create_operation_name = None
138 return operation.get('response', None)
139
140 def _delete_version_and_wait(self):
141 operation = self._ml.delete_version(self._version_name)
142 # Cache operation name for cancellation.
143 self._delete_operation_name = operation.get('name')
144 try:
145 wait_for_operation_done(
146 self._ml,
147 self._delete_operation_name,
148 'delete version',
149 self._wait_interval)
150 finally:
151 self._delete_operation_name = None
152
153 def _handle_completed_version(self, version):
154 state = version.get('state', None)
155 if state == 'FAILED':
156 error_message = version.get('errorMessage', 'Unknown failure')
157 raise RuntimeError('Version is in failed state: {}'.format(
158 error_message))
159 # Workaround issue that CMLE doesn't return the full version name.
160 version['name'] = self._version_name
161 self._dump_version(version)
162 return version
163
164 def _dump_metadata(self):
165 display.display(display.Link(
166 'https://console.cloud.google.com/mlengine/models/{}/versions/{}?project={}'.format(
167 self._model_id, self._version_id, self._project_id),
168 'Version Details'
169 ))
170 display.display(display.Markdown('''
171 ## Online Prediction
172
173 ### REST endpoint
174 The REST endpoint for online prediction is as follows:
175 ```
176 POST https://ml.googleapis.com/v1/{}:predict
177 ```
178 Try the REST endpoint in [Google OAuth 2.0 Playgound](https://developers.google.com/oauthplayground/#step3\
179 &apisSelect=https://www.googleapis.com/auth/cloud-platform&postData={{"instances":[]}}\
180 &url=https://ml.googleapis.com/v1/{}:predict&content_type=application/json&http_method=POST).
181
182 ### GCloud command
183 ```bash
184 gcloud ai-platform predict --model {} \
185 --version {} \
186 --json-instances instances.json
187 ```
188 '''.format(self._version_name, self._version_name, self._model_id, self._version_id)))
189
190 def _dump_version(self, version):
191 logging.info('Dumping version: {}'.format(version))
192 gcp_common.dump_file('/tmp/kfp/output/ml_engine/version.json', json.dumps(version))
193 gcp_common.dump_file('/tmp/kfp/output/ml_engine/version_name.txt', version['name'])
194
195 def _is_dup_version(self, existing_version):
196 return not gcp_common.check_resource_changed(
197 self._version,
198 existing_version,
199 ['description', 'deploymentUri',
200 'runtimeVersion', 'machineType', 'labels',
201 'framework', 'pythonVersion', 'autoScaling',
202 'manualScaling'])
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_create_version.py b/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_create_version.py
--- a/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_create_version.py
+++ b/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_create_version.py
@@ -44,7 +44,7 @@
If not set, the default version is '2.7'. Python '3.5' is available
when runtimeVersion is set to '1.4' and above. Python '2.7' works
with all supported runtime versions.
- version (str): optional, the payload of the new version.
+ version (dict): optional, the payload of the new version.
replace_existing (boolean): boolean flag indicates whether to replace
existing version in case of conflict.
wait_interval (int): the interval to wait for a long running operation.
@@ -199,4 +199,4 @@
['description', 'deploymentUri',
'runtimeVersion', 'machineType', 'labels',
'framework', 'pythonVersion', 'autoScaling',
- 'manualScaling'])
\ No newline at end of file
+ 'manualScaling'])
| {"golden_diff": "diff --git a/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_create_version.py b/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_create_version.py\n--- a/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_create_version.py\n+++ b/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_create_version.py\n@@ -44,7 +44,7 @@\n If not set, the default version is '2.7'. Python '3.5' is available\n when runtimeVersion is set to '1.4' and above. Python '2.7' works \n with all supported runtime versions.\n- version (str): optional, the payload of the new version.\n+ version (dict): optional, the payload of the new version.\n replace_existing (boolean): boolean flag indicates whether to replace \n existing version in case of conflict.\n wait_interval (int): the interval to wait for a long running operation.\n@@ -199,4 +199,4 @@\n ['description', 'deploymentUri', \n 'runtimeVersion', 'machineType', 'labels',\n 'framework', 'pythonVersion', 'autoScaling',\n- 'manualScaling'])\n\\ No newline at end of file\n+ 'manualScaling'])\n", "issue": "kfp component ml_engine deploy version param is incorrectly typed\n### What steps did you take:\r\n\r\nI tried to specify the `version` param to the gcp ml_engine deploy component via:\r\n\r\n```\r\n mlengine_deploy_op = comp.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/1.0.0/components/gcp/ml_engine/deploy/component.yaml')\r\n\r\n. . .\r\n\r\n upload_task = mlengine_deploy_op(\r\n model_id=model_id,\r\n project_id=project_id,\r\n python_version=python_version,\r\n runtime_version=runtime_version,\r\n version_id=version_id,\r\n model_uri=model_uri,\r\n model={\r\n 'onlinePredictionLogging': True,\r\n 'onlinePredictionConsoleLogging': True,\r\n },\r\n version={\r\n 'packageUris': package_uris,\r\n 'predictionClass': prediction_class,\r\n },\r\n)\r\n```\r\n\r\n### What happened:\r\n\r\nReceived a type error that str object (version) does not support assignment\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python2.7/runpy.py\", line 174, in _run_module_as_main\r\n \"__main__\", fname, loader, pkg_name)\r\n File \"/usr/local/lib/python2.7/runpy.py\", line 72, in _run_code\r\n exec code in run_globals\r\n File \"/ml/kfp_component/launcher/__main__.py\", line 34, in <module>\r\n main()\r\n File \"/ml/kfp_component/launcher/__main__.py\", line 31, in main\r\n launch(args.file_or_module, args.args)\r\n File \"kfp_component/launcher/launcher.py\", line 45, in launch\r\n return fire.Fire(module, command=args, name=module.__name__)\r\n File \"/usr/local/lib/python2.7/site-packages/fire/core.py\", line 127, in Fire\r\n component_trace = _Fire(component, args, context, name)\r\n File \"/usr/local/lib/python2.7/site-packages/fire/core.py\", line 366, in _Fire\r\n component, remaining_args)\r\n File \"/usr/local/lib/python2.7/site-packages/fire/core.py\", line 542, in _CallCallable\r\n result = fn(*varargs, **kwargs)\r\n File \"kfp_component/google/ml_engine/_deploy.py\", line 68, in deploy\r\n wait_interval)\r\n File \"kfp_component/google/ml_engine/_create_version.py\", line 55, in create_version\r\n version['deploymentUri'] = deployemnt_uri\r\nTypeError: 'str' object does not support item assignment\r\n```\r\n\r\n### What did you expect to happen:\r\n\r\nI expected to be able to pass additional version parameters as a dictionary as described in 
https://cloud.google.com/ml-engine/reference/rest/v1/projects.models.versions and https://github.com/kubeflow/pipelines/blob/master/components/gcp/ml_engine/deploy/component.yaml#L64:L69\r\n\r\nHowever, this type is being declared as a str when it should be a dict: https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_create_version.py#L47\r\n\r\nThis is ultimately based on the assumption that fire is using the docstring to do type hinting.\r\n\r\n### Environment:\r\nHow did you deploy Kubeflow Pipelines (KFP)?\r\nDeployed via AI-Platform Pipelines\r\n\r\nKFP version:\r\n```\r\nBuild commit: 9c16e12\r\n``` \r\n\r\nKFP SDK version: \r\n```\r\nkfp 0.5.1\r\nkfp-server-api 0.5.0\r\n```\r\n\r\n/kind bug\r\n/area sdk\nflexible pipeline service (host) path in client SDK \nwhen creating an SDK `Client()` the path to `ml-pipeline` API service is loaded from a hard coded value (`ml-pipeline.kubeflow.svc.cluster.local:8888`) which indicate a specific k8s namespace. it can be valuable to load that default value from an env variable, i.e. changing the line in `_client.py` from:\r\n\r\n`config.host = host if host else Client.IN_CLUSTER_DNS_NAME`\r\n\r\nto:\r\n\r\n`config.host = host or os.environ.get('ML_PIPELINE_DNS_NAME',Client.IN_CLUSTER_DNS_NAME)`\r\n\r\nalso note that when a user provide the `host` parameter, the ipython output points to the API server and not to the UI service (see the logic in `_get_url_prefix()`), it seems like a potential bug\r\n\r\nif its acceptable i can submit a PR for the line change above\r\n \n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport logging\nimport time\nimport re\n\nfrom googleapiclient import errors\nfrom fire import decorators\n\nfrom kfp_component.core import KfpExecutionContext, display\nfrom ._client import MLEngineClient\nfrom .. import common as gcp_common\nfrom ._common_ops import wait_existing_version, wait_for_operation_done\n\n@decorators.SetParseFns(python_version=str, runtime_version=str)\ndef create_version(model_name, deployemnt_uri=None, version_id=None, \n runtime_version=None, python_version=None, version=None, \n replace_existing=False, wait_interval=30):\n \"\"\"Creates a MLEngine version and wait for the operation to be done.\n\n Args:\n model_name (str): required, the name of the parent model.\n deployment_uri (str): optional, the Google Cloud Storage location of \n the trained model used to create the version.\n version_id (str): optional, the user provided short name of \n the version. If it is not provided, the operation uses a random name.\n runtime_version (str): optinal, the Cloud ML Engine runtime version \n to use for this deployment. If not set, Cloud ML Engine uses \n the default stable version, 1.0. \n python_version (str): optinal, the version of Python used in prediction. \n If not set, the default version is '2.7'. 
Python '3.5' is available\n when runtimeVersion is set to '1.4' and above. Python '2.7' works \n with all supported runtime versions.\n version (str): optional, the payload of the new version.\n replace_existing (boolean): boolean flag indicates whether to replace \n existing version in case of conflict.\n wait_interval (int): the interval to wait for a long running operation.\n \"\"\"\n if not version:\n version = {}\n if deployemnt_uri:\n version['deploymentUri'] = deployemnt_uri\n if version_id:\n version['name'] = version_id\n if runtime_version:\n version['runtimeVersion'] = runtime_version\n if python_version:\n version['pythonVersion'] = python_version\n\n return CreateVersionOp(model_name, version, \n replace_existing, wait_interval).execute_and_wait()\n\nclass CreateVersionOp:\n def __init__(self, model_name, version, \n replace_existing, wait_interval):\n self._ml = MLEngineClient()\n self._model_name = model_name\n self._project_id, self._model_id = self._parse_model_name(model_name)\n # The name of the version resource, which is in the format \n # of projects/*/models/*/versions/*\n self._version_name = None\n # The user provide short name of the version.\n self._version_id = None\n # The full payload of the version resource.\n self._version = version\n self._replace_existing = replace_existing\n self._wait_interval = wait_interval\n self._create_operation_name = None\n self._delete_operation_name = None\n\n def execute_and_wait(self):\n with KfpExecutionContext(on_cancel=self._cancel) as ctx:\n self._set_version_name(ctx.context_id())\n self._dump_metadata()\n existing_version = wait_existing_version(self._ml, \n self._version_name, \n self._wait_interval)\n if existing_version and self._is_dup_version(existing_version):\n return self._handle_completed_version(existing_version)\n\n if existing_version and self._replace_existing:\n logging.info('Deleting existing version...')\n self._delete_version_and_wait()\n elif existing_version:\n raise RuntimeError(\n 'Existing version conflicts with the name of the new version.')\n \n created_version = self._create_version_and_wait()\n return self._handle_completed_version(created_version)\n \n def _parse_model_name(self, model_name):\n match = re.search(r'^projects/([^/]+)/models/([^/]+)$', model_name)\n if not match:\n raise ValueError('model name \"{}\" is not in desired format.'.format(model_name))\n return (match.group(1), match.group(2))\n\n def _set_version_name(self, context_id):\n name = self._version.get('name', None)\n if not name:\n name = 'ver_' + context_id\n name = gcp_common.normalize_name(name)\n self._version_id = name\n self._version['name'] = name\n self._version_name = '{}/versions/{}'.format(self._model_name, name)\n\n def _cancel(self):\n if self._delete_operation_name:\n self._ml.cancel_operation(self._delete_operation_name)\n\n if self._create_operation_name:\n self._ml.cancel_operation(self._create_operation_name)\n\n def _create_version_and_wait(self):\n operation = self._ml.create_version(self._model_name, self._version)\n # Cache operation name for cancellation.\n self._create_operation_name = operation.get('name')\n try:\n operation = wait_for_operation_done(\n self._ml,\n self._create_operation_name, \n 'create version',\n self._wait_interval)\n finally:\n self._create_operation_name = None\n return operation.get('response', None)\n\n def _delete_version_and_wait(self):\n operation = self._ml.delete_version(self._version_name)\n # Cache operation name for cancellation.\n self._delete_operation_name = 
operation.get('name')\n try:\n wait_for_operation_done(\n self._ml,\n self._delete_operation_name, \n 'delete version',\n self._wait_interval)\n finally:\n self._delete_operation_name = None\n \n def _handle_completed_version(self, version):\n state = version.get('state', None)\n if state == 'FAILED':\n error_message = version.get('errorMessage', 'Unknown failure')\n raise RuntimeError('Version is in failed state: {}'.format(\n error_message))\n # Workaround issue that CMLE doesn't return the full version name.\n version['name'] = self._version_name\n self._dump_version(version)\n return version\n\n def _dump_metadata(self):\n display.display(display.Link(\n 'https://console.cloud.google.com/mlengine/models/{}/versions/{}?project={}'.format(\n self._model_id, self._version_id, self._project_id),\n 'Version Details'\n ))\n display.display(display.Markdown('''\n## Online Prediction\n\n### REST endpoint\nThe REST endpoint for online prediction is as follows:\n```\nPOST https://ml.googleapis.com/v1/{}:predict\n```\nTry the REST endpoint in [Google OAuth 2.0 Playgound](https://developers.google.com/oauthplayground/#step3\\\n&apisSelect=https://www.googleapis.com/auth/cloud-platform&postData={{\"instances\":[]}}\\\n&url=https://ml.googleapis.com/v1/{}:predict&content_type=application/json&http_method=POST).\n\n### GCloud command\n```bash\ngcloud ai-platform predict --model {} \\\n --version {} \\\n --json-instances instances.json\n```\n '''.format(self._version_name, self._version_name, self._model_id, self._version_id)))\n\n def _dump_version(self, version):\n logging.info('Dumping version: {}'.format(version))\n gcp_common.dump_file('/tmp/kfp/output/ml_engine/version.json', json.dumps(version))\n gcp_common.dump_file('/tmp/kfp/output/ml_engine/version_name.txt', version['name'])\n\n def _is_dup_version(self, existing_version):\n return not gcp_common.check_resource_changed(\n self._version,\n existing_version,\n ['description', 'deploymentUri', \n 'runtimeVersion', 'machineType', 'labels',\n 'framework', 'pythonVersion', 'autoScaling',\n 'manualScaling'])", "path": "components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_create_version.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport logging\nimport time\nimport re\n\nfrom googleapiclient import errors\nfrom fire import decorators\n\nfrom kfp_component.core import KfpExecutionContext, display\nfrom ._client import MLEngineClient\nfrom .. 
import common as gcp_common\nfrom ._common_ops import wait_existing_version, wait_for_operation_done\n\n@decorators.SetParseFns(python_version=str, runtime_version=str)\ndef create_version(model_name, deployemnt_uri=None, version_id=None, \n runtime_version=None, python_version=None, version=None, \n replace_existing=False, wait_interval=30):\n \"\"\"Creates a MLEngine version and wait for the operation to be done.\n\n Args:\n model_name (str): required, the name of the parent model.\n deployment_uri (str): optional, the Google Cloud Storage location of \n the trained model used to create the version.\n version_id (str): optional, the user provided short name of \n the version. If it is not provided, the operation uses a random name.\n runtime_version (str): optinal, the Cloud ML Engine runtime version \n to use for this deployment. If not set, Cloud ML Engine uses \n the default stable version, 1.0. \n python_version (str): optinal, the version of Python used in prediction. \n If not set, the default version is '2.7'. Python '3.5' is available\n when runtimeVersion is set to '1.4' and above. Python '2.7' works \n with all supported runtime versions.\n version (dict): optional, the payload of the new version.\n replace_existing (boolean): boolean flag indicates whether to replace \n existing version in case of conflict.\n wait_interval (int): the interval to wait for a long running operation.\n \"\"\"\n if not version:\n version = {}\n if deployemnt_uri:\n version['deploymentUri'] = deployemnt_uri\n if version_id:\n version['name'] = version_id\n if runtime_version:\n version['runtimeVersion'] = runtime_version\n if python_version:\n version['pythonVersion'] = python_version\n\n return CreateVersionOp(model_name, version, \n replace_existing, wait_interval).execute_and_wait()\n\nclass CreateVersionOp:\n def __init__(self, model_name, version, \n replace_existing, wait_interval):\n self._ml = MLEngineClient()\n self._model_name = model_name\n self._project_id, self._model_id = self._parse_model_name(model_name)\n # The name of the version resource, which is in the format \n # of projects/*/models/*/versions/*\n self._version_name = None\n # The user provide short name of the version.\n self._version_id = None\n # The full payload of the version resource.\n self._version = version\n self._replace_existing = replace_existing\n self._wait_interval = wait_interval\n self._create_operation_name = None\n self._delete_operation_name = None\n\n def execute_and_wait(self):\n with KfpExecutionContext(on_cancel=self._cancel) as ctx:\n self._set_version_name(ctx.context_id())\n self._dump_metadata()\n existing_version = wait_existing_version(self._ml, \n self._version_name, \n self._wait_interval)\n if existing_version and self._is_dup_version(existing_version):\n return self._handle_completed_version(existing_version)\n\n if existing_version and self._replace_existing:\n logging.info('Deleting existing version...')\n self._delete_version_and_wait()\n elif existing_version:\n raise RuntimeError(\n 'Existing version conflicts with the name of the new version.')\n \n created_version = self._create_version_and_wait()\n return self._handle_completed_version(created_version)\n \n def _parse_model_name(self, model_name):\n match = re.search(r'^projects/([^/]+)/models/([^/]+)$', model_name)\n if not match:\n raise ValueError('model name \"{}\" is not in desired format.'.format(model_name))\n return (match.group(1), match.group(2))\n\n def _set_version_name(self, context_id):\n name = self._version.get('name', 
None)\n if not name:\n name = 'ver_' + context_id\n name = gcp_common.normalize_name(name)\n self._version_id = name\n self._version['name'] = name\n self._version_name = '{}/versions/{}'.format(self._model_name, name)\n\n def _cancel(self):\n if self._delete_operation_name:\n self._ml.cancel_operation(self._delete_operation_name)\n\n if self._create_operation_name:\n self._ml.cancel_operation(self._create_operation_name)\n\n def _create_version_and_wait(self):\n operation = self._ml.create_version(self._model_name, self._version)\n # Cache operation name for cancellation.\n self._create_operation_name = operation.get('name')\n try:\n operation = wait_for_operation_done(\n self._ml,\n self._create_operation_name, \n 'create version',\n self._wait_interval)\n finally:\n self._create_operation_name = None\n return operation.get('response', None)\n\n def _delete_version_and_wait(self):\n operation = self._ml.delete_version(self._version_name)\n # Cache operation name for cancellation.\n self._delete_operation_name = operation.get('name')\n try:\n wait_for_operation_done(\n self._ml,\n self._delete_operation_name, \n 'delete version',\n self._wait_interval)\n finally:\n self._delete_operation_name = None\n \n def _handle_completed_version(self, version):\n state = version.get('state', None)\n if state == 'FAILED':\n error_message = version.get('errorMessage', 'Unknown failure')\n raise RuntimeError('Version is in failed state: {}'.format(\n error_message))\n # Workaround issue that CMLE doesn't return the full version name.\n version['name'] = self._version_name\n self._dump_version(version)\n return version\n\n def _dump_metadata(self):\n display.display(display.Link(\n 'https://console.cloud.google.com/mlengine/models/{}/versions/{}?project={}'.format(\n self._model_id, self._version_id, self._project_id),\n 'Version Details'\n ))\n display.display(display.Markdown('''\n## Online Prediction\n\n### REST endpoint\nThe REST endpoint for online prediction is as follows:\n```\nPOST https://ml.googleapis.com/v1/{}:predict\n```\nTry the REST endpoint in [Google OAuth 2.0 Playgound](https://developers.google.com/oauthplayground/#step3\\\n&apisSelect=https://www.googleapis.com/auth/cloud-platform&postData={{\"instances\":[]}}\\\n&url=https://ml.googleapis.com/v1/{}:predict&content_type=application/json&http_method=POST).\n\n### GCloud command\n```bash\ngcloud ai-platform predict --model {} \\\n --version {} \\\n --json-instances instances.json\n```\n '''.format(self._version_name, self._version_name, self._model_id, self._version_id)))\n\n def _dump_version(self, version):\n logging.info('Dumping version: {}'.format(version))\n gcp_common.dump_file('/tmp/kfp/output/ml_engine/version.json', json.dumps(version))\n gcp_common.dump_file('/tmp/kfp/output/ml_engine/version_name.txt', version['name'])\n\n def _is_dup_version(self, existing_version):\n return not gcp_common.check_resource_changed(\n self._version,\n existing_version,\n ['description', 'deploymentUri', \n 'runtimeVersion', 'machineType', 'labels',\n 'framework', 'pythonVersion', 'autoScaling',\n 'manualScaling'])\n", "path": "components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_create_version.py"}]} | 3,536 | 279 |
gh_patches_debug_23723 | rasdani/github-patches | git_diff | qutebrowser__qutebrowser-3468 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Doing :-w open crashes
When typing `:-w open ` (with trailing space):
```
17:56:49 DEBUG completion completer:_partition:141 partitioning ['-w', ' open', ' '] around position 8
17:56:49 DEBUG completion completer:_partition:153 partitioned: ['-w', 'open'] '' []
17:56:49 DEBUG completion completer:_update_completion:236 Updating completion: ['-w', 'open'] []
17:56:49 DEBUG completion completer:_get_new_completion:89 Before removing flags: ['-w', 'open']
17:56:49 DEBUG completion completer:_get_new_completion:91 After removing flags: ['open']
17:56:49 DEBUG completion debug:__exit__:264 Starting url completion took 6.5e-05 seconds.
17:56:49 ERROR misc crashsignal:exception_hook:216 Uncaught exception
Traceback (most recent call last):
File "/home/florian/proj/qutebrowser/git/qutebrowser/completion/completer.py", line 255, in _update_completion
model = func(*args, info=info)
TypeError: url() takes 0 positional arguments but 1 positional argument (and 1 keyword-only argument) were given
```
cc @rcorre
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qutebrowser/completion/completer.py`
Content:
```
1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
2
3 # Copyright 2014-2017 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
4 #
5 # This file is part of qutebrowser.
6 #
7 # qutebrowser is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # qutebrowser is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
19
20 """Completer attached to a CompletionView."""
21
22 import attr
23 from PyQt5.QtCore import pyqtSlot, QObject, QTimer
24
25 from qutebrowser.config import config
26 from qutebrowser.commands import cmdutils, runners
27 from qutebrowser.utils import log, utils, debug
28 from qutebrowser.completion.models import miscmodels
29
30
31 @attr.s
32 class CompletionInfo:
33
34 """Context passed into all completion functions."""
35
36 config = attr.ib()
37 keyconf = attr.ib()
38 win_id = attr.ib()
39
40
41 class Completer(QObject):
42
43 """Completer which manages completions in a CompletionView.
44
45 Attributes:
46 _cmd: The statusbar Command object this completer belongs to.
47 _win_id: The id of the window that owns this object.
48 _timer: The timer used to trigger the completion update.
49 _last_cursor_pos: The old cursor position so we avoid double completion
50 updates.
51 _last_text: The old command text so we avoid double completion updates.
52 _last_completion_func: The completion function used for the last text.
53 """
54
55 def __init__(self, *, cmd, win_id, parent=None):
56 super().__init__(parent)
57 self._cmd = cmd
58 self._win_id = win_id
59 self._timer = QTimer()
60 self._timer.setSingleShot(True)
61 self._timer.setInterval(0)
62 self._timer.timeout.connect(self._update_completion)
63 self._last_cursor_pos = None
64 self._last_text = None
65 self._last_completion_func = None
66 self._cmd.update_completion.connect(self.schedule_completion_update)
67
68 def __repr__(self):
69 return utils.get_repr(self)
70
71 def _model(self):
72 """Convenience method to get the current completion model."""
73 completion = self.parent()
74 return completion.model()
75
76 def _get_new_completion(self, before_cursor, under_cursor):
77 """Get the completion function based on the current command text.
78
79 Args:
80 before_cursor: The command chunks before the cursor.
81 under_cursor: The command chunk under the cursor.
82
83 Return:
84 A completion model.
85 """
86 if '--' in before_cursor or under_cursor.startswith('-'):
87 # cursor on a flag or after an explicit split (--)
88 return None
89 log.completion.debug("Before removing flags: {}".format(before_cursor))
90 before_cursor = [x for x in before_cursor if not x.startswith('-')]
91 log.completion.debug("After removing flags: {}".format(before_cursor))
92 if not before_cursor:
93 # '|' or 'set|'
94 log.completion.debug('Starting command completion')
95 return miscmodels.command
96 try:
97 cmd = cmdutils.cmd_dict[before_cursor[0]]
98 except KeyError:
99 log.completion.debug("No completion for unknown command: {}"
100 .format(before_cursor[0]))
101 return None
102 argpos = len(before_cursor) - 1
103 try:
104 func = cmd.get_pos_arg_info(argpos).completion
105 except IndexError:
106 log.completion.debug("No completion in position {}".format(argpos))
107 return None
108 return func
109
110 def _quote(self, s):
111 """Quote s if it needs quoting for the commandline.
112
113 Note we don't use shlex.quote because that quotes a lot of shell
114 metachars we don't need to have quoted.
115 """
116 if not s:
117 return "''"
118 elif any(c in s for c in ' "\'\t\n\\'):
119 # use single quotes, and put single quotes into double quotes
120 # the string $'b is then quoted as '$'"'"'b'
121 return "'" + s.replace("'", "'\"'\"'") + "'"
122 else:
123 return s
124
125 def _partition(self):
126 """Divide the commandline text into chunks around the cursor position.
127
128 Return:
129 ([parts_before_cursor], 'part_under_cursor', [parts_after_cursor])
130 """
131 text = self._cmd.text()[len(self._cmd.prefix()):]
132 if not text or not text.strip():
133 # Only ":", empty part under the cursor with nothing before/after
134 return [], '', []
135 parser = runners.CommandParser()
136 result = parser.parse(text, fallback=True, keep=True)
137 parts = [x for x in result.cmdline if x]
138 pos = self._cmd.cursorPosition() - len(self._cmd.prefix())
139 pos = min(pos, len(text)) # Qt treats 2-byte UTF-16 chars as 2 chars
140 log.completion.debug('partitioning {} around position {}'.format(parts,
141 pos))
142 for i, part in enumerate(parts):
143 pos -= len(part)
144 if pos <= 0:
145 if part[pos-1:pos+1].isspace():
146 # cursor is in a space between two existing words
147 parts.insert(i, '')
148 prefix = [x.strip() for x in parts[:i]]
149 center = parts[i].strip()
150 # strip trailing whitepsace included as a separate token
151 postfix = [x.strip() for x in parts[i+1:] if not x.isspace()]
152 log.completion.debug(
153 "partitioned: {} '{}' {}".format(prefix, center, postfix))
154 return prefix, center, postfix
155
156 raise utils.Unreachable("Not all parts consumed: {}".format(parts))
157
158 @pyqtSlot(str)
159 def on_selection_changed(self, text):
160 """Change the completed part if a new item was selected.
161
162 Called from the views selectionChanged method.
163
164 Args:
165 text: Newly selected text.
166 """
167 if text is None:
168 return
169 before, center, after = self._partition()
170 log.completion.debug("Changing {} to '{}'".format(center, text))
171 try:
172 maxsplit = cmdutils.cmd_dict[before[0]].maxsplit
173 except (KeyError, IndexError):
174 maxsplit = None
175 if maxsplit is None:
176 text = self._quote(text)
177 model = self._model()
178 if model.count() == 1 and config.val.completion.quick:
179 # If we only have one item, we want to apply it immediately and go
180 # on to the next part, unless we are quick-completing the part
181 # after maxsplit, so that we don't keep offering completions
182 # (see issue #1519)
183 if maxsplit is not None and maxsplit < len(before):
184 self._change_completed_part(text, before, after)
185 else:
186 self._change_completed_part(text, before, after,
187 immediate=True)
188 else:
189 self._change_completed_part(text, before, after)
190
191 @pyqtSlot()
192 def schedule_completion_update(self):
193 """Schedule updating/enabling completion.
194
195 For performance reasons we don't want to block here, instead we do this
196 in the background.
197
198 We delay the update only if we've already input some text and ignore
199 updates if the text is shorter than completion.min_chars (unless we're
200 hitting backspace in which case updates won't be ignored).
201 """
202 _cmd, _sep, rest = self._cmd.text().partition(' ')
203 input_length = len(rest)
204 if (0 < input_length < config.val.completion.min_chars and
205 self._cmd.cursorPosition() > self._last_cursor_pos):
206 log.completion.debug("Ignoring update because the length of "
207 "the text is less than completion.min_chars.")
208 elif (self._cmd.cursorPosition() == self._last_cursor_pos and
209 self._cmd.text() == self._last_text):
210 log.completion.debug("Ignoring update because there were no "
211 "changes.")
212 else:
213 log.completion.debug("Scheduling completion update.")
214 start_delay = config.val.completion.delay if self._last_text else 0
215 self._timer.start(start_delay)
216 self._last_cursor_pos = self._cmd.cursorPosition()
217 self._last_text = self._cmd.text()
218
219 @pyqtSlot()
220 def _update_completion(self):
221 """Check if completions are available and activate them."""
222 completion = self.parent()
223
224 if self._cmd.prefix() != ':':
225 # This is a search or gibberish, so we don't need to complete
226 # anything (yet)
227 # FIXME complete searches
228 # https://github.com/qutebrowser/qutebrowser/issues/32
229 completion.set_model(None)
230 self._last_completion_func = None
231 return
232
233 before_cursor, pattern, after_cursor = self._partition()
234
235 log.completion.debug("Updating completion: {} {} {}".format(
236 before_cursor, pattern, after_cursor))
237
238 pattern = pattern.strip("'\"")
239 func = self._get_new_completion(before_cursor, pattern)
240
241 if func is None:
242 log.completion.debug('Clearing completion')
243 completion.set_model(None)
244 self._last_completion_func = None
245 return
246
247 if func != self._last_completion_func:
248 self._last_completion_func = func
249 args = (x for x in before_cursor[1:] if not x.startswith('-'))
250 with debug.log_time(log.completion, 'Starting {} completion'
251 .format(func.__name__)):
252 info = CompletionInfo(config=config.instance,
253 keyconf=config.key_instance,
254 win_id=self._win_id)
255 model = func(*args, info=info)
256 with debug.log_time(log.completion, 'Set completion model'):
257 completion.set_model(model)
258
259 completion.set_pattern(pattern)
260
261 def _change_completed_part(self, newtext, before, after, immediate=False):
262 """Change the part we're currently completing in the commandline.
263
264 Args:
265 text: The text to set (string) for the token under the cursor.
266 before: Commandline tokens before the token under the cursor.
267 after: Commandline tokens after the token under the cursor.
268 immediate: True if the text should be completed immediately
269 including a trailing space and we shouldn't continue
270 completing the current item.
271 """
272 text = self._cmd.prefix() + ' '.join(before + [newtext])
273 pos = len(text) + (1 if immediate else 0)
274 if after:
275 text += ' ' + ' '.join(after)
276 elif immediate:
277 # pad with a space if quick-completing the last entry
278 text += ' '
279 log.completion.debug("setting text = '{}', pos = {}".format(text, pos))
280
281 # generally, we don't want to let self._cmd emit cursorPositionChanged,
282 # because that'll schedule a completion update. That happens when
283 # tabbing through the completions, and we want to change the command
284 # text but we also want to keep the original completion list for the
285 # command the user manually entered. The exception is when we're
286 # immediately completing, in which case we *do* want to update the
287 # completion view so that we can start completing the next part
288 if not immediate:
289 self._cmd.blockSignals(True)
290
291 self._cmd.setText(text)
292 self._cmd.setCursorPosition(pos)
293 self._cmd.setFocus()
294
295 self._cmd.blockSignals(False)
296 self._cmd.show_cmd.emit()
297
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qutebrowser/completion/completer.py b/qutebrowser/completion/completer.py
--- a/qutebrowser/completion/completer.py
+++ b/qutebrowser/completion/completer.py
@@ -87,8 +87,6 @@
# cursor on a flag or after an explicit split (--)
return None
log.completion.debug("Before removing flags: {}".format(before_cursor))
- before_cursor = [x for x in before_cursor if not x.startswith('-')]
- log.completion.debug("After removing flags: {}".format(before_cursor))
if not before_cursor:
# '|' or 'set|'
log.completion.debug('Starting command completion')
@@ -99,6 +97,9 @@
log.completion.debug("No completion for unknown command: {}"
.format(before_cursor[0]))
return None
+
+ before_cursor = [x for x in before_cursor if not x.startswith('-')]
+ log.completion.debug("After removing flags: {}".format(before_cursor))
argpos = len(before_cursor) - 1
try:
func = cmd.get_pos_arg_info(argpos).completion
| {"golden_diff": "diff --git a/qutebrowser/completion/completer.py b/qutebrowser/completion/completer.py\n--- a/qutebrowser/completion/completer.py\n+++ b/qutebrowser/completion/completer.py\n@@ -87,8 +87,6 @@\n # cursor on a flag or after an explicit split (--)\n return None\n log.completion.debug(\"Before removing flags: {}\".format(before_cursor))\n- before_cursor = [x for x in before_cursor if not x.startswith('-')]\n- log.completion.debug(\"After removing flags: {}\".format(before_cursor))\n if not before_cursor:\n # '|' or 'set|'\n log.completion.debug('Starting command completion')\n@@ -99,6 +97,9 @@\n log.completion.debug(\"No completion for unknown command: {}\"\n .format(before_cursor[0]))\n return None\n+\n+ before_cursor = [x for x in before_cursor if not x.startswith('-')]\n+ log.completion.debug(\"After removing flags: {}\".format(before_cursor))\n argpos = len(before_cursor) - 1\n try:\n func = cmd.get_pos_arg_info(argpos).completion\n", "issue": "Doing :-w open crashes\nWhen typing `:-w open ` (with trailing space):\r\n\r\n```\r\n17:56:49 DEBUG completion completer:_partition:141 partitioning ['-w', ' open', ' '] around position 8\r\n17:56:49 DEBUG completion completer:_partition:153 partitioned: ['-w', 'open'] '' []\r\n17:56:49 DEBUG completion completer:_update_completion:236 Updating completion: ['-w', 'open'] []\r\n17:56:49 DEBUG completion completer:_get_new_completion:89 Before removing flags: ['-w', 'open']\r\n17:56:49 DEBUG completion completer:_get_new_completion:91 After removing flags: ['open']\r\n17:56:49 DEBUG completion debug:__exit__:264 Starting url completion took 6.5e-05 seconds.\r\n17:56:49 ERROR misc crashsignal:exception_hook:216 Uncaught exception\r\nTraceback (most recent call last):\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/completion/completer.py\", line 255, in _update_completion\r\n model = func(*args, info=info)\r\nTypeError: url() takes 0 positional arguments but 1 positional argument (and 1 keyword-only argument) were given\r\n```\r\n\r\ncc @rcorre \n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2017 Florian Bruhin (The Compiler) <mail@qutebrowser.org>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Completer attached to a CompletionView.\"\"\"\n\nimport attr\nfrom PyQt5.QtCore import pyqtSlot, QObject, QTimer\n\nfrom qutebrowser.config import config\nfrom qutebrowser.commands import cmdutils, runners\nfrom qutebrowser.utils import log, utils, debug\nfrom qutebrowser.completion.models import miscmodels\n\n\n@attr.s\nclass CompletionInfo:\n\n \"\"\"Context passed into all completion functions.\"\"\"\n\n config = attr.ib()\n keyconf = attr.ib()\n win_id = attr.ib()\n\n\nclass Completer(QObject):\n\n \"\"\"Completer which manages completions in a CompletionView.\n\n Attributes:\n _cmd: The statusbar Command object this completer belongs to.\n _win_id: The id of the window that owns this object.\n _timer: The timer used to trigger the completion update.\n _last_cursor_pos: The old cursor position so we avoid double completion\n updates.\n _last_text: The old command text so we avoid double completion updates.\n _last_completion_func: The completion function used for the last text.\n \"\"\"\n\n def __init__(self, *, cmd, win_id, parent=None):\n super().__init__(parent)\n self._cmd = cmd\n self._win_id = win_id\n self._timer = QTimer()\n self._timer.setSingleShot(True)\n self._timer.setInterval(0)\n self._timer.timeout.connect(self._update_completion)\n self._last_cursor_pos = None\n self._last_text = None\n self._last_completion_func = None\n self._cmd.update_completion.connect(self.schedule_completion_update)\n\n def __repr__(self):\n return utils.get_repr(self)\n\n def _model(self):\n \"\"\"Convenience method to get the current completion model.\"\"\"\n completion = self.parent()\n return completion.model()\n\n def _get_new_completion(self, before_cursor, under_cursor):\n \"\"\"Get the completion function based on the current command text.\n\n Args:\n before_cursor: The command chunks before the cursor.\n under_cursor: The command chunk under the cursor.\n\n Return:\n A completion model.\n \"\"\"\n if '--' in before_cursor or under_cursor.startswith('-'):\n # cursor on a flag or after an explicit split (--)\n return None\n log.completion.debug(\"Before removing flags: {}\".format(before_cursor))\n before_cursor = [x for x in before_cursor if not x.startswith('-')]\n log.completion.debug(\"After removing flags: {}\".format(before_cursor))\n if not before_cursor:\n # '|' or 'set|'\n log.completion.debug('Starting command completion')\n return miscmodels.command\n try:\n cmd = cmdutils.cmd_dict[before_cursor[0]]\n except KeyError:\n log.completion.debug(\"No completion for unknown command: {}\"\n .format(before_cursor[0]))\n return None\n argpos = len(before_cursor) - 1\n try:\n func = cmd.get_pos_arg_info(argpos).completion\n except IndexError:\n log.completion.debug(\"No completion in position {}\".format(argpos))\n return None\n return func\n\n def _quote(self, s):\n \"\"\"Quote s if it needs quoting for the commandline.\n\n Note we don't use shlex.quote because that quotes a lot of shell\n metachars we don't need to have quoted.\n \"\"\"\n if not s:\n return \"''\"\n elif any(c in s for c in ' \"\\'\\t\\n\\\\'):\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"\n else:\n return s\n\n def _partition(self):\n \"\"\"Divide the commandline text into chunks around the cursor position.\n\n Return:\n ([parts_before_cursor], 'part_under_cursor', [parts_after_cursor])\n \"\"\"\n text = 
self._cmd.text()[len(self._cmd.prefix()):]\n if not text or not text.strip():\n # Only \":\", empty part under the cursor with nothing before/after\n return [], '', []\n parser = runners.CommandParser()\n result = parser.parse(text, fallback=True, keep=True)\n parts = [x for x in result.cmdline if x]\n pos = self._cmd.cursorPosition() - len(self._cmd.prefix())\n pos = min(pos, len(text)) # Qt treats 2-byte UTF-16 chars as 2 chars\n log.completion.debug('partitioning {} around position {}'.format(parts,\n pos))\n for i, part in enumerate(parts):\n pos -= len(part)\n if pos <= 0:\n if part[pos-1:pos+1].isspace():\n # cursor is in a space between two existing words\n parts.insert(i, '')\n prefix = [x.strip() for x in parts[:i]]\n center = parts[i].strip()\n # strip trailing whitepsace included as a separate token\n postfix = [x.strip() for x in parts[i+1:] if not x.isspace()]\n log.completion.debug(\n \"partitioned: {} '{}' {}\".format(prefix, center, postfix))\n return prefix, center, postfix\n\n raise utils.Unreachable(\"Not all parts consumed: {}\".format(parts))\n\n @pyqtSlot(str)\n def on_selection_changed(self, text):\n \"\"\"Change the completed part if a new item was selected.\n\n Called from the views selectionChanged method.\n\n Args:\n text: Newly selected text.\n \"\"\"\n if text is None:\n return\n before, center, after = self._partition()\n log.completion.debug(\"Changing {} to '{}'\".format(center, text))\n try:\n maxsplit = cmdutils.cmd_dict[before[0]].maxsplit\n except (KeyError, IndexError):\n maxsplit = None\n if maxsplit is None:\n text = self._quote(text)\n model = self._model()\n if model.count() == 1 and config.val.completion.quick:\n # If we only have one item, we want to apply it immediately and go\n # on to the next part, unless we are quick-completing the part\n # after maxsplit, so that we don't keep offering completions\n # (see issue #1519)\n if maxsplit is not None and maxsplit < len(before):\n self._change_completed_part(text, before, after)\n else:\n self._change_completed_part(text, before, after,\n immediate=True)\n else:\n self._change_completed_part(text, before, after)\n\n @pyqtSlot()\n def schedule_completion_update(self):\n \"\"\"Schedule updating/enabling completion.\n\n For performance reasons we don't want to block here, instead we do this\n in the background.\n\n We delay the update only if we've already input some text and ignore\n updates if the text is shorter than completion.min_chars (unless we're\n hitting backspace in which case updates won't be ignored).\n \"\"\"\n _cmd, _sep, rest = self._cmd.text().partition(' ')\n input_length = len(rest)\n if (0 < input_length < config.val.completion.min_chars and\n self._cmd.cursorPosition() > self._last_cursor_pos):\n log.completion.debug(\"Ignoring update because the length of \"\n \"the text is less than completion.min_chars.\")\n elif (self._cmd.cursorPosition() == self._last_cursor_pos and\n self._cmd.text() == self._last_text):\n log.completion.debug(\"Ignoring update because there were no \"\n \"changes.\")\n else:\n log.completion.debug(\"Scheduling completion update.\")\n start_delay = config.val.completion.delay if self._last_text else 0\n self._timer.start(start_delay)\n self._last_cursor_pos = self._cmd.cursorPosition()\n self._last_text = self._cmd.text()\n\n @pyqtSlot()\n def _update_completion(self):\n \"\"\"Check if completions are available and activate them.\"\"\"\n completion = self.parent()\n\n if self._cmd.prefix() != ':':\n # This is a search or gibberish, so we don't need to 
complete\n # anything (yet)\n # FIXME complete searches\n # https://github.com/qutebrowser/qutebrowser/issues/32\n completion.set_model(None)\n self._last_completion_func = None\n return\n\n before_cursor, pattern, after_cursor = self._partition()\n\n log.completion.debug(\"Updating completion: {} {} {}\".format(\n before_cursor, pattern, after_cursor))\n\n pattern = pattern.strip(\"'\\\"\")\n func = self._get_new_completion(before_cursor, pattern)\n\n if func is None:\n log.completion.debug('Clearing completion')\n completion.set_model(None)\n self._last_completion_func = None\n return\n\n if func != self._last_completion_func:\n self._last_completion_func = func\n args = (x for x in before_cursor[1:] if not x.startswith('-'))\n with debug.log_time(log.completion, 'Starting {} completion'\n .format(func.__name__)):\n info = CompletionInfo(config=config.instance,\n keyconf=config.key_instance,\n win_id=self._win_id)\n model = func(*args, info=info)\n with debug.log_time(log.completion, 'Set completion model'):\n completion.set_model(model)\n\n completion.set_pattern(pattern)\n\n def _change_completed_part(self, newtext, before, after, immediate=False):\n \"\"\"Change the part we're currently completing in the commandline.\n\n Args:\n text: The text to set (string) for the token under the cursor.\n before: Commandline tokens before the token under the cursor.\n after: Commandline tokens after the token under the cursor.\n immediate: True if the text should be completed immediately\n including a trailing space and we shouldn't continue\n completing the current item.\n \"\"\"\n text = self._cmd.prefix() + ' '.join(before + [newtext])\n pos = len(text) + (1 if immediate else 0)\n if after:\n text += ' ' + ' '.join(after)\n elif immediate:\n # pad with a space if quick-completing the last entry\n text += ' '\n log.completion.debug(\"setting text = '{}', pos = {}\".format(text, pos))\n\n # generally, we don't want to let self._cmd emit cursorPositionChanged,\n # because that'll schedule a completion update. That happens when\n # tabbing through the completions, and we want to change the command\n # text but we also want to keep the original completion list for the\n # command the user manually entered. The exception is when we're\n # immediately completing, in which case we *do* want to update the\n # completion view so that we can start completing the next part\n if not immediate:\n self._cmd.blockSignals(True)\n\n self._cmd.setText(text)\n self._cmd.setCursorPosition(pos)\n self._cmd.setFocus()\n\n self._cmd.blockSignals(False)\n self._cmd.show_cmd.emit()\n", "path": "qutebrowser/completion/completer.py"}], "after_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2017 Florian Bruhin (The Compiler) <mail@qutebrowser.org>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Completer attached to a CompletionView.\"\"\"\n\nimport attr\nfrom PyQt5.QtCore import pyqtSlot, QObject, QTimer\n\nfrom qutebrowser.config import config\nfrom qutebrowser.commands import cmdutils, runners\nfrom qutebrowser.utils import log, utils, debug\nfrom qutebrowser.completion.models import miscmodels\n\n\n@attr.s\nclass CompletionInfo:\n\n \"\"\"Context passed into all completion functions.\"\"\"\n\n config = attr.ib()\n keyconf = attr.ib()\n win_id = attr.ib()\n\n\nclass Completer(QObject):\n\n \"\"\"Completer which manages completions in a CompletionView.\n\n Attributes:\n _cmd: The statusbar Command object this completer belongs to.\n _win_id: The id of the window that owns this object.\n _timer: The timer used to trigger the completion update.\n _last_cursor_pos: The old cursor position so we avoid double completion\n updates.\n _last_text: The old command text so we avoid double completion updates.\n _last_completion_func: The completion function used for the last text.\n \"\"\"\n\n def __init__(self, *, cmd, win_id, parent=None):\n super().__init__(parent)\n self._cmd = cmd\n self._win_id = win_id\n self._timer = QTimer()\n self._timer.setSingleShot(True)\n self._timer.setInterval(0)\n self._timer.timeout.connect(self._update_completion)\n self._last_cursor_pos = None\n self._last_text = None\n self._last_completion_func = None\n self._cmd.update_completion.connect(self.schedule_completion_update)\n\n def __repr__(self):\n return utils.get_repr(self)\n\n def _model(self):\n \"\"\"Convenience method to get the current completion model.\"\"\"\n completion = self.parent()\n return completion.model()\n\n def _get_new_completion(self, before_cursor, under_cursor):\n \"\"\"Get the completion function based on the current command text.\n\n Args:\n before_cursor: The command chunks before the cursor.\n under_cursor: The command chunk under the cursor.\n\n Return:\n A completion model.\n \"\"\"\n if '--' in before_cursor or under_cursor.startswith('-'):\n # cursor on a flag or after an explicit split (--)\n return None\n log.completion.debug(\"Before removing flags: {}\".format(before_cursor))\n if not before_cursor:\n # '|' or 'set|'\n log.completion.debug('Starting command completion')\n return miscmodels.command\n try:\n cmd = cmdutils.cmd_dict[before_cursor[0]]\n except KeyError:\n log.completion.debug(\"No completion for unknown command: {}\"\n .format(before_cursor[0]))\n return None\n\n before_cursor = [x for x in before_cursor if not x.startswith('-')]\n log.completion.debug(\"After removing flags: {}\".format(before_cursor))\n argpos = len(before_cursor) - 1\n try:\n func = cmd.get_pos_arg_info(argpos).completion\n except IndexError:\n log.completion.debug(\"No completion in position {}\".format(argpos))\n return None\n return func\n\n def _quote(self, s):\n \"\"\"Quote s if it needs quoting for the commandline.\n\n Note we don't use shlex.quote because that quotes a lot of shell\n metachars we don't need to have quoted.\n \"\"\"\n if not s:\n return \"''\"\n elif any(c in s for c in ' \"\\'\\t\\n\\\\'):\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"\n else:\n return s\n\n def _partition(self):\n \"\"\"Divide the commandline text into chunks around the cursor position.\n\n Return:\n ([parts_before_cursor], 'part_under_cursor', [parts_after_cursor])\n \"\"\"\n text = 
self._cmd.text()[len(self._cmd.prefix()):]\n if not text or not text.strip():\n # Only \":\", empty part under the cursor with nothing before/after\n return [], '', []\n parser = runners.CommandParser()\n result = parser.parse(text, fallback=True, keep=True)\n parts = [x for x in result.cmdline if x]\n pos = self._cmd.cursorPosition() - len(self._cmd.prefix())\n pos = min(pos, len(text)) # Qt treats 2-byte UTF-16 chars as 2 chars\n log.completion.debug('partitioning {} around position {}'.format(parts,\n pos))\n for i, part in enumerate(parts):\n pos -= len(part)\n if pos <= 0:\n if part[pos-1:pos+1].isspace():\n # cursor is in a space between two existing words\n parts.insert(i, '')\n prefix = [x.strip() for x in parts[:i]]\n center = parts[i].strip()\n # strip trailing whitepsace included as a separate token\n postfix = [x.strip() for x in parts[i+1:] if not x.isspace()]\n log.completion.debug(\n \"partitioned: {} '{}' {}\".format(prefix, center, postfix))\n return prefix, center, postfix\n\n raise utils.Unreachable(\"Not all parts consumed: {}\".format(parts))\n\n @pyqtSlot(str)\n def on_selection_changed(self, text):\n \"\"\"Change the completed part if a new item was selected.\n\n Called from the views selectionChanged method.\n\n Args:\n text: Newly selected text.\n \"\"\"\n if text is None:\n return\n before, center, after = self._partition()\n log.completion.debug(\"Changing {} to '{}'\".format(center, text))\n try:\n maxsplit = cmdutils.cmd_dict[before[0]].maxsplit\n except (KeyError, IndexError):\n maxsplit = None\n if maxsplit is None:\n text = self._quote(text)\n model = self._model()\n if model.count() == 1 and config.val.completion.quick:\n # If we only have one item, we want to apply it immediately and go\n # on to the next part, unless we are quick-completing the part\n # after maxsplit, so that we don't keep offering completions\n # (see issue #1519)\n if maxsplit is not None and maxsplit < len(before):\n self._change_completed_part(text, before, after)\n else:\n self._change_completed_part(text, before, after,\n immediate=True)\n else:\n self._change_completed_part(text, before, after)\n\n @pyqtSlot()\n def schedule_completion_update(self):\n \"\"\"Schedule updating/enabling completion.\n\n For performance reasons we don't want to block here, instead we do this\n in the background.\n\n We delay the update only if we've already input some text and ignore\n updates if the text is shorter than completion.min_chars (unless we're\n hitting backspace in which case updates won't be ignored).\n \"\"\"\n _cmd, _sep, rest = self._cmd.text().partition(' ')\n input_length = len(rest)\n if (0 < input_length < config.val.completion.min_chars and\n self._cmd.cursorPosition() > self._last_cursor_pos):\n log.completion.debug(\"Ignoring update because the length of \"\n \"the text is less than completion.min_chars.\")\n elif (self._cmd.cursorPosition() == self._last_cursor_pos and\n self._cmd.text() == self._last_text):\n log.completion.debug(\"Ignoring update because there were no \"\n \"changes.\")\n else:\n log.completion.debug(\"Scheduling completion update.\")\n start_delay = config.val.completion.delay if self._last_text else 0\n self._timer.start(start_delay)\n self._last_cursor_pos = self._cmd.cursorPosition()\n self._last_text = self._cmd.text()\n\n @pyqtSlot()\n def _update_completion(self):\n \"\"\"Check if completions are available and activate them.\"\"\"\n completion = self.parent()\n\n if self._cmd.prefix() != ':':\n # This is a search or gibberish, so we don't need to 
complete\n # anything (yet)\n # FIXME complete searches\n # https://github.com/qutebrowser/qutebrowser/issues/32\n completion.set_model(None)\n self._last_completion_func = None\n return\n\n before_cursor, pattern, after_cursor = self._partition()\n\n log.completion.debug(\"Updating completion: {} {} {}\".format(\n before_cursor, pattern, after_cursor))\n\n pattern = pattern.strip(\"'\\\"\")\n func = self._get_new_completion(before_cursor, pattern)\n\n if func is None:\n log.completion.debug('Clearing completion')\n completion.set_model(None)\n self._last_completion_func = None\n return\n\n if func != self._last_completion_func:\n self._last_completion_func = func\n args = (x for x in before_cursor[1:] if not x.startswith('-'))\n with debug.log_time(log.completion, 'Starting {} completion'\n .format(func.__name__)):\n info = CompletionInfo(config=config.instance,\n keyconf=config.key_instance,\n win_id=self._win_id)\n model = func(*args, info=info)\n with debug.log_time(log.completion, 'Set completion model'):\n completion.set_model(model)\n\n completion.set_pattern(pattern)\n\n def _change_completed_part(self, newtext, before, after, immediate=False):\n \"\"\"Change the part we're currently completing in the commandline.\n\n Args:\n text: The text to set (string) for the token under the cursor.\n before: Commandline tokens before the token under the cursor.\n after: Commandline tokens after the token under the cursor.\n immediate: True if the text should be completed immediately\n including a trailing space and we shouldn't continue\n completing the current item.\n \"\"\"\n text = self._cmd.prefix() + ' '.join(before + [newtext])\n pos = len(text) + (1 if immediate else 0)\n if after:\n text += ' ' + ' '.join(after)\n elif immediate:\n # pad with a space if quick-completing the last entry\n text += ' '\n log.completion.debug(\"setting text = '{}', pos = {}\".format(text, pos))\n\n # generally, we don't want to let self._cmd emit cursorPositionChanged,\n # because that'll schedule a completion update. That happens when\n # tabbing through the completions, and we want to change the command\n # text but we also want to keep the original completion list for the\n # command the user manually entered. The exception is when we're\n # immediately completing, in which case we *do* want to update the\n # completion view so that we can start completing the next part\n if not immediate:\n self._cmd.blockSignals(True)\n\n self._cmd.setText(text)\n self._cmd.setCursorPosition(pos)\n self._cmd.setFocus()\n\n self._cmd.blockSignals(False)\n self._cmd.show_cmd.emit()\n", "path": "qutebrowser/completion/completer.py"}]} | 3,953 | 251 |
gh_patches_debug_32860 | rasdani/github-patches | git_diff | qutebrowser__qutebrowser-3702 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Crash with invalid dictionary filenames
Looks like this doesn't get caught properly on init:
```
16:39:44 DEBUG init app:_init_modules:468 Initializing websettings...
16:39:45 ERROR misc crashsignal:exception_hook:216 Uncaught exception
Traceback (most recent call last):
File "/bin/qutebrowser", line 11, in <module>
load_entry_point('qutebrowser==1.1.1', 'gui_scripts', 'qutebrowser')()
File "/usr/lib/python3.6/site-packages/qutebrowser/qutebrowser.py", line 188, in main
return app.run(args)
File "/usr/lib/python3.6/site-packages/qutebrowser/app.py", line 137, in run
init(args, crash_handler)
File "/usr/lib/python3.6/site-packages/qutebrowser/app.py", line 163, in init
_init_modules(args, crash_handler)
File "/usr/lib/python3.6/site-packages/qutebrowser/app.py", line 469, in _init_modules
websettings.init(args)
File "/usr/lib/python3.6/site-packages/qutebrowser/config/websettings.py", line 215, in init
webenginesettings.init(args)
File "/usr/lib/python3.6/site-packages/qutebrowser/browser/webengine/webenginesettings.py", line 297, in init
websettings.init_mappings(MAPPINGS)
File "/usr/lib/python3.6/site-packages/qutebrowser/config/websettings.py", line 198, in init_mappings
mapping.set(value)
File "/usr/lib/python3.6/site-packages/qutebrowser/config/websettings.py", line 72, in set
self._set(value, settings=settings)
File "/usr/lib/python3.6/site-packages/qutebrowser/browser/webengine/webenginesettings.py", line 154, in _set
filenames = [self._find_installed(code) for code in value]
File "/usr/lib/python3.6/site-packages/qutebrowser/browser/webengine/webenginesettings.py", line 154, in <listcomp>
filenames = [self._find_installed(code) for code in value]
File "/usr/lib/python3.6/site-packages/qutebrowser/browser/webengine/webenginesettings.py", line 143, in _find_installed
local_filename = spell.local_filename(code)
File "/usr/lib/python3.6/site-packages/qutebrowser/browser/webengine/spell.py", line 64, in local_filename
all_installed = local_files(code)
File "/usr/lib/python3.6/site-packages/qutebrowser/browser/webengine/spell.py", line 51, in local_files
for matching_dict in sorted(matching_dicts, key=version, reverse=True):
File "/usr/lib/python3.6/site-packages/qutebrowser/browser/webengine/spell.py", line 36, in version
.format(filename))
ValueError: the given dictionary file name is malformed: /usr/share/qt/qtwebengine_dictionaries/en-US.bdic
```
https://crashes.qutebrowser.org/view/63cd0d83
cc @elshize
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qutebrowser/browser/webengine/spell.py`
Content:
```
1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
2
3 # Copyright 2017-2018 Michal Siedlaczek <michal.siedlaczek@gmail.com>
4
5 # This file is part of qutebrowser.
6 #
7 # qutebrowser is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # qutebrowser is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
19
20 """Installing and configuring spell-checking for QtWebEngine."""
21
22 import glob
23 import os
24 import re
25
26 from PyQt5.QtCore import QLibraryInfo
27 from qutebrowser.utils import log
28
29
30 def version(filename):
31 """Extract the version number from the dictionary file name."""
32 version_re = re.compile(r".+-(?P<version>[0-9]+-[0-9]+?)\.bdic")
33 match = version_re.fullmatch(filename)
34 if match is None:
35 raise ValueError('the given dictionary file name is malformed: {}'
36 .format(filename))
37 return tuple(int(n) for n in match.group('version').split('-'))
38
39
40 def dictionary_dir():
41 """Return the path (str) to the QtWebEngine's dictionaries directory."""
42 datapath = QLibraryInfo.location(QLibraryInfo.DataPath)
43 return os.path.join(datapath, 'qtwebengine_dictionaries')
44
45
46 def local_files(code):
47 """Return all installed dictionaries for the given code."""
48 pathname = os.path.join(dictionary_dir(), '{}*.bdic'.format(code))
49 matching_dicts = glob.glob(pathname)
50 files = []
51 for matching_dict in sorted(matching_dicts, key=version, reverse=True):
52 filename = os.path.basename(matching_dict)
53 log.config.debug('Found file for dict {}: {}'.format(code, filename))
54 files.append(filename)
55 return files
56
57
58 def local_filename(code):
59 """Return the newest installed dictionary for the given code.
60
61 Return the filename of the installed dictionary with the highest version
62 number or None if the dictionary is not installed.
63 """
64 all_installed = local_files(code)
65 return os.path.splitext(all_installed[0])[0] if all_installed else None
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qutebrowser/browser/webengine/spell.py b/qutebrowser/browser/webengine/spell.py
--- a/qutebrowser/browser/webengine/spell.py
+++ b/qutebrowser/browser/webengine/spell.py
@@ -24,16 +24,18 @@
import re
from PyQt5.QtCore import QLibraryInfo
-from qutebrowser.utils import log
+from qutebrowser.utils import log, message
+
+dict_version_re = re.compile(r".+-(?P<version>[0-9]+-[0-9]+?)\.bdic")
def version(filename):
"""Extract the version number from the dictionary file name."""
- version_re = re.compile(r".+-(?P<version>[0-9]+-[0-9]+?)\.bdic")
- match = version_re.fullmatch(filename)
+ match = dict_version_re.match(filename)
if match is None:
- raise ValueError('the given dictionary file name is malformed: {}'
- .format(filename))
+ message.warning(
+ "Found a dictionary with a malformed name: {}".format(filename))
+ return None
return tuple(int(n) for n in match.group('version').split('-'))
@@ -44,15 +46,23 @@
def local_files(code):
- """Return all installed dictionaries for the given code."""
+ """Return all installed dictionaries for the given code.
+
+ The returned dictionaries are sorted by version, therefore the latest will
+ be the first element. The list will be empty if no dictionaries are found.
+ """
pathname = os.path.join(dictionary_dir(), '{}*.bdic'.format(code))
matching_dicts = glob.glob(pathname)
- files = []
- for matching_dict in sorted(matching_dicts, key=version, reverse=True):
- filename = os.path.basename(matching_dict)
- log.config.debug('Found file for dict {}: {}'.format(code, filename))
- files.append(filename)
- return files
+ versioned_dicts = []
+ for matching_dict in matching_dicts:
+ parsed_version = version(matching_dict)
+ if parsed_version is not None:
+ filename = os.path.basename(matching_dict)
+ log.config.debug('Found file for dict {}: {}'
+ .format(code, filename))
+ versioned_dicts.append((parsed_version, filename))
+ return [filename for version, filename
+ in sorted(versioned_dicts, reverse=True)]
def local_filename(code):
| {"golden_diff": "diff --git a/qutebrowser/browser/webengine/spell.py b/qutebrowser/browser/webengine/spell.py\n--- a/qutebrowser/browser/webengine/spell.py\n+++ b/qutebrowser/browser/webengine/spell.py\n@@ -24,16 +24,18 @@\n import re\n \n from PyQt5.QtCore import QLibraryInfo\n-from qutebrowser.utils import log\n+from qutebrowser.utils import log, message\n+\n+dict_version_re = re.compile(r\".+-(?P<version>[0-9]+-[0-9]+?)\\.bdic\")\n \n \n def version(filename):\n \"\"\"Extract the version number from the dictionary file name.\"\"\"\n- version_re = re.compile(r\".+-(?P<version>[0-9]+-[0-9]+?)\\.bdic\")\n- match = version_re.fullmatch(filename)\n+ match = dict_version_re.match(filename)\n if match is None:\n- raise ValueError('the given dictionary file name is malformed: {}'\n- .format(filename))\n+ message.warning(\n+ \"Found a dictionary with a malformed name: {}\".format(filename))\n+ return None\n return tuple(int(n) for n in match.group('version').split('-'))\n \n \n@@ -44,15 +46,23 @@\n \n \n def local_files(code):\n- \"\"\"Return all installed dictionaries for the given code.\"\"\"\n+ \"\"\"Return all installed dictionaries for the given code.\n+\n+ The returned dictionaries are sorted by version, therefore the latest will\n+ be the first element. The list will be empty if no dictionaries are found.\n+ \"\"\"\n pathname = os.path.join(dictionary_dir(), '{}*.bdic'.format(code))\n matching_dicts = glob.glob(pathname)\n- files = []\n- for matching_dict in sorted(matching_dicts, key=version, reverse=True):\n- filename = os.path.basename(matching_dict)\n- log.config.debug('Found file for dict {}: {}'.format(code, filename))\n- files.append(filename)\n- return files\n+ versioned_dicts = []\n+ for matching_dict in matching_dicts:\n+ parsed_version = version(matching_dict)\n+ if parsed_version is not None:\n+ filename = os.path.basename(matching_dict)\n+ log.config.debug('Found file for dict {}: {}'\n+ .format(code, filename))\n+ versioned_dicts.append((parsed_version, filename))\n+ return [filename for version, filename\n+ in sorted(versioned_dicts, reverse=True)]\n \n \n def local_filename(code):\n", "issue": "Crash with invalid dictionary filenames\nLooks like this doesn't get caught properly on init:\r\n\r\n```\r\n16:39:44 DEBUG init app:_init_modules:468 Initializing websettings...\r\n16:39:45 ERROR misc crashsignal:exception_hook:216 Uncaught exception\r\nTraceback (most recent call last):\r\n File \"/bin/qutebrowser\", line 11, in <module>\r\n load_entry_point('qutebrowser==1.1.1', 'gui_scripts', 'qutebrowser')()\r\n File \"/usr/lib/python3.6/site-packages/qutebrowser/qutebrowser.py\", line 188, in main\r\n return app.run(args)\r\n File \"/usr/lib/python3.6/site-packages/qutebrowser/app.py\", line 137, in run\r\n init(args, crash_handler)\r\n File \"/usr/lib/python3.6/site-packages/qutebrowser/app.py\", line 163, in init\r\n _init_modules(args, crash_handler)\r\n File \"/usr/lib/python3.6/site-packages/qutebrowser/app.py\", line 469, in _init_modules\r\n websettings.init(args)\r\n File \"/usr/lib/python3.6/site-packages/qutebrowser/config/websettings.py\", line 215, in init\r\n webenginesettings.init(args)\r\n File \"/usr/lib/python3.6/site-packages/qutebrowser/browser/webengine/webenginesettings.py\", line 297, in init\r\n websettings.init_mappings(MAPPINGS)\r\n File \"/usr/lib/python3.6/site-packages/qutebrowser/config/websettings.py\", line 198, in init_mappings\r\n mapping.set(value)\r\n File \"/usr/lib/python3.6/site-packages/qutebrowser/config/websettings.py\", line 72, in 
set\r\n self._set(value, settings=settings)\r\n File \"/usr/lib/python3.6/site-packages/qutebrowser/browser/webengine/webenginesettings.py\", line 154, in _set\r\n filenames = [self._find_installed(code) for code in value]\r\n File \"/usr/lib/python3.6/site-packages/qutebrowser/browser/webengine/webenginesettings.py\", line 154, in <listcomp>\r\n filenames = [self._find_installed(code) for code in value]\r\n File \"/usr/lib/python3.6/site-packages/qutebrowser/browser/webengine/webenginesettings.py\", line 143, in _find_installed\r\n local_filename = spell.local_filename(code)\r\n File \"/usr/lib/python3.6/site-packages/qutebrowser/browser/webengine/spell.py\", line 64, in local_filename\r\n all_installed = local_files(code)\r\n File \"/usr/lib/python3.6/site-packages/qutebrowser/browser/webengine/spell.py\", line 51, in local_files\r\n for matching_dict in sorted(matching_dicts, key=version, reverse=True):\r\n File \"/usr/lib/python3.6/site-packages/qutebrowser/browser/webengine/spell.py\", line 36, in version\r\n .format(filename))\r\nValueError: the given dictionary file name is malformed: /usr/share/qt/qtwebengine_dictionaries/en-US.bdic\r\n```\r\n\r\nhttps://crashes.qutebrowser.org/view/63cd0d83\r\n\r\ncc @elshize \n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2017-2018 Michal Siedlaczek <michal.siedlaczek@gmail.com>\n\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Installing and configuring spell-checking for QtWebEngine.\"\"\"\n\nimport glob\nimport os\nimport re\n\nfrom PyQt5.QtCore import QLibraryInfo\nfrom qutebrowser.utils import log\n\n\ndef version(filename):\n \"\"\"Extract the version number from the dictionary file name.\"\"\"\n version_re = re.compile(r\".+-(?P<version>[0-9]+-[0-9]+?)\\.bdic\")\n match = version_re.fullmatch(filename)\n if match is None:\n raise ValueError('the given dictionary file name is malformed: {}'\n .format(filename))\n return tuple(int(n) for n in match.group('version').split('-'))\n\n\ndef dictionary_dir():\n \"\"\"Return the path (str) to the QtWebEngine's dictionaries directory.\"\"\"\n datapath = QLibraryInfo.location(QLibraryInfo.DataPath)\n return os.path.join(datapath, 'qtwebengine_dictionaries')\n\n\ndef local_files(code):\n \"\"\"Return all installed dictionaries for the given code.\"\"\"\n pathname = os.path.join(dictionary_dir(), '{}*.bdic'.format(code))\n matching_dicts = glob.glob(pathname)\n files = []\n for matching_dict in sorted(matching_dicts, key=version, reverse=True):\n filename = os.path.basename(matching_dict)\n log.config.debug('Found file for dict {}: {}'.format(code, filename))\n files.append(filename)\n return files\n\n\ndef local_filename(code):\n \"\"\"Return the newest installed dictionary for the given code.\n\n Return the filename of the installed dictionary with the highest version\n number or None if the dictionary is not installed.\n \"\"\"\n all_installed = local_files(code)\n return os.path.splitext(all_installed[0])[0] if all_installed else None\n", "path": "qutebrowser/browser/webengine/spell.py"}], "after_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2017-2018 Michal Siedlaczek <michal.siedlaczek@gmail.com>\n\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Installing and configuring spell-checking for QtWebEngine.\"\"\"\n\nimport glob\nimport os\nimport re\n\nfrom PyQt5.QtCore import QLibraryInfo\nfrom qutebrowser.utils import log, message\n\ndict_version_re = re.compile(r\".+-(?P<version>[0-9]+-[0-9]+?)\\.bdic\")\n\n\ndef version(filename):\n \"\"\"Extract the version number from the dictionary file name.\"\"\"\n match = dict_version_re.match(filename)\n if match is None:\n message.warning(\n \"Found a dictionary with a malformed name: {}\".format(filename))\n return None\n return tuple(int(n) for n in match.group('version').split('-'))\n\n\ndef dictionary_dir():\n \"\"\"Return the path (str) to the QtWebEngine's dictionaries directory.\"\"\"\n datapath = QLibraryInfo.location(QLibraryInfo.DataPath)\n return os.path.join(datapath, 'qtwebengine_dictionaries')\n\n\ndef local_files(code):\n \"\"\"Return all installed dictionaries for the given code.\n\n The returned dictionaries are sorted by version, therefore the latest will\n be the first element. 
The list will be empty if no dictionaries are found.\n \"\"\"\n pathname = os.path.join(dictionary_dir(), '{}*.bdic'.format(code))\n matching_dicts = glob.glob(pathname)\n versioned_dicts = []\n for matching_dict in matching_dicts:\n parsed_version = version(matching_dict)\n if parsed_version is not None:\n filename = os.path.basename(matching_dict)\n log.config.debug('Found file for dict {}: {}'\n .format(code, filename))\n versioned_dicts.append((parsed_version, filename))\n return [filename for version, filename\n in sorted(versioned_dicts, reverse=True)]\n\n\ndef local_filename(code):\n \"\"\"Return the newest installed dictionary for the given code.\n\n Return the filename of the installed dictionary with the highest version\n number or None if the dictionary is not installed.\n \"\"\"\n all_installed = local_files(code)\n return os.path.splitext(all_installed[0])[0] if all_installed else None\n", "path": "qutebrowser/browser/webengine/spell.py"}]} | 1,669 | 537 |
gh_patches_debug_4226 | rasdani/github-patches | git_diff | mlflow__mlflow-3598 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unpin sqlalchemy
Hi,
Currently mlflow pins sqlalchemy to `<= 1.3.13`. I wanted to use this package on a project that requires a more recent version of sqlalchemy.
Would it be possible to unpin sqlalchemy, or to change the constraint to disallow specific versions of it (like `!=1.3.14`, etc.)?
Thanks
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2 from importlib.machinery import SourceFileLoader
3 from setuptools import setup, find_packages
4
5 version = (
6 SourceFileLoader("mlflow.version", os.path.join("mlflow", "version.py")).load_module().VERSION
7 )
8
9
10 # Get a list of all files in the JS directory to include in our module
11 def package_files(directory):
12 paths = []
13 for (path, _, filenames) in os.walk(directory):
14 for filename in filenames:
15 paths.append(os.path.join("..", path, filename))
16 return paths
17
18
19 # Prints out a set of paths (relative to the mlflow/ directory) of files in mlflow/server/js/build
20 # to include in the wheel, e.g. "../mlflow/server/js/build/index.html"
21 js_files = package_files("mlflow/server/js/build")
22 models_container_server_files = package_files("mlflow/models/container")
23 alembic_files = [
24 "../mlflow/store/db_migrations/alembic.ini",
25 "../mlflow/temporary_db_migrations_for_pre_1_users/alembic.ini",
26 ]
27
28 setup(
29 name="mlflow",
30 version=version,
31 packages=find_packages(exclude=["tests", "tests.*"]),
32 package_data={"mlflow": js_files + models_container_server_files + alembic_files},
33 install_requires=[
34 "alembic<=1.4.1",
35 # Required
36 "azure-storage-blob",
37 "click>=7.0",
38 "cloudpickle",
39 "databricks-cli>=0.8.7",
40 "requests>=2.17.3",
41 "six>=1.10.0",
42 'waitress; platform_system == "Windows"',
43 'gunicorn; platform_system != "Windows"',
44 "Flask",
45 "numpy",
46 "pandas",
47 "python-dateutil",
48 "protobuf>=3.6.0",
49 "gitpython>=2.1.0",
50 "pyyaml",
51 "querystring_parser",
52 "docker>=4.0.0",
53 "entrypoints",
54 # Pin sqlparse for: https://github.com/mlflow/mlflow/issues/3433
55 "sqlparse>=0.3.1",
56 # Required to run the MLflow server against SQL-backed storage
57 "sqlalchemy<=1.3.13",
58 "gorilla",
59 "prometheus-flask-exporter",
60 ],
61 extras_require={
62 "extras": [
63 "scikit-learn",
64 # Required to log artifacts and models to HDFS artifact locations
65 "pyarrow",
66 # Required to log artifacts and models to AWS S3 artifact locations
67 "boto3",
68 "mleap",
69 # Required to log artifacts and models to GCS artifact locations
70 "google-cloud-storage",
71 "azureml-core>=1.2.0",
72 # Required to log artifacts to SFTP artifact locations
73 "pysftp",
74 # Required by the mlflow.projects module, when running projects against
75 # a remote Kubernetes cluster
76 "kubernetes",
77 ],
78 "sqlserver": ["mlflow-dbstore",],
79 "aliyun-oss": ["aliyunstoreplugin",],
80 },
81 entry_points="""
82 [console_scripts]
83 mlflow=mlflow.cli:cli
84 """,
85 zip_safe=False,
86 author="Databricks",
87 description="MLflow: A Platform for ML Development and Productionization",
88 long_description=open("README.rst").read(),
89 license="Apache License 2.0",
90 classifiers=["Intended Audience :: Developers", "Programming Language :: Python :: 3.6",],
91 keywords="ml ai databricks",
92 url="https://mlflow.org/",
93 python_requires=">=3.5",
94 project_urls={
95 "Bug Tracker": "https://github.com/mlflow/mlflow/issues",
96 "Documentation": "https://mlflow.org/docs/latest/index.html",
97 "Source Code": "https://github.com/mlflow/mlflow",
98 },
99 )
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -54,7 +54,7 @@
# Pin sqlparse for: https://github.com/mlflow/mlflow/issues/3433
"sqlparse>=0.3.1",
# Required to run the MLflow server against SQL-backed storage
- "sqlalchemy<=1.3.13",
+ "sqlalchemy",
"gorilla",
"prometheus-flask-exporter",
],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -54,7 +54,7 @@\n # Pin sqlparse for: https://github.com/mlflow/mlflow/issues/3433\n \"sqlparse>=0.3.1\",\n # Required to run the MLflow server against SQL-backed storage\n- \"sqlalchemy<=1.3.13\",\n+ \"sqlalchemy\",\n \"gorilla\",\n \"prometheus-flask-exporter\",\n ],\n", "issue": "Unpin sqlalchemy\nHi,\r\n\r\nCurrently mlflow pins sqlalchemy to `<= 1.3.13`. I wanted to use this package on a projects that requires a more updated version of sqlalchemy.\r\nWould it be possible to unpit sqlalchemy or to change the constraint to disallow specific version of it? (like `!=1.3.14` etc?)\r\n\r\nThanks\n", "before_files": [{"content": "import os\nfrom importlib.machinery import SourceFileLoader\nfrom setuptools import setup, find_packages\n\nversion = (\n SourceFileLoader(\"mlflow.version\", os.path.join(\"mlflow\", \"version.py\")).load_module().VERSION\n)\n\n\n# Get a list of all files in the JS directory to include in our module\ndef package_files(directory):\n paths = []\n for (path, _, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join(\"..\", path, filename))\n return paths\n\n\n# Prints out a set of paths (relative to the mlflow/ directory) of files in mlflow/server/js/build\n# to include in the wheel, e.g. \"../mlflow/server/js/build/index.html\"\njs_files = package_files(\"mlflow/server/js/build\")\nmodels_container_server_files = package_files(\"mlflow/models/container\")\nalembic_files = [\n \"../mlflow/store/db_migrations/alembic.ini\",\n \"../mlflow/temporary_db_migrations_for_pre_1_users/alembic.ini\",\n]\n\nsetup(\n name=\"mlflow\",\n version=version,\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={\"mlflow\": js_files + models_container_server_files + alembic_files},\n install_requires=[\n \"alembic<=1.4.1\",\n # Required\n \"azure-storage-blob\",\n \"click>=7.0\",\n \"cloudpickle\",\n \"databricks-cli>=0.8.7\",\n \"requests>=2.17.3\",\n \"six>=1.10.0\",\n 'waitress; platform_system == \"Windows\"',\n 'gunicorn; platform_system != \"Windows\"',\n \"Flask\",\n \"numpy\",\n \"pandas\",\n \"python-dateutil\",\n \"protobuf>=3.6.0\",\n \"gitpython>=2.1.0\",\n \"pyyaml\",\n \"querystring_parser\",\n \"docker>=4.0.0\",\n \"entrypoints\",\n # Pin sqlparse for: https://github.com/mlflow/mlflow/issues/3433\n \"sqlparse>=0.3.1\",\n # Required to run the MLflow server against SQL-backed storage\n \"sqlalchemy<=1.3.13\",\n \"gorilla\",\n \"prometheus-flask-exporter\",\n ],\n extras_require={\n \"extras\": [\n \"scikit-learn\",\n # Required to log artifacts and models to HDFS artifact locations\n \"pyarrow\",\n # Required to log artifacts and models to AWS S3 artifact locations\n \"boto3\",\n \"mleap\",\n # Required to log artifacts and models to GCS artifact locations\n \"google-cloud-storage\",\n \"azureml-core>=1.2.0\",\n # Required to log artifacts to SFTP artifact locations\n \"pysftp\",\n # Required by the mlflow.projects module, when running projects against\n # a remote Kubernetes cluster\n \"kubernetes\",\n ],\n \"sqlserver\": [\"mlflow-dbstore\",],\n \"aliyun-oss\": [\"aliyunstoreplugin\",],\n },\n entry_points=\"\"\"\n [console_scripts]\n mlflow=mlflow.cli:cli\n \"\"\",\n zip_safe=False,\n author=\"Databricks\",\n description=\"MLflow: A Platform for ML Development and Productionization\",\n long_description=open(\"README.rst\").read(),\n license=\"Apache License 2.0\",\n classifiers=[\"Intended Audience :: Developers\", 
\"Programming Language :: Python :: 3.6\",],\n keywords=\"ml ai databricks\",\n url=\"https://mlflow.org/\",\n python_requires=\">=3.5\",\n project_urls={\n \"Bug Tracker\": \"https://github.com/mlflow/mlflow/issues\",\n \"Documentation\": \"https://mlflow.org/docs/latest/index.html\",\n \"Source Code\": \"https://github.com/mlflow/mlflow\",\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "import os\nfrom importlib.machinery import SourceFileLoader\nfrom setuptools import setup, find_packages\n\nversion = (\n SourceFileLoader(\"mlflow.version\", os.path.join(\"mlflow\", \"version.py\")).load_module().VERSION\n)\n\n\n# Get a list of all files in the JS directory to include in our module\ndef package_files(directory):\n paths = []\n for (path, _, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join(\"..\", path, filename))\n return paths\n\n\n# Prints out a set of paths (relative to the mlflow/ directory) of files in mlflow/server/js/build\n# to include in the wheel, e.g. \"../mlflow/server/js/build/index.html\"\njs_files = package_files(\"mlflow/server/js/build\")\nmodels_container_server_files = package_files(\"mlflow/models/container\")\nalembic_files = [\n \"../mlflow/store/db_migrations/alembic.ini\",\n \"../mlflow/temporary_db_migrations_for_pre_1_users/alembic.ini\",\n]\n\nsetup(\n name=\"mlflow\",\n version=version,\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={\"mlflow\": js_files + models_container_server_files + alembic_files},\n install_requires=[\n \"alembic<=1.4.1\",\n # Required\n \"azure-storage-blob\",\n \"click>=7.0\",\n \"cloudpickle\",\n \"databricks-cli>=0.8.7\",\n \"requests>=2.17.3\",\n \"six>=1.10.0\",\n 'waitress; platform_system == \"Windows\"',\n 'gunicorn; platform_system != \"Windows\"',\n \"Flask\",\n \"numpy\",\n \"pandas\",\n \"python-dateutil\",\n \"protobuf>=3.6.0\",\n \"gitpython>=2.1.0\",\n \"pyyaml\",\n \"querystring_parser\",\n \"docker>=4.0.0\",\n \"entrypoints\",\n # Pin sqlparse for: https://github.com/mlflow/mlflow/issues/3433\n \"sqlparse>=0.3.1\",\n # Required to run the MLflow server against SQL-backed storage\n \"sqlalchemy\",\n \"gorilla\",\n \"prometheus-flask-exporter\",\n ],\n extras_require={\n \"extras\": [\n \"scikit-learn\",\n # Required to log artifacts and models to HDFS artifact locations\n \"pyarrow\",\n # Required to log artifacts and models to AWS S3 artifact locations\n \"boto3\",\n \"mleap\",\n # Required to log artifacts and models to GCS artifact locations\n \"google-cloud-storage\",\n \"azureml-core>=1.2.0\",\n # Required to log artifacts to SFTP artifact locations\n \"pysftp\",\n # Required by the mlflow.projects module, when running projects against\n # a remote Kubernetes cluster\n \"kubernetes\",\n ],\n \"sqlserver\": [\"mlflow-dbstore\",],\n \"aliyun-oss\": [\"aliyunstoreplugin\",],\n },\n entry_points=\"\"\"\n [console_scripts]\n mlflow=mlflow.cli:cli\n \"\"\",\n zip_safe=False,\n author=\"Databricks\",\n description=\"MLflow: A Platform for ML Development and Productionization\",\n long_description=open(\"README.rst\").read(),\n license=\"Apache License 2.0\",\n classifiers=[\"Intended Audience :: Developers\", \"Programming Language :: Python :: 3.6\",],\n keywords=\"ml ai databricks\",\n url=\"https://mlflow.org/\",\n python_requires=\">=3.5\",\n project_urls={\n \"Bug Tracker\": \"https://github.com/mlflow/mlflow/issues\",\n \"Documentation\": \"https://mlflow.org/docs/latest/index.html\",\n \"Source Code\": 
\"https://github.com/mlflow/mlflow\",\n },\n)\n", "path": "setup.py"}]} | 1,377 | 114 |
gh_patches_debug_31342 | rasdani/github-patches | git_diff | aws__aws-cli-1562 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] [aws kinesis] Unexpected output values by table & text format on describe-streams
- aws --version aws-cli/1.7.35 Python/2.7.5 Darwin/13.4.0
# Normal behavior
The expected output is "ACTIVE".
command (no output option):
``` bash
$ aws kinesis describe-stream --stream-name ${STREAM_NAME} --query 'StreamDescription.StreamStatus'
```
result:
``` text
"ACTIVE"
```
command (output json):
``` bash
$ aws kinesis describe-stream --stream-name ${STREAM_NAME} --query 'StreamDescription.StreamStatus' --output json
```
result:
``` text
"ACTIVE"
```
# BUG
The expected output is still "ACTIVE", but the following commands return something else.
command (output table):
``` bash
$ aws kinesis describe-stream --stream-name ${STREAM_NAME} --query 'StreamDescription.StreamStatus' --output table
```
result:
``` text
----------------
|DescribeStream|
+--------------+
```
command (output text):
``` bash
$ aws kinesis describe-stream --stream-name ${STREAM_NAME} --query 'StreamDescription.StreamStatus' --output text
```
result:
``` text
None
```
This happens not only with "StreamStatus" but also with "StreamName".
Thank you.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awscli/customizations/paginate.py`
Content:
```
1 # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 """This module has customizations to unify paging paramters.
14
15 For any operation that can be paginated, we will:
16
17 * Hide the service specific pagination params. This can vary across
18 services and we're going to replace them with a consistent set of
19 arguments. The arguments will still work, but they are not
20 documented. This allows us to add a pagination config after
21 the fact and still remain backwards compatible with users that
22 were manually doing pagination.
23 * Add a ``--starting-token`` and a ``--max-items`` argument.
24
25 """
26 import logging
27 from functools import partial
28
29 from botocore import xform_name
30 from botocore.exceptions import DataNotFoundError
31 from botocore import model
32
33 from awscli.arguments import BaseCLIArgument
34
35
36 logger = logging.getLogger(__name__)
37
38
39 STARTING_TOKEN_HELP = """
40 <p>A token to specify where to start paginating. This is the
41 <code>NextToken</code> from a previously truncated response.</p>
42 """
43
44 MAX_ITEMS_HELP = """
45 <p>The total number of items to return. If the total number
46 of items available is more than the value specified in
47 max-items then a <code>NextToken</code> will
48 be provided in the output that you can use to resume pagination.
49 This <code>NextToken</code> response element should <b>not</b> be
50 used directly outside of the AWS CLI.</p>
51 """
52
53 PAGE_SIZE_HELP = """
54 <p>The size of each page.<p>
55 """
56
57
58 def register_pagination(event_handlers):
59 event_handlers.register('building-argument-table',
60 unify_paging_params)
61
62
63 def get_paginator_config(session, service_name, operation_name):
64 try:
65 paginator_model = session.get_paginator_model(service_name)
66 except DataNotFoundError:
67 return None
68 try:
69 operation_paginator_config = paginator_model.get_paginator(
70 operation_name)
71 except ValueError:
72 return None
73 return operation_paginator_config
74
75
76 def unify_paging_params(argument_table, operation_model, event_name,
77 session, **kwargs):
78 paginator_config = get_paginator_config(
79 session, operation_model.service_model.service_name,
80 operation_model.name)
81 if paginator_config is None:
82 # We only apply these customizations to paginated responses.
83 return
84 logger.debug("Modifying paging parameters for operation: %s",
85 operation_model.name)
86 _remove_existing_paging_arguments(argument_table, paginator_config)
87 parsed_args_event = event_name.replace('building-argument-table.',
88 'operation-args-parsed.')
89 shadowed_args = {}
90 add_paging_argument(argument_table, 'starting-token',
91 PageArgument('starting-token', STARTING_TOKEN_HELP,
92 parse_type='string',
93 serialized_name='StartingToken'),
94 shadowed_args)
95 input_members = operation_model.input_shape.members
96 type_name = 'integer'
97 if 'limit_key' in paginator_config:
98 limit_key_shape = input_members[paginator_config['limit_key']]
99 type_name = limit_key_shape.type_name
100 if type_name not in PageArgument.type_map:
101 raise TypeError(
102 ('Unsupported pagination type {0} for operation {1}'
103 ' and parameter {2}').format(
104 type_name, operation_model.name,
105 paginator_config['limit_key']))
106 add_paging_argument(argument_table, 'page-size',
107 PageArgument('page-size', PAGE_SIZE_HELP,
108 parse_type=type_name,
109 serialized_name='PageSize'),
110 shadowed_args)
111
112 add_paging_argument(argument_table, 'max-items',
113 PageArgument('max-items', MAX_ITEMS_HELP,
114 parse_type=type_name,
115 serialized_name='MaxItems'),
116 shadowed_args)
117 session.register(
118 parsed_args_event,
119 partial(check_should_enable_pagination,
120 list(_get_all_cli_input_tokens(paginator_config)),
121 shadowed_args, argument_table))
122
123
124 def add_paging_argument(argument_table, arg_name, argument, shadowed_args):
125 if arg_name in argument_table:
126 # If there's already an entry in the arg table for this argument,
127 # this means we're shadowing an argument for this operation. We
128 # need to store this later in case pagination is turned off because
129 # we put these arguments back.
130 # See the comment in check_should_enable_pagination() for more info.
131 shadowed_args[arg_name] = argument_table[arg_name]
132 argument_table[arg_name] = argument
133
134
135 def check_should_enable_pagination(input_tokens, shadowed_args, argument_table,
136 parsed_args, parsed_globals, **kwargs):
137 normalized_paging_args = ['start_token', 'max_items']
138 for token in input_tokens:
139 py_name = token.replace('-', '_')
140 if getattr(parsed_args, py_name) is not None and \
141 py_name not in normalized_paging_args:
142 # The user has specified a manual (undocumented) pagination arg.
143 # We need to automatically turn pagination off.
144 logger.debug("User has specified a manual pagination arg. "
145 "Automatically setting --no-paginate.")
146 parsed_globals.paginate = False
147 # Because we've now disabled pagination, there's a chance that
148 # we were shadowing arguments. For example, we inject a
149 # --max-items argument in unify_paging_params(). If the
150 # the operation also provides its own MaxItems (which we
151 # expose as --max-items) then our custom pagination arg
152 # was shadowing the customers arg. When we turn pagination
153 # off we need to put back the original argument which is
154 # what we're doing here.
155 for key, value in shadowed_args.items():
156 argument_table[key] = value
157
158
159 def _remove_existing_paging_arguments(argument_table, pagination_config):
160 for cli_name in _get_all_cli_input_tokens(pagination_config):
161 argument_table[cli_name]._UNDOCUMENTED = True
162
163
164 def _get_all_cli_input_tokens(pagination_config):
165 # Get all input tokens including the limit_key
166 # if it exists.
167 tokens = _get_input_tokens(pagination_config)
168 for token_name in tokens:
169 cli_name = xform_name(token_name, '-')
170 yield cli_name
171 if 'limit_key' in pagination_config:
172 key_name = pagination_config['limit_key']
173 cli_name = xform_name(key_name, '-')
174 yield cli_name
175
176
177 def _get_input_tokens(pagination_config):
178 tokens = pagination_config['input_token']
179 if not isinstance(tokens, list):
180 return [tokens]
181 return tokens
182
183
184 def _get_cli_name(param_objects, token_name):
185 for param in param_objects:
186 if param.name == token_name:
187 return param.cli_name.lstrip('-')
188
189
190 class PageArgument(BaseCLIArgument):
191 type_map = {
192 'string': str,
193 'integer': int,
194 }
195
196 def __init__(self, name, documentation, parse_type, serialized_name):
197 self.argument_model = model.Shape('PageArgument', {'type': 'string'})
198 self._name = name
199 self._serialized_name = serialized_name
200 self._documentation = documentation
201 self._parse_type = parse_type
202 self._required = False
203
204 @property
205 def cli_name(self):
206 return '--' + self._name
207
208 @property
209 def cli_type_name(self):
210 return self._parse_type
211
212 @property
213 def required(self):
214 return self._required
215
216 @required.setter
217 def required(self, value):
218 self._required = value
219
220 @property
221 def documentation(self):
222 return self._documentation
223
224 def add_to_parser(self, parser):
225 parser.add_argument(self.cli_name, dest=self.py_name,
226 type=self.type_map[self._parse_type])
227
228 def add_to_params(self, parameters, value):
229 if value is not None:
230 pagination_config = parameters.get('PaginationConfig', {})
231 pagination_config[self._serialized_name] = value
232 parameters['PaginationConfig'] = pagination_config
233
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/awscli/customizations/paginate.py b/awscli/customizations/paginate.py
--- a/awscli/customizations/paginate.py
+++ b/awscli/customizations/paginate.py
@@ -56,8 +56,8 @@
def register_pagination(event_handlers):
- event_handlers.register('building-argument-table',
- unify_paging_params)
+ event_handlers.register('building-argument-table', unify_paging_params)
+ event_handlers.register_last('doc-description', add_paging_description)
def get_paginator_config(session, service_name, operation_name):
@@ -73,6 +73,35 @@
return operation_paginator_config
+def add_paging_description(help_command, **kwargs):
+ # This customization is only applied to the description of
+ # Operations, so we must filter out all other events.
+ if not isinstance(help_command.obj, model.OperationModel):
+ return
+ service_name = help_command.obj.service_model.service_name
+ paginator_config = get_paginator_config(
+ help_command.session, service_name, help_command.obj.name)
+ if not paginator_config:
+ return
+ help_command.doc.style.new_paragraph()
+ help_command.doc.writeln(
+ ('``%s`` is a paginated operation. Multiple API calls may be issued '
+ 'in order to retrieve the entire data set of results. You can '
+ 'disable pagination by providing the ``--no-paginate`` argument.')
+ % help_command.name)
+ # Only include result key information if it is present.
+ if paginator_config.get('result_key'):
+ queries = paginator_config['result_key']
+ if type(queries) is not list:
+ queries = [queries]
+ queries = ", ".join([('``%s``' % s) for s in queries])
+ help_command.doc.writeln(
+ ('When using ``--output text`` and the ``--query`` argument on a '
+ 'paginated response, the ``--query`` argument must extract data '
+ 'from the results of the following query expressions: %s')
+ % queries)
+
+
def unify_paging_params(argument_table, operation_model, event_name,
session, **kwargs):
paginator_config = get_paginator_config(
| {"golden_diff": "diff --git a/awscli/customizations/paginate.py b/awscli/customizations/paginate.py\n--- a/awscli/customizations/paginate.py\n+++ b/awscli/customizations/paginate.py\n@@ -56,8 +56,8 @@\n \n \n def register_pagination(event_handlers):\n- event_handlers.register('building-argument-table',\n- unify_paging_params)\n+ event_handlers.register('building-argument-table', unify_paging_params)\n+ event_handlers.register_last('doc-description', add_paging_description)\n \n \n def get_paginator_config(session, service_name, operation_name):\n@@ -73,6 +73,35 @@\n return operation_paginator_config\n \n \n+def add_paging_description(help_command, **kwargs):\n+ # This customization is only applied to the description of\n+ # Operations, so we must filter out all other events.\n+ if not isinstance(help_command.obj, model.OperationModel):\n+ return\n+ service_name = help_command.obj.service_model.service_name\n+ paginator_config = get_paginator_config(\n+ help_command.session, service_name, help_command.obj.name)\n+ if not paginator_config:\n+ return\n+ help_command.doc.style.new_paragraph()\n+ help_command.doc.writeln(\n+ ('``%s`` is a paginated operation. Multiple API calls may be issued '\n+ 'in order to retrieve the entire data set of results. You can '\n+ 'disable pagination by providing the ``--no-paginate`` argument.')\n+ % help_command.name)\n+ # Only include result key information if it is present.\n+ if paginator_config.get('result_key'):\n+ queries = paginator_config['result_key']\n+ if type(queries) is not list:\n+ queries = [queries]\n+ queries = \", \".join([('``%s``' % s) for s in queries])\n+ help_command.doc.writeln(\n+ ('When using ``--output text`` and the ``--query`` argument on a '\n+ 'paginated response, the ``--query`` argument must extract data '\n+ 'from the results of the following query expressions: %s')\n+ % queries)\n+\n+\n def unify_paging_params(argument_table, operation_model, event_name,\n session, **kwargs):\n paginator_config = get_paginator_config(\n", "issue": "[BUG] [aws kinesis] Unexpected output values by table & text format on describe-streams\n- aws --version aws-cli/1.7.35 Python/2.7.5 Darwin/13.4.0\n# normality\n\nIt expects \"ACTIVE\".\n\ncommand (no output option):\n\n``` bash\n$ aws kinesis describe-stream --stream-name ${STREAM_NAME} --query 'StreamDescription.StreamStatus'\n```\n\nresult:\n\n``` text\n\"ACTIVE\"\n```\n\ncommand (output json):\n\n``` bash\n$ aws kinesis describe-stream --stream-name ${STREAM_NAME} --query 'StreamDescription.StreamStatus' --output json\n```\n\nresult:\n\n``` text\n\"ACTIVE\"\n```\n# BUG\n\nIt expects \"ACTIVE\".\n\ncommand (output table):\n\n``` bash\n$ aws kinesis describe-stream --stream-name ${STREAM_NAME} --query 'StreamDescription.StreamStatus' --output table\n```\n\nresult:\n\n``` text\n----------------\n|DescribeStream|\n+--------------+\n```\n\ncommand (output text):\n\n``` bash\n$ aws kinesis describe-stream --stream-name ${STREAM_NAME} --query 'StreamDescription.StreamStatus' --output text\n```\n\nresult:\n\n``` text\nNone\n```\n\nIt happened not only \"StreamStatus\" but also \"StreamName\".\n\nThank you.\n\n", "before_files": [{"content": "# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. 
This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"This module has customizations to unify paging paramters.\n\nFor any operation that can be paginated, we will:\n\n * Hide the service specific pagination params. This can vary across\n services and we're going to replace them with a consistent set of\n arguments. The arguments will still work, but they are not\n documented. This allows us to add a pagination config after\n the fact and still remain backwards compatible with users that\n were manually doing pagination.\n * Add a ``--starting-token`` and a ``--max-items`` argument.\n\n\"\"\"\nimport logging\nfrom functools import partial\n\nfrom botocore import xform_name\nfrom botocore.exceptions import DataNotFoundError\nfrom botocore import model\n\nfrom awscli.arguments import BaseCLIArgument\n\n\nlogger = logging.getLogger(__name__)\n\n\nSTARTING_TOKEN_HELP = \"\"\"\n<p>A token to specify where to start paginating. This is the\n<code>NextToken</code> from a previously truncated response.</p>\n\"\"\"\n\nMAX_ITEMS_HELP = \"\"\"\n<p>The total number of items to return. If the total number\nof items available is more than the value specified in\nmax-items then a <code>NextToken</code> will\nbe provided in the output that you can use to resume pagination.\nThis <code>NextToken</code> response element should <b>not</b> be\nused directly outside of the AWS CLI.</p>\n\"\"\"\n\nPAGE_SIZE_HELP = \"\"\"\n<p>The size of each page.<p>\n\"\"\"\n\n\ndef register_pagination(event_handlers):\n event_handlers.register('building-argument-table',\n unify_paging_params)\n\n\ndef get_paginator_config(session, service_name, operation_name):\n try:\n paginator_model = session.get_paginator_model(service_name)\n except DataNotFoundError:\n return None\n try:\n operation_paginator_config = paginator_model.get_paginator(\n operation_name)\n except ValueError:\n return None\n return operation_paginator_config\n\n\ndef unify_paging_params(argument_table, operation_model, event_name,\n session, **kwargs):\n paginator_config = get_paginator_config(\n session, operation_model.service_model.service_name,\n operation_model.name)\n if paginator_config is None:\n # We only apply these customizations to paginated responses.\n return\n logger.debug(\"Modifying paging parameters for operation: %s\",\n operation_model.name)\n _remove_existing_paging_arguments(argument_table, paginator_config)\n parsed_args_event = event_name.replace('building-argument-table.',\n 'operation-args-parsed.')\n shadowed_args = {}\n add_paging_argument(argument_table, 'starting-token',\n PageArgument('starting-token', STARTING_TOKEN_HELP,\n parse_type='string',\n serialized_name='StartingToken'),\n shadowed_args)\n input_members = operation_model.input_shape.members\n type_name = 'integer'\n if 'limit_key' in paginator_config:\n limit_key_shape = input_members[paginator_config['limit_key']]\n type_name = limit_key_shape.type_name\n if type_name not in PageArgument.type_map:\n raise TypeError(\n ('Unsupported pagination type {0} for operation {1}'\n ' and parameter {2}').format(\n type_name, operation_model.name,\n paginator_config['limit_key']))\n add_paging_argument(argument_table, 'page-size',\n PageArgument('page-size', PAGE_SIZE_HELP,\n parse_type=type_name,\n serialized_name='PageSize'),\n shadowed_args)\n\n add_paging_argument(argument_table, 'max-items',\n 
PageArgument('max-items', MAX_ITEMS_HELP,\n parse_type=type_name,\n serialized_name='MaxItems'),\n shadowed_args)\n session.register(\n parsed_args_event,\n partial(check_should_enable_pagination,\n list(_get_all_cli_input_tokens(paginator_config)),\n shadowed_args, argument_table))\n\n\ndef add_paging_argument(argument_table, arg_name, argument, shadowed_args):\n if arg_name in argument_table:\n # If there's already an entry in the arg table for this argument,\n # this means we're shadowing an argument for this operation. We\n # need to store this later in case pagination is turned off because\n # we put these arguments back.\n # See the comment in check_should_enable_pagination() for more info.\n shadowed_args[arg_name] = argument_table[arg_name]\n argument_table[arg_name] = argument\n\n\ndef check_should_enable_pagination(input_tokens, shadowed_args, argument_table,\n parsed_args, parsed_globals, **kwargs):\n normalized_paging_args = ['start_token', 'max_items']\n for token in input_tokens:\n py_name = token.replace('-', '_')\n if getattr(parsed_args, py_name) is not None and \\\n py_name not in normalized_paging_args:\n # The user has specified a manual (undocumented) pagination arg.\n # We need to automatically turn pagination off.\n logger.debug(\"User has specified a manual pagination arg. \"\n \"Automatically setting --no-paginate.\")\n parsed_globals.paginate = False\n # Because we've now disabled pagination, there's a chance that\n # we were shadowing arguments. For example, we inject a\n # --max-items argument in unify_paging_params(). If the\n # the operation also provides its own MaxItems (which we\n # expose as --max-items) then our custom pagination arg\n # was shadowing the customers arg. When we turn pagination\n # off we need to put back the original argument which is\n # what we're doing here.\n for key, value in shadowed_args.items():\n argument_table[key] = value\n\n\ndef _remove_existing_paging_arguments(argument_table, pagination_config):\n for cli_name in _get_all_cli_input_tokens(pagination_config):\n argument_table[cli_name]._UNDOCUMENTED = True\n\n\ndef _get_all_cli_input_tokens(pagination_config):\n # Get all input tokens including the limit_key\n # if it exists.\n tokens = _get_input_tokens(pagination_config)\n for token_name in tokens:\n cli_name = xform_name(token_name, '-')\n yield cli_name\n if 'limit_key' in pagination_config:\n key_name = pagination_config['limit_key']\n cli_name = xform_name(key_name, '-')\n yield cli_name\n\n\ndef _get_input_tokens(pagination_config):\n tokens = pagination_config['input_token']\n if not isinstance(tokens, list):\n return [tokens]\n return tokens\n\n\ndef _get_cli_name(param_objects, token_name):\n for param in param_objects:\n if param.name == token_name:\n return param.cli_name.lstrip('-')\n\n\nclass PageArgument(BaseCLIArgument):\n type_map = {\n 'string': str,\n 'integer': int,\n }\n\n def __init__(self, name, documentation, parse_type, serialized_name):\n self.argument_model = model.Shape('PageArgument', {'type': 'string'})\n self._name = name\n self._serialized_name = serialized_name\n self._documentation = documentation\n self._parse_type = parse_type\n self._required = False\n\n @property\n def cli_name(self):\n return '--' + self._name\n\n @property\n def cli_type_name(self):\n return self._parse_type\n\n @property\n def required(self):\n return self._required\n\n @required.setter\n def required(self, value):\n self._required = value\n\n @property\n def documentation(self):\n return self._documentation\n\n def 
add_to_parser(self, parser):\n parser.add_argument(self.cli_name, dest=self.py_name,\n type=self.type_map[self._parse_type])\n\n def add_to_params(self, parameters, value):\n if value is not None:\n pagination_config = parameters.get('PaginationConfig', {})\n pagination_config[self._serialized_name] = value\n parameters['PaginationConfig'] = pagination_config\n", "path": "awscli/customizations/paginate.py"}], "after_files": [{"content": "# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"This module has customizations to unify paging paramters.\n\nFor any operation that can be paginated, we will:\n\n * Hide the service specific pagination params. This can vary across\n services and we're going to replace them with a consistent set of\n arguments. The arguments will still work, but they are not\n documented. This allows us to add a pagination config after\n the fact and still remain backwards compatible with users that\n were manually doing pagination.\n * Add a ``--starting-token`` and a ``--max-items`` argument.\n\n\"\"\"\nimport logging\nfrom functools import partial\n\nfrom botocore import xform_name\nfrom botocore.exceptions import DataNotFoundError\nfrom botocore import model\n\nfrom awscli.arguments import BaseCLIArgument\n\n\nlogger = logging.getLogger(__name__)\n\n\nSTARTING_TOKEN_HELP = \"\"\"\n<p>A token to specify where to start paginating. This is the\n<code>NextToken</code> from a previously truncated response.</p>\n\"\"\"\n\nMAX_ITEMS_HELP = \"\"\"\n<p>The total number of items to return. If the total number\nof items available is more than the value specified in\nmax-items then a <code>NextToken</code> will\nbe provided in the output that you can use to resume pagination.\nThis <code>NextToken</code> response element should <b>not</b> be\nused directly outside of the AWS CLI.</p>\n\"\"\"\n\nPAGE_SIZE_HELP = \"\"\"\n<p>The size of each page.<p>\n\"\"\"\n\n\ndef register_pagination(event_handlers):\n event_handlers.register('building-argument-table', unify_paging_params)\n event_handlers.register_last('doc-description', add_paging_description)\n\n\ndef get_paginator_config(session, service_name, operation_name):\n try:\n paginator_model = session.get_paginator_model(service_name)\n except DataNotFoundError:\n return None\n try:\n operation_paginator_config = paginator_model.get_paginator(\n operation_name)\n except ValueError:\n return None\n return operation_paginator_config\n\n\ndef add_paging_description(help_command, **kwargs):\n # This customization is only applied to the description of\n # Operations, so we must filter out all other events.\n if not isinstance(help_command.obj, model.OperationModel):\n return\n service_name = help_command.obj.service_model.service_name\n paginator_config = get_paginator_config(\n help_command.session, service_name, help_command.obj.name)\n if not paginator_config:\n return\n help_command.doc.style.new_paragraph()\n help_command.doc.writeln(\n ('``%s`` is a paginated operation. 
Multiple API calls may be issued '\n 'in order to retrieve the entire data set of results. You can '\n 'disable pagination by providing the ``--no-paginate`` argument.')\n % help_command.name)\n # Only include result key information if it is present.\n if paginator_config.get('result_key'):\n queries = paginator_config['result_key']\n if type(queries) is not list:\n queries = [queries]\n queries = \", \".join([('``%s``' % s) for s in queries])\n help_command.doc.writeln(\n ('When using ``--output text`` and the ``--query`` argument on a '\n 'paginated response, the ``--query`` argument must extract data '\n 'from the results of the following query expressions: %s')\n % queries)\n\n\ndef unify_paging_params(argument_table, operation_model, event_name,\n session, **kwargs):\n paginator_config = get_paginator_config(\n session, operation_model.service_model.service_name,\n operation_model.name)\n if paginator_config is None:\n # We only apply these customizations to paginated responses.\n return\n logger.debug(\"Modifying paging parameters for operation: %s\",\n operation_model.name)\n _remove_existing_paging_arguments(argument_table, paginator_config)\n parsed_args_event = event_name.replace('building-argument-table.',\n 'operation-args-parsed.')\n shadowed_args = {}\n add_paging_argument(argument_table, 'starting-token',\n PageArgument('starting-token', STARTING_TOKEN_HELP,\n parse_type='string',\n serialized_name='StartingToken'),\n shadowed_args)\n input_members = operation_model.input_shape.members\n type_name = 'integer'\n if 'limit_key' in paginator_config:\n limit_key_shape = input_members[paginator_config['limit_key']]\n type_name = limit_key_shape.type_name\n if type_name not in PageArgument.type_map:\n raise TypeError(\n ('Unsupported pagination type {0} for operation {1}'\n ' and parameter {2}').format(\n type_name, operation_model.name,\n paginator_config['limit_key']))\n add_paging_argument(argument_table, 'page-size',\n PageArgument('page-size', PAGE_SIZE_HELP,\n parse_type=type_name,\n serialized_name='PageSize'),\n shadowed_args)\n\n add_paging_argument(argument_table, 'max-items',\n PageArgument('max-items', MAX_ITEMS_HELP,\n parse_type=type_name,\n serialized_name='MaxItems'),\n shadowed_args)\n session.register(\n parsed_args_event,\n partial(check_should_enable_pagination,\n list(_get_all_cli_input_tokens(paginator_config)),\n shadowed_args, argument_table))\n\n\ndef add_paging_argument(argument_table, arg_name, argument, shadowed_args):\n if arg_name in argument_table:\n # If there's already an entry in the arg table for this argument,\n # this means we're shadowing an argument for this operation. We\n # need to store this later in case pagination is turned off because\n # we put these arguments back.\n # See the comment in check_should_enable_pagination() for more info.\n shadowed_args[arg_name] = argument_table[arg_name]\n argument_table[arg_name] = argument\n\n\ndef check_should_enable_pagination(input_tokens, shadowed_args, argument_table,\n parsed_args, parsed_globals, **kwargs):\n normalized_paging_args = ['start_token', 'max_items']\n for token in input_tokens:\n py_name = token.replace('-', '_')\n if getattr(parsed_args, py_name) is not None and \\\n py_name not in normalized_paging_args:\n # The user has specified a manual (undocumented) pagination arg.\n # We need to automatically turn pagination off.\n logger.debug(\"User has specified a manual pagination arg. 
\"\n \"Automatically setting --no-paginate.\")\n parsed_globals.paginate = False\n # Because we've now disabled pagination, there's a chance that\n # we were shadowing arguments. For example, we inject a\n # --max-items argument in unify_paging_params(). If the\n # the operation also provides its own MaxItems (which we\n # expose as --max-items) then our custom pagination arg\n # was shadowing the customers arg. When we turn pagination\n # off we need to put back the original argument which is\n # what we're doing here.\n for key, value in shadowed_args.items():\n argument_table[key] = value\n\n\ndef _remove_existing_paging_arguments(argument_table, pagination_config):\n for cli_name in _get_all_cli_input_tokens(pagination_config):\n argument_table[cli_name]._UNDOCUMENTED = True\n\n\ndef _get_all_cli_input_tokens(pagination_config):\n # Get all input tokens including the limit_key\n # if it exists.\n tokens = _get_input_tokens(pagination_config)\n for token_name in tokens:\n cli_name = xform_name(token_name, '-')\n yield cli_name\n if 'limit_key' in pagination_config:\n key_name = pagination_config['limit_key']\n cli_name = xform_name(key_name, '-')\n yield cli_name\n\n\ndef _get_input_tokens(pagination_config):\n tokens = pagination_config['input_token']\n if not isinstance(tokens, list):\n return [tokens]\n return tokens\n\n\ndef _get_cli_name(param_objects, token_name):\n for param in param_objects:\n if param.name == token_name:\n return param.cli_name.lstrip('-')\n\n\nclass PageArgument(BaseCLIArgument):\n type_map = {\n 'string': str,\n 'integer': int,\n }\n\n def __init__(self, name, documentation, parse_type, serialized_name):\n self.argument_model = model.Shape('PageArgument', {'type': 'string'})\n self._name = name\n self._serialized_name = serialized_name\n self._documentation = documentation\n self._parse_type = parse_type\n self._required = False\n\n @property\n def cli_name(self):\n return '--' + self._name\n\n @property\n def cli_type_name(self):\n return self._parse_type\n\n @property\n def required(self):\n return self._required\n\n @required.setter\n def required(self, value):\n self._required = value\n\n @property\n def documentation(self):\n return self._documentation\n\n def add_to_parser(self, parser):\n parser.add_argument(self.cli_name, dest=self.py_name,\n type=self.type_map[self._parse_type])\n\n def add_to_params(self, parameters, value):\n if value is not None:\n pagination_config = parameters.get('PaginationConfig', {})\n pagination_config[self._serialized_name] = value\n parameters['PaginationConfig'] = pagination_config\n", "path": "awscli/customizations/paginate.py"}]} | 2,957 | 495 |
gh_patches_debug_5864 | rasdani/github-patches | git_diff | pyca__cryptography-1575 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Build automation fixes for 8th release
When triggering the wheel build the release automation does not appropriately wait for the build to complete but instead grabs the previous build. The previous attempted fix of adding a `sleep(3)` did not work around this issue.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tasks.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import getpass
8 import os
9 import time
10
11 import invoke
12
13 import requests
14
15
16 JENKINS_URL = "https://jenkins.cryptography.io/job/cryptography-wheel-builder"
17
18
19 def wait_for_build_completed(session):
20 # Wait 3 seconds before actually checking if the build is complete, to
21 # ensure that it had time to really start.
22 time.sleep(3)
23 while True:
24 response = session.get(
25 "{0}/lastBuild/api/json/".format(JENKINS_URL),
26 headers={
27 "Accept": "application/json",
28 }
29 )
30 response.raise_for_status()
31 if not response.json()["building"]:
32 assert response.json()["result"] == "SUCCESS"
33 break
34 time.sleep(0.1)
35
36
37 def download_artifacts(session):
38 response = session.get(
39 "{0}/lastBuild/api/json/".format(JENKINS_URL),
40 headers={
41 "Accept": "application/json"
42 }
43 )
44 response.raise_for_status()
45 assert not response.json()["building"]
46 assert response.json()["result"] == "SUCCESS"
47
48 paths = []
49
50 for run in response.json()["runs"]:
51 response = session.get(
52 run["url"] + "api/json/",
53 headers={
54 "Accept": "application/json",
55 }
56 )
57 response.raise_for_status()
58 for artifact in response.json()["artifacts"]:
59 response = session.get(
60 "{0}artifact/{1}".format(run["url"], artifact["relativePath"])
61 )
62 out_path = os.path.join(
63 os.path.dirname(__file__),
64 "dist",
65 artifact["fileName"],
66 )
67 with open(out_path, "wb") as f:
68 f.write(response.content)
69 paths.append(out_path)
70 return paths
71
72
73 @invoke.task
74 def release(version):
75 """
76 ``version`` should be a string like '0.4' or '1.0'.
77 """
78 invoke.run("git tag -s {0} -m '{0} release'".format(version))
79 invoke.run("git push --tags")
80
81 invoke.run("python setup.py sdist")
82 invoke.run("cd vectors/ && python setup.py sdist bdist_wheel")
83
84 invoke.run(
85 "twine upload -s dist/cryptography-{0}* "
86 "vectors/dist/cryptography_vectors-{0}*".format(version)
87 )
88
89 session = requests.Session()
90
91 # This tells the CDN to delete the cached response for the URL. We do this
92 # so that the Jenkins builders will see the new sdist immediately when they
93 # go to build the wheels.
94 response = session.request(
95 "PURGE", "https://pypi.python.org/simple/cryptography/"
96 )
97 response.raise_for_status()
98
99 username = getpass.getpass("Input the GitHub/Jenkins username: ")
100 token = getpass.getpass("Input the Jenkins token: ")
101 response = session.post(
102 "{0}/build".format(JENKINS_URL),
103 auth=requests.auth.HTTPBasicAuth(
104 username, token
105 ),
106 params={
107 "cause": "Building wheels for {0}".format(version)
108 }
109 )
110 response.raise_for_status()
111 wait_for_build_completed(session)
112 paths = download_artifacts(session)
113 invoke.run("twine upload {0}".format(" ".join(paths)))
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tasks.py b/tasks.py
--- a/tasks.py
+++ b/tasks.py
@@ -17,9 +17,9 @@
def wait_for_build_completed(session):
- # Wait 3 seconds before actually checking if the build is complete, to
+ # Wait 20 seconds before actually checking if the build is complete, to
# ensure that it had time to really start.
- time.sleep(3)
+ time.sleep(20)
while True:
response = session.get(
"{0}/lastBuild/api/json/".format(JENKINS_URL),
| {"golden_diff": "diff --git a/tasks.py b/tasks.py\n--- a/tasks.py\n+++ b/tasks.py\n@@ -17,9 +17,9 @@\n \n \n def wait_for_build_completed(session):\n- # Wait 3 seconds before actually checking if the build is complete, to\n+ # Wait 20 seconds before actually checking if the build is complete, to\n # ensure that it had time to really start.\n- time.sleep(3)\n+ time.sleep(20)\n while True:\n response = session.get(\n \"{0}/lastBuild/api/json/\".format(JENKINS_URL),\n", "issue": "Build automation fixes for 8th release\nWhen triggering the wheel build the release automation does not appropriately wait for the build to complete but instead grabs the previous build. The previous attempted fix of adding a `sleep(3)` did not work around this issue.\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport getpass\nimport os\nimport time\n\nimport invoke\n\nimport requests\n\n\nJENKINS_URL = \"https://jenkins.cryptography.io/job/cryptography-wheel-builder\"\n\n\ndef wait_for_build_completed(session):\n # Wait 3 seconds before actually checking if the build is complete, to\n # ensure that it had time to really start.\n time.sleep(3)\n while True:\n response = session.get(\n \"{0}/lastBuild/api/json/\".format(JENKINS_URL),\n headers={\n \"Accept\": \"application/json\",\n }\n )\n response.raise_for_status()\n if not response.json()[\"building\"]:\n assert response.json()[\"result\"] == \"SUCCESS\"\n break\n time.sleep(0.1)\n\n\ndef download_artifacts(session):\n response = session.get(\n \"{0}/lastBuild/api/json/\".format(JENKINS_URL),\n headers={\n \"Accept\": \"application/json\"\n }\n )\n response.raise_for_status()\n assert not response.json()[\"building\"]\n assert response.json()[\"result\"] == \"SUCCESS\"\n\n paths = []\n\n for run in response.json()[\"runs\"]:\n response = session.get(\n run[\"url\"] + \"api/json/\",\n headers={\n \"Accept\": \"application/json\",\n }\n )\n response.raise_for_status()\n for artifact in response.json()[\"artifacts\"]:\n response = session.get(\n \"{0}artifact/{1}\".format(run[\"url\"], artifact[\"relativePath\"])\n )\n out_path = os.path.join(\n os.path.dirname(__file__),\n \"dist\",\n artifact[\"fileName\"],\n )\n with open(out_path, \"wb\") as f:\n f.write(response.content)\n paths.append(out_path)\n return paths\n\n\n@invoke.task\ndef release(version):\n \"\"\"\n ``version`` should be a string like '0.4' or '1.0'.\n \"\"\"\n invoke.run(\"git tag -s {0} -m '{0} release'\".format(version))\n invoke.run(\"git push --tags\")\n\n invoke.run(\"python setup.py sdist\")\n invoke.run(\"cd vectors/ && python setup.py sdist bdist_wheel\")\n\n invoke.run(\n \"twine upload -s dist/cryptography-{0}* \"\n \"vectors/dist/cryptography_vectors-{0}*\".format(version)\n )\n\n session = requests.Session()\n\n # This tells the CDN to delete the cached response for the URL. 
We do this\n # so that the Jenkins builders will see the new sdist immediately when they\n # go to build the wheels.\n response = session.request(\n \"PURGE\", \"https://pypi.python.org/simple/cryptography/\"\n )\n response.raise_for_status()\n\n username = getpass.getpass(\"Input the GitHub/Jenkins username: \")\n token = getpass.getpass(\"Input the Jenkins token: \")\n response = session.post(\n \"{0}/build\".format(JENKINS_URL),\n auth=requests.auth.HTTPBasicAuth(\n username, token\n ),\n params={\n \"cause\": \"Building wheels for {0}\".format(version)\n }\n )\n response.raise_for_status()\n wait_for_build_completed(session)\n paths = download_artifacts(session)\n invoke.run(\"twine upload {0}\".format(\" \".join(paths)))\n", "path": "tasks.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport getpass\nimport os\nimport time\n\nimport invoke\n\nimport requests\n\n\nJENKINS_URL = \"https://jenkins.cryptography.io/job/cryptography-wheel-builder\"\n\n\ndef wait_for_build_completed(session):\n # Wait 20 seconds before actually checking if the build is complete, to\n # ensure that it had time to really start.\n time.sleep(20)\n while True:\n response = session.get(\n \"{0}/lastBuild/api/json/\".format(JENKINS_URL),\n headers={\n \"Accept\": \"application/json\",\n }\n )\n response.raise_for_status()\n if not response.json()[\"building\"]:\n assert response.json()[\"result\"] == \"SUCCESS\"\n break\n time.sleep(0.1)\n\n\ndef download_artifacts(session):\n response = session.get(\n \"{0}/lastBuild/api/json/\".format(JENKINS_URL),\n headers={\n \"Accept\": \"application/json\"\n }\n )\n response.raise_for_status()\n assert not response.json()[\"building\"]\n assert response.json()[\"result\"] == \"SUCCESS\"\n\n paths = []\n\n for run in response.json()[\"runs\"]:\n response = session.get(\n run[\"url\"] + \"api/json/\",\n headers={\n \"Accept\": \"application/json\",\n }\n )\n response.raise_for_status()\n for artifact in response.json()[\"artifacts\"]:\n response = session.get(\n \"{0}artifact/{1}\".format(run[\"url\"], artifact[\"relativePath\"])\n )\n out_path = os.path.join(\n os.path.dirname(__file__),\n \"dist\",\n artifact[\"fileName\"],\n )\n with open(out_path, \"wb\") as f:\n f.write(response.content)\n paths.append(out_path)\n return paths\n\n\n@invoke.task\ndef release(version):\n \"\"\"\n ``version`` should be a string like '0.4' or '1.0'.\n \"\"\"\n invoke.run(\"git tag -s {0} -m '{0} release'\".format(version))\n invoke.run(\"git push --tags\")\n\n invoke.run(\"python setup.py sdist\")\n invoke.run(\"cd vectors/ && python setup.py sdist bdist_wheel\")\n\n invoke.run(\n \"twine upload -s dist/cryptography-{0}* \"\n \"vectors/dist/cryptography_vectors-{0}*\".format(version)\n )\n\n session = requests.Session()\n\n # This tells the CDN to delete the cached response for the URL. 
We do this\n # so that the Jenkins builders will see the new sdist immediately when they\n # go to build the wheels.\n response = session.request(\n \"PURGE\", \"https://pypi.python.org/simple/cryptography/\"\n )\n response.raise_for_status()\n\n username = getpass.getpass(\"Input the GitHub/Jenkins username: \")\n token = getpass.getpass(\"Input the Jenkins token: \")\n response = session.post(\n \"{0}/build\".format(JENKINS_URL),\n auth=requests.auth.HTTPBasicAuth(\n username, token\n ),\n params={\n \"cause\": \"Building wheels for {0}\".format(version)\n }\n )\n response.raise_for_status()\n wait_for_build_completed(session)\n paths = download_artifacts(session)\n invoke.run(\"twine upload {0}\".format(\" \".join(paths)))\n", "path": "tasks.py"}]} | 1,310 | 130 |
gh_patches_debug_37607 | rasdani/github-patches | git_diff | RedHatInsights__insights-core-1525 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[RFE] Results to include positive NACK - CVEs to which a system is NOT vulnerable
Use case: VMaaS provides list of CVEs to which a system is vulnerable, based on list of packages' NEVRAs installed on the system. This is overridden by results from the rules engine evaluation, which can check if a certain port is closed, or a configuration value prevents exploit of a vulnerability. Unless the rules engine results includes "we evaluated for vulnerability to CVE-x and the system is NOT vulnerable", we (vulnerability-engine) won't know to override the VMaaS results that report the system is vulnerable based on the version of the package installed.
Vulnerability-engine will be able to assume that a system is no longer vulnerable to a rules engine-reported CVE if it was reported as vulnerable in previous evaluation. However, a new system checking in with a fix already in place... vulnerability-engine won't know that the vulnerability is mitigated by a config setting, closed port, etc. unless rules engine explicitly reports a CVE that was evaluated and found the system NOT vulnerable.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `insights/core/evaluators.py`
Content:
```
1 import logging
2 import sys
3
4 from ..formats import Formatter
5 from ..specs import Specs
6 from ..combiners.hostname import hostname as combiner_hostname
7 from ..parsers.branch_info import BranchInfo
8 from . import dr, plugins
9
10 log = logging.getLogger(__name__)
11
12
13 def get_simple_module_name(obj):
14 return dr.BASE_MODULE_NAMES.get(obj, None)
15
16
17 class Evaluator(Formatter):
18 def __init__(self, broker=None, stream=sys.stdout, incremental=False):
19 super(Evaluator, self).__init__(broker or dr.Broker(), stream)
20 self.rule_skips = []
21 self.rule_results = []
22 self.fingerprint_results = []
23 self.hostname = None
24 self.metadata = {}
25 self.metadata_keys = {}
26 self.incremental = incremental
27
28 def observer(self, comp, broker):
29 if comp is combiner_hostname and comp in broker:
30 self.hostname = broker[comp].fqdn
31
32 if plugins.is_rule(comp) and comp in broker:
33 self.handle_result(comp, broker[comp])
34
35 def preprocess(self):
36 self.broker.add_observer(self.observer)
37
38 def run_serial(self, graph=None):
39 dr.run(graph or dr.COMPONENTS[dr.GROUPS.single], broker=self.broker)
40
41 def run_incremental(self, graph=None):
42 for _ in dr.run_incremental(graph or dr.COMPONENTS[dr.GROUPS.single], broker=self.broker):
43 pass
44
45 def format_response(self, response):
46 """
47 To be overridden by subclasses to format the response sent back to the
48 client.
49 """
50 return response
51
52 def format_result(self, result):
53 """
54 To be overridden by subclasses to format individual rule results.
55 """
56 return result
57
58 def process(self, graph=None):
59 with self:
60 if self.incremental:
61 self.run_incremental(graph)
62 else:
63 self.run_serial(graph)
64 return self.get_response()
65
66
67 class SingleEvaluator(Evaluator):
68 def append_metadata(self, r):
69 for k, v in r.items():
70 if k != "type":
71 self.metadata[k] = v
72
73 def format_response(self, response):
74 return response
75
76 def get_response(self):
77 r = dict(self.metadata_keys)
78 r.update({
79 "system": {
80 "metadata": self.metadata,
81 "hostname": self.hostname
82 },
83 "reports": self.rule_results,
84 "fingerprints": self.fingerprint_results,
85 "skips": self.rule_skips,
86 })
87 return self.format_response(r)
88
89 def handle_result(self, plugin, r):
90 type_ = r["type"]
91 if type_ == "metadata":
92 self.append_metadata(r)
93 elif type_ == "rule":
94 self.rule_results.append(self.format_result({
95 "rule_id": "{0}|{1}".format(get_simple_module_name(plugin), r["error_key"]),
96 "details": r
97 }))
98 elif type_ == "fingerprint":
99 self.fingerprint_results.append(self.format_result({
100 "fingerprint_id": "{0}|{1}".format(get_simple_module_name(plugin), r["fingerprint_key"]),
101 "details": r
102 }))
103 elif type_ == "skip":
104 self.rule_skips.append(r)
105 elif type_ == "metadata_key":
106 self.metadata_keys[r["key"]] = r["value"]
107
108
109 class InsightsEvaluator(SingleEvaluator):
110 def __init__(self, broker=None, system_id=None, stream=sys.stdout, incremental=False):
111 super(InsightsEvaluator, self).__init__(broker, stream=sys.stdout, incremental=incremental)
112 self.system_id = system_id
113 self.branch_info = {}
114 self.product = "rhel"
115 self.type = "host"
116 self.release = None
117
118 def observer(self, comp, broker):
119 super(InsightsEvaluator, self).observer(comp, broker)
120 if comp is Specs.machine_id and comp in broker:
121 self.system_id = broker[Specs.machine_id].content[0].strip()
122
123 if comp is Specs.redhat_release and comp in broker:
124 self.release = broker[comp].content[0].strip()
125
126 if comp is BranchInfo and BranchInfo in broker:
127 self.branch_info = broker[comp].data
128
129 if comp is Specs.metadata_json and comp in broker:
130 md = broker[comp]
131 self.product = md.get("product_code")
132 self.type = md.get("role")
133
134 def format_result(self, result):
135 result["system_id"] = self.system_id
136 return result
137
138 def format_response(self, response):
139 system = response["system"]
140 system["remote_branch"] = self.branch_info.get("remote_branch")
141 system["remote_leaf"] = self.branch_info.get("remote_leaf")
142 system["system_id"] = self.system_id
143 system["product"] = self.product
144 system["type"] = self.type
145 if self.release:
146 system["metadata"]["release"] = self.release
147
148 return response
149
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/insights/core/evaluators.py b/insights/core/evaluators.py
--- a/insights/core/evaluators.py
+++ b/insights/core/evaluators.py
@@ -1,6 +1,9 @@
import logging
+import six
import sys
+from collections import defaultdict
+
from ..formats import Formatter
from ..specs import Specs
from ..combiners.hostname import hostname as combiner_hostname
@@ -17,9 +20,8 @@
class Evaluator(Formatter):
def __init__(self, broker=None, stream=sys.stdout, incremental=False):
super(Evaluator, self).__init__(broker or dr.Broker(), stream)
+ self.results = defaultdict(list)
self.rule_skips = []
- self.rule_results = []
- self.fingerprint_results = []
self.hostname = None
self.metadata = {}
self.metadata_keys = {}
@@ -80,30 +82,32 @@
"metadata": self.metadata,
"hostname": self.hostname
},
- "reports": self.rule_results,
- "fingerprints": self.fingerprint_results,
+ "reports": self.results["rule"],
+ "fingerprints": self.results["fingerprint"],
"skips": self.rule_skips,
})
+
+ for k, v in six.iteritems(self.results):
+ if k not in ("rule", "fingerprint"):
+ r[k] = v
+
return self.format_response(r)
def handle_result(self, plugin, r):
type_ = r["type"]
- if type_ == "metadata":
+
+ if type_ == "skip":
+ self.rule_skips.append(r)
+ elif type_ == "metadata":
self.append_metadata(r)
- elif type_ == "rule":
- self.rule_results.append(self.format_result({
- "rule_id": "{0}|{1}".format(get_simple_module_name(plugin), r["error_key"]),
- "details": r
- }))
- elif type_ == "fingerprint":
- self.fingerprint_results.append(self.format_result({
- "fingerprint_id": "{0}|{1}".format(get_simple_module_name(plugin), r["fingerprint_key"]),
+ elif type_ == "metadata_key":
+ self.metadata_keys[r.get_key()] = r["value"]
+ else:
+ response_id = "%s_id" % r.response_type
+ self.results[type_].append(self.format_result({
+ response_id: "{0}|{1}".format(get_simple_module_name(plugin), r.get_key()),
"details": r
}))
- elif type_ == "skip":
- self.rule_skips.append(r)
- elif type_ == "metadata_key":
- self.metadata_keys[r["key"]] = r["value"]
class InsightsEvaluator(SingleEvaluator):
| {"golden_diff": "diff --git a/insights/core/evaluators.py b/insights/core/evaluators.py\n--- a/insights/core/evaluators.py\n+++ b/insights/core/evaluators.py\n@@ -1,6 +1,9 @@\n import logging\n+import six\n import sys\n \n+from collections import defaultdict\n+\n from ..formats import Formatter\n from ..specs import Specs\n from ..combiners.hostname import hostname as combiner_hostname\n@@ -17,9 +20,8 @@\n class Evaluator(Formatter):\n def __init__(self, broker=None, stream=sys.stdout, incremental=False):\n super(Evaluator, self).__init__(broker or dr.Broker(), stream)\n+ self.results = defaultdict(list)\n self.rule_skips = []\n- self.rule_results = []\n- self.fingerprint_results = []\n self.hostname = None\n self.metadata = {}\n self.metadata_keys = {}\n@@ -80,30 +82,32 @@\n \"metadata\": self.metadata,\n \"hostname\": self.hostname\n },\n- \"reports\": self.rule_results,\n- \"fingerprints\": self.fingerprint_results,\n+ \"reports\": self.results[\"rule\"],\n+ \"fingerprints\": self.results[\"fingerprint\"],\n \"skips\": self.rule_skips,\n })\n+\n+ for k, v in six.iteritems(self.results):\n+ if k not in (\"rule\", \"fingerprint\"):\n+ r[k] = v\n+\n return self.format_response(r)\n \n def handle_result(self, plugin, r):\n type_ = r[\"type\"]\n- if type_ == \"metadata\":\n+\n+ if type_ == \"skip\":\n+ self.rule_skips.append(r)\n+ elif type_ == \"metadata\":\n self.append_metadata(r)\n- elif type_ == \"rule\":\n- self.rule_results.append(self.format_result({\n- \"rule_id\": \"{0}|{1}\".format(get_simple_module_name(plugin), r[\"error_key\"]),\n- \"details\": r\n- }))\n- elif type_ == \"fingerprint\":\n- self.fingerprint_results.append(self.format_result({\n- \"fingerprint_id\": \"{0}|{1}\".format(get_simple_module_name(plugin), r[\"fingerprint_key\"]),\n+ elif type_ == \"metadata_key\":\n+ self.metadata_keys[r.get_key()] = r[\"value\"]\n+ else:\n+ response_id = \"%s_id\" % r.response_type\n+ self.results[type_].append(self.format_result({\n+ response_id: \"{0}|{1}\".format(get_simple_module_name(plugin), r.get_key()),\n \"details\": r\n }))\n- elif type_ == \"skip\":\n- self.rule_skips.append(r)\n- elif type_ == \"metadata_key\":\n- self.metadata_keys[r[\"key\"]] = r[\"value\"]\n \n \n class InsightsEvaluator(SingleEvaluator):\n", "issue": "[RFE] Results to include positive NACK - CVEs to which a system is NOT vulnerable\nUse case: VMaaS provides list of CVEs to which a system is vulnerable, based on list of packages' NEVRAs installed on the system. This is overridden by results from the rules engine evaluation, which can check if a certain port is closed, or a configuration value prevents exploit of a vulnerability. Unless the rules engine results includes \"we evaluated for vulnerability to CVE-x and the system is NOT vulnerable\", we (vulnerability-engine) won't know to override the VMaaS results that report the system is vulnerable based on the version of the package installed.\r\n\r\nVulnerability-engine will be able to assume that a system is no longer vulnerable to a rules engine-reported CVE if it was reported as vulnerable in previous evaluation. However, a new system checking in with a fix already in place... vulnerability-engine won't know that the vulnerability is mitigated by a config setting, closed port, etc. 
unless rules engine explicitly reports a CVE that was evaluated and found the system NOT vulnerable.\n", "before_files": [{"content": "import logging\nimport sys\n\nfrom ..formats import Formatter\nfrom ..specs import Specs\nfrom ..combiners.hostname import hostname as combiner_hostname\nfrom ..parsers.branch_info import BranchInfo\nfrom . import dr, plugins\n\nlog = logging.getLogger(__name__)\n\n\ndef get_simple_module_name(obj):\n return dr.BASE_MODULE_NAMES.get(obj, None)\n\n\nclass Evaluator(Formatter):\n def __init__(self, broker=None, stream=sys.stdout, incremental=False):\n super(Evaluator, self).__init__(broker or dr.Broker(), stream)\n self.rule_skips = []\n self.rule_results = []\n self.fingerprint_results = []\n self.hostname = None\n self.metadata = {}\n self.metadata_keys = {}\n self.incremental = incremental\n\n def observer(self, comp, broker):\n if comp is combiner_hostname and comp in broker:\n self.hostname = broker[comp].fqdn\n\n if plugins.is_rule(comp) and comp in broker:\n self.handle_result(comp, broker[comp])\n\n def preprocess(self):\n self.broker.add_observer(self.observer)\n\n def run_serial(self, graph=None):\n dr.run(graph or dr.COMPONENTS[dr.GROUPS.single], broker=self.broker)\n\n def run_incremental(self, graph=None):\n for _ in dr.run_incremental(graph or dr.COMPONENTS[dr.GROUPS.single], broker=self.broker):\n pass\n\n def format_response(self, response):\n \"\"\"\n To be overridden by subclasses to format the response sent back to the\n client.\n \"\"\"\n return response\n\n def format_result(self, result):\n \"\"\"\n To be overridden by subclasses to format individual rule results.\n \"\"\"\n return result\n\n def process(self, graph=None):\n with self:\n if self.incremental:\n self.run_incremental(graph)\n else:\n self.run_serial(graph)\n return self.get_response()\n\n\nclass SingleEvaluator(Evaluator):\n def append_metadata(self, r):\n for k, v in r.items():\n if k != \"type\":\n self.metadata[k] = v\n\n def format_response(self, response):\n return response\n\n def get_response(self):\n r = dict(self.metadata_keys)\n r.update({\n \"system\": {\n \"metadata\": self.metadata,\n \"hostname\": self.hostname\n },\n \"reports\": self.rule_results,\n \"fingerprints\": self.fingerprint_results,\n \"skips\": self.rule_skips,\n })\n return self.format_response(r)\n\n def handle_result(self, plugin, r):\n type_ = r[\"type\"]\n if type_ == \"metadata\":\n self.append_metadata(r)\n elif type_ == \"rule\":\n self.rule_results.append(self.format_result({\n \"rule_id\": \"{0}|{1}\".format(get_simple_module_name(plugin), r[\"error_key\"]),\n \"details\": r\n }))\n elif type_ == \"fingerprint\":\n self.fingerprint_results.append(self.format_result({\n \"fingerprint_id\": \"{0}|{1}\".format(get_simple_module_name(plugin), r[\"fingerprint_key\"]),\n \"details\": r\n }))\n elif type_ == \"skip\":\n self.rule_skips.append(r)\n elif type_ == \"metadata_key\":\n self.metadata_keys[r[\"key\"]] = r[\"value\"]\n\n\nclass InsightsEvaluator(SingleEvaluator):\n def __init__(self, broker=None, system_id=None, stream=sys.stdout, incremental=False):\n super(InsightsEvaluator, self).__init__(broker, stream=sys.stdout, incremental=incremental)\n self.system_id = system_id\n self.branch_info = {}\n self.product = \"rhel\"\n self.type = \"host\"\n self.release = None\n\n def observer(self, comp, broker):\n super(InsightsEvaluator, self).observer(comp, broker)\n if comp is Specs.machine_id and comp in broker:\n self.system_id = broker[Specs.machine_id].content[0].strip()\n\n if comp is 
Specs.redhat_release and comp in broker:\n self.release = broker[comp].content[0].strip()\n\n if comp is BranchInfo and BranchInfo in broker:\n self.branch_info = broker[comp].data\n\n if comp is Specs.metadata_json and comp in broker:\n md = broker[comp]\n self.product = md.get(\"product_code\")\n self.type = md.get(\"role\")\n\n def format_result(self, result):\n result[\"system_id\"] = self.system_id\n return result\n\n def format_response(self, response):\n system = response[\"system\"]\n system[\"remote_branch\"] = self.branch_info.get(\"remote_branch\")\n system[\"remote_leaf\"] = self.branch_info.get(\"remote_leaf\")\n system[\"system_id\"] = self.system_id\n system[\"product\"] = self.product\n system[\"type\"] = self.type\n if self.release:\n system[\"metadata\"][\"release\"] = self.release\n\n return response\n", "path": "insights/core/evaluators.py"}], "after_files": [{"content": "import logging\nimport six\nimport sys\n\nfrom collections import defaultdict\n\nfrom ..formats import Formatter\nfrom ..specs import Specs\nfrom ..combiners.hostname import hostname as combiner_hostname\nfrom ..parsers.branch_info import BranchInfo\nfrom . import dr, plugins\n\nlog = logging.getLogger(__name__)\n\n\ndef get_simple_module_name(obj):\n return dr.BASE_MODULE_NAMES.get(obj, None)\n\n\nclass Evaluator(Formatter):\n def __init__(self, broker=None, stream=sys.stdout, incremental=False):\n super(Evaluator, self).__init__(broker or dr.Broker(), stream)\n self.results = defaultdict(list)\n self.rule_skips = []\n self.hostname = None\n self.metadata = {}\n self.metadata_keys = {}\n self.incremental = incremental\n\n def observer(self, comp, broker):\n if comp is combiner_hostname and comp in broker:\n self.hostname = broker[comp].fqdn\n\n if plugins.is_rule(comp) and comp in broker:\n self.handle_result(comp, broker[comp])\n\n def preprocess(self):\n self.broker.add_observer(self.observer)\n\n def run_serial(self, graph=None):\n dr.run(graph or dr.COMPONENTS[dr.GROUPS.single], broker=self.broker)\n\n def run_incremental(self, graph=None):\n for _ in dr.run_incremental(graph or dr.COMPONENTS[dr.GROUPS.single], broker=self.broker):\n pass\n\n def format_response(self, response):\n \"\"\"\n To be overridden by subclasses to format the response sent back to the\n client.\n \"\"\"\n return response\n\n def format_result(self, result):\n \"\"\"\n To be overridden by subclasses to format individual rule results.\n \"\"\"\n return result\n\n def process(self, graph=None):\n with self:\n if self.incremental:\n self.run_incremental(graph)\n else:\n self.run_serial(graph)\n return self.get_response()\n\n\nclass SingleEvaluator(Evaluator):\n def append_metadata(self, r):\n for k, v in r.items():\n if k != \"type\":\n self.metadata[k] = v\n\n def format_response(self, response):\n return response\n\n def get_response(self):\n r = dict(self.metadata_keys)\n r.update({\n \"system\": {\n \"metadata\": self.metadata,\n \"hostname\": self.hostname\n },\n \"reports\": self.results[\"rule\"],\n \"fingerprints\": self.results[\"fingerprint\"],\n \"skips\": self.rule_skips,\n })\n\n for k, v in six.iteritems(self.results):\n if k not in (\"rule\", \"fingerprint\"):\n r[k] = v\n\n return self.format_response(r)\n\n def handle_result(self, plugin, r):\n type_ = r[\"type\"]\n\n if type_ == \"skip\":\n self.rule_skips.append(r)\n elif type_ == \"metadata\":\n self.append_metadata(r)\n elif type_ == \"metadata_key\":\n self.metadata_keys[r.get_key()] = r[\"value\"]\n else:\n response_id = \"%s_id\" % r.response_type\n 
self.results[type_].append(self.format_result({\n response_id: \"{0}|{1}\".format(get_simple_module_name(plugin), r.get_key()),\n \"details\": r\n }))\n\n\nclass InsightsEvaluator(SingleEvaluator):\n def __init__(self, broker=None, system_id=None, stream=sys.stdout, incremental=False):\n super(InsightsEvaluator, self).__init__(broker, stream=sys.stdout, incremental=incremental)\n self.system_id = system_id\n self.branch_info = {}\n self.product = \"rhel\"\n self.type = \"host\"\n self.release = None\n\n def observer(self, comp, broker):\n super(InsightsEvaluator, self).observer(comp, broker)\n if comp is Specs.machine_id and comp in broker:\n self.system_id = broker[Specs.machine_id].content[0].strip()\n\n if comp is Specs.redhat_release and comp in broker:\n self.release = broker[comp].content[0].strip()\n\n if comp is BranchInfo and BranchInfo in broker:\n self.branch_info = broker[comp].data\n\n if comp is Specs.metadata_json and comp in broker:\n md = broker[comp]\n self.product = md.get(\"product_code\")\n self.type = md.get(\"role\")\n\n def format_result(self, result):\n result[\"system_id\"] = self.system_id\n return result\n\n def format_response(self, response):\n system = response[\"system\"]\n system[\"remote_branch\"] = self.branch_info.get(\"remote_branch\")\n system[\"remote_leaf\"] = self.branch_info.get(\"remote_leaf\")\n system[\"system_id\"] = self.system_id\n system[\"product\"] = self.product\n system[\"type\"] = self.type\n if self.release:\n system[\"metadata\"][\"release\"] = self.release\n\n return response\n", "path": "insights/core/evaluators.py"}]} | 1,885 | 621 |
gh_patches_debug_4530 | rasdani/github-patches | git_diff | ivy-llc__ivy-16060 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cross
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/paddle/tensor/linalg.py`
Content:
```
1 # global
2 import ivy
3 from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
4 from ivy.functional.frontends.paddle import promote_types_of_paddle_inputs
5 from ivy.functional.frontends.paddle.func_wrapper import (
6 to_ivy_arrays_and_back,
7 )
8
9
10 # matmul
11 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
12 @to_ivy_arrays_and_back
13 def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
14 x, y = promote_types_of_paddle_inputs(x, y)
15 return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)
16
17
18 # norm
19 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
20 @to_ivy_arrays_and_back
21 def norm(x, p="fro", axis=None, keepdim=False, name=None):
22 if axis is None and p is not None:
23 if p == "fro":
24 p = 2
25 ret = ivy.vector_norm(x.flatten(), ord=p, axis=-1)
26 if keepdim:
27 ret = ret.reshape([1] * len(x.shape))
28 if len(ret.shape) == 0:
29 return ivy.array([ret])
30 return ret
31
32 if isinstance(axis, tuple):
33 axis = list(axis)
34 if isinstance(axis, list) and len(axis) == 1:
35 axis = axis[0]
36
37 if isinstance(axis, int):
38 if p == "fro":
39 p = 2
40 if p in [0, 1, 2, ivy.inf, -ivy.inf]:
41 ret = ivy.vector_norm(x, ord=p, axis=axis, keepdims=keepdim)
42 elif isinstance(p, (int, float)):
43 ret = ivy.pow(
44 ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),
45 float(1.0 / p),
46 )
47
48 elif isinstance(axis, list) and len(axis) == 2:
49 if p == 0:
50 raise ValueError
51 elif p == 1:
52 ret = ivy.sum(ivy.abs(x), axis=axis, keepdims=keepdim)
53 elif p == 2 or p == "fro":
54 ret = ivy.matrix_norm(x, ord="fro", axis=axis, keepdims=keepdim)
55 elif p == ivy.inf:
56 ret = ivy.max(ivy.abs(x), axis=axis, keepdims=keepdim)
57 elif p == -ivy.inf:
58 ret = ivy.min(ivy.abs(x), axis=axis, keepdims=keepdim)
59 elif isinstance(p, (int, float)) and p > 0:
60 ret = ivy.pow(
61 ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),
62 float(1.0 / p),
63 )
64 else:
65 raise ValueError
66
67 else:
68 raise ValueError
69
70 if len(ret.shape) == 0:
71 ret = ivy.array(
72 [ret]
73 ) # this is done so as to match shape of output from paddle
74 return ret
75
76
77 # eig
78 @to_ivy_arrays_and_back
79 def eig(x, name=None):
80 return ivy.eig(x)
81
82
83 # eigvals
84 @to_ivy_arrays_and_back
85 def eigvals(x, name=None):
86 return ivy.eigvals(x)
87
88
89 # eigvalsh
90 @to_ivy_arrays_and_back
91 def eigvalsh(x, UPLO="L", name=None):
92 return ivy.eigvalsh(x, UPLO=UPLO)
93
94
95 # eigh
96 @to_ivy_arrays_and_back
97 def eigh(x, UPLO="L", name=None):
98 return ivy.eigh(x, UPLO=UPLO)
99
100
101 # pinv
102 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
103 @to_ivy_arrays_and_back
104 def pinv(x, rcond=1e-15, hermitian=False, name=None):
105 # TODO: Add hermitian functionality
106 return ivy.pinv(x, rtol=rcond)
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/paddle/tensor/linalg.py b/ivy/functional/frontends/paddle/tensor/linalg.py
--- a/ivy/functional/frontends/paddle/tensor/linalg.py
+++ b/ivy/functional/frontends/paddle/tensor/linalg.py
@@ -7,6 +7,15 @@
)
+@with_supported_dtypes(
+ {"2.4.2 and below": ("float32", "float64", "int32", "int64")}, "paddle"
+)
+@to_ivy_arrays_and_back
+def cross(x, y, /, *, axis=9, name=None):
+ x, y = promote_types_of_paddle_inputs(x, y)
+ return ivy.cross(x, y, axis=axis)
+
+
# matmul
@with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
@to_ivy_arrays_and_back
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/linalg.py b/ivy/functional/frontends/paddle/tensor/linalg.py\n--- a/ivy/functional/frontends/paddle/tensor/linalg.py\n+++ b/ivy/functional/frontends/paddle/tensor/linalg.py\n@@ -7,6 +7,15 @@\n )\n \n \n+@with_supported_dtypes(\n+ {\"2.4.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n+)\n+@to_ivy_arrays_and_back\n+def cross(x, y, /, *, axis=9, name=None):\n+ x, y = promote_types_of_paddle_inputs(x, y)\n+ return ivy.cross(x, y, axis=axis)\n+\n+\n # matmul\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n @to_ivy_arrays_and_back\n", "issue": "cross\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle import promote_types_of_paddle_inputs\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n# matmul\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef matmul(x, y, transpose_x=False, transpose_y=False, name=None):\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)\n\n\n# norm\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef norm(x, p=\"fro\", axis=None, keepdim=False, name=None):\n if axis is None and p is not None:\n if p == \"fro\":\n p = 2\n ret = ivy.vector_norm(x.flatten(), ord=p, axis=-1)\n if keepdim:\n ret = ret.reshape([1] * len(x.shape))\n if len(ret.shape) == 0:\n return ivy.array([ret])\n return ret\n\n if isinstance(axis, tuple):\n axis = list(axis)\n if isinstance(axis, list) and len(axis) == 1:\n axis = axis[0]\n\n if isinstance(axis, int):\n if p == \"fro\":\n p = 2\n if p in [0, 1, 2, ivy.inf, -ivy.inf]:\n ret = ivy.vector_norm(x, ord=p, axis=axis, keepdims=keepdim)\n elif isinstance(p, (int, float)):\n ret = ivy.pow(\n ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),\n float(1.0 / p),\n )\n\n elif isinstance(axis, list) and len(axis) == 2:\n if p == 0:\n raise ValueError\n elif p == 1:\n ret = ivy.sum(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif p == 2 or p == \"fro\":\n ret = ivy.matrix_norm(x, ord=\"fro\", axis=axis, keepdims=keepdim)\n elif p == ivy.inf:\n ret = ivy.max(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif p == -ivy.inf:\n ret = ivy.min(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif isinstance(p, (int, float)) and p > 0:\n ret = ivy.pow(\n ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),\n float(1.0 / p),\n )\n else:\n raise ValueError\n\n else:\n raise ValueError\n\n if len(ret.shape) == 0:\n ret = ivy.array(\n [ret]\n ) # this is done so as to match shape of output from paddle\n return ret\n\n\n# eig\n@to_ivy_arrays_and_back\ndef eig(x, name=None):\n return ivy.eig(x)\n\n\n# eigvals\n@to_ivy_arrays_and_back\ndef eigvals(x, name=None):\n return ivy.eigvals(x)\n\n\n# eigvalsh\n@to_ivy_arrays_and_back\ndef eigvalsh(x, UPLO=\"L\", name=None):\n return ivy.eigvalsh(x, UPLO=UPLO)\n\n\n# eigh\n@to_ivy_arrays_and_back\ndef eigh(x, UPLO=\"L\", name=None):\n return ivy.eigh(x, UPLO=UPLO)\n\n\n# pinv\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef pinv(x, rcond=1e-15, hermitian=False, name=None):\n # TODO: Add hermitian functionality\n return ivy.pinv(x, rtol=rcond)\n", 
"path": "ivy/functional/frontends/paddle/tensor/linalg.py"}], "after_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle import promote_types_of_paddle_inputs\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_supported_dtypes(\n {\"2.4.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef cross(x, y, /, *, axis=9, name=None):\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.cross(x, y, axis=axis)\n\n\n# matmul\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef matmul(x, y, transpose_x=False, transpose_y=False, name=None):\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)\n\n\n# norm\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef norm(x, p=\"fro\", axis=None, keepdim=False, name=None):\n if axis is None and p is not None:\n if p == \"fro\":\n p = 2\n ret = ivy.vector_norm(x.flatten(), ord=p, axis=-1)\n if keepdim:\n ret = ret.reshape([1] * len(x.shape))\n if len(ret.shape) == 0:\n return ivy.array([ret])\n return ret\n\n if isinstance(axis, tuple):\n axis = list(axis)\n if isinstance(axis, list) and len(axis) == 1:\n axis = axis[0]\n\n if isinstance(axis, int):\n if p == \"fro\":\n p = 2\n if p in [0, 1, 2, ivy.inf, -ivy.inf]:\n ret = ivy.vector_norm(x, ord=p, axis=axis, keepdims=keepdim)\n elif isinstance(p, (int, float)):\n ret = ivy.pow(\n ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),\n float(1.0 / p),\n )\n\n elif isinstance(axis, list) and len(axis) == 2:\n if p == 0:\n raise ValueError\n elif p == 1:\n ret = ivy.sum(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif p == 2 or p == \"fro\":\n ret = ivy.matrix_norm(x, ord=\"fro\", axis=axis, keepdims=keepdim)\n elif p == ivy.inf:\n ret = ivy.max(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif p == -ivy.inf:\n ret = ivy.min(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif isinstance(p, (int, float)) and p > 0:\n ret = ivy.pow(\n ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),\n float(1.0 / p),\n )\n else:\n raise ValueError\n\n else:\n raise ValueError\n\n if len(ret.shape) == 0:\n ret = ivy.array(\n [ret]\n ) # this is done so as to match shape of output from paddle\n return ret\n\n\n# eig\n@to_ivy_arrays_and_back\ndef eig(x, name=None):\n return ivy.eig(x)\n\n\n# eigvals\n@to_ivy_arrays_and_back\ndef eigvals(x, name=None):\n return ivy.eigvals(x)\n\n\n# eigvalsh\n@to_ivy_arrays_and_back\ndef eigvalsh(x, UPLO=\"L\", name=None):\n return ivy.eigvalsh(x, UPLO=UPLO)\n\n\n# eigh\n@to_ivy_arrays_and_back\ndef eigh(x, UPLO=\"L\", name=None):\n return ivy.eigh(x, UPLO=UPLO)\n\n\n# pinv\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef pinv(x, rcond=1e-15, hermitian=False, name=None):\n # TODO: Add hermitian functionality\n return ivy.pinv(x, rtol=rcond)\n", "path": "ivy/functional/frontends/paddle/tensor/linalg.py"}]} | 1,438 | 223 |
gh_patches_debug_22157 | rasdani/github-patches | git_diff | lutris__lutris-1197 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change "Import Games" to something more clear (like "Configure library importing")
I personally feel like the current name for that menu is confusing, misleading and doesn't represent its actual purpose. I personally think something like "Configure library importing" would describe the menu much better, but if you disagree, any suggestions are appreciated.
--- END ISSUE ---
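Before turning to the code below: one place such wording typically lives is the title string passed to the dialog constructor, so the request largely amounts to swapping a string. A minimal, hypothetical sketch of that kind of rename (the title used here is only a placeholder, not an agreed name):

```python
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk


class SyncServiceDialog(Gtk.Dialog):
    def __init__(self, parent=None):
        # Placeholder wording only; the issue has not settled on a final name.
        Gtk.Dialog.__init__(self, title="Configure library importing", parent=parent)
```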
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lutris/gui/sync.py`
Content:
```
1 import gi
2 gi.require_version('Gtk', '3.0')
3 from gi.repository import Gtk, Gio
4
5 from lutris.gui.widgets.utils import get_runner_icon
6 from lutris.gui.dialogs import NoticeDialog
7 from lutris.services import get_services
8 from lutris.settings import read_setting, write_setting
9 from lutris.util.jobs import AsyncCall
10
11
12 class ServiceSyncRow(Gtk.HBox):
13
14 def __init__(self, service):
15 super(ServiceSyncRow, self).__init__()
16 self.set_spacing(20)
17
18 self.identifier = service.__name__.split('.')[-1]
19 name = service.NAME
20
21 icon = get_runner_icon(self.identifier)
22 self.pack_start(icon, False, False, 0)
23
24 label = Gtk.Label(xalign=0)
25 label.set_markup("<b>{}</b>".format(name))
26 self.pack_start(label, True, True, 0)
27
28 actions = Gtk.VBox()
29 self.pack_start(actions, False, False, 0)
30
31 sync_switch = Gtk.Switch()
32 sync_switch.set_tooltip_text("Sync when Lutris starts")
33 sync_switch.props.valign = Gtk.Align.CENTER
34 sync_switch.connect('notify::active', self.on_switch_changed)
35 if read_setting('sync_at_startup', self.identifier) == 'True':
36 sync_switch.set_state(True)
37 actions.pack_start(sync_switch, False, False, 0)
38
39 sync_button = Gtk.Button("Sync")
40 sync_button.set_tooltip_text("Sync now")
41 sync_button.connect('clicked', self.on_sync_button_clicked, service.sync_with_lutris)
42 actions.pack_start(sync_button, False, False, 0)
43
44 def on_sync_button_clicked(self, button, sync_method):
45 AsyncCall(sync_method, callback=self.on_service_synced)
46
47 def on_service_synced(self, caller, data):
48 parent = self.get_toplevel()
49 if not isinstance(parent, Gtk.Window):
50 # The sync dialog may have closed
51 parent = Gio.Application.get_default().props.active_window
52 NoticeDialog("Games synced", parent=parent)
53
54 def on_switch_changed(self, switch, data):
55 state = switch.get_active()
56 write_setting('sync_at_startup', state, self.identifier)
57
58
59 class SyncServiceDialog(Gtk.Dialog):
60
61 def __init__(self, parent=None):
62 Gtk.Dialog.__init__(self, title="Import local games", parent=parent)
63 self.connect("delete-event", lambda *x: self.destroy())
64 self.set_border_width(10)
65 self.set_size_request(512, 0)
66
67 box_outer = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
68 self.get_content_area().add(box_outer)
69
70 description_label = Gtk.Label()
71 description_label.set_markup("You can import games from local game sources, \n"
72 "you can also choose to sync everytime Lutris starts")
73 box_outer.pack_start(description_label, False, False, 5)
74
75 separator = Gtk.Separator()
76 box_outer.pack_start(separator, False, False, 0)
77
78 for service in get_services():
79 sync_row = ServiceSyncRow(service)
80 box_outer.pack_start(sync_row, False, True, 0)
81 box_outer.show_all()
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lutris/gui/sync.py b/lutris/gui/sync.py
--- a/lutris/gui/sync.py
+++ b/lutris/gui/sync.py
@@ -59,7 +59,7 @@
class SyncServiceDialog(Gtk.Dialog):
def __init__(self, parent=None):
- Gtk.Dialog.__init__(self, title="Import local games", parent=parent)
+ Gtk.Dialog.__init__(self, title="Configure local game import", parent=parent)
self.connect("delete-event", lambda *x: self.destroy())
self.set_border_width(10)
self.set_size_request(512, 0)
@@ -68,8 +68,8 @@
self.get_content_area().add(box_outer)
description_label = Gtk.Label()
- description_label.set_markup("You can import games from local game sources, \n"
- "you can also choose to sync everytime Lutris starts")
+ description_label.set_markup("You can choose which local game sources will get synced each\n"
+ "time Lutris starts, or launch an immediate import of games.")
box_outer.pack_start(description_label, False, False, 5)
separator = Gtk.Separator()
| {"golden_diff": "diff --git a/lutris/gui/sync.py b/lutris/gui/sync.py\n--- a/lutris/gui/sync.py\n+++ b/lutris/gui/sync.py\n@@ -59,7 +59,7 @@\n class SyncServiceDialog(Gtk.Dialog):\n \n def __init__(self, parent=None):\n- Gtk.Dialog.__init__(self, title=\"Import local games\", parent=parent)\n+ Gtk.Dialog.__init__(self, title=\"Configure local game import\", parent=parent)\n self.connect(\"delete-event\", lambda *x: self.destroy())\n self.set_border_width(10)\n self.set_size_request(512, 0)\n@@ -68,8 +68,8 @@\n self.get_content_area().add(box_outer)\n \n description_label = Gtk.Label()\n- description_label.set_markup(\"You can import games from local game sources, \\n\"\n- \"you can also choose to sync everytime Lutris starts\")\n+ description_label.set_markup(\"You can choose which local game sources will get synced each\\n\"\n+ \"time Lutris starts, or launch an immediate import of games.\")\n box_outer.pack_start(description_label, False, False, 5)\n \n separator = Gtk.Separator()\n", "issue": "Change \"Import Games\" to something more clear (like \"Configure library importing\")\nI personally feel like the current name for that menu is confusing, misleading and does't represent its actual purpose. I personally think something like \"Configure library importing\" will describe the menu much better, but if you disagree, any suggestions are appreciated.\n", "before_files": [{"content": "import gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk, Gio\n\nfrom lutris.gui.widgets.utils import get_runner_icon\nfrom lutris.gui.dialogs import NoticeDialog\nfrom lutris.services import get_services\nfrom lutris.settings import read_setting, write_setting\nfrom lutris.util.jobs import AsyncCall\n\n\nclass ServiceSyncRow(Gtk.HBox):\n\n def __init__(self, service):\n super(ServiceSyncRow, self).__init__()\n self.set_spacing(20)\n\n self.identifier = service.__name__.split('.')[-1]\n name = service.NAME\n\n icon = get_runner_icon(self.identifier)\n self.pack_start(icon, False, False, 0)\n\n label = Gtk.Label(xalign=0)\n label.set_markup(\"<b>{}</b>\".format(name))\n self.pack_start(label, True, True, 0)\n\n actions = Gtk.VBox()\n self.pack_start(actions, False, False, 0)\n\n sync_switch = Gtk.Switch()\n sync_switch.set_tooltip_text(\"Sync when Lutris starts\")\n sync_switch.props.valign = Gtk.Align.CENTER\n sync_switch.connect('notify::active', self.on_switch_changed)\n if read_setting('sync_at_startup', self.identifier) == 'True':\n sync_switch.set_state(True)\n actions.pack_start(sync_switch, False, False, 0)\n\n sync_button = Gtk.Button(\"Sync\")\n sync_button.set_tooltip_text(\"Sync now\")\n sync_button.connect('clicked', self.on_sync_button_clicked, service.sync_with_lutris)\n actions.pack_start(sync_button, False, False, 0)\n\n def on_sync_button_clicked(self, button, sync_method):\n AsyncCall(sync_method, callback=self.on_service_synced)\n\n def on_service_synced(self, caller, data):\n parent = self.get_toplevel()\n if not isinstance(parent, Gtk.Window):\n # The sync dialog may have closed\n parent = Gio.Application.get_default().props.active_window\n NoticeDialog(\"Games synced\", parent=parent)\n\n def on_switch_changed(self, switch, data):\n state = switch.get_active()\n write_setting('sync_at_startup', state, self.identifier)\n\n\nclass SyncServiceDialog(Gtk.Dialog):\n\n def __init__(self, parent=None):\n Gtk.Dialog.__init__(self, title=\"Import local games\", parent=parent)\n self.connect(\"delete-event\", lambda *x: self.destroy())\n self.set_border_width(10)\n 
self.set_size_request(512, 0)\n\n box_outer = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)\n self.get_content_area().add(box_outer)\n\n description_label = Gtk.Label()\n description_label.set_markup(\"You can import games from local game sources, \\n\"\n \"you can also choose to sync everytime Lutris starts\")\n box_outer.pack_start(description_label, False, False, 5)\n\n separator = Gtk.Separator()\n box_outer.pack_start(separator, False, False, 0)\n\n for service in get_services():\n sync_row = ServiceSyncRow(service)\n box_outer.pack_start(sync_row, False, True, 0)\n box_outer.show_all()\n", "path": "lutris/gui/sync.py"}], "after_files": [{"content": "import gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk, Gio\n\nfrom lutris.gui.widgets.utils import get_runner_icon\nfrom lutris.gui.dialogs import NoticeDialog\nfrom lutris.services import get_services\nfrom lutris.settings import read_setting, write_setting\nfrom lutris.util.jobs import AsyncCall\n\n\nclass ServiceSyncRow(Gtk.HBox):\n\n def __init__(self, service):\n super(ServiceSyncRow, self).__init__()\n self.set_spacing(20)\n\n self.identifier = service.__name__.split('.')[-1]\n name = service.NAME\n\n icon = get_runner_icon(self.identifier)\n self.pack_start(icon, False, False, 0)\n\n label = Gtk.Label(xalign=0)\n label.set_markup(\"<b>{}</b>\".format(name))\n self.pack_start(label, True, True, 0)\n\n actions = Gtk.VBox()\n self.pack_start(actions, False, False, 0)\n\n sync_switch = Gtk.Switch()\n sync_switch.set_tooltip_text(\"Sync when Lutris starts\")\n sync_switch.props.valign = Gtk.Align.CENTER\n sync_switch.connect('notify::active', self.on_switch_changed)\n if read_setting('sync_at_startup', self.identifier) == 'True':\n sync_switch.set_state(True)\n actions.pack_start(sync_switch, False, False, 0)\n\n sync_button = Gtk.Button(\"Sync\")\n sync_button.set_tooltip_text(\"Sync now\")\n sync_button.connect('clicked', self.on_sync_button_clicked, service.sync_with_lutris)\n actions.pack_start(sync_button, False, False, 0)\n\n def on_sync_button_clicked(self, button, sync_method):\n AsyncCall(sync_method, callback=self.on_service_synced)\n\n def on_service_synced(self, caller, data):\n parent = self.get_toplevel()\n if not isinstance(parent, Gtk.Window):\n # The sync dialog may have closed\n parent = Gio.Application.get_default().props.active_window\n NoticeDialog(\"Games synced\", parent=parent)\n\n def on_switch_changed(self, switch, data):\n state = switch.get_active()\n write_setting('sync_at_startup', state, self.identifier)\n\n\nclass SyncServiceDialog(Gtk.Dialog):\n\n def __init__(self, parent=None):\n Gtk.Dialog.__init__(self, title=\"Configure local game import\", parent=parent)\n self.connect(\"delete-event\", lambda *x: self.destroy())\n self.set_border_width(10)\n self.set_size_request(512, 0)\n\n box_outer = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)\n self.get_content_area().add(box_outer)\n\n description_label = Gtk.Label()\n description_label.set_markup(\"You can choose which local game sources will get synced each\\n\"\n \"time Lutris starts, or launch an immediate import of games.\")\n box_outer.pack_start(description_label, False, False, 5)\n\n separator = Gtk.Separator()\n box_outer.pack_start(separator, False, False, 0)\n\n for service in get_services():\n sync_row = ServiceSyncRow(service)\n box_outer.pack_start(sync_row, False, True, 0)\n box_outer.show_all()\n", "path": "lutris/gui/sync.py"}]} | 1,168 | 269 |
gh_patches_debug_38060 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-5102 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix Costa Coffee (US) which has switched from Amasty to a JSON API
In the latest weekly run, Costa Coffee (costacoffee_us) is now broken because the Amasty "amlocator" AJAX endpoint no longer exists. In its place appears to be a JSON API that returns store details (locations and addresses):
https://us.costacoffee.com/api/cf/?locale=en-US&include=2&content_type=storeLocatorStore&limit=500&fields.location[near]=33.77804102,-84.38068933
Rewriting the spider to use the new JSON API (with a higher limit than 500) appears to be a fairly easy fix.
--- END ISSUE ---
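Before turning to the spider code below, it can help to poke at the endpoint by hand. A rough exploration sketch using `requests` (the `items`/`sys`/`fields`/`total` response structure and the field names used here are assumptions drawn from inspecting responses, not a documented API):

```python
import requests

# Hypothetical exploration of the new endpoint; field names are assumptions.
url = "https://us.costacoffee.com/api/cf/"
params = {"content_type": "storeLocatorStore", "limit": 1000}
data = requests.get(url, params=params, timeout=30).json()

print(data.get("total"), "stores reported;", len(data.get("items", [])), "returned in this page")
for entry in data.get("items", []):
    fields = entry.get("fields", {})
    # "sys.id", "storeAddress" and "location" are assumed field names.
    print(entry["sys"]["id"], fields.get("storeAddress"), fields.get("location"))
```

The pagination details (for example, whether the service honours a `skip` or `offset` parameter) are worth confirming against a real response before wiring them into the spider.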
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/costacoffee_us.py`
Content:
```
1 import json
2 import re
3
4 import scrapy
5 from scrapy import Selector
6
7 from locations.categories import Categories, apply_category
8 from locations.items import Feature
9
10
11 class CostaCoffeeUSSpider(scrapy.Spider):
12 name = "costacoffee_us"
13 item_attributes = {"brand": "Costa Coffee", "brand_wikidata": "Q608845"}
14 allowed_domains = ["us.costacoffee.com"]
15 start_urls = ["https://us.costacoffee.com/amlocator/index/ajax"]
16
17 def parse(self, response):
18 script = response.xpath('//script[contains(text(), "amLocator")]/text()').extract_first()
19
20 start = script.index("jsonLocations: ") + len("jsonLocations: ")
21 stop = script.index("imageLocations")
22
23 locations = script[start:stop].strip().strip(",")
24 items = json.loads(locations)["items"]
25
26 for store in items:
27 item = Feature()
28 item["ref"] = store["id"]
29 item["lat"] = store["lat"]
30 item["lon"] = store["lng"]
31
32 html = Selector(text=store["popup_html"])
33
34 item["name"] = html.xpath('//*[@class="amlocator-title"]/text()').get()
35
36 for line in html.xpath('//div[@class="amlocator-info-popup"]/text()').getall():
37 line = line.strip()
38 if m := re.match(r"City: (.*)", line):
39 item["city"] = m.group(1)
40 elif m := re.match(r"Zip: (.*)", line):
41 item["postcode"] = m.group(1)
42 elif m := re.match(r"Address: (.*)", line):
43 item["street_address"] = m.group(1)
44 elif m := re.match(r"State: (.*)", line):
45 item["state"] = m.group(1)
46
47 apply_category(Categories.COFFEE_SHOP, item)
48
49 yield item
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/costacoffee_us.py b/locations/spiders/costacoffee_us.py
--- a/locations/spiders/costacoffee_us.py
+++ b/locations/spiders/costacoffee_us.py
@@ -1,49 +1,38 @@
-import json
-import re
-
-import scrapy
-from scrapy import Selector
+from scrapy import Spider
+from scrapy.http import JsonRequest
from locations.categories import Categories, apply_category
-from locations.items import Feature
+from locations.dict_parser import DictParser
+from locations.hours import DAYS_FULL, OpeningHours
-class CostaCoffeeUSSpider(scrapy.Spider):
+class CostaCoffeeUSSpider(Spider):
name = "costacoffee_us"
item_attributes = {"brand": "Costa Coffee", "brand_wikidata": "Q608845"}
allowed_domains = ["us.costacoffee.com"]
- start_urls = ["https://us.costacoffee.com/amlocator/index/ajax"]
-
- def parse(self, response):
- script = response.xpath('//script[contains(text(), "amLocator")]/text()').extract_first()
-
- start = script.index("jsonLocations: ") + len("jsonLocations: ")
- stop = script.index("imageLocations")
-
- locations = script[start:stop].strip().strip(",")
- items = json.loads(locations)["items"]
-
- for store in items:
- item = Feature()
- item["ref"] = store["id"]
- item["lat"] = store["lat"]
- item["lon"] = store["lng"]
+ start_urls = ["https://us.costacoffee.com/api/cf/?content_type=storeLocatorStore"]
+ page_size = 1000
- html = Selector(text=store["popup_html"])
-
- item["name"] = html.xpath('//*[@class="amlocator-title"]/text()').get()
-
- for line in html.xpath('//div[@class="amlocator-info-popup"]/text()').getall():
- line = line.strip()
- if m := re.match(r"City: (.*)", line):
- item["city"] = m.group(1)
- elif m := re.match(r"Zip: (.*)", line):
- item["postcode"] = m.group(1)
- elif m := re.match(r"Address: (.*)", line):
- item["street_address"] = m.group(1)
- elif m := re.match(r"State: (.*)", line):
- item["state"] = m.group(1)
+ def start_requests(self):
+ for url in self.start_urls:
+ yield JsonRequest(url=f"{url}&limit={self.page_size}")
+ def parse(self, response):
+ for location in response.json()["items"]:
+ item = DictParser.parse(location["fields"])
+ item["ref"] = location["sys"]["id"]
+ item["addr_full"] = location["fields"]["storeAddress"]
+ item["opening_hours"] = OpeningHours()
+ for day_name in [s.lower() for s in DAYS_FULL]:
+ open_time = location["fields"].get(f"{day_name}Opening")
+ close_time = location["fields"].get(f"{day_name}Closing")
+ if open_time and "24 HOURS" in open_time.upper():
+ item["opening_hours"].add_range(day_name, "00:00", "24:00")
+ elif open_time and close_time:
+ item["opening_hours"].add_range(day_name, open_time, close_time)
apply_category(Categories.COFFEE_SHOP, item)
-
yield item
+
+ offset = response.json()["skip"]
+ if offset + response.json()["limit"] < response.json()["total"]:
+ yield JsonRequest(url=f"{response.request.url}&limit={self.page_size}&offset={offset}")
| {"golden_diff": "diff --git a/locations/spiders/costacoffee_us.py b/locations/spiders/costacoffee_us.py\n--- a/locations/spiders/costacoffee_us.py\n+++ b/locations/spiders/costacoffee_us.py\n@@ -1,49 +1,38 @@\n-import json\n-import re\n-\n-import scrapy\n-from scrapy import Selector\n+from scrapy import Spider\n+from scrapy.http import JsonRequest\n \n from locations.categories import Categories, apply_category\n-from locations.items import Feature\n+from locations.dict_parser import DictParser\n+from locations.hours import DAYS_FULL, OpeningHours\n \n \n-class CostaCoffeeUSSpider(scrapy.Spider):\n+class CostaCoffeeUSSpider(Spider):\n name = \"costacoffee_us\"\n item_attributes = {\"brand\": \"Costa Coffee\", \"brand_wikidata\": \"Q608845\"}\n allowed_domains = [\"us.costacoffee.com\"]\n- start_urls = [\"https://us.costacoffee.com/amlocator/index/ajax\"]\n-\n- def parse(self, response):\n- script = response.xpath('//script[contains(text(), \"amLocator\")]/text()').extract_first()\n-\n- start = script.index(\"jsonLocations: \") + len(\"jsonLocations: \")\n- stop = script.index(\"imageLocations\")\n-\n- locations = script[start:stop].strip().strip(\",\")\n- items = json.loads(locations)[\"items\"]\n-\n- for store in items:\n- item = Feature()\n- item[\"ref\"] = store[\"id\"]\n- item[\"lat\"] = store[\"lat\"]\n- item[\"lon\"] = store[\"lng\"]\n+ start_urls = [\"https://us.costacoffee.com/api/cf/?content_type=storeLocatorStore\"]\n+ page_size = 1000\n \n- html = Selector(text=store[\"popup_html\"])\n-\n- item[\"name\"] = html.xpath('//*[@class=\"amlocator-title\"]/text()').get()\n-\n- for line in html.xpath('//div[@class=\"amlocator-info-popup\"]/text()').getall():\n- line = line.strip()\n- if m := re.match(r\"City: (.*)\", line):\n- item[\"city\"] = m.group(1)\n- elif m := re.match(r\"Zip: (.*)\", line):\n- item[\"postcode\"] = m.group(1)\n- elif m := re.match(r\"Address: (.*)\", line):\n- item[\"street_address\"] = m.group(1)\n- elif m := re.match(r\"State: (.*)\", line):\n- item[\"state\"] = m.group(1)\n+ def start_requests(self):\n+ for url in self.start_urls:\n+ yield JsonRequest(url=f\"{url}&limit={self.page_size}\")\n \n+ def parse(self, response):\n+ for location in response.json()[\"items\"]:\n+ item = DictParser.parse(location[\"fields\"])\n+ item[\"ref\"] = location[\"sys\"][\"id\"]\n+ item[\"addr_full\"] = location[\"fields\"][\"storeAddress\"]\n+ item[\"opening_hours\"] = OpeningHours()\n+ for day_name in [s.lower() for s in DAYS_FULL]:\n+ open_time = location[\"fields\"].get(f\"{day_name}Opening\")\n+ close_time = location[\"fields\"].get(f\"{day_name}Closing\")\n+ if open_time and \"24 HOURS\" in open_time.upper():\n+ item[\"opening_hours\"].add_range(day_name, \"00:00\", \"24:00\")\n+ elif open_time and close_time:\n+ item[\"opening_hours\"].add_range(day_name, open_time, close_time)\n apply_category(Categories.COFFEE_SHOP, item)\n-\n yield item\n+\n+ offset = response.json()[\"skip\"]\n+ if offset + response.json()[\"limit\"] < response.json()[\"total\"]:\n+ yield JsonRequest(url=f\"{response.request.url}&limit={self.page_size}&offset={offset}\")\n", "issue": "Fix Costa Coffee (US) which has switched from Amasty to a JSON API\nIn the latest weekly run, Costa Coffee (costacoffee_us) is now broken with the Amasty \"amlocator\" AJAX endpoint no longer existing. 
In it's place appears to be a JSON API that returns store details (locations and addresses):\r\n\r\nhttps://us.costacoffee.com/api/cf/?locale=en-US&include=2&content_type=storeLocatorStore&limit=500&fields.location[near]=33.77804102,-84.38068933\r\n\r\nThis appears to be a fairly easy fix to rewrite the spider to use the new JSON API (with a higher limit than 500).\n", "before_files": [{"content": "import json\nimport re\n\nimport scrapy\nfrom scrapy import Selector\n\nfrom locations.categories import Categories, apply_category\nfrom locations.items import Feature\n\n\nclass CostaCoffeeUSSpider(scrapy.Spider):\n name = \"costacoffee_us\"\n item_attributes = {\"brand\": \"Costa Coffee\", \"brand_wikidata\": \"Q608845\"}\n allowed_domains = [\"us.costacoffee.com\"]\n start_urls = [\"https://us.costacoffee.com/amlocator/index/ajax\"]\n\n def parse(self, response):\n script = response.xpath('//script[contains(text(), \"amLocator\")]/text()').extract_first()\n\n start = script.index(\"jsonLocations: \") + len(\"jsonLocations: \")\n stop = script.index(\"imageLocations\")\n\n locations = script[start:stop].strip().strip(\",\")\n items = json.loads(locations)[\"items\"]\n\n for store in items:\n item = Feature()\n item[\"ref\"] = store[\"id\"]\n item[\"lat\"] = store[\"lat\"]\n item[\"lon\"] = store[\"lng\"]\n\n html = Selector(text=store[\"popup_html\"])\n\n item[\"name\"] = html.xpath('//*[@class=\"amlocator-title\"]/text()').get()\n\n for line in html.xpath('//div[@class=\"amlocator-info-popup\"]/text()').getall():\n line = line.strip()\n if m := re.match(r\"City: (.*)\", line):\n item[\"city\"] = m.group(1)\n elif m := re.match(r\"Zip: (.*)\", line):\n item[\"postcode\"] = m.group(1)\n elif m := re.match(r\"Address: (.*)\", line):\n item[\"street_address\"] = m.group(1)\n elif m := re.match(r\"State: (.*)\", line):\n item[\"state\"] = m.group(1)\n\n apply_category(Categories.COFFEE_SHOP, item)\n\n yield item\n", "path": "locations/spiders/costacoffee_us.py"}], "after_files": [{"content": "from scrapy import Spider\nfrom scrapy.http import JsonRequest\n\nfrom locations.categories import Categories, apply_category\nfrom locations.dict_parser import DictParser\nfrom locations.hours import DAYS_FULL, OpeningHours\n\n\nclass CostaCoffeeUSSpider(Spider):\n name = \"costacoffee_us\"\n item_attributes = {\"brand\": \"Costa Coffee\", \"brand_wikidata\": \"Q608845\"}\n allowed_domains = [\"us.costacoffee.com\"]\n start_urls = [\"https://us.costacoffee.com/api/cf/?content_type=storeLocatorStore\"]\n page_size = 1000\n\n def start_requests(self):\n for url in self.start_urls:\n yield JsonRequest(url=f\"{url}&limit={self.page_size}\")\n\n def parse(self, response):\n for location in response.json()[\"items\"]:\n item = DictParser.parse(location[\"fields\"])\n item[\"ref\"] = location[\"sys\"][\"id\"]\n item[\"addr_full\"] = location[\"fields\"][\"storeAddress\"]\n item[\"opening_hours\"] = OpeningHours()\n for day_name in [s.lower() for s in DAYS_FULL]:\n open_time = location[\"fields\"].get(f\"{day_name}Opening\")\n close_time = location[\"fields\"].get(f\"{day_name}Closing\")\n if open_time and \"24 HOURS\" in open_time.upper():\n item[\"opening_hours\"].add_range(day_name, \"00:00\", \"24:00\")\n elif open_time and close_time:\n item[\"opening_hours\"].add_range(day_name, open_time, close_time)\n apply_category(Categories.COFFEE_SHOP, item)\n yield item\n\n offset = response.json()[\"skip\"]\n if offset + response.json()[\"limit\"] < response.json()[\"total\"]:\n yield 
JsonRequest(url=f\"{response.request.url}&limit={self.page_size}&offset={offset}\")\n", "path": "locations/spiders/costacoffee_us.py"}]} | 926 | 846 |
gh_patches_debug_9916 | rasdani/github-patches | git_diff | optuna__optuna-3123 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Migrate mypy version to 0.910
As of today (Nov. 28th, 2021), the latest version of mypy is 0.910, while Optuna currently uses 0.790.
I think it is better to migrate to the latest version, but mypy 0.910 emits a lot of errors for the current Optuna code, like the following:
```
optuna/storages/_rdb/alembic/versions/v2.6.0.a_.py:21: error: Function is missing a return type annotation
optuna/storages/_rdb/alembic/versions/v2.6.0.a_.py:21: note: Use "-> None" if function does not return a value
optuna/storages/_rdb/alembic/versions/v2.6.0.a_.py:34: error: Function is missing a return type annotation
optuna/storages/_rdb/alembic/versions/v2.6.0.a_.py:34: note: Use "-> None" if function does not return a value
...
```
Thus we also need to add some fixes.
--- END ISSUE ---
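To give a sense of what those extra fixes look like: the quoted errors point at module-level functions in the Alembic revision files (presumably the usual `upgrade`/`downgrade` hooks), and the annotation mypy asks for is mechanical. A rough, hypothetical sketch with the bodies elided:

```python
# Hypothetical excerpt of the fix mypy requests in an Alembic revision such as
# v2.6.0.a_.py (the real functions issue op.* calls; only the signatures matter here):
def upgrade() -> None:
    ...


def downgrade() -> None:
    ...
```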
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2 from typing import Dict
3 from typing import List
4 from typing import Optional
5
6 import pkg_resources
7 from setuptools import find_packages
8 from setuptools import setup
9
10
11 def get_version() -> str:
12
13 version_filepath = os.path.join(os.path.dirname(__file__), "optuna", "version.py")
14 with open(version_filepath) as f:
15 for line in f:
16 if line.startswith("__version__"):
17 return line.strip().split()[-1][1:-1]
18 assert False
19
20
21 def get_long_description() -> str:
22
23 readme_filepath = os.path.join(os.path.dirname(__file__), "README.md")
24 with open(readme_filepath) as f:
25 return f.read()
26
27
28 def get_install_requires() -> List[str]:
29
30 requirements = [
31 "alembic",
32 "cliff",
33 "cmaes>=0.8.2",
34 "colorlog",
35 "numpy",
36 "packaging>=20.0",
37 "scipy!=1.4.0",
38 "sqlalchemy>=1.1.0",
39 "tqdm",
40 "PyYAML", # Only used in `optuna/cli.py`.
41 ]
42 return requirements
43
44
45 def get_tests_require() -> List[str]:
46
47 return get_extras_require()["testing"]
48
49
50 def get_extras_require() -> Dict[str, List[str]]:
51
52 requirements = {
53 # TODO(HideakiImamura) Unpin mypy version after fixing "Duplicate modules" error in
54 # tutorials.
55 "checking": ["black", "hacking", "isort", "mypy==0.790", "blackdoc"],
56 "codecov": ["codecov", "pytest-cov"],
57 "doctest": [
58 "cma",
59 "matplotlib>=3.0.0",
60 "pandas",
61 "plotly>=4.0.0",
62 "scikit-learn>=0.24.2",
63 "scikit-optimize",
64 "mlflow<1.22.0",
65 ],
66 "document": [
67 # TODO(nzw): Remove the version constraint after resolving the issue
68 # https://github.com/optuna/optuna/issues/2658.
69 "sphinx<4.0.0",
70 "sphinx_rtd_theme",
71 "sphinx-copybutton",
72 "sphinx-gallery",
73 "sphinx-plotly-directive",
74 "pillow",
75 "matplotlib",
76 "scikit-learn",
77 "plotly>=4.0.0", # optuna/visualization.
78 "pandas",
79 "lightgbm",
80 "torch==1.8.0",
81 "torchvision==0.9.0",
82 "torchaudio==0.8.0",
83 "thop",
84 ],
85 "experimental": ["redis"],
86 "testing": [
87 "chainer>=5.0.0",
88 "cma",
89 "fakeredis",
90 "lightgbm",
91 "matplotlib>=3.0.0",
92 "mlflow<1.22.0",
93 "mpi4py",
94 "mxnet",
95 "pandas",
96 "plotly>=4.0.0",
97 "pytest",
98 "scikit-learn>=0.24.2",
99 "scikit-optimize",
100 "xgboost",
101 "tensorflow",
102 "tensorflow-datasets",
103 "pytorch-ignite",
104 # TODO(nzw0301): remove the upper version constraint when the callback supports
105 # pytorch-lightning==1.5.0.
106 "pytorch-lightning>=1.0.2,<1.5.0",
107 "skorch",
108 "catalyst>=21.3",
109 "torch==1.8.0 ; sys_platform=='darwin'",
110 "torch==1.8.0+cpu ; sys_platform!='darwin'",
111 "torchvision==0.9.0 ; sys_platform=='darwin'",
112 "torchvision==0.9.0+cpu ; sys_platform!='darwin'",
113 "torchaudio==0.8.0",
114 "allennlp>=2.2.0,<2.7.0",
115 "botorch>=0.4.0 ; python_version>'3.6'",
116 "fastai",
117 ],
118 "tests": [
119 "fakeredis",
120 "pytest",
121 ],
122 "optional": [
123 "matplotlib>=3.0.0", # optuna/visualization/matplotlib
124 "pandas", # optuna/study.py
125 "plotly>=4.0.0", # optuna/visualization.
126 "redis", # optuna/storages/redis.py.
127 "scikit-learn>=0.24.2",
128 # optuna/visualization/param_importances.py.
129 ],
130 "integration": [
131 "chainer>=5.0.0",
132 "cma",
133 "lightgbm",
134 "mlflow<1.22.0",
135 "wandb",
136 "mpi4py",
137 "mxnet",
138 "pandas",
139 "scikit-learn>=0.24.2",
140 "scikit-optimize",
141 "xgboost",
142 "tensorflow",
143 "tensorflow-datasets",
144 "pytorch-ignite",
145 # TODO(nzw0301): remove the upper version constraint when the callback supports
146 # pytorch-lightning==1.5.0.
147 "pytorch-lightning>=1.0.2,<1.5.0",
148 "skorch",
149 "catalyst>=21.3",
150 "torch==1.8.0 ; sys_platform=='darwin'",
151 "torch==1.8.0+cpu ; sys_platform!='darwin'",
152 "torchvision==0.9.0 ; sys_platform=='darwin'",
153 "torchvision==0.9.0+cpu ; sys_platform!='darwin'",
154 "torchaudio==0.8.0",
155 "allennlp>=2.2.0,<2.7.0",
156 "botorch>=0.4.0 ; python_version>'3.6'",
157 "fastai",
158 ],
159 "benchmark": [
160 "asv",
161 "virtualenv",
162 ],
163 }
164
165 return requirements
166
167
168 def find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:
169
170 for pkg in pkgs:
171 try:
172 return pkg_resources.get_distribution(pkg)
173 except pkg_resources.DistributionNotFound:
174 pass
175 return None
176
177
178 setup(
179 name="optuna",
180 version=get_version(),
181 description="A hyperparameter optimization framework",
182 long_description=get_long_description(),
183 long_description_content_type="text/markdown",
184 author="Takuya Akiba",
185 author_email="akiba@preferred.jp",
186 url="https://optuna.org/",
187 packages=find_packages(exclude=("tests", "tests.*", "benchmarks")),
188 package_data={
189 "optuna": [
190 "storages/_rdb/alembic.ini",
191 "storages/_rdb/alembic/*.*",
192 "storages/_rdb/alembic/versions/*.*",
193 "py.typed",
194 ]
195 },
196 python_requires=">=3.6",
197 install_requires=get_install_requires(),
198 tests_require=get_tests_require(),
199 extras_require=get_extras_require(),
200 entry_points={
201 "console_scripts": ["optuna = optuna.cli:main"],
202 "optuna.command": [
203 "create-study = optuna.cli:_CreateStudy",
204 "delete-study = optuna.cli:_DeleteStudy",
205 "study set-user-attr = optuna.cli:_StudySetUserAttribute",
206 "studies = optuna.cli:_Studies",
207 "trials = optuna.cli:_Trials",
208 "best-trial = optuna.cli:_BestTrial",
209 "best-trials = optuna.cli:_BestTrials",
210 "study optimize = optuna.cli:_StudyOptimize",
211 "storage upgrade = optuna.cli:_StorageUpgrade",
212 "ask = optuna.cli:_Ask",
213 "tell = optuna.cli:_Tell",
214 ],
215 },
216 classifiers=[
217 "Development Status :: 5 - Production/Stable",
218 "Intended Audience :: Science/Research",
219 "Intended Audience :: Developers",
220 "License :: OSI Approved :: MIT License",
221 "Programming Language :: Python :: 3",
222 "Programming Language :: Python :: 3.6",
223 "Programming Language :: Python :: 3.7",
224 "Programming Language :: Python :: 3.8",
225 "Programming Language :: Python :: 3.9",
226 "Programming Language :: Python :: 3 :: Only",
227 "Topic :: Scientific/Engineering",
228 "Topic :: Scientific/Engineering :: Mathematics",
229 "Topic :: Scientific/Engineering :: Artificial Intelligence",
230 "Topic :: Software Development",
231 "Topic :: Software Development :: Libraries",
232 "Topic :: Software Development :: Libraries :: Python Modules",
233 ],
234 )
235
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -50,9 +50,16 @@
def get_extras_require() -> Dict[str, List[str]]:
requirements = {
- # TODO(HideakiImamura) Unpin mypy version after fixing "Duplicate modules" error in
- # tutorials.
- "checking": ["black", "hacking", "isort", "mypy==0.790", "blackdoc"],
+ "checking": [
+ "black",
+ "hacking",
+ "isort",
+ "blackdoc",
+ "mypy",
+ "types-setuptools",
+ "types-redis",
+ "types-PyYAML",
+ ],
"codecov": ["codecov", "pytest-cov"],
"doctest": [
"cma",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -50,9 +50,16 @@\n def get_extras_require() -> Dict[str, List[str]]:\n \n requirements = {\n- # TODO(HideakiImamura) Unpin mypy version after fixing \"Duplicate modules\" error in\n- # tutorials.\n- \"checking\": [\"black\", \"hacking\", \"isort\", \"mypy==0.790\", \"blackdoc\"],\n+ \"checking\": [\n+ \"black\",\n+ \"hacking\",\n+ \"isort\",\n+ \"blackdoc\",\n+ \"mypy\",\n+ \"types-setuptools\",\n+ \"types-redis\",\n+ \"types-PyYAML\",\n+ ],\n \"codecov\": [\"codecov\", \"pytest-cov\"],\n \"doctest\": [\n \"cma\",\n", "issue": "Migrate mypy version to 0.910\nAs of today (Nov. 28th, 2021), the latest version of mypy is 0.910. On the other hand, now Optuna uses 0.790.\r\nI think it is better to migrate to the latest version, but mypy 0.910 emits a lot of errors for the current Optuna codes like the following:\r\n\r\n```\r\noptuna/storages/_rdb/alembic/versions/v2.6.0.a_.py:21: error: Function is missing a return type annotation\r\noptuna/storages/_rdb/alembic/versions/v2.6.0.a_.py:21: note: Use \"-> None\" if function does not return a value\r\noptuna/storages/_rdb/alembic/versions/v2.6.0.a_.py:34: error: Function is missing a return type annotation\r\noptuna/storages/_rdb/alembic/versions/v2.6.0.a_.py:34: note: Use \"-> None\" if function does not return a value\r\n...\r\n```\r\n\r\nThus we also need to add some fixes.\r\n\n", "before_files": [{"content": "import os\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\n\nimport pkg_resources\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\ndef get_version() -> str:\n\n version_filepath = os.path.join(os.path.dirname(__file__), \"optuna\", \"version.py\")\n with open(version_filepath) as f:\n for line in f:\n if line.startswith(\"__version__\"):\n return line.strip().split()[-1][1:-1]\n assert False\n\n\ndef get_long_description() -> str:\n\n readme_filepath = os.path.join(os.path.dirname(__file__), \"README.md\")\n with open(readme_filepath) as f:\n return f.read()\n\n\ndef get_install_requires() -> List[str]:\n\n requirements = [\n \"alembic\",\n \"cliff\",\n \"cmaes>=0.8.2\",\n \"colorlog\",\n \"numpy\",\n \"packaging>=20.0\",\n \"scipy!=1.4.0\",\n \"sqlalchemy>=1.1.0\",\n \"tqdm\",\n \"PyYAML\", # Only used in `optuna/cli.py`.\n ]\n return requirements\n\n\ndef get_tests_require() -> List[str]:\n\n return get_extras_require()[\"testing\"]\n\n\ndef get_extras_require() -> Dict[str, List[str]]:\n\n requirements = {\n # TODO(HideakiImamura) Unpin mypy version after fixing \"Duplicate modules\" error in\n # tutorials.\n \"checking\": [\"black\", \"hacking\", \"isort\", \"mypy==0.790\", \"blackdoc\"],\n \"codecov\": [\"codecov\", \"pytest-cov\"],\n \"doctest\": [\n \"cma\",\n \"matplotlib>=3.0.0\",\n \"pandas\",\n \"plotly>=4.0.0\",\n \"scikit-learn>=0.24.2\",\n \"scikit-optimize\",\n \"mlflow<1.22.0\",\n ],\n \"document\": [\n # TODO(nzw): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/2658.\n \"sphinx<4.0.0\",\n \"sphinx_rtd_theme\",\n \"sphinx-copybutton\",\n \"sphinx-gallery\",\n \"sphinx-plotly-directive\",\n \"pillow\",\n \"matplotlib\",\n \"scikit-learn\",\n \"plotly>=4.0.0\", # optuna/visualization.\n \"pandas\",\n \"lightgbm\",\n \"torch==1.8.0\",\n \"torchvision==0.9.0\",\n \"torchaudio==0.8.0\",\n \"thop\",\n ],\n \"experimental\": [\"redis\"],\n \"testing\": [\n \"chainer>=5.0.0\",\n \"cma\",\n \"fakeredis\",\n \"lightgbm\",\n 
\"matplotlib>=3.0.0\",\n \"mlflow<1.22.0\",\n \"mpi4py\",\n \"mxnet\",\n \"pandas\",\n \"plotly>=4.0.0\",\n \"pytest\",\n \"scikit-learn>=0.24.2\",\n \"scikit-optimize\",\n \"xgboost\",\n \"tensorflow\",\n \"tensorflow-datasets\",\n \"pytorch-ignite\",\n # TODO(nzw0301): remove the upper version constraint when the callback supports\n # pytorch-lightning==1.5.0.\n \"pytorch-lightning>=1.0.2,<1.5.0\",\n \"skorch\",\n \"catalyst>=21.3\",\n \"torch==1.8.0 ; sys_platform=='darwin'\",\n \"torch==1.8.0+cpu ; sys_platform!='darwin'\",\n \"torchvision==0.9.0 ; sys_platform=='darwin'\",\n \"torchvision==0.9.0+cpu ; sys_platform!='darwin'\",\n \"torchaudio==0.8.0\",\n \"allennlp>=2.2.0,<2.7.0\",\n \"botorch>=0.4.0 ; python_version>'3.6'\",\n \"fastai\",\n ],\n \"tests\": [\n \"fakeredis\",\n \"pytest\",\n ],\n \"optional\": [\n \"matplotlib>=3.0.0\", # optuna/visualization/matplotlib\n \"pandas\", # optuna/study.py\n \"plotly>=4.0.0\", # optuna/visualization.\n \"redis\", # optuna/storages/redis.py.\n \"scikit-learn>=0.24.2\",\n # optuna/visualization/param_importances.py.\n ],\n \"integration\": [\n \"chainer>=5.0.0\",\n \"cma\",\n \"lightgbm\",\n \"mlflow<1.22.0\",\n \"wandb\",\n \"mpi4py\",\n \"mxnet\",\n \"pandas\",\n \"scikit-learn>=0.24.2\",\n \"scikit-optimize\",\n \"xgboost\",\n \"tensorflow\",\n \"tensorflow-datasets\",\n \"pytorch-ignite\",\n # TODO(nzw0301): remove the upper version constraint when the callback supports\n # pytorch-lightning==1.5.0.\n \"pytorch-lightning>=1.0.2,<1.5.0\",\n \"skorch\",\n \"catalyst>=21.3\",\n \"torch==1.8.0 ; sys_platform=='darwin'\",\n \"torch==1.8.0+cpu ; sys_platform!='darwin'\",\n \"torchvision==0.9.0 ; sys_platform=='darwin'\",\n \"torchvision==0.9.0+cpu ; sys_platform!='darwin'\",\n \"torchaudio==0.8.0\",\n \"allennlp>=2.2.0,<2.7.0\",\n \"botorch>=0.4.0 ; python_version>'3.6'\",\n \"fastai\",\n ],\n \"benchmark\": [\n \"asv\",\n \"virtualenv\",\n ],\n }\n\n return requirements\n\n\ndef find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:\n\n for pkg in pkgs:\n try:\n return pkg_resources.get_distribution(pkg)\n except pkg_resources.DistributionNotFound:\n pass\n return None\n\n\nsetup(\n name=\"optuna\",\n version=get_version(),\n description=\"A hyperparameter optimization framework\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Takuya Akiba\",\n author_email=\"akiba@preferred.jp\",\n url=\"https://optuna.org/\",\n packages=find_packages(exclude=(\"tests\", \"tests.*\", \"benchmarks\")),\n package_data={\n \"optuna\": [\n \"storages/_rdb/alembic.ini\",\n \"storages/_rdb/alembic/*.*\",\n \"storages/_rdb/alembic/versions/*.*\",\n \"py.typed\",\n ]\n },\n python_requires=\">=3.6\",\n install_requires=get_install_requires(),\n tests_require=get_tests_require(),\n extras_require=get_extras_require(),\n entry_points={\n \"console_scripts\": [\"optuna = optuna.cli:main\"],\n \"optuna.command\": [\n \"create-study = optuna.cli:_CreateStudy\",\n \"delete-study = optuna.cli:_DeleteStudy\",\n \"study set-user-attr = optuna.cli:_StudySetUserAttribute\",\n \"studies = optuna.cli:_Studies\",\n \"trials = optuna.cli:_Trials\",\n \"best-trial = optuna.cli:_BestTrial\",\n \"best-trials = optuna.cli:_BestTrials\",\n \"study optimize = optuna.cli:_StudyOptimize\",\n \"storage upgrade = optuna.cli:_StorageUpgrade\",\n \"ask = optuna.cli:_Ask\",\n \"tell = optuna.cli:_Tell\",\n ],\n },\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: 
Science/Research\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "import os\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\n\nimport pkg_resources\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\ndef get_version() -> str:\n\n version_filepath = os.path.join(os.path.dirname(__file__), \"optuna\", \"version.py\")\n with open(version_filepath) as f:\n for line in f:\n if line.startswith(\"__version__\"):\n return line.strip().split()[-1][1:-1]\n assert False\n\n\ndef get_long_description() -> str:\n\n readme_filepath = os.path.join(os.path.dirname(__file__), \"README.md\")\n with open(readme_filepath) as f:\n return f.read()\n\n\ndef get_install_requires() -> List[str]:\n\n requirements = [\n \"alembic\",\n \"cliff\",\n \"cmaes>=0.8.2\",\n \"colorlog\",\n \"numpy\",\n \"packaging>=20.0\",\n \"scipy!=1.4.0\",\n \"sqlalchemy>=1.1.0\",\n \"tqdm\",\n \"PyYAML\", # Only used in `optuna/cli.py`.\n ]\n return requirements\n\n\ndef get_tests_require() -> List[str]:\n\n return get_extras_require()[\"testing\"]\n\n\ndef get_extras_require() -> Dict[str, List[str]]:\n\n requirements = {\n \"checking\": [\n \"black\",\n \"hacking\",\n \"isort\",\n \"blackdoc\",\n \"mypy\",\n \"types-setuptools\",\n \"types-redis\",\n \"types-PyYAML\",\n ],\n \"codecov\": [\"codecov\", \"pytest-cov\"],\n \"doctest\": [\n \"cma\",\n \"matplotlib>=3.0.0\",\n \"pandas\",\n \"plotly>=4.0.0\",\n \"scikit-learn>=0.24.2\",\n \"scikit-optimize\",\n \"mlflow<1.22.0\",\n ],\n \"document\": [\n # TODO(nzw): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/2658.\n \"sphinx<4.0.0\",\n \"sphinx_rtd_theme\",\n \"sphinx-copybutton\",\n \"sphinx-gallery\",\n \"sphinx-plotly-directive\",\n \"pillow\",\n \"matplotlib\",\n \"scikit-learn\",\n \"plotly>=4.0.0\", # optuna/visualization.\n \"pandas\",\n \"lightgbm\",\n \"torch==1.8.0\",\n \"torchvision==0.9.0\",\n \"torchaudio==0.8.0\",\n \"thop\",\n ],\n \"experimental\": [\"redis\"],\n \"testing\": [\n \"chainer>=5.0.0\",\n \"cma\",\n \"fakeredis\",\n \"lightgbm\",\n \"matplotlib>=3.0.0\",\n \"mlflow<1.22.0\",\n \"mpi4py\",\n \"mxnet\",\n \"pandas\",\n \"plotly>=4.0.0\",\n \"pytest\",\n \"scikit-learn>=0.24.2\",\n \"scikit-optimize\",\n \"xgboost\",\n \"tensorflow\",\n \"tensorflow-datasets\",\n \"pytorch-ignite\",\n # TODO(nzw0301): remove the upper version constraint when the callback supports\n # pytorch-lightning==1.5.0.\n \"pytorch-lightning>=1.0.2,<1.5.0\",\n \"skorch\",\n \"catalyst>=21.3\",\n \"torch==1.8.0 ; sys_platform=='darwin'\",\n \"torch==1.8.0+cpu ; sys_platform!='darwin'\",\n \"torchvision==0.9.0 ; sys_platform=='darwin'\",\n \"torchvision==0.9.0+cpu ; sys_platform!='darwin'\",\n \"torchaudio==0.8.0\",\n \"allennlp>=2.2.0,<2.7.0\",\n \"botorch>=0.4.0 ; python_version>'3.6'\",\n \"fastai\",\n 
],\n \"tests\": [\n \"fakeredis\",\n \"pytest\",\n ],\n \"optional\": [\n \"matplotlib>=3.0.0\", # optuna/visualization/matplotlib\n \"pandas\", # optuna/study.py\n \"plotly>=4.0.0\", # optuna/visualization.\n \"redis\", # optuna/storages/redis.py.\n \"scikit-learn>=0.24.2\",\n # optuna/visualization/param_importances.py.\n ],\n \"integration\": [\n \"chainer>=5.0.0\",\n \"cma\",\n \"lightgbm\",\n \"mlflow<1.22.0\",\n \"wandb\",\n \"mpi4py\",\n \"mxnet\",\n \"pandas\",\n \"scikit-learn>=0.24.2\",\n \"scikit-optimize\",\n \"xgboost\",\n \"tensorflow\",\n \"tensorflow-datasets\",\n \"pytorch-ignite\",\n # TODO(nzw0301): remove the upper version constraint when the callback supports\n # pytorch-lightning==1.5.0.\n \"pytorch-lightning>=1.0.2,<1.5.0\",\n \"skorch\",\n \"catalyst>=21.3\",\n \"torch==1.8.0 ; sys_platform=='darwin'\",\n \"torch==1.8.0+cpu ; sys_platform!='darwin'\",\n \"torchvision==0.9.0 ; sys_platform=='darwin'\",\n \"torchvision==0.9.0+cpu ; sys_platform!='darwin'\",\n \"torchaudio==0.8.0\",\n \"allennlp>=2.2.0,<2.7.0\",\n \"botorch>=0.4.0 ; python_version>'3.6'\",\n \"fastai\",\n ],\n \"benchmark\": [\n \"asv\",\n \"virtualenv\",\n ],\n }\n\n return requirements\n\n\ndef find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:\n\n for pkg in pkgs:\n try:\n return pkg_resources.get_distribution(pkg)\n except pkg_resources.DistributionNotFound:\n pass\n return None\n\n\nsetup(\n name=\"optuna\",\n version=get_version(),\n description=\"A hyperparameter optimization framework\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Takuya Akiba\",\n author_email=\"akiba@preferred.jp\",\n url=\"https://optuna.org/\",\n packages=find_packages(exclude=(\"tests\", \"tests.*\", \"benchmarks\")),\n package_data={\n \"optuna\": [\n \"storages/_rdb/alembic.ini\",\n \"storages/_rdb/alembic/*.*\",\n \"storages/_rdb/alembic/versions/*.*\",\n \"py.typed\",\n ]\n },\n python_requires=\">=3.6\",\n install_requires=get_install_requires(),\n tests_require=get_tests_require(),\n extras_require=get_extras_require(),\n entry_points={\n \"console_scripts\": [\"optuna = optuna.cli:main\"],\n \"optuna.command\": [\n \"create-study = optuna.cli:_CreateStudy\",\n \"delete-study = optuna.cli:_DeleteStudy\",\n \"study set-user-attr = optuna.cli:_StudySetUserAttribute\",\n \"studies = optuna.cli:_Studies\",\n \"trials = optuna.cli:_Trials\",\n \"best-trial = optuna.cli:_BestTrial\",\n \"best-trials = optuna.cli:_BestTrials\",\n \"study optimize = optuna.cli:_StudyOptimize\",\n \"storage upgrade = optuna.cli:_StorageUpgrade\",\n \"ask = optuna.cli:_Ask\",\n \"tell = optuna.cli:_Tell\",\n ],\n },\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n)\n", "path": "setup.py"}]} | 3,057 | 196 |
gh_patches_debug_6880 | rasdani/github-patches | git_diff | scikit-image__scikit-image-6733 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Invalid f-string in _warnings
### Description:
```
f4978b1149 skimage/_shared/_warnings.py (Jarrod Millman 2022-10-11 17:14:49 -0700 145) msg = f"No warning raised matching:\n{{'\n'.join(remaining)}}"
c0a0490eed skimage/_shared/_warnings.py (Steven Silvester 2014-12-23 10:59:47 -0600 146) raise ValueError(msg)
```
That f-string cannot render correctly.
--- END ISSUE ---
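To see concretely why it cannot render, and one simple repair (independent of whichever form the final patch takes):

```python
remaining = ["first pattern", "second pattern"]

# The doubled braces are brace escapes, so nothing gets interpolated: this prints
# a literal "{'", a newline, then "'.join(remaining)}" after the header line.
msg = f"No warning raised matching:\n{{'\n'.join(remaining)}}"
print(msg)

# Single braces would not help on Python < 3.12 either, because a backslash may
# not appear inside an f-string expression. Keeping the join outside the
# f-string is one straightforward repair:
joined = "\n".join(remaining)
print(f"No warning raised matching:\n{joined}")
```

The patch shown later in this entry takes a similar route, binding the newline to a variable before interpolating it.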
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/_shared/_warnings.py`
Content:
```
1 from contextlib import contextmanager
2 import sys
3 import warnings
4 import re
5 import functools
6 import os
7
8 __all__ = ['all_warnings', 'expected_warnings', 'warn']
9
10
11 # A version of `warnings.warn` with a default stacklevel of 2.
12 # functool is used so as not to increase the call stack accidentally
13 warn = functools.partial(warnings.warn, stacklevel=2)
14
15
16 @contextmanager
17 def all_warnings():
18 """
19 Context for use in testing to ensure that all warnings are raised.
20
21 Examples
22 --------
23 >>> import warnings
24 >>> def foo():
25 ... warnings.warn(RuntimeWarning("bar"), stacklevel=2)
26
27 We raise the warning once, while the warning filter is set to "once".
28 Hereafter, the warning is invisible, even with custom filters:
29
30 >>> with warnings.catch_warnings():
31 ... warnings.simplefilter('once')
32 ... foo() # doctest: +SKIP
33
34 We can now run ``foo()`` without a warning being raised:
35
36 >>> from numpy.testing import assert_warns
37 >>> foo() # doctest: +SKIP
38
39 To catch the warning, we call in the help of ``all_warnings``:
40
41 >>> with all_warnings():
42 ... assert_warns(RuntimeWarning, foo)
43 """
44 # _warnings.py is on the critical import path.
45 # Since this is a testing only function, we lazy import inspect.
46 import inspect
47 # Whenever a warning is triggered, Python adds a __warningregistry__
48 # member to the *calling* module. The exercise here is to find
49 # and eradicate all those breadcrumbs that were left lying around.
50 #
51 # We proceed by first searching all parent calling frames and explicitly
52 # clearing their warning registries (necessary for the doctests above to
53 # pass). Then, we search for all submodules of skimage and clear theirs
54 # as well (necessary for the skimage test suite to pass).
55
56 frame = inspect.currentframe()
57 if frame:
58 for f in inspect.getouterframes(frame):
59 f[0].f_locals['__warningregistry__'] = {}
60 del frame
61
62 for mod_name, mod in list(sys.modules.items()):
63 try:
64 mod.__warningregistry__.clear()
65 except AttributeError:
66 pass
67
68 with warnings.catch_warnings(record=True) as w:
69 warnings.simplefilter("always")
70 yield w
71
72
73 @contextmanager
74 def expected_warnings(matching):
75 r"""Context for use in testing to catch known warnings matching regexes
76
77 Parameters
78 ----------
79 matching : None or a list of strings or compiled regexes
80 Regexes for the desired warning to catch
81 If matching is None, this behaves as a no-op.
82
83 Examples
84 --------
85 >>> import numpy as np
86 >>> rng = np.random.default_rng()
87 >>> image = rng.integers(0, 2**16, size=(100, 100), dtype=np.uint16)
88 >>> # rank filters are slow when bit-depth exceeds 10 bits
89 >>> from skimage import filters
90 >>> with expected_warnings(['Bad rank filter performance']):
91 ... median_filtered = filters.rank.median(image)
92
93 Notes
94 -----
95 Uses `all_warnings` to ensure all warnings are raised.
96 Upon exiting, it checks the recorded warnings for the desired matching
97 pattern(s).
98 Raises a ValueError if any match was not found or an unexpected
99 warning was raised.
100 Allows for three types of behaviors: `and`, `or`, and `optional` matches.
101 This is done to accommodate different build environments or loop conditions
102 that may produce different warnings. The behaviors can be combined.
103 If you pass multiple patterns, you get an orderless `and`, where all of the
104 warnings must be raised.
105 If you use the `|` operator in a pattern, you can catch one of several
106 warnings.
107 Finally, you can use `|\A\Z` in a pattern to signify it as optional.
108
109 """
110 if isinstance(matching, str):
111 raise ValueError('``matching`` should be a list of strings and not '
112 'a string itself.')
113
114 # Special case for disabling the context manager
115 if matching is None:
116 yield None
117 return
118
119 strict_warnings = os.environ.get('SKIMAGE_TEST_STRICT_WARNINGS', '1')
120 if strict_warnings.lower() == 'true':
121 strict_warnings = True
122 elif strict_warnings.lower() == 'false':
123 strict_warnings = False
124 else:
125 strict_warnings = bool(int(strict_warnings))
126
127 with all_warnings() as w:
128 # enter context
129 yield w
130 # exited user context, check the recorded warnings
131 # Allow users to provide None
132 while None in matching:
133 matching.remove(None)
134 remaining = [m for m in matching if r'\A\Z' not in m.split('|')]
135 for warn in w:
136 found = False
137 for match in matching:
138 if re.search(match, str(warn.message)) is not None:
139 found = True
140 if match in remaining:
141 remaining.remove(match)
142 if strict_warnings and not found:
143 raise ValueError(f'Unexpected warning: {str(warn.message)}')
144 if strict_warnings and (len(remaining) > 0):
145 msg = f"No warning raised matching:\n{{'\n'.join(remaining)}}"
146 raise ValueError(msg)
147
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/skimage/_shared/_warnings.py b/skimage/_shared/_warnings.py
--- a/skimage/_shared/_warnings.py
+++ b/skimage/_shared/_warnings.py
@@ -142,5 +142,6 @@
if strict_warnings and not found:
raise ValueError(f'Unexpected warning: {str(warn.message)}')
if strict_warnings and (len(remaining) > 0):
- msg = f"No warning raised matching:\n{{'\n'.join(remaining)}}"
+ newline = "\n"
+ msg = f"No warning raised matching:{newline}{newline.join(remaining)}"
raise ValueError(msg)
| {"golden_diff": "diff --git a/skimage/_shared/_warnings.py b/skimage/_shared/_warnings.py\n--- a/skimage/_shared/_warnings.py\n+++ b/skimage/_shared/_warnings.py\n@@ -142,5 +142,6 @@\n if strict_warnings and not found:\n raise ValueError(f'Unexpected warning: {str(warn.message)}')\n if strict_warnings and (len(remaining) > 0):\n- msg = f\"No warning raised matching:\\n{{'\\n'.join(remaining)}}\"\n+ newline = \"\\n\"\n+ msg = f\"No warning raised matching:{newline}{newline.join(remaining)}\"\n raise ValueError(msg)\n", "issue": "Invalid f-string in _warnings\n### Description:\r\n\r\n```\r\nf4978b1149 skimage/_shared/_warnings.py (Jarrod Millman 2022-10-11 17:14:49 -0700 145) msg = f\"No warning raised matching:\\n{{'\\n'.join(remaining)}}\"\r\nc0a0490eed skimage/_shared/_warnings.py (Steven Silvester 2014-12-23 10:59:47 -0600 146) raise ValueError(msg)\r\n```\r\n\r\nThat f-string cannot render correctly.\r\n\r\n\n", "before_files": [{"content": "from contextlib import contextmanager\nimport sys\nimport warnings\nimport re\nimport functools\nimport os\n\n__all__ = ['all_warnings', 'expected_warnings', 'warn']\n\n\n# A version of `warnings.warn` with a default stacklevel of 2.\n# functool is used so as not to increase the call stack accidentally\nwarn = functools.partial(warnings.warn, stacklevel=2)\n\n\n@contextmanager\ndef all_warnings():\n \"\"\"\n Context for use in testing to ensure that all warnings are raised.\n\n Examples\n --------\n >>> import warnings\n >>> def foo():\n ... warnings.warn(RuntimeWarning(\"bar\"), stacklevel=2)\n\n We raise the warning once, while the warning filter is set to \"once\".\n Hereafter, the warning is invisible, even with custom filters:\n\n >>> with warnings.catch_warnings():\n ... warnings.simplefilter('once')\n ... foo() # doctest: +SKIP\n\n We can now run ``foo()`` without a warning being raised:\n\n >>> from numpy.testing import assert_warns\n >>> foo() # doctest: +SKIP\n\n To catch the warning, we call in the help of ``all_warnings``:\n\n >>> with all_warnings():\n ... assert_warns(RuntimeWarning, foo)\n \"\"\"\n # _warnings.py is on the critical import path.\n # Since this is a testing only function, we lazy import inspect.\n import inspect\n # Whenever a warning is triggered, Python adds a __warningregistry__\n # member to the *calling* module. The exercise here is to find\n # and eradicate all those breadcrumbs that were left lying around.\n #\n # We proceed by first searching all parent calling frames and explicitly\n # clearing their warning registries (necessary for the doctests above to\n # pass). 
Then, we search for all submodules of skimage and clear theirs\n # as well (necessary for the skimage test suite to pass).\n\n frame = inspect.currentframe()\n if frame:\n for f in inspect.getouterframes(frame):\n f[0].f_locals['__warningregistry__'] = {}\n del frame\n\n for mod_name, mod in list(sys.modules.items()):\n try:\n mod.__warningregistry__.clear()\n except AttributeError:\n pass\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n yield w\n\n\n@contextmanager\ndef expected_warnings(matching):\n r\"\"\"Context for use in testing to catch known warnings matching regexes\n\n Parameters\n ----------\n matching : None or a list of strings or compiled regexes\n Regexes for the desired warning to catch\n If matching is None, this behaves as a no-op.\n\n Examples\n --------\n >>> import numpy as np\n >>> rng = np.random.default_rng()\n >>> image = rng.integers(0, 2**16, size=(100, 100), dtype=np.uint16)\n >>> # rank filters are slow when bit-depth exceeds 10 bits\n >>> from skimage import filters\n >>> with expected_warnings(['Bad rank filter performance']):\n ... median_filtered = filters.rank.median(image)\n\n Notes\n -----\n Uses `all_warnings` to ensure all warnings are raised.\n Upon exiting, it checks the recorded warnings for the desired matching\n pattern(s).\n Raises a ValueError if any match was not found or an unexpected\n warning was raised.\n Allows for three types of behaviors: `and`, `or`, and `optional` matches.\n This is done to accommodate different build environments or loop conditions\n that may produce different warnings. The behaviors can be combined.\n If you pass multiple patterns, you get an orderless `and`, where all of the\n warnings must be raised.\n If you use the `|` operator in a pattern, you can catch one of several\n warnings.\n Finally, you can use `|\\A\\Z` in a pattern to signify it as optional.\n\n \"\"\"\n if isinstance(matching, str):\n raise ValueError('``matching`` should be a list of strings and not '\n 'a string itself.')\n\n # Special case for disabling the context manager\n if matching is None:\n yield None\n return\n\n strict_warnings = os.environ.get('SKIMAGE_TEST_STRICT_WARNINGS', '1')\n if strict_warnings.lower() == 'true':\n strict_warnings = True\n elif strict_warnings.lower() == 'false':\n strict_warnings = False\n else:\n strict_warnings = bool(int(strict_warnings))\n\n with all_warnings() as w:\n # enter context\n yield w\n # exited user context, check the recorded warnings\n # Allow users to provide None\n while None in matching:\n matching.remove(None)\n remaining = [m for m in matching if r'\\A\\Z' not in m.split('|')]\n for warn in w:\n found = False\n for match in matching:\n if re.search(match, str(warn.message)) is not None:\n found = True\n if match in remaining:\n remaining.remove(match)\n if strict_warnings and not found:\n raise ValueError(f'Unexpected warning: {str(warn.message)}')\n if strict_warnings and (len(remaining) > 0):\n msg = f\"No warning raised matching:\\n{{'\\n'.join(remaining)}}\"\n raise ValueError(msg)\n", "path": "skimage/_shared/_warnings.py"}], "after_files": [{"content": "from contextlib import contextmanager\nimport sys\nimport warnings\nimport re\nimport functools\nimport os\n\n__all__ = ['all_warnings', 'expected_warnings', 'warn']\n\n\n# A version of `warnings.warn` with a default stacklevel of 2.\n# functool is used so as not to increase the call stack accidentally\nwarn = functools.partial(warnings.warn, stacklevel=2)\n\n\n@contextmanager\ndef 
all_warnings():\n \"\"\"\n Context for use in testing to ensure that all warnings are raised.\n\n Examples\n --------\n >>> import warnings\n >>> def foo():\n ... warnings.warn(RuntimeWarning(\"bar\"), stacklevel=2)\n\n We raise the warning once, while the warning filter is set to \"once\".\n Hereafter, the warning is invisible, even with custom filters:\n\n >>> with warnings.catch_warnings():\n ... warnings.simplefilter('once')\n ... foo() # doctest: +SKIP\n\n We can now run ``foo()`` without a warning being raised:\n\n >>> from numpy.testing import assert_warns\n >>> foo() # doctest: +SKIP\n\n To catch the warning, we call in the help of ``all_warnings``:\n\n >>> with all_warnings():\n ... assert_warns(RuntimeWarning, foo)\n \"\"\"\n # _warnings.py is on the critical import path.\n # Since this is a testing only function, we lazy import inspect.\n import inspect\n # Whenever a warning is triggered, Python adds a __warningregistry__\n # member to the *calling* module. The exercise here is to find\n # and eradicate all those breadcrumbs that were left lying around.\n #\n # We proceed by first searching all parent calling frames and explicitly\n # clearing their warning registries (necessary for the doctests above to\n # pass). Then, we search for all submodules of skimage and clear theirs\n # as well (necessary for the skimage test suite to pass).\n\n frame = inspect.currentframe()\n if frame:\n for f in inspect.getouterframes(frame):\n f[0].f_locals['__warningregistry__'] = {}\n del frame\n\n for mod_name, mod in list(sys.modules.items()):\n try:\n mod.__warningregistry__.clear()\n except AttributeError:\n pass\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n yield w\n\n\n@contextmanager\ndef expected_warnings(matching):\n r\"\"\"Context for use in testing to catch known warnings matching regexes\n\n Parameters\n ----------\n matching : None or a list of strings or compiled regexes\n Regexes for the desired warning to catch\n If matching is None, this behaves as a no-op.\n\n Examples\n --------\n >>> import numpy as np\n >>> rng = np.random.default_rng()\n >>> image = rng.integers(0, 2**16, size=(100, 100), dtype=np.uint16)\n >>> # rank filters are slow when bit-depth exceeds 10 bits\n >>> from skimage import filters\n >>> with expected_warnings(['Bad rank filter performance']):\n ... median_filtered = filters.rank.median(image)\n\n Notes\n -----\n Uses `all_warnings` to ensure all warnings are raised.\n Upon exiting, it checks the recorded warnings for the desired matching\n pattern(s).\n Raises a ValueError if any match was not found or an unexpected\n warning was raised.\n Allows for three types of behaviors: `and`, `or`, and `optional` matches.\n This is done to accommodate different build environments or loop conditions\n that may produce different warnings. 
The behaviors can be combined.\n If you pass multiple patterns, you get an orderless `and`, where all of the\n warnings must be raised.\n If you use the `|` operator in a pattern, you can catch one of several\n warnings.\n Finally, you can use `|\\A\\Z` in a pattern to signify it as optional.\n\n \"\"\"\n if isinstance(matching, str):\n raise ValueError('``matching`` should be a list of strings and not '\n 'a string itself.')\n\n # Special case for disabling the context manager\n if matching is None:\n yield None\n return\n\n strict_warnings = os.environ.get('SKIMAGE_TEST_STRICT_WARNINGS', '1')\n if strict_warnings.lower() == 'true':\n strict_warnings = True\n elif strict_warnings.lower() == 'false':\n strict_warnings = False\n else:\n strict_warnings = bool(int(strict_warnings))\n\n with all_warnings() as w:\n # enter context\n yield w\n # exited user context, check the recorded warnings\n # Allow users to provide None\n while None in matching:\n matching.remove(None)\n remaining = [m for m in matching if r'\\A\\Z' not in m.split('|')]\n for warn in w:\n found = False\n for match in matching:\n if re.search(match, str(warn.message)) is not None:\n found = True\n if match in remaining:\n remaining.remove(match)\n if strict_warnings and not found:\n raise ValueError(f'Unexpected warning: {str(warn.message)}')\n if strict_warnings and (len(remaining) > 0):\n newline = \"\\n\"\n msg = f\"No warning raised matching:{newline}{newline.join(remaining)}\"\n raise ValueError(msg)\n", "path": "skimage/_shared/_warnings.py"}]} | 1,903 | 144 |
gh_patches_debug_53391 | rasdani/github-patches | git_diff | dask__distributed-8116 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
P2P with null partitions fails
I came across a use case where P2P shuffling fails when a column in a partition has all null values. Here's a minimal reproducer:
```python
import numpy as np
import pandas as pd
import dask.dataframe as dd
from dask.distributed import Client
def make_partition(i):
"""Return null column for one partition"""
if i == 1:
return pd.DataFrame({"a": np.random.random(10), "b": None})
return pd.DataFrame({"a": np.random.random(10), "b": np.random.random(10)})
if __name__ == "__main__":
with Client() as client:
ddf = dd.from_map(make_partition, range(10))
result = ddf.set_index("a", shuffle="p2p").compute()
print(result)
```
which raises the following error:
```
Traceback (most recent call last):
File "/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/distributed/shuffle/_shuffle.py", line 96, in shuffle_barrier
return _get_worker_plugin().barrier(id, run_ids)
File "/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/distributed/shuffle/_worker_plugin.py", line 925, in barrier
result = sync(self.worker.loop, self._barrier, shuffle_id, run_ids)
File "/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/distributed/utils.py", line 426, in sync
raise exc.with_traceback(tb)
File "/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/distributed/utils.py", line 399, in f
result = yield future
File "/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/tornado/gen.py", line 767, in run
value = future.result()
File "/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/distributed/shuffle/_worker_plugin.py", line 689, in _barrier
await shuffle.barrier()
File "/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/distributed/shuffle/_worker_plugin.py", line 116, in barrier
await self.scheduler.shuffle_barrier(id=self.id, run_id=self.run_id)
File "/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/distributed/core.py", line 1374, in send_recv_from_rpc
return await send_recv(comm=comm, op=key, **kwargs)
File "/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/distributed/core.py", line 1158, in send_recv
raise exc.with_traceback(tb)
File "/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/distributed/core.py", line 930, in _handle_comm
result = await result
File "/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/distributed/shuffle/_scheduler_plugin.py", line 139, in barrier
await self.scheduler.broadcast(
File "/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/distributed/scheduler.py", line 6169, in broadcast
results = await All(
File "/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/distributed/utils.py", line 252, in All
result = await tasks.next()
File "/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/distributed/scheduler.py", line 6147, in send_message
resp = await send_recv(
File "/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/distributed/core.py", line 1160, in send_recv
raise Exception(response["exception_text"])
Exception: ArrowInvalid('Schema at index 1 was different: \na: double\nb: null\n_partitions: int64\n__index_level_0__: int64\nvs\na: double\nb: double\n_partitions: int64\n__index_level_0__: int64')
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/Users/james/projects/dask/dask/test-p2p-shuffle.py", line 16, in <module>
result = ddf.set_index("a", shuffle="p2p").compute()
File "/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/distributed/shuffle/_shuffle.py", line 98, in shuffle_barrier
raise RuntimeError(f"shuffle_barrier failed during shuffle {id}") from e
RuntimeError: shuffle_barrier failed during shuffle 2b30bc4838ba6b632ee7d432b2b31dc8
```
Interestingly, this snippet _usually_ fails, but I happened to notice it sometimes runs successfully, which in itself is also interesting
```
b
a
0.015788 0.677673
0.019857 0.481580
0.027898 0.564877
0.031679 0.442530
0.048167 0.990417
... ...
0.957410 0.651139
0.969251 NaN
0.976877 0.369628
0.984942 NaN
0.999345 0.926310
[100 rows x 1 columns]
```
cc @hendrikmakait for visibility
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `distributed/shuffle/_arrow.py`
Content:
```
1 from __future__ import annotations
2
3 from io import BytesIO
4 from typing import TYPE_CHECKING
5
6 from packaging.version import parse
7
8 if TYPE_CHECKING:
9 import pandas as pd
10 import pyarrow as pa
11
12
13 def check_dtype_support(meta_input: pd.DataFrame) -> None:
14 import pandas as pd
15
16 for name in meta_input:
17 column = meta_input[name]
18 # FIXME: PyArrow does not support complex numbers: https://issues.apache.org/jira/browse/ARROW-638
19 if pd.api.types.is_complex_dtype(column):
20 raise TypeError(
21 f"p2p does not support data of type '{column.dtype}' found in column '{name}'."
22 )
23 # FIXME: PyArrow does not support sparse data: https://issues.apache.org/jira/browse/ARROW-8679
24 if isinstance(column.dtype, pd.SparseDtype):
25 raise TypeError("p2p does not support sparse data found in column '{name}'")
26
27
28 def check_minimal_arrow_version() -> None:
29 """Verify that the the correct version of pyarrow is installed to support
30 the P2P extension.
31
32 Raises a RuntimeError in case pyarrow is not installed or installed version
33 is not recent enough.
34 """
35 # First version to introduce Table.sort_by
36 minversion = "7.0.0"
37 try:
38 import pyarrow as pa
39 except ImportError:
40 raise RuntimeError(f"P2P shuffling requires pyarrow>={minversion}")
41
42 if parse(pa.__version__) < parse(minversion):
43 raise RuntimeError(
44 f"P2P shuffling requires pyarrow>={minversion} but only found {pa.__version__}"
45 )
46
47
48 def convert_partition(data: bytes, meta: pd.DataFrame) -> pd.DataFrame:
49 import pyarrow as pa
50
51 from dask.dataframe.dispatch import from_pyarrow_table_dispatch
52
53 file = BytesIO(data)
54 end = len(data)
55 shards = []
56 while file.tell() < end:
57 sr = pa.RecordBatchStreamReader(file)
58 shards.append(sr.read_all())
59 table = pa.concat_tables(shards, promote=True)
60
61 df = from_pyarrow_table_dispatch(meta, table, self_destruct=True)
62 return df.astype(meta.dtypes, copy=False)
63
64
65 def list_of_buffers_to_table(data: list[bytes]) -> pa.Table:
66 """Convert a list of arrow buffers and a schema to an Arrow Table"""
67 import pyarrow as pa
68
69 return pa.concat_tables(deserialize_table(buffer) for buffer in data)
70
71
72 def serialize_table(table: pa.Table) -> bytes:
73 import pyarrow as pa
74
75 stream = pa.BufferOutputStream()
76 with pa.ipc.new_stream(stream, table.schema) as writer:
77 writer.write_table(table)
78 return stream.getvalue().to_pybytes()
79
80
81 def deserialize_table(buffer: bytes) -> pa.Table:
82 import pyarrow as pa
83
84 with pa.ipc.open_stream(pa.py_buffer(buffer)) as reader:
85 return reader.read_all()
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/distributed/shuffle/_arrow.py b/distributed/shuffle/_arrow.py
--- a/distributed/shuffle/_arrow.py
+++ b/distributed/shuffle/_arrow.py
@@ -79,7 +79,9 @@
"""Convert a list of arrow buffers and a schema to an Arrow Table"""
import pyarrow as pa
- return pa.concat_tables(deserialize_table(buffer) for buffer in data)
+ return pa.concat_tables(
+ (deserialize_table(buffer) for buffer in data), promote=True
+ )
def serialize_table(table: pa.Table) -> bytes:
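
For context, a small standalone sketch (assuming pyarrow >= 7, the minimum the P2P code checks for) of the failure mode in the traceback and of what `promote=True` changes: an all-null partition arrives as a `null`-typed column, and promotion lets it concatenate with the `double`-typed partitions:

```python
import pyarrow as pa

typed = pa.table({"b": pa.array([0.1, 0.2], type=pa.float64())})
nulls = pa.table({"b": pa.array([None, None], type=pa.null())})  # all-null partition

try:
    pa.concat_tables([typed, nulls])       # schemas differ: double vs null
except pa.ArrowInvalid as exc:
    print(exc)                             # "Schema at index 1 was different: ..."

merged = pa.concat_tables([typed, nulls], promote=True)
print(merged.schema)                       # b: double (null column promoted)
```

The patch applies the same keyword in `list_of_buffers_to_table`, mirroring the `promote=True` that `convert_partition` already passes when it concatenates shards.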
| {"golden_diff": "diff --git a/distributed/shuffle/_arrow.py b/distributed/shuffle/_arrow.py\n--- a/distributed/shuffle/_arrow.py\n+++ b/distributed/shuffle/_arrow.py\n@@ -79,7 +79,9 @@\n \"\"\"Convert a list of arrow buffers and a schema to an Arrow Table\"\"\"\n import pyarrow as pa\n \n- return pa.concat_tables(deserialize_table(buffer) for buffer in data)\n+ return pa.concat_tables(\n+ (deserialize_table(buffer) for buffer in data), promote=True\n+ )\n \n \n def serialize_table(table: pa.Table) -> bytes:\n", "issue": "P2P with null partitions fails\nI came across a use case where P2P shuffling fails when a column in a partition has all null values. Here's a minimal reproducer:\r\n\r\n```python\r\nimport numpy as np\r\nimport pandas as pd\r\nimport dask.dataframe as dd\r\nfrom dask.distributed import Client\r\n\r\ndef make_partition(i):\r\n \"\"\"Return null column for one partition\"\"\"\r\n if i == 1:\r\n return pd.DataFrame({\"a\": np.random.random(10), \"b\": None})\r\n return pd.DataFrame({\"a\": np.random.random(10), \"b\": np.random.random(10)})\r\n\r\n\r\nif __name__ == \"__main__\":\r\n with Client() as client:\r\n ddf = dd.from_map(make_partition, range(10))\r\n result = ddf.set_index(\"a\", shuffle=\"p2p\").compute()\r\n print(result)\r\n```\r\n\r\nwhich raises the following error:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/distributed/shuffle/_shuffle.py\", line 96, in shuffle_barrier\r\n return _get_worker_plugin().barrier(id, run_ids)\r\n File \"/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/distributed/shuffle/_worker_plugin.py\", line 925, in barrier\r\n result = sync(self.worker.loop, self._barrier, shuffle_id, run_ids)\r\n File \"/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/distributed/utils.py\", line 426, in sync\r\n raise exc.with_traceback(tb)\r\n File \"/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/distributed/utils.py\", line 399, in f\r\n result = yield future\r\n File \"/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/tornado/gen.py\", line 767, in run\r\n value = future.result()\r\n File \"/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/distributed/shuffle/_worker_plugin.py\", line 689, in _barrier\r\n await shuffle.barrier()\r\n File \"/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/distributed/shuffle/_worker_plugin.py\", line 116, in barrier\r\n await self.scheduler.shuffle_barrier(id=self.id, run_id=self.run_id)\r\n File \"/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/distributed/core.py\", line 1374, in send_recv_from_rpc\r\n return await send_recv(comm=comm, op=key, **kwargs)\r\n File \"/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/distributed/core.py\", line 1158, in send_recv\r\n raise exc.with_traceback(tb)\r\n File \"/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/distributed/core.py\", line 930, in _handle_comm\r\n result = await result\r\n File \"/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/distributed/shuffle/_scheduler_plugin.py\", line 139, in barrier\r\n await self.scheduler.broadcast(\r\n File \"/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/distributed/scheduler.py\", line 6169, in broadcast\r\n results = await All(\r\n File \"/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/distributed/utils.py\", line 252, in All\r\n 
result = await tasks.next()\r\n File \"/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/distributed/scheduler.py\", line 6147, in send_message\r\n resp = await send_recv(\r\n File \"/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/distributed/core.py\", line 1160, in send_recv\r\n raise Exception(response[\"exception_text\"])\r\nException: ArrowInvalid('Schema at index 1 was different: \\na: double\\nb: null\\n_partitions: int64\\n__index_level_0__: int64\\nvs\\na: double\\nb: double\\n_partitions: int64\\n__index_level_0__: int64')\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"/Users/james/projects/dask/dask/test-p2p-shuffle.py\", line 16, in <module>\r\n result = ddf.set_index(\"a\", shuffle=\"p2p\").compute()\r\n File \"/Users/james/mambaforge/envs/dask-py39/lib/python3.9/site-packages/distributed/shuffle/_shuffle.py\", line 98, in shuffle_barrier\r\n raise RuntimeError(f\"shuffle_barrier failed during shuffle {id}\") from e\r\nRuntimeError: shuffle_barrier failed during shuffle 2b30bc4838ba6b632ee7d432b2b31dc8\r\n```\r\n\r\nInterestingly this snippet _usually_ fails, but I happened to notice it sometimes runs successfully which is in itself is also interesting \r\n\r\n```\r\n b\r\na\r\n0.015788 0.677673\r\n0.019857 0.481580\r\n0.027898 0.564877\r\n0.031679 0.442530\r\n0.048167 0.990417\r\n... ...\r\n0.957410 0.651139\r\n0.969251 NaN\r\n0.976877 0.369628\r\n0.984942 NaN\r\n0.999345 0.926310\r\n\r\n[100 rows x 1 columns]\r\n```\r\n\r\ncc @hendrikmakait for visibility \n", "before_files": [{"content": "from __future__ import annotations\n\nfrom io import BytesIO\nfrom typing import TYPE_CHECKING\n\nfrom packaging.version import parse\n\nif TYPE_CHECKING:\n import pandas as pd\n import pyarrow as pa\n\n\ndef check_dtype_support(meta_input: pd.DataFrame) -> None:\n import pandas as pd\n\n for name in meta_input:\n column = meta_input[name]\n # FIXME: PyArrow does not support complex numbers: https://issues.apache.org/jira/browse/ARROW-638\n if pd.api.types.is_complex_dtype(column):\n raise TypeError(\n f\"p2p does not support data of type '{column.dtype}' found in column '{name}'.\"\n )\n # FIXME: PyArrow does not support sparse data: https://issues.apache.org/jira/browse/ARROW-8679\n if isinstance(column.dtype, pd.SparseDtype):\n raise TypeError(\"p2p does not support sparse data found in column '{name}'\")\n\n\ndef check_minimal_arrow_version() -> None:\n \"\"\"Verify that the the correct version of pyarrow is installed to support\n the P2P extension.\n\n Raises a RuntimeError in case pyarrow is not installed or installed version\n is not recent enough.\n \"\"\"\n # First version to introduce Table.sort_by\n minversion = \"7.0.0\"\n try:\n import pyarrow as pa\n except ImportError:\n raise RuntimeError(f\"P2P shuffling requires pyarrow>={minversion}\")\n\n if parse(pa.__version__) < parse(minversion):\n raise RuntimeError(\n f\"P2P shuffling requires pyarrow>={minversion} but only found {pa.__version__}\"\n )\n\n\ndef convert_partition(data: bytes, meta: pd.DataFrame) -> pd.DataFrame:\n import pyarrow as pa\n\n from dask.dataframe.dispatch import from_pyarrow_table_dispatch\n\n file = BytesIO(data)\n end = len(data)\n shards = []\n while file.tell() < end:\n sr = pa.RecordBatchStreamReader(file)\n shards.append(sr.read_all())\n table = pa.concat_tables(shards, promote=True)\n\n df = from_pyarrow_table_dispatch(meta, table, self_destruct=True)\n return 
df.astype(meta.dtypes, copy=False)\n\n\ndef list_of_buffers_to_table(data: list[bytes]) -> pa.Table:\n \"\"\"Convert a list of arrow buffers and a schema to an Arrow Table\"\"\"\n import pyarrow as pa\n\n return pa.concat_tables(deserialize_table(buffer) for buffer in data)\n\n\ndef serialize_table(table: pa.Table) -> bytes:\n import pyarrow as pa\n\n stream = pa.BufferOutputStream()\n with pa.ipc.new_stream(stream, table.schema) as writer:\n writer.write_table(table)\n return stream.getvalue().to_pybytes()\n\n\ndef deserialize_table(buffer: bytes) -> pa.Table:\n import pyarrow as pa\n\n with pa.ipc.open_stream(pa.py_buffer(buffer)) as reader:\n return reader.read_all()\n", "path": "distributed/shuffle/_arrow.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom io import BytesIO\nfrom typing import TYPE_CHECKING\n\nfrom packaging.version import parse\n\nif TYPE_CHECKING:\n import pandas as pd\n import pyarrow as pa\n\n\ndef check_dtype_support(meta_input: pd.DataFrame) -> None:\n import pandas as pd\n\n for name in meta_input:\n column = meta_input[name]\n # FIXME: PyArrow does not support complex numbers: https://issues.apache.org/jira/browse/ARROW-638\n if pd.api.types.is_complex_dtype(column):\n raise TypeError(\n f\"p2p does not support data of type '{column.dtype}' found in column '{name}'.\"\n )\n # FIXME: PyArrow does not support sparse data: https://issues.apache.org/jira/browse/ARROW-8679\n if isinstance(column.dtype, pd.SparseDtype):\n raise TypeError(\"p2p does not support sparse data found in column '{name}'\")\n\n\ndef check_minimal_arrow_version() -> None:\n \"\"\"Verify that the the correct version of pyarrow is installed to support\n the P2P extension.\n\n Raises a RuntimeError in case pyarrow is not installed or installed version\n is not recent enough.\n \"\"\"\n # First version to introduce Table.sort_by\n minversion = \"7.0.0\"\n try:\n import pyarrow as pa\n except ImportError:\n raise RuntimeError(f\"P2P shuffling requires pyarrow>={minversion}\")\n\n if parse(pa.__version__) < parse(minversion):\n raise RuntimeError(\n f\"P2P shuffling requires pyarrow>={minversion} but only found {pa.__version__}\"\n )\n\n\ndef convert_partition(data: bytes, meta: pd.DataFrame) -> pd.DataFrame:\n import pandas as pd\n import pyarrow as pa\n\n from dask.dataframe.dispatch import from_pyarrow_table_dispatch\n\n file = BytesIO(data)\n end = len(data)\n shards = []\n while file.tell() < end:\n sr = pa.RecordBatchStreamReader(file)\n shards.append(sr.read_all())\n table = pa.concat_tables(shards, promote=True)\n\n def default_types_mapper(pyarrow_dtype: pa.DataType) -> object:\n # Avoid converting strings from `string[pyarrow]` to `string[python]`\n # if we have *some* `string[pyarrow]`\n if (\n pyarrow_dtype in {pa.large_string(), pa.string()}\n and pd.StringDtype(\"pyarrow\") in meta.dtypes.values\n ):\n return pd.StringDtype(\"pyarrow\")\n return None\n\n df = from_pyarrow_table_dispatch(\n meta, table, self_destruct=True, types_mapper=default_types_mapper\n )\n return df.astype(meta.dtypes, copy=False)\n\n\ndef list_of_buffers_to_table(data: list[bytes]) -> pa.Table:\n \"\"\"Convert a list of arrow buffers and a schema to an Arrow Table\"\"\"\n import pyarrow as pa\n\n return pa.concat_tables(\n (deserialize_table(buffer) for buffer in data), promote=True\n )\n\n\ndef serialize_table(table: pa.Table) -> bytes:\n import pyarrow as pa\n\n stream = pa.BufferOutputStream()\n with pa.ipc.new_stream(stream, table.schema) as writer:\n writer.write_table(table)\n 
return stream.getvalue().to_pybytes()\n\n\ndef deserialize_table(buffer: bytes) -> pa.Table:\n import pyarrow as pa\n\n with pa.ipc.open_stream(pa.py_buffer(buffer)) as reader:\n return reader.read_all()\n", "path": "distributed/shuffle/_arrow.py"}]} | 2,474 | 128 |
gh_patches_debug_21453 | rasdani/github-patches | git_diff | mozmeao__basket-1036 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add content-type header for Acoustic Transact requests
Transact customers using OAuth for submissions should add the header:
`Content-Type : text/plain` or
`Content-Type : text/xml`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `basket/news/backends/acoustic.py`
Content:
```
1 import logging
2
3 from django.conf import settings
4 from django.utils.encoding import force_bytes
5
6 from lxml import etree
7 from requests import ConnectionError
8 from silverpop.api import Silverpop, SilverpopResponseException
9
10 logger = logging.getLogger(__name__)
11 XML_HEADER = '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
12
13
14 def process_response(resp):
15 logger.debug("Response: %s" % resp.text)
16 response = etree.fromstring(resp.text.encode("utf-8"))
17 failure = response.find(".//FAILURES/FAILURE")
18 if failure:
19 raise SilverpopResponseException(failure.attrib["description"])
20
21 fault = response.find(".//Fault/FaultString")
22 if fault:
23 raise SilverpopResponseException(fault.text)
24
25 return response
26
27
28 def process_tx_response(resp):
29 logger.debug("Response: %s" % resp.text)
30 response = etree.fromstring(resp.text.encode("utf-8"))
31 errors = response.findall(".//ERROR_STRING")
32 if errors:
33 for e in errors:
34 if e.text:
35 raise SilverpopResponseException(e.text)
36
37 return response
38
39
40 def xml_tag(tag, value=None, cdata=False, **attrs):
41 xmlt = etree.Element(tag, attrs)
42 if value:
43 if cdata:
44 xmlt.text = etree.CDATA(value)
45 else:
46 xmlt.text = value
47
48 return xmlt
49
50
51 def transact_xml(to, campaign_id, fields=None, bcc=None, save_to_db=False):
52 fields = fields or {}
53 bcc = bcc or []
54 if isinstance(bcc, str):
55 bcc = [bcc]
56
57 root = xml_tag("XTMAILING")
58 root.append(xml_tag("CAMPAIGN_ID", campaign_id))
59 if "transaction_id" in fields:
60 root.append(xml_tag("TRANSACTION_ID", fields["transaction_id"]))
61
62 root.append(xml_tag("SEND_AS_BATCH", "false"))
63 root.append(xml_tag("NO_RETRY_ON_FAILURE", "false"))
64 if fields and save_to_db:
65 save_cols_tag = xml_tag("SAVE_COLUMNS")
66 root.append(save_cols_tag)
67 for name in fields:
68 save_cols_tag.append(xml_tag("COLUMN_NAME", name))
69
70 recipient_tag = xml_tag("RECIPIENT")
71 root.append(recipient_tag)
72 recipient_tag.append(xml_tag("EMAIL", to))
73 for addr in bcc:
74 recipient_tag.append(xml_tag("BCC", addr))
75 recipient_tag.append(xml_tag("BODY_TYPE", "HTML"))
76 for name, value in fields.items():
77 p_tag = xml_tag("PERSONALIZATION")
78 p_tag.append(xml_tag("TAG_NAME", name))
79 p_tag.append(xml_tag("VALUE", value))
80 recipient_tag.append(p_tag)
81
82 return XML_HEADER + etree.tostring(root, encoding="unicode")
83
84
85 class Acoustic(Silverpop):
86 def _call(self, xml):
87 logger.debug("Request: %s" % xml)
88 try:
89 response = self.session.post(
90 self.api_endpoint,
91 data=force_bytes(xml),
92 timeout=10,
93 )
94 except ConnectionError:
95 # try one more time
96 response = self.session.post(
97 self.api_endpoint,
98 data=force_bytes(xml),
99 timeout=10,
100 )
101
102 return process_response(response)
103
104
105 class AcousticTransact(Silverpop):
106 api_xt_endpoint = "https://transact-campaign-us-%s.goacoustic.com/XTMail"
107
108 def __init__(self, client_id, client_secret, refresh_token, server_number):
109 self.api_xt_endpoint = self.api_xt_endpoint % server_number
110 super().__init__(client_id, client_secret, refresh_token, server_number)
111
112 def _call_xt(self, xml):
113 logger.debug("Request: %s" % xml)
114 response = self.session.post(
115 self.api_xt_endpoint,
116 data=force_bytes(xml),
117 timeout=10,
118 )
119 return process_tx_response(response)
120
121 def send_mail(self, to, campaign_id, fields=None, bcc=None, save_to_db=False):
122 self._call_xt(transact_xml(to, campaign_id, fields, bcc, save_to_db))
123
124
125 acoustic = Acoustic(
126 client_id=settings.ACOUSTIC_CLIENT_ID,
127 client_secret=settings.ACOUSTIC_CLIENT_SECRET,
128 refresh_token=settings.ACOUSTIC_REFRESH_TOKEN,
129 server_number=settings.ACOUSTIC_SERVER_NUMBER,
130 )
131 acoustic_tx = AcousticTransact(
132 client_id=settings.ACOUSTIC_TX_CLIENT_ID,
133 client_secret=settings.ACOUSTIC_TX_CLIENT_SECRET,
134 refresh_token=settings.ACOUSTIC_TX_REFRESH_TOKEN,
135 server_number=settings.ACOUSTIC_TX_SERVER_NUMBER,
136 )
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/basket/news/backends/acoustic.py b/basket/news/backends/acoustic.py
--- a/basket/news/backends/acoustic.py
+++ b/basket/news/backends/acoustic.py
@@ -90,6 +90,7 @@
self.api_endpoint,
data=force_bytes(xml),
timeout=10,
+ headers={"Content-Type": "text/xml"},
)
except ConnectionError:
# try one more time
@@ -97,6 +98,7 @@
self.api_endpoint,
data=force_bytes(xml),
timeout=10,
+ headers={"Content-Type": "text/xml"},
)
return process_response(response)
@@ -115,6 +117,7 @@
self.api_xt_endpoint,
data=force_bytes(xml),
timeout=10,
+ headers={"Content-Type": "text/xml"},
)
return process_tx_response(response)
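
For reference, a minimal sketch of what the patched call sends (the endpoint and payload below are placeholders, not real campaign data); `requests.Session` merges the per-call `headers` dict with whatever OAuth headers the session already carries:

```python
import requests

session = requests.Session()   # in basket this is the silverpop OAuth session
xml_payload = b'<?xml version="1.0" encoding="UTF-8"?><XTMAILING>...</XTMAILING>'

# Placeholder endpoint; the real pod number comes from ACOUSTIC_TX_SERVER_NUMBER.
response = session.post(
    "https://transact-campaign-us-1.goacoustic.com/XTMail",
    data=xml_payload,
    timeout=10,
    headers={"Content-Type": "text/xml"},  # the header the issue asks for
)
print(response.status_code)
```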
| {"golden_diff": "diff --git a/basket/news/backends/acoustic.py b/basket/news/backends/acoustic.py\n--- a/basket/news/backends/acoustic.py\n+++ b/basket/news/backends/acoustic.py\n@@ -90,6 +90,7 @@\n self.api_endpoint,\n data=force_bytes(xml),\n timeout=10,\n+ headers={\"Content-Type\": \"text/xml\"},\n )\n except ConnectionError:\n # try one more time\n@@ -97,6 +98,7 @@\n self.api_endpoint,\n data=force_bytes(xml),\n timeout=10,\n+ headers={\"Content-Type\": \"text/xml\"},\n )\n \n return process_response(response)\n@@ -115,6 +117,7 @@\n self.api_xt_endpoint,\n data=force_bytes(xml),\n timeout=10,\n+ headers={\"Content-Type\": \"text/xml\"},\n )\n return process_tx_response(response)\n", "issue": "Add content-type header for Acoustic Transact requests\nTransact customers using Oauth for submissions should add header: \r\n\r\n`Content-Type : text/plain` or \r\n`Content-Type : text/xml`\r\n\n", "before_files": [{"content": "import logging\n\nfrom django.conf import settings\nfrom django.utils.encoding import force_bytes\n\nfrom lxml import etree\nfrom requests import ConnectionError\nfrom silverpop.api import Silverpop, SilverpopResponseException\n\nlogger = logging.getLogger(__name__)\nXML_HEADER = '<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>'\n\n\ndef process_response(resp):\n logger.debug(\"Response: %s\" % resp.text)\n response = etree.fromstring(resp.text.encode(\"utf-8\"))\n failure = response.find(\".//FAILURES/FAILURE\")\n if failure:\n raise SilverpopResponseException(failure.attrib[\"description\"])\n\n fault = response.find(\".//Fault/FaultString\")\n if fault:\n raise SilverpopResponseException(fault.text)\n\n return response\n\n\ndef process_tx_response(resp):\n logger.debug(\"Response: %s\" % resp.text)\n response = etree.fromstring(resp.text.encode(\"utf-8\"))\n errors = response.findall(\".//ERROR_STRING\")\n if errors:\n for e in errors:\n if e.text:\n raise SilverpopResponseException(e.text)\n\n return response\n\n\ndef xml_tag(tag, value=None, cdata=False, **attrs):\n xmlt = etree.Element(tag, attrs)\n if value:\n if cdata:\n xmlt.text = etree.CDATA(value)\n else:\n xmlt.text = value\n\n return xmlt\n\n\ndef transact_xml(to, campaign_id, fields=None, bcc=None, save_to_db=False):\n fields = fields or {}\n bcc = bcc or []\n if isinstance(bcc, str):\n bcc = [bcc]\n\n root = xml_tag(\"XTMAILING\")\n root.append(xml_tag(\"CAMPAIGN_ID\", campaign_id))\n if \"transaction_id\" in fields:\n root.append(xml_tag(\"TRANSACTION_ID\", fields[\"transaction_id\"]))\n\n root.append(xml_tag(\"SEND_AS_BATCH\", \"false\"))\n root.append(xml_tag(\"NO_RETRY_ON_FAILURE\", \"false\"))\n if fields and save_to_db:\n save_cols_tag = xml_tag(\"SAVE_COLUMNS\")\n root.append(save_cols_tag)\n for name in fields:\n save_cols_tag.append(xml_tag(\"COLUMN_NAME\", name))\n\n recipient_tag = xml_tag(\"RECIPIENT\")\n root.append(recipient_tag)\n recipient_tag.append(xml_tag(\"EMAIL\", to))\n for addr in bcc:\n recipient_tag.append(xml_tag(\"BCC\", addr))\n recipient_tag.append(xml_tag(\"BODY_TYPE\", \"HTML\"))\n for name, value in fields.items():\n p_tag = xml_tag(\"PERSONALIZATION\")\n p_tag.append(xml_tag(\"TAG_NAME\", name))\n p_tag.append(xml_tag(\"VALUE\", value))\n recipient_tag.append(p_tag)\n\n return XML_HEADER + etree.tostring(root, encoding=\"unicode\")\n\n\nclass Acoustic(Silverpop):\n def _call(self, xml):\n logger.debug(\"Request: %s\" % xml)\n try:\n response = self.session.post(\n self.api_endpoint,\n data=force_bytes(xml),\n timeout=10,\n )\n except ConnectionError:\n # try 
one more time\n response = self.session.post(\n self.api_endpoint,\n data=force_bytes(xml),\n timeout=10,\n )\n\n return process_response(response)\n\n\nclass AcousticTransact(Silverpop):\n api_xt_endpoint = \"https://transact-campaign-us-%s.goacoustic.com/XTMail\"\n\n def __init__(self, client_id, client_secret, refresh_token, server_number):\n self.api_xt_endpoint = self.api_xt_endpoint % server_number\n super().__init__(client_id, client_secret, refresh_token, server_number)\n\n def _call_xt(self, xml):\n logger.debug(\"Request: %s\" % xml)\n response = self.session.post(\n self.api_xt_endpoint,\n data=force_bytes(xml),\n timeout=10,\n )\n return process_tx_response(response)\n\n def send_mail(self, to, campaign_id, fields=None, bcc=None, save_to_db=False):\n self._call_xt(transact_xml(to, campaign_id, fields, bcc, save_to_db))\n\n\nacoustic = Acoustic(\n client_id=settings.ACOUSTIC_CLIENT_ID,\n client_secret=settings.ACOUSTIC_CLIENT_SECRET,\n refresh_token=settings.ACOUSTIC_REFRESH_TOKEN,\n server_number=settings.ACOUSTIC_SERVER_NUMBER,\n)\nacoustic_tx = AcousticTransact(\n client_id=settings.ACOUSTIC_TX_CLIENT_ID,\n client_secret=settings.ACOUSTIC_TX_CLIENT_SECRET,\n refresh_token=settings.ACOUSTIC_TX_REFRESH_TOKEN,\n server_number=settings.ACOUSTIC_TX_SERVER_NUMBER,\n)\n", "path": "basket/news/backends/acoustic.py"}], "after_files": [{"content": "import logging\n\nfrom django.conf import settings\nfrom django.utils.encoding import force_bytes\n\nfrom lxml import etree\nfrom requests import ConnectionError\nfrom silverpop.api import Silverpop, SilverpopResponseException\n\nlogger = logging.getLogger(__name__)\nXML_HEADER = '<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>'\n\n\ndef process_response(resp):\n logger.debug(\"Response: %s\" % resp.text)\n response = etree.fromstring(resp.text.encode(\"utf-8\"))\n failure = response.find(\".//FAILURES/FAILURE\")\n if failure:\n raise SilverpopResponseException(failure.attrib[\"description\"])\n\n fault = response.find(\".//Fault/FaultString\")\n if fault:\n raise SilverpopResponseException(fault.text)\n\n return response\n\n\ndef process_tx_response(resp):\n logger.debug(\"Response: %s\" % resp.text)\n response = etree.fromstring(resp.text.encode(\"utf-8\"))\n errors = response.findall(\".//ERROR_STRING\")\n if errors:\n for e in errors:\n if e.text:\n raise SilverpopResponseException(e.text)\n\n return response\n\n\ndef xml_tag(tag, value=None, cdata=False, **attrs):\n xmlt = etree.Element(tag, attrs)\n if value:\n if cdata:\n xmlt.text = etree.CDATA(value)\n else:\n xmlt.text = value\n\n return xmlt\n\n\ndef transact_xml(to, campaign_id, fields=None, bcc=None, save_to_db=False):\n fields = fields or {}\n bcc = bcc or []\n if isinstance(bcc, str):\n bcc = [bcc]\n\n root = xml_tag(\"XTMAILING\")\n root.append(xml_tag(\"CAMPAIGN_ID\", campaign_id))\n if \"transaction_id\" in fields:\n root.append(xml_tag(\"TRANSACTION_ID\", fields[\"transaction_id\"]))\n\n root.append(xml_tag(\"SEND_AS_BATCH\", \"false\"))\n root.append(xml_tag(\"NO_RETRY_ON_FAILURE\", \"false\"))\n if fields and save_to_db:\n save_cols_tag = xml_tag(\"SAVE_COLUMNS\")\n root.append(save_cols_tag)\n for name in fields:\n save_cols_tag.append(xml_tag(\"COLUMN_NAME\", name))\n\n recipient_tag = xml_tag(\"RECIPIENT\")\n root.append(recipient_tag)\n recipient_tag.append(xml_tag(\"EMAIL\", to))\n for addr in bcc:\n recipient_tag.append(xml_tag(\"BCC\", addr))\n recipient_tag.append(xml_tag(\"BODY_TYPE\", \"HTML\"))\n for name, value in fields.items():\n p_tag = 
xml_tag(\"PERSONALIZATION\")\n p_tag.append(xml_tag(\"TAG_NAME\", name))\n p_tag.append(xml_tag(\"VALUE\", value))\n recipient_tag.append(p_tag)\n\n return XML_HEADER + etree.tostring(root, encoding=\"unicode\")\n\n\nclass Acoustic(Silverpop):\n def _call(self, xml):\n logger.debug(\"Request: %s\" % xml)\n try:\n response = self.session.post(\n self.api_endpoint,\n data=force_bytes(xml),\n timeout=10,\n headers={\"Content-Type\": \"text/xml\"},\n )\n except ConnectionError:\n # try one more time\n response = self.session.post(\n self.api_endpoint,\n data=force_bytes(xml),\n timeout=10,\n headers={\"Content-Type\": \"text/xml\"},\n )\n\n return process_response(response)\n\n\nclass AcousticTransact(Silverpop):\n api_xt_endpoint = \"https://transact-campaign-us-%s.goacoustic.com/XTMail\"\n\n def __init__(self, client_id, client_secret, refresh_token, server_number):\n self.api_xt_endpoint = self.api_xt_endpoint % server_number\n super().__init__(client_id, client_secret, refresh_token, server_number)\n\n def _call_xt(self, xml):\n logger.debug(\"Request: %s\" % xml)\n response = self.session.post(\n self.api_xt_endpoint,\n data=force_bytes(xml),\n timeout=10,\n headers={\"Content-Type\": \"text/xml\"},\n )\n return process_tx_response(response)\n\n def send_mail(self, to, campaign_id, fields=None, bcc=None, save_to_db=False):\n self._call_xt(transact_xml(to, campaign_id, fields, bcc, save_to_db))\n\n\nacoustic = Acoustic(\n client_id=settings.ACOUSTIC_CLIENT_ID,\n client_secret=settings.ACOUSTIC_CLIENT_SECRET,\n refresh_token=settings.ACOUSTIC_REFRESH_TOKEN,\n server_number=settings.ACOUSTIC_SERVER_NUMBER,\n)\nacoustic_tx = AcousticTransact(\n client_id=settings.ACOUSTIC_TX_CLIENT_ID,\n client_secret=settings.ACOUSTIC_TX_CLIENT_SECRET,\n refresh_token=settings.ACOUSTIC_TX_REFRESH_TOKEN,\n server_number=settings.ACOUSTIC_TX_SERVER_NUMBER,\n)\n", "path": "basket/news/backends/acoustic.py"}]} | 1,614 | 202 |
gh_patches_debug_12994 | rasdani/github-patches | git_diff | getsentry__sentry-2846 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SIGTERM doesn't kill Sentry
I'm using the hosted version of Sentry, and I've set it up with `systemd`.
By default, when stopping, `systemd` uses `SIGTERM` to inform Sentry that it should pack up and "call it a day".
However, `sentry` starts killing itself off, then decides that it has a little more work to do, and starts right back up. Note the section `...brutally killing workers`:
```
$ /opt/sentry/bin/sentry --config=/etc/sentry/sentry.conf.py start'
Running service: 'http'
*** Starting uWSGI 2.0.12 (64bit) on [Tue Mar 15 13:25:08 2016] ***
compiled with version: 5.3.0 on 13 March 2016 22:11:11
os: Linux-4.4.5-1-ARCH #1 SMP PREEMPT Thu Mar 10 07:38:19 CET 2016
nodename: ***`
machine: x86_64
clock source: unix
pcre jit disabled
detected number of CPU cores: 8
current working directory: /home/sentry
detected binary path: /opt/sentry/bin/uwsgi
your processes number limit is 31932
your memory page size is 4096 bytes
detected max file descriptor number: 1024
lock engine: pthread robust mutexes
thunder lock: enabled
uwsgi socket 0 bound to TCP address 0.0.0.0:9000 fd 3
Python version: 2.7.11 (default, Mar 3 2016, 11:00:04) [GCC 5.3.0]
Set PythonHome to /opt/sentry
Python main interpreter initialized at 0x1c3fef0
python threads support enabled
your server socket listen backlog is limited to 100 connections
your mercy for graceful operations on workers is 60 seconds
setting request body buffering size to 65536 bytes
mapped 1922048 bytes (1877 KB) for 12 cores
*** Operational MODE: preforking+threaded ***
spawned uWSGI master process (pid: 1657)
spawned uWSGI worker 1 (pid: 1665, cores: 4)
spawned uWSGI worker 2 (pid: 1666, cores: 4)
spawned uWSGI worker 3 (pid: 1667, cores: 4)
WSGI app 0 (mountpoint='') ready in 3 seconds on interpreter 0x1c3fef0 pid: 1665 (default app)
WSGI app 0 (mountpoint='') ready in 4 seconds on interpreter 0x1c3fef0 pid: 1667 (default app)
WSGI app 0 (mountpoint='') ready in 5 seconds on interpreter 0x1c3fef0 pid: 1666 (default app)
// Right here, SIGTERM sent to sentry
...brutally killing workers...
worker 1 buried after 1 seconds
worker 2 buried after 1 seconds
worker 3 buried after 1 seconds
binary reloading uWSGI...
chdir() to /home/sentry
closing all non-uwsgi socket fds > 2 (max_fd = 1024)...
found fd 3 mapped to socket 0 (0.0.0.0:9000)
running /opt/sentry/bin/uwsgi
*** Starting uWSGI 2.0.12 (64bit) on [Tue Mar 15 13:25:29 2016] ***
compiled with version: 5.3.0 on 13 March 2016 22:11:11
os: Linux-4.4.5-1-ARCH #1 SMP PREEMPT Thu Mar 10 07:38:19 CET 2016
nodename: ***
machine: x86_64
clock source: unix
pcre jit disabled
detected number of CPU cores: 8
current working directory: /home/sentry
detected binary path: /opt/sentry/bin/uwsgi
your processes number limit is 31932
your memory page size is 4096 bytes
detected max file descriptor number: 1024
lock engine: pthread robust mutexes
thunder lock: enabled
uwsgi socket 0 inherited INET address 0.0.0.0:9000 fd 3
Python version: 2.7.11 (default, Mar 3 2016, 11:00:04) [GCC 5.3.0]
Set PythonHome to /opt/sentry
Python main interpreter initialized at 0x1bbf930
python threads support enabled
your server socket listen backlog is limited to 100 connections
your mercy for graceful operations on workers is 60 seconds
setting request body buffering size to 65536 bytes
mapped 1922048 bytes (1877 KB) for 12 cores
*** Operational MODE: preforking+threaded ***
gracefully (RE)spawned uWSGI master process (pid: 1657)
spawned uWSGI worker 1 (pid: 1702, cores: 4)
spawned uWSGI worker 2 (pid: 1703, cores: 4)
spawned uWSGI worker 3 (pid: 1704, cores: 4)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/services/http.py`
Content:
```
1 """
2 sentry.services.http
3 ~~~~~~~~~~~~~~~~~~~~
4
5 :copyright: (c) 2010-2016 by the Sentry Team, see AUTHORS for more details.
6 :license: BSD, see LICENSE for more details.
7 """
8 from __future__ import absolute_import, print_function
9
10 import os
11 import sys
12 from sentry.services.base import Service
13
14
15 def convert_options_to_env(options):
16 for k, v in options.iteritems():
17 if v is None:
18 continue
19 key = 'UWSGI_' + k.upper().replace('-', '_')
20 if isinstance(v, basestring):
21 value = v
22 elif v is True:
23 value = 'true'
24 elif v is False:
25 value = 'false'
26 elif isinstance(v, (int, long)):
27 value = str(v)
28 else:
29 raise TypeError('Unknown option type: %r (%s)' % (k, type(v)))
30 yield key, value
31
32
33 class SentryHTTPServer(Service):
34 name = 'http'
35
36 def __init__(self, host=None, port=None, debug=False, workers=None,
37 validate=True):
38 from django.conf import settings
39
40 if validate:
41 self.validate_settings()
42
43 host = host or settings.SENTRY_WEB_HOST
44 port = port or settings.SENTRY_WEB_PORT
45
46 options = (settings.SENTRY_WEB_OPTIONS or {}).copy()
47 options.setdefault('module', 'sentry.wsgi:application')
48 options.setdefault('protocol', 'http')
49 options.setdefault('auto-procname', True)
50 options.setdefault('procname-prefix-spaced', '[Sentry]')
51 options.setdefault('workers', 3)
52 options.setdefault('threads', 4)
53 options.setdefault('http-timeout', 30)
54 options.setdefault('vacuum', True)
55 options.setdefault('thunder-lock', True)
56 options.setdefault('log-x-forwarded-for', False)
57 options.setdefault('buffer-size', 32768)
58 options.setdefault('post-buffering', 65536)
59 options.setdefault('limit-post', 20971520)
60 options.setdefault('need-app', True)
61 options.setdefault('disable-logging', False)
62 options.setdefault('memory-report', True)
63 options.setdefault('reload-on-rss', 600)
64 options.setdefault('ignore-sigpipe', True)
65 options.setdefault('ignore-write-errors', True)
66 options.setdefault('disable-write-exception', True)
67 options.setdefault('virtualenv', sys.prefix)
68 options.setdefault('log-format', '%(addr) - %(user) [%(ltime)] "%(method) %(uri) %(proto)" %(status) %(size) "%(referer)" "%(uagent)"')
69
70 options.setdefault('%s-socket' % options['protocol'], '%s:%s' % (host, port))
71
72 # We only need to set uid/gid when stepping down from root, but if
73 # we are trying to run as root, then ignore it entirely.
74 uid = os.getuid()
75 if uid > 0:
76 options.setdefault('uid', uid)
77 gid = os.getgid()
78 if gid > 0:
79 options.setdefault('gid', gid)
80
81 # Required arguments that should not be overridden
82 options['master'] = True
83 options['enable-threads'] = True
84 options['lazy-apps'] = True
85 options['single-interpreter'] = True
86
87 if workers:
88 options['workers'] = workers
89
90 # Old options from gunicorn
91 if 'bind' in options:
92 options['%s-socket' % options['protocol']] = options.pop('bind')
93 if 'accesslog' in options:
94 if options['accesslog'] != '-':
95 options['logto'] = options['accesslog']
96 del options['accesslog']
97 if 'errorlog' in options:
98 if options['errorlog'] != '-':
99 options['logto2'] = options['errorlog']
100 del options['errorlog']
101 if 'timeout' in options:
102 options['http-timeout'] = options.pop('timeout')
103 if 'proc_name' in options:
104 options['procname-prefix-spaced'] = options.pop('proc_name')
105 if 'secure_scheme_headers' in options:
106 del options['secure_scheme_headers']
107 if 'loglevel' in options:
108 del options['loglevel']
109
110 self.options = options
111
112 def validate_settings(self):
113 from django.conf import settings as django_settings
114 from sentry.utils.settings import validate_settings
115
116 validate_settings(django_settings)
117
118 def run(self):
119 # Move all of the options into UWSGI_ env vars
120 for k, v in convert_options_to_env(self.options):
121 os.environ.setdefault(k, v)
122
123 # This has already been validated inside __init__
124 os.environ['SENTRY_SKIP_BACKEND_VALIDATION'] = '1'
125
126 # Look up the bin directory where `sentry` exists, which should be
127 # sys.argv[0], then inject that to the front of our PATH so we can reliably
128 # find the `uwsgi` that's installed when inside virtualenv.
129 # This is so the virtualenv doesn't need to be sourced in, which effectively
130 # does exactly this.
131 virtualenv_path = os.path.dirname(os.path.abspath(sys.argv[0]))
132 current_path = os.environ.get('PATH', '')
133 if virtualenv_path not in current_path:
134 os.environ['PATH'] = '%s:%s' % (virtualenv_path, current_path)
135
136 os.execvp('uwsgi', ('uwsgi',))
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/sentry/services/http.py b/src/sentry/services/http.py
--- a/src/sentry/services/http.py
+++ b/src/sentry/services/http.py
@@ -65,6 +65,7 @@
options.setdefault('ignore-write-errors', True)
options.setdefault('disable-write-exception', True)
options.setdefault('virtualenv', sys.prefix)
+ options.setdefault('die-on-term', True)
options.setdefault('log-format', '%(addr) - %(user) [%(ltime)] "%(method) %(uri) %(proto)" %(status) %(size) "%(referer)" "%(uagent)"')
options.setdefault('%s-socket' % options['protocol'], '%s:%s' % (host, port))
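
For reference: by default uWSGI interprets SIGTERM as a reload request rather than a shutdown, which matches the "brutally killing workers... binary reloading uWSGI" sequence in the log above, and `die-on-term` flips that behavior. A simplified sketch (not the exact code above) of how the new option reaches uWSGI via the environment:

```python
# Simplified copy of convert_options_to_env, just to show what the added
# option turns into; uWSGI reads UWSGI_* variables at startup.
def convert_options_to_env(options):
    for k, v in options.items():
        if v is None:
            continue
        key = 'UWSGI_' + k.upper().replace('-', '_')
        yield key, 'true' if v is True else ('false' if v is False else str(v))

print(dict(convert_options_to_env({'die-on-term': True, 'master': True})))
# {'UWSGI_DIE_ON_TERM': 'true', 'UWSGI_MASTER': 'true'}
```

Since uWSGI picks up `UWSGI_*` variables at startup, no extra command-line flags are needed for the option to take effect.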
| {"golden_diff": "diff --git a/src/sentry/services/http.py b/src/sentry/services/http.py\n--- a/src/sentry/services/http.py\n+++ b/src/sentry/services/http.py\n@@ -65,6 +65,7 @@\n options.setdefault('ignore-write-errors', True)\n options.setdefault('disable-write-exception', True)\n options.setdefault('virtualenv', sys.prefix)\n+ options.setdefault('die-on-term', True)\n options.setdefault('log-format', '%(addr) - %(user) [%(ltime)] \"%(method) %(uri) %(proto)\" %(status) %(size) \"%(referer)\" \"%(uagent)\"')\n \n options.setdefault('%s-socket' % options['protocol'], '%s:%s' % (host, port))\n", "issue": "SIGTERM doesn't kill Sentry\nI'm using the hosted version of Sentry, and I've set it up with `systemd`.\nBy default, when stopping, `systemd` uses `SIGTERM` to inform Sentry that it should pack up and \"call it a day\".\n\nHowever, `sentry` starts killing itself off, then decides that it has a little more work to do, and starts right back up. Note the section `...brutally killing workers`:\n\n\n\n```\n$ /opt/sentry/bin/sentry --config=/etc/sentry/sentry.conf.py start'\nRunning service: 'http'\n*** Starting uWSGI 2.0.12 (64bit) on [Tue Mar 15 13:25:08 2016] ***\ncompiled with version: 5.3.0 on 13 March 2016 22:11:11\nos: Linux-4.4.5-1-ARCH #1 SMP PREEMPT Thu Mar 10 07:38:19 CET 2016\nnodename: ***`\nmachine: x86_64\nclock source: unix\npcre jit disabled\ndetected number of CPU cores: 8\ncurrent working directory: /home/sentry\ndetected binary path: /opt/sentry/bin/uwsgi\nyour processes number limit is 31932\nyour memory page size is 4096 bytes\ndetected max file descriptor number: 1024\nlock engine: pthread robust mutexes\nthunder lock: enabled\nuwsgi socket 0 bound to TCP address 0.0.0.0:9000 fd 3\nPython version: 2.7.11 (default, Mar 3 2016, 11:00:04) [GCC 5.3.0]\nSet PythonHome to /opt/sentry\nPython main interpreter initialized at 0x1c3fef0\npython threads support enabled\nyour server socket listen backlog is limited to 100 connections\nyour mercy for graceful operations on workers is 60 seconds\nsetting request body buffering size to 65536 bytes\nmapped 1922048 bytes (1877 KB) for 12 cores\n*** Operational MODE: preforking+threaded ***\nspawned uWSGI master process (pid: 1657)\nspawned uWSGI worker 1 (pid: 1665, cores: 4)\nspawned uWSGI worker 2 (pid: 1666, cores: 4)\nspawned uWSGI worker 3 (pid: 1667, cores: 4)\nWSGI app 0 (mountpoint='') ready in 3 seconds on interpreter 0x1c3fef0 pid: 1665 (default app)\nWSGI app 0 (mountpoint='') ready in 4 seconds on interpreter 0x1c3fef0 pid: 1667 (default app)\nWSGI app 0 (mountpoint='') ready in 5 seconds on interpreter 0x1c3fef0 pid: 1666 (default app)\n\n// Right here, SIGTERM sent to sentry\n\n...brutally killing workers...\nworker 1 buried after 1 seconds\nworker 2 buried after 1 seconds\nworker 3 buried after 1 seconds\nbinary reloading uWSGI...\nchdir() to /home/sentry\nclosing all non-uwsgi socket fds > 2 (max_fd = 1024)...\nfound fd 3 mapped to socket 0 (0.0.0.0:9000)\nrunning /opt/sentry/bin/uwsgi\n*** Starting uWSGI 2.0.12 (64bit) on [Tue Mar 15 13:25:29 2016] ***\ncompiled with version: 5.3.0 on 13 March 2016 22:11:11\nos: Linux-4.4.5-1-ARCH #1 SMP PREEMPT Thu Mar 10 07:38:19 CET 2016\nnodename: ***\nmachine: x86_64\nclock source: unix\npcre jit disabled\ndetected number of CPU cores: 8\ncurrent working directory: /home/sentry\ndetected binary path: /opt/sentry/bin/uwsgi\nyour processes number limit is 31932\nyour memory page size is 4096 bytes\ndetected max file descriptor number: 1024\nlock engine: pthread robust mutexes\nthunder 
lock: enabled\nuwsgi socket 0 inherited INET address 0.0.0.0:9000 fd 3\nPython version: 2.7.11 (default, Mar 3 2016, 11:00:04) [GCC 5.3.0]\nSet PythonHome to /opt/sentry\nPython main interpreter initialized at 0x1bbf930\npython threads support enabled\nyour server socket listen backlog is limited to 100 connections\nyour mercy for graceful operations on workers is 60 seconds\nsetting request body buffering size to 65536 bytes\nmapped 1922048 bytes (1877 KB) for 12 cores\n*** Operational MODE: preforking+threaded ***\ngracefully (RE)spawned uWSGI master process (pid: 1657)\nspawned uWSGI worker 1 (pid: 1702, cores: 4)\nspawned uWSGI worker 2 (pid: 1703, cores: 4)\nspawned uWSGI worker 3 (pid: 1704, cores: 4)\n\n```\n\n", "before_files": [{"content": "\"\"\"\nsentry.services.http\n~~~~~~~~~~~~~~~~~~~~\n\n:copyright: (c) 2010-2016 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\nfrom __future__ import absolute_import, print_function\n\nimport os\nimport sys\nfrom sentry.services.base import Service\n\n\ndef convert_options_to_env(options):\n for k, v in options.iteritems():\n if v is None:\n continue\n key = 'UWSGI_' + k.upper().replace('-', '_')\n if isinstance(v, basestring):\n value = v\n elif v is True:\n value = 'true'\n elif v is False:\n value = 'false'\n elif isinstance(v, (int, long)):\n value = str(v)\n else:\n raise TypeError('Unknown option type: %r (%s)' % (k, type(v)))\n yield key, value\n\n\nclass SentryHTTPServer(Service):\n name = 'http'\n\n def __init__(self, host=None, port=None, debug=False, workers=None,\n validate=True):\n from django.conf import settings\n\n if validate:\n self.validate_settings()\n\n host = host or settings.SENTRY_WEB_HOST\n port = port or settings.SENTRY_WEB_PORT\n\n options = (settings.SENTRY_WEB_OPTIONS or {}).copy()\n options.setdefault('module', 'sentry.wsgi:application')\n options.setdefault('protocol', 'http')\n options.setdefault('auto-procname', True)\n options.setdefault('procname-prefix-spaced', '[Sentry]')\n options.setdefault('workers', 3)\n options.setdefault('threads', 4)\n options.setdefault('http-timeout', 30)\n options.setdefault('vacuum', True)\n options.setdefault('thunder-lock', True)\n options.setdefault('log-x-forwarded-for', False)\n options.setdefault('buffer-size', 32768)\n options.setdefault('post-buffering', 65536)\n options.setdefault('limit-post', 20971520)\n options.setdefault('need-app', True)\n options.setdefault('disable-logging', False)\n options.setdefault('memory-report', True)\n options.setdefault('reload-on-rss', 600)\n options.setdefault('ignore-sigpipe', True)\n options.setdefault('ignore-write-errors', True)\n options.setdefault('disable-write-exception', True)\n options.setdefault('virtualenv', sys.prefix)\n options.setdefault('log-format', '%(addr) - %(user) [%(ltime)] \"%(method) %(uri) %(proto)\" %(status) %(size) \"%(referer)\" \"%(uagent)\"')\n\n options.setdefault('%s-socket' % options['protocol'], '%s:%s' % (host, port))\n\n # We only need to set uid/gid when stepping down from root, but if\n # we are trying to run as root, then ignore it entirely.\n uid = os.getuid()\n if uid > 0:\n options.setdefault('uid', uid)\n gid = os.getgid()\n if gid > 0:\n options.setdefault('gid', gid)\n\n # Required arguments that should not be overridden\n options['master'] = True\n options['enable-threads'] = True\n options['lazy-apps'] = True\n options['single-interpreter'] = True\n\n if workers:\n options['workers'] = workers\n\n # Old options from gunicorn\n if 
'bind' in options:\n options['%s-socket' % options['protocol']] = options.pop('bind')\n if 'accesslog' in options:\n if options['accesslog'] != '-':\n options['logto'] = options['accesslog']\n del options['accesslog']\n if 'errorlog' in options:\n if options['errorlog'] != '-':\n options['logto2'] = options['errorlog']\n del options['errorlog']\n if 'timeout' in options:\n options['http-timeout'] = options.pop('timeout')\n if 'proc_name' in options:\n options['procname-prefix-spaced'] = options.pop('proc_name')\n if 'secure_scheme_headers' in options:\n del options['secure_scheme_headers']\n if 'loglevel' in options:\n del options['loglevel']\n\n self.options = options\n\n def validate_settings(self):\n from django.conf import settings as django_settings\n from sentry.utils.settings import validate_settings\n\n validate_settings(django_settings)\n\n def run(self):\n # Move all of the options into UWSGI_ env vars\n for k, v in convert_options_to_env(self.options):\n os.environ.setdefault(k, v)\n\n # This has already been validated inside __init__\n os.environ['SENTRY_SKIP_BACKEND_VALIDATION'] = '1'\n\n # Look up the bin directory where `sentry` exists, which should be\n # sys.argv[0], then inject that to the front of our PATH so we can reliably\n # find the `uwsgi` that's installed when inside virtualenv.\n # This is so the virtualenv doesn't need to be sourced in, which effectively\n # does exactly this.\n virtualenv_path = os.path.dirname(os.path.abspath(sys.argv[0]))\n current_path = os.environ.get('PATH', '')\n if virtualenv_path not in current_path:\n os.environ['PATH'] = '%s:%s' % (virtualenv_path, current_path)\n\n os.execvp('uwsgi', ('uwsgi',))\n", "path": "src/sentry/services/http.py"}], "after_files": [{"content": "\"\"\"\nsentry.services.http\n~~~~~~~~~~~~~~~~~~~~\n\n:copyright: (c) 2010-2016 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\nfrom __future__ import absolute_import, print_function\n\nimport os\nimport sys\nfrom sentry.services.base import Service\n\n\ndef convert_options_to_env(options):\n for k, v in options.iteritems():\n if v is None:\n continue\n key = 'UWSGI_' + k.upper().replace('-', '_')\n if isinstance(v, basestring):\n value = v\n elif v is True:\n value = 'true'\n elif v is False:\n value = 'false'\n elif isinstance(v, (int, long)):\n value = str(v)\n else:\n raise TypeError('Unknown option type: %r (%s)' % (k, type(v)))\n yield key, value\n\n\nclass SentryHTTPServer(Service):\n name = 'http'\n\n def __init__(self, host=None, port=None, debug=False, workers=None,\n validate=True):\n from django.conf import settings\n\n if validate:\n self.validate_settings()\n\n host = host or settings.SENTRY_WEB_HOST\n port = port or settings.SENTRY_WEB_PORT\n\n options = (settings.SENTRY_WEB_OPTIONS or {}).copy()\n options.setdefault('module', 'sentry.wsgi:application')\n options.setdefault('protocol', 'http')\n options.setdefault('auto-procname', True)\n options.setdefault('procname-prefix-spaced', '[Sentry]')\n options.setdefault('workers', 3)\n options.setdefault('threads', 4)\n options.setdefault('http-timeout', 30)\n options.setdefault('vacuum', True)\n options.setdefault('thunder-lock', True)\n options.setdefault('log-x-forwarded-for', False)\n options.setdefault('buffer-size', 32768)\n options.setdefault('post-buffering', 65536)\n options.setdefault('limit-post', 20971520)\n options.setdefault('need-app', True)\n options.setdefault('disable-logging', False)\n options.setdefault('memory-report', True)\n 
options.setdefault('reload-on-rss', 600)\n options.setdefault('ignore-sigpipe', True)\n options.setdefault('ignore-write-errors', True)\n options.setdefault('disable-write-exception', True)\n options.setdefault('virtualenv', sys.prefix)\n options.setdefault('die-on-term', True)\n options.setdefault('log-format', '%(addr) - %(user) [%(ltime)] \"%(method) %(uri) %(proto)\" %(status) %(size) \"%(referer)\" \"%(uagent)\"')\n\n options.setdefault('%s-socket' % options['protocol'], '%s:%s' % (host, port))\n\n # We only need to set uid/gid when stepping down from root, but if\n # we are trying to run as root, then ignore it entirely.\n uid = os.getuid()\n if uid > 0:\n options.setdefault('uid', uid)\n gid = os.getgid()\n if gid > 0:\n options.setdefault('gid', gid)\n\n # Required arguments that should not be overridden\n options['master'] = True\n options['enable-threads'] = True\n options['lazy-apps'] = True\n options['single-interpreter'] = True\n\n if workers:\n options['workers'] = workers\n\n # Old options from gunicorn\n if 'bind' in options:\n options['%s-socket' % options['protocol']] = options.pop('bind')\n if 'accesslog' in options:\n if options['accesslog'] != '-':\n options['logto'] = options['accesslog']\n del options['accesslog']\n if 'errorlog' in options:\n if options['errorlog'] != '-':\n options['logto2'] = options['errorlog']\n del options['errorlog']\n if 'timeout' in options:\n options['http-timeout'] = options.pop('timeout')\n if 'proc_name' in options:\n options['procname-prefix-spaced'] = options.pop('proc_name')\n if 'secure_scheme_headers' in options:\n del options['secure_scheme_headers']\n if 'loglevel' in options:\n del options['loglevel']\n\n self.options = options\n\n def validate_settings(self):\n from django.conf import settings as django_settings\n from sentry.utils.settings import validate_settings\n\n validate_settings(django_settings)\n\n def run(self):\n # Move all of the options into UWSGI_ env vars\n for k, v in convert_options_to_env(self.options):\n os.environ.setdefault(k, v)\n\n # This has already been validated inside __init__\n os.environ['SENTRY_SKIP_BACKEND_VALIDATION'] = '1'\n\n # Look up the bin directory where `sentry` exists, which should be\n # sys.argv[0], then inject that to the front of our PATH so we can reliably\n # find the `uwsgi` that's installed when inside virtualenv.\n # This is so the virtualenv doesn't need to be sourced in, which effectively\n # does exactly this.\n virtualenv_path = os.path.dirname(os.path.abspath(sys.argv[0]))\n current_path = os.environ.get('PATH', '')\n if virtualenv_path not in current_path:\n os.environ['PATH'] = '%s:%s' % (virtualenv_path, current_path)\n\n os.execvp('uwsgi', ('uwsgi',))\n", "path": "src/sentry/services/http.py"}]} | 3,126 | 161 |
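The Sentry fix recorded above works by adding `die-on-term` to the uWSGI option dict that `SentryHTTPServer` builds; every option in that dict is exported as a `UWSGI_*` environment variable by `convert_options_to_env` before `os.execvp('uwsgi', ...)` replaces the process, so uWSGI picks the flag up and exits on SIGTERM instead of performing the brutal reload shown in the issue log. Below is a minimal sketch of that option-to-environment translation, written for Python 3 with an illustrative option dict rather than a real Sentry configuration:

```python
# Sketch: how Sentry-style uWSGI options become UWSGI_* environment variables.
# 'die-on-term' maps to UWSGI_DIE_ON_TERM=true, the setting the patch relies on.
def convert_options_to_env(options):
    for key, value in options.items():
        if value is None:
            continue  # unset options are skipped entirely
        env_key = 'UWSGI_' + key.upper().replace('-', '_')
        if value is True:
            env_value = 'true'
        elif value is False:
            env_value = 'false'
        else:
            env_value = str(value)  # ints and strings pass through as text
        yield env_key, env_value


if __name__ == '__main__':
    options = {'die-on-term': True, 'workers': 3, 'protocol': 'http'}
    print(dict(convert_options_to_env(options)))
    # {'UWSGI_DIE_ON_TERM': 'true', 'UWSGI_WORKERS': '3', 'UWSGI_PROTOCOL': 'http'}
```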
gh_patches_debug_35291 | rasdani/github-patches | git_diff | 3cn-ecn__nantralPlatform-403 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Lost password
This feature does not work on my computer but works on Gabin Schieffer's computer. Suggested by julie.geffraye@eleves.ec-nantes.fr
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `server/apps/account/urls.py`
Content:
```
1 from django.conf.urls import url
2 from django.urls import path
3
4 from .views import *
5
6 app_name = 'account'
7
8 urlpatterns = [
9 path('login', AuthView.as_view(), name='login'),
10 path('logout', LogoutView.as_view(), name='logout'),
11 path('registration', RegistrationView.as_view(), name='registration'),
12 path('registration/temporary/<int:id>/approve', ApproveTemporaryRegistrationView.as_view(),
13 name='temp-req-approve'),
14 path('registration/temporary/<int:id>/deny', DenyTemporaryRegistrationView.as_view(),
15 name='temp-req-deny'),
16 path('registration/temporary', TemporaryRegistrationView.as_view(),
17 name='temporary-registration'),
18 path('activate/<slug:uidb64>/<slug:token>/',
19 ConfirmUser.as_view(), name='confirm'),
20 path('activate/<slug:uidb64>/<slug:token>/temporary',
21 ConfirmUserTemporary.as_view(), name='confirm-temporary'),
22 path('permanent', PermanentAccountUpgradeView.as_view(),
23 name='upgrade-permanent'),
24 path('forgotten', ForgottenPassView.as_view(), name='forgotten_pass'),
25 path('reset_pass/<slug:uidb64>/<slug:token>',
26 PasswordResetConfirmCustomView.as_view(), name='reset_pass'),
27 path('<slug:user_id>/student', redirect_to_student, name='redirect-student'),
28 ]
29
```
Path: `server/apps/account/views.py`
Content:
```
1 from datetime import date
2 from typing import Any, Dict, Union
3 from django.conf import settings
4 from django.contrib.auth import login, logout
5 from django.contrib.sites.shortcuts import get_current_site
6 from django.http.response import HttpResponse
7 from django.views.generic.edit import FormView
8 from django.shortcuts import get_object_or_404
9
10 from apps.utils.accessMixins import UserIsSuperAdmin
11 from .forms import SignUpForm, LoginForm, ForgottenPassForm, TemporaryRequestSignUpForm, UpgradePermanentAccountForm
12 from .tokens import account_activation_token
13 from django.contrib import messages
14 from django.shortcuts import render, redirect
15 from django.template.loader import render_to_string
16 from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode
17 from django.utils.encoding import force_bytes, force_text
18 from django.urls import reverse, reverse_lazy
19
20 from django.contrib.auth.views import PasswordResetConfirmView
21 from django.views import View
22
23 from django.contrib.auth.forms import SetPasswordForm
24 from django.contrib.auth.mixins import LoginRequiredMixin
25
26 from django.contrib.auth.models import User
27 from apps.student.models import Student
28
29 from .emailAuthBackend import EmailBackend
30 from .models import TemporaryAccessRequest
31 from .utils import user_creation, send_email_confirmation
32
33
34 class RegistrationView(FormView):
35 template_name = 'account/registration.html'
36 form_class = SignUpForm
37
38 def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:
39 context = super().get_context_data(**kwargs)
40 context['temporary_registration'] = settings.TEMPORARY_ACCOUNTS_DATE_LIMIT >= date.today()
41 return context
42
43 def form_valid(self, form):
44 user_creation(form, self.request)
45 return redirect(reverse('home:home'))
46
47
48 class TemporaryRegistrationView(FormView):
49 form_class = TemporaryRequestSignUpForm
50 template_name = 'account/temporary_registration.html'
51
52 def dispatch(self, request, *args: Any, **kwargs: Any):
53 """Do not allow to use this view outside of allowed temporary accounts windows."""
54 if not settings.TEMPORARY_ACCOUNTS_DATE_LIMIT >= date.today():
55 return redirect(reverse('account:registration'))
56 return super().dispatch(request, *args, **kwargs)
57
58 def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:
59 context = super().get_context_data(**kwargs)
60 context['DEADLINE_TEMPORARY_REGISTRATION'] = settings.TEMPORARY_ACCOUNTS_DATE_LIMIT
61 return context
62
63 def form_valid(self, form) -> HttpResponse:
64 user_creation(form, self.request)
65 return redirect(reverse('home:home'))
66
67
68 class ConfirmUser(View):
69 def get(self, request, uidb64, token):
70 tempAccessReq: Union[TemporaryAccessRequest, None] = None
71 try:
72 uid = force_text(urlsafe_base64_decode(uidb64))
73 user = User.objects.get(pk=uid)
74 except (TypeError, ValueError, OverflowError, User.DoesNotExist):
75 return render(self.request, 'account/activation_invalid.html')
76 # checking if the user is not a temporary one
77 try:
78 tempAccessReq: TemporaryAccessRequest = TemporaryAccessRequest.objects.get(
79 user=user.pk)
80 if not tempAccessReq.approved:
81 return render(self.request, 'account/activation_invalid.html')
82 except TemporaryAccessRequest.DoesNotExist:
83 tempAccessReq = None
84 # checking if the token is valid.
85 if account_activation_token.check_token(user, token):
86 # if valid set active true
87 user.is_active = True
88 if tempAccessReq is not None:
89 user.email = tempAccessReq.final_email
90 tempAccessReq.delete()
91 messages.warning(
92 request, f'Dorénavant vous devez utiliser {user.email} pour vous connecter.')
93 user.save()
94 login(self.request, user,
95 backend='apps.account.emailAuthBackend.EmailBackend')
96 messages.success(request, 'Votre compte est desormais actif !')
97 return redirect(reverse('home:home'))
98 else:
99 return render(self.request, 'account/activation_invalid.html')
100
101
102 class AuthView(FormView):
103 template_name = 'account/login.html'
104 form_class = LoginForm
105
106 def get(self, request):
107 if request.user.is_authenticated:
108 user = request.user
109 message = f'Vous etes déjà connecté en tant que {user.first_name.title()}.'
110 messages.warning(request, message)
111 return redirect(reverse('home:home'))
112 else:
113 return super(AuthView, AuthView).get(self, request)
114
115 def form_invalid(self, form):
116 message = f'Veuillez vous connecter avec votre adresse mail ECN.'
117 messages.warning(self.request, message)
118 return redirect(reverse('account:login'))
119
120 def form_valid(self, form):
121 username = form.cleaned_data['email']
122 password = form.cleaned_data['password']
123 user = EmailBackend.authenticate(username=username, password=password)
124 if user is not None:
125 if user.is_active:
126 message = f'Bonjour {user.first_name.title()} !'
127 messages.success(self.request, message)
128 else:
129 if settings.TEMPORARY_ACCOUNTS_DATE_LIMIT >= date.today():
130 # During certain periods allow temporary accounts.
131 try:
132 temporaryAccessRequest: TemporaryAccessRequest = TemporaryAccessRequest.objects.get(
133 user=user
134 )
135 if not temporaryAccessRequest.mail_valid:
136 message = 'Votre compte n\'est pas encore actif.\
137 Veuillez cliquer sur le lien envoyé par mail pour l\'\
138 activer.'
139 messages.error(self.request, message)
140 return redirect(reverse('account:login'))
141 if temporaryAccessRequest.approved_until <= date.today():
142 message = 'Votre compte n\'a pas encore été approuvé.\
143 On vous prévient par mail dès que c\'est le cas.'
144 messages.error(self.request, message)
145 return redirect(reverse('account:login'))
146 message = f'Votre compte n\'est pas encore définitif.\
147 Veuillez le valider <a href="{reverse("account:upgrade-permanent")}">ici</a>.\
148 Attention après le {temporaryAccessRequest.approved_until}\
149 vous ne pourrez plus vous connecter si vous n\'avez pas renseigné votre adresse Centrale.'
150 messages.warning(self.request, message)
151 except TemporaryAccessRequest.DoesNotExist:
152 messages.error(
153 self.request, 'Identifiant inconnu ou mot de passe invalide.')
154 return redirect(reverse('account:login'))
155 else:
156 messages.warning(
157 self.request, 'Votre compte n\'est pas encore actif. Veuillez cliquer sur le lien dans \'email.')
158 login(self.request, user,
159 backend='apps.account.emailAuthBackend.EmailBackend')
160 return redirect(reverse('home:home'))
161 else:
162 messages.error(
163 self.request, 'Identifiant inconnu ou mot de passe invalide.')
164 return redirect(reverse('account:login'))
165
166
167 class LogoutView(View):
168 def get(self, request):
169 logout(request)
170 messages.success(request, 'Vous avez été déconnecté.')
171 return redirect(reverse('account:login'))
172
173
174 class ForgottenPassView(FormView):
175 form_class = ForgottenPassForm
176 template_name = 'account/forgotten_pass.html'
177
178 def form_valid(self, form):
179 user = User.objects.get(email=form.cleaned_data['email'])
180 if user is not None:
181 subject = '[Nantral Platform] Reinitialisation de votre mot de passe'
182 current_site = get_current_site(self.request)
183 message = render_to_string('account/mail/password_request.html', {
184 'user': user,
185 'domain': current_site.domain,
186 'uidb64': urlsafe_base64_encode(force_bytes(user.pk)),
187 # method will generate a hash value with user related data
188 'token': account_activation_token.make_token(user),
189 })
190 user.email_user(
191 subject, message, 'accounts@nantral-platform.fr', html_message=message)
192 messages.success(
193 self.request, 'Un email de récuperation a été envoyé si cette adresse existe.')
194 return redirect(reverse('account:login'))
195
196
197 class PasswordResetConfirmCustomView(PasswordResetConfirmView):
198 template_name = 'account/reset_password.html'
199 post_reset_login = True
200 post_reset_login_backend = 'apps.account.emailAuthBackend.EmailBackend'
201 form_class = SetPasswordForm
202 token_generator = account_activation_token
203 success_url = reverse_lazy('home:home')
204
205
206 def redirect_to_student(request, user_id):
207 user = User.objects.get(id=user_id)
208 student = Student.objects.get(user=user)
209 return redirect('student:update', student.pk)
210
211
212 class ABCApprovalTemporaryResgistrationView(UserIsSuperAdmin, View):
213 def get(self, request, id):
214 self.temp_req: TemporaryAccessRequest = get_object_or_404(
215 TemporaryAccessRequest, id=id)
216
217 if self.temp_req.approved:
218 messages.warning(request, f'Cette requête a déjà été approuvée.')
219 return redirect(reverse('home:home'))
220
221
222 class ApproveTemporaryRegistrationView(ABCApprovalTemporaryResgistrationView):
223 def get(self, request, id):
224 super().get(request, id)
225 self.temp_req.approve()
226 messages.success(
227 request, f'Vous avez accepté la demande de {self.temp_req.user.first_name} {self.temp_req.user.last_name}')
228 return redirect(reverse('home:home'))
229
230
231 class DenyTemporaryRegistrationView(ABCApprovalTemporaryResgistrationView):
232 def get(self, request, id):
233 super().get(request, id)
234 messages.success(
235 request, f'Vous avez refusé la demande de {self.temp_req.user.first_name} {self.temp_req.user.last_name}')
236 self.temp_req.deny()
237 return redirect(reverse('home:home'))
238
239
240 class ConfirmUserTemporary(View):
241 def get(self, request, uidb64, token):
242 try:
243 uid = force_text(urlsafe_base64_decode(uidb64))
244 user = User.objects.get(pk=uid)
245 except (TypeError, ValueError, OverflowError, User.DoesNotExist):
246 user = None
247 # checking if the user exists, if the token is valid.
248 if user is not None and account_activation_token.check_token(user, token):
249 try:
250 temp_req: TemporaryAccessRequest = TemporaryAccessRequest.objects.get(
251 user=user)
252 temp_req.mail_valid = True
253 temp_req.save()
254 except TemporaryAccessRequest.DoesNotExist:
255 return render(self.request, 'account/activation_invalid.html')
256 messages.success(request, 'Votre addresse mail est confirmé! \n\
257 Comme vous n\'avez pas utilisé votre adresse Centrale, vous devez encore attendre qu\'un administrateur vérifie votre inscription.\n\
258 On vous prévient par mail dès que c\'est bon!. ')
259 return redirect(reverse('home:home'))
260 else:
261 return render(self.request, 'account/activation_invalid.html')
262
263
264 class PermanentAccountUpgradeView(LoginRequiredMixin, FormView):
265 form_class = UpgradePermanentAccountForm
266 template_name = 'account/permanent_account_upgrade.html'
267 success_url = reverse_lazy('home:home')
268
269 def get(self, request):
270 get_object_or_404(
271 TemporaryAccessRequest,
272 user=self.request.user
273 )
274 return super().get(request)
275
276 def form_valid(self, form: UpgradePermanentAccountForm) -> HttpResponse:
277 temp_request = get_object_or_404(
278 TemporaryAccessRequest,
279 user=self.request.user
280 )
281 temp_request.final_email = form.cleaned_data['email']
282 temp_request.save()
283 send_email_confirmation(
284 self.request.user, self.request, send_to=form.cleaned_data['email'])
285 return super().form_valid(form)
286
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/server/apps/account/urls.py b/server/apps/account/urls.py
--- a/server/apps/account/urls.py
+++ b/server/apps/account/urls.py
@@ -22,7 +22,7 @@
path('permanent', PermanentAccountUpgradeView.as_view(),
name='upgrade-permanent'),
path('forgotten', ForgottenPassView.as_view(), name='forgotten_pass'),
- path('reset_pass/<slug:uidb64>/<slug:token>',
+ path('reset_pass/<slug:uidb64>/<slug:token>/',
PasswordResetConfirmCustomView.as_view(), name='reset_pass'),
path('<slug:user_id>/student', redirect_to_student, name='redirect-student'),
]
diff --git a/server/apps/account/views.py b/server/apps/account/views.py
--- a/server/apps/account/views.py
+++ b/server/apps/account/views.py
@@ -176,19 +176,22 @@
template_name = 'account/forgotten_pass.html'
def form_valid(self, form):
- user = User.objects.get(email=form.cleaned_data['email'])
- if user is not None:
- subject = '[Nantral Platform] Reinitialisation de votre mot de passe'
- current_site = get_current_site(self.request)
- message = render_to_string('account/mail/password_request.html', {
- 'user': user,
- 'domain': current_site.domain,
- 'uidb64': urlsafe_base64_encode(force_bytes(user.pk)),
- # method will generate a hash value with user related data
- 'token': account_activation_token.make_token(user),
- })
- user.email_user(
- subject, message, 'accounts@nantral-platform.fr', html_message=message)
+ try:
+ user = User.objects.get(email=form.cleaned_data['email'])
+ if user is not None:
+ subject = '[Nantral Platform] Reinitialisation de votre mot de passe'
+ current_site = get_current_site(self.request)
+ message = render_to_string('account/mail/password_request.html', {
+ 'user': user,
+ 'domain': current_site.domain,
+ 'uidb64': urlsafe_base64_encode(force_bytes(user.pk)),
+ # method will generate a hash value with user related data
+ 'token': account_activation_token.make_token(user),
+ })
+ user.email_user(
+ subject, message, 'accounts@nantral-platform.fr', html_message=message)
+ except User.DoesNotExist:
+ pass
messages.success(
self.request, 'Un email de récuperation a été envoyé si cette adresse existe.')
return redirect(reverse('account:login'))
| {"golden_diff": "diff --git a/server/apps/account/urls.py b/server/apps/account/urls.py\n--- a/server/apps/account/urls.py\n+++ b/server/apps/account/urls.py\n@@ -22,7 +22,7 @@\n path('permanent', PermanentAccountUpgradeView.as_view(),\n name='upgrade-permanent'),\n path('forgotten', ForgottenPassView.as_view(), name='forgotten_pass'),\n- path('reset_pass/<slug:uidb64>/<slug:token>',\n+ path('reset_pass/<slug:uidb64>/<slug:token>/',\n PasswordResetConfirmCustomView.as_view(), name='reset_pass'),\n path('<slug:user_id>/student', redirect_to_student, name='redirect-student'),\n ]\ndiff --git a/server/apps/account/views.py b/server/apps/account/views.py\n--- a/server/apps/account/views.py\n+++ b/server/apps/account/views.py\n@@ -176,19 +176,22 @@\n template_name = 'account/forgotten_pass.html'\n \n def form_valid(self, form):\n- user = User.objects.get(email=form.cleaned_data['email'])\n- if user is not None:\n- subject = '[Nantral Platform] Reinitialisation de votre mot de passe'\n- current_site = get_current_site(self.request)\n- message = render_to_string('account/mail/password_request.html', {\n- 'user': user,\n- 'domain': current_site.domain,\n- 'uidb64': urlsafe_base64_encode(force_bytes(user.pk)),\n- # method will generate a hash value with user related data\n- 'token': account_activation_token.make_token(user),\n- })\n- user.email_user(\n- subject, message, 'accounts@nantral-platform.fr', html_message=message)\n+ try:\n+ user = User.objects.get(email=form.cleaned_data['email'])\n+ if user is not None:\n+ subject = '[Nantral Platform] Reinitialisation de votre mot de passe'\n+ current_site = get_current_site(self.request)\n+ message = render_to_string('account/mail/password_request.html', {\n+ 'user': user,\n+ 'domain': current_site.domain,\n+ 'uidb64': urlsafe_base64_encode(force_bytes(user.pk)),\n+ # method will generate a hash value with user related data\n+ 'token': account_activation_token.make_token(user),\n+ })\n+ user.email_user(\n+ subject, message, 'accounts@nantral-platform.fr', html_message=message)\n+ except User.DoesNotExist:\n+ pass\n messages.success(\n self.request, 'Un email de r\u00e9cuperation a \u00e9t\u00e9 envoy\u00e9 si cette adresse existe.')\n return redirect(reverse('account:login'))\n", "issue": "Mot de passe perdu\nCette fonctionnalit\u00e9 de fonctionne pas sur mon ordinateur mais fonctionne sur l'ordinateur de Gabin Schieffer <br/> Propos\u00e9 par julie.geffraye@eleves.ec-nantes.fr\n", "before_files": [{"content": "from django.conf.urls import url\nfrom django.urls import path\n\nfrom .views import *\n\napp_name = 'account'\n\nurlpatterns = [\n path('login', AuthView.as_view(), name='login'),\n path('logout', LogoutView.as_view(), name='logout'),\n path('registration', RegistrationView.as_view(), name='registration'),\n path('registration/temporary/<int:id>/approve', ApproveTemporaryRegistrationView.as_view(),\n name='temp-req-approve'),\n path('registration/temporary/<int:id>/deny', DenyTemporaryRegistrationView.as_view(),\n name='temp-req-deny'),\n path('registration/temporary', TemporaryRegistrationView.as_view(),\n name='temporary-registration'),\n path('activate/<slug:uidb64>/<slug:token>/',\n ConfirmUser.as_view(), name='confirm'),\n path('activate/<slug:uidb64>/<slug:token>/temporary',\n ConfirmUserTemporary.as_view(), name='confirm-temporary'),\n path('permanent', PermanentAccountUpgradeView.as_view(),\n name='upgrade-permanent'),\n path('forgotten', ForgottenPassView.as_view(), name='forgotten_pass'),\n 
path('reset_pass/<slug:uidb64>/<slug:token>',\n PasswordResetConfirmCustomView.as_view(), name='reset_pass'),\n path('<slug:user_id>/student', redirect_to_student, name='redirect-student'),\n]\n", "path": "server/apps/account/urls.py"}, {"content": "from datetime import date\nfrom typing import Any, Dict, Union\nfrom django.conf import settings\nfrom django.contrib.auth import login, logout\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.http.response import HttpResponse\nfrom django.views.generic.edit import FormView\nfrom django.shortcuts import get_object_or_404\n\nfrom apps.utils.accessMixins import UserIsSuperAdmin\nfrom .forms import SignUpForm, LoginForm, ForgottenPassForm, TemporaryRequestSignUpForm, UpgradePermanentAccountForm\nfrom .tokens import account_activation_token\nfrom django.contrib import messages\nfrom django.shortcuts import render, redirect\nfrom django.template.loader import render_to_string\nfrom django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode\nfrom django.utils.encoding import force_bytes, force_text\nfrom django.urls import reverse, reverse_lazy\n\nfrom django.contrib.auth.views import PasswordResetConfirmView\nfrom django.views import View\n\nfrom django.contrib.auth.forms import SetPasswordForm\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\nfrom django.contrib.auth.models import User\nfrom apps.student.models import Student\n\nfrom .emailAuthBackend import EmailBackend\nfrom .models import TemporaryAccessRequest\nfrom .utils import user_creation, send_email_confirmation\n\n\nclass RegistrationView(FormView):\n template_name = 'account/registration.html'\n form_class = SignUpForm\n\n def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:\n context = super().get_context_data(**kwargs)\n context['temporary_registration'] = settings.TEMPORARY_ACCOUNTS_DATE_LIMIT >= date.today()\n return context\n\n def form_valid(self, form):\n user_creation(form, self.request)\n return redirect(reverse('home:home'))\n\n\nclass TemporaryRegistrationView(FormView):\n form_class = TemporaryRequestSignUpForm\n template_name = 'account/temporary_registration.html'\n\n def dispatch(self, request, *args: Any, **kwargs: Any):\n \"\"\"Do not allow to use this view outside of allowed temporary accounts windows.\"\"\"\n if not settings.TEMPORARY_ACCOUNTS_DATE_LIMIT >= date.today():\n return redirect(reverse('account:registration'))\n return super().dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:\n context = super().get_context_data(**kwargs)\n context['DEADLINE_TEMPORARY_REGISTRATION'] = settings.TEMPORARY_ACCOUNTS_DATE_LIMIT\n return context\n\n def form_valid(self, form) -> HttpResponse:\n user_creation(form, self.request)\n return redirect(reverse('home:home'))\n\n\nclass ConfirmUser(View):\n def get(self, request, uidb64, token):\n tempAccessReq: Union[TemporaryAccessRequest, None] = None\n try:\n uid = force_text(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n except (TypeError, ValueError, OverflowError, User.DoesNotExist):\n return render(self.request, 'account/activation_invalid.html')\n # checking if the user is not a temporary one\n try:\n tempAccessReq: TemporaryAccessRequest = TemporaryAccessRequest.objects.get(\n user=user.pk)\n if not tempAccessReq.approved:\n return render(self.request, 'account/activation_invalid.html')\n except TemporaryAccessRequest.DoesNotExist:\n tempAccessReq = None\n # checking if the token is valid.\n if 
account_activation_token.check_token(user, token):\n # if valid set active true\n user.is_active = True\n if tempAccessReq is not None:\n user.email = tempAccessReq.final_email\n tempAccessReq.delete()\n messages.warning(\n request, f'Dor\u00e9navant vous devez utiliser {user.email} pour vous connecter.')\n user.save()\n login(self.request, user,\n backend='apps.account.emailAuthBackend.EmailBackend')\n messages.success(request, 'Votre compte est desormais actif !')\n return redirect(reverse('home:home'))\n else:\n return render(self.request, 'account/activation_invalid.html')\n\n\nclass AuthView(FormView):\n template_name = 'account/login.html'\n form_class = LoginForm\n\n def get(self, request):\n if request.user.is_authenticated:\n user = request.user\n message = f'Vous etes d\u00e9j\u00e0 connect\u00e9 en tant que {user.first_name.title()}.'\n messages.warning(request, message)\n return redirect(reverse('home:home'))\n else:\n return super(AuthView, AuthView).get(self, request)\n\n def form_invalid(self, form):\n message = f'Veuillez vous connecter avec votre adresse mail ECN.'\n messages.warning(self.request, message)\n return redirect(reverse('account:login'))\n\n def form_valid(self, form):\n username = form.cleaned_data['email']\n password = form.cleaned_data['password']\n user = EmailBackend.authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n message = f'Bonjour {user.first_name.title()} !'\n messages.success(self.request, message)\n else:\n if settings.TEMPORARY_ACCOUNTS_DATE_LIMIT >= date.today():\n # During certain periods allow temporary accounts.\n try:\n temporaryAccessRequest: TemporaryAccessRequest = TemporaryAccessRequest.objects.get(\n user=user\n )\n if not temporaryAccessRequest.mail_valid:\n message = 'Votre compte n\\'est pas encore actif.\\\n Veuillez cliquer sur le lien envoy\u00e9 par mail pour l\\'\\\n activer.'\n messages.error(self.request, message)\n return redirect(reverse('account:login'))\n if temporaryAccessRequest.approved_until <= date.today():\n message = 'Votre compte n\\'a pas encore \u00e9t\u00e9 approuv\u00e9.\\\n On vous pr\u00e9vient par mail d\u00e8s que c\\'est le cas.'\n messages.error(self.request, message)\n return redirect(reverse('account:login'))\n message = f'Votre compte n\\'est pas encore d\u00e9finitif.\\\n Veuillez le valider <a href=\"{reverse(\"account:upgrade-permanent\")}\">ici</a>.\\\n Attention apr\u00e8s le {temporaryAccessRequest.approved_until}\\\n vous ne pourrez plus vous connecter si vous n\\'avez pas renseign\u00e9 votre adresse Centrale.'\n messages.warning(self.request, message)\n except TemporaryAccessRequest.DoesNotExist:\n messages.error(\n self.request, 'Identifiant inconnu ou mot de passe invalide.')\n return redirect(reverse('account:login'))\n else:\n messages.warning(\n self.request, 'Votre compte n\\'est pas encore actif. 
Veuillez cliquer sur le lien dans \\'email.')\n login(self.request, user,\n backend='apps.account.emailAuthBackend.EmailBackend')\n return redirect(reverse('home:home'))\n else:\n messages.error(\n self.request, 'Identifiant inconnu ou mot de passe invalide.')\n return redirect(reverse('account:login'))\n\n\nclass LogoutView(View):\n def get(self, request):\n logout(request)\n messages.success(request, 'Vous avez \u00e9t\u00e9 d\u00e9connect\u00e9.')\n return redirect(reverse('account:login'))\n\n\nclass ForgottenPassView(FormView):\n form_class = ForgottenPassForm\n template_name = 'account/forgotten_pass.html'\n\n def form_valid(self, form):\n user = User.objects.get(email=form.cleaned_data['email'])\n if user is not None:\n subject = '[Nantral Platform] Reinitialisation de votre mot de passe'\n current_site = get_current_site(self.request)\n message = render_to_string('account/mail/password_request.html', {\n 'user': user,\n 'domain': current_site.domain,\n 'uidb64': urlsafe_base64_encode(force_bytes(user.pk)),\n # method will generate a hash value with user related data\n 'token': account_activation_token.make_token(user),\n })\n user.email_user(\n subject, message, 'accounts@nantral-platform.fr', html_message=message)\n messages.success(\n self.request, 'Un email de r\u00e9cuperation a \u00e9t\u00e9 envoy\u00e9 si cette adresse existe.')\n return redirect(reverse('account:login'))\n\n\nclass PasswordResetConfirmCustomView(PasswordResetConfirmView):\n template_name = 'account/reset_password.html'\n post_reset_login = True\n post_reset_login_backend = 'apps.account.emailAuthBackend.EmailBackend'\n form_class = SetPasswordForm\n token_generator = account_activation_token\n success_url = reverse_lazy('home:home')\n\n\ndef redirect_to_student(request, user_id):\n user = User.objects.get(id=user_id)\n student = Student.objects.get(user=user)\n return redirect('student:update', student.pk)\n\n\nclass ABCApprovalTemporaryResgistrationView(UserIsSuperAdmin, View):\n def get(self, request, id):\n self.temp_req: TemporaryAccessRequest = get_object_or_404(\n TemporaryAccessRequest, id=id)\n\n if self.temp_req.approved:\n messages.warning(request, f'Cette requ\u00eate a d\u00e9j\u00e0 \u00e9t\u00e9 approuv\u00e9e.')\n return redirect(reverse('home:home'))\n\n\nclass ApproveTemporaryRegistrationView(ABCApprovalTemporaryResgistrationView):\n def get(self, request, id):\n super().get(request, id)\n self.temp_req.approve()\n messages.success(\n request, f'Vous avez accept\u00e9 la demande de {self.temp_req.user.first_name} {self.temp_req.user.last_name}')\n return redirect(reverse('home:home'))\n\n\nclass DenyTemporaryRegistrationView(ABCApprovalTemporaryResgistrationView):\n def get(self, request, id):\n super().get(request, id)\n messages.success(\n request, f'Vous avez refus\u00e9 la demande de {self.temp_req.user.first_name} {self.temp_req.user.last_name}')\n self.temp_req.deny()\n return redirect(reverse('home:home'))\n\n\nclass ConfirmUserTemporary(View):\n def get(self, request, uidb64, token):\n try:\n uid = force_text(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n except (TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n # checking if the user exists, if the token is valid.\n if user is not None and account_activation_token.check_token(user, token):\n try:\n temp_req: TemporaryAccessRequest = TemporaryAccessRequest.objects.get(\n user=user)\n temp_req.mail_valid = True\n temp_req.save()\n except TemporaryAccessRequest.DoesNotExist:\n return 
render(self.request, 'account/activation_invalid.html')\n messages.success(request, 'Votre addresse mail est confirm\u00e9! \\n\\\n Comme vous n\\'avez pas utilis\u00e9 votre adresse Centrale, vous devez encore attendre qu\\'un administrateur v\u00e9rifie votre inscription.\\n\\\n On vous pr\u00e9vient par mail d\u00e8s que c\\'est bon!. ')\n return redirect(reverse('home:home'))\n else:\n return render(self.request, 'account/activation_invalid.html')\n\n\nclass PermanentAccountUpgradeView(LoginRequiredMixin, FormView):\n form_class = UpgradePermanentAccountForm\n template_name = 'account/permanent_account_upgrade.html'\n success_url = reverse_lazy('home:home')\n\n def get(self, request):\n get_object_or_404(\n TemporaryAccessRequest,\n user=self.request.user\n )\n return super().get(request)\n\n def form_valid(self, form: UpgradePermanentAccountForm) -> HttpResponse:\n temp_request = get_object_or_404(\n TemporaryAccessRequest,\n user=self.request.user\n )\n temp_request.final_email = form.cleaned_data['email']\n temp_request.save()\n send_email_confirmation(\n self.request.user, self.request, send_to=form.cleaned_data['email'])\n return super().form_valid(form)\n", "path": "server/apps/account/views.py"}], "after_files": [{"content": "from django.conf.urls import url\nfrom django.urls import path\n\nfrom .views import *\n\napp_name = 'account'\n\nurlpatterns = [\n path('login', AuthView.as_view(), name='login'),\n path('logout', LogoutView.as_view(), name='logout'),\n path('registration', RegistrationView.as_view(), name='registration'),\n path('registration/temporary/<int:id>/approve', ApproveTemporaryRegistrationView.as_view(),\n name='temp-req-approve'),\n path('registration/temporary/<int:id>/deny', DenyTemporaryRegistrationView.as_view(),\n name='temp-req-deny'),\n path('registration/temporary', TemporaryRegistrationView.as_view(),\n name='temporary-registration'),\n path('activate/<slug:uidb64>/<slug:token>/',\n ConfirmUser.as_view(), name='confirm'),\n path('activate/<slug:uidb64>/<slug:token>/temporary',\n ConfirmUserTemporary.as_view(), name='confirm-temporary'),\n path('permanent', PermanentAccountUpgradeView.as_view(),\n name='upgrade-permanent'),\n path('forgotten', ForgottenPassView.as_view(), name='forgotten_pass'),\n path('reset_pass/<slug:uidb64>/<slug:token>/',\n PasswordResetConfirmCustomView.as_view(), name='reset_pass'),\n path('<slug:user_id>/student', redirect_to_student, name='redirect-student'),\n]\n", "path": "server/apps/account/urls.py"}, {"content": "from datetime import date\nfrom typing import Any, Dict, Union\nfrom django.conf import settings\nfrom django.contrib.auth import login, logout\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.http.response import HttpResponse\nfrom django.views.generic.edit import FormView\nfrom django.shortcuts import get_object_or_404\n\nfrom apps.utils.accessMixins import UserIsSuperAdmin\nfrom .forms import SignUpForm, LoginForm, ForgottenPassForm, TemporaryRequestSignUpForm, UpgradePermanentAccountForm\nfrom .tokens import account_activation_token\nfrom django.contrib import messages\nfrom django.shortcuts import render, redirect\nfrom django.template.loader import render_to_string\nfrom django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode\nfrom django.utils.encoding import force_bytes, force_text\nfrom django.urls import reverse, reverse_lazy\n\nfrom django.contrib.auth.views import PasswordResetConfirmView\nfrom django.views import View\n\nfrom django.contrib.auth.forms import 
SetPasswordForm\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\nfrom django.contrib.auth.models import User\nfrom apps.student.models import Student\n\nfrom .emailAuthBackend import EmailBackend\nfrom .models import TemporaryAccessRequest\nfrom .utils import user_creation, send_email_confirmation\n\n\nclass RegistrationView(FormView):\n template_name = 'account/registration.html'\n form_class = SignUpForm\n\n def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:\n context = super().get_context_data(**kwargs)\n context['temporary_registration'] = settings.TEMPORARY_ACCOUNTS_DATE_LIMIT >= date.today()\n return context\n\n def form_valid(self, form):\n user_creation(form, self.request)\n return redirect(reverse('home:home'))\n\n\nclass TemporaryRegistrationView(FormView):\n form_class = TemporaryRequestSignUpForm\n template_name = 'account/temporary_registration.html'\n\n def dispatch(self, request, *args: Any, **kwargs: Any):\n \"\"\"Do not allow to use this view outside of allowed temporary accounts windows.\"\"\"\n if not settings.TEMPORARY_ACCOUNTS_DATE_LIMIT >= date.today():\n return redirect(reverse('account:registration'))\n return super().dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:\n context = super().get_context_data(**kwargs)\n context['DEADLINE_TEMPORARY_REGISTRATION'] = settings.TEMPORARY_ACCOUNTS_DATE_LIMIT\n return context\n\n def form_valid(self, form) -> HttpResponse:\n user_creation(form, self.request)\n return redirect(reverse('home:home'))\n\n\nclass ConfirmUser(View):\n def get(self, request, uidb64, token):\n tempAccessReq: Union[TemporaryAccessRequest, None] = None\n try:\n uid = force_text(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n except (TypeError, ValueError, OverflowError, User.DoesNotExist):\n return render(self.request, 'account/activation_invalid.html')\n # checking if the user is not a temporary one\n try:\n tempAccessReq: TemporaryAccessRequest = TemporaryAccessRequest.objects.get(\n user=user.pk)\n if not tempAccessReq.approved:\n return render(self.request, 'account/activation_invalid.html')\n except TemporaryAccessRequest.DoesNotExist:\n tempAccessReq = None\n # checking if the token is valid.\n if account_activation_token.check_token(user, token):\n # if valid set active true\n user.is_active = True\n if tempAccessReq is not None:\n user.email = tempAccessReq.final_email\n tempAccessReq.delete()\n messages.warning(\n request, f'Dor\u00e9navant vous devez utiliser {user.email} pour vous connecter.')\n user.save()\n login(self.request, user,\n backend='apps.account.emailAuthBackend.EmailBackend')\n messages.success(request, 'Votre compte est desormais actif !')\n return redirect(reverse('home:home'))\n else:\n return render(self.request, 'account/activation_invalid.html')\n\n\nclass AuthView(FormView):\n template_name = 'account/login.html'\n form_class = LoginForm\n\n def get(self, request):\n if request.user.is_authenticated:\n user = request.user\n message = f'Vous etes d\u00e9j\u00e0 connect\u00e9 en tant que {user.first_name.title()}.'\n messages.warning(request, message)\n return redirect(reverse('home:home'))\n else:\n return super(AuthView, AuthView).get(self, request)\n\n def form_invalid(self, form):\n message = f'Veuillez vous connecter avec votre adresse mail ECN.'\n messages.warning(self.request, message)\n return redirect(reverse('account:login'))\n\n def form_valid(self, form):\n username = form.cleaned_data['email']\n password = 
form.cleaned_data['password']\n user = EmailBackend.authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n message = f'Bonjour {user.first_name.title()} !'\n messages.success(self.request, message)\n else:\n if settings.TEMPORARY_ACCOUNTS_DATE_LIMIT >= date.today():\n # During certain periods allow temporary accounts.\n try:\n temporaryAccessRequest: TemporaryAccessRequest = TemporaryAccessRequest.objects.get(\n user=user\n )\n if not temporaryAccessRequest.mail_valid:\n message = 'Votre compte n\\'est pas encore actif.\\\n Veuillez cliquer sur le lien envoy\u00e9 par mail pour l\\'\\\n activer.'\n messages.error(self.request, message)\n return redirect(reverse('account:login'))\n if temporaryAccessRequest.approved_until <= date.today():\n message = 'Votre compte n\\'a pas encore \u00e9t\u00e9 approuv\u00e9.\\\n On vous pr\u00e9vient par mail d\u00e8s que c\\'est le cas.'\n messages.error(self.request, message)\n return redirect(reverse('account:login'))\n message = f'Votre compte n\\'est pas encore d\u00e9finitif.\\\n Veuillez le valider <a href=\"{reverse(\"account:upgrade-permanent\")}\">ici</a>.\\\n Attention apr\u00e8s le {temporaryAccessRequest.approved_until}\\\n vous ne pourrez plus vous connecter si vous n\\'avez pas renseign\u00e9 votre adresse Centrale.'\n messages.warning(self.request, message)\n except TemporaryAccessRequest.DoesNotExist:\n messages.error(\n self.request, 'Identifiant inconnu ou mot de passe invalide.')\n return redirect(reverse('account:login'))\n else:\n messages.warning(\n self.request, 'Votre compte n\\'est pas encore actif. Veuillez cliquer sur le lien dans \\'email.')\n login(self.request, user,\n backend='apps.account.emailAuthBackend.EmailBackend')\n return redirect(reverse('home:home'))\n else:\n messages.error(\n self.request, 'Identifiant inconnu ou mot de passe invalide.')\n return redirect(reverse('account:login'))\n\n\nclass LogoutView(View):\n def get(self, request):\n logout(request)\n messages.success(request, 'Vous avez \u00e9t\u00e9 d\u00e9connect\u00e9.')\n return redirect(reverse('account:login'))\n\n\nclass ForgottenPassView(FormView):\n form_class = ForgottenPassForm\n template_name = 'account/forgotten_pass.html'\n\n def form_valid(self, form):\n try:\n user = User.objects.get(email=form.cleaned_data['email'])\n if user is not None:\n subject = '[Nantral Platform] Reinitialisation de votre mot de passe'\n current_site = get_current_site(self.request)\n message = render_to_string('account/mail/password_request.html', {\n 'user': user,\n 'domain': current_site.domain,\n 'uidb64': urlsafe_base64_encode(force_bytes(user.pk)),\n # method will generate a hash value with user related data\n 'token': account_activation_token.make_token(user),\n })\n user.email_user(\n subject, message, 'accounts@nantral-platform.fr', html_message=message)\n except User.DoesNotExist:\n pass\n messages.success(\n self.request, 'Un email de r\u00e9cuperation a \u00e9t\u00e9 envoy\u00e9 si cette adresse existe.')\n return redirect(reverse('account:login'))\n\n\nclass PasswordResetConfirmCustomView(PasswordResetConfirmView):\n template_name = 'account/reset_password.html'\n post_reset_login = True\n post_reset_login_backend = 'apps.account.emailAuthBackend.EmailBackend'\n form_class = SetPasswordForm\n token_generator = account_activation_token\n success_url = reverse_lazy('home:home')\n\n\ndef redirect_to_student(request, user_id):\n user = User.objects.get(id=user_id)\n student = Student.objects.get(user=user)\n return 
redirect('student:update', student.pk)\n\n\nclass ABCApprovalTemporaryResgistrationView(UserIsSuperAdmin, View):\n def get(self, request, id):\n self.temp_req: TemporaryAccessRequest = get_object_or_404(\n TemporaryAccessRequest, id=id)\n\n if self.temp_req.approved:\n messages.warning(request, f'Cette requ\u00eate a d\u00e9j\u00e0 \u00e9t\u00e9 approuv\u00e9e.')\n return redirect(reverse('home:home'))\n\n\nclass ApproveTemporaryRegistrationView(ABCApprovalTemporaryResgistrationView):\n def get(self, request, id):\n super().get(request, id)\n self.temp_req.approve()\n messages.success(\n request, f'Vous avez accept\u00e9 la demande de {self.temp_req.user.first_name} {self.temp_req.user.last_name}')\n return redirect(reverse('home:home'))\n\n\nclass DenyTemporaryRegistrationView(ABCApprovalTemporaryResgistrationView):\n def get(self, request, id):\n super().get(request, id)\n messages.success(\n request, f'Vous avez refus\u00e9 la demande de {self.temp_req.user.first_name} {self.temp_req.user.last_name}')\n self.temp_req.deny()\n return redirect(reverse('home:home'))\n\n\nclass ConfirmUserTemporary(View):\n def get(self, request, uidb64, token):\n try:\n uid = force_text(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n except (TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n # checking if the user exists, if the token is valid.\n if user is not None and account_activation_token.check_token(user, token):\n try:\n temp_req: TemporaryAccessRequest = TemporaryAccessRequest.objects.get(\n user=user)\n temp_req.mail_valid = True\n temp_req.save()\n except TemporaryAccessRequest.DoesNotExist:\n return render(self.request, 'account/activation_invalid.html')\n messages.success(request, 'Votre addresse mail est confirm\u00e9! \\n\\\n Comme vous n\\'avez pas utilis\u00e9 votre adresse Centrale, vous devez encore attendre qu\\'un administrateur v\u00e9rifie votre inscription.\\n\\\n On vous pr\u00e9vient par mail d\u00e8s que c\\'est bon!. ')\n return redirect(reverse('home:home'))\n else:\n return render(self.request, 'account/activation_invalid.html')\n\n\nclass PermanentAccountUpgradeView(LoginRequiredMixin, FormView):\n form_class = UpgradePermanentAccountForm\n template_name = 'account/permanent_account_upgrade.html'\n success_url = reverse_lazy('home:home')\n\n def get(self, request):\n get_object_or_404(\n TemporaryAccessRequest,\n user=self.request.user\n )\n return super().get(request)\n\n def form_valid(self, form: UpgradePermanentAccountForm) -> HttpResponse:\n temp_request = get_object_or_404(\n TemporaryAccessRequest,\n user=self.request.user\n )\n temp_request.final_email = form.cleaned_data['email']\n temp_request.save()\n send_email_confirmation(\n self.request.user, self.request, send_to=form.cleaned_data['email'])\n return super().form_valid(form)\n", "path": "server/apps/account/views.py"}]} | 3,930 | 587 |
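The nantralPlatform patch above has two independent parts: the `reset_pass` URL pattern gains a trailing slash so the link in the reset email resolves to a route, and the bare `User.objects.get(email=...)` call in `ForgottenPassView.form_valid` is wrapped in `try/except User.DoesNotExist`, so submitting an unknown address no longer raises an unhandled exception before the generic "an email was sent if this address exists" message. A minimal sketch of the guarded lookup, assuming Django's stock `User` model and leaving out the email-sending logic:

```python
# Sketch of the guarded lookup introduced by the patch: an unknown email
# returns None instead of propagating User.DoesNotExist out of the view.
from django.contrib.auth.models import User


def find_user_by_email(email):
    try:
        return User.objects.get(email=email)
    except User.DoesNotExist:
        return None
```

Either way, the view still renders the same success message, which is what keeps the form from revealing whether an address is registered.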
gh_patches_debug_17418 | rasdani/github-patches | git_diff | zulip__zulip-3596 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wikipedia bot crashes when the query contains multiple words delimited by underscores.
The bot is under `contrib_bots/bots`. Some error handling is needed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `contrib_bots/bots/wikipedia/wikipedia.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import print_function
3 import requests
4 import logging
5
6 # See readme.md for instructions on running this code.
7
8 class WikipediaHandler(object):
9 '''
10 This plugin facilitates searching Wikipedia for a
11 specific key term and returns the top article from the
12 search. It looks for messages starting with '@wikipedia'
13 or '@wiki'.
14
15 In this example, we write all Wikipedia searches into
16 the same stream that it was called from, but this code
17 could be adapted to write Wikipedia searches to some
18 kind of external issue tracker as well.
19 '''
20
21 def usage(self):
22 return '''
23 This plugin will allow users to directly search
24 Wikipedia for a specific key term and get the top
25 article that is returned from the search. Users
26 should preface searches with "@wikipedia" or
27 "@wiki".
28 '''
29
30 def triage_message(self, message, client):
31 original_content = message['content']
32
33 # This next line of code is defensive, as we
34 # never want to get into an infinite loop of posting Wikipedia
35 # searches for own Wikipedia searches!
36 if message['sender_full_name'] == 'wikipedia-bot':
37 return False
38 is_wikipedia = (original_content.startswith('@wiki') or
39 original_content.startswith('@wikipedia'))
40
41 return is_wikipedia
42
43 def handle_message(self, message, client, state_handler):
44 query = message['content']
45
46 for prefix in ['@wikipedia', '@wiki']:
47 if query.startswith(prefix):
48 query = query[len(prefix)+1:]
49 break
50
51 query_wiki_link = ('https://en.wikipedia.org/w/api.php?action=query&'
52 'list=search&srsearch=%s&format=json' % (query,))
53 try:
54 data = requests.get(query_wiki_link)
55 except requests.exceptions.RequestException:
56 logging.error('broken link')
57 return
58
59 if data.status_code != 200:
60 logging.error('unsuccessful data')
61 return
62
63 search_string = data.json()['query']['search'][0]['title'].replace(' ', '_')
64 url = 'https://wikipedia.org/wiki/' + search_string
65 new_content = 'For search term "' + query
66 if len(data.json()['query']['search']) == 0:
67 new_content = 'I am sorry. The search term you provided is not found :slightly_frowning_face:'
68 else:
69 new_content = new_content + '", ' + url
70
71 client.send_message(dict(
72 type=message['type'],
73 to=message['display_recipient'],
74 subject=message['subject'],
75 content=new_content,
76 ))
77
78 handler_class = WikipediaHandler
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/contrib_bots/bots/wikipedia/wikipedia.py b/contrib_bots/bots/wikipedia/wikipedia.py
--- a/contrib_bots/bots/wikipedia/wikipedia.py
+++ b/contrib_bots/bots/wikipedia/wikipedia.py
@@ -60,12 +60,12 @@
logging.error('unsuccessful data')
return
- search_string = data.json()['query']['search'][0]['title'].replace(' ', '_')
- url = 'https://wikipedia.org/wiki/' + search_string
new_content = 'For search term "' + query
if len(data.json()['query']['search']) == 0:
new_content = 'I am sorry. The search term you provided is not found :slightly_frowning_face:'
else:
+ search_string = data.json()['query']['search'][0]['title'].replace(' ', '_')
+ url = 'https://en.wikipedia.org/wiki/' + search_string
new_content = new_content + '", ' + url
client.send_message(dict(
| {"golden_diff": "diff --git a/contrib_bots/bots/wikipedia/wikipedia.py b/contrib_bots/bots/wikipedia/wikipedia.py\n--- a/contrib_bots/bots/wikipedia/wikipedia.py\n+++ b/contrib_bots/bots/wikipedia/wikipedia.py\n@@ -60,12 +60,12 @@\n logging.error('unsuccessful data')\n return\n \n- search_string = data.json()['query']['search'][0]['title'].replace(' ', '_')\n- url = 'https://wikipedia.org/wiki/' + search_string\n new_content = 'For search term \"' + query\n if len(data.json()['query']['search']) == 0:\n new_content = 'I am sorry. The search term you provided is not found :slightly_frowning_face:'\n else:\n+ search_string = data.json()['query']['search'][0]['title'].replace(' ', '_')\n+ url = 'https://en.wikipedia.org/wiki/' + search_string\n new_content = new_content + '\", ' + url\n \n client.send_message(dict(\n", "issue": "Wikipedia bot crashes when the query contains multiple word delimit by underscore.\nThe bot is under `contrib_bots/bots`. Some error handling is needed.\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\nimport requests\nimport logging\n\n# See readme.md for instructions on running this code.\n\nclass WikipediaHandler(object):\n '''\n This plugin facilitates searching Wikipedia for a\n specific key term and returns the top article from the\n search. It looks for messages starting with '@wikipedia'\n or '@wiki'.\n\n In this example, we write all Wikipedia searches into\n the same stream that it was called from, but this code\n could be adapted to write Wikipedia searches to some\n kind of external issue tracker as well.\n '''\n\n def usage(self):\n return '''\n This plugin will allow users to directly search\n Wikipedia for a specific key term and get the top\n article that is returned from the search. Users\n should preface searches with \"@wikipedia\" or\n \"@wiki\".\n '''\n\n def triage_message(self, message, client):\n original_content = message['content']\n\n # This next line of code is defensive, as we\n # never want to get into an infinite loop of posting Wikipedia\n # searches for own Wikipedia searches!\n if message['sender_full_name'] == 'wikipedia-bot':\n return False\n is_wikipedia = (original_content.startswith('@wiki') or\n original_content.startswith('@wikipedia'))\n\n return is_wikipedia\n\n def handle_message(self, message, client, state_handler):\n query = message['content']\n\n for prefix in ['@wikipedia', '@wiki']:\n if query.startswith(prefix):\n query = query[len(prefix)+1:]\n break\n\n query_wiki_link = ('https://en.wikipedia.org/w/api.php?action=query&'\n 'list=search&srsearch=%s&format=json' % (query,))\n try:\n data = requests.get(query_wiki_link)\n except requests.exceptions.RequestException:\n logging.error('broken link')\n return\n\n if data.status_code != 200:\n logging.error('unsuccessful data')\n return\n\n search_string = data.json()['query']['search'][0]['title'].replace(' ', '_')\n url = 'https://wikipedia.org/wiki/' + search_string\n new_content = 'For search term \"' + query\n if len(data.json()['query']['search']) == 0:\n new_content = 'I am sorry. 
The search term you provided is not found :slightly_frowning_face:'\n else:\n new_content = new_content + '\", ' + url\n\n client.send_message(dict(\n type=message['type'],\n to=message['display_recipient'],\n subject=message['subject'],\n content=new_content,\n ))\n\nhandler_class = WikipediaHandler\n", "path": "contrib_bots/bots/wikipedia/wikipedia.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\nimport requests\nimport logging\n\n# See readme.md for instructions on running this code.\n\nclass WikipediaHandler(object):\n '''\n This plugin facilitates searching Wikipedia for a\n specific key term and returns the top article from the\n search. It looks for messages starting with '@wikipedia'\n or '@wiki'.\n\n In this example, we write all Wikipedia searches into\n the same stream that it was called from, but this code\n could be adapted to write Wikipedia searches to some\n kind of external issue tracker as well.\n '''\n\n def usage(self):\n return '''\n This plugin will allow users to directly search\n Wikipedia for a specific key term and get the top\n article that is returned from the search. Users\n should preface searches with \"@wikipedia\" or\n \"@wiki\".\n '''\n\n def triage_message(self, message, client):\n original_content = message['content']\n\n # This next line of code is defensive, as we\n # never want to get into an infinite loop of posting Wikipedia\n # searches for own Wikipedia searches!\n if message['sender_full_name'] == 'wikipedia-bot':\n return False\n is_wikipedia = (original_content.startswith('@wiki') or\n original_content.startswith('@wikipedia'))\n\n return is_wikipedia\n\n def handle_message(self, message, client, state_handler):\n query = message['content']\n\n for prefix in ['@wikipedia', '@wiki']:\n if query.startswith(prefix):\n query = query[len(prefix)+1:]\n break\n\n query_wiki_link = ('https://en.wikipedia.org/w/api.php?action=query&'\n 'list=search&srsearch=%s&format=json' % (query,))\n try:\n data = requests.get(query_wiki_link)\n except requests.exceptions.RequestException:\n logging.error('broken link')\n return\n\n if data.status_code != 200:\n logging.error('unsuccessful data')\n return\n\n new_content = 'For search term \"' + query\n if len(data.json()['query']['search']) == 0:\n new_content = 'I am sorry. The search term you provided is not found :slightly_frowning_face:'\n else:\n search_string = data.json()['query']['search'][0]['title'].replace(' ', '_')\n url = 'https://en.wikipedia.org/wiki/' + search_string\n new_content = new_content + '\", ' + url\n\n client.send_message(dict(\n type=message['type'],\n to=message['display_recipient'],\n subject=message['subject'],\n content=new_content,\n ))\n\nhandler_class = WikipediaHandler\n", "path": "contrib_bots/bots/wikipedia/wikipedia.py"}]} | 1,023 | 226 |
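The Wikipedia-bot fix above boils down to two things: only index the search results after confirming they are non-empty, and build the article link against `en.wikipedia.org`. A minimal sketch of the guarded lookup, using a hypothetical `results` list in place of the live `data.json()['query']['search']` payload:

```python
def top_article_url(results):
    # results stands in for data.json()['query']['search']; indexing results[0]
    # before checking emptiness is what makes empty searches crash (IndexError).
    if len(results) == 0:
        return None  # caller can answer with the "not found" message instead
    title = results[0]['title'].replace(' ', '_')
    return 'https://en.wikipedia.org/wiki/' + title

print(top_article_url([]))                        # None -> graceful "not found" path
print(top_article_url([{'title': 'Data cube'}]))  # https://en.wikipedia.org/wiki/Data_cube
```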
gh_patches_debug_3523 | rasdani/github-patches | git_diff | OpenNMT__OpenNMT-py-480 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
translate.py error with -n_best option (n > 1)
This happens with **-n_best, n> 1** and **-verbose**.
```
$ python translate.py -model model.pt -src source.txt -n_best 10 -output pred10best.txt -replace_unk -verbose
Loading model parameters.
PRED SCORE: -6.3616
BEST HYP:
Traceback (most recent call last):
File "translate.py", line 116, in <module>
main()
File "translate.py", line 97, in main
output = trans.log(sent_number)
File "/home/user/OpenNMT-py/onmt/translate/Translation.py", line 116, in log
for score, sent in zip(self.pred_score, self.pred_sents):
AttributeError: 'Translation' object has no attribute 'pred_score'
```
translate.py error with -n_best option (n > 1)
This happens with **-n_best, n> 1** and **-verbose**.
```
$ python translate.py -model model.pt -src source.txt -n_best 10 -output pred10best.txt -replace_unk -verbose
Loading model parameters.
PRED SCORE: -6.3616
BEST HYP:
Traceback (most recent call last):
File "translate.py", line 116, in <module>
main()
File "translate.py", line 97, in main
output = trans.log(sent_number)
File "/home/user/OpenNMT-py/onmt/translate/Translation.py", line 116, in log
for score, sent in zip(self.pred_score, self.pred_sents):
AttributeError: 'Translation' object has no attribute 'pred_score'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `onmt/translate/Translation.py`
Content:
```
1 from __future__ import division, unicode_literals
2
3 import torch
4 import onmt.io
5
6
7 class TranslationBuilder(object):
8 """
9 Build a word-based translation from the batch output
10 of translator and the underlying dictionaries.
11
12 Replacement based on "Addressing the Rare Word
13 Problem in Neural Machine Translation" :cite:`Luong2015b`
14
15 Args:
16 data (DataSet):
17 fields (dict of Fields): data fields
18 n_best (int): number of translations produced
19 replace_unk (bool): replace unknown words using attention
20 has_tgt (bool): will the batch have gold targets
21 """
22 def __init__(self, data, fields, n_best, replace_unk, has_tgt):
23 self.data = data
24 self.fields = fields
25 self.n_best = n_best
26 self.replace_unk = replace_unk
27 self.has_tgt = has_tgt
28
29 def _build_target_tokens(self, src, src_vocab, src_raw, pred, attn):
30 vocab = self.fields["tgt"].vocab
31 tokens = []
32 for tok in pred:
33 if tok < len(vocab):
34 tokens.append(vocab.itos[tok])
35 else:
36 tokens.append(src_vocab.itos[tok - len(vocab)])
37 if tokens[-1] == onmt.io.EOS_WORD:
38 tokens = tokens[:-1]
39 break
40 if self.replace_unk and (attn is not None) and (src is not None):
41 for i in range(len(tokens)):
42 if tokens[i] == vocab.itos[onmt.io.UNK]:
43 _, maxIndex = attn[i].max(0)
44 tokens[i] = src_raw[maxIndex[0]]
45 return tokens
46
47 def from_batch(self, translation_batch):
48 batch = translation_batch["batch"]
49 assert(len(translation_batch["gold_score"]) ==
50 len(translation_batch["predictions"]))
51 batch_size = batch.batch_size
52
53 preds, predScore, attn, gold_score, indices = list(zip(
54 *sorted(zip(translation_batch["predictions"],
55 translation_batch["scores"],
56 translation_batch["attention"],
57 translation_batch["gold_score"],
58 batch.indices.data),
59 key=lambda x: x[-1])))
60
61 # Sorting
62 inds, perm = torch.sort(batch.indices.data)
63 data_type = self.data.data_type
64 if data_type == 'text':
65 src = batch.src[0].data.index_select(1, perm)
66 else:
67 src = None
68
69 if self.has_tgt:
70 tgt = batch.tgt.data.index_select(1, perm)
71 else:
72 tgt = None
73
74 translations = []
75 for b in range(batch_size):
76 if data_type == 'text':
77 src_vocab = self.data.src_vocabs[inds[b]]
78 src_raw = self.data.examples[inds[b]].src
79 else:
80 src_vocab = None
81 src_raw = None
82 pred_sents = [self._build_target_tokens(
83 src[:, b] if src is not None else None,
84 src_vocab, src_raw,
85 preds[b][n], attn[b][n])
86 for n in range(self.n_best)]
87 gold_sent = None
88 if tgt is not None:
89 gold_sent = self._build_target_tokens(
90 src[:, b] if src is not None else None,
91 src_vocab, src_raw,
92 tgt[1:, b] if tgt is not None else None, None)
93
94 translation = Translation(src[:, b] if src is not None else None,
95 src_raw, pred_sents,
96 attn[b], predScore[b], gold_sent,
97 gold_score[b])
98 translations.append(translation)
99
100 return translations
101
102
103 class Translation(object):
104 """
105 Container for a translated sentence.
106
107 Attributes:
108 src (`LongTensor`): src word ids
109 src_raw ([str]): raw src words
110
111 pred_sents ([[str]]): words from the n-best translations
112 pred_scores ([[float]]): log-probs of n-best translations
113 attns ([`FloatTensor`]) : attention dist for each translation
114 gold_sent ([str]): words from gold translation
115 gold_score ([float]): log-prob of gold translation
116
117 """
118 def __init__(self, src, src_raw, pred_sents,
119 attn, pred_scores, tgt_sent, gold_score):
120 self.src = src
121 self.src_raw = src_raw
122 self.pred_sents = pred_sents
123 self.attns = attn
124 self.pred_scores = pred_scores
125 self.gold_sent = tgt_sent
126 self.gold_score = gold_score
127
128 def log(self, sent_number):
129 """
130 Log translation to stdout.
131 """
132 output = '\nSENT {}: {}\n'.format(sent_number, self.src_raw)
133
134 best_pred = self.pred_sents[0]
135 best_score = self.pred_scores[0]
136 pred_sent = ' '.join(best_pred)
137 output += 'PRED {}: {}\n'.format(sent_number, pred_sent)
138 print("PRED SCORE: {:.4f}".format(best_score))
139
140 if self.gold_sent is not None:
141 tgt_sent = ' '.join(self.gold_sent)
142 output += 'GOLD {}: {}\n'.format(sent_number, tgt_sent)
143 output += ("GOLD SCORE: {:.4f}".format(self.gold_score))
144
145 if len(self.pred_sents) > 1:
146 print('\nBEST HYP:')
147 for score, sent in zip(self.pred_score, self.pred_sents):
148 output += "[{:.4f}] {}\n".format(score, sent)
149
150 return output
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/onmt/translate/Translation.py b/onmt/translate/Translation.py
--- a/onmt/translate/Translation.py
+++ b/onmt/translate/Translation.py
@@ -113,7 +113,7 @@
if len(self.pred_sents) > 1:
print('\nBEST HYP:')
- for score, sent in zip(self.pred_score, self.pred_sents):
+ for score, sent in zip(self.pred_scores, self.pred_sents):
output += "[{:.4f}] {}\n".format(score, sent)
return output
| {"golden_diff": "diff --git a/onmt/translate/Translation.py b/onmt/translate/Translation.py\n--- a/onmt/translate/Translation.py\n+++ b/onmt/translate/Translation.py\n@@ -113,7 +113,7 @@\n \n if len(self.pred_sents) > 1:\n print('\\nBEST HYP:')\n- for score, sent in zip(self.pred_score, self.pred_sents):\n+ for score, sent in zip(self.pred_scores, self.pred_sents):\n output += \"[{:.4f}] {}\\n\".format(score, sent)\n \n return output\n", "issue": "translate.py error with -n_best option (n > 1)\nThis happens with **-n_best, n> 1** and **-verbose**.\r\n```\r\n$ python translate.py -model model.pt -src source.txt -n_best 10 -output pred10best.txt -replace_unk -verbose\r\nLoading model parameters.\r\nPRED SCORE: -6.3616\r\n\r\nBEST HYP:\r\nTraceback (most recent call last):\r\n File \"translate.py\", line 116, in <module>\r\n main()\r\n File \"translate.py\", line 97, in main\r\n output = trans.log(sent_number)\r\n File \"/home/user/OpenNMT-py/onmt/translate/Translation.py\", line 116, in log\r\n for score, sent in zip(self.pred_score, self.pred_sents):\r\nAttributeError: 'Translation' object has no attribute 'pred_score'\r\n```\ntranslate.py error with -n_best option (n > 1)\nThis happens with **-n_best, n> 1** and **-verbose**.\r\n```\r\n$ python translate.py -model model.pt -src source.txt -n_best 10 -output pred10best.txt -replace_unk -verbose\r\nLoading model parameters.\r\nPRED SCORE: -6.3616\r\n\r\nBEST HYP:\r\nTraceback (most recent call last):\r\n File \"translate.py\", line 116, in <module>\r\n main()\r\n File \"translate.py\", line 97, in main\r\n output = trans.log(sent_number)\r\n File \"/home/user/OpenNMT-py/onmt/translate/Translation.py\", line 116, in log\r\n for score, sent in zip(self.pred_score, self.pred_sents):\r\nAttributeError: 'Translation' object has no attribute 'pred_score'\r\n```\n", "before_files": [{"content": "from __future__ import division, unicode_literals\n\nimport torch\nimport onmt.io\n\n\nclass TranslationBuilder(object):\n \"\"\"\n Build a word-based translation from the batch output\n of translator and the underlying dictionaries.\n\n Replacement based on \"Addressing the Rare Word\n Problem in Neural Machine Translation\" :cite:`Luong2015b`\n\n Args:\n data (DataSet):\n fields (dict of Fields): data fields\n n_best (int): number of translations produced\n replace_unk (bool): replace unknown words using attention\n has_tgt (bool): will the batch have gold targets\n \"\"\"\n def __init__(self, data, fields, n_best, replace_unk, has_tgt):\n self.data = data\n self.fields = fields\n self.n_best = n_best\n self.replace_unk = replace_unk\n self.has_tgt = has_tgt\n\n def _build_target_tokens(self, src, src_vocab, src_raw, pred, attn):\n vocab = self.fields[\"tgt\"].vocab\n tokens = []\n for tok in pred:\n if tok < len(vocab):\n tokens.append(vocab.itos[tok])\n else:\n tokens.append(src_vocab.itos[tok - len(vocab)])\n if tokens[-1] == onmt.io.EOS_WORD:\n tokens = tokens[:-1]\n break\n if self.replace_unk and (attn is not None) and (src is not None):\n for i in range(len(tokens)):\n if tokens[i] == vocab.itos[onmt.io.UNK]:\n _, maxIndex = attn[i].max(0)\n tokens[i] = src_raw[maxIndex[0]]\n return tokens\n\n def from_batch(self, translation_batch):\n batch = translation_batch[\"batch\"]\n assert(len(translation_batch[\"gold_score\"]) ==\n len(translation_batch[\"predictions\"]))\n batch_size = batch.batch_size\n\n preds, predScore, attn, gold_score, indices = list(zip(\n *sorted(zip(translation_batch[\"predictions\"],\n translation_batch[\"scores\"],\n 
translation_batch[\"attention\"],\n translation_batch[\"gold_score\"],\n batch.indices.data),\n key=lambda x: x[-1])))\n\n # Sorting\n inds, perm = torch.sort(batch.indices.data)\n data_type = self.data.data_type\n if data_type == 'text':\n src = batch.src[0].data.index_select(1, perm)\n else:\n src = None\n\n if self.has_tgt:\n tgt = batch.tgt.data.index_select(1, perm)\n else:\n tgt = None\n\n translations = []\n for b in range(batch_size):\n if data_type == 'text':\n src_vocab = self.data.src_vocabs[inds[b]]\n src_raw = self.data.examples[inds[b]].src\n else:\n src_vocab = None\n src_raw = None\n pred_sents = [self._build_target_tokens(\n src[:, b] if src is not None else None,\n src_vocab, src_raw,\n preds[b][n], attn[b][n])\n for n in range(self.n_best)]\n gold_sent = None\n if tgt is not None:\n gold_sent = self._build_target_tokens(\n src[:, b] if src is not None else None,\n src_vocab, src_raw,\n tgt[1:, b] if tgt is not None else None, None)\n\n translation = Translation(src[:, b] if src is not None else None,\n src_raw, pred_sents,\n attn[b], predScore[b], gold_sent,\n gold_score[b])\n translations.append(translation)\n\n return translations\n\n\nclass Translation(object):\n \"\"\"\n Container for a translated sentence.\n\n Attributes:\n src (`LongTensor`): src word ids\n src_raw ([str]): raw src words\n\n pred_sents ([[str]]): words from the n-best translations\n pred_scores ([[float]]): log-probs of n-best translations\n attns ([`FloatTensor`]) : attention dist for each translation\n gold_sent ([str]): words from gold translation\n gold_score ([float]): log-prob of gold translation\n\n \"\"\"\n def __init__(self, src, src_raw, pred_sents,\n attn, pred_scores, tgt_sent, gold_score):\n self.src = src\n self.src_raw = src_raw\n self.pred_sents = pred_sents\n self.attns = attn\n self.pred_scores = pred_scores\n self.gold_sent = tgt_sent\n self.gold_score = gold_score\n\n def log(self, sent_number):\n \"\"\"\n Log translation to stdout.\n \"\"\"\n output = '\\nSENT {}: {}\\n'.format(sent_number, self.src_raw)\n\n best_pred = self.pred_sents[0]\n best_score = self.pred_scores[0]\n pred_sent = ' '.join(best_pred)\n output += 'PRED {}: {}\\n'.format(sent_number, pred_sent)\n print(\"PRED SCORE: {:.4f}\".format(best_score))\n\n if self.gold_sent is not None:\n tgt_sent = ' '.join(self.gold_sent)\n output += 'GOLD {}: {}\\n'.format(sent_number, tgt_sent)\n output += (\"GOLD SCORE: {:.4f}\".format(self.gold_score))\n\n if len(self.pred_sents) > 1:\n print('\\nBEST HYP:')\n for score, sent in zip(self.pred_score, self.pred_sents):\n output += \"[{:.4f}] {}\\n\".format(score, sent)\n\n return output\n", "path": "onmt/translate/Translation.py"}], "after_files": [{"content": "from __future__ import division, unicode_literals\n\nimport torch\nimport onmt.io\n\n\nclass TranslationBuilder(object):\n def __init__(self, data, fields, n_best, replace_unk, has_tgt):\n self.data = data\n self.fields = fields\n self.n_best = n_best\n self.replace_unk = replace_unk\n self.has_tgt = has_tgt\n\n def _build_target_tokens(self, src, src_vocab, src_raw, pred, attn):\n vocab = self.fields[\"tgt\"].vocab\n tokens = []\n for tok in pred:\n if tok < len(vocab):\n tokens.append(vocab.itos[tok])\n else:\n tokens.append(src_vocab.itos[tok - len(vocab)])\n if tokens[-1] == onmt.io.EOS_WORD:\n tokens = tokens[:-1]\n break\n if self.replace_unk and (attn is not None) and (src is not None):\n for i in range(len(tokens)):\n if tokens[i] == vocab.itos[onmt.io.UNK]:\n _, maxIndex = attn[i].max(0)\n tokens[i] = 
src_raw[maxIndex[0]]\n return tokens\n\n def from_batch(self, translation_batch):\n batch = translation_batch[\"batch\"]\n assert(len(translation_batch[\"gold_score\"]) ==\n len(translation_batch[\"predictions\"]))\n batch_size = batch.batch_size\n\n preds, predScore, attn, gold_score, indices = list(zip(\n *sorted(zip(translation_batch[\"predictions\"],\n translation_batch[\"scores\"],\n translation_batch[\"attention\"],\n translation_batch[\"gold_score\"],\n batch.indices.data),\n key=lambda x: x[-1])))\n\n # Sorting\n inds, perm = torch.sort(batch.indices.data)\n data_type = self.data.data_type\n if data_type == 'text':\n src = batch.src[0].data.index_select(1, perm)\n else:\n src = None\n\n if self.has_tgt:\n tgt = batch.tgt.data.index_select(1, perm)\n else:\n tgt = None\n\n translations = []\n for b in range(batch_size):\n if data_type == 'text':\n src_vocab = self.data.src_vocabs[inds[b]]\n src_raw = self.data.examples[inds[b]].src\n else:\n src_vocab = None\n src_raw = None\n pred_sents = [self._build_target_tokens(\n src[:, b] if src is not None else None,\n src_vocab, src_raw,\n preds[b][n], attn[b][n])\n for n in range(self.n_best)]\n gold_sent = None\n if tgt is not None:\n gold_sent = self._build_target_tokens(\n src[:, b] if src is not None else None,\n src_vocab, src_raw,\n tgt[1:, b] if tgt is not None else None, None)\n\n translation = Translation(src[:, b] if src is not None else None,\n src_raw, pred_sents,\n attn[b], predScore[b], gold_sent,\n gold_score[b])\n translations.append(translation)\n\n return translations\n\n\nclass Translation(object):\n def __init__(self, src, src_raw, pred_sents,\n attn, pred_scores, tgt_sent, gold_score):\n self.src = src\n self.src_raw = src_raw\n self.pred_sents = pred_sents\n self.attns = attn\n self.pred_scores = pred_scores\n self.gold_sent = tgt_sent\n self.gold_score = gold_score\n\n def log(self, sent_number):\n output = '\\nSENT {}: {}\\n'.format(sent_number, self.src_raw)\n\n best_pred = self.pred_sents[0]\n best_score = self.pred_scores[0]\n pred_sent = ' '.join(best_pred)\n output += 'PRED {}: {}\\n'.format(sent_number, pred_sent)\n print(\"PRED SCORE: {:.4f}\".format(best_score))\n\n if self.gold_sent is not None:\n tgt_sent = ' '.join(self.gold_sent)\n output += 'GOLD {}: {}\\n'.format(sent_number, tgt_sent)\n output += (\"GOLD SCORE: {:.4f}\".format(self.gold_score))\n\n if len(self.pred_sents) > 1:\n print('\\nBEST HYP:')\n for score, sent in zip(self.pred_scores, self.pred_sents):\n output += \"[{:.4f}] {}\\n\".format(score, sent)\n\n return output\n", "path": "onmt/translate/Translation.py"}]} | 2,194 | 131 |
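The OpenNMT patch above is a one-character attribute fix (`pred_score` to `pred_scores`). A stripped-down stand-in for the `Translation` container, with only the fields the corrected `log()` loop touches, shows the n-best listing working; the real class also carries src, attention, and gold-score fields:

```python
class TranslationSketch:
    def __init__(self, pred_sents, pred_scores):
        self.pred_sents = pred_sents    # n-best hypotheses, each a list of tokens
        self.pred_scores = pred_scores  # matching log-probabilities

    def log(self, sent_number):
        output = 'SENT {}\n'.format(sent_number)
        if len(self.pred_sents) > 1:
            # iterate over pred_scores (plural); pred_score never existed on the object
            for score, sent in zip(self.pred_scores, self.pred_sents):
                output += '[{:.4f}] {}\n'.format(score, ' '.join(sent))
        return output

print(TranslationSketch([['a', 'cat'], ['a', 'dog']], [-0.7, -1.3]).log(1))
```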
gh_patches_debug_5911 | rasdani/github-patches | git_diff | opendatacube__datacube-core-1417 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
links on read the doc footer leads to 404
https://github.com/opendatacube/datacube-core/blob/develop/docs/_templates/odc-footer.html renders at the footer of page https://datacube-core.readthedocs.io/en/latest/index.html
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # This file is part of the Open Data Cube, see https://opendatacube.org for more information
2 #
3 # Copyright (c) 2015-2020 ODC Contributors
4 # SPDX-License-Identifier: Apache-2.0
5 import os
6 import sys
7
8 from bs4 import BeautifulSoup as bs # noqa: N813
9
10 # If extensions (or modules to document with autodoc) are in another directory,
11 # add these directories to sys.path here. If the directory is relative to the
12 # documentation root, use os.path.abspath to make it absolute, like shown here.
13 sys.path.insert(0, os.path.abspath('..'))
14 sys.path.insert(0, os.path.abspath('.'))
15 print(sys.path)
16 on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
17
18 # -- General configuration ------------------------------------------------
19
20 # If your documentation needs a minimal Sphinx version, state it here.
21 # needs_sphinx = '1.0'
22
23 # Add any Sphinx extension module names here, as strings. They can be
24 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
25 # ones.
26 extensions = [
27 'sphinx.ext.autodoc',
28 'sphinx.ext.autosummary',
29 'sphinx_autodoc_typehints',
30 'sphinx.ext.graphviz',
31 'sphinx.ext.viewcode',
32 'sphinx.ext.intersphinx',
33 'sphinx.ext.extlinks',
34 'sphinx.ext.mathjax',
35 'sphinx_click.ext',
36 'click_utils',
37 'autodocsumm',
38 'nbsphinx',
39 'sphinx.ext.napoleon',
40 'sphinx.ext.autosectionlabel'
41 ]
42
43 # Add any paths that contain templates here, relative to this directory.
44 templates_path = ['_templates']
45
46 # The suffix of source filenames.
47 source_suffix = ['.rst', '.md']
48
49 # The master toctree document.
50 master_doc = 'index'
51
52 # General information about the project.
53 project = u'Open Data Cube'
54
55 # The version info for the project you're documenting, acts as replacement for
56 # |version| and |release|, also used in various other places throughout the
57 # built documents.
58 #
59 # The short X.Y version.
60 version = "1.8"
61 # The full version, including alpha/beta/rc tags.
62 # FIXME: obtain real version by running git
63 release = version
64
65 # There are two options for replacing |today|: either, you set today to some
66 # non-false value, then it is used:
67 # today = ''
68 # Else, today_fmt is used as the format for a strftime call.
69 # today_fmt = '%B %d, %Y'
70
71 # List of patterns, relative to source directory, that match files and
72 # directories to ignore when looking for source files.
73 exclude_patterns = ['README.rst', '.condaenv', '.direnv']
74
75 # If true, '()' will be appended to :func: etc. cross-reference text.
76 add_function_parentheses = True
77
78 # If true, sectionauthor and moduleauthor directives will be shown in the
79 # output. They are ignored by default.
80 show_authors = False
81
82 # The name of the Pygments (syntax highlighting) style to use.
83 pygments_style = 'friendly'
84
85 autosummary_generate = True
86 autoclass_content = "both"
87
88 autodoc_default_options = {
89 'autosummary': True,
90 'inherited-members': True
91 }
92
93 extlinks = {'issue': ('https://github.com/opendatacube/datacube-core/issues/%s', 'issue '),
94 'pull': ('https://github.com/opendatacube/datacube-core/pulls/%s', 'PR ')}
95
96 intersphinx_mapping = {
97 'python': ('https://docs.python.org/3', None),
98 'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
99 'numpy': ('https://docs.scipy.org/doc/numpy/', None),
100 'xarray': ('https://xarray.pydata.org/en/stable/', None),
101 }
102
103 graphviz_output_format = 'svg'
104
105 # -- Options for HTML output ----------------------------------------------
106
107 # The theme to use for HTML and HTML Help pages. See the documentation for
108 # a list of builtin themes.
109 if on_rtd:
110 html_theme = 'pydata_sphinx_theme'
111 else:
112 html_theme = 'pydata_sphinx_theme'
113
114 html_theme_options = {
115 "navigation_depth": 1,
116 "show_prev_next": False,
117 "collapse_navigation": True,
118 "use_edit_page_button": True,
119 "footer_items": ["odc-footer"],
120 "page_sidebar_items": [
121 "page-toc",
122 "autoclass_page_toc",
123 "autosummary_page_toc",
124 "edit-this-page"
125 ],
126 "icon_links": [
127 {
128 "name": "GitHub",
129 "url": "https://github.com/opendatacube/datacube-core",
130 "icon": "fab fa-github",
131 },
132 {
133 "name": "Slack",
134 "url": "http://slack.opendatacube.org/",
135 "icon": "fab fa-slack",
136 },
137 ],
138 }
139
140 html_context = {
141 "github_user": "opendatacube",
142 "github_repo": "datacube-core",
143 "github_version": "develop",
144 "doc_path": "docs",
145 }
146
147 html_logo = '_static/odc-logo-horizontal.svg'
148 html_static_path = ['_static']
149
150 # The name of an image file (within the static path) to use as favicon of the
151 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
152 # pixels large.
153 # html_favicon = None
154
155 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
156 # using the given strftime format.
157 html_last_updated_fmt = '%b %d, %Y'
158
159
160 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
161 html_show_sphinx = False
162
163 # Output file base name for HTML help builder.
164 htmlhelp_basename = 'ODCdoc'
165
166 # Grouping the document tree into LaTeX files. List of tuples
167 # (source start file, target name, title,
168 # author, documentclass [howto, manual, or own class]).
169 latex_documents = [
170 ('index', 'ODC.tex', u'Open Data Cube Documentation', 'Open Data Cube', 'manual')
171 ]
172
173 numfig = True
174
175
176 def custom_page_funcs(app, pagename, templatename, context, doctree):
177
178 def get_autosummary_toc():
179 soup = bs(context["body"], "html.parser")
180
181 class_sections = soup.find(class_='class')
182 if class_sections != None:
183 return ""
184
185 matches = soup.find_all('dl')
186 if matches == None or len(matches) == 0:
187 return ""
188
189 out = {
190 'title': '',
191 'menu_items': []
192 }
193
194 # remove the class dt
195 pyclass = matches.pop(0)
196 pyclass = pyclass.find('dt')
197 if pyclass != None:
198 out['title'] = pyclass.get('id')
199
200 for match in matches:
201 match_dt = match.find('dt')
202 link = match.find(class_="headerlink")
203 if link != None:
204 out['menu_items'].append({
205 'title': match_dt.get('id'),
206 'link': link['href']
207 })
208
209 return out
210
211 def get_class_toc():
212 soup = bs(context["body"], "html.parser")
213
214 class_sections = soup.find_all(class_='autosummary')
215 if class_sections == None or len(class_sections) == 0:
216 return ""
217
218 out = {
219 'title': '',
220 'menu_items': []
221 }
222 class_title = soup.find(class_='class')
223 if class_title == None:
224 return ""
225
226 pyclass = class_title.find('dt')
227 if pyclass != None:
228 out['title'] = pyclass.get('id')
229
230 for section in class_sections:
231 out_section = {
232 'title': '',
233 'menu_items': []
234 }
235 out_section['title'] = section.find_previous_sibling('p').text.replace(':', '')
236 matches = section.find_all('tr')
237 for match in matches:
238 link = match.find(class_="internal")
239
240 if link != None:
241 title = link['title']
242 if title != None:
243 title = title.replace(out['title'], '')
244 out_section['menu_items'].append({
245 'title': title,
246 'link': link['href']
247 })
248 if len(out_section['menu_items']) > 0:
249 out['menu_items'].append(out_section)
250
251 # print(out)
252 return out
253
254 context['get_class_toc'] = get_class_toc
255 context['get_autosummary_toc'] = get_autosummary_toc
256
257
258 def setup(app):
259 # Fix bug where code isn't being highlighted
260 app.add_css_file('pygments.css')
261 app.add_css_file('custom.css')
262
263 app.connect("html-page-context", custom_page_funcs)
264
265
266 # Clean up generated documentation files that RTD seems to be having trouble with
267 if on_rtd:
268 import shutil
269
270 shutil.rmtree('./dev/generate', ignore_errors=True)
271
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -70,7 +70,7 @@
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
-exclude_patterns = ['README.rst', '.condaenv', '.direnv']
+exclude_patterns = ['README.rst', '.condaenv', '.direnv', '_build']
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -70,7 +70,7 @@\n \n # List of patterns, relative to source directory, that match files and\n # directories to ignore when looking for source files.\n-exclude_patterns = ['README.rst', '.condaenv', '.direnv']\n+exclude_patterns = ['README.rst', '.condaenv', '.direnv', '_build']\n \n # If true, '()' will be appended to :func: etc. cross-reference text.\n add_function_parentheses = True\n", "issue": "links on read the doc footer leads to 404\nhttps://github.com/opendatacube/datacube-core/blob/develop/docs/_templates/odc-footer.html renders at the footer of page https://datacube-core.readthedocs.io/en/latest/index.html\n", "before_files": [{"content": "# This file is part of the Open Data Cube, see https://opendatacube.org for more information\n#\n# Copyright (c) 2015-2020 ODC Contributors\n# SPDX-License-Identifier: Apache-2.0\nimport os\nimport sys\n\nfrom bs4 import BeautifulSoup as bs # noqa: N813\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('..'))\nsys.path.insert(0, os.path.abspath('.'))\nprint(sys.path)\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx_autodoc_typehints',\n 'sphinx.ext.graphviz',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.extlinks',\n 'sphinx.ext.mathjax',\n 'sphinx_click.ext',\n 'click_utils',\n 'autodocsumm',\n 'nbsphinx',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.autosectionlabel'\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = ['.rst', '.md']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Open Data Cube'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = \"1.8\"\n# The full version, including alpha/beta/rc tags.\n# FIXME: obtain real version by running git\nrelease = version\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['README.rst', '.condaenv', '.direnv']\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\nadd_function_parentheses = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. 
They are ignored by default.\nshow_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'friendly'\n\nautosummary_generate = True\nautoclass_content = \"both\"\n\nautodoc_default_options = {\n 'autosummary': True,\n 'inherited-members': True\n}\n\nextlinks = {'issue': ('https://github.com/opendatacube/datacube-core/issues/%s', 'issue '),\n 'pull': ('https://github.com/opendatacube/datacube-core/pulls/%s', 'PR ')}\n\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3', None),\n 'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),\n 'numpy': ('https://docs.scipy.org/doc/numpy/', None),\n 'xarray': ('https://xarray.pydata.org/en/stable/', None),\n}\n\ngraphviz_output_format = 'svg'\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nif on_rtd:\n html_theme = 'pydata_sphinx_theme'\nelse:\n html_theme = 'pydata_sphinx_theme'\n\nhtml_theme_options = {\n \"navigation_depth\": 1,\n \"show_prev_next\": False,\n \"collapse_navigation\": True,\n \"use_edit_page_button\": True,\n \"footer_items\": [\"odc-footer\"],\n \"page_sidebar_items\": [\n \"page-toc\",\n \"autoclass_page_toc\",\n \"autosummary_page_toc\",\n \"edit-this-page\"\n ],\n \"icon_links\": [\n {\n \"name\": \"GitHub\",\n \"url\": \"https://github.com/opendatacube/datacube-core\",\n \"icon\": \"fab fa-github\",\n },\n {\n \"name\": \"Slack\",\n \"url\": \"http://slack.opendatacube.org/\",\n \"icon\": \"fab fa-slack\",\n },\n ],\n}\n\nhtml_context = {\n \"github_user\": \"opendatacube\",\n \"github_repo\": \"datacube-core\",\n \"github_version\": \"develop\",\n \"doc_path\": \"docs\",\n}\n\nhtml_logo = '_static/odc-logo-horizontal.svg'\nhtml_static_path = ['_static']\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n# html_favicon = None\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = '%b %d, %Y'\n\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\nhtml_show_sphinx = False\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'ODCdoc'\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n ('index', 'ODC.tex', u'Open Data Cube Documentation', 'Open Data Cube', 'manual')\n]\n\nnumfig = True\n\n\ndef custom_page_funcs(app, pagename, templatename, context, doctree):\n\n def get_autosummary_toc():\n soup = bs(context[\"body\"], \"html.parser\")\n\n class_sections = soup.find(class_='class')\n if class_sections != None:\n return \"\"\n\n matches = soup.find_all('dl')\n if matches == None or len(matches) == 0:\n return \"\"\n\n out = {\n 'title': '',\n 'menu_items': []\n }\n\n # remove the class dt\n pyclass = matches.pop(0)\n pyclass = pyclass.find('dt')\n if pyclass != None:\n out['title'] = pyclass.get('id')\n\n for match in matches:\n match_dt = match.find('dt')\n link = match.find(class_=\"headerlink\")\n if link != None:\n out['menu_items'].append({\n 'title': match_dt.get('id'),\n 'link': link['href']\n })\n\n return out\n\n def get_class_toc():\n soup = bs(context[\"body\"], \"html.parser\")\n\n class_sections = soup.find_all(class_='autosummary')\n if class_sections == None or len(class_sections) == 0:\n return \"\"\n\n out = {\n 'title': '',\n 'menu_items': []\n }\n class_title = soup.find(class_='class')\n if class_title == None:\n return \"\"\n\n pyclass = class_title.find('dt')\n if pyclass != None:\n out['title'] = pyclass.get('id')\n\n for section in class_sections:\n out_section = {\n 'title': '',\n 'menu_items': []\n }\n out_section['title'] = section.find_previous_sibling('p').text.replace(':', '')\n matches = section.find_all('tr')\n for match in matches:\n link = match.find(class_=\"internal\")\n\n if link != None:\n title = link['title']\n if title != None:\n title = title.replace(out['title'], '')\n out_section['menu_items'].append({\n 'title': title,\n 'link': link['href']\n })\n if len(out_section['menu_items']) > 0:\n out['menu_items'].append(out_section)\n\n # print(out)\n return out\n\n context['get_class_toc'] = get_class_toc\n context['get_autosummary_toc'] = get_autosummary_toc\n\n\ndef setup(app):\n # Fix bug where code isn't being highlighted\n app.add_css_file('pygments.css')\n app.add_css_file('custom.css')\n\n app.connect(\"html-page-context\", custom_page_funcs)\n\n\n# Clean up generated documentation files that RTD seems to be having trouble with\nif on_rtd:\n import shutil\n\n shutil.rmtree('./dev/generate', ignore_errors=True)\n", "path": "docs/conf.py"}], "after_files": [{"content": "# This file is part of the Open Data Cube, see https://opendatacube.org for more information\n#\n# Copyright (c) 2015-2020 ODC Contributors\n# SPDX-License-Identifier: Apache-2.0\nimport os\nimport sys\n\nfrom bs4 import BeautifulSoup as bs # noqa: N813\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('..'))\nsys.path.insert(0, os.path.abspath('.'))\nprint(sys.path)\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx_autodoc_typehints',\n 'sphinx.ext.graphviz',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.extlinks',\n 'sphinx.ext.mathjax',\n 'sphinx_click.ext',\n 'click_utils',\n 'autodocsumm',\n 'nbsphinx',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.autosectionlabel'\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = ['.rst', '.md']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Open Data Cube'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = \"1.8\"\n# The full version, including alpha/beta/rc tags.\n# FIXME: obtain real version by running git\nrelease = version\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['README.rst', '.condaenv', '.direnv', '_build']\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\nadd_function_parentheses = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\nshow_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'friendly'\n\nautosummary_generate = True\nautoclass_content = \"both\"\n\nautodoc_default_options = {\n 'autosummary': True,\n 'inherited-members': True\n}\n\nextlinks = {'issue': ('https://github.com/opendatacube/datacube-core/issues/%s', 'issue '),\n 'pull': ('https://github.com/opendatacube/datacube-core/pulls/%s', 'PR ')}\n\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3', None),\n 'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),\n 'numpy': ('https://docs.scipy.org/doc/numpy/', None),\n 'xarray': ('https://xarray.pydata.org/en/stable/', None),\n}\n\ngraphviz_output_format = 'svg'\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\nif on_rtd:\n html_theme = 'pydata_sphinx_theme'\nelse:\n html_theme = 'pydata_sphinx_theme'\n\nhtml_theme_options = {\n \"navigation_depth\": 1,\n \"show_prev_next\": False,\n \"collapse_navigation\": True,\n \"use_edit_page_button\": True,\n \"footer_items\": [\"odc-footer\"],\n \"page_sidebar_items\": [\n \"page-toc\",\n \"autoclass_page_toc\",\n \"autosummary_page_toc\",\n \"edit-this-page\"\n ],\n \"icon_links\": [\n {\n \"name\": \"GitHub\",\n \"url\": \"https://github.com/opendatacube/datacube-core\",\n \"icon\": \"fab fa-github\",\n },\n {\n \"name\": \"Slack\",\n \"url\": \"http://slack.opendatacube.org/\",\n \"icon\": \"fab fa-slack\",\n },\n ],\n}\n\nhtml_context = {\n \"github_user\": \"opendatacube\",\n \"github_repo\": \"datacube-core\",\n \"github_version\": \"develop\",\n \"doc_path\": \"docs\",\n}\n\nhtml_logo = '_static/odc-logo-horizontal.svg'\nhtml_static_path = ['_static']\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n# html_favicon = None\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = '%b %d, %Y'\n\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\nhtml_show_sphinx = False\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'ODCdoc'\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n ('index', 'ODC.tex', u'Open Data Cube Documentation', 'Open Data Cube', 'manual')\n]\n\nnumfig = True\n\n\ndef custom_page_funcs(app, pagename, templatename, context, doctree):\n\n def get_autosummary_toc():\n soup = bs(context[\"body\"], \"html.parser\")\n\n class_sections = soup.find(class_='class')\n if class_sections != None:\n return \"\"\n\n matches = soup.find_all('dl')\n if matches == None or len(matches) == 0:\n return \"\"\n\n out = {\n 'title': '',\n 'menu_items': []\n }\n\n # remove the class dt\n pyclass = matches.pop(0)\n pyclass = pyclass.find('dt')\n if pyclass != None:\n out['title'] = pyclass.get('id')\n\n for match in matches:\n match_dt = match.find('dt')\n link = match.find(class_=\"headerlink\")\n if link != None:\n out['menu_items'].append({\n 'title': match_dt.get('id'),\n 'link': link['href']\n })\n\n return out\n\n def get_class_toc():\n soup = bs(context[\"body\"], \"html.parser\")\n\n class_sections = soup.find_all(class_='autosummary')\n if class_sections == None or len(class_sections) == 0:\n return \"\"\n\n out = {\n 'title': '',\n 'menu_items': []\n }\n class_title = soup.find(class_='class')\n if class_title == None:\n return \"\"\n\n pyclass = class_title.find('dt')\n if pyclass != None:\n out['title'] = pyclass.get('id')\n\n for section in class_sections:\n out_section = {\n 'title': '',\n 'menu_items': []\n }\n out_section['title'] = section.find_previous_sibling('p').text.replace(':', '')\n matches = section.find_all('tr')\n for match in matches:\n link = match.find(class_=\"internal\")\n\n if link != None:\n title = link['title']\n if title != None:\n title = title.replace(out['title'], '')\n out_section['menu_items'].append({\n 'title': title,\n 'link': link['href']\n })\n if len(out_section['menu_items']) > 0:\n out['menu_items'].append(out_section)\n\n # print(out)\n return out\n\n 
context['get_class_toc'] = get_class_toc\n context['get_autosummary_toc'] = get_autosummary_toc\n\n\ndef setup(app):\n # Fix bug where code isn't being highlighted\n app.add_css_file('pygments.css')\n app.add_css_file('custom.css')\n\n app.connect(\"html-page-context\", custom_page_funcs)\n\n\n# Clean up generated documentation files that RTD seems to be having trouble with\nif on_rtd:\n import shutil\n\n shutil.rmtree('./dev/generate', ignore_errors=True)\n", "path": "docs/conf.py"}]} | 3,046 | 127 |
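For the Read the Docs footer record above, the merged change is a single extra entry in `exclude_patterns`. A fragment of how the relevant `docs/conf.py` lines read after the patch (values copied from the record; the comment is an added explanation, not part of the upstream file):

```python
# Keep Sphinx from re-reading its own output tree under docs/_build, which can
# otherwise be swept up as source files and surface stale generated pages.
exclude_patterns = ['README.rst', '.condaenv', '.direnv', '_build']
```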
gh_patches_debug_24325 | rasdani/github-patches | git_diff | xonsh__xonsh-3002 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cmd && othercmd throws an exception
```
➤ ls &&Exception in thread Thread-35:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/usr/lib/python3.6/site-packages/prompt_toolkit/interface.py", line 860, in run
completions = list(buffer.completer.get_completions(document, complete_event))
File "/usr/lib/python3.6/site-packages/xonsh/ptk/completer.py", line 49, in get_completions
self.ctx)
File "/usr/lib/python3.6/site-packages/xonsh/__amalgam__.py", line 124, in complete
out = func(prefix, line, begidx, endidx, ctx)
File "/usr/lib/python3.6/site-packages/xonsh/completers/__amalgam__.py", line 831, in complete_skipper
ctx)
File "/usr/lib/python3.6/site-packages/xonsh/__amalgam__.py", line 124, in complete
out = func(prefix, line, begidx, endidx, ctx)
File "/usr/lib/python3.6/site-packages/xonsh/completers/__amalgam__.py", line 1427, in complete_base
complete_command(prefix, line, start, end, ctx))
TypeError: unsupported operand type(s) for |: 'tuple' and 'set'
```
```
➤ y -Q | egrep 'toolkit|xon'
community/python-prompt_toolkit 1.0.14-1
local/xonsh 0.5.9-1
```
Let me know if you need anything else.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `xonsh/completers/base.py`
Content:
```
1 """Base completer for xonsh."""
2 import collections.abc as cabc
3
4 from xonsh.completers.path import complete_path
5 from xonsh.completers.python import complete_python
6 from xonsh.completers.commands import complete_command
7
8
9 def complete_base(prefix, line, start, end, ctx):
10 """If the line is empty, complete based on valid commands, python names,
11 and paths. If we are completing the first argument, complete based on
12 valid commands and python names.
13 """
14 if line.strip() == "":
15 out = complete_python(prefix, line, start, end, ctx) | complete_command(
16 prefix, line, start, end, ctx
17 )
18 paths = complete_path(prefix, line, start, end, ctx, False)
19 return (out | paths[0]), paths[1]
20 elif prefix == line:
21 python_comps = complete_python(prefix, line, start, end, ctx)
22 if isinstance(python_comps, cabc.Sequence):
23 return (
24 python_comps[0] | complete_command(prefix, line, start, end, ctx),
25 python_comps[1],
26 )
27 else:
28 return python_comps | complete_command(prefix, line, start, end, ctx)
29 return set()
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/xonsh/completers/base.py b/xonsh/completers/base.py
--- a/xonsh/completers/base.py
+++ b/xonsh/completers/base.py
@@ -11,19 +11,21 @@
and paths. If we are completing the first argument, complete based on
valid commands and python names.
"""
+ # get and unpack python completions
+ python_comps = complete_python(prefix, line, start, end, ctx)
+ if isinstance(python_comps, cabc.Sequence):
+ python_comps, python_comps_len = python_comps
+ else:
+ python_comps_len = None
+ # add command completions
+ out = python_comps | complete_command(prefix, line, start, end, ctx)
+ # add paths, if needed
if line.strip() == "":
- out = complete_python(prefix, line, start, end, ctx) | complete_command(
- prefix, line, start, end, ctx
- )
paths = complete_path(prefix, line, start, end, ctx, False)
return (out | paths[0]), paths[1]
elif prefix == line:
- python_comps = complete_python(prefix, line, start, end, ctx)
- if isinstance(python_comps, cabc.Sequence):
- return (
- python_comps[0] | complete_command(prefix, line, start, end, ctx),
- python_comps[1],
- )
+ if python_comps_len is None:
+ return out
else:
- return python_comps | complete_command(prefix, line, start, end, ctx)
+ return out, python_comps_len
return set()
| {"golden_diff": "diff --git a/xonsh/completers/base.py b/xonsh/completers/base.py\n--- a/xonsh/completers/base.py\n+++ b/xonsh/completers/base.py\n@@ -11,19 +11,21 @@\n and paths. If we are completing the first argument, complete based on\n valid commands and python names.\n \"\"\"\n+ # get and unpack python completions\n+ python_comps = complete_python(prefix, line, start, end, ctx)\n+ if isinstance(python_comps, cabc.Sequence):\n+ python_comps, python_comps_len = python_comps\n+ else:\n+ python_comps_len = None\n+ # add command completions\n+ out = python_comps | complete_command(prefix, line, start, end, ctx)\n+ # add paths, if needed\n if line.strip() == \"\":\n- out = complete_python(prefix, line, start, end, ctx) | complete_command(\n- prefix, line, start, end, ctx\n- )\n paths = complete_path(prefix, line, start, end, ctx, False)\n return (out | paths[0]), paths[1]\n elif prefix == line:\n- python_comps = complete_python(prefix, line, start, end, ctx)\n- if isinstance(python_comps, cabc.Sequence):\n- return (\n- python_comps[0] | complete_command(prefix, line, start, end, ctx),\n- python_comps[1],\n- )\n+ if python_comps_len is None:\n+ return out\n else:\n- return python_comps | complete_command(prefix, line, start, end, ctx)\n+ return out, python_comps_len\n return set()\n", "issue": "cmd && othercmd throws an exception\n```\r\n\u27a4 ls &&Exception in thread Thread-35:\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.6/threading.py\", line 916, in _bootstrap_inner\r\n self.run()\r\n File \"/usr/lib/python3.6/threading.py\", line 864, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/usr/lib/python3.6/site-packages/prompt_toolkit/interface.py\", line 860, in run\r\n completions = list(buffer.completer.get_completions(document, complete_event))\r\n File \"/usr/lib/python3.6/site-packages/xonsh/ptk/completer.py\", line 49, in get_completions\r\n self.ctx)\r\n File \"/usr/lib/python3.6/site-packages/xonsh/__amalgam__.py\", line 124, in complete\r\n out = func(prefix, line, begidx, endidx, ctx)\r\n File \"/usr/lib/python3.6/site-packages/xonsh/completers/__amalgam__.py\", line 831, in complete_skipper\r\n ctx)\r\n File \"/usr/lib/python3.6/site-packages/xonsh/__amalgam__.py\", line 124, in complete\r\n out = func(prefix, line, begidx, endidx, ctx)\r\n File \"/usr/lib/python3.6/site-packages/xonsh/completers/__amalgam__.py\", line 1427, in complete_base\r\n complete_command(prefix, line, start, end, ctx))\r\nTypeError: unsupported operand type(s) for |: 'tuple' and 'set'\r\n```\r\n\r\n```\r\n\u27a4 y -Q | egrep 'toolkit|xon'\r\ncommunity/python-prompt_toolkit 1.0.14-1\r\nlocal/xonsh 0.5.9-1\r\n```\r\n\r\nLet me know if you need anything else.\n", "before_files": [{"content": "\"\"\"Base completer for xonsh.\"\"\"\nimport collections.abc as cabc\n\nfrom xonsh.completers.path import complete_path\nfrom xonsh.completers.python import complete_python\nfrom xonsh.completers.commands import complete_command\n\n\ndef complete_base(prefix, line, start, end, ctx):\n \"\"\"If the line is empty, complete based on valid commands, python names,\n and paths. 
If we are completing the first argument, complete based on\n valid commands and python names.\n \"\"\"\n if line.strip() == \"\":\n out = complete_python(prefix, line, start, end, ctx) | complete_command(\n prefix, line, start, end, ctx\n )\n paths = complete_path(prefix, line, start, end, ctx, False)\n return (out | paths[0]), paths[1]\n elif prefix == line:\n python_comps = complete_python(prefix, line, start, end, ctx)\n if isinstance(python_comps, cabc.Sequence):\n return (\n python_comps[0] | complete_command(prefix, line, start, end, ctx),\n python_comps[1],\n )\n else:\n return python_comps | complete_command(prefix, line, start, end, ctx)\n return set()\n", "path": "xonsh/completers/base.py"}], "after_files": [{"content": "\"\"\"Base completer for xonsh.\"\"\"\nimport collections.abc as cabc\n\nfrom xonsh.completers.path import complete_path\nfrom xonsh.completers.python import complete_python\nfrom xonsh.completers.commands import complete_command\n\n\ndef complete_base(prefix, line, start, end, ctx):\n \"\"\"If the line is empty, complete based on valid commands, python names,\n and paths. If we are completing the first argument, complete based on\n valid commands and python names.\n \"\"\"\n # get and unpack python completions\n python_comps = complete_python(prefix, line, start, end, ctx)\n if isinstance(python_comps, cabc.Sequence):\n python_comps, python_comps_len = python_comps\n else:\n python_comps_len = None\n # add command completions\n out = python_comps | complete_command(prefix, line, start, end, ctx)\n # add paths, if needed\n if line.strip() == \"\":\n paths = complete_path(prefix, line, start, end, ctx, False)\n return (out | paths[0]), paths[1]\n elif prefix == line:\n if python_comps_len is None:\n return out\n else:\n return out, python_comps_len\n return set()\n", "path": "xonsh/completers/base.py"}]} | 1,012 | 386 |
gh_patches_debug_27141 | rasdani/github-patches | git_diff | deepset-ai__haystack-6261 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`JoinDocuments` now fails if any document is without a score
**Describe the bug**
Since the latest release (more specifically this commit: https://github.com/deepset-ai/haystack/commit/32e87d37c153cc8e8c5de5c72e5f939b2efd1022) the `JoinDocument` node will error out if any of the documents joined has not score, even if the `JoinDocuments` is used with `sort_by_score=False`
This break the documented functionality of the `sort_by_score` param, which allows (well, allowed before the above commit) the use of JoinDocuments with documents without scores:
https://github.com/deepset-ai/haystack/blob/main/haystack/nodes/other/join_docs.py#L40
Also this likely means that there is no unit test to verify that `sort_by_score=False` actually works, otherwise that should have caught this bug when it got introduced.
**Error message**
The error you will be seeing if you have some documents without scores even if you have `sort_by_score=False` set :
```
File ".../lib/python3.10/site-packages/haystack/nodes/base.py", line 317, in _adispatch_run_general
output, stream = await run_method(**run_inputs, **run_params)
│ │ └ {}
│ └ {'inputs': [{'documents': [<Document: {'content': "Zoltan Fedor is the Director, AI Platform ...
└ <bound method JoinNode.run of <haystack.nodes.other.join_docs.JoinDocuments object at 0x7f7beae0df30>>
File ".../lib/python3.10/site-packages/haystack/nodes/other/join.py", line 24, in run
results = self.run_accumulated(inputs, top_k_join=top_k_join)
│ │ │ └ None
│ │ └ [{'documents': [<Document: {'content': "Zoltan Fedor is the Director, AI Platform ...
│ └ <function JoinDocuments.run_accumulated at 0x7f7af5d160e0>
└ <haystack.nodes.other.join_docs.JoinDocuments object at 0x7f7beae0df30>
File ".../lib/python3.10/site-packages/haystack/nodes/other/join_docs.py", line 66, in run_accumulated
scores_map = self._concatenate_results(results, document_map)
│ │ │ └ {'8a51d965-082b-055f-f486-a372fc7254e6': <Document: {'content': "Zoltan Fedor is the Director, AI Platform ...
│ │ └ [[<Document: {'content': "Zoltan Fedor is the Director, AI Platform ...
│ └ <function JoinDocuments._concatenate_results at 0x7f7af5d164d0>
└ <haystack.nodes.other.join_docs.JoinDocuments object at 0x7f7beae0df30>
File ".../lib/python3.10/site-packages/haystack/nodes/other/join_docs.py", line 133, in _concatenate_results
item_best_score = max(tmp, key=lambda x: x.score)
└ [<Document: {'content': "Zoltan Fedor is the Director, AI Platform ...
TypeError: '>' not supported between instances of 'NoneType' and 'float'
```
**Expected behavior**
The document functionality of `sort_by_score=False` of `JoinDocuments` should not get broken by new functionality being introduced.
**Additional context**
As mentioned above this bug could have not been introduced it there would have been a unit test for the `sort_by_score=False` functionality - so I would recommend adding one to avoid this happening again in the future.
**To Reproduce**
Create a document set with some documents with no score and use `JoinDocuments` on them.
**FAQ Check**
- [ X ] Have you had a look at [our new FAQ page](https://docs.haystack.deepset.ai/docs/faq)?
**System:**
- OS: all
- GPU/CPU: all
- Haystack version (commit or version number): 1.22.0
- DocumentStore: Any
- Reader: Any
- Retriever: Any that produces some documents with no score
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `haystack/nodes/other/join_docs.py`
Content:
```
1 import logging
2 from collections import defaultdict
3 from math import inf
4 from typing import List, Optional
5
6 from haystack.nodes.other.join import JoinNode
7 from haystack.schema import Document
8
9 logger = logging.getLogger(__name__)
10
11
12 class JoinDocuments(JoinNode):
13 """
14 A node to join documents outputted by multiple retriever nodes.
15
16 The node allows multiple join modes:
17 * concatenate: combine the documents from multiple nodes. Any duplicate documents are discarded.
18 The score is only determined by the last node that outputs the document.
19 * merge: merge scores of documents from multiple nodes. Optionally, each input score can be given a different
20 `weight` & a `top_k` limit can be set. This mode can also be used for "reranking" retrieved documents.
21 * reciprocal_rank_fusion: combines the documents based on their rank in multiple nodes.
22 """
23
24 outgoing_edges = 1
25
26 def __init__(
27 self,
28 join_mode: str = "concatenate",
29 weights: Optional[List[float]] = None,
30 top_k_join: Optional[int] = None,
31 sort_by_score: bool = True,
32 ):
33 """
34 :param join_mode: `concatenate` to combine documents from multiple retrievers `merge` to aggregate scores of
35 individual documents, `reciprocal_rank_fusion` to apply rank based scoring.
36 :param weights: A node-wise list(length of list must be equal to the number of input nodes) of weights for
37 adjusting document scores when using the `merge` join_mode. By default, equal weight is given
38 to each retriever score. This param is not compatible with the `concatenate` join_mode.
39 :param top_k_join: Limit documents to top_k based on the resulting scores of the join.
40 :param sort_by_score: Whether to sort the incoming documents by their score. Set this to True if all your
41 Documents are coming with `score` values. Set to False if any of the Documents come
42 from sources where the `score` is set to `None`, like `TfidfRetriever` on Elasticsearch.
43 """
44 assert join_mode in [
45 "concatenate",
46 "merge",
47 "reciprocal_rank_fusion",
48 ], f"JoinDocuments node does not support '{join_mode}' join_mode."
49
50 assert not (
51 weights is not None and join_mode == "concatenate"
52 ), "Weights are not compatible with 'concatenate' join_mode."
53
54 super().__init__()
55
56 self.join_mode = join_mode
57 self.weights = [float(i) / sum(weights) for i in weights] if weights else None
58 self.top_k_join = top_k_join
59 self.sort_by_score = sort_by_score
60
61 def run_accumulated(self, inputs: List[dict], top_k_join: Optional[int] = None): # type: ignore
62 results = [inp["documents"] for inp in inputs]
63 document_map = {doc.id: doc for result in results for doc in result}
64
65 if self.join_mode == "concatenate":
66 scores_map = self._concatenate_results(results, document_map)
67 elif self.join_mode == "merge":
68 scores_map = self._calculate_comb_sum(results)
69 elif self.join_mode == "reciprocal_rank_fusion":
70 scores_map = self._calculate_rrf(results)
71 else:
72 raise ValueError(f"Invalid join_mode: {self.join_mode}")
73
74 # only sort the docs if that was requested
75 if self.sort_by_score:
76 sorted_docs = sorted(scores_map.items(), key=lambda d: d[1] if d[1] is not None else -inf, reverse=True)
77 if any(s is None for s in scores_map.values()):
78 logger.info(
79 "The `JoinDocuments` node has received some documents with `score=None` - and was requested "
80 "to sort the documents by score, so the `score=None` documents got sorted as if their "
81 "score would be `-infinity`."
82 )
83 else:
84 sorted_docs = list(scores_map.items())
85
86 if not top_k_join:
87 top_k_join = self.top_k_join
88 if not top_k_join:
89 top_k_join = len(sorted_docs)
90
91 docs = []
92 for id, score in sorted_docs[:top_k_join]:
93 doc = document_map[id]
94 doc.score = score
95 docs.append(doc)
96
97 output = {"documents": docs, "labels": inputs[0].get("labels", None)}
98
99 return output, "output_1"
100
101 def run_batch_accumulated(self, inputs: List[dict], top_k_join: Optional[int] = None): # type: ignore
102 # Join single document lists
103 if isinstance(inputs[0]["documents"][0], Document):
104 return self.run(inputs=inputs, top_k_join=top_k_join)
105 # Join lists of document lists
106 else:
107 output_docs = []
108 incoming_edges = [inp["documents"] for inp in inputs]
109 for idx in range(len(incoming_edges[0])):
110 cur_docs_to_join = []
111 for edge in incoming_edges:
112 cur_docs_to_join.append({"documents": edge[idx]})
113 cur, _ = self.run(inputs=cur_docs_to_join, top_k_join=top_k_join)
114 output_docs.append(cur["documents"])
115
116 output = {"documents": output_docs, "labels": inputs[0].get("labels", None)}
117
118 return output, "output_1"
119
120 def _concatenate_results(self, results, document_map):
121 """
122 Concatenates multiple document result lists.
123 Return the documents with the higher score.
124 """
125 list_id = list(document_map.keys())
126 scores_map = {}
127 for idx in list_id:
128 tmp = []
129 for result in results:
130 for doc in result:
131 if doc.id == idx:
132 tmp.append(doc)
133 item_best_score = max(tmp, key=lambda x: x.score)
134 scores_map.update({idx: item_best_score.score})
135 return scores_map
136
137 def _calculate_comb_sum(self, results):
138 """
139 Calculates a combination sum by multiplying each score by its weight.
140 """
141 scores_map = defaultdict(int)
142 weights = self.weights if self.weights else [1 / len(results)] * len(results)
143
144 for result, weight in zip(results, weights):
145 for doc in result:
146 scores_map[doc.id] += (doc.score if doc.score else 0) * weight
147
148 return scores_map
149
150 def _calculate_rrf(self, results):
151 """
152 Calculates the reciprocal rank fusion. The constant K is set to 61 (60 was suggested by the original paper,
153 plus 1 as python lists are 0-based and the paper used 1-based ranking).
154 """
155 K = 61
156
157 scores_map = defaultdict(int)
158 for result in results:
159 for rank, doc in enumerate(result):
160 scores_map[doc.id] += 1 / (K + rank)
161
162 return scores_map
163
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/haystack/nodes/other/join_docs.py b/haystack/nodes/other/join_docs.py
--- a/haystack/nodes/other/join_docs.py
+++ b/haystack/nodes/other/join_docs.py
@@ -14,8 +14,8 @@
A node to join documents outputted by multiple retriever nodes.
The node allows multiple join modes:
- * concatenate: combine the documents from multiple nodes. Any duplicate documents are discarded.
- The score is only determined by the last node that outputs the document.
+ * concatenate: combine the documents from multiple nodes.
+ In case of duplicate documents, the one with the highest score is kept.
* merge: merge scores of documents from multiple nodes. Optionally, each input score can be given a different
`weight` & a `top_k` limit can be set. This mode can also be used for "reranking" retrieved documents.
* reciprocal_rank_fusion: combines the documents based on their rank in multiple nodes.
@@ -130,7 +130,7 @@
for doc in result:
if doc.id == idx:
tmp.append(doc)
- item_best_score = max(tmp, key=lambda x: x.score)
+ item_best_score = max(tmp, key=lambda x: x.score if x.score is not None else -inf)
scores_map.update({idx: item_best_score.score})
return scores_map
| {"golden_diff": "diff --git a/haystack/nodes/other/join_docs.py b/haystack/nodes/other/join_docs.py\n--- a/haystack/nodes/other/join_docs.py\n+++ b/haystack/nodes/other/join_docs.py\n@@ -14,8 +14,8 @@\n A node to join documents outputted by multiple retriever nodes.\n \n The node allows multiple join modes:\n- * concatenate: combine the documents from multiple nodes. Any duplicate documents are discarded.\n- The score is only determined by the last node that outputs the document.\n+ * concatenate: combine the documents from multiple nodes.\n+ In case of duplicate documents, the one with the highest score is kept.\n * merge: merge scores of documents from multiple nodes. Optionally, each input score can be given a different\n `weight` & a `top_k` limit can be set. This mode can also be used for \"reranking\" retrieved documents.\n * reciprocal_rank_fusion: combines the documents based on their rank in multiple nodes.\n@@ -130,7 +130,7 @@\n for doc in result:\n if doc.id == idx:\n tmp.append(doc)\n- item_best_score = max(tmp, key=lambda x: x.score)\n+ item_best_score = max(tmp, key=lambda x: x.score if x.score is not None else -inf)\n scores_map.update({idx: item_best_score.score})\n return scores_map\n", "issue": "`JoinDocuments` now fails if any document is without a score\n**Describe the bug**\r\nSince the latest release (more specifically this commit: https://github.com/deepset-ai/haystack/commit/32e87d37c153cc8e8c5de5c72e5f939b2efd1022) the `JoinDocument` node will error out if any of the documents joined has not score, even if the `JoinDocuments` is used with `sort_by_score=False`\r\n\r\nThis break the documented functionality of the `sort_by_score` param, which allows (well, allowed before the above commit) the use of JoinDocuments with documents without scores:\r\nhttps://github.com/deepset-ai/haystack/blob/main/haystack/nodes/other/join_docs.py#L40\r\n\r\nAlso this likely means that there is no unit test to verify that `sort_by_score=False` actually works, otherwise that should have caught this bug when it got introduced.\r\n\r\n**Error message**\r\nThe error you will be seeing if you have some documents without scores even if you have `sort_by_score=False` set :\r\n```\r\n File \".../lib/python3.10/site-packages/haystack/nodes/base.py\", line 317, in _adispatch_run_general\r\n output, stream = await run_method(**run_inputs, **run_params)\r\n \u2502 \u2502 \u2514 {}\r\n \u2502 \u2514 {'inputs': [{'documents': [<Document: {'content': \"Zoltan Fedor is the Director, AI Platform ...\r\n \u2514 <bound method JoinNode.run of <haystack.nodes.other.join_docs.JoinDocuments object at 0x7f7beae0df30>>\r\n File \".../lib/python3.10/site-packages/haystack/nodes/other/join.py\", line 24, in run\r\n results = self.run_accumulated(inputs, top_k_join=top_k_join)\r\n \u2502 \u2502 \u2502 \u2514 None\r\n \u2502 \u2502 \u2514 [{'documents': [<Document: {'content': \"Zoltan Fedor is the Director, AI Platform ...\r\n \u2502 \u2514 <function JoinDocuments.run_accumulated at 0x7f7af5d160e0>\r\n \u2514 <haystack.nodes.other.join_docs.JoinDocuments object at 0x7f7beae0df30>\r\n File \".../lib/python3.10/site-packages/haystack/nodes/other/join_docs.py\", line 66, in run_accumulated\r\n scores_map = self._concatenate_results(results, document_map)\r\n \u2502 \u2502 \u2502 \u2514 {'8a51d965-082b-055f-f486-a372fc7254e6': <Document: {'content': \"Zoltan Fedor is the Director, AI Platform ...\r\n \u2502 \u2502 \u2514 [[<Document: {'content': \"Zoltan Fedor is the Director, AI Platform ...\r\n \u2502 \u2514 
<function JoinDocuments._concatenate_results at 0x7f7af5d164d0>\r\n \u2514 <haystack.nodes.other.join_docs.JoinDocuments object at 0x7f7beae0df30>\r\n File \".../lib/python3.10/site-packages/haystack/nodes/other/join_docs.py\", line 133, in _concatenate_results\r\n item_best_score = max(tmp, key=lambda x: x.score)\r\n \u2514 [<Document: {'content': \"Zoltan Fedor is the Director, AI Platform ...\r\n\r\nTypeError: '>' not supported between instances of 'NoneType' and 'float'\r\n```\r\n\r\n**Expected behavior**\r\nThe document functionality of `sort_by_score=False` of `JoinDocuments` should not get broken by new functionality being introduced.\r\n\r\n**Additional context**\r\nAs mentioned above this bug could have not been introduced it there would have been a unit test for the `sort_by_score=False` functionality - so I would recommend adding one to avoid this happening again in the future.\r\n\r\n**To Reproduce**\r\nCreate a document set with some documents with no score and use `JoinDocuments` on them.\r\n\r\n**FAQ Check**\r\n- [ X ] Have you had a look at [our new FAQ page](https://docs.haystack.deepset.ai/docs/faq)?\r\n\r\n**System:**\r\n - OS: all\r\n - GPU/CPU: all\r\n - Haystack version (commit or version number): 1.22.0\r\n - DocumentStore: Any\r\n - Reader: Any\r\n - Retriever: Any that produces some documents with no score\r\n\n", "before_files": [{"content": "import logging\nfrom collections import defaultdict\nfrom math import inf\nfrom typing import List, Optional\n\nfrom haystack.nodes.other.join import JoinNode\nfrom haystack.schema import Document\n\nlogger = logging.getLogger(__name__)\n\n\nclass JoinDocuments(JoinNode):\n \"\"\"\n A node to join documents outputted by multiple retriever nodes.\n\n The node allows multiple join modes:\n * concatenate: combine the documents from multiple nodes. Any duplicate documents are discarded.\n The score is only determined by the last node that outputs the document.\n * merge: merge scores of documents from multiple nodes. Optionally, each input score can be given a different\n `weight` & a `top_k` limit can be set. This mode can also be used for \"reranking\" retrieved documents.\n * reciprocal_rank_fusion: combines the documents based on their rank in multiple nodes.\n \"\"\"\n\n outgoing_edges = 1\n\n def __init__(\n self,\n join_mode: str = \"concatenate\",\n weights: Optional[List[float]] = None,\n top_k_join: Optional[int] = None,\n sort_by_score: bool = True,\n ):\n \"\"\"\n :param join_mode: `concatenate` to combine documents from multiple retrievers `merge` to aggregate scores of\n individual documents, `reciprocal_rank_fusion` to apply rank based scoring.\n :param weights: A node-wise list(length of list must be equal to the number of input nodes) of weights for\n adjusting document scores when using the `merge` join_mode. By default, equal weight is given\n to each retriever score. This param is not compatible with the `concatenate` join_mode.\n :param top_k_join: Limit documents to top_k based on the resulting scores of the join.\n :param sort_by_score: Whether to sort the incoming documents by their score. Set this to True if all your\n Documents are coming with `score` values. 
Set to False if any of the Documents come\n from sources where the `score` is set to `None`, like `TfidfRetriever` on Elasticsearch.\n \"\"\"\n assert join_mode in [\n \"concatenate\",\n \"merge\",\n \"reciprocal_rank_fusion\",\n ], f\"JoinDocuments node does not support '{join_mode}' join_mode.\"\n\n assert not (\n weights is not None and join_mode == \"concatenate\"\n ), \"Weights are not compatible with 'concatenate' join_mode.\"\n\n super().__init__()\n\n self.join_mode = join_mode\n self.weights = [float(i) / sum(weights) for i in weights] if weights else None\n self.top_k_join = top_k_join\n self.sort_by_score = sort_by_score\n\n def run_accumulated(self, inputs: List[dict], top_k_join: Optional[int] = None): # type: ignore\n results = [inp[\"documents\"] for inp in inputs]\n document_map = {doc.id: doc for result in results for doc in result}\n\n if self.join_mode == \"concatenate\":\n scores_map = self._concatenate_results(results, document_map)\n elif self.join_mode == \"merge\":\n scores_map = self._calculate_comb_sum(results)\n elif self.join_mode == \"reciprocal_rank_fusion\":\n scores_map = self._calculate_rrf(results)\n else:\n raise ValueError(f\"Invalid join_mode: {self.join_mode}\")\n\n # only sort the docs if that was requested\n if self.sort_by_score:\n sorted_docs = sorted(scores_map.items(), key=lambda d: d[1] if d[1] is not None else -inf, reverse=True)\n if any(s is None for s in scores_map.values()):\n logger.info(\n \"The `JoinDocuments` node has received some documents with `score=None` - and was requested \"\n \"to sort the documents by score, so the `score=None` documents got sorted as if their \"\n \"score would be `-infinity`.\"\n )\n else:\n sorted_docs = list(scores_map.items())\n\n if not top_k_join:\n top_k_join = self.top_k_join\n if not top_k_join:\n top_k_join = len(sorted_docs)\n\n docs = []\n for id, score in sorted_docs[:top_k_join]:\n doc = document_map[id]\n doc.score = score\n docs.append(doc)\n\n output = {\"documents\": docs, \"labels\": inputs[0].get(\"labels\", None)}\n\n return output, \"output_1\"\n\n def run_batch_accumulated(self, inputs: List[dict], top_k_join: Optional[int] = None): # type: ignore\n # Join single document lists\n if isinstance(inputs[0][\"documents\"][0], Document):\n return self.run(inputs=inputs, top_k_join=top_k_join)\n # Join lists of document lists\n else:\n output_docs = []\n incoming_edges = [inp[\"documents\"] for inp in inputs]\n for idx in range(len(incoming_edges[0])):\n cur_docs_to_join = []\n for edge in incoming_edges:\n cur_docs_to_join.append({\"documents\": edge[idx]})\n cur, _ = self.run(inputs=cur_docs_to_join, top_k_join=top_k_join)\n output_docs.append(cur[\"documents\"])\n\n output = {\"documents\": output_docs, \"labels\": inputs[0].get(\"labels\", None)}\n\n return output, \"output_1\"\n\n def _concatenate_results(self, results, document_map):\n \"\"\"\n Concatenates multiple document result lists.\n Return the documents with the higher score.\n \"\"\"\n list_id = list(document_map.keys())\n scores_map = {}\n for idx in list_id:\n tmp = []\n for result in results:\n for doc in result:\n if doc.id == idx:\n tmp.append(doc)\n item_best_score = max(tmp, key=lambda x: x.score)\n scores_map.update({idx: item_best_score.score})\n return scores_map\n\n def _calculate_comb_sum(self, results):\n \"\"\"\n Calculates a combination sum by multiplying each score by its weight.\n \"\"\"\n scores_map = defaultdict(int)\n weights = self.weights if self.weights else [1 / len(results)] * len(results)\n\n for 
result, weight in zip(results, weights):\n for doc in result:\n scores_map[doc.id] += (doc.score if doc.score else 0) * weight\n\n return scores_map\n\n def _calculate_rrf(self, results):\n \"\"\"\n Calculates the reciprocal rank fusion. The constant K is set to 61 (60 was suggested by the original paper,\n plus 1 as python lists are 0-based and the paper used 1-based ranking).\n \"\"\"\n K = 61\n\n scores_map = defaultdict(int)\n for result in results:\n for rank, doc in enumerate(result):\n scores_map[doc.id] += 1 / (K + rank)\n\n return scores_map\n", "path": "haystack/nodes/other/join_docs.py"}], "after_files": [{"content": "import logging\nfrom collections import defaultdict\nfrom math import inf\nfrom typing import List, Optional\n\nfrom haystack.nodes.other.join import JoinNode\nfrom haystack.schema import Document\n\nlogger = logging.getLogger(__name__)\n\n\nclass JoinDocuments(JoinNode):\n \"\"\"\n A node to join documents outputted by multiple retriever nodes.\n\n The node allows multiple join modes:\n * concatenate: combine the documents from multiple nodes.\n In case of duplicate documents, the one with the highest score is kept.\n * merge: merge scores of documents from multiple nodes. Optionally, each input score can be given a different\n `weight` & a `top_k` limit can be set. This mode can also be used for \"reranking\" retrieved documents.\n * reciprocal_rank_fusion: combines the documents based on their rank in multiple nodes.\n \"\"\"\n\n outgoing_edges = 1\n\n def __init__(\n self,\n join_mode: str = \"concatenate\",\n weights: Optional[List[float]] = None,\n top_k_join: Optional[int] = None,\n sort_by_score: bool = True,\n ):\n \"\"\"\n :param join_mode: `concatenate` to combine documents from multiple retrievers `merge` to aggregate scores of\n individual documents, `reciprocal_rank_fusion` to apply rank based scoring.\n :param weights: A node-wise list(length of list must be equal to the number of input nodes) of weights for\n adjusting document scores when using the `merge` join_mode. By default, equal weight is given\n to each retriever score. This param is not compatible with the `concatenate` join_mode.\n :param top_k_join: Limit documents to top_k based on the resulting scores of the join.\n :param sort_by_score: Whether to sort the incoming documents by their score. Set this to True if all your\n Documents are coming with `score` values. 
Set to False if any of the Documents come\n from sources where the `score` is set to `None`, like `TfidfRetriever` on Elasticsearch.\n \"\"\"\n assert join_mode in [\n \"concatenate\",\n \"merge\",\n \"reciprocal_rank_fusion\",\n ], f\"JoinDocuments node does not support '{join_mode}' join_mode.\"\n\n assert not (\n weights is not None and join_mode == \"concatenate\"\n ), \"Weights are not compatible with 'concatenate' join_mode.\"\n\n super().__init__()\n\n self.join_mode = join_mode\n self.weights = [float(i) / sum(weights) for i in weights] if weights else None\n self.top_k_join = top_k_join\n self.sort_by_score = sort_by_score\n\n def run_accumulated(self, inputs: List[dict], top_k_join: Optional[int] = None): # type: ignore\n results = [inp[\"documents\"] for inp in inputs]\n document_map = {doc.id: doc for result in results for doc in result}\n\n if self.join_mode == \"concatenate\":\n scores_map = self._concatenate_results(results, document_map)\n elif self.join_mode == \"merge\":\n scores_map = self._calculate_comb_sum(results)\n elif self.join_mode == \"reciprocal_rank_fusion\":\n scores_map = self._calculate_rrf(results)\n else:\n raise ValueError(f\"Invalid join_mode: {self.join_mode}\")\n\n # only sort the docs if that was requested\n if self.sort_by_score:\n sorted_docs = sorted(scores_map.items(), key=lambda d: d[1] if d[1] is not None else -inf, reverse=True)\n if any(s is None for s in scores_map.values()):\n logger.info(\n \"The `JoinDocuments` node has received some documents with `score=None` - and was requested \"\n \"to sort the documents by score, so the `score=None` documents got sorted as if their \"\n \"score would be `-infinity`.\"\n )\n else:\n sorted_docs = list(scores_map.items())\n\n if not top_k_join:\n top_k_join = self.top_k_join\n if not top_k_join:\n top_k_join = len(sorted_docs)\n\n docs = []\n for id, score in sorted_docs[:top_k_join]:\n doc = document_map[id]\n doc.score = score\n docs.append(doc)\n\n output = {\"documents\": docs, \"labels\": inputs[0].get(\"labels\", None)}\n\n return output, \"output_1\"\n\n def run_batch_accumulated(self, inputs: List[dict], top_k_join: Optional[int] = None): # type: ignore\n # Join single document lists\n if isinstance(inputs[0][\"documents\"][0], Document):\n return self.run(inputs=inputs, top_k_join=top_k_join)\n # Join lists of document lists\n else:\n output_docs = []\n incoming_edges = [inp[\"documents\"] for inp in inputs]\n for idx in range(len(incoming_edges[0])):\n cur_docs_to_join = []\n for edge in incoming_edges:\n cur_docs_to_join.append({\"documents\": edge[idx]})\n cur, _ = self.run(inputs=cur_docs_to_join, top_k_join=top_k_join)\n output_docs.append(cur[\"documents\"])\n\n output = {\"documents\": output_docs, \"labels\": inputs[0].get(\"labels\", None)}\n\n return output, \"output_1\"\n\n def _concatenate_results(self, results, document_map):\n \"\"\"\n Concatenates multiple document result lists.\n Return the documents with the higher score.\n \"\"\"\n list_id = list(document_map.keys())\n scores_map = {}\n for idx in list_id:\n tmp = []\n for result in results:\n for doc in result:\n if doc.id == idx:\n tmp.append(doc)\n item_best_score = max(tmp, key=lambda x: x.score if x.score is not None else -inf)\n scores_map.update({idx: item_best_score.score})\n return scores_map\n\n def _calculate_comb_sum(self, results):\n \"\"\"\n Calculates a combination sum by multiplying each score by its weight.\n \"\"\"\n scores_map = defaultdict(int)\n weights = self.weights if self.weights else [1 / 
len(results)] * len(results)\n\n for result, weight in zip(results, weights):\n for doc in result:\n scores_map[doc.id] += (doc.score if doc.score else 0) * weight\n\n return scores_map\n\n def _calculate_rrf(self, results):\n \"\"\"\n Calculates the reciprocal rank fusion. The constant K is set to 61 (60 was suggested by the original paper,\n plus 1 as python lists are 0-based and the paper used 1-based ranking).\n \"\"\"\n K = 61\n\n scores_map = defaultdict(int)\n for result in results:\n for rank, doc in enumerate(result):\n scores_map[doc.id] += 1 / (K + rank)\n\n return scores_map\n", "path": "haystack/nodes/other/join_docs.py"}]} | 3,125 | 314 |
gh_patches_debug_21537 | rasdani/github-patches | git_diff | Flexget__Flexget-2187 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SABnzbd not adding local nzb files
### Expected behaviour:
Add local nzb file to SABnzbd.
### Actual behaviour:
Not added to SABnzbd but still logged as a success.
### Steps to reproduce:
- Step 1: Run the task below
#### Config:
```
download-manual:
filesystem:
path: /data/watch
mask: '*.nzb'
disable: seen
accept_all: yes
sabnzbd:
<<: *sabnzbd-config
```
#### Log:
```
2018-08-01 19:19 VERBOSE filesystem download-movies-manual Scanning folder /data/watch. Recursion is set to False.
2018-08-01 19:27 DEBUG filesystem download-movies-manual Scanning /data/watch
2018-08-01 19:27 DEBUG filesystem download-movies-manual Checking if /data/watch/Butch_Cassidy_and_the_Sundance_Kid.1969-NoGroup.nzb qualifies to be added as an entry.
2018-08-01 19:27 DEBUG backlog download-movies-manual 0 entries purged from backlog
2018-08-01 19:19 VERBOSE details download-movies-manual Produced 1 entries.
2018-08-01 19:19 VERBOSE task download-movies-manual ACCEPTED: `Butch_Cassidy_and_the_Sundance_Kid.1969-NoGroup` by accept_all plugin
2018-08-01 19:19 VERBOSE details download-movies-manual Summary - Accepted: 1 (Rejected: 0 Undecided: 0 Failed: 0)
2018-08-01 19:27 DEBUG sabnzbd download-movies-manual request_url: http://sabnzbd:8080/api?nzbname=Butch_Cassidy_and_the_Sundance_Kid.1969-NoGroup&apikey=<api_key>&mode=addurl&name=file%3A%2F%2F%2Fdata%2Fwatch%2FButch_Cassidy_and_the_Sundance_Kid.1969-NoGroup.nzb
2018-08-01 19:27 DEBUG utils.requests download-movies-manual GETing URL http://sabnzbd:8080/api?nzbname=Butch_Cassidy_and_the_Sundance_Kid.1969-NoGroup&apikey=<api_key>&mode=addurl&name=file%3A%2F%2F%2Fdata%2Fwatch%2FButch_Cassidy_and_the_Sundance_Kid.1969-NoGroup.nzb with args () and kwargs {'allow_redirects': True, u'timeout': 30}
2018-08-01 19:19 INFO sabnzbd download-movies-manual Added `Butch_Cassidy_and_the_Sundance_Kid.1969-NoGroup` to SABnzbd
```
### Additional information:
- FlexGet version: 2.14.13.dev
- Python version: 2.7.15
- Installation method: git install
- Using daemon: no
- OS and version: Debian GNU/Linux 9 (stretch)
PR incoming.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flexget/plugins/output/sabnzbd.py`
Content:
```
1 from __future__ import unicode_literals, division, absolute_import
2 from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
3 from future.moves.urllib.parse import urlencode
4
5 import logging
6
7 from flexget import plugin
8 from flexget.event import event
9 from requests import RequestException
10
11 log = logging.getLogger('sabnzbd')
12
13
14 class OutputSabnzbd(object):
15 """
16 Example::
17
18 sabnzbd:
19 apikey: 123456
20 url: http://localhost/sabnzbd/api?
21 category: movies
22
23 All parameters::
24
25 sabnzbd:
26 apikey: ...
27 url: ...
28 category: ...
29 script: ...
30 pp: ...
31 priority: ...
32 """
33 schema = {
34 'type': 'object',
35 'properties': {
36 'key': {'type': 'string'},
37 'url': {'type': 'string', 'format': 'url'},
38 'category': {'type': 'string'},
39 'script': {'type': 'string'},
40 'pp': {'type': 'string'},
41 'priority': {'type': 'integer'},
42 'password': {'type': 'string'},
43 'username': {'type': 'string'},
44 },
45 'required': ['key', 'url'],
46 'additionalProperties': False,
47 }
48
49 def get_params(self, config):
50 params = {}
51 if 'key' in config:
52 params['apikey'] = config['key']
53 if 'category' in config:
54 params['cat'] = '%s' % config['category']
55 if 'script' in config:
56 params['script'] = config['script']
57 if 'pp' in config:
58 params['pp'] = config['pp']
59 if 'priority' in config:
60 params['priority'] = config['priority']
61 if 'username' in config:
62 params['ma_username'] = config['username']
63 if 'password' in config:
64 params['ma_password'] = config['password']
65 params['mode'] = 'addurl'
66 return params
67
68 def on_task_output(self, task, config):
69 for entry in task.accepted:
70 if task.options.test:
71 log.info('Would add into sabnzbd: %s' % entry['title'])
72 continue
73
74 params = self.get_params(config)
75 # allow overriding the category
76 if 'category' in entry:
77 # Dirty hack over the next few lines to strip out non-ascii
78 # chars. We're going to urlencode this, which causes
79 # serious issues in python2.x if it's not ascii input.
80 params['cat'] = ''.join([x for x in entry['category'] if ord(x) < 128])
81 params['name'] = ''.join([x for x in entry['url'] if ord(x) < 128])
82 # add cleaner nzb name (undocumented api feature)
83 params['nzbname'] = ''.join([x for x in entry['title'] if ord(x) < 128])
84
85 request_url = config['url'] + urlencode(params)
86 log.debug('request_url: %s' % request_url)
87 try:
88 response = task.requests.get(request_url)
89 except RequestException as e:
90 log.critical('Failed to use sabnzbd. Requested %s' % request_url)
91 log.critical('Result was: %s' % e.args[0])
92 entry.fail('sabnzbd unreachable')
93 if task.options.debug:
94 log.exception(e)
95 continue
96
97 if 'error' in response.text.lower():
98 entry.fail(response.text.replace('\n', ''))
99 else:
100 log.info('Added `%s` to SABnzbd' % (entry['title']))
101
102
103 @event('plugin.register')
104 def register_plugin():
105 plugin.register(OutputSabnzbd, 'sabnzbd', api_ver=2)
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flexget/plugins/output/sabnzbd.py b/flexget/plugins/output/sabnzbd.py
--- a/flexget/plugins/output/sabnzbd.py
+++ b/flexget/plugins/output/sabnzbd.py
@@ -62,7 +62,6 @@
params['ma_username'] = config['username']
if 'password' in config:
params['ma_password'] = config['password']
- params['mode'] = 'addurl'
return params
def on_task_output(self, task, config):
@@ -82,6 +81,13 @@
# add cleaner nzb name (undocumented api feature)
params['nzbname'] = ''.join([x for x in entry['title'] if ord(x) < 128])
+ # check whether file is local or remote
+ if entry['url'].startswith('file://'):
+ params['mode'] = 'addlocalfile'
+ params['name'] = entry['location']
+ else:
+ params['mode'] = 'addurl'
+
request_url = config['url'] + urlencode(params)
log.debug('request_url: %s' % request_url)
try:
| {"golden_diff": "diff --git a/flexget/plugins/output/sabnzbd.py b/flexget/plugins/output/sabnzbd.py\n--- a/flexget/plugins/output/sabnzbd.py\n+++ b/flexget/plugins/output/sabnzbd.py\n@@ -62,7 +62,6 @@\n params['ma_username'] = config['username']\n if 'password' in config:\n params['ma_password'] = config['password']\n- params['mode'] = 'addurl'\n return params\n \n def on_task_output(self, task, config):\n@@ -82,6 +81,13 @@\n # add cleaner nzb name (undocumented api feature)\n params['nzbname'] = ''.join([x for x in entry['title'] if ord(x) < 128])\n \n+ # check whether file is local or remote\n+ if entry['url'].startswith('file://'):\n+ params['mode'] = 'addlocalfile'\n+ params['name'] = entry['location']\n+ else:\n+ params['mode'] = 'addurl'\n+\n request_url = config['url'] + urlencode(params)\n log.debug('request_url: %s' % request_url)\n try:\n", "issue": "SABnzbd not adding local nzb files\n### Expected behaviour:\r\nAdd local nzb file to SABnzbd.\r\n\r\n### Actual behaviour:\r\nNot added to SABnzbd but still logged as a success.\r\n\r\n### Steps to reproduce:\r\n- Step 1: Run the task below\r\n\r\n#### Config:\r\n```\r\ndownload-manual:\r\n filesystem:\r\n path: /data/watch\r\n mask: '*.nzb'\r\n disable: seen\r\n accept_all: yes\r\n sabnzbd:\r\n <<: *sabnzbd-config\r\n```\r\n \r\n#### Log:\r\n```\r\n2018-08-01 19:19 VERBOSE filesystem download-movies-manual Scanning folder /data/watch. Recursion is set to False.\r\n2018-08-01 19:27 DEBUG filesystem download-movies-manual Scanning /data/watch\r\n2018-08-01 19:27 DEBUG filesystem download-movies-manual Checking if /data/watch/Butch_Cassidy_and_the_Sundance_Kid.1969-NoGroup.nzb qualifies to be added as an entry.\r\n2018-08-01 19:27 DEBUG backlog download-movies-manual 0 entries purged from backlog\r\n2018-08-01 19:19 VERBOSE details download-movies-manual Produced 1 entries.\r\n2018-08-01 19:19 VERBOSE task download-movies-manual ACCEPTED: `Butch_Cassidy_and_the_Sundance_Kid.1969-NoGroup` by accept_all plugin\r\n2018-08-01 19:19 VERBOSE details download-movies-manual Summary - Accepted: 1 (Rejected: 0 Undecided: 0 Failed: 0)\r\n2018-08-01 19:27 DEBUG sabnzbd download-movies-manual request_url: http://sabnzbd:8080/api?nzbname=Butch_Cassidy_and_the_Sundance_Kid.1969-NoGroup&apikey=<api_key>&mode=addurl&name=file%3A%2F%2F%2Fdata%2Fwatch%2FButch_Cassidy_and_the_Sundance_Kid.1969-NoGroup.nzb\r\n2018-08-01 19:27 DEBUG utils.requests download-movies-manual GETing URL http://sabnzbd:8080/api?nzbname=Butch_Cassidy_and_the_Sundance_Kid.1969-NoGroup&apikey=<api_key>&mode=addurl&name=file%3A%2F%2F%2Fdata%2Fwatch%2FButch_Cassidy_and_the_Sundance_Kid.1969-NoGroup.nzb with args () and kwargs {'allow_redirects': True, u'timeout': 30}\r\n2018-08-01 19:19 INFO sabnzbd download-movies-manual Added `Butch_Cassidy_and_the_Sundance_Kid.1969-NoGroup` to SABnzbd\r\n```\r\n\r\n### Additional information:\r\n- FlexGet version: 2.14.13.dev\r\n- Python version: 2.7.15\r\n- Installation method: git install\r\n- Using daemon: no\r\n- OS and version: Debian GNU/Linux 9 (stretch)\r\n\r\nPR incoming.\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\nfrom future.moves.urllib.parse import urlencode\n\nimport logging\n\nfrom flexget import plugin\nfrom flexget.event import event\nfrom requests import RequestException\n\nlog = logging.getLogger('sabnzbd')\n\n\nclass OutputSabnzbd(object):\n \"\"\"\n Example::\n\n sabnzbd:\n apikey: 123456\n 
url: http://localhost/sabnzbd/api?\n category: movies\n\n All parameters::\n\n sabnzbd:\n apikey: ...\n url: ...\n category: ...\n script: ...\n pp: ...\n priority: ...\n \"\"\"\n schema = {\n 'type': 'object',\n 'properties': {\n 'key': {'type': 'string'},\n 'url': {'type': 'string', 'format': 'url'},\n 'category': {'type': 'string'},\n 'script': {'type': 'string'},\n 'pp': {'type': 'string'},\n 'priority': {'type': 'integer'},\n 'password': {'type': 'string'},\n 'username': {'type': 'string'},\n },\n 'required': ['key', 'url'],\n 'additionalProperties': False,\n }\n\n def get_params(self, config):\n params = {}\n if 'key' in config:\n params['apikey'] = config['key']\n if 'category' in config:\n params['cat'] = '%s' % config['category']\n if 'script' in config:\n params['script'] = config['script']\n if 'pp' in config:\n params['pp'] = config['pp']\n if 'priority' in config:\n params['priority'] = config['priority']\n if 'username' in config:\n params['ma_username'] = config['username']\n if 'password' in config:\n params['ma_password'] = config['password']\n params['mode'] = 'addurl'\n return params\n\n def on_task_output(self, task, config):\n for entry in task.accepted:\n if task.options.test:\n log.info('Would add into sabnzbd: %s' % entry['title'])\n continue\n\n params = self.get_params(config)\n # allow overriding the category\n if 'category' in entry:\n # Dirty hack over the next few lines to strip out non-ascii\n # chars. We're going to urlencode this, which causes\n # serious issues in python2.x if it's not ascii input.\n params['cat'] = ''.join([x for x in entry['category'] if ord(x) < 128])\n params['name'] = ''.join([x for x in entry['url'] if ord(x) < 128])\n # add cleaner nzb name (undocumented api feature)\n params['nzbname'] = ''.join([x for x in entry['title'] if ord(x) < 128])\n\n request_url = config['url'] + urlencode(params)\n log.debug('request_url: %s' % request_url)\n try:\n response = task.requests.get(request_url)\n except RequestException as e:\n log.critical('Failed to use sabnzbd. 
Requested %s' % request_url)\n log.critical('Result was: %s' % e.args[0])\n entry.fail('sabnzbd unreachable')\n if task.options.debug:\n log.exception(e)\n continue\n\n if 'error' in response.text.lower():\n entry.fail(response.text.replace('\\n', ''))\n else:\n log.info('Added `%s` to SABnzbd' % (entry['title']))\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(OutputSabnzbd, 'sabnzbd', api_ver=2)\n", "path": "flexget/plugins/output/sabnzbd.py"}], "after_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\nfrom future.moves.urllib.parse import urlencode\n\nimport logging\n\nfrom flexget import plugin\nfrom flexget.event import event\nfrom requests import RequestException\n\nlog = logging.getLogger('sabnzbd')\n\n\nclass OutputSabnzbd(object):\n \"\"\"\n Example::\n\n sabnzbd:\n apikey: 123456\n url: http://localhost/sabnzbd/api?\n category: movies\n\n All parameters::\n\n sabnzbd:\n apikey: ...\n url: ...\n category: ...\n script: ...\n pp: ...\n priority: ...\n \"\"\"\n schema = {\n 'type': 'object',\n 'properties': {\n 'key': {'type': 'string'},\n 'url': {'type': 'string', 'format': 'url'},\n 'category': {'type': 'string'},\n 'script': {'type': 'string'},\n 'pp': {'type': 'string'},\n 'priority': {'type': 'integer'},\n 'password': {'type': 'string'},\n 'username': {'type': 'string'},\n },\n 'required': ['key', 'url'],\n 'additionalProperties': False,\n }\n\n def get_params(self, config):\n params = {}\n if 'key' in config:\n params['apikey'] = config['key']\n if 'category' in config:\n params['cat'] = '%s' % config['category']\n if 'script' in config:\n params['script'] = config['script']\n if 'pp' in config:\n params['pp'] = config['pp']\n if 'priority' in config:\n params['priority'] = config['priority']\n if 'username' in config:\n params['ma_username'] = config['username']\n if 'password' in config:\n params['ma_password'] = config['password']\n return params\n\n def on_task_output(self, task, config):\n for entry in task.accepted:\n if task.options.test:\n log.info('Would add into sabnzbd: %s' % entry['title'])\n continue\n\n params = self.get_params(config)\n # allow overriding the category\n if 'category' in entry:\n # Dirty hack over the next few lines to strip out non-ascii\n # chars. We're going to urlencode this, which causes\n # serious issues in python2.x if it's not ascii input.\n params['cat'] = ''.join([x for x in entry['category'] if ord(x) < 128])\n params['name'] = ''.join([x for x in entry['url'] if ord(x) < 128])\n # add cleaner nzb name (undocumented api feature)\n params['nzbname'] = ''.join([x for x in entry['title'] if ord(x) < 128])\n\n # check whether file is local or remote\n if entry['url'].startswith('file://'):\n params['mode'] = 'addlocalfile'\n params['name'] = entry['location']\n else:\n params['mode'] = 'addurl'\n\n request_url = config['url'] + urlencode(params)\n log.debug('request_url: %s' % request_url)\n try:\n response = task.requests.get(request_url)\n except RequestException as e:\n log.critical('Failed to use sabnzbd. 
Requested %s' % request_url)\n log.critical('Result was: %s' % e.args[0])\n entry.fail('sabnzbd unreachable')\n if task.options.debug:\n log.exception(e)\n continue\n\n if 'error' in response.text.lower():\n entry.fail(response.text.replace('\\n', ''))\n else:\n log.info('Added `%s` to SABnzbd' % (entry['title']))\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(OutputSabnzbd, 'sabnzbd', api_ver=2)\n", "path": "flexget/plugins/output/sabnzbd.py"}]} | 2,096 | 267 |
gh_patches_debug_22442 | rasdani/github-patches | git_diff | getsentry__sentry-24461 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
I have an issue when I import the export json file
## Important Details
On-Premise w/ Docker, version 9.1.2
## Description
I have two servers with a sentry. There are several projects on the first server, and I would like to copy these projects to the clear second server.
I use the export/import commands. The export command works fine. However, when I run the import command, I get an error.
## Steps to Reproduce
1. Run command 'sentry export sentry_export.json' on the first server
2. Run command 'sentry import sentry_export.json' on the second server
3. Get an error
Good items to include here include:
`Traceback (most recent call last):
File "/usr/local/bin/sentry", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python2.7/site-packages/sentry/runner/__init__.py", line 162, in main
cli(prog_name=get_prog(), obj={}, max_content_width=100)
File "/usr/local/lib/python2.7/site-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python2.7/site-packages/click/core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/lib/python2.7/site-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python2.7/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/click/decorators.py", line 17, in new_func
return f(get_current_context(), *args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/sentry/runner/decorators.py", line 36, in inner
return ctx.invoke(f, *args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/sentry/runner/commands/backup.py", line 21, in import_
for obj in serializers.deserialize("json", src, stream=True, use_natural_keys=True):
File "/usr/local/lib/python2.7/site-packages/django/core/serializers/json.py", line 76, in Deserializer
six.reraise(DeserializationError, DeserializationError(e), sys.exc_info()[2])
File "/usr/local/lib/python2.7/site-packages/django/core/serializers/json.py", line 70, in Deserializer
for obj in PythonDeserializer(objects, **options):
File "/usr/local/lib/python2.7/site-packages/django/core/serializers/python.py", line 140, in Deserializer
data[field.name] = field.to_python(field_value)
File "/usr/local/lib/python2.7/site-packages/sentry/db/models/fields/array.py", line 56, in to_python
value = json.loads(value)
File "/usr/local/lib/python2.7/site-packages/sentry/utils/json.py", line 111, in loads
return _default_decoder.decode(value)
File "/usr/local/lib/python2.7/site-packages/simplejson/decoder.py", line 370, in decode
obj, end = self.raw_decode(s)
File "/usr/local/lib/python2.7/site-packages/simplejson/decoder.py", line 400, in raw_decode
return self.scan_once(s, idx=_w(s, idx).end())
django.core.serializers.base.DeserializationError: Expecting value: line 1 column 2 (char 1)`
### What you expected to happen
Import works fine too
### Possible Solution
[If you have an idea on how this could be solved include that detail here.]
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/db/models/fields/array.py`
Content:
```
1 from django.db import models
2
3 from sentry.db.models.utils import Creator
4 from sentry.utils import json
5 from sentry.utils.compat import map
6
7
8 # Adapted from django-pgfields
9 # https://github.com/lukesneeringer/django-pgfields/blob/master/django_pg/models/fields/array.py
10 class ArrayField(models.Field):
11 def __init__(self, of=models.TextField, **kwargs):
12 # Arrays in PostgreSQL are arrays of a particular type.
13 # Save the subtype in our field class.
14 if isinstance(of, type):
15 of = of()
16 self.of = of
17
18 # Set "null" to True. Arrays don't have nulls, but null=True
19 # in the ORM amounts to nothing in SQL (whereas null=False
20 # corresponds to `NOT NULL`)
21 kwargs["null"] = True
22
23 super().__init__(**kwargs)
24
25 def contribute_to_class(self, cls, name):
26 """
27 Add a descriptor for backwards compatibility
28 with previous Django behavior.
29 """
30 super().contribute_to_class(cls, name)
31 setattr(cls, name, Creator(self))
32
33 def db_type(self, connection):
34 return f"{self.of.db_type(connection)}[]"
35
36 def get_internal_type(self):
37 return "TextField"
38
39 def to_python(self, value):
40 if not value:
41 value = []
42 if isinstance(value, str):
43 value = json.loads(value)
44 return map(self.of.to_python, value)
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/sentry/db/models/fields/array.py b/src/sentry/db/models/fields/array.py
--- a/src/sentry/db/models/fields/array.py
+++ b/src/sentry/db/models/fields/array.py
@@ -1,3 +1,5 @@
+import ast
+
from django.db import models
from sentry.db.models.utils import Creator
@@ -36,9 +38,26 @@
def get_internal_type(self):
return "TextField"
+ def get_prep_value(self, value):
+ """Iterate over each item in the array, and run it
+ through the `get_prep_value` of this array's type.
+ """
+ # If no valid value was given, return an empty list.
+ if not value:
+ return []
+
+ # Appropriately coerce each individual value within
+ # our array.
+ return [self.of.get_prep_value(item) for item in value]
+
def to_python(self, value):
if not value:
value = []
if isinstance(value, str):
- value = json.loads(value)
+ try:
+ value = json.loads(value)
+ except json.JSONDecodeError:
+ # This is to accomodate the erronous exports pre 21.4.0
+ # See getsentry/sentry#23843 for more details
+ value = ast.literal_eval(value)
return map(self.of.to_python, value)
| {"golden_diff": "diff --git a/src/sentry/db/models/fields/array.py b/src/sentry/db/models/fields/array.py\n--- a/src/sentry/db/models/fields/array.py\n+++ b/src/sentry/db/models/fields/array.py\n@@ -1,3 +1,5 @@\n+import ast\n+\n from django.db import models\n \n from sentry.db.models.utils import Creator\n@@ -36,9 +38,26 @@\n def get_internal_type(self):\n return \"TextField\"\n \n+ def get_prep_value(self, value):\n+ \"\"\"Iterate over each item in the array, and run it\n+ through the `get_prep_value` of this array's type.\n+ \"\"\"\n+ # If no valid value was given, return an empty list.\n+ if not value:\n+ return []\n+\n+ # Appropriately coerce each individual value within\n+ # our array.\n+ return [self.of.get_prep_value(item) for item in value]\n+\n def to_python(self, value):\n if not value:\n value = []\n if isinstance(value, str):\n- value = json.loads(value)\n+ try:\n+ value = json.loads(value)\n+ except json.JSONDecodeError:\n+ # This is to accomodate the erronous exports pre 21.4.0\n+ # See getsentry/sentry#23843 for more details\n+ value = ast.literal_eval(value)\n return map(self.of.to_python, value)\n", "issue": "I have an issue when I import the export json file\n## Important Details\r\n\r\nOn-Premise w/ Docker, version 9.1.2\r\n\r\n## Description\r\n\r\nI have two servers with a sentry. There are several projects on the first server, and I would like to copy these projects to the clear second server.\r\nI use the export/import commands. The export command works fine. However, when I run the import command, I get an error.\r\n\r\n## Steps to Reproduce\r\n\r\n1. Run command 'sentry export sentry_export.json' on the first server\r\n2. Run command 'sentry import sentry_export.json' on the second server\r\n3. Get an error\r\n\r\nGood items to include here include:\r\n\r\n`Traceback (most recent call last):\r\n File \"/usr/local/bin/sentry\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/usr/local/lib/python2.7/site-packages/sentry/runner/__init__.py\", line 162, in main\r\n cli(prog_name=get_prog(), obj={}, max_content_width=100)\r\n File \"/usr/local/lib/python2.7/site-packages/click/core.py\", line 722, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/usr/local/lib/python2.7/site-packages/click/core.py\", line 697, in main\r\n rv = self.invoke(ctx)\r\n File \"/usr/local/lib/python2.7/site-packages/click/core.py\", line 1066, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/usr/local/lib/python2.7/site-packages/click/core.py\", line 895, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/usr/local/lib/python2.7/site-packages/click/core.py\", line 535, in invoke\r\n return callback(*args, **kwargs)\r\n File \"/usr/local/lib/python2.7/site-packages/click/decorators.py\", line 17, in new_func\r\n return f(get_current_context(), *args, **kwargs)\r\n File \"/usr/local/lib/python2.7/site-packages/sentry/runner/decorators.py\", line 36, in inner\r\n return ctx.invoke(f, *args, **kwargs)\r\n File \"/usr/local/lib/python2.7/site-packages/click/core.py\", line 535, in invoke\r\n return callback(*args, **kwargs)\r\n File \"/usr/local/lib/python2.7/site-packages/sentry/runner/commands/backup.py\", line 21, in import_\r\n for obj in serializers.deserialize(\"json\", src, stream=True, use_natural_keys=True):\r\n File \"/usr/local/lib/python2.7/site-packages/django/core/serializers/json.py\", line 76, in Deserializer\r\n six.reraise(DeserializationError, DeserializationError(e), sys.exc_info()[2])\r\n File 
\"/usr/local/lib/python2.7/site-packages/django/core/serializers/json.py\", line 70, in Deserializer\r\n for obj in PythonDeserializer(objects, **options):\r\n File \"/usr/local/lib/python2.7/site-packages/django/core/serializers/python.py\", line 140, in Deserializer\r\n data[field.name] = field.to_python(field_value)\r\n File \"/usr/local/lib/python2.7/site-packages/sentry/db/models/fields/array.py\", line 56, in to_python\r\n value = json.loads(value)\r\n File \"/usr/local/lib/python2.7/site-packages/sentry/utils/json.py\", line 111, in loads\r\n return _default_decoder.decode(value)\r\n File \"/usr/local/lib/python2.7/site-packages/simplejson/decoder.py\", line 370, in decode\r\n obj, end = self.raw_decode(s)\r\n File \"/usr/local/lib/python2.7/site-packages/simplejson/decoder.py\", line 400, in raw_decode\r\n return self.scan_once(s, idx=_w(s, idx).end())\r\ndjango.core.serializers.base.DeserializationError: Expecting value: line 1 column 2 (char 1)`\r\n\r\n### What you expected to happen\r\n\r\nImport works fine too\r\n\r\n### Possible Solution\r\n\r\n[If you have an idea on how this could be solved include that detail here.]\r\n\n", "before_files": [{"content": "from django.db import models\n\nfrom sentry.db.models.utils import Creator\nfrom sentry.utils import json\nfrom sentry.utils.compat import map\n\n\n# Adapted from django-pgfields\n# https://github.com/lukesneeringer/django-pgfields/blob/master/django_pg/models/fields/array.py\nclass ArrayField(models.Field):\n def __init__(self, of=models.TextField, **kwargs):\n # Arrays in PostgreSQL are arrays of a particular type.\n # Save the subtype in our field class.\n if isinstance(of, type):\n of = of()\n self.of = of\n\n # Set \"null\" to True. Arrays don't have nulls, but null=True\n # in the ORM amounts to nothing in SQL (whereas null=False\n # corresponds to `NOT NULL`)\n kwargs[\"null\"] = True\n\n super().__init__(**kwargs)\n\n def contribute_to_class(self, cls, name):\n \"\"\"\n Add a descriptor for backwards compatibility\n with previous Django behavior.\n \"\"\"\n super().contribute_to_class(cls, name)\n setattr(cls, name, Creator(self))\n\n def db_type(self, connection):\n return f\"{self.of.db_type(connection)}[]\"\n\n def get_internal_type(self):\n return \"TextField\"\n\n def to_python(self, value):\n if not value:\n value = []\n if isinstance(value, str):\n value = json.loads(value)\n return map(self.of.to_python, value)\n", "path": "src/sentry/db/models/fields/array.py"}], "after_files": [{"content": "import ast\n\nfrom django.db import models\n\nfrom sentry.db.models.utils import Creator\nfrom sentry.utils import json\nfrom sentry.utils.compat import map\n\n\n# Adapted from django-pgfields\n# https://github.com/lukesneeringer/django-pgfields/blob/master/django_pg/models/fields/array.py\nclass ArrayField(models.Field):\n def __init__(self, of=models.TextField, **kwargs):\n # Arrays in PostgreSQL are arrays of a particular type.\n # Save the subtype in our field class.\n if isinstance(of, type):\n of = of()\n self.of = of\n\n # Set \"null\" to True. 
Arrays don't have nulls, but null=True\n # in the ORM amounts to nothing in SQL (whereas null=False\n # corresponds to `NOT NULL`)\n kwargs[\"null\"] = True\n\n super().__init__(**kwargs)\n\n def contribute_to_class(self, cls, name):\n \"\"\"\n Add a descriptor for backwards compatibility\n with previous Django behavior.\n \"\"\"\n super().contribute_to_class(cls, name)\n setattr(cls, name, Creator(self))\n\n def db_type(self, connection):\n return f\"{self.of.db_type(connection)}[]\"\n\n def get_internal_type(self):\n return \"TextField\"\n\n def get_prep_value(self, value):\n \"\"\"Iterate over each item in the array, and run it\n through the `get_prep_value` of this array's type.\n \"\"\"\n # If no valid value was given, return an empty list.\n if not value:\n return []\n\n # Appropriately coerce each individual value within\n # our array.\n return [self.of.get_prep_value(item) for item in value]\n\n def to_python(self, value):\n if not value:\n value = []\n if isinstance(value, str):\n try:\n value = json.loads(value)\n except json.JSONDecodeError:\n # This is to accomodate the erronous exports pre 21.4.0\n # See getsentry/sentry#23843 for more details\n value = ast.literal_eval(value)\n return map(self.of.to_python, value)\n", "path": "src/sentry/db/models/fields/array.py"}]} | 1,583 | 319 |
gh_patches_debug_40098 | rasdani/github-patches | git_diff | pytorch__vision-6458 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Port `transforms.LinearTransformation` to `prototype.transforms`
cc @vfdev-5 @datumbox @bjuncek
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchvision/prototype/transforms/_misc.py`
Content:
```
1 import functools
2 from typing import Any, Callable, Dict, List, Sequence, Type, Union
3
4 import torch
5 from torchvision.prototype.transforms import functional as F, Transform
6 from torchvision.transforms.transforms import _setup_size
7
8
9 class Identity(Transform):
10 def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
11 return inpt
12
13
14 class Lambda(Transform):
15 def __init__(self, fn: Callable[[Any], Any], *types: Type):
16 super().__init__()
17 self.fn = fn
18 self.types = types
19
20 def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
21 if type(inpt) in self.types:
22 return self.fn(inpt)
23 else:
24 return inpt
25
26 def extra_repr(self) -> str:
27 extras = []
28 name = getattr(self.fn, "__name__", None)
29 if name:
30 extras.append(name)
31 extras.append(f"types={[type.__name__ for type in self.types]}")
32 return ", ".join(extras)
33
34
35 class Normalize(Transform):
36 def __init__(self, mean: List[float], std: List[float]):
37 super().__init__()
38 self.mean = mean
39 self.std = std
40
41 def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
42 return F.normalize(inpt, mean=self.mean, std=self.std)
43
44
45 class GaussianBlur(Transform):
46 def __init__(
47 self, kernel_size: Union[int, Sequence[int]], sigma: Union[float, Sequence[float]] = (0.1, 2.0)
48 ) -> None:
49 super().__init__()
50 self.kernel_size = _setup_size(kernel_size, "Kernel size should be a tuple/list of two integers")
51 for ks in self.kernel_size:
52 if ks <= 0 or ks % 2 == 0:
53 raise ValueError("Kernel size value should be an odd and positive number.")
54
55 if isinstance(sigma, float):
56 if sigma <= 0:
57 raise ValueError("If sigma is a single number, it must be positive.")
58 sigma = (sigma, sigma)
59 elif isinstance(sigma, Sequence) and len(sigma) == 2:
60 if not 0.0 < sigma[0] <= sigma[1]:
61 raise ValueError("sigma values should be positive and of the form (min, max).")
62 else:
63 raise TypeError("sigma should be a single float or a list/tuple with length 2 floats.")
64
65 self.sigma = sigma
66
67 def _get_params(self, sample: Any) -> Dict[str, Any]:
68 sigma = torch.empty(1).uniform_(self.sigma[0], self.sigma[1]).item()
69 return dict(sigma=[sigma, sigma])
70
71 def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
72 return F.gaussian_blur(inpt, **params)
73
74
75 class ToDtype(Lambda):
76 def __init__(self, dtype: torch.dtype, *types: Type) -> None:
77 self.dtype = dtype
78 super().__init__(functools.partial(torch.Tensor.to, dtype=dtype), *types)
79
80 def extra_repr(self) -> str:
81 return ", ".join([f"dtype={self.dtype}", f"types={[type.__name__ for type in self.types]}"])
82
```
Path: `torchvision/prototype/transforms/__init__.py`
Content:
```
1 from . import functional # usort: skip
2
3 from ._transform import Transform # usort: skip
4
5 from ._augment import RandomCutmix, RandomErasing, RandomMixup
6 from ._auto_augment import AugMix, AutoAugment, AutoAugmentPolicy, RandAugment, TrivialAugmentWide
7 from ._color import (
8 ColorJitter,
9 RandomAdjustSharpness,
10 RandomAutocontrast,
11 RandomEqualize,
12 RandomInvert,
13 RandomPhotometricDistort,
14 RandomPosterize,
15 RandomSolarize,
16 )
17 from ._container import Compose, RandomApply, RandomChoice, RandomOrder
18 from ._geometry import (
19 BatchMultiCrop,
20 CenterCrop,
21 ElasticTransform,
22 FiveCrop,
23 FixedSizeCrop,
24 Pad,
25 RandomAffine,
26 RandomCrop,
27 RandomHorizontalFlip,
28 RandomIoUCrop,
29 RandomPerspective,
30 RandomResizedCrop,
31 RandomRotation,
32 RandomShortestSize,
33 RandomVerticalFlip,
34 RandomZoomOut,
35 Resize,
36 ScaleJitter,
37 TenCrop,
38 )
39 from ._meta import ConvertBoundingBoxFormat, ConvertColorSpace, ConvertImageDtype
40 from ._misc import GaussianBlur, Identity, Lambda, Normalize, ToDtype
41 from ._type_conversion import DecodeImage, LabelToOneHot, ToImagePIL, ToImageTensor
42
43 from ._deprecated import Grayscale, RandomGrayscale, ToTensor, ToPILImage, PILToTensor # usort: skip
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchvision/prototype/transforms/__init__.py b/torchvision/prototype/transforms/__init__.py
--- a/torchvision/prototype/transforms/__init__.py
+++ b/torchvision/prototype/transforms/__init__.py
@@ -37,7 +37,7 @@
TenCrop,
)
from ._meta import ConvertBoundingBoxFormat, ConvertColorSpace, ConvertImageDtype
-from ._misc import GaussianBlur, Identity, Lambda, Normalize, ToDtype
+from ._misc import GaussianBlur, Identity, Lambda, LinearTransformation, Normalize, ToDtype
from ._type_conversion import DecodeImage, LabelToOneHot, ToImagePIL, ToImageTensor
from ._deprecated import Grayscale, RandomGrayscale, ToTensor, ToPILImage, PILToTensor # usort: skip
diff --git a/torchvision/prototype/transforms/_misc.py b/torchvision/prototype/transforms/_misc.py
--- a/torchvision/prototype/transforms/_misc.py
+++ b/torchvision/prototype/transforms/_misc.py
@@ -1,7 +1,10 @@
import functools
from typing import Any, Callable, Dict, List, Sequence, Type, Union
+import PIL.Image
+
import torch
+from torchvision.prototype import features
from torchvision.prototype.transforms import functional as F, Transform
from torchvision.transforms.transforms import _setup_size
@@ -32,6 +35,59 @@
return ", ".join(extras)
+class LinearTransformation(Transform):
+ def __init__(self, transformation_matrix: torch.Tensor, mean_vector: torch.Tensor):
+ super().__init__()
+ if transformation_matrix.size(0) != transformation_matrix.size(1):
+ raise ValueError(
+ "transformation_matrix should be square. Got "
+ f"{tuple(transformation_matrix.size())} rectangular matrix."
+ )
+
+ if mean_vector.size(0) != transformation_matrix.size(0):
+ raise ValueError(
+ f"mean_vector should have the same length {mean_vector.size(0)}"
+ f" as any one of the dimensions of the transformation_matrix [{tuple(transformation_matrix.size())}]"
+ )
+
+ if transformation_matrix.device != mean_vector.device:
+ raise ValueError(
+ f"Input tensors should be on the same device. Got {transformation_matrix.device} and {mean_vector.device}"
+ )
+
+ self.transformation_matrix = transformation_matrix
+ self.mean_vector = mean_vector
+
+ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
+
+ if isinstance(inpt, features._Feature) and not isinstance(inpt, features.Image):
+ return inpt
+ elif isinstance(inpt, PIL.Image.Image):
+ raise TypeError("Unsupported input type")
+
+ # Image instance after linear transformation is not Image anymore due to unknown data range
+ # Thus we will return Tensor for input Image
+
+ shape = inpt.shape
+ n = shape[-3] * shape[-2] * shape[-1]
+ if n != self.transformation_matrix.shape[0]:
+ raise ValueError(
+ "Input tensor and transformation matrix have incompatible shape."
+ + f"[{shape[-3]} x {shape[-2]} x {shape[-1]}] != "
+ + f"{self.transformation_matrix.shape[0]}"
+ )
+
+ if inpt.device.type != self.mean_vector.device.type:
+ raise ValueError(
+ "Input tensor should be on the same device as transformation matrix and mean vector. "
+ f"Got {inpt.device} vs {self.mean_vector.device}"
+ )
+
+ flat_tensor = inpt.view(-1, n) - self.mean_vector
+ transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix)
+ return transformed_tensor.view(shape)
+
+
class Normalize(Transform):
def __init__(self, mean: List[float], std: List[float]):
super().__init__()
| {"golden_diff": "diff --git a/torchvision/prototype/transforms/__init__.py b/torchvision/prototype/transforms/__init__.py\n--- a/torchvision/prototype/transforms/__init__.py\n+++ b/torchvision/prototype/transforms/__init__.py\n@@ -37,7 +37,7 @@\n TenCrop,\n )\n from ._meta import ConvertBoundingBoxFormat, ConvertColorSpace, ConvertImageDtype\n-from ._misc import GaussianBlur, Identity, Lambda, Normalize, ToDtype\n+from ._misc import GaussianBlur, Identity, Lambda, LinearTransformation, Normalize, ToDtype\n from ._type_conversion import DecodeImage, LabelToOneHot, ToImagePIL, ToImageTensor\n \n from ._deprecated import Grayscale, RandomGrayscale, ToTensor, ToPILImage, PILToTensor # usort: skip\ndiff --git a/torchvision/prototype/transforms/_misc.py b/torchvision/prototype/transforms/_misc.py\n--- a/torchvision/prototype/transforms/_misc.py\n+++ b/torchvision/prototype/transforms/_misc.py\n@@ -1,7 +1,10 @@\n import functools\n from typing import Any, Callable, Dict, List, Sequence, Type, Union\n \n+import PIL.Image\n+\n import torch\n+from torchvision.prototype import features\n from torchvision.prototype.transforms import functional as F, Transform\n from torchvision.transforms.transforms import _setup_size\n \n@@ -32,6 +35,59 @@\n return \", \".join(extras)\n \n \n+class LinearTransformation(Transform):\n+ def __init__(self, transformation_matrix: torch.Tensor, mean_vector: torch.Tensor):\n+ super().__init__()\n+ if transformation_matrix.size(0) != transformation_matrix.size(1):\n+ raise ValueError(\n+ \"transformation_matrix should be square. Got \"\n+ f\"{tuple(transformation_matrix.size())} rectangular matrix.\"\n+ )\n+\n+ if mean_vector.size(0) != transformation_matrix.size(0):\n+ raise ValueError(\n+ f\"mean_vector should have the same length {mean_vector.size(0)}\"\n+ f\" as any one of the dimensions of the transformation_matrix [{tuple(transformation_matrix.size())}]\"\n+ )\n+\n+ if transformation_matrix.device != mean_vector.device:\n+ raise ValueError(\n+ f\"Input tensors should be on the same device. Got {transformation_matrix.device} and {mean_vector.device}\"\n+ )\n+\n+ self.transformation_matrix = transformation_matrix\n+ self.mean_vector = mean_vector\n+\n+ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n+\n+ if isinstance(inpt, features._Feature) and not isinstance(inpt, features.Image):\n+ return inpt\n+ elif isinstance(inpt, PIL.Image.Image):\n+ raise TypeError(\"Unsupported input type\")\n+\n+ # Image instance after linear transformation is not Image anymore due to unknown data range\n+ # Thus we will return Tensor for input Image\n+\n+ shape = inpt.shape\n+ n = shape[-3] * shape[-2] * shape[-1]\n+ if n != self.transformation_matrix.shape[0]:\n+ raise ValueError(\n+ \"Input tensor and transformation matrix have incompatible shape.\"\n+ + f\"[{shape[-3]} x {shape[-2]} x {shape[-1]}] != \"\n+ + f\"{self.transformation_matrix.shape[0]}\"\n+ )\n+\n+ if inpt.device.type != self.mean_vector.device.type:\n+ raise ValueError(\n+ \"Input tensor should be on the same device as transformation matrix and mean vector. 
\"\n+ f\"Got {inpt.device} vs {self.mean_vector.device}\"\n+ )\n+\n+ flat_tensor = inpt.view(-1, n) - self.mean_vector\n+ transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix)\n+ return transformed_tensor.view(shape)\n+\n+\n class Normalize(Transform):\n def __init__(self, mean: List[float], std: List[float]):\n super().__init__()\n", "issue": "Port `transforms.LinearTransformation` to `prototype.transforms`\ncc @vfdev-5 @datumbox @bjuncek\n", "before_files": [{"content": "import functools\nfrom typing import Any, Callable, Dict, List, Sequence, Type, Union\n\nimport torch\nfrom torchvision.prototype.transforms import functional as F, Transform\nfrom torchvision.transforms.transforms import _setup_size\n\n\nclass Identity(Transform):\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n return inpt\n\n\nclass Lambda(Transform):\n def __init__(self, fn: Callable[[Any], Any], *types: Type):\n super().__init__()\n self.fn = fn\n self.types = types\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n if type(inpt) in self.types:\n return self.fn(inpt)\n else:\n return inpt\n\n def extra_repr(self) -> str:\n extras = []\n name = getattr(self.fn, \"__name__\", None)\n if name:\n extras.append(name)\n extras.append(f\"types={[type.__name__ for type in self.types]}\")\n return \", \".join(extras)\n\n\nclass Normalize(Transform):\n def __init__(self, mean: List[float], std: List[float]):\n super().__init__()\n self.mean = mean\n self.std = std\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n return F.normalize(inpt, mean=self.mean, std=self.std)\n\n\nclass GaussianBlur(Transform):\n def __init__(\n self, kernel_size: Union[int, Sequence[int]], sigma: Union[float, Sequence[float]] = (0.1, 2.0)\n ) -> None:\n super().__init__()\n self.kernel_size = _setup_size(kernel_size, \"Kernel size should be a tuple/list of two integers\")\n for ks in self.kernel_size:\n if ks <= 0 or ks % 2 == 0:\n raise ValueError(\"Kernel size value should be an odd and positive number.\")\n\n if isinstance(sigma, float):\n if sigma <= 0:\n raise ValueError(\"If sigma is a single number, it must be positive.\")\n sigma = (sigma, sigma)\n elif isinstance(sigma, Sequence) and len(sigma) == 2:\n if not 0.0 < sigma[0] <= sigma[1]:\n raise ValueError(\"sigma values should be positive and of the form (min, max).\")\n else:\n raise TypeError(\"sigma should be a single float or a list/tuple with length 2 floats.\")\n\n self.sigma = sigma\n\n def _get_params(self, sample: Any) -> Dict[str, Any]:\n sigma = torch.empty(1).uniform_(self.sigma[0], self.sigma[1]).item()\n return dict(sigma=[sigma, sigma])\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n return F.gaussian_blur(inpt, **params)\n\n\nclass ToDtype(Lambda):\n def __init__(self, dtype: torch.dtype, *types: Type) -> None:\n self.dtype = dtype\n super().__init__(functools.partial(torch.Tensor.to, dtype=dtype), *types)\n\n def extra_repr(self) -> str:\n return \", \".join([f\"dtype={self.dtype}\", f\"types={[type.__name__ for type in self.types]}\"])\n", "path": "torchvision/prototype/transforms/_misc.py"}, {"content": "from . 
import functional # usort: skip\n\nfrom ._transform import Transform # usort: skip\n\nfrom ._augment import RandomCutmix, RandomErasing, RandomMixup\nfrom ._auto_augment import AugMix, AutoAugment, AutoAugmentPolicy, RandAugment, TrivialAugmentWide\nfrom ._color import (\n ColorJitter,\n RandomAdjustSharpness,\n RandomAutocontrast,\n RandomEqualize,\n RandomInvert,\n RandomPhotometricDistort,\n RandomPosterize,\n RandomSolarize,\n)\nfrom ._container import Compose, RandomApply, RandomChoice, RandomOrder\nfrom ._geometry import (\n BatchMultiCrop,\n CenterCrop,\n ElasticTransform,\n FiveCrop,\n FixedSizeCrop,\n Pad,\n RandomAffine,\n RandomCrop,\n RandomHorizontalFlip,\n RandomIoUCrop,\n RandomPerspective,\n RandomResizedCrop,\n RandomRotation,\n RandomShortestSize,\n RandomVerticalFlip,\n RandomZoomOut,\n Resize,\n ScaleJitter,\n TenCrop,\n)\nfrom ._meta import ConvertBoundingBoxFormat, ConvertColorSpace, ConvertImageDtype\nfrom ._misc import GaussianBlur, Identity, Lambda, Normalize, ToDtype\nfrom ._type_conversion import DecodeImage, LabelToOneHot, ToImagePIL, ToImageTensor\n\nfrom ._deprecated import Grayscale, RandomGrayscale, ToTensor, ToPILImage, PILToTensor # usort: skip\n", "path": "torchvision/prototype/transforms/__init__.py"}], "after_files": [{"content": "import functools\nfrom typing import Any, Callable, Dict, List, Sequence, Type, Union\n\nimport PIL.Image\n\nimport torch\nfrom torchvision.prototype import features\nfrom torchvision.prototype.transforms import functional as F, Transform\nfrom torchvision.transforms.transforms import _setup_size\n\n\nclass Identity(Transform):\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n return inpt\n\n\nclass Lambda(Transform):\n def __init__(self, fn: Callable[[Any], Any], *types: Type):\n super().__init__()\n self.fn = fn\n self.types = types\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n if type(inpt) in self.types:\n return self.fn(inpt)\n else:\n return inpt\n\n def extra_repr(self) -> str:\n extras = []\n name = getattr(self.fn, \"__name__\", None)\n if name:\n extras.append(name)\n extras.append(f\"types={[type.__name__ for type in self.types]}\")\n return \", \".join(extras)\n\n\nclass LinearTransformation(Transform):\n def __init__(self, transformation_matrix: torch.Tensor, mean_vector: torch.Tensor):\n super().__init__()\n if transformation_matrix.size(0) != transformation_matrix.size(1):\n raise ValueError(\n \"transformation_matrix should be square. Got \"\n f\"{tuple(transformation_matrix.size())} rectangular matrix.\"\n )\n\n if mean_vector.size(0) != transformation_matrix.size(0):\n raise ValueError(\n f\"mean_vector should have the same length {mean_vector.size(0)}\"\n f\" as any one of the dimensions of the transformation_matrix [{tuple(transformation_matrix.size())}]\"\n )\n\n if transformation_matrix.device != mean_vector.device:\n raise ValueError(\n f\"Input tensors should be on the same device. 
Got {transformation_matrix.device} and {mean_vector.device}\"\n )\n\n self.transformation_matrix = transformation_matrix\n self.mean_vector = mean_vector\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n\n if isinstance(inpt, features._Feature) and not isinstance(inpt, features.Image):\n return inpt\n elif isinstance(inpt, PIL.Image.Image):\n raise TypeError(\"Unsupported input type\")\n\n # Image instance after linear transformation is not Image anymore due to unknown data range\n # Thus we will return Tensor for input Image\n\n shape = inpt.shape\n n = shape[-3] * shape[-2] * shape[-1]\n if n != self.transformation_matrix.shape[0]:\n raise ValueError(\n \"Input tensor and transformation matrix have incompatible shape.\"\n + f\"[{shape[-3]} x {shape[-2]} x {shape[-1]}] != \"\n + f\"{self.transformation_matrix.shape[0]}\"\n )\n\n if inpt.device.type != self.mean_vector.device.type:\n raise ValueError(\n \"Input tensor should be on the same device as transformation matrix and mean vector. \"\n f\"Got {inpt.device} vs {self.mean_vector.device}\"\n )\n\n flat_tensor = inpt.view(-1, n) - self.mean_vector\n transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix)\n return transformed_tensor.view(shape)\n\n\nclass Normalize(Transform):\n def __init__(self, mean: List[float], std: List[float]):\n super().__init__()\n self.mean = mean\n self.std = std\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n return F.normalize(inpt, mean=self.mean, std=self.std)\n\n\nclass GaussianBlur(Transform):\n def __init__(\n self, kernel_size: Union[int, Sequence[int]], sigma: Union[float, Sequence[float]] = (0.1, 2.0)\n ) -> None:\n super().__init__()\n self.kernel_size = _setup_size(kernel_size, \"Kernel size should be a tuple/list of two integers\")\n for ks in self.kernel_size:\n if ks <= 0 or ks % 2 == 0:\n raise ValueError(\"Kernel size value should be an odd and positive number.\")\n\n if isinstance(sigma, float):\n if sigma <= 0:\n raise ValueError(\"If sigma is a single number, it must be positive.\")\n sigma = (sigma, sigma)\n elif isinstance(sigma, Sequence) and len(sigma) == 2:\n if not 0.0 < sigma[0] <= sigma[1]:\n raise ValueError(\"sigma values should be positive and of the form (min, max).\")\n else:\n raise TypeError(\"sigma should be a single float or a list/tuple with length 2 floats.\")\n\n self.sigma = sigma\n\n def _get_params(self, sample: Any) -> Dict[str, Any]:\n sigma = torch.empty(1).uniform_(self.sigma[0], self.sigma[1]).item()\n return dict(sigma=[sigma, sigma])\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n return F.gaussian_blur(inpt, **params)\n\n\nclass ToDtype(Lambda):\n def __init__(self, dtype: torch.dtype, *types: Type) -> None:\n self.dtype = dtype\n super().__init__(functools.partial(torch.Tensor.to, dtype=dtype), *types)\n\n def extra_repr(self) -> str:\n return \", \".join([f\"dtype={self.dtype}\", f\"types={[type.__name__ for type in self.types]}\"])\n", "path": "torchvision/prototype/transforms/_misc.py"}, {"content": "from . 
import functional # usort: skip\n\nfrom ._transform import Transform # usort: skip\n\nfrom ._augment import RandomCutmix, RandomErasing, RandomMixup\nfrom ._auto_augment import AugMix, AutoAugment, AutoAugmentPolicy, RandAugment, TrivialAugmentWide\nfrom ._color import (\n ColorJitter,\n RandomAdjustSharpness,\n RandomAutocontrast,\n RandomEqualize,\n RandomInvert,\n RandomPhotometricDistort,\n RandomPosterize,\n RandomSolarize,\n)\nfrom ._container import Compose, RandomApply, RandomChoice, RandomOrder\nfrom ._geometry import (\n BatchMultiCrop,\n CenterCrop,\n ElasticTransform,\n FiveCrop,\n FixedSizeCrop,\n Pad,\n RandomAffine,\n RandomCrop,\n RandomHorizontalFlip,\n RandomIoUCrop,\n RandomPerspective,\n RandomResizedCrop,\n RandomRotation,\n RandomShortestSize,\n RandomVerticalFlip,\n RandomZoomOut,\n Resize,\n ScaleJitter,\n TenCrop,\n)\nfrom ._meta import ConvertBoundingBoxFormat, ConvertColorSpace, ConvertImageDtype\nfrom ._misc import GaussianBlur, Identity, Lambda, LinearTransformation, Normalize, ToDtype\nfrom ._type_conversion import DecodeImage, LabelToOneHot, ToImagePIL, ToImageTensor\n\nfrom ._deprecated import Grayscale, RandomGrayscale, ToTensor, ToPILImage, PILToTensor # usort: skip\n", "path": "torchvision/prototype/transforms/__init__.py"}]} | 1,574 | 862 |
gh_patches_debug_3152 | rasdani/github-patches | git_diff | ckan__ckan-4084 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Specify max resource size in an env variable
### CKAN Version if known (or site URL)
2.7.2 (applies to head of master as well)
### Please describe the expected behaviour
When deploying CKAN through Docker it is not ideal to have to edit the configuration file in order to set the maximum resource upload size. This would be better done through environment variables.
### Please describe the actual behaviour
Currently the ckan.max_upload_size config can only be changed by editing the config .ini files.
### What steps can be taken to reproduce the issue?
Running CKAN through Docker via Docker Compose.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckan/config/environment.py`
Content:
```
1 # encoding: utf-8
2
3 '''CKAN environment configuration'''
4 import os
5 import logging
6 import warnings
7 from urlparse import urlparse
8 import pytz
9
10 import sqlalchemy
11 from pylons import config as pylons_config
12 import formencode
13
14 import ckan.config.routing as routing
15 import ckan.model as model
16 import ckan.plugins as p
17 import ckan.lib.helpers as helpers
18 import ckan.lib.app_globals as app_globals
19 from ckan.lib.redis import is_redis_available
20 import ckan.lib.render as render
21 import ckan.lib.search as search
22 import ckan.logic as logic
23 import ckan.authz as authz
24 import ckan.lib.jinja_extensions as jinja_extensions
25 from ckan.lib.i18n import build_js_translations
26
27 from ckan.common import _, ungettext, config
28 from ckan.exceptions import CkanConfigurationException
29
30 log = logging.getLogger(__name__)
31
32
33 # Suppress benign warning 'Unbuilt egg for setuptools'
34 warnings.simplefilter('ignore', UserWarning)
35
36
37 def load_environment(global_conf, app_conf):
38 """
39 Configure the Pylons environment via the ``pylons.config`` object. This
40 code should only need to be run once.
41 """
42 # this must be run at a time when the env is semi-setup, thus inlined here.
43 # Required by the deliverance plugin and iATI
44 from pylons.wsgiapp import PylonsApp
45 import pkg_resources
46 find_controller_generic = PylonsApp.find_controller
47
48 # This is from pylons 1.0 source, will monkey-patch into 0.9.7
49 def find_controller(self, controller):
50 if controller in self.controller_classes:
51 return self.controller_classes[controller]
52 # Check to see if its a dotted name
53 if '.' in controller or ':' in controller:
54 ep = pkg_resources.EntryPoint.parse('x={0}'.format(controller))
55
56 if hasattr(ep, 'resolve'):
57 # setuptools >= 10.2
58 mycontroller = ep.resolve()
59 else:
60 # setuptools >= 11.3
61 mycontroller = ep.load(False)
62
63 self.controller_classes[controller] = mycontroller
64 return mycontroller
65 return find_controller_generic(self, controller)
66 PylonsApp.find_controller = find_controller
67
68 os.environ['CKAN_CONFIG'] = global_conf['__file__']
69
70 # Pylons paths
71 root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
72
73 valid_base_public_folder_names = ['public', 'public-bs2']
74 static_files = app_conf.get('ckan.base_public_folder', 'public')
75 app_conf['ckan.base_public_folder'] = static_files
76
77 if static_files not in valid_base_public_folder_names:
78 raise CkanConfigurationException(
79 'You provided an invalid value for ckan.base_public_folder. '
80 'Possible values are: "public" and "public-bs2".'
81 )
82
83 log.info('Loading static files from %s' % static_files)
84 paths = dict(root=root,
85 controllers=os.path.join(root, 'controllers'),
86 static_files=os.path.join(root, static_files),
87 templates=[])
88
89 # Initialize main CKAN config object
90 config.update(global_conf)
91 config.update(app_conf)
92
93 # Initialize Pylons own config object
94 pylons_config.init_app(global_conf, app_conf, package='ckan', paths=paths)
95
96 # Update the main CKAN config object with the Pylons specific stuff, as it
97 # quite hard to keep them separated. This should be removed once Pylons
98 # support is dropped
99 config.update(pylons_config)
100
101 # Setup the SQLAlchemy database engine
102 # Suppress a couple of sqlalchemy warnings
103 msgs = ['^Unicode type received non-unicode bind param value',
104 "^Did not recognize type 'BIGINT' of column 'size'",
105 "^Did not recognize type 'tsvector' of column 'search_vector'"
106 ]
107 for msg in msgs:
108 warnings.filterwarnings('ignore', msg, sqlalchemy.exc.SAWarning)
109
110 # load all CKAN plugins
111 p.load_all()
112
113 # Check Redis availability
114 if not is_redis_available():
115 log.critical('Could not connect to Redis.')
116
117 app_globals.reset()
118
119 # issue #3260: remove idle transaction
120 # Session that was used for getting all config params nor committed,
121 # neither removed and we have idle connection as result
122 model.Session.commit()
123
124 # Build JavaScript translations. Must be done after plugins have
125 # been loaded.
126 build_js_translations()
127
128
129 # A mapping of config settings that can be overridden by env vars.
130 # Note: Do not remove the following lines, they are used in the docs
131 # Start CONFIG_FROM_ENV_VARS
132 CONFIG_FROM_ENV_VARS = {
133 'sqlalchemy.url': 'CKAN_SQLALCHEMY_URL',
134 'ckan.datastore.write_url': 'CKAN_DATASTORE_WRITE_URL',
135 'ckan.datastore.read_url': 'CKAN_DATASTORE_READ_URL',
136 'ckan.redis.url': 'CKAN_REDIS_URL',
137 'solr_url': 'CKAN_SOLR_URL',
138 'ckan.site_id': 'CKAN_SITE_ID',
139 'ckan.site_url': 'CKAN_SITE_URL',
140 'ckan.storage_path': 'CKAN_STORAGE_PATH',
141 'ckan.datapusher.url': 'CKAN_DATAPUSHER_URL',
142 'smtp.server': 'CKAN_SMTP_SERVER',
143 'smtp.starttls': 'CKAN_SMTP_STARTTLS',
144 'smtp.user': 'CKAN_SMTP_USER',
145 'smtp.password': 'CKAN_SMTP_PASSWORD',
146 'smtp.mail_from': 'CKAN_SMTP_MAIL_FROM'
147 }
148 # End CONFIG_FROM_ENV_VARS
149
150
151 def update_config():
152 ''' This code needs to be run when the config is changed to take those
153 changes into account. It is called whenever a plugin is loaded as the
154 plugin might have changed the config values (for instance it might
155 change ckan.site_url) '''
156
157 for plugin in p.PluginImplementations(p.IConfigurer):
158 # must do update in place as this does not work:
159 # config = plugin.update_config(config)
160 plugin.update_config(config)
161
162 # Set whitelisted env vars on config object
163 # This is set up before globals are initialized
164
165 ckan_db = os.environ.get('CKAN_DB', None)
166 if ckan_db:
167 msg = 'Setting CKAN_DB as an env var is deprecated and will be' \
168 ' removed in a future release. Use CKAN_SQLALCHEMY_URL instead.'
169 log.warn(msg)
170 config['sqlalchemy.url'] = ckan_db
171
172 for option in CONFIG_FROM_ENV_VARS:
173 from_env = os.environ.get(CONFIG_FROM_ENV_VARS[option], None)
174 if from_env:
175 config[option] = from_env
176
177 root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
178
179 site_url = config.get('ckan.site_url', '')
180 if not site_url:
181 raise RuntimeError(
182 'ckan.site_url is not configured and it must have a value.'
183 ' Please amend your .ini file.')
184 if not site_url.lower().startswith('http'):
185 raise RuntimeError(
186 'ckan.site_url should be a full URL, including the schema '
187 '(http or https)')
188
189 display_timezone = config.get('ckan.display_timezone', '')
190 if (display_timezone and
191 display_timezone != 'server' and
192 display_timezone not in pytz.all_timezones):
193 raise CkanConfigurationException(
194 "ckan.display_timezone is not 'server' or a valid timezone"
195 )
196
197 # Remove backslash from site_url if present
198 config['ckan.site_url'] = config['ckan.site_url'].rstrip('/')
199
200 ckan_host = config['ckan.host'] = urlparse(site_url).netloc
201 if config.get('ckan.site_id') is None:
202 if ':' in ckan_host:
203 ckan_host, port = ckan_host.split(':')
204 assert ckan_host, 'You need to configure ckan.site_url or ' \
205 'ckan.site_id for SOLR search-index rebuild to work.'
206 config['ckan.site_id'] = ckan_host
207
208 # ensure that a favicon has been set
209 favicon = config.get('ckan.favicon', '/base/images/ckan.ico')
210 config['ckan.favicon'] = favicon
211
212 # Init SOLR settings and check if the schema is compatible
213 # from ckan.lib.search import SolrSettings, check_solr_schema_version
214
215 # lib.search is imported here as we need the config enabled and parsed
216 search.SolrSettings.init(config.get('solr_url'),
217 config.get('solr_user'),
218 config.get('solr_password'))
219 search.check_solr_schema_version()
220
221 routes_map = routing.make_map()
222 config['routes.map'] = routes_map
223 # The RoutesMiddleware needs its mapper updating if it exists
224 if 'routes.middleware' in config:
225 config['routes.middleware'].mapper = routes_map
226 # routes.named_routes is a CKAN thing
227 config['routes.named_routes'] = routing.named_routes
228 config['pylons.app_globals'] = app_globals.app_globals
229 # initialise the globals
230 app_globals.app_globals._init()
231
232 helpers.load_plugin_helpers()
233 config['pylons.h'] = helpers.helper_functions
234
235 # Templates and CSS loading from configuration
236 valid_base_templates_folder_names = ['templates', 'templates-bs2']
237 templates = config.get('ckan.base_templates_folder', 'templates')
238 config['ckan.base_templates_folder'] = templates
239
240 if templates not in valid_base_templates_folder_names:
241 raise CkanConfigurationException(
242 'You provided an invalid value for ckan.base_templates_folder. '
243 'Possible values are: "templates" and "templates-bs2".'
244 )
245
246 jinja2_templates_path = os.path.join(root, templates)
247 log.info('Loading templates from %s' % jinja2_templates_path)
248 template_paths = [jinja2_templates_path]
249
250 extra_template_paths = config.get('extra_template_paths', '')
251 if extra_template_paths:
252 # must be first for them to override defaults
253 template_paths = extra_template_paths.split(',') + template_paths
254 config['computed_template_paths'] = template_paths
255
256 # Set the default language for validation messages from formencode
257 # to what is set as the default locale in the config
258 default_lang = config.get('ckan.locale_default', 'en')
259 formencode.api.set_stdtranslation(domain="FormEncode",
260 languages=[default_lang])
261
262 # Markdown ignores the logger config, so to get rid of excessive
263 # markdown debug messages in the log, set it to the level of the
264 # root logger.
265 logging.getLogger("MARKDOWN").setLevel(logging.getLogger().level)
266
267 # Create Jinja2 environment
268 env = jinja_extensions.Environment(
269 **jinja_extensions.get_jinja_env_options())
270 env.install_gettext_callables(_, ungettext, newstyle=True)
271 # custom filters
272 env.filters['empty_and_escape'] = jinja_extensions.empty_and_escape
273 config['pylons.app_globals'].jinja_env = env
274
275 # CONFIGURATION OPTIONS HERE (note: all config options will override
276 # any Pylons config options)
277
278 # Initialize SQLAlchemy
279 engine = sqlalchemy.engine_from_config(config, client_encoding='utf8')
280 model.init_model(engine)
281
282 for plugin in p.PluginImplementations(p.IConfigurable):
283 plugin.configure(config)
284
285 # reset the template cache - we do this here so that when we load the
286 # environment it is clean
287 render.reset_template_info_cache()
288
289 # clear other caches
290 logic.clear_actions_cache()
291 logic.clear_validators_cache()
292 authz.clear_auth_functions_cache()
293
294 # Here we create the site user if they are not already in the database
295 try:
296 logic.get_action('get_site_user')({'ignore_auth': True}, None)
297 except (sqlalchemy.exc.ProgrammingError, sqlalchemy.exc.OperationalError):
298 # (ProgrammingError for Postgres, OperationalError for SQLite)
299 # The database is not initialised. This is a bit dirty. This occurs
300 # when running tests.
301 pass
302 except sqlalchemy.exc.InternalError:
303 # The database is not initialised. Travis hits this
304 pass
305
306 # Close current session and open database connections to ensure a clean
307 # clean environment even if an error occurs later on
308 model.Session.remove()
309 model.Session.bind.dispose()
310
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ckan/config/environment.py b/ckan/config/environment.py
--- a/ckan/config/environment.py
+++ b/ckan/config/environment.py
@@ -143,7 +143,8 @@
'smtp.starttls': 'CKAN_SMTP_STARTTLS',
'smtp.user': 'CKAN_SMTP_USER',
'smtp.password': 'CKAN_SMTP_PASSWORD',
- 'smtp.mail_from': 'CKAN_SMTP_MAIL_FROM'
+ 'smtp.mail_from': 'CKAN_SMTP_MAIL_FROM',
+ 'ckan.max_resource_size': 'CKAN_MAX_UPLOAD_SIZE_MB'
}
# End CONFIG_FROM_ENV_VARS
| {"golden_diff": "diff --git a/ckan/config/environment.py b/ckan/config/environment.py\n--- a/ckan/config/environment.py\n+++ b/ckan/config/environment.py\n@@ -143,7 +143,8 @@\n 'smtp.starttls': 'CKAN_SMTP_STARTTLS',\n 'smtp.user': 'CKAN_SMTP_USER',\n 'smtp.password': 'CKAN_SMTP_PASSWORD',\n- 'smtp.mail_from': 'CKAN_SMTP_MAIL_FROM'\n+ 'smtp.mail_from': 'CKAN_SMTP_MAIL_FROM',\n+ 'ckan.max_resource_size': 'CKAN_MAX_UPLOAD_SIZE_MB'\n }\n # End CONFIG_FROM_ENV_VARS\n", "issue": "Specify max resource size in an env variable\n### CKAN Version if known (or site URL)\r\n2.7.2 (applies to head of master as well)\r\n\r\n### Please describe the expected behaviour\r\nWhen deploying CKAN through docker it is not ideal to have to edit the configuration file in order to set maximum resource upload size. This would be better done through environment variables.\r\n\r\n### Please describe the actual behaviour\r\nCurrently the ckan.max_upload_size config can only changed via editing the config .ini files.\r\n\r\n### What steps can be taken to reproduce the issue? \r\nRunning CKAN through Docker via Docker Compose.\n", "before_files": [{"content": "# encoding: utf-8\n\n'''CKAN environment configuration'''\nimport os\nimport logging\nimport warnings\nfrom urlparse import urlparse\nimport pytz\n\nimport sqlalchemy\nfrom pylons import config as pylons_config\nimport formencode\n\nimport ckan.config.routing as routing\nimport ckan.model as model\nimport ckan.plugins as p\nimport ckan.lib.helpers as helpers\nimport ckan.lib.app_globals as app_globals\nfrom ckan.lib.redis import is_redis_available\nimport ckan.lib.render as render\nimport ckan.lib.search as search\nimport ckan.logic as logic\nimport ckan.authz as authz\nimport ckan.lib.jinja_extensions as jinja_extensions\nfrom ckan.lib.i18n import build_js_translations\n\nfrom ckan.common import _, ungettext, config\nfrom ckan.exceptions import CkanConfigurationException\n\nlog = logging.getLogger(__name__)\n\n\n# Suppress benign warning 'Unbuilt egg for setuptools'\nwarnings.simplefilter('ignore', UserWarning)\n\n\ndef load_environment(global_conf, app_conf):\n \"\"\"\n Configure the Pylons environment via the ``pylons.config`` object. This\n code should only need to be run once.\n \"\"\"\n # this must be run at a time when the env is semi-setup, thus inlined here.\n # Required by the deliverance plugin and iATI\n from pylons.wsgiapp import PylonsApp\n import pkg_resources\n find_controller_generic = PylonsApp.find_controller\n\n # This is from pylons 1.0 source, will monkey-patch into 0.9.7\n def find_controller(self, controller):\n if controller in self.controller_classes:\n return self.controller_classes[controller]\n # Check to see if its a dotted name\n if '.' 
in controller or ':' in controller:\n ep = pkg_resources.EntryPoint.parse('x={0}'.format(controller))\n\n if hasattr(ep, 'resolve'):\n # setuptools >= 10.2\n mycontroller = ep.resolve()\n else:\n # setuptools >= 11.3\n mycontroller = ep.load(False)\n\n self.controller_classes[controller] = mycontroller\n return mycontroller\n return find_controller_generic(self, controller)\n PylonsApp.find_controller = find_controller\n\n os.environ['CKAN_CONFIG'] = global_conf['__file__']\n\n # Pylons paths\n root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n valid_base_public_folder_names = ['public', 'public-bs2']\n static_files = app_conf.get('ckan.base_public_folder', 'public')\n app_conf['ckan.base_public_folder'] = static_files\n\n if static_files not in valid_base_public_folder_names:\n raise CkanConfigurationException(\n 'You provided an invalid value for ckan.base_public_folder. '\n 'Possible values are: \"public\" and \"public-bs2\".'\n )\n\n log.info('Loading static files from %s' % static_files)\n paths = dict(root=root,\n controllers=os.path.join(root, 'controllers'),\n static_files=os.path.join(root, static_files),\n templates=[])\n\n # Initialize main CKAN config object\n config.update(global_conf)\n config.update(app_conf)\n\n # Initialize Pylons own config object\n pylons_config.init_app(global_conf, app_conf, package='ckan', paths=paths)\n\n # Update the main CKAN config object with the Pylons specific stuff, as it\n # quite hard to keep them separated. This should be removed once Pylons\n # support is dropped\n config.update(pylons_config)\n\n # Setup the SQLAlchemy database engine\n # Suppress a couple of sqlalchemy warnings\n msgs = ['^Unicode type received non-unicode bind param value',\n \"^Did not recognize type 'BIGINT' of column 'size'\",\n \"^Did not recognize type 'tsvector' of column 'search_vector'\"\n ]\n for msg in msgs:\n warnings.filterwarnings('ignore', msg, sqlalchemy.exc.SAWarning)\n\n # load all CKAN plugins\n p.load_all()\n\n # Check Redis availability\n if not is_redis_available():\n log.critical('Could not connect to Redis.')\n\n app_globals.reset()\n\n # issue #3260: remove idle transaction\n # Session that was used for getting all config params nor committed,\n # neither removed and we have idle connection as result\n model.Session.commit()\n\n # Build JavaScript translations. Must be done after plugins have\n # been loaded.\n build_js_translations()\n\n\n# A mapping of config settings that can be overridden by env vars.\n# Note: Do not remove the following lines, they are used in the docs\n# Start CONFIG_FROM_ENV_VARS\nCONFIG_FROM_ENV_VARS = {\n 'sqlalchemy.url': 'CKAN_SQLALCHEMY_URL',\n 'ckan.datastore.write_url': 'CKAN_DATASTORE_WRITE_URL',\n 'ckan.datastore.read_url': 'CKAN_DATASTORE_READ_URL',\n 'ckan.redis.url': 'CKAN_REDIS_URL',\n 'solr_url': 'CKAN_SOLR_URL',\n 'ckan.site_id': 'CKAN_SITE_ID',\n 'ckan.site_url': 'CKAN_SITE_URL',\n 'ckan.storage_path': 'CKAN_STORAGE_PATH',\n 'ckan.datapusher.url': 'CKAN_DATAPUSHER_URL',\n 'smtp.server': 'CKAN_SMTP_SERVER',\n 'smtp.starttls': 'CKAN_SMTP_STARTTLS',\n 'smtp.user': 'CKAN_SMTP_USER',\n 'smtp.password': 'CKAN_SMTP_PASSWORD',\n 'smtp.mail_from': 'CKAN_SMTP_MAIL_FROM'\n}\n# End CONFIG_FROM_ENV_VARS\n\n\ndef update_config():\n ''' This code needs to be run when the config is changed to take those\n changes into account. 
It is called whenever a plugin is loaded as the\n plugin might have changed the config values (for instance it might\n change ckan.site_url) '''\n\n for plugin in p.PluginImplementations(p.IConfigurer):\n # must do update in place as this does not work:\n # config = plugin.update_config(config)\n plugin.update_config(config)\n\n # Set whitelisted env vars on config object\n # This is set up before globals are initialized\n\n ckan_db = os.environ.get('CKAN_DB', None)\n if ckan_db:\n msg = 'Setting CKAN_DB as an env var is deprecated and will be' \\\n ' removed in a future release. Use CKAN_SQLALCHEMY_URL instead.'\n log.warn(msg)\n config['sqlalchemy.url'] = ckan_db\n\n for option in CONFIG_FROM_ENV_VARS:\n from_env = os.environ.get(CONFIG_FROM_ENV_VARS[option], None)\n if from_env:\n config[option] = from_env\n\n root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n site_url = config.get('ckan.site_url', '')\n if not site_url:\n raise RuntimeError(\n 'ckan.site_url is not configured and it must have a value.'\n ' Please amend your .ini file.')\n if not site_url.lower().startswith('http'):\n raise RuntimeError(\n 'ckan.site_url should be a full URL, including the schema '\n '(http or https)')\n\n display_timezone = config.get('ckan.display_timezone', '')\n if (display_timezone and\n display_timezone != 'server' and\n display_timezone not in pytz.all_timezones):\n raise CkanConfigurationException(\n \"ckan.display_timezone is not 'server' or a valid timezone\"\n )\n\n # Remove backslash from site_url if present\n config['ckan.site_url'] = config['ckan.site_url'].rstrip('/')\n\n ckan_host = config['ckan.host'] = urlparse(site_url).netloc\n if config.get('ckan.site_id') is None:\n if ':' in ckan_host:\n ckan_host, port = ckan_host.split(':')\n assert ckan_host, 'You need to configure ckan.site_url or ' \\\n 'ckan.site_id for SOLR search-index rebuild to work.'\n config['ckan.site_id'] = ckan_host\n\n # ensure that a favicon has been set\n favicon = config.get('ckan.favicon', '/base/images/ckan.ico')\n config['ckan.favicon'] = favicon\n\n # Init SOLR settings and check if the schema is compatible\n # from ckan.lib.search import SolrSettings, check_solr_schema_version\n\n # lib.search is imported here as we need the config enabled and parsed\n search.SolrSettings.init(config.get('solr_url'),\n config.get('solr_user'),\n config.get('solr_password'))\n search.check_solr_schema_version()\n\n routes_map = routing.make_map()\n config['routes.map'] = routes_map\n # The RoutesMiddleware needs its mapper updating if it exists\n if 'routes.middleware' in config:\n config['routes.middleware'].mapper = routes_map\n # routes.named_routes is a CKAN thing\n config['routes.named_routes'] = routing.named_routes\n config['pylons.app_globals'] = app_globals.app_globals\n # initialise the globals\n app_globals.app_globals._init()\n\n helpers.load_plugin_helpers()\n config['pylons.h'] = helpers.helper_functions\n\n # Templates and CSS loading from configuration\n valid_base_templates_folder_names = ['templates', 'templates-bs2']\n templates = config.get('ckan.base_templates_folder', 'templates')\n config['ckan.base_templates_folder'] = templates\n\n if templates not in valid_base_templates_folder_names:\n raise CkanConfigurationException(\n 'You provided an invalid value for ckan.base_templates_folder. 
'\n 'Possible values are: \"templates\" and \"templates-bs2\".'\n )\n\n jinja2_templates_path = os.path.join(root, templates)\n log.info('Loading templates from %s' % jinja2_templates_path)\n template_paths = [jinja2_templates_path]\n\n extra_template_paths = config.get('extra_template_paths', '')\n if extra_template_paths:\n # must be first for them to override defaults\n template_paths = extra_template_paths.split(',') + template_paths\n config['computed_template_paths'] = template_paths\n\n # Set the default language for validation messages from formencode\n # to what is set as the default locale in the config\n default_lang = config.get('ckan.locale_default', 'en')\n formencode.api.set_stdtranslation(domain=\"FormEncode\",\n languages=[default_lang])\n\n # Markdown ignores the logger config, so to get rid of excessive\n # markdown debug messages in the log, set it to the level of the\n # root logger.\n logging.getLogger(\"MARKDOWN\").setLevel(logging.getLogger().level)\n\n # Create Jinja2 environment\n env = jinja_extensions.Environment(\n **jinja_extensions.get_jinja_env_options())\n env.install_gettext_callables(_, ungettext, newstyle=True)\n # custom filters\n env.filters['empty_and_escape'] = jinja_extensions.empty_and_escape\n config['pylons.app_globals'].jinja_env = env\n\n # CONFIGURATION OPTIONS HERE (note: all config options will override\n # any Pylons config options)\n\n # Initialize SQLAlchemy\n engine = sqlalchemy.engine_from_config(config, client_encoding='utf8')\n model.init_model(engine)\n\n for plugin in p.PluginImplementations(p.IConfigurable):\n plugin.configure(config)\n\n # reset the template cache - we do this here so that when we load the\n # environment it is clean\n render.reset_template_info_cache()\n\n # clear other caches\n logic.clear_actions_cache()\n logic.clear_validators_cache()\n authz.clear_auth_functions_cache()\n\n # Here we create the site user if they are not already in the database\n try:\n logic.get_action('get_site_user')({'ignore_auth': True}, None)\n except (sqlalchemy.exc.ProgrammingError, sqlalchemy.exc.OperationalError):\n # (ProgrammingError for Postgres, OperationalError for SQLite)\n # The database is not initialised. This is a bit dirty. This occurs\n # when running tests.\n pass\n except sqlalchemy.exc.InternalError:\n # The database is not initialised. 
Travis hits this\n pass\n\n # Close current session and open database connections to ensure a clean\n # clean environment even if an error occurs later on\n model.Session.remove()\n model.Session.bind.dispose()\n", "path": "ckan/config/environment.py"}], "after_files": [{"content": "# encoding: utf-8\n\n'''CKAN environment configuration'''\nimport os\nimport logging\nimport warnings\nfrom urlparse import urlparse\nimport pytz\n\nimport sqlalchemy\nfrom pylons import config as pylons_config\nimport formencode\n\nimport ckan.config.routing as routing\nimport ckan.model as model\nimport ckan.plugins as p\nimport ckan.lib.helpers as helpers\nimport ckan.lib.app_globals as app_globals\nfrom ckan.lib.redis import is_redis_available\nimport ckan.lib.render as render\nimport ckan.lib.search as search\nimport ckan.logic as logic\nimport ckan.authz as authz\nimport ckan.lib.jinja_extensions as jinja_extensions\nfrom ckan.lib.i18n import build_js_translations\n\nfrom ckan.common import _, ungettext, config\nfrom ckan.exceptions import CkanConfigurationException\n\nlog = logging.getLogger(__name__)\n\n\n# Suppress benign warning 'Unbuilt egg for setuptools'\nwarnings.simplefilter('ignore', UserWarning)\n\n\ndef load_environment(global_conf, app_conf):\n \"\"\"\n Configure the Pylons environment via the ``pylons.config`` object. This\n code should only need to be run once.\n \"\"\"\n # this must be run at a time when the env is semi-setup, thus inlined here.\n # Required by the deliverance plugin and iATI\n from pylons.wsgiapp import PylonsApp\n import pkg_resources\n find_controller_generic = PylonsApp.find_controller\n\n # This is from pylons 1.0 source, will monkey-patch into 0.9.7\n def find_controller(self, controller):\n if controller in self.controller_classes:\n return self.controller_classes[controller]\n # Check to see if its a dotted name\n if '.' in controller or ':' in controller:\n ep = pkg_resources.EntryPoint.parse('x={0}'.format(controller))\n\n if hasattr(ep, 'resolve'):\n # setuptools >= 10.2\n mycontroller = ep.resolve()\n else:\n # setuptools >= 11.3\n mycontroller = ep.load(False)\n\n self.controller_classes[controller] = mycontroller\n return mycontroller\n return find_controller_generic(self, controller)\n PylonsApp.find_controller = find_controller\n\n os.environ['CKAN_CONFIG'] = global_conf['__file__']\n\n # Pylons paths\n root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n valid_base_public_folder_names = ['public', 'public-bs2']\n static_files = app_conf.get('ckan.base_public_folder', 'public')\n app_conf['ckan.base_public_folder'] = static_files\n\n if static_files not in valid_base_public_folder_names:\n raise CkanConfigurationException(\n 'You provided an invalid value for ckan.base_public_folder. '\n 'Possible values are: \"public\" and \"public-bs2\".'\n )\n\n log.info('Loading static files from %s' % static_files)\n paths = dict(root=root,\n controllers=os.path.join(root, 'controllers'),\n static_files=os.path.join(root, static_files),\n templates=[])\n\n # Initialize main CKAN config object\n config.update(global_conf)\n config.update(app_conf)\n\n # Initialize Pylons own config object\n pylons_config.init_app(global_conf, app_conf, package='ckan', paths=paths)\n\n # Update the main CKAN config object with the Pylons specific stuff, as it\n # quite hard to keep them separated. 
This should be removed once Pylons\n # support is dropped\n config.update(pylons_config)\n\n # Setup the SQLAlchemy database engine\n # Suppress a couple of sqlalchemy warnings\n msgs = ['^Unicode type received non-unicode bind param value',\n \"^Did not recognize type 'BIGINT' of column 'size'\",\n \"^Did not recognize type 'tsvector' of column 'search_vector'\"\n ]\n for msg in msgs:\n warnings.filterwarnings('ignore', msg, sqlalchemy.exc.SAWarning)\n\n # load all CKAN plugins\n p.load_all()\n\n # Check Redis availability\n if not is_redis_available():\n log.critical('Could not connect to Redis.')\n\n app_globals.reset()\n\n # issue #3260: remove idle transaction\n # Session that was used for getting all config params nor committed,\n # neither removed and we have idle connection as result\n model.Session.commit()\n\n # Build JavaScript translations. Must be done after plugins have\n # been loaded.\n build_js_translations()\n\n\n# A mapping of config settings that can be overridden by env vars.\n# Note: Do not remove the following lines, they are used in the docs\n# Start CONFIG_FROM_ENV_VARS\nCONFIG_FROM_ENV_VARS = {\n 'sqlalchemy.url': 'CKAN_SQLALCHEMY_URL',\n 'ckan.datastore.write_url': 'CKAN_DATASTORE_WRITE_URL',\n 'ckan.datastore.read_url': 'CKAN_DATASTORE_READ_URL',\n 'ckan.redis.url': 'CKAN_REDIS_URL',\n 'solr_url': 'CKAN_SOLR_URL',\n 'ckan.site_id': 'CKAN_SITE_ID',\n 'ckan.site_url': 'CKAN_SITE_URL',\n 'ckan.storage_path': 'CKAN_STORAGE_PATH',\n 'ckan.datapusher.url': 'CKAN_DATAPUSHER_URL',\n 'smtp.server': 'CKAN_SMTP_SERVER',\n 'smtp.starttls': 'CKAN_SMTP_STARTTLS',\n 'smtp.user': 'CKAN_SMTP_USER',\n 'smtp.password': 'CKAN_SMTP_PASSWORD',\n 'smtp.mail_from': 'CKAN_SMTP_MAIL_FROM',\n 'ckan.max_resource_size': 'CKAN_MAX_UPLOAD_SIZE_MB'\n}\n# End CONFIG_FROM_ENV_VARS\n\n\ndef update_config():\n ''' This code needs to be run when the config is changed to take those\n changes into account. It is called whenever a plugin is loaded as the\n plugin might have changed the config values (for instance it might\n change ckan.site_url) '''\n\n for plugin in p.PluginImplementations(p.IConfigurer):\n # must do update in place as this does not work:\n # config = plugin.update_config(config)\n plugin.update_config(config)\n\n # Set whitelisted env vars on config object\n # This is set up before globals are initialized\n\n ckan_db = os.environ.get('CKAN_DB', None)\n if ckan_db:\n msg = 'Setting CKAN_DB as an env var is deprecated and will be' \\\n ' removed in a future release. 
Use CKAN_SQLALCHEMY_URL instead.'\n log.warn(msg)\n config['sqlalchemy.url'] = ckan_db\n\n for option in CONFIG_FROM_ENV_VARS:\n from_env = os.environ.get(CONFIG_FROM_ENV_VARS[option], None)\n if from_env:\n config[option] = from_env\n\n root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n site_url = config.get('ckan.site_url', '')\n if not site_url:\n raise RuntimeError(\n 'ckan.site_url is not configured and it must have a value.'\n ' Please amend your .ini file.')\n if not site_url.lower().startswith('http'):\n raise RuntimeError(\n 'ckan.site_url should be a full URL, including the schema '\n '(http or https)')\n\n display_timezone = config.get('ckan.display_timezone', '')\n if (display_timezone and\n display_timezone != 'server' and\n display_timezone not in pytz.all_timezones):\n raise CkanConfigurationException(\n \"ckan.display_timezone is not 'server' or a valid timezone\"\n )\n\n # Remove backslash from site_url if present\n config['ckan.site_url'] = config['ckan.site_url'].rstrip('/')\n\n ckan_host = config['ckan.host'] = urlparse(site_url).netloc\n if config.get('ckan.site_id') is None:\n if ':' in ckan_host:\n ckan_host, port = ckan_host.split(':')\n assert ckan_host, 'You need to configure ckan.site_url or ' \\\n 'ckan.site_id for SOLR search-index rebuild to work.'\n config['ckan.site_id'] = ckan_host\n\n # ensure that a favicon has been set\n favicon = config.get('ckan.favicon', '/base/images/ckan.ico')\n config['ckan.favicon'] = favicon\n\n # Init SOLR settings and check if the schema is compatible\n # from ckan.lib.search import SolrSettings, check_solr_schema_version\n\n # lib.search is imported here as we need the config enabled and parsed\n search.SolrSettings.init(config.get('solr_url'),\n config.get('solr_user'),\n config.get('solr_password'))\n search.check_solr_schema_version()\n\n routes_map = routing.make_map()\n config['routes.map'] = routes_map\n # The RoutesMiddleware needs its mapper updating if it exists\n if 'routes.middleware' in config:\n config['routes.middleware'].mapper = routes_map\n # routes.named_routes is a CKAN thing\n config['routes.named_routes'] = routing.named_routes\n config['pylons.app_globals'] = app_globals.app_globals\n # initialise the globals\n app_globals.app_globals._init()\n\n helpers.load_plugin_helpers()\n config['pylons.h'] = helpers.helper_functions\n\n # Templates and CSS loading from configuration\n valid_base_templates_folder_names = ['templates', 'templates-bs2']\n templates = config.get('ckan.base_templates_folder', 'templates')\n config['ckan.base_templates_folder'] = templates\n\n if templates not in valid_base_templates_folder_names:\n raise CkanConfigurationException(\n 'You provided an invalid value for ckan.base_templates_folder. 
'\n 'Possible values are: \"templates\" and \"templates-bs2\".'\n )\n\n jinja2_templates_path = os.path.join(root, templates)\n log.info('Loading templates from %s' % jinja2_templates_path)\n template_paths = [jinja2_templates_path]\n\n extra_template_paths = config.get('extra_template_paths', '')\n if extra_template_paths:\n # must be first for them to override defaults\n template_paths = extra_template_paths.split(',') + template_paths\n config['computed_template_paths'] = template_paths\n\n # Set the default language for validation messages from formencode\n # to what is set as the default locale in the config\n default_lang = config.get('ckan.locale_default', 'en')\n formencode.api.set_stdtranslation(domain=\"FormEncode\",\n languages=[default_lang])\n\n # Markdown ignores the logger config, so to get rid of excessive\n # markdown debug messages in the log, set it to the level of the\n # root logger.\n logging.getLogger(\"MARKDOWN\").setLevel(logging.getLogger().level)\n\n # Create Jinja2 environment\n env = jinja_extensions.Environment(\n loader=jinja_extensions.CkanFileSystemLoader(template_paths),\n autoescape=True,\n extensions=['jinja2.ext.do', 'jinja2.ext.with_',\n jinja_extensions.SnippetExtension,\n jinja_extensions.CkanExtend,\n jinja_extensions.CkanInternationalizationExtension,\n jinja_extensions.LinkForExtension,\n jinja_extensions.ResourceExtension,\n jinja_extensions.UrlForStaticExtension,\n jinja_extensions.UrlForExtension]\n )\n env.install_gettext_callables(_, ungettext, newstyle=True)\n # custom filters\n env.filters['empty_and_escape'] = jinja_extensions.empty_and_escape\n config['pylons.app_globals'].jinja_env = env\n\n # CONFIGURATION OPTIONS HERE (note: all config options will override\n # any Pylons config options)\n\n # Initialize SQLAlchemy\n engine = sqlalchemy.engine_from_config(config, client_encoding='utf8')\n model.init_model(engine)\n\n for plugin in p.PluginImplementations(p.IConfigurable):\n plugin.configure(config)\n\n # reset the template cache - we do this here so that when we load the\n # environment it is clean\n render.reset_template_info_cache()\n\n # clear other caches\n logic.clear_actions_cache()\n logic.clear_validators_cache()\n authz.clear_auth_functions_cache()\n\n # Here we create the site user if they are not already in the database\n try:\n logic.get_action('get_site_user')({'ignore_auth': True}, None)\n except (sqlalchemy.exc.ProgrammingError, sqlalchemy.exc.OperationalError):\n # (ProgrammingError for Postgres, OperationalError for SQLite)\n # The database is not initialised. This is a bit dirty. This occurs\n # when running tests.\n pass\n except sqlalchemy.exc.InternalError:\n # The database is not initialised. Travis hits this\n pass\n\n # Close current session and open database connections to ensure a clean\n # clean environment even if an error occurs later on\n model.Session.remove()\n model.Session.bind.dispose()\n", "path": "ckan/config/environment.py"}]} | 3,914 | 143 |
gh_patches_debug_13037 | rasdani/github-patches | git_diff | psf__black-2739 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Custom cache directory to be thread/process safe
**Is your feature request related to a problem? Please describe.**
The cache files are not thread/process safe. I've checked multiple projects simultaneously in different processes but get permission errors when one process is writing to the cache while another is reading from it. On Linux the fix is pretty easy: set a different temp directory for each process using the ``XDG_CACHE_HOME`` environment variable, but there is no equivalent for Windows (there's somewhat of an equivalent, but you need a rather [specific setup](https://github.com/platformdirs/platformdirs/blob/main/src/platformdirs/windows.py#L157-L165) to use the environment variable).
**Describe the solution you'd like**
The solution that will work for my use case (and I think is the least amount of changes) is to create a custom environment variable that overrides using platformdirs to get the cache directory. Think this could look like:
```python
CACHE_DIR = Path(os.environ.get("BLACK_CACHE_DIR", user_cache_dir("black", version=__version__)))
if not CACHE_DIR.exists():
raise RuntimeError(f"{CACHE_DIR} does not exist")
```
**Describe alternatives you've considered**
1. Add a command line option to turn off caching (as this ticket, as well as #248, asked for) (guess is this is more work)
2. Add command line option to set the cache directory (guess is this is more work but not a lot)
3. Make the cache dir thread/process safe (guess is this is more work)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/black/cache.py`
Content:
```
1 """Caching of formatted files with feature-based invalidation."""
2
3 import os
4 import pickle
5 from pathlib import Path
6 import tempfile
7 from typing import Dict, Iterable, Set, Tuple
8
9 from platformdirs import user_cache_dir
10
11 from black.mode import Mode
12
13 from _black_version import version as __version__
14
15
16 # types
17 Timestamp = float
18 FileSize = int
19 CacheInfo = Tuple[Timestamp, FileSize]
20 Cache = Dict[str, CacheInfo]
21
22
23 CACHE_DIR = Path(user_cache_dir("black", version=__version__))
24
25
26 def read_cache(mode: Mode) -> Cache:
27 """Read the cache if it exists and is well formed.
28
29 If it is not well formed, the call to write_cache later should resolve the issue.
30 """
31 cache_file = get_cache_file(mode)
32 if not cache_file.exists():
33 return {}
34
35 with cache_file.open("rb") as fobj:
36 try:
37 cache: Cache = pickle.load(fobj)
38 except (pickle.UnpicklingError, ValueError, IndexError):
39 return {}
40
41 return cache
42
43
44 def get_cache_file(mode: Mode) -> Path:
45 return CACHE_DIR / f"cache.{mode.get_cache_key()}.pickle"
46
47
48 def get_cache_info(path: Path) -> CacheInfo:
49 """Return the information used to check if a file is already formatted or not."""
50 stat = path.stat()
51 return stat.st_mtime, stat.st_size
52
53
54 def filter_cached(cache: Cache, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]:
55 """Split an iterable of paths in `sources` into two sets.
56
57 The first contains paths of files that modified on disk or are not in the
58 cache. The other contains paths to non-modified files.
59 """
60 todo, done = set(), set()
61 for src in sources:
62 res_src = src.resolve()
63 if cache.get(str(res_src)) != get_cache_info(res_src):
64 todo.add(src)
65 else:
66 done.add(src)
67 return todo, done
68
69
70 def write_cache(cache: Cache, sources: Iterable[Path], mode: Mode) -> None:
71 """Update the cache file."""
72 cache_file = get_cache_file(mode)
73 try:
74 CACHE_DIR.mkdir(parents=True, exist_ok=True)
75 new_cache = {
76 **cache,
77 **{str(src.resolve()): get_cache_info(src) for src in sources},
78 }
79 with tempfile.NamedTemporaryFile(dir=str(cache_file.parent), delete=False) as f:
80 pickle.dump(new_cache, f, protocol=4)
81 os.replace(f.name, cache_file)
82 except OSError:
83 pass
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/black/cache.py b/src/black/cache.py
--- a/src/black/cache.py
+++ b/src/black/cache.py
@@ -20,7 +20,23 @@
Cache = Dict[str, CacheInfo]
-CACHE_DIR = Path(user_cache_dir("black", version=__version__))
+def get_cache_dir() -> Path:
+ """Get the cache directory used by black.
+
+ Users can customize this directory on all systems using `BLACK_CACHE_DIR`
+ environment variable. By default, the cache directory is the user cache directory
+ under the black application.
+
+ This result is immediately set to a constant `black.cache.CACHE_DIR` as to avoid
+ repeated calls.
+ """
+ # NOTE: Function mostly exists as a clean way to test getting the cache directory.
+ default_cache_dir = user_cache_dir("black", version=__version__)
+ cache_dir = Path(os.environ.get("BLACK_CACHE_DIR", default_cache_dir))
+ return cache_dir
+
+
+CACHE_DIR = get_cache_dir()
def read_cache(mode: Mode) -> Cache:
| {"golden_diff": "diff --git a/src/black/cache.py b/src/black/cache.py\n--- a/src/black/cache.py\n+++ b/src/black/cache.py\n@@ -20,7 +20,23 @@\n Cache = Dict[str, CacheInfo]\n \n \n-CACHE_DIR = Path(user_cache_dir(\"black\", version=__version__))\n+def get_cache_dir() -> Path:\n+ \"\"\"Get the cache directory used by black.\n+\n+ Users can customize this directory on all systems using `BLACK_CACHE_DIR`\n+ environment variable. By default, the cache directory is the user cache directory\n+ under the black application.\n+\n+ This result is immediately set to a constant `black.cache.CACHE_DIR` as to avoid\n+ repeated calls.\n+ \"\"\"\n+ # NOTE: Function mostly exists as a clean way to test getting the cache directory.\n+ default_cache_dir = user_cache_dir(\"black\", version=__version__)\n+ cache_dir = Path(os.environ.get(\"BLACK_CACHE_DIR\", default_cache_dir))\n+ return cache_dir\n+\n+\n+CACHE_DIR = get_cache_dir()\n \n \n def read_cache(mode: Mode) -> Cache:\n", "issue": "Custom cache directory to be thread/process safe\n**Is your feature request related to a problem? Please describe.**\r\n\r\nThe cache files are not thread/process safe. I've checked multiple projects simultaneously in different processes but get permission errors when one process is writing to the cache while another is reading from it. On linux the fix is pretty easy: set a different temp directory for each process using the ``XDG_CACHE_HOME`` environment variable but there is no equivalent for Windows (there's somewhat an equivalent but you need a rather [specific setup](https://github.com/platformdirs/platformdirs/blob/main/src/platformdirs/windows.py#L157-L165) to use the environment variable).\r\n\r\n**Describe the solution you'd like**\r\n\r\nThe solution that will work for my use case (and I think is the least amount of changes) is to create a custom environment variable that overrides using platformdirs to get the cache directory. Think this could look like:\r\n```python\r\nCACHE_DIR = Path(os.environ.get(\"BLACK_CACHE_DIR\", user_cache_dir(\"black\", version=__version__)))\r\nif not CACHE_DIR.exists():\r\n raise RuntimeError(f\"{CACHE_DIR} does not exist\")\r\n```\r\n\r\n**Describe alternatives you've considered**\r\n\r\n1. Add the command line option to turn off cacheing (as this ticket as #248 asked for) (guess is this more work)\r\n2. Add command line option to set the cache directory (guess is this is more work but not a lot)\r\n3. 
Make the cache dir thread/process safe (guess is this is more work)\n", "before_files": [{"content": "\"\"\"Caching of formatted files with feature-based invalidation.\"\"\"\n\nimport os\nimport pickle\nfrom pathlib import Path\nimport tempfile\nfrom typing import Dict, Iterable, Set, Tuple\n\nfrom platformdirs import user_cache_dir\n\nfrom black.mode import Mode\n\nfrom _black_version import version as __version__\n\n\n# types\nTimestamp = float\nFileSize = int\nCacheInfo = Tuple[Timestamp, FileSize]\nCache = Dict[str, CacheInfo]\n\n\nCACHE_DIR = Path(user_cache_dir(\"black\", version=__version__))\n\n\ndef read_cache(mode: Mode) -> Cache:\n \"\"\"Read the cache if it exists and is well formed.\n\n If it is not well formed, the call to write_cache later should resolve the issue.\n \"\"\"\n cache_file = get_cache_file(mode)\n if not cache_file.exists():\n return {}\n\n with cache_file.open(\"rb\") as fobj:\n try:\n cache: Cache = pickle.load(fobj)\n except (pickle.UnpicklingError, ValueError, IndexError):\n return {}\n\n return cache\n\n\ndef get_cache_file(mode: Mode) -> Path:\n return CACHE_DIR / f\"cache.{mode.get_cache_key()}.pickle\"\n\n\ndef get_cache_info(path: Path) -> CacheInfo:\n \"\"\"Return the information used to check if a file is already formatted or not.\"\"\"\n stat = path.stat()\n return stat.st_mtime, stat.st_size\n\n\ndef filter_cached(cache: Cache, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]:\n \"\"\"Split an iterable of paths in `sources` into two sets.\n\n The first contains paths of files that modified on disk or are not in the\n cache. The other contains paths to non-modified files.\n \"\"\"\n todo, done = set(), set()\n for src in sources:\n res_src = src.resolve()\n if cache.get(str(res_src)) != get_cache_info(res_src):\n todo.add(src)\n else:\n done.add(src)\n return todo, done\n\n\ndef write_cache(cache: Cache, sources: Iterable[Path], mode: Mode) -> None:\n \"\"\"Update the cache file.\"\"\"\n cache_file = get_cache_file(mode)\n try:\n CACHE_DIR.mkdir(parents=True, exist_ok=True)\n new_cache = {\n **cache,\n **{str(src.resolve()): get_cache_info(src) for src in sources},\n }\n with tempfile.NamedTemporaryFile(dir=str(cache_file.parent), delete=False) as f:\n pickle.dump(new_cache, f, protocol=4)\n os.replace(f.name, cache_file)\n except OSError:\n pass\n", "path": "src/black/cache.py"}], "after_files": [{"content": "\"\"\"Caching of formatted files with feature-based invalidation.\"\"\"\n\nimport os\nimport pickle\nfrom pathlib import Path\nimport tempfile\nfrom typing import Dict, Iterable, Set, Tuple\n\nfrom platformdirs import user_cache_dir\n\nfrom black.mode import Mode\n\nfrom _black_version import version as __version__\n\n\n# types\nTimestamp = float\nFileSize = int\nCacheInfo = Tuple[Timestamp, FileSize]\nCache = Dict[str, CacheInfo]\n\n\ndef get_cache_dir() -> Path:\n \"\"\"Get the cache directory used by black.\n\n Users can customize this directory on all systems using `BLACK_CACHE_DIR`\n environment variable. 
By default, the cache directory is the user cache directory\n under the black application.\n\n This result is immediately set to a constant `black.cache.CACHE_DIR` as to avoid\n repeated calls.\n \"\"\"\n # NOTE: Function mostly exists as a clean way to test getting the cache directory.\n default_cache_dir = user_cache_dir(\"black\", version=__version__)\n cache_dir = Path(os.environ.get(\"BLACK_CACHE_DIR\", default_cache_dir))\n return cache_dir\n\n\nCACHE_DIR = get_cache_dir()\n\n\ndef read_cache(mode: Mode) -> Cache:\n \"\"\"Read the cache if it exists and is well formed.\n\n If it is not well formed, the call to write_cache later should resolve the issue.\n \"\"\"\n cache_file = get_cache_file(mode)\n if not cache_file.exists():\n return {}\n\n with cache_file.open(\"rb\") as fobj:\n try:\n cache: Cache = pickle.load(fobj)\n except (pickle.UnpicklingError, ValueError, IndexError):\n return {}\n\n return cache\n\n\ndef get_cache_file(mode: Mode) -> Path:\n return CACHE_DIR / f\"cache.{mode.get_cache_key()}.pickle\"\n\n\ndef get_cache_info(path: Path) -> CacheInfo:\n \"\"\"Return the information used to check if a file is already formatted or not.\"\"\"\n stat = path.stat()\n return stat.st_mtime, stat.st_size\n\n\ndef filter_cached(cache: Cache, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]:\n \"\"\"Split an iterable of paths in `sources` into two sets.\n\n The first contains paths of files that modified on disk or are not in the\n cache. The other contains paths to non-modified files.\n \"\"\"\n todo, done = set(), set()\n for src in sources:\n res_src = src.resolve()\n if cache.get(str(res_src)) != get_cache_info(res_src):\n todo.add(src)\n else:\n done.add(src)\n return todo, done\n\n\ndef write_cache(cache: Cache, sources: Iterable[Path], mode: Mode) -> None:\n \"\"\"Update the cache file.\"\"\"\n cache_file = get_cache_file(mode)\n try:\n CACHE_DIR.mkdir(parents=True, exist_ok=True)\n new_cache = {\n **cache,\n **{str(src.resolve()): get_cache_info(src) for src in sources},\n }\n with tempfile.NamedTemporaryFile(dir=str(cache_file.parent), delete=False) as f:\n pickle.dump(new_cache, f, protocol=4)\n os.replace(f.name, cache_file)\n except OSError:\n pass\n", "path": "src/black/cache.py"}]} | 1,297 | 236 |
gh_patches_debug_1081 | rasdani/github-patches | git_diff | mlcommons__GaNDLF-315 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add an easy way to verify installation
**Is your feature request related to a problem? Please describe.**
Currently, we are asking users to run specific commands to verify installation, which can be cumbersome.
**Describe the solution you'd like**
It would be great if this could be put in a script (and extended/updated as needed).
**Describe alternatives you've considered**
N.A.
**Additional context**
N.A.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 """The setup script."""
4
5
6 import os
7 from setuptools import setup, find_packages
8 from setuptools.command.install import install
9 from setuptools.command.develop import develop
10 from setuptools.command.egg_info import egg_info
11
12 with open("README.md") as readme_file:
13 readme = readme_file.read()
14
15
16 def git_submodule_update():
17 ## submodule update
18 os.system("git submodule update --init --recursive")
19
20
21 class CustomInstallCommand(install):
22 def run(self):
23 install.run(self)
24 git_submodule_update()
25
26
27 class CustomDevelopCommand(develop):
28 def run(self):
29 develop.run(self)
30 git_submodule_update()
31
32
33 class CustomEggInfoCommand(egg_info):
34 def run(self):
35 egg_info.run(self)
36 git_submodule_update()
37
38
39 # read version.py
40 import sys, re
41
42 try:
43 filepath = "GANDLF/version.py"
44 version_file = open(filepath)
45 (__version__,) = re.findall('__version__ = "(.*)"', version_file.read())
46
47 except Exception as error:
48 __version__ = "0.0.1"
49 sys.stderr.write("Warning: Could not open '%s' due %s\n" % (filepath, error))
50
51 requirements = [
52 "black",
53 "numpy==1.21.0",
54 "scipy",
55 "SimpleITK==2.1.0",
56 "torch>=1.7",
57 "torchvision",
58 "tqdm",
59 "torchio==0.18.57",
60 "pandas",
61 "pylint",
62 "scikit-learn==0.23.1",
63 "pickle5==0.0.11",
64 "setuptools",
65 "seaborn",
66 "pyyaml",
67 "openslide-python",
68 "scikit-image",
69 "matplotlib",
70 "requests>=2.25.0",
71 "pyvips",
72 "pytest",
73 "coverage",
74 "pytest-cov",
75 "psutil",
76 "medcam",
77 "opencv-python",
78 "torchmetrics",
79 "OpenPatchMiner==0.1.6",
80 "pydicom",
81 ]
82
83 setup(
84 name="GANDLF",
85 version=__version__,
86 author="Jose Agraz, Vinayak Ahluwalia, Bhakti Baheti, Spyridon Bakas, Ujjwal Baid, Megh Bhalerao, Brandon Edwards, Karol Gotkowski, Caleb Grenko, Orhun Güley, Ibrahim Ethem Hamamci, Sarthak Pati, Micah Sheller, Juliia Skobleva, Siddhesh Thakur, Spiros Thermos", # alphabetical order
87 author_email="software@cbica.upenn.edu",
88 python_requires=">=3.6",
89 packages=find_packages(),
90 cmdclass={ # this ensures git_submodule_update is called during install
91 "install": CustomInstallCommand,
92 "develop": CustomDevelopCommand,
93 "egg_info": CustomEggInfoCommand,
94 },
95 scripts=[
96 "gandlf_run",
97 "gandlf_constructCSV",
98 "gandlf_collectStats",
99 "gandlf_patchMiner",
100 "gandlf_preprocess",
101 "gandlf_anonymizer",
102 ],
103 classifiers=[
104 "Development Status :: 3 - Alpha",
105 "Intended Audience :: Science/Research",
106 "License :: OSI Approved :: BSD License",
107 "Natural Language :: English",
108 "Operating System :: OS Independent",
109 "Programming Language :: Python :: 3.7",
110 "Programming Language :: Python :: 3.8",
111 "Programming Language :: Python :: 3.9",
112 "Topic :: Scientific/Engineering :: Medical Science Apps",
113 ],
114 description=(
115 "PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging."
116 ),
117 install_requires=requirements,
118 license="BSD-3-Clause License",
119 long_description=readme,
120 long_description_content_type="text/markdown",
121 include_package_data=True,
122 keywords="semantic, segmentation, regression, classification, data-augmentation, medical-imaging",
123 zip_safe=False,
124 )
125
126 ## windows vips installation
127 if os.name == "nt": # proceed for windows
128 from pathlib import Path
129
130 # download and extract if main dll is absent
131 if not Path("./vips/vips-dev-8.10/bin/libvips-42.dll").exists():
132 print("Downloading and extracting VIPS for Windows")
133 url = "https://github.com/libvips/libvips/releases/download/v8.10.2/vips-dev-w64-all-8.10.2.zip"
134 zip_to_extract = "./vips.zip"
135 import urllib.request, zipfile
136
137 urllib.request.urlretrieve(url, zip_to_extract)
138 z = zipfile.ZipFile(zip_to_extract)
139 z.extractall("./vips")
140 z.close()
141 os.remove(zip_to_extract)
142
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -99,6 +99,7 @@
"gandlf_patchMiner",
"gandlf_preprocess",
"gandlf_anonymizer",
+ "gandlf_verifyInstall",
],
classifiers=[
"Development Status :: 3 - Alpha",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -99,6 +99,7 @@\n \"gandlf_patchMiner\",\n \"gandlf_preprocess\",\n \"gandlf_anonymizer\",\n+ \"gandlf_verifyInstall\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n", "issue": "Add an easy way to verify installation\n**Is your feature request related to a problem? Please describe.**\r\nCurrently, we are asking users to run specific commands to verify installation, which can be cumbursome.\r\n\r\n**Describe the solution you'd like**\r\nIt would be great if this could put in a script (and extended/updated as needed).\r\n\r\n**Describe alternatives you've considered**\r\nN.A.\r\n\r\n**Additional context**\r\nN.A.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\n\nimport os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\nfrom setuptools.command.egg_info import egg_info\n\nwith open(\"README.md\") as readme_file:\n readme = readme_file.read()\n\n\ndef git_submodule_update():\n ## submodule update\n os.system(\"git submodule update --init --recursive\")\n\n\nclass CustomInstallCommand(install):\n def run(self):\n install.run(self)\n git_submodule_update()\n\n\nclass CustomDevelopCommand(develop):\n def run(self):\n develop.run(self)\n git_submodule_update()\n\n\nclass CustomEggInfoCommand(egg_info):\n def run(self):\n egg_info.run(self)\n git_submodule_update()\n\n\n# read version.py\nimport sys, re\n\ntry:\n filepath = \"GANDLF/version.py\"\n version_file = open(filepath)\n (__version__,) = re.findall('__version__ = \"(.*)\"', version_file.read())\n\nexcept Exception as error:\n __version__ = \"0.0.1\"\n sys.stderr.write(\"Warning: Could not open '%s' due %s\\n\" % (filepath, error))\n\nrequirements = [\n \"black\",\n \"numpy==1.21.0\",\n \"scipy\",\n \"SimpleITK==2.1.0\",\n \"torch>=1.7\",\n \"torchvision\",\n \"tqdm\",\n \"torchio==0.18.57\",\n \"pandas\",\n \"pylint\",\n \"scikit-learn==0.23.1\",\n \"pickle5==0.0.11\",\n \"setuptools\",\n \"seaborn\",\n \"pyyaml\",\n \"openslide-python\",\n \"scikit-image\",\n \"matplotlib\",\n \"requests>=2.25.0\",\n \"pyvips\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n \"psutil\",\n \"medcam\",\n \"opencv-python\",\n \"torchmetrics\",\n \"OpenPatchMiner==0.1.6\",\n \"pydicom\",\n]\n\nsetup(\n name=\"GANDLF\",\n version=__version__,\n author=\"Jose Agraz, Vinayak Ahluwalia, Bhakti Baheti, Spyridon Bakas, Ujjwal Baid, Megh Bhalerao, Brandon Edwards, Karol Gotkowski, Caleb Grenko, Orhun G\u00fcley, Ibrahim Ethem Hamamci, Sarthak Pati, Micah Sheller, Juliia Skobleva, Siddhesh Thakur, Spiros Thermos\", # alphabetical order\n author_email=\"software@cbica.upenn.edu\",\n python_requires=\">=3.6\",\n packages=find_packages(),\n cmdclass={ # this ensures git_submodule_update is called during install\n \"install\": CustomInstallCommand,\n \"develop\": CustomDevelopCommand,\n \"egg_info\": CustomEggInfoCommand,\n },\n scripts=[\n \"gandlf_run\",\n \"gandlf_constructCSV\",\n \"gandlf_collectStats\",\n \"gandlf_patchMiner\",\n \"gandlf_preprocess\",\n \"gandlf_anonymizer\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python 
:: 3.9\",\n \"Topic :: Scientific/Engineering :: Medical Science Apps\",\n ],\n description=(\n \"PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging.\"\n ),\n install_requires=requirements,\n license=\"BSD-3-Clause License\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n keywords=\"semantic, segmentation, regression, classification, data-augmentation, medical-imaging\",\n zip_safe=False,\n)\n\n## windows vips installation\nif os.name == \"nt\": # proceed for windows\n from pathlib import Path\n\n # download and extract if main dll is absent\n if not Path(\"./vips/vips-dev-8.10/bin/libvips-42.dll\").exists():\n print(\"Downloading and extracting VIPS for Windows\")\n url = \"https://github.com/libvips/libvips/releases/download/v8.10.2/vips-dev-w64-all-8.10.2.zip\"\n zip_to_extract = \"./vips.zip\"\n import urllib.request, zipfile\n\n urllib.request.urlretrieve(url, zip_to_extract)\n z = zipfile.ZipFile(zip_to_extract)\n z.extractall(\"./vips\")\n z.close()\n os.remove(zip_to_extract)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\n\nimport os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\nfrom setuptools.command.egg_info import egg_info\n\nwith open(\"README.md\") as readme_file:\n readme = readme_file.read()\n\n\ndef git_submodule_update():\n ## submodule update\n os.system(\"git submodule update --init --recursive\")\n\n\nclass CustomInstallCommand(install):\n def run(self):\n install.run(self)\n git_submodule_update()\n\n\nclass CustomDevelopCommand(develop):\n def run(self):\n develop.run(self)\n git_submodule_update()\n\n\nclass CustomEggInfoCommand(egg_info):\n def run(self):\n egg_info.run(self)\n git_submodule_update()\n\n\n# read version.py\nimport sys, re\n\ntry:\n filepath = \"GANDLF/version.py\"\n version_file = open(filepath)\n (__version__,) = re.findall('__version__ = \"(.*)\"', version_file.read())\n\nexcept Exception as error:\n __version__ = \"0.0.1\"\n sys.stderr.write(\"Warning: Could not open '%s' due %s\\n\" % (filepath, error))\n\nrequirements = [\n \"black\",\n \"numpy==1.21.0\",\n \"scipy\",\n \"SimpleITK==2.1.0\",\n \"torch>=1.7\",\n \"torchvision\",\n \"tqdm\",\n \"torchio==0.18.57\",\n \"pandas\",\n \"pylint\",\n \"scikit-learn==0.23.1\",\n \"pickle5==0.0.11\",\n \"setuptools\",\n \"seaborn\",\n \"pyyaml\",\n \"openslide-python\",\n \"scikit-image\",\n \"matplotlib\",\n \"requests>=2.25.0\",\n \"pyvips\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n \"psutil\",\n \"medcam\",\n \"opencv-python\",\n \"torchmetrics\",\n \"OpenPatchMiner==0.1.6\",\n \"pydicom\",\n]\n\nsetup(\n name=\"GANDLF\",\n version=__version__,\n author=\"Jose Agraz, Vinayak Ahluwalia, Bhakti Baheti, Spyridon Bakas, Ujjwal Baid, Megh Bhalerao, Brandon Edwards, Karol Gotkowski, Caleb Grenko, Orhun G\u00fcley, Ibrahim Ethem Hamamci, Sarthak Pati, Micah Sheller, Juliia Skobleva, Siddhesh Thakur, Spiros Thermos\", # alphabetical order\n author_email=\"software@cbica.upenn.edu\",\n python_requires=\">=3.6\",\n packages=find_packages(),\n cmdclass={ # this ensures git_submodule_update is called during install\n \"install\": CustomInstallCommand,\n \"develop\": CustomDevelopCommand,\n \"egg_info\": CustomEggInfoCommand,\n },\n scripts=[\n \"gandlf_run\",\n \"gandlf_constructCSV\",\n 
\"gandlf_collectStats\",\n \"gandlf_patchMiner\",\n \"gandlf_preprocess\",\n \"gandlf_anonymizer\",\n \"gandlf_verifyInstall\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering :: Medical Science Apps\",\n ],\n description=(\n \"PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging.\"\n ),\n install_requires=requirements,\n license=\"BSD-3-Clause License\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n keywords=\"semantic, segmentation, regression, classification, data-augmentation, medical-imaging\",\n zip_safe=False,\n)\n\n## windows vips installation\nif os.name == \"nt\": # proceed for windows\n from pathlib import Path\n\n # download and extract if main dll is absent\n if not Path(\"./vips/vips-dev-8.10/bin/libvips-42.dll\").exists():\n print(\"Downloading and extracting VIPS for Windows\")\n url = \"https://github.com/libvips/libvips/releases/download/v8.10.2/vips-dev-w64-all-8.10.2.zip\"\n zip_to_extract = \"./vips.zip\"\n import urllib.request, zipfile\n\n urllib.request.urlretrieve(url, zip_to_extract)\n z = zipfile.ZipFile(zip_to_extract)\n z.extractall(\"./vips\")\n z.close()\n os.remove(zip_to_extract)\n", "path": "setup.py"}]} | 1,739 | 82 |
gh_patches_debug_1040 | rasdani/github-patches | git_diff | falconry__falcon-1985 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
StaticRouteAsync leaves open files
When using static routes with a [`falcon.asgi.App`](https://falcon.readthedocs.io/en/stable/api/app.html#asgi-app), it seems that the `_AsyncFileReader` wrapper does not implement any `.close()` method, so files are left open.
On CPython, I wasn't able to demonstrate any practical impact of this bug as the file object in question is refcounted to 0 and garbage collected as soon as it goes out of scope. However, that isn't the case when running `uvicorn` on PyPy 3.7, as PyPy uses a different GC implementation.
Test case in point:
```
import io
import logging
import os.path
import unittest.mock
import falcon.asgi
logging.basicConfig(
format='%(asctime)s [%(levelname)s] %(message)s', level=logging.INFO)
class DebugIO(io.BytesIO):
@classmethod
def open(cls, *args, **kwargs):
return cls(b'Test data!\n')
def close(self):
logging.info(f'{self}.close()')
super().close()
app = falcon.asgi.App()
app.add_static_route('/files', '/tmp')
debug = unittest.mock.patch('io.open', DebugIO.open)
debug.start()
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `falcon/routing/static.py`
Content:
```
1 from functools import partial
2 import io
3 import os
4 import pathlib
5 import re
6
7 import falcon
8 from falcon.util.sync import get_running_loop
9
10
11 def _open_range(file_path, req_range):
12 """Open a file for a ranged request.
13
14 Args:
15 file_path (str): Path to the file to open.
16 req_range (Optional[Tuple[int, int]]): Request.range value.
17 Returns:
18 tuple: Three-member tuple of (stream, content-length, content-range).
19 If req_range is ``None`` or ignored, content-range will be
20 ``None``; otherwise, the stream will be appropriately seeked and
21 possibly bounded, and the content-range will be a tuple of
22 (start, end, size).
23 """
24 fh = io.open(file_path, 'rb')
25 size = os.fstat(fh.fileno()).st_size
26 if req_range is None:
27 return fh, size, None
28
29 start, end = req_range
30 if size == 0:
31 # NOTE(tipabu): Ignore Range headers for zero-byte files; just serve
32 # the empty body since Content-Range can't be used to express a
33 # zero-byte body.
34 return fh, 0, None
35
36 if start < 0 and end == -1:
37 # NOTE(tipabu): Special case: only want the last N bytes.
38 start = max(start, -size)
39 fh.seek(start, os.SEEK_END)
40 # NOTE(vytas): Wrap in order to prevent sendfile from being used, as
41 # its implementation was found to be buggy in many popular WSGI
42 # servers for open files with a non-zero offset.
43 return _BoundedFile(fh, -start), -start, (size + start, size - 1, size)
44
45 if start >= size:
46 fh.close()
47 raise falcon.HTTPRangeNotSatisfiable(size)
48
49 fh.seek(start)
50 if end == -1:
51 # NOTE(vytas): Wrap in order to prevent sendfile from being used, as
52 # its implementation was found to be buggy in many popular WSGI
53 # servers for open files with a non-zero offset.
54 length = size - start
55 return _BoundedFile(fh, length), length, (start, size - 1, size)
56
57 end = min(end, size - 1)
58 length = end - start + 1
59 return _BoundedFile(fh, length), length, (start, end, size)
60
61
62 class _BoundedFile:
63 """Wrap a file to only allow part of it to be read.
64
65 Args:
66 fh: The file object to wrap. Should be opened in binary mode,
67 and already seeked to an appropriate position. The object must
68 expose a ``.close()`` method.
69 length (int): Number of bytes that may be read.
70 """
71
72 def __init__(self, fh, length):
73 self.fh = fh
74 self.close = fh.close
75 self.remaining = length
76
77 def read(self, size=-1):
78 """Read the underlying file object, within the specified bounds."""
79 if size < 0:
80 size = self.remaining
81 else:
82 size = min(size, self.remaining)
83 data = self.fh.read(size)
84 self.remaining -= len(data)
85 return data
86
87
88 class StaticRoute:
89 """Represents a static route.
90
91 Args:
92 prefix (str): The path prefix to match for this route. If the
93 path in the requested URI starts with this string, the remainder
94 of the path will be appended to the source directory to
95 determine the file to serve. This is done in a secure manner
96 to prevent an attacker from requesting a file outside the
97 specified directory.
98
99 Note that static routes are matched in LIFO order, and are only
100 attempted after checking dynamic routes and sinks.
101
102 directory (Union[str, pathlib.Path]): The source directory from which to
103 serve files. Must be an absolute path.
104 downloadable (bool): Set to ``True`` to include a
105 Content-Disposition header in the response. The "filename"
106 directive is simply set to the name of the requested file.
107 fallback_filename (str): Fallback filename used when the requested file
108 is not found. Can be a relative path inside the prefix folder or
109 any valid absolute path.
110
111 Note:
112 If the fallback file is served instead of the requested file,
113 the response Content-Type header, as well as the
114 Content-Disposition header (provided it was requested with the
115 `downloadable` parameter described above), are derived from the
116 fallback filename, as opposed to the requested filename.
117 """
118
119 # NOTE(kgriffs): Don't allow control characters and reserved chars
120 _DISALLOWED_CHARS_PATTERN = re.compile('[\x00-\x1f\x80-\x9f\ufffd~?<>:*|\'"]')
121
122 # NOTE(kgriffs): If somehow an executable code exploit is triggerable, this
123 # minimizes how much can be included in the payload.
124 _MAX_NON_PREFIXED_LEN = 512
125
126 def __init__(self, prefix, directory, downloadable=False, fallback_filename=None):
127 if not prefix.startswith('/'):
128 raise ValueError("prefix must start with '/'")
129
130 # TODO(vgerak): Remove the check when py3.5 is dropped.
131 if isinstance(directory, pathlib.Path):
132 directory = str(directory)
133
134 self._directory = os.path.normpath(directory)
135 if not os.path.isabs(self._directory):
136 raise ValueError('directory must be an absolute path')
137
138 if fallback_filename is None:
139 self._fallback_filename = None
140 else:
141 self._fallback_filename = os.path.normpath(
142 os.path.join(self._directory, fallback_filename)
143 )
144 if not os.path.isfile(self._fallback_filename):
145 raise ValueError('fallback_filename is not a file')
146
147 # NOTE(kgriffs): Ensure it ends with a path separator to ensure
148 # we only match on the complete segment. Don't raise an error
149 # because most people won't expect to have to append a slash.
150 if not prefix.endswith('/'):
151 prefix += '/'
152
153 self._prefix = prefix
154 self._downloadable = downloadable
155
156 def match(self, path):
157 """Check whether the given path matches this route."""
158 if self._fallback_filename is None:
159 return path.startswith(self._prefix)
160 return path.startswith(self._prefix) or path == self._prefix[:-1]
161
162 def __call__(self, req, resp):
163 """Resource responder for this route."""
164
165 without_prefix = req.path[len(self._prefix) :]
166
167 # NOTE(kgriffs): Check surrounding whitespace and strip trailing
168 # periods, which are illegal on windows
169 # NOTE(CaselIT): An empty filename is allowed when fallback_filename is provided
170 if (
171 not (without_prefix or self._fallback_filename is not None)
172 or without_prefix.strip().rstrip('.') != without_prefix
173 or self._DISALLOWED_CHARS_PATTERN.search(without_prefix)
174 or '\\' in without_prefix
175 or '//' in without_prefix
176 or len(without_prefix) > self._MAX_NON_PREFIXED_LEN
177 ):
178
179 raise falcon.HTTPNotFound()
180
181 normalized = os.path.normpath(without_prefix)
182
183 if normalized.startswith('../') or normalized.startswith('/'):
184 raise falcon.HTTPNotFound()
185
186 file_path = os.path.join(self._directory, normalized)
187
188 # NOTE(kgriffs): Final sanity-check just to be safe. This check
189 # should never succeed, but this should guard against us having
190 # overlooked something.
191 if '..' in file_path or not file_path.startswith(self._directory):
192 raise falcon.HTTPNotFound()
193
194 req_range = req.range
195 if req.range_unit != 'bytes':
196 req_range = None
197 try:
198 stream, length, content_range = _open_range(file_path, req_range)
199 resp.set_stream(stream, length)
200 except IOError:
201 if self._fallback_filename is None:
202 raise falcon.HTTPNotFound()
203 try:
204 stream, length, content_range = _open_range(
205 self._fallback_filename, req_range
206 )
207 resp.set_stream(stream, length)
208 file_path = self._fallback_filename
209 except IOError:
210 raise falcon.HTTPNotFound()
211
212 suffix = os.path.splitext(file_path)[1]
213 resp.content_type = resp.options.static_media_types.get(
214 suffix, 'application/octet-stream'
215 )
216 resp.accept_ranges = 'bytes'
217
218 if self._downloadable:
219 resp.downloadable_as = os.path.basename(file_path)
220 if content_range:
221 resp.status = falcon.HTTP_206
222 resp.content_range = content_range
223
224
225 class StaticRouteAsync(StaticRoute):
226 """Subclass of StaticRoute with modifications to support ASGI apps."""
227
228 async def __call__(self, req, resp):
229 super().__call__(req, resp)
230
231 # NOTE(kgriffs): Fixup resp.stream so that it is non-blocking
232 resp.stream = _AsyncFileReader(resp.stream)
233
234
235 class _AsyncFileReader:
236 """Adapts a standard file I/O object so that reads are non-blocking."""
237
238 def __init__(self, file):
239 self._file = file
240 self._loop = get_running_loop()
241
242 async def read(self, size=-1):
243 return await self._loop.run_in_executor(None, partial(self._file.read, size))
244
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/falcon/routing/static.py b/falcon/routing/static.py
--- a/falcon/routing/static.py
+++ b/falcon/routing/static.py
@@ -241,3 +241,6 @@
async def read(self, size=-1):
return await self._loop.run_in_executor(None, partial(self._file.read, size))
+
+ async def close(self):
+ await self._loop.run_in_executor(None, self._file.close)
| {"golden_diff": "diff --git a/falcon/routing/static.py b/falcon/routing/static.py\n--- a/falcon/routing/static.py\n+++ b/falcon/routing/static.py\n@@ -241,3 +241,6 @@\n \n async def read(self, size=-1):\n return await self._loop.run_in_executor(None, partial(self._file.read, size))\n+\n+ async def close(self):\n+ await self._loop.run_in_executor(None, self._file.close)\n", "issue": "StaticRouteAsync leaves open files\nWhen using static routes with a [`falcon.asgi.App`](https://falcon.readthedocs.io/en/stable/api/app.html#asgi-app), it seems that the `_AsyncFileReader` wrapper does not implement any `.close()` method, so files are left open.\r\n\r\nOn CPython, I wasn't able to demonstrate any practical impact of this bug as the file object in question is refcounted to 0 and garbage collected as soon as it goes out of scope. However, that isn't the case when running `uvicorn` on PyPy 3.7, as PyPy uses a different GC implementation.\r\n\r\nTest case in point:\r\n```\r\nimport io\r\nimport logging\r\nimport os.path\r\nimport unittest.mock\r\n\r\nimport falcon.asgi\r\n\r\nlogging.basicConfig(\r\n format='%(asctime)s [%(levelname)s] %(message)s', level=logging.INFO)\r\n\r\n\r\nclass DebugIO(io.BytesIO):\r\n\r\n @classmethod\r\n def open(cls, *args, **kwargs):\r\n return cls(b'Test data!\\n')\r\n\r\n def close(self):\r\n logging.info(f'{self}.close()')\r\n super().close()\r\n\r\n\r\napp = falcon.asgi.App()\r\napp.add_static_route('/files', '/tmp')\r\n\r\ndebug = unittest.mock.patch('io.open', DebugIO.open)\r\ndebug.start()\r\n```\nStaticRouteAsync leaves open files\nWhen using static routes with a [`falcon.asgi.App`](https://falcon.readthedocs.io/en/stable/api/app.html#asgi-app), it seems that the `_AsyncFileReader` wrapper does not implement any `.close()` method, so files are left open.\r\n\r\nOn CPython, I wasn't able to demonstrate any practical impact of this bug as the file object in question is refcounted to 0 and garbage collected as soon as it goes out of scope. 
However, that isn't the case when running `uvicorn` on PyPy 3.7, as PyPy uses a different GC implementation.\r\n\r\nTest case in point:\r\n```\r\nimport io\r\nimport logging\r\nimport os.path\r\nimport unittest.mock\r\n\r\nimport falcon.asgi\r\n\r\nlogging.basicConfig(\r\n format='%(asctime)s [%(levelname)s] %(message)s', level=logging.INFO)\r\n\r\n\r\nclass DebugIO(io.BytesIO):\r\n\r\n @classmethod\r\n def open(cls, *args, **kwargs):\r\n return cls(b'Test data!\\n')\r\n\r\n def close(self):\r\n logging.info(f'{self}.close()')\r\n super().close()\r\n\r\n\r\napp = falcon.asgi.App()\r\napp.add_static_route('/files', '/tmp')\r\n\r\ndebug = unittest.mock.patch('io.open', DebugIO.open)\r\ndebug.start()\r\n```\n", "before_files": [{"content": "from functools import partial\nimport io\nimport os\nimport pathlib\nimport re\n\nimport falcon\nfrom falcon.util.sync import get_running_loop\n\n\ndef _open_range(file_path, req_range):\n \"\"\"Open a file for a ranged request.\n\n Args:\n file_path (str): Path to the file to open.\n req_range (Optional[Tuple[int, int]]): Request.range value.\n Returns:\n tuple: Three-member tuple of (stream, content-length, content-range).\n If req_range is ``None`` or ignored, content-range will be\n ``None``; otherwise, the stream will be appropriately seeked and\n possibly bounded, and the content-range will be a tuple of\n (start, end, size).\n \"\"\"\n fh = io.open(file_path, 'rb')\n size = os.fstat(fh.fileno()).st_size\n if req_range is None:\n return fh, size, None\n\n start, end = req_range\n if size == 0:\n # NOTE(tipabu): Ignore Range headers for zero-byte files; just serve\n # the empty body since Content-Range can't be used to express a\n # zero-byte body.\n return fh, 0, None\n\n if start < 0 and end == -1:\n # NOTE(tipabu): Special case: only want the last N bytes.\n start = max(start, -size)\n fh.seek(start, os.SEEK_END)\n # NOTE(vytas): Wrap in order to prevent sendfile from being used, as\n # its implementation was found to be buggy in many popular WSGI\n # servers for open files with a non-zero offset.\n return _BoundedFile(fh, -start), -start, (size + start, size - 1, size)\n\n if start >= size:\n fh.close()\n raise falcon.HTTPRangeNotSatisfiable(size)\n\n fh.seek(start)\n if end == -1:\n # NOTE(vytas): Wrap in order to prevent sendfile from being used, as\n # its implementation was found to be buggy in many popular WSGI\n # servers for open files with a non-zero offset.\n length = size - start\n return _BoundedFile(fh, length), length, (start, size - 1, size)\n\n end = min(end, size - 1)\n length = end - start + 1\n return _BoundedFile(fh, length), length, (start, end, size)\n\n\nclass _BoundedFile:\n \"\"\"Wrap a file to only allow part of it to be read.\n\n Args:\n fh: The file object to wrap. Should be opened in binary mode,\n and already seeked to an appropriate position. The object must\n expose a ``.close()`` method.\n length (int): Number of bytes that may be read.\n \"\"\"\n\n def __init__(self, fh, length):\n self.fh = fh\n self.close = fh.close\n self.remaining = length\n\n def read(self, size=-1):\n \"\"\"Read the underlying file object, within the specified bounds.\"\"\"\n if size < 0:\n size = self.remaining\n else:\n size = min(size, self.remaining)\n data = self.fh.read(size)\n self.remaining -= len(data)\n return data\n\n\nclass StaticRoute:\n \"\"\"Represents a static route.\n\n Args:\n prefix (str): The path prefix to match for this route. 
If the\n path in the requested URI starts with this string, the remainder\n of the path will be appended to the source directory to\n determine the file to serve. This is done in a secure manner\n to prevent an attacker from requesting a file outside the\n specified directory.\n\n Note that static routes are matched in LIFO order, and are only\n attempted after checking dynamic routes and sinks.\n\n directory (Union[str, pathlib.Path]): The source directory from which to\n serve files. Must be an absolute path.\n downloadable (bool): Set to ``True`` to include a\n Content-Disposition header in the response. The \"filename\"\n directive is simply set to the name of the requested file.\n fallback_filename (str): Fallback filename used when the requested file\n is not found. Can be a relative path inside the prefix folder or\n any valid absolute path.\n\n Note:\n If the fallback file is served instead of the requested file,\n the response Content-Type header, as well as the\n Content-Disposition header (provided it was requested with the\n `downloadable` parameter described above), are derived from the\n fallback filename, as opposed to the requested filename.\n \"\"\"\n\n # NOTE(kgriffs): Don't allow control characters and reserved chars\n _DISALLOWED_CHARS_PATTERN = re.compile('[\\x00-\\x1f\\x80-\\x9f\\ufffd~?<>:*|\\'\"]')\n\n # NOTE(kgriffs): If somehow an executable code exploit is triggerable, this\n # minimizes how much can be included in the payload.\n _MAX_NON_PREFIXED_LEN = 512\n\n def __init__(self, prefix, directory, downloadable=False, fallback_filename=None):\n if not prefix.startswith('/'):\n raise ValueError(\"prefix must start with '/'\")\n\n # TODO(vgerak): Remove the check when py3.5 is dropped.\n if isinstance(directory, pathlib.Path):\n directory = str(directory)\n\n self._directory = os.path.normpath(directory)\n if not os.path.isabs(self._directory):\n raise ValueError('directory must be an absolute path')\n\n if fallback_filename is None:\n self._fallback_filename = None\n else:\n self._fallback_filename = os.path.normpath(\n os.path.join(self._directory, fallback_filename)\n )\n if not os.path.isfile(self._fallback_filename):\n raise ValueError('fallback_filename is not a file')\n\n # NOTE(kgriffs): Ensure it ends with a path separator to ensure\n # we only match on the complete segment. 
Don't raise an error\n # because most people won't expect to have to append a slash.\n if not prefix.endswith('/'):\n prefix += '/'\n\n self._prefix = prefix\n self._downloadable = downloadable\n\n def match(self, path):\n \"\"\"Check whether the given path matches this route.\"\"\"\n if self._fallback_filename is None:\n return path.startswith(self._prefix)\n return path.startswith(self._prefix) or path == self._prefix[:-1]\n\n def __call__(self, req, resp):\n \"\"\"Resource responder for this route.\"\"\"\n\n without_prefix = req.path[len(self._prefix) :]\n\n # NOTE(kgriffs): Check surrounding whitespace and strip trailing\n # periods, which are illegal on windows\n # NOTE(CaselIT): An empty filename is allowed when fallback_filename is provided\n if (\n not (without_prefix or self._fallback_filename is not None)\n or without_prefix.strip().rstrip('.') != without_prefix\n or self._DISALLOWED_CHARS_PATTERN.search(without_prefix)\n or '\\\\' in without_prefix\n or '//' in without_prefix\n or len(without_prefix) > self._MAX_NON_PREFIXED_LEN\n ):\n\n raise falcon.HTTPNotFound()\n\n normalized = os.path.normpath(without_prefix)\n\n if normalized.startswith('../') or normalized.startswith('/'):\n raise falcon.HTTPNotFound()\n\n file_path = os.path.join(self._directory, normalized)\n\n # NOTE(kgriffs): Final sanity-check just to be safe. This check\n # should never succeed, but this should guard against us having\n # overlooked something.\n if '..' in file_path or not file_path.startswith(self._directory):\n raise falcon.HTTPNotFound()\n\n req_range = req.range\n if req.range_unit != 'bytes':\n req_range = None\n try:\n stream, length, content_range = _open_range(file_path, req_range)\n resp.set_stream(stream, length)\n except IOError:\n if self._fallback_filename is None:\n raise falcon.HTTPNotFound()\n try:\n stream, length, content_range = _open_range(\n self._fallback_filename, req_range\n )\n resp.set_stream(stream, length)\n file_path = self._fallback_filename\n except IOError:\n raise falcon.HTTPNotFound()\n\n suffix = os.path.splitext(file_path)[1]\n resp.content_type = resp.options.static_media_types.get(\n suffix, 'application/octet-stream'\n )\n resp.accept_ranges = 'bytes'\n\n if self._downloadable:\n resp.downloadable_as = os.path.basename(file_path)\n if content_range:\n resp.status = falcon.HTTP_206\n resp.content_range = content_range\n\n\nclass StaticRouteAsync(StaticRoute):\n \"\"\"Subclass of StaticRoute with modifications to support ASGI apps.\"\"\"\n\n async def __call__(self, req, resp):\n super().__call__(req, resp)\n\n # NOTE(kgriffs): Fixup resp.stream so that it is non-blocking\n resp.stream = _AsyncFileReader(resp.stream)\n\n\nclass _AsyncFileReader:\n \"\"\"Adapts a standard file I/O object so that reads are non-blocking.\"\"\"\n\n def __init__(self, file):\n self._file = file\n self._loop = get_running_loop()\n\n async def read(self, size=-1):\n return await self._loop.run_in_executor(None, partial(self._file.read, size))\n", "path": "falcon/routing/static.py"}], "after_files": [{"content": "from functools import partial\nimport io\nimport os\nimport pathlib\nimport re\n\nimport falcon\nfrom falcon.util.sync import get_running_loop\n\n\ndef _open_range(file_path, req_range):\n \"\"\"Open a file for a ranged request.\n\n Args:\n file_path (str): Path to the file to open.\n req_range (Optional[Tuple[int, int]]): Request.range value.\n Returns:\n tuple: Three-member tuple of (stream, content-length, content-range).\n If req_range is ``None`` or ignored, content-range 
will be\n ``None``; otherwise, the stream will be appropriately seeked and\n possibly bounded, and the content-range will be a tuple of\n (start, end, size).\n \"\"\"\n fh = io.open(file_path, 'rb')\n size = os.fstat(fh.fileno()).st_size\n if req_range is None:\n return fh, size, None\n\n start, end = req_range\n if size == 0:\n # NOTE(tipabu): Ignore Range headers for zero-byte files; just serve\n # the empty body since Content-Range can't be used to express a\n # zero-byte body.\n return fh, 0, None\n\n if start < 0 and end == -1:\n # NOTE(tipabu): Special case: only want the last N bytes.\n start = max(start, -size)\n fh.seek(start, os.SEEK_END)\n # NOTE(vytas): Wrap in order to prevent sendfile from being used, as\n # its implementation was found to be buggy in many popular WSGI\n # servers for open files with a non-zero offset.\n return _BoundedFile(fh, -start), -start, (size + start, size - 1, size)\n\n if start >= size:\n fh.close()\n raise falcon.HTTPRangeNotSatisfiable(size)\n\n fh.seek(start)\n if end == -1:\n # NOTE(vytas): Wrap in order to prevent sendfile from being used, as\n # its implementation was found to be buggy in many popular WSGI\n # servers for open files with a non-zero offset.\n length = size - start\n return _BoundedFile(fh, length), length, (start, size - 1, size)\n\n end = min(end, size - 1)\n length = end - start + 1\n return _BoundedFile(fh, length), length, (start, end, size)\n\n\nclass _BoundedFile:\n \"\"\"Wrap a file to only allow part of it to be read.\n\n Args:\n fh: The file object to wrap. Should be opened in binary mode,\n and already seeked to an appropriate position. The object must\n expose a ``.close()`` method.\n length (int): Number of bytes that may be read.\n \"\"\"\n\n def __init__(self, fh, length):\n self.fh = fh\n self.close = fh.close\n self.remaining = length\n\n def read(self, size=-1):\n \"\"\"Read the underlying file object, within the specified bounds.\"\"\"\n if size < 0:\n size = self.remaining\n else:\n size = min(size, self.remaining)\n data = self.fh.read(size)\n self.remaining -= len(data)\n return data\n\n\nclass StaticRoute:\n \"\"\"Represents a static route.\n\n Args:\n prefix (str): The path prefix to match for this route. If the\n path in the requested URI starts with this string, the remainder\n of the path will be appended to the source directory to\n determine the file to serve. This is done in a secure manner\n to prevent an attacker from requesting a file outside the\n specified directory.\n\n Note that static routes are matched in LIFO order, and are only\n attempted after checking dynamic routes and sinks.\n\n directory (Union[str, pathlib.Path]): The source directory from which to\n serve files. Must be an absolute path.\n downloadable (bool): Set to ``True`` to include a\n Content-Disposition header in the response. The \"filename\"\n directive is simply set to the name of the requested file.\n fallback_filename (str): Fallback filename used when the requested file\n is not found. 
Can be a relative path inside the prefix folder or\n any valid absolute path.\n\n Note:\n If the fallback file is served instead of the requested file,\n the response Content-Type header, as well as the\n Content-Disposition header (provided it was requested with the\n `downloadable` parameter described above), are derived from the\n fallback filename, as opposed to the requested filename.\n \"\"\"\n\n # NOTE(kgriffs): Don't allow control characters and reserved chars\n _DISALLOWED_CHARS_PATTERN = re.compile('[\\x00-\\x1f\\x80-\\x9f\\ufffd~?<>:*|\\'\"]')\n\n # NOTE(kgriffs): If somehow an executable code exploit is triggerable, this\n # minimizes how much can be included in the payload.\n _MAX_NON_PREFIXED_LEN = 512\n\n def __init__(self, prefix, directory, downloadable=False, fallback_filename=None):\n if not prefix.startswith('/'):\n raise ValueError(\"prefix must start with '/'\")\n\n # TODO(vgerak): Remove the check when py3.5 is dropped.\n if isinstance(directory, pathlib.Path):\n directory = str(directory)\n\n self._directory = os.path.normpath(directory)\n if not os.path.isabs(self._directory):\n raise ValueError('directory must be an absolute path')\n\n if fallback_filename is None:\n self._fallback_filename = None\n else:\n self._fallback_filename = os.path.normpath(\n os.path.join(self._directory, fallback_filename)\n )\n if not os.path.isfile(self._fallback_filename):\n raise ValueError('fallback_filename is not a file')\n\n # NOTE(kgriffs): Ensure it ends with a path separator to ensure\n # we only match on the complete segment. Don't raise an error\n # because most people won't expect to have to append a slash.\n if not prefix.endswith('/'):\n prefix += '/'\n\n self._prefix = prefix\n self._downloadable = downloadable\n\n def match(self, path):\n \"\"\"Check whether the given path matches this route.\"\"\"\n if self._fallback_filename is None:\n return path.startswith(self._prefix)\n return path.startswith(self._prefix) or path == self._prefix[:-1]\n\n def __call__(self, req, resp):\n \"\"\"Resource responder for this route.\"\"\"\n\n without_prefix = req.path[len(self._prefix) :]\n\n # NOTE(kgriffs): Check surrounding whitespace and strip trailing\n # periods, which are illegal on windows\n # NOTE(CaselIT): An empty filename is allowed when fallback_filename is provided\n if (\n not (without_prefix or self._fallback_filename is not None)\n or without_prefix.strip().rstrip('.') != without_prefix\n or self._DISALLOWED_CHARS_PATTERN.search(without_prefix)\n or '\\\\' in without_prefix\n or '//' in without_prefix\n or len(without_prefix) > self._MAX_NON_PREFIXED_LEN\n ):\n\n raise falcon.HTTPNotFound()\n\n normalized = os.path.normpath(without_prefix)\n\n if normalized.startswith('../') or normalized.startswith('/'):\n raise falcon.HTTPNotFound()\n\n file_path = os.path.join(self._directory, normalized)\n\n # NOTE(kgriffs): Final sanity-check just to be safe. This check\n # should never succeed, but this should guard against us having\n # overlooked something.\n if '..' 
in file_path or not file_path.startswith(self._directory):\n raise falcon.HTTPNotFound()\n\n req_range = req.range\n if req.range_unit != 'bytes':\n req_range = None\n try:\n stream, length, content_range = _open_range(file_path, req_range)\n resp.set_stream(stream, length)\n except IOError:\n if self._fallback_filename is None:\n raise falcon.HTTPNotFound()\n try:\n stream, length, content_range = _open_range(\n self._fallback_filename, req_range\n )\n resp.set_stream(stream, length)\n file_path = self._fallback_filename\n except IOError:\n raise falcon.HTTPNotFound()\n\n suffix = os.path.splitext(file_path)[1]\n resp.content_type = resp.options.static_media_types.get(\n suffix, 'application/octet-stream'\n )\n resp.accept_ranges = 'bytes'\n\n if self._downloadable:\n resp.downloadable_as = os.path.basename(file_path)\n if content_range:\n resp.status = falcon.HTTP_206\n resp.content_range = content_range\n\n\nclass StaticRouteAsync(StaticRoute):\n \"\"\"Subclass of StaticRoute with modifications to support ASGI apps.\"\"\"\n\n async def __call__(self, req, resp):\n super().__call__(req, resp)\n\n # NOTE(kgriffs): Fixup resp.stream so that it is non-blocking\n resp.stream = _AsyncFileReader(resp.stream)\n\n\nclass _AsyncFileReader:\n \"\"\"Adapts a standard file I/O object so that reads are non-blocking.\"\"\"\n\n def __init__(self, file):\n self._file = file\n self._loop = get_running_loop()\n\n async def read(self, size=-1):\n return await self._loop.run_in_executor(None, partial(self._file.read, size))\n\n async def close(self):\n await self._loop.run_in_executor(None, self._file.close)\n", "path": "falcon/routing/static.py"}]} | 3,485 | 104 |
gh_patches_debug_11952 | rasdani/github-patches | git_diff | scoutapp__scout_apm_python-517 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Core agent update to 1.2.9
We've had a few bug fixes and improvements in the CA (core agent). Please bump to 1.2.9 in the Python CA manager.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/scout_apm/core/config.py`
Content:
```
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import logging
5 import os
6 import warnings
7
8 from scout_apm.compat import string_type
9 from scout_apm.core import platform_detection
10
11 logger = logging.getLogger(__name__)
12
13
14 class ScoutConfig(object):
15 """
16 Configuration object for the ScoutApm agent.
17
18 Contains a list of configuration "layers". When a configuration key is
19 looked up, each layer is asked in turn if it knows the value. The first one
20 to answer affirmatively returns the value.
21 """
22
23 def __init__(self):
24 self.layers = [
25 Env(),
26 Python(),
27 Derived(self),
28 Defaults(),
29 Null(),
30 ]
31
32 def value(self, key):
33 value = self.locate_layer_for_key(key).value(key)
34 if key in CONVERSIONS:
35 return CONVERSIONS[key](value)
36 return value
37
38 def locate_layer_for_key(self, key):
39 for layer in self.layers:
40 if layer.has_config(key):
41 return layer
42
43 # Should be unreachable because Null returns None for all keys.
44 raise ValueError("key {!r} not found in any layer".format(key))
45
46 def log(self):
47 logger.debug("Configuration Loaded:")
48 for key in self.known_keys():
49 layer = self.locate_layer_for_key(key)
50 logger.debug(
51 "%-9s: %s = %s", layer.__class__.__name__, key, layer.value(key)
52 )
53
54 def known_keys(self):
55 return [
56 "app_server",
57 "application_root",
58 "core_agent_config_file",
59 "core_agent_dir",
60 "core_agent_download",
61 "core_agent_launch",
62 "core_agent_log_file",
63 "core_agent_log_level",
64 "core_agent_permissions",
65 "core_agent_socket_path",
66 "core_agent_version",
67 "disabled_instruments",
68 "download_url",
69 "framework",
70 "framework_version",
71 "hostname",
72 "ignore",
73 "key",
74 "log_level",
75 "monitor",
76 "name",
77 "revision_sha",
78 "scm_subdirectory",
79 "shutdown_message_enabled",
80 "shutdown_timeout_seconds",
81 ]
82
83 def core_agent_permissions(self):
84 try:
85 return int(str(self.value("core_agent_permissions")), 8)
86 except ValueError:
87 logger.exception(
88 "Invalid core_agent_permissions value, using default of 0o700"
89 )
90 return 0o700
91
92 @classmethod
93 def set(cls, **kwargs):
94 """
95 Sets a configuration value for the Scout agent. Values set here will
96 not override values set in ENV.
97 """
98 for key, value in kwargs.items():
99 SCOUT_PYTHON_VALUES[key] = value
100
101 @classmethod
102 def unset(cls, *keys):
103 """
104 Removes a configuration value for the Scout agent.
105 """
106 for key in keys:
107 SCOUT_PYTHON_VALUES.pop(key, None)
108
109 @classmethod
110 def reset_all(cls):
111 """
112 Remove all configuration settings set via `ScoutConfig.set(...)`.
113
114 This is meant for use in testing.
115 """
116 SCOUT_PYTHON_VALUES.clear()
117
118
119 # Module-level data, the ScoutConfig.set(key="value") adds to this
120 SCOUT_PYTHON_VALUES = {}
121
122
123 class Python(object):
124 """
125 A configuration overlay that lets other parts of python set values.
126 """
127
128 def has_config(self, key):
129 return key in SCOUT_PYTHON_VALUES
130
131 def value(self, key):
132 return SCOUT_PYTHON_VALUES[key]
133
134
135 class Env(object):
136 """
137 Reads configuration from environment by prefixing the key
138 requested with "SCOUT_"
139
140 Example: the `key` config looks for SCOUT_KEY
141 environment variable
142 """
143
144 def has_config(self, key):
145 env_key = self.modify_key(key)
146 return env_key in os.environ
147
148 def value(self, key):
149 env_key = self.modify_key(key)
150 return os.environ[env_key]
151
152 def modify_key(self, key):
153 env_key = ("SCOUT_" + key).upper()
154 return env_key
155
156
157 class Derived(object):
158 """
159 A configuration overlay that calculates from other values.
160 """
161
162 def __init__(self, config):
163 """
164 config argument is the overall ScoutConfig var, so we can lookup the
165 components of the derived info.
166 """
167 self.config = config
168
169 def has_config(self, key):
170 return self.lookup_func(key) is not None
171
172 def value(self, key):
173 return self.lookup_func(key)()
174
175 def lookup_func(self, key):
176 """
177 Returns the derive_#{key} function, or None if it isn't defined
178 """
179 func_name = "derive_" + key
180 return getattr(self, func_name, None)
181
182 def derive_core_agent_socket_path(self):
183 return "{}/{}/scout-agent.sock".format(
184 self.config.value("core_agent_dir"),
185 self.config.value("core_agent_full_name"),
186 )
187
188 def derive_core_agent_full_name(self):
189 triple = self.config.value("core_agent_triple")
190 if not platform_detection.is_valid_triple(triple):
191 warnings.warn("Invalid value for core_agent_triple: {}".format(triple))
192 return "{name}-{version}-{triple}".format(
193 name="scout_apm_core",
194 version=self.config.value("core_agent_version"),
195 triple=triple,
196 )
197
198 def derive_core_agent_triple(self):
199 return platform_detection.get_triple()
200
201
202 class Defaults(object):
203 """
204 Provides default values for important configurations
205 """
206
207 def __init__(self):
208 self.defaults = {
209 "app_server": "",
210 "application_root": "",
211 "core_agent_dir": "/tmp/scout_apm_core",
212 "core_agent_download": True,
213 "core_agent_launch": True,
214 "core_agent_log_level": "info",
215 "core_agent_permissions": 700,
216 "core_agent_version": "v1.2.6", # can be an exact tag name, or 'latest'
217 "disabled_instruments": [],
218 "download_url": "https://s3-us-west-1.amazonaws.com/scout-public-downloads/apm_core_agent/release", # noqa: E501
219 "framework": "",
220 "framework_version": "",
221 "hostname": None,
222 "key": "",
223 "monitor": False,
224 "name": "Python App",
225 "revision_sha": self._git_revision_sha(),
226 "scm_subdirectory": "",
227 "shutdown_message_enabled": True,
228 "shutdown_timeout_seconds": 2.0,
229 "uri_reporting": "filtered_params",
230 }
231
232 def _git_revision_sha(self):
233 # N.B. The environment variable SCOUT_REVISION_SHA may also be used,
234 # but that will be picked up by Env
235 return os.environ.get("HEROKU_SLUG_COMMIT", "")
236
237 def has_config(self, key):
238 return key in self.defaults
239
240 def value(self, key):
241 return self.defaults[key]
242
243
244 class Null(object):
245 """
246 Always answers that a key is present, but the value is None
247
248 Used as the last step of the layered configuration.
249 """
250
251 def has_config(self, key):
252 return True
253
254 def value(self, key):
255 return None
256
257
258 def convert_to_bool(value):
259 if isinstance(value, bool):
260 return value
261 if isinstance(value, string_type):
262 return value.lower() in ("yes", "true", "t", "1")
263 # Unknown type - default to false?
264 return False
265
266
267 def convert_to_float(value):
268 try:
269 return float(value)
270 except ValueError:
271 return 0.0
272
273
274 def convert_to_list(value):
275 if isinstance(value, list):
276 return value
277 if isinstance(value, tuple):
278 return list(value)
279 if isinstance(value, string_type):
280 # Split on commas
281 return [item.strip() for item in value.split(",") if item]
282 # Unknown type - default to empty?
283 return []
284
285
286 CONVERSIONS = {
287 "core_agent_download": convert_to_bool,
288 "core_agent_launch": convert_to_bool,
289 "disabled_instruments": convert_to_list,
290 "ignore": convert_to_list,
291 "monitor": convert_to_bool,
292 "shutdown_message_enabled": convert_to_bool,
293 "shutdown_timeout_seconds": convert_to_float,
294 }
295
296
297 scout_config = ScoutConfig()
298
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/scout_apm/core/config.py b/src/scout_apm/core/config.py
--- a/src/scout_apm/core/config.py
+++ b/src/scout_apm/core/config.py
@@ -213,7 +213,7 @@
"core_agent_launch": True,
"core_agent_log_level": "info",
"core_agent_permissions": 700,
- "core_agent_version": "v1.2.6", # can be an exact tag name, or 'latest'
+ "core_agent_version": "v1.2.9", # can be an exact tag name, or 'latest'
"disabled_instruments": [],
"download_url": "https://s3-us-west-1.amazonaws.com/scout-public-downloads/apm_core_agent/release", # noqa: E501
"framework": "",
| {"golden_diff": "diff --git a/src/scout_apm/core/config.py b/src/scout_apm/core/config.py\n--- a/src/scout_apm/core/config.py\n+++ b/src/scout_apm/core/config.py\n@@ -213,7 +213,7 @@\n \"core_agent_launch\": True,\n \"core_agent_log_level\": \"info\",\n \"core_agent_permissions\": 700,\n- \"core_agent_version\": \"v1.2.6\", # can be an exact tag name, or 'latest'\n+ \"core_agent_version\": \"v1.2.9\", # can be an exact tag name, or 'latest'\n \"disabled_instruments\": [],\n \"download_url\": \"https://s3-us-west-1.amazonaws.com/scout-public-downloads/apm_core_agent/release\", # noqa: E501\n \"framework\": \"\",\n", "issue": "Core agent update to 1.2.9\nWe've had a few bug fixes and improvements with the CA. Please bump to 1.2.9 in python CA manager.\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport os\nimport warnings\n\nfrom scout_apm.compat import string_type\nfrom scout_apm.core import platform_detection\n\nlogger = logging.getLogger(__name__)\n\n\nclass ScoutConfig(object):\n \"\"\"\n Configuration object for the ScoutApm agent.\n\n Contains a list of configuration \"layers\". When a configuration key is\n looked up, each layer is asked in turn if it knows the value. The first one\n to answer affirmatively returns the value.\n \"\"\"\n\n def __init__(self):\n self.layers = [\n Env(),\n Python(),\n Derived(self),\n Defaults(),\n Null(),\n ]\n\n def value(self, key):\n value = self.locate_layer_for_key(key).value(key)\n if key in CONVERSIONS:\n return CONVERSIONS[key](value)\n return value\n\n def locate_layer_for_key(self, key):\n for layer in self.layers:\n if layer.has_config(key):\n return layer\n\n # Should be unreachable because Null returns None for all keys.\n raise ValueError(\"key {!r} not found in any layer\".format(key))\n\n def log(self):\n logger.debug(\"Configuration Loaded:\")\n for key in self.known_keys():\n layer = self.locate_layer_for_key(key)\n logger.debug(\n \"%-9s: %s = %s\", layer.__class__.__name__, key, layer.value(key)\n )\n\n def known_keys(self):\n return [\n \"app_server\",\n \"application_root\",\n \"core_agent_config_file\",\n \"core_agent_dir\",\n \"core_agent_download\",\n \"core_agent_launch\",\n \"core_agent_log_file\",\n \"core_agent_log_level\",\n \"core_agent_permissions\",\n \"core_agent_socket_path\",\n \"core_agent_version\",\n \"disabled_instruments\",\n \"download_url\",\n \"framework\",\n \"framework_version\",\n \"hostname\",\n \"ignore\",\n \"key\",\n \"log_level\",\n \"monitor\",\n \"name\",\n \"revision_sha\",\n \"scm_subdirectory\",\n \"shutdown_message_enabled\",\n \"shutdown_timeout_seconds\",\n ]\n\n def core_agent_permissions(self):\n try:\n return int(str(self.value(\"core_agent_permissions\")), 8)\n except ValueError:\n logger.exception(\n \"Invalid core_agent_permissions value, using default of 0o700\"\n )\n return 0o700\n\n @classmethod\n def set(cls, **kwargs):\n \"\"\"\n Sets a configuration value for the Scout agent. 
Values set here will\n not override values set in ENV.\n \"\"\"\n for key, value in kwargs.items():\n SCOUT_PYTHON_VALUES[key] = value\n\n @classmethod\n def unset(cls, *keys):\n \"\"\"\n Removes a configuration value for the Scout agent.\n \"\"\"\n for key in keys:\n SCOUT_PYTHON_VALUES.pop(key, None)\n\n @classmethod\n def reset_all(cls):\n \"\"\"\n Remove all configuration settings set via `ScoutConfig.set(...)`.\n\n This is meant for use in testing.\n \"\"\"\n SCOUT_PYTHON_VALUES.clear()\n\n\n# Module-level data, the ScoutConfig.set(key=\"value\") adds to this\nSCOUT_PYTHON_VALUES = {}\n\n\nclass Python(object):\n \"\"\"\n A configuration overlay that lets other parts of python set values.\n \"\"\"\n\n def has_config(self, key):\n return key in SCOUT_PYTHON_VALUES\n\n def value(self, key):\n return SCOUT_PYTHON_VALUES[key]\n\n\nclass Env(object):\n \"\"\"\n Reads configuration from environment by prefixing the key\n requested with \"SCOUT_\"\n\n Example: the `key` config looks for SCOUT_KEY\n environment variable\n \"\"\"\n\n def has_config(self, key):\n env_key = self.modify_key(key)\n return env_key in os.environ\n\n def value(self, key):\n env_key = self.modify_key(key)\n return os.environ[env_key]\n\n def modify_key(self, key):\n env_key = (\"SCOUT_\" + key).upper()\n return env_key\n\n\nclass Derived(object):\n \"\"\"\n A configuration overlay that calculates from other values.\n \"\"\"\n\n def __init__(self, config):\n \"\"\"\n config argument is the overall ScoutConfig var, so we can lookup the\n components of the derived info.\n \"\"\"\n self.config = config\n\n def has_config(self, key):\n return self.lookup_func(key) is not None\n\n def value(self, key):\n return self.lookup_func(key)()\n\n def lookup_func(self, key):\n \"\"\"\n Returns the derive_#{key} function, or None if it isn't defined\n \"\"\"\n func_name = \"derive_\" + key\n return getattr(self, func_name, None)\n\n def derive_core_agent_socket_path(self):\n return \"{}/{}/scout-agent.sock\".format(\n self.config.value(\"core_agent_dir\"),\n self.config.value(\"core_agent_full_name\"),\n )\n\n def derive_core_agent_full_name(self):\n triple = self.config.value(\"core_agent_triple\")\n if not platform_detection.is_valid_triple(triple):\n warnings.warn(\"Invalid value for core_agent_triple: {}\".format(triple))\n return \"{name}-{version}-{triple}\".format(\n name=\"scout_apm_core\",\n version=self.config.value(\"core_agent_version\"),\n triple=triple,\n )\n\n def derive_core_agent_triple(self):\n return platform_detection.get_triple()\n\n\nclass Defaults(object):\n \"\"\"\n Provides default values for important configurations\n \"\"\"\n\n def __init__(self):\n self.defaults = {\n \"app_server\": \"\",\n \"application_root\": \"\",\n \"core_agent_dir\": \"/tmp/scout_apm_core\",\n \"core_agent_download\": True,\n \"core_agent_launch\": True,\n \"core_agent_log_level\": \"info\",\n \"core_agent_permissions\": 700,\n \"core_agent_version\": \"v1.2.6\", # can be an exact tag name, or 'latest'\n \"disabled_instruments\": [],\n \"download_url\": \"https://s3-us-west-1.amazonaws.com/scout-public-downloads/apm_core_agent/release\", # noqa: E501\n \"framework\": \"\",\n \"framework_version\": \"\",\n \"hostname\": None,\n \"key\": \"\",\n \"monitor\": False,\n \"name\": \"Python App\",\n \"revision_sha\": self._git_revision_sha(),\n \"scm_subdirectory\": \"\",\n \"shutdown_message_enabled\": True,\n \"shutdown_timeout_seconds\": 2.0,\n \"uri_reporting\": \"filtered_params\",\n }\n\n def _git_revision_sha(self):\n # N.B. 
The environment variable SCOUT_REVISION_SHA may also be used,\n # but that will be picked up by Env\n return os.environ.get(\"HEROKU_SLUG_COMMIT\", \"\")\n\n def has_config(self, key):\n return key in self.defaults\n\n def value(self, key):\n return self.defaults[key]\n\n\nclass Null(object):\n \"\"\"\n Always answers that a key is present, but the value is None\n\n Used as the last step of the layered configuration.\n \"\"\"\n\n def has_config(self, key):\n return True\n\n def value(self, key):\n return None\n\n\ndef convert_to_bool(value):\n if isinstance(value, bool):\n return value\n if isinstance(value, string_type):\n return value.lower() in (\"yes\", \"true\", \"t\", \"1\")\n # Unknown type - default to false?\n return False\n\n\ndef convert_to_float(value):\n try:\n return float(value)\n except ValueError:\n return 0.0\n\n\ndef convert_to_list(value):\n if isinstance(value, list):\n return value\n if isinstance(value, tuple):\n return list(value)\n if isinstance(value, string_type):\n # Split on commas\n return [item.strip() for item in value.split(\",\") if item]\n # Unknown type - default to empty?\n return []\n\n\nCONVERSIONS = {\n \"core_agent_download\": convert_to_bool,\n \"core_agent_launch\": convert_to_bool,\n \"disabled_instruments\": convert_to_list,\n \"ignore\": convert_to_list,\n \"monitor\": convert_to_bool,\n \"shutdown_message_enabled\": convert_to_bool,\n \"shutdown_timeout_seconds\": convert_to_float,\n}\n\n\nscout_config = ScoutConfig()\n", "path": "src/scout_apm/core/config.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport os\nimport warnings\n\nfrom scout_apm.compat import string_type\nfrom scout_apm.core import platform_detection\n\nlogger = logging.getLogger(__name__)\n\n\nclass ScoutConfig(object):\n \"\"\"\n Configuration object for the ScoutApm agent.\n\n Contains a list of configuration \"layers\". When a configuration key is\n looked up, each layer is asked in turn if it knows the value. 
The first one\n to answer affirmatively returns the value.\n \"\"\"\n\n def __init__(self):\n self.layers = [\n Env(),\n Python(),\n Derived(self),\n Defaults(),\n Null(),\n ]\n\n def value(self, key):\n value = self.locate_layer_for_key(key).value(key)\n if key in CONVERSIONS:\n return CONVERSIONS[key](value)\n return value\n\n def locate_layer_for_key(self, key):\n for layer in self.layers:\n if layer.has_config(key):\n return layer\n\n # Should be unreachable because Null returns None for all keys.\n raise ValueError(\"key {!r} not found in any layer\".format(key))\n\n def log(self):\n logger.debug(\"Configuration Loaded:\")\n for key in self.known_keys():\n layer = self.locate_layer_for_key(key)\n logger.debug(\n \"%-9s: %s = %s\", layer.__class__.__name__, key, layer.value(key)\n )\n\n def known_keys(self):\n return [\n \"app_server\",\n \"application_root\",\n \"core_agent_config_file\",\n \"core_agent_dir\",\n \"core_agent_download\",\n \"core_agent_launch\",\n \"core_agent_log_file\",\n \"core_agent_log_level\",\n \"core_agent_permissions\",\n \"core_agent_socket_path\",\n \"core_agent_version\",\n \"disabled_instruments\",\n \"download_url\",\n \"framework\",\n \"framework_version\",\n \"hostname\",\n \"ignore\",\n \"key\",\n \"log_level\",\n \"monitor\",\n \"name\",\n \"revision_sha\",\n \"scm_subdirectory\",\n \"shutdown_message_enabled\",\n \"shutdown_timeout_seconds\",\n ]\n\n def core_agent_permissions(self):\n try:\n return int(str(self.value(\"core_agent_permissions\")), 8)\n except ValueError:\n logger.exception(\n \"Invalid core_agent_permissions value, using default of 0o700\"\n )\n return 0o700\n\n @classmethod\n def set(cls, **kwargs):\n \"\"\"\n Sets a configuration value for the Scout agent. Values set here will\n not override values set in ENV.\n \"\"\"\n for key, value in kwargs.items():\n SCOUT_PYTHON_VALUES[key] = value\n\n @classmethod\n def unset(cls, *keys):\n \"\"\"\n Removes a configuration value for the Scout agent.\n \"\"\"\n for key in keys:\n SCOUT_PYTHON_VALUES.pop(key, None)\n\n @classmethod\n def reset_all(cls):\n \"\"\"\n Remove all configuration settings set via `ScoutConfig.set(...)`.\n\n This is meant for use in testing.\n \"\"\"\n SCOUT_PYTHON_VALUES.clear()\n\n\n# Module-level data, the ScoutConfig.set(key=\"value\") adds to this\nSCOUT_PYTHON_VALUES = {}\n\n\nclass Python(object):\n \"\"\"\n A configuration overlay that lets other parts of python set values.\n \"\"\"\n\n def has_config(self, key):\n return key in SCOUT_PYTHON_VALUES\n\n def value(self, key):\n return SCOUT_PYTHON_VALUES[key]\n\n\nclass Env(object):\n \"\"\"\n Reads configuration from environment by prefixing the key\n requested with \"SCOUT_\"\n\n Example: the `key` config looks for SCOUT_KEY\n environment variable\n \"\"\"\n\n def has_config(self, key):\n env_key = self.modify_key(key)\n return env_key in os.environ\n\n def value(self, key):\n env_key = self.modify_key(key)\n return os.environ[env_key]\n\n def modify_key(self, key):\n env_key = (\"SCOUT_\" + key).upper()\n return env_key\n\n\nclass Derived(object):\n \"\"\"\n A configuration overlay that calculates from other values.\n \"\"\"\n\n def __init__(self, config):\n \"\"\"\n config argument is the overall ScoutConfig var, so we can lookup the\n components of the derived info.\n \"\"\"\n self.config = config\n\n def has_config(self, key):\n return self.lookup_func(key) is not None\n\n def value(self, key):\n return self.lookup_func(key)()\n\n def lookup_func(self, key):\n \"\"\"\n Returns the derive_#{key} function, 
or None if it isn't defined\n \"\"\"\n func_name = \"derive_\" + key\n return getattr(self, func_name, None)\n\n def derive_core_agent_socket_path(self):\n return \"{}/{}/scout-agent.sock\".format(\n self.config.value(\"core_agent_dir\"),\n self.config.value(\"core_agent_full_name\"),\n )\n\n def derive_core_agent_full_name(self):\n triple = self.config.value(\"core_agent_triple\")\n if not platform_detection.is_valid_triple(triple):\n warnings.warn(\"Invalid value for core_agent_triple: {}\".format(triple))\n return \"{name}-{version}-{triple}\".format(\n name=\"scout_apm_core\",\n version=self.config.value(\"core_agent_version\"),\n triple=triple,\n )\n\n def derive_core_agent_triple(self):\n return platform_detection.get_triple()\n\n\nclass Defaults(object):\n \"\"\"\n Provides default values for important configurations\n \"\"\"\n\n def __init__(self):\n self.defaults = {\n \"app_server\": \"\",\n \"application_root\": \"\",\n \"core_agent_dir\": \"/tmp/scout_apm_core\",\n \"core_agent_download\": True,\n \"core_agent_launch\": True,\n \"core_agent_log_level\": \"info\",\n \"core_agent_permissions\": 700,\n \"core_agent_version\": \"v1.2.9\", # can be an exact tag name, or 'latest'\n \"disabled_instruments\": [],\n \"download_url\": \"https://s3-us-west-1.amazonaws.com/scout-public-downloads/apm_core_agent/release\", # noqa: E501\n \"framework\": \"\",\n \"framework_version\": \"\",\n \"hostname\": None,\n \"key\": \"\",\n \"monitor\": False,\n \"name\": \"Python App\",\n \"revision_sha\": self._git_revision_sha(),\n \"scm_subdirectory\": \"\",\n \"shutdown_message_enabled\": True,\n \"shutdown_timeout_seconds\": 2.0,\n \"uri_reporting\": \"filtered_params\",\n }\n\n def _git_revision_sha(self):\n # N.B. The environment variable SCOUT_REVISION_SHA may also be used,\n # but that will be picked up by Env\n return os.environ.get(\"HEROKU_SLUG_COMMIT\", \"\")\n\n def has_config(self, key):\n return key in self.defaults\n\n def value(self, key):\n return self.defaults[key]\n\n\nclass Null(object):\n \"\"\"\n Always answers that a key is present, but the value is None\n\n Used as the last step of the layered configuration.\n \"\"\"\n\n def has_config(self, key):\n return True\n\n def value(self, key):\n return None\n\n\ndef convert_to_bool(value):\n if isinstance(value, bool):\n return value\n if isinstance(value, string_type):\n return value.lower() in (\"yes\", \"true\", \"t\", \"1\")\n # Unknown type - default to false?\n return False\n\n\ndef convert_to_float(value):\n try:\n return float(value)\n except ValueError:\n return 0.0\n\n\ndef convert_to_list(value):\n if isinstance(value, list):\n return value\n if isinstance(value, tuple):\n return list(value)\n if isinstance(value, string_type):\n # Split on commas\n return [item.strip() for item in value.split(\",\") if item]\n # Unknown type - default to empty?\n return []\n\n\nCONVERSIONS = {\n \"core_agent_download\": convert_to_bool,\n \"core_agent_launch\": convert_to_bool,\n \"disabled_instruments\": convert_to_list,\n \"ignore\": convert_to_list,\n \"monitor\": convert_to_bool,\n \"shutdown_message_enabled\": convert_to_bool,\n \"shutdown_timeout_seconds\": convert_to_float,\n}\n\n\nscout_config = ScoutConfig()\n", "path": "src/scout_apm/core/config.py"}]} | 2,914 | 191 |
gh_patches_debug_27670 | rasdani/github-patches | git_diff | crytic__slither-211 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug in function-id printer
Hi there! There is an issue connected to the `function-id` printer that I faced:
when you create a dynamic array with the `public` visibility modifier, the compiler automatically generates a getter function with a `uint256` input parameter. However, Slither thinks that the getter has no input parameters. Thus, the wrong function signature is printed in the output.
Here is a small example.
```
pragma solidity 0.5.7;
contract Example {
uint256[] public example;
}
```
Slither outputs the following table:
```
+-----------+------------+
| Name | ID |
+-----------+------------+
| example() | 0x54353f2f |
+-----------+------------+
```
However, the real `example()` function's signature is `0x477e4a02` in the example.
--- END ISSUE ---
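To make the mismatch concrete, here is a small illustrative sketch (not part of the original issue) that reproduces the two selectors quoted above using the same helper the printer imports; `example(uint256)` is the signature solc actually generates for a public dynamic array:

```python
# Illustrative only - selector values are the ones quoted in the issue above.
from slither.utils.function import get_function_id

print(hex(get_function_id("example()")))         # 0x54353f2f, what the printer currently reports
print(hex(get_function_id("example(uint256)")))  # 0x477e4a02, the getter solc really generates
```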
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `slither/printers/summary/function_ids.py`
Content:
```
1 """
2 Module printing summary of the contract
3 """
4 import collections
5 from prettytable import PrettyTable
6 from slither.printers.abstract_printer import AbstractPrinter
7 from slither.utils.colors import blue, green, magenta
8 from slither.utils.function import get_function_id
9
10 class FunctionIds(AbstractPrinter):
11
12 ARGUMENT = 'function-id'
13 HELP = 'Print the keccack256 signature of the functions'
14
15 WIKI = 'https://github.com/trailofbits/slither/wiki/Printer-documentation#function-id'
16
17 def output(self, _filename):
18 """
19 _filename is not used
20 Args:
21 _filename(string)
22 """
23
24 txt = ''
25 for contract in self.slither.contracts_derived:
26 txt += '\n{}:\n'.format(contract.name)
27 table = PrettyTable(['Name', 'ID'])
28 for function in contract.functions:
29 if function.visibility in ['public', 'external']:
30 table.add_row([function.full_name, hex(get_function_id(function.full_name))])
31 for variable in contract.state_variables:
32 if variable.visibility in ['public']:
33 table.add_row([variable.name+'()', hex(get_function_id(variable.name+'()'))])
34 txt += str(table) + '\n'
35
36 self.info(txt)
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/slither/printers/summary/function_ids.py b/slither/printers/summary/function_ids.py
--- a/slither/printers/summary/function_ids.py
+++ b/slither/printers/summary/function_ids.py
@@ -3,6 +3,8 @@
"""
import collections
from prettytable import PrettyTable
+
+from slither.core.solidity_types import ArrayType, MappingType
from slither.printers.abstract_printer import AbstractPrinter
from slither.utils.colors import blue, green, magenta
from slither.utils.function import get_function_id
@@ -30,7 +32,18 @@
table.add_row([function.full_name, hex(get_function_id(function.full_name))])
for variable in contract.state_variables:
if variable.visibility in ['public']:
- table.add_row([variable.name+'()', hex(get_function_id(variable.name+'()'))])
+ variable_getter_args = ""
+ if type(variable.type) is ArrayType:
+ length = 0
+ v = variable
+ while type(v.type) is ArrayType:
+ length += 1
+ v = v.type
+ variable_getter_args = ','.join(["uint256"]*length)
+ elif type(variable.type) is MappingType:
+ variable_getter_args = variable.type.type_from
+
+ table.add_row([f"{variable.name}({variable_getter_args})", hex(get_function_id(f"{variable.name}({variable_getter_args})"))])
txt += str(table) + '\n'
self.info(txt)
| {"golden_diff": "diff --git a/slither/printers/summary/function_ids.py b/slither/printers/summary/function_ids.py\n--- a/slither/printers/summary/function_ids.py\n+++ b/slither/printers/summary/function_ids.py\n@@ -3,6 +3,8 @@\n \"\"\"\n import collections\n from prettytable import PrettyTable\n+\n+from slither.core.solidity_types import ArrayType, MappingType\n from slither.printers.abstract_printer import AbstractPrinter\n from slither.utils.colors import blue, green, magenta\n from slither.utils.function import get_function_id\n@@ -30,7 +32,18 @@\n table.add_row([function.full_name, hex(get_function_id(function.full_name))])\n for variable in contract.state_variables:\n if variable.visibility in ['public']:\n- table.add_row([variable.name+'()', hex(get_function_id(variable.name+'()'))])\n+ variable_getter_args = \"\"\n+ if type(variable.type) is ArrayType:\n+ length = 0\n+ v = variable\n+ while type(v.type) is ArrayType:\n+ length += 1\n+ v = v.type\n+ variable_getter_args = ','.join([\"uint256\"]*length)\n+ elif type(variable.type) is MappingType:\n+ variable_getter_args = variable.type.type_from\n+\n+ table.add_row([f\"{variable.name}({variable_getter_args})\", hex(get_function_id(f\"{variable.name}({variable_getter_args})\"))])\n txt += str(table) + '\\n'\n \n self.info(txt)\n", "issue": "Bug in function-id printer\nHi there! There is an issue connected to `function-id` printer that I faced:\r\n\r\nwhen you create a dynamic array with `public` visibility modifier compiler automatically generates a getter-function with `uint256` input parameter. However, Slither thinks that the getter has no input parameters. Thus, the wrong function signature is printed in the output.\r\n\r\nHere is a small example.\r\n\r\n```\r\npragma solidity 0.5.7;\r\n\r\ncontract Example {\r\n uint256[] public example;\r\n}\r\n```\r\n\r\nSlither outputs the following table: \r\n\r\n```\r\n+-----------+------------+\r\n| Name | ID |\r\n+-----------+------------+\r\n| example() | 0x54353f2f |\r\n+-----------+------------+\r\n```\r\n\r\nHowever, the real `example()` function's signature is `0x477e4a02` in the example.\n", "before_files": [{"content": "\"\"\"\n Module printing summary of the contract\n\"\"\"\nimport collections\nfrom prettytable import PrettyTable\nfrom slither.printers.abstract_printer import AbstractPrinter\nfrom slither.utils.colors import blue, green, magenta\nfrom slither.utils.function import get_function_id\n\nclass FunctionIds(AbstractPrinter):\n\n ARGUMENT = 'function-id'\n HELP = 'Print the keccack256 signature of the functions'\n\n WIKI = 'https://github.com/trailofbits/slither/wiki/Printer-documentation#function-id'\n\n def output(self, _filename):\n \"\"\"\n _filename is not used\n Args:\n _filename(string)\n \"\"\"\n\n txt = ''\n for contract in self.slither.contracts_derived:\n txt += '\\n{}:\\n'.format(contract.name)\n table = PrettyTable(['Name', 'ID'])\n for function in contract.functions:\n if function.visibility in ['public', 'external']:\n table.add_row([function.full_name, hex(get_function_id(function.full_name))])\n for variable in contract.state_variables:\n if variable.visibility in ['public']:\n table.add_row([variable.name+'()', hex(get_function_id(variable.name+'()'))])\n txt += str(table) + '\\n'\n\n self.info(txt)\n", "path": "slither/printers/summary/function_ids.py"}], "after_files": [{"content": "\"\"\"\n Module printing summary of the contract\n\"\"\"\nimport collections\nfrom prettytable import PrettyTable\n\nfrom slither.core.solidity_types import ArrayType, 
MappingType\nfrom slither.printers.abstract_printer import AbstractPrinter\nfrom slither.utils.colors import blue, green, magenta\nfrom slither.utils.function import get_function_id\n\nclass FunctionIds(AbstractPrinter):\n\n ARGUMENT = 'function-id'\n HELP = 'Print the keccack256 signature of the functions'\n\n WIKI = 'https://github.com/trailofbits/slither/wiki/Printer-documentation#function-id'\n\n def output(self, _filename):\n \"\"\"\n _filename is not used\n Args:\n _filename(string)\n \"\"\"\n\n txt = ''\n for contract in self.slither.contracts_derived:\n txt += '\\n{}:\\n'.format(contract.name)\n table = PrettyTable(['Name', 'ID'])\n for function in contract.functions:\n if function.visibility in ['public', 'external']:\n table.add_row([function.full_name, hex(get_function_id(function.full_name))])\n for variable in contract.state_variables:\n if variable.visibility in ['public']:\n variable_getter_args = \"\"\n if type(variable.type) is ArrayType:\n length = 0\n v = variable\n while type(v.type) is ArrayType:\n length += 1\n v = v.type\n variable_getter_args = ','.join([\"uint256\"]*length)\n elif type(variable.type) is MappingType:\n variable_getter_args = variable.type.type_from\n\n table.add_row([f\"{variable.name}({variable_getter_args})\", hex(get_function_id(f\"{variable.name}({variable_getter_args})\"))])\n txt += str(table) + '\\n'\n\n self.info(txt)\n", "path": "slither/printers/summary/function_ids.py"}]} | 788 | 336 |
gh_patches_debug_5271 | rasdani/github-patches | git_diff | safe-global__safe-config-service-145 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Increase max limit for the chains endpoint
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/chains/views.py`
Content:
```
1 from rest_framework import filters
2 from rest_framework.generics import ListAPIView, RetrieveAPIView
3 from rest_framework.pagination import LimitOffsetPagination
4
5 from .models import Chain
6 from .serializers import ChainSerializer
7
8
9 class ChainsListView(ListAPIView):
10 serializer_class = ChainSerializer
11 pagination_class = LimitOffsetPagination
12 pagination_class.max_limit = 10
13 pagination_class.default_limit = 10
14 queryset = Chain.objects.all()
15 filter_backends = [filters.OrderingFilter]
16 ordering_fields = ["relevance", "name"]
17 ordering = [
18 "relevance",
19 "name",
20 ]
21
22
23 class ChainsDetailView(RetrieveAPIView):
24 serializer_class = ChainSerializer
25 queryset = Chain.objects.all()
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/chains/views.py b/src/chains/views.py
--- a/src/chains/views.py
+++ b/src/chains/views.py
@@ -9,7 +9,7 @@
class ChainsListView(ListAPIView):
serializer_class = ChainSerializer
pagination_class = LimitOffsetPagination
- pagination_class.max_limit = 10
+ pagination_class.max_limit = 100
pagination_class.default_limit = 10
queryset = Chain.objects.all()
filter_backends = [filters.OrderingFilter]
| {"golden_diff": "diff --git a/src/chains/views.py b/src/chains/views.py\n--- a/src/chains/views.py\n+++ b/src/chains/views.py\n@@ -9,7 +9,7 @@\n class ChainsListView(ListAPIView):\n serializer_class = ChainSerializer\n pagination_class = LimitOffsetPagination\n- pagination_class.max_limit = 10\n+ pagination_class.max_limit = 100\n pagination_class.default_limit = 10\n queryset = Chain.objects.all()\n filter_backends = [filters.OrderingFilter]\n", "issue": "Increase max limit for the chains endpoint\n\n", "before_files": [{"content": "from rest_framework import filters\nfrom rest_framework.generics import ListAPIView, RetrieveAPIView\nfrom rest_framework.pagination import LimitOffsetPagination\n\nfrom .models import Chain\nfrom .serializers import ChainSerializer\n\n\nclass ChainsListView(ListAPIView):\n serializer_class = ChainSerializer\n pagination_class = LimitOffsetPagination\n pagination_class.max_limit = 10\n pagination_class.default_limit = 10\n queryset = Chain.objects.all()\n filter_backends = [filters.OrderingFilter]\n ordering_fields = [\"relevance\", \"name\"]\n ordering = [\n \"relevance\",\n \"name\",\n ]\n\n\nclass ChainsDetailView(RetrieveAPIView):\n serializer_class = ChainSerializer\n queryset = Chain.objects.all()\n", "path": "src/chains/views.py"}], "after_files": [{"content": "from rest_framework import filters\nfrom rest_framework.generics import ListAPIView, RetrieveAPIView\nfrom rest_framework.pagination import LimitOffsetPagination\n\nfrom .models import Chain\nfrom .serializers import ChainSerializer\n\n\nclass ChainsListView(ListAPIView):\n serializer_class = ChainSerializer\n pagination_class = LimitOffsetPagination\n pagination_class.max_limit = 100\n pagination_class.default_limit = 10\n queryset = Chain.objects.all()\n filter_backends = [filters.OrderingFilter]\n ordering_fields = [\"relevance\", \"name\"]\n ordering = [\n \"relevance\",\n \"name\",\n ]\n\n\nclass ChainsDetailView(RetrieveAPIView):\n serializer_class = ChainSerializer\n queryset = Chain.objects.all()\n", "path": "src/chains/views.py"}]} | 463 | 114 |
gh_patches_debug_17136 | rasdani/github-patches | git_diff | netbox-community__netbox-3609 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SCRIPTS_ROOT is missing in configuration.example.py
### Change Type
[x] Addition
[ ] Correction
[ ] Deprecation
[ ] Cleanup (formatting, typos, etc.)
### Proposed Changes
Add new SCRIPTS_ROOT variable to configuration.example.py
--- END ISSUE ---
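A minimal sketch of what the requested addition could look like, mirroring the existing `REPORTS_ROOT` block in `configuration.example.py`; the path shown is an assumption based on the install-location defaults used for the other `*_ROOT` settings in that file:

```python
# Hypothetical addition to configuration.example.py (commented out like the other
# optional *_ROOT settings, since the default is derived from the install location).

# The file path where custom scripts will be stored. A trailing slash is not needed.
# SCRIPTS_ROOT = '/opt/netbox/netbox/scripts'
```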
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/netbox/configuration.example.py`
Content:
```
1 #########################
2 # #
3 # Required settings #
4 # #
5 #########################
6
7 # This is a list of valid fully-qualified domain names (FQDNs) for the NetBox server. NetBox will not permit write
8 # access to the server via any other hostnames. The first FQDN in the list will be treated as the preferred name.
9 #
10 # Example: ALLOWED_HOSTS = ['netbox.example.com', 'netbox.internal.local']
11 ALLOWED_HOSTS = []
12
13 # PostgreSQL database configuration.
14 DATABASE = {
15 'NAME': 'netbox', # Database name
16 'USER': '', # PostgreSQL username
17 'PASSWORD': '', # PostgreSQL password
18 'HOST': 'localhost', # Database server
19 'PORT': '', # Database port (leave blank for default)
20 }
21
22 # This key is used for secure generation of random numbers and strings. It must never be exposed outside of this file.
23 # For optimal security, SECRET_KEY should be at least 50 characters in length and contain a mix of letters, numbers, and
24 # symbols. NetBox will not run without this defined. For more information, see
25 # https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-SECRET_KEY
26 SECRET_KEY = ''
27
28 # Redis database settings. The Redis database is used for caching and background processing such as webhooks
29 REDIS = {
30 'HOST': 'localhost',
31 'PORT': 6379,
32 'PASSWORD': '',
33 'DATABASE': 0,
34 'CACHE_DATABASE': 1,
35 'DEFAULT_TIMEOUT': 300,
36 'SSL': False,
37 }
38
39
40 #########################
41 # #
42 # Optional settings #
43 # #
44 #########################
45
46 # Specify one or more name and email address tuples representing NetBox administrators. These people will be notified of
47 # application errors (assuming correct email settings are provided).
48 ADMINS = [
49 # ['John Doe', 'jdoe@example.com'],
50 ]
51
52 # Optionally display a persistent banner at the top and/or bottom of every page. HTML is allowed. To display the same
53 # content in both banners, define BANNER_TOP and set BANNER_BOTTOM = BANNER_TOP.
54 BANNER_TOP = ''
55 BANNER_BOTTOM = ''
56
57 # Text to include on the login page above the login form. HTML is allowed.
58 BANNER_LOGIN = ''
59
60 # Base URL path if accessing NetBox within a directory. For example, if installed at http://example.com/netbox/, set:
61 # BASE_PATH = 'netbox/'
62 BASE_PATH = ''
63
64 # Cache timeout in seconds. Set to 0 to dissable caching. Defaults to 900 (15 minutes)
65 CACHE_TIMEOUT = 900
66
67 # Maximum number of days to retain logged changes. Set to 0 to retain changes indefinitely. (Default: 90)
68 CHANGELOG_RETENTION = 90
69
70 # API Cross-Origin Resource Sharing (CORS) settings. If CORS_ORIGIN_ALLOW_ALL is set to True, all origins will be
71 # allowed. Otherwise, define a list of allowed origins using either CORS_ORIGIN_WHITELIST or
72 # CORS_ORIGIN_REGEX_WHITELIST. For more information, see https://github.com/ottoyiu/django-cors-headers
73 CORS_ORIGIN_ALLOW_ALL = False
74 CORS_ORIGIN_WHITELIST = [
75 # 'https://hostname.example.com',
76 ]
77 CORS_ORIGIN_REGEX_WHITELIST = [
78 # r'^(https?://)?(\w+\.)?example\.com$',
79 ]
80
81 # Set to True to enable server debugging. WARNING: Debugging introduces a substantial performance penalty and may reveal
82 # sensitive information about your installation. Only enable debugging while performing testing. Never enable debugging
83 # on a production system.
84 DEBUG = False
85
86 # Email settings
87 EMAIL = {
88 'SERVER': 'localhost',
89 'PORT': 25,
90 'USERNAME': '',
91 'PASSWORD': '',
92 'TIMEOUT': 10, # seconds
93 'FROM_EMAIL': '',
94 }
95
96 # Enforcement of unique IP space can be toggled on a per-VRF basis. To enforce unique IP space within the global table
97 # (all prefixes and IP addresses not assigned to a VRF), set ENFORCE_GLOBAL_UNIQUE to True.
98 ENFORCE_GLOBAL_UNIQUE = False
99
100 # Exempt certain models from the enforcement of view permissions. Models listed here will be viewable by all users and
101 # by anonymous users. List models in the form `<app>.<model>`. Add '*' to this list to exempt all models.
102 EXEMPT_VIEW_PERMISSIONS = [
103 # 'dcim.site',
104 # 'dcim.region',
105 # 'ipam.prefix',
106 ]
107
108 # Enable custom logging. Please see the Django documentation for detailed guidance on configuring custom logs:
109 # https://docs.djangoproject.com/en/1.11/topics/logging/
110 LOGGING = {}
111
112 # Setting this to True will permit only authenticated users to access any part of NetBox. By default, anonymous users
113 # are permitted to access most data in NetBox (excluding secrets) but not make any changes.
114 LOGIN_REQUIRED = False
115
116 # The length of time (in seconds) for which a user will remain logged into the web UI before being prompted to
117 # re-authenticate. (Default: 1209600 [14 days])
118 LOGIN_TIMEOUT = None
119
120 # Setting this to True will display a "maintenance mode" banner at the top of every page.
121 MAINTENANCE_MODE = False
122
123 # An API consumer can request an arbitrary number of objects =by appending the "limit" parameter to the URL (e.g.
124 # "?limit=1000"). This setting defines the maximum limit. Setting it to 0 or None will allow an API consumer to request
125 # all objects by specifying "?limit=0".
126 MAX_PAGE_SIZE = 1000
127
128 # The file path where uploaded media such as image attachments are stored. A trailing slash is not needed. Note that
129 # the default value of this setting is derived from the installed location.
130 # MEDIA_ROOT = '/opt/netbox/netbox/media'
131
132 # Expose Prometheus monitoring metrics at the HTTP endpoint '/metrics'
133 METRICS_ENABLED = False
134
135 # Credentials that NetBox will uses to authenticate to devices when connecting via NAPALM.
136 NAPALM_USERNAME = ''
137 NAPALM_PASSWORD = ''
138
139 # NAPALM timeout (in seconds). (Default: 30)
140 NAPALM_TIMEOUT = 30
141
142 # NAPALM optional arguments (see http://napalm.readthedocs.io/en/latest/support/#optional-arguments). Arguments must
143 # be provided as a dictionary.
144 NAPALM_ARGS = {}
145
146 # Determine how many objects to display per page within a list. (Default: 50)
147 PAGINATE_COUNT = 50
148
149 # When determining the primary IP address for a device, IPv6 is preferred over IPv4 by default. Set this to True to
150 # prefer IPv4 instead.
151 PREFER_IPV4 = False
152
153 # The file path where custom reports will be stored. A trailing slash is not needed. Note that the default value of
154 # this setting is derived from the installed location.
155 # REPORTS_ROOT = '/opt/netbox/netbox/reports'
156
157 # By default, NetBox will store session data in the database. Alternatively, a file path can be specified here to use
158 # local file storage instead. (This can be useful for enabling authentication on a standby instance with read-only
159 # database access.) Note that the user as which NetBox runs must have read and write permissions to this path.
160 SESSION_FILE_PATH = None
161
162 # Time zone (default: UTC)
163 TIME_ZONE = 'UTC'
164
165 # The webhooks backend is disabled by default. Set this to True to enable it. Note that this requires a Redis
166 # database be configured and accessible by NetBox.
167 WEBHOOKS_ENABLED = False
168
169 # Date/time formatting. See the following link for supported formats:
170 # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
171 DATE_FORMAT = 'N j, Y'
172 SHORT_DATE_FORMAT = 'Y-m-d'
173 TIME_FORMAT = 'g:i a'
174 SHORT_TIME_FORMAT = 'H:i:s'
175 DATETIME_FORMAT = 'N j, Y g:i a'
176 SHORT_DATETIME_FORMAT = 'Y-m-d H:i'
177
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/netbox/netbox/configuration.example.py b/netbox/netbox/configuration.example.py
--- a/netbox/netbox/configuration.example.py
+++ b/netbox/netbox/configuration.example.py
@@ -154,6 +154,10 @@
# this setting is derived from the installed location.
# REPORTS_ROOT = '/opt/netbox/netbox/reports'
+# The file path where custom scripts will be stored. A trailing slash is not needed. Note that the default value of
+# this setting is derived from the installed location.
+# SCRIPTS_ROOT = '/opt/netbox/netbox/scripts'
+
# By default, NetBox will store session data in the database. Alternatively, a file path can be specified here to use
# local file storage instead. (This can be useful for enabling authentication on a standby instance with read-only
# database access.) Note that the user as which NetBox runs must have read and write permissions to this path.
| {"golden_diff": "diff --git a/netbox/netbox/configuration.example.py b/netbox/netbox/configuration.example.py\n--- a/netbox/netbox/configuration.example.py\n+++ b/netbox/netbox/configuration.example.py\n@@ -154,6 +154,10 @@\n # this setting is derived from the installed location.\n # REPORTS_ROOT = '/opt/netbox/netbox/reports'\n \n+# The file path where custom scripts will be stored. A trailing slash is not needed. Note that the default value of\n+# this setting is derived from the installed location.\n+# SCRIPTS_ROOT = '/opt/netbox/netbox/scripts'\n+\n # By default, NetBox will store session data in the database. Alternatively, a file path can be specified here to use\n # local file storage instead. (This can be useful for enabling authentication on a standby instance with read-only\n # database access.) Note that the user as which NetBox runs must have read and write permissions to this path.\n", "issue": "SCRIPTS_ROOT is missing in configuration.example.py\n### Change Type\r\n[x] Addition\r\n[ ] Correction\r\n[ ] Deprecation\r\n[ ] Cleanup (formatting, typos, etc.)\r\n\r\n### Proposed Changes\r\nAdd new SCRIPTS_ROOT variable to configuration.example.py\n", "before_files": [{"content": "#########################\n# #\n# Required settings #\n# #\n#########################\n\n# This is a list of valid fully-qualified domain names (FQDNs) for the NetBox server. NetBox will not permit write\n# access to the server via any other hostnames. The first FQDN in the list will be treated as the preferred name.\n#\n# Example: ALLOWED_HOSTS = ['netbox.example.com', 'netbox.internal.local']\nALLOWED_HOSTS = []\n\n# PostgreSQL database configuration.\nDATABASE = {\n 'NAME': 'netbox', # Database name\n 'USER': '', # PostgreSQL username\n 'PASSWORD': '', # PostgreSQL password\n 'HOST': 'localhost', # Database server\n 'PORT': '', # Database port (leave blank for default)\n}\n\n# This key is used for secure generation of random numbers and strings. It must never be exposed outside of this file.\n# For optimal security, SECRET_KEY should be at least 50 characters in length and contain a mix of letters, numbers, and\n# symbols. NetBox will not run without this defined. For more information, see\n# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-SECRET_KEY\nSECRET_KEY = ''\n\n# Redis database settings. The Redis database is used for caching and background processing such as webhooks\nREDIS = {\n 'HOST': 'localhost',\n 'PORT': 6379,\n 'PASSWORD': '',\n 'DATABASE': 0,\n 'CACHE_DATABASE': 1,\n 'DEFAULT_TIMEOUT': 300,\n 'SSL': False,\n}\n\n\n#########################\n# #\n# Optional settings #\n# #\n#########################\n\n# Specify one or more name and email address tuples representing NetBox administrators. These people will be notified of\n# application errors (assuming correct email settings are provided).\nADMINS = [\n # ['John Doe', 'jdoe@example.com'],\n]\n\n# Optionally display a persistent banner at the top and/or bottom of every page. HTML is allowed. To display the same\n# content in both banners, define BANNER_TOP and set BANNER_BOTTOM = BANNER_TOP.\nBANNER_TOP = ''\nBANNER_BOTTOM = ''\n\n# Text to include on the login page above the login form. HTML is allowed.\nBANNER_LOGIN = ''\n\n# Base URL path if accessing NetBox within a directory. For example, if installed at http://example.com/netbox/, set:\n# BASE_PATH = 'netbox/'\nBASE_PATH = ''\n\n# Cache timeout in seconds. Set to 0 to dissable caching. 
Defaults to 900 (15 minutes)\nCACHE_TIMEOUT = 900\n\n# Maximum number of days to retain logged changes. Set to 0 to retain changes indefinitely. (Default: 90)\nCHANGELOG_RETENTION = 90\n\n# API Cross-Origin Resource Sharing (CORS) settings. If CORS_ORIGIN_ALLOW_ALL is set to True, all origins will be\n# allowed. Otherwise, define a list of allowed origins using either CORS_ORIGIN_WHITELIST or\n# CORS_ORIGIN_REGEX_WHITELIST. For more information, see https://github.com/ottoyiu/django-cors-headers\nCORS_ORIGIN_ALLOW_ALL = False\nCORS_ORIGIN_WHITELIST = [\n # 'https://hostname.example.com',\n]\nCORS_ORIGIN_REGEX_WHITELIST = [\n # r'^(https?://)?(\\w+\\.)?example\\.com$',\n]\n\n# Set to True to enable server debugging. WARNING: Debugging introduces a substantial performance penalty and may reveal\n# sensitive information about your installation. Only enable debugging while performing testing. Never enable debugging\n# on a production system.\nDEBUG = False\n\n# Email settings\nEMAIL = {\n 'SERVER': 'localhost',\n 'PORT': 25,\n 'USERNAME': '',\n 'PASSWORD': '',\n 'TIMEOUT': 10, # seconds\n 'FROM_EMAIL': '',\n}\n\n# Enforcement of unique IP space can be toggled on a per-VRF basis. To enforce unique IP space within the global table\n# (all prefixes and IP addresses not assigned to a VRF), set ENFORCE_GLOBAL_UNIQUE to True.\nENFORCE_GLOBAL_UNIQUE = False\n\n# Exempt certain models from the enforcement of view permissions. Models listed here will be viewable by all users and\n# by anonymous users. List models in the form `<app>.<model>`. Add '*' to this list to exempt all models.\nEXEMPT_VIEW_PERMISSIONS = [\n # 'dcim.site',\n # 'dcim.region',\n # 'ipam.prefix',\n]\n\n# Enable custom logging. Please see the Django documentation for detailed guidance on configuring custom logs:\n# https://docs.djangoproject.com/en/1.11/topics/logging/\nLOGGING = {}\n\n# Setting this to True will permit only authenticated users to access any part of NetBox. By default, anonymous users\n# are permitted to access most data in NetBox (excluding secrets) but not make any changes.\nLOGIN_REQUIRED = False\n\n# The length of time (in seconds) for which a user will remain logged into the web UI before being prompted to\n# re-authenticate. (Default: 1209600 [14 days])\nLOGIN_TIMEOUT = None\n\n# Setting this to True will display a \"maintenance mode\" banner at the top of every page.\nMAINTENANCE_MODE = False\n\n# An API consumer can request an arbitrary number of objects =by appending the \"limit\" parameter to the URL (e.g.\n# \"?limit=1000\"). This setting defines the maximum limit. Setting it to 0 or None will allow an API consumer to request\n# all objects by specifying \"?limit=0\".\nMAX_PAGE_SIZE = 1000\n\n# The file path where uploaded media such as image attachments are stored. A trailing slash is not needed. Note that\n# the default value of this setting is derived from the installed location.\n# MEDIA_ROOT = '/opt/netbox/netbox/media'\n\n# Expose Prometheus monitoring metrics at the HTTP endpoint '/metrics'\nMETRICS_ENABLED = False\n\n# Credentials that NetBox will uses to authenticate to devices when connecting via NAPALM.\nNAPALM_USERNAME = ''\nNAPALM_PASSWORD = ''\n\n# NAPALM timeout (in seconds). (Default: 30)\nNAPALM_TIMEOUT = 30\n\n# NAPALM optional arguments (see http://napalm.readthedocs.io/en/latest/support/#optional-arguments). Arguments must\n# be provided as a dictionary.\nNAPALM_ARGS = {}\n\n# Determine how many objects to display per page within a list. 
(Default: 50)\nPAGINATE_COUNT = 50\n\n# When determining the primary IP address for a device, IPv6 is preferred over IPv4 by default. Set this to True to\n# prefer IPv4 instead.\nPREFER_IPV4 = False\n\n# The file path where custom reports will be stored. A trailing slash is not needed. Note that the default value of\n# this setting is derived from the installed location.\n# REPORTS_ROOT = '/opt/netbox/netbox/reports'\n\n# By default, NetBox will store session data in the database. Alternatively, a file path can be specified here to use\n# local file storage instead. (This can be useful for enabling authentication on a standby instance with read-only\n# database access.) Note that the user as which NetBox runs must have read and write permissions to this path.\nSESSION_FILE_PATH = None\n\n# Time zone (default: UTC)\nTIME_ZONE = 'UTC'\n\n# The webhooks backend is disabled by default. Set this to True to enable it. Note that this requires a Redis\n# database be configured and accessible by NetBox.\nWEBHOOKS_ENABLED = False\n\n# Date/time formatting. See the following link for supported formats:\n# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date\nDATE_FORMAT = 'N j, Y'\nSHORT_DATE_FORMAT = 'Y-m-d'\nTIME_FORMAT = 'g:i a'\nSHORT_TIME_FORMAT = 'H:i:s'\nDATETIME_FORMAT = 'N j, Y g:i a'\nSHORT_DATETIME_FORMAT = 'Y-m-d H:i'\n", "path": "netbox/netbox/configuration.example.py"}], "after_files": [{"content": "#########################\n# #\n# Required settings #\n# #\n#########################\n\n# This is a list of valid fully-qualified domain names (FQDNs) for the NetBox server. NetBox will not permit write\n# access to the server via any other hostnames. The first FQDN in the list will be treated as the preferred name.\n#\n# Example: ALLOWED_HOSTS = ['netbox.example.com', 'netbox.internal.local']\nALLOWED_HOSTS = []\n\n# PostgreSQL database configuration.\nDATABASE = {\n 'NAME': 'netbox', # Database name\n 'USER': '', # PostgreSQL username\n 'PASSWORD': '', # PostgreSQL password\n 'HOST': 'localhost', # Database server\n 'PORT': '', # Database port (leave blank for default)\n}\n\n# This key is used for secure generation of random numbers and strings. It must never be exposed outside of this file.\n# For optimal security, SECRET_KEY should be at least 50 characters in length and contain a mix of letters, numbers, and\n# symbols. NetBox will not run without this defined. For more information, see\n# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-SECRET_KEY\nSECRET_KEY = ''\n\n# Redis database settings. The Redis database is used for caching and background processing such as webhooks\nREDIS = {\n 'HOST': 'localhost',\n 'PORT': 6379,\n 'PASSWORD': '',\n 'DATABASE': 0,\n 'CACHE_DATABASE': 1,\n 'DEFAULT_TIMEOUT': 300,\n 'SSL': False,\n}\n\n\n#########################\n# #\n# Optional settings #\n# #\n#########################\n\n# Specify one or more name and email address tuples representing NetBox administrators. These people will be notified of\n# application errors (assuming correct email settings are provided).\nADMINS = [\n # ['John Doe', 'jdoe@example.com'],\n]\n\n# Optionally display a persistent banner at the top and/or bottom of every page. HTML is allowed. To display the same\n# content in both banners, define BANNER_TOP and set BANNER_BOTTOM = BANNER_TOP.\nBANNER_TOP = ''\nBANNER_BOTTOM = ''\n\n# Text to include on the login page above the login form. HTML is allowed.\nBANNER_LOGIN = ''\n\n# Base URL path if accessing NetBox within a directory. 
For example, if installed at http://example.com/netbox/, set:\n# BASE_PATH = 'netbox/'\nBASE_PATH = ''\n\n# Cache timeout in seconds. Set to 0 to dissable caching. Defaults to 900 (15 minutes)\nCACHE_TIMEOUT = 900\n\n# Maximum number of days to retain logged changes. Set to 0 to retain changes indefinitely. (Default: 90)\nCHANGELOG_RETENTION = 90\n\n# API Cross-Origin Resource Sharing (CORS) settings. If CORS_ORIGIN_ALLOW_ALL is set to True, all origins will be\n# allowed. Otherwise, define a list of allowed origins using either CORS_ORIGIN_WHITELIST or\n# CORS_ORIGIN_REGEX_WHITELIST. For more information, see https://github.com/ottoyiu/django-cors-headers\nCORS_ORIGIN_ALLOW_ALL = False\nCORS_ORIGIN_WHITELIST = [\n # 'https://hostname.example.com',\n]\nCORS_ORIGIN_REGEX_WHITELIST = [\n # r'^(https?://)?(\\w+\\.)?example\\.com$',\n]\n\n# Set to True to enable server debugging. WARNING: Debugging introduces a substantial performance penalty and may reveal\n# sensitive information about your installation. Only enable debugging while performing testing. Never enable debugging\n# on a production system.\nDEBUG = False\n\n# Email settings\nEMAIL = {\n 'SERVER': 'localhost',\n 'PORT': 25,\n 'USERNAME': '',\n 'PASSWORD': '',\n 'TIMEOUT': 10, # seconds\n 'FROM_EMAIL': '',\n}\n\n# Enforcement of unique IP space can be toggled on a per-VRF basis. To enforce unique IP space within the global table\n# (all prefixes and IP addresses not assigned to a VRF), set ENFORCE_GLOBAL_UNIQUE to True.\nENFORCE_GLOBAL_UNIQUE = False\n\n# Exempt certain models from the enforcement of view permissions. Models listed here will be viewable by all users and\n# by anonymous users. List models in the form `<app>.<model>`. Add '*' to this list to exempt all models.\nEXEMPT_VIEW_PERMISSIONS = [\n # 'dcim.site',\n # 'dcim.region',\n # 'ipam.prefix',\n]\n\n# Enable custom logging. Please see the Django documentation for detailed guidance on configuring custom logs:\n# https://docs.djangoproject.com/en/1.11/topics/logging/\nLOGGING = {}\n\n# Setting this to True will permit only authenticated users to access any part of NetBox. By default, anonymous users\n# are permitted to access most data in NetBox (excluding secrets) but not make any changes.\nLOGIN_REQUIRED = False\n\n# The length of time (in seconds) for which a user will remain logged into the web UI before being prompted to\n# re-authenticate. (Default: 1209600 [14 days])\nLOGIN_TIMEOUT = None\n\n# Setting this to True will display a \"maintenance mode\" banner at the top of every page.\nMAINTENANCE_MODE = False\n\n# An API consumer can request an arbitrary number of objects =by appending the \"limit\" parameter to the URL (e.g.\n# \"?limit=1000\"). This setting defines the maximum limit. Setting it to 0 or None will allow an API consumer to request\n# all objects by specifying \"?limit=0\".\nMAX_PAGE_SIZE = 1000\n\n# The file path where uploaded media such as image attachments are stored. A trailing slash is not needed. Note that\n# the default value of this setting is derived from the installed location.\n# MEDIA_ROOT = '/opt/netbox/netbox/media'\n\n# Expose Prometheus monitoring metrics at the HTTP endpoint '/metrics'\nMETRICS_ENABLED = False\n\n# Credentials that NetBox will uses to authenticate to devices when connecting via NAPALM.\nNAPALM_USERNAME = ''\nNAPALM_PASSWORD = ''\n\n# NAPALM timeout (in seconds). (Default: 30)\nNAPALM_TIMEOUT = 30\n\n# NAPALM optional arguments (see http://napalm.readthedocs.io/en/latest/support/#optional-arguments). 
Arguments must\n# be provided as a dictionary.\nNAPALM_ARGS = {}\n\n# Determine how many objects to display per page within a list. (Default: 50)\nPAGINATE_COUNT = 50\n\n# When determining the primary IP address for a device, IPv6 is preferred over IPv4 by default. Set this to True to\n# prefer IPv4 instead.\nPREFER_IPV4 = False\n\n# The file path where custom reports will be stored. A trailing slash is not needed. Note that the default value of\n# this setting is derived from the installed location.\n# REPORTS_ROOT = '/opt/netbox/netbox/reports'\n\n# The file path where custom scripts will be stored. A trailing slash is not needed. Note that the default value of\n# this setting is derived from the installed location.\n# SCRIPTS_ROOT = '/opt/netbox/netbox/scripts'\n\n# By default, NetBox will store session data in the database. Alternatively, a file path can be specified here to use\n# local file storage instead. (This can be useful for enabling authentication on a standby instance with read-only\n# database access.) Note that the user as which NetBox runs must have read and write permissions to this path.\nSESSION_FILE_PATH = None\n\n# Time zone (default: UTC)\nTIME_ZONE = 'UTC'\n\n# The webhooks backend is disabled by default. Set this to True to enable it. Note that this requires a Redis\n# database be configured and accessible by NetBox.\nWEBHOOKS_ENABLED = False\n\n# Date/time formatting. See the following link for supported formats:\n# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date\nDATE_FORMAT = 'N j, Y'\nSHORT_DATE_FORMAT = 'Y-m-d'\nTIME_FORMAT = 'g:i a'\nSHORT_TIME_FORMAT = 'H:i:s'\nDATETIME_FORMAT = 'N j, Y g:i a'\nSHORT_DATETIME_FORMAT = 'Y-m-d H:i'\n", "path": "netbox/netbox/configuration.example.py"}]} | 2,492 | 200 |
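For context on the netbox record above: the golden diff only adds documentation for a new optional SCRIPTS_ROOT setting in configuration.example.py. A commented-out setting like this is normally resolved at startup with a default derived from the install location; the following minimal sketch illustrates that pattern (the import path and variable names are assumptions for illustration, not NetBox's actual settings code):

```python
# Hypothetical sketch: resolve an optional, commented-out configuration value.
import os

import configuration  # the user-edited configuration.py (assumed import path)

BASE_DIR = os.path.dirname(os.path.abspath(__file__))

# If the user left SCRIPTS_ROOT commented out, fall back to a path derived
# from the installed location, mirroring how REPORTS_ROOT is documented.
SCRIPTS_ROOT = getattr(configuration, 'SCRIPTS_ROOT',
                       os.path.join(BASE_DIR, 'scripts'))
```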
gh_patches_debug_24394 | rasdani/github-patches | git_diff | pulp__pulpcore-4182 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Reclaim space for repository fails with Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'."
**Version**
3.16, but probably all versions
**Describe the bug**
- Reclaim space for repository fails with the following error.
~~~
Task paused with error: "("Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.", {<ContentArtifact: pk=452959ad-c045-4e85-bf9f-6651ba37f57d>})"
~~~
**To Reproduce**
See BZ
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2169322
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/app/tasks/reclaim_space.py`
Content:
```
1 from pulpcore.app.models import (
2 Artifact,
3 Content,
4 ContentArtifact,
5 ProgressReport,
6 PublishedMetadata,
7 Repository,
8 RepositoryVersion,
9 )
10 from pulpcore.app.util import get_domain
11
12
13 def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):
14 """
15 This task frees-up disk space by removing Artifact files from the filesystem for Content
16 exclusive to the list of provided repos.
17
18 Note: content marked as `proctected` will be excluded from the reclaim disk space.
19
20 Kwargs:
21 repo_pks (list): A list of repo pks the disk reclaim space is performed on.
22 keeplist_rv_pks (list): A list of repo version pks that will be excluded from the reclaim
23 disk space.
24 force (bool): If True, uploaded content will be taken into account.
25
26 """
27 reclaimed_repos = Repository.objects.filter(pk__in=repo_pks)
28 for repo in reclaimed_repos:
29 repo.invalidate_cache(everything=True)
30
31 domain = get_domain()
32 rest_of_repos = Repository.objects.filter(pulp_domain=domain).exclude(pk__in=repo_pks)
33 c_keep_qs = Content.objects.filter(repositories__in=rest_of_repos)
34 c_reclaim_qs = Content.objects.filter(repositories__in=repo_pks)
35 c_reclaim_qs = c_reclaim_qs.exclude(
36 pk__in=c_keep_qs, pulp_type=PublishedMetadata.get_pulp_type()
37 )
38
39 if keeplist_rv_pks:
40 rv_qs = RepositoryVersion.objects.filter(pk__in=keeplist_rv_pks)
41 rv_content = Content.objects.none()
42 for rv in rv_qs.iterator():
43 rv_content |= rv.content
44 c_reclaim_qs = c_reclaim_qs.exclude(pk__in=rv_content)
45
46 content_distinct = c_reclaim_qs.distinct("pulp_type")
47 unprotected = []
48 for content in content_distinct:
49 if not content.cast().PROTECTED_FROM_RECLAIM:
50 unprotected.append(content.pulp_type)
51
52 ca_qs = ContentArtifact.objects.select_related("content", "artifact").filter(
53 content__in=c_reclaim_qs.values("pk"), artifact__isnull=False
54 )
55 if not force:
56 ca_qs = ca_qs.filter(remoteartifact__isnull=False)
57 artifact_pks = set()
58 ca_to_update = []
59 for ca in ca_qs.iterator():
60 if ca.content.pulp_type in unprotected:
61 artifact_pks.add(ca.artifact.pk)
62 ca.artifact = None
63 ca_to_update.append(ca)
64
65 ContentArtifact.objects.bulk_update(objs=ca_to_update, fields=["artifact"], batch_size=1000)
66 artifacts_to_delete = Artifact.objects.filter(pk__in=artifact_pks)
67 progress_bar = ProgressReport(
68 message="Reclaim disk space",
69 total=artifacts_to_delete.count(),
70 code="reclaim-space.artifact",
71 done=0,
72 state="running",
73 )
74 progress_bar.save()
75
76 counter = 0
77 interval = 100
78 for artifact in artifacts_to_delete.iterator():
79 # we need to manually call delete() because it cleans up the file on the filesystem
80 artifact.delete()
81 progress_bar.done += 1
82 counter += 1
83
84 if counter >= interval:
85 progress_bar.save()
86 counter = 0
87
88 progress_bar.state = "completed"
89 progress_bar.save()
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pulpcore/app/tasks/reclaim_space.py b/pulpcore/app/tasks/reclaim_space.py
--- a/pulpcore/app/tasks/reclaim_space.py
+++ b/pulpcore/app/tasks/reclaim_space.py
@@ -1,3 +1,7 @@
+from logging import getLogger
+
+from django.db.models.deletion import ProtectedError
+
from pulpcore.app.models import (
Artifact,
Content,
@@ -9,6 +13,8 @@
)
from pulpcore.app.util import get_domain
+log = getLogger(__name__)
+
def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):
"""
@@ -76,10 +82,16 @@
counter = 0
interval = 100
for artifact in artifacts_to_delete.iterator():
- # we need to manually call delete() because it cleans up the file on the filesystem
- artifact.delete()
- progress_bar.done += 1
- counter += 1
+ try:
+ # we need to manually call delete() because it cleans up the file on the filesystem
+ artifact.delete()
+ except ProtectedError as e:
+ # Rarely artifact could be shared between to different content units.
+ # Just log and skip the artifact deletion in this case
+ log.info(e)
+ else:
+ progress_bar.done += 1
+ counter += 1
if counter >= interval:
progress_bar.save()
| {"golden_diff": "diff --git a/pulpcore/app/tasks/reclaim_space.py b/pulpcore/app/tasks/reclaim_space.py\n--- a/pulpcore/app/tasks/reclaim_space.py\n+++ b/pulpcore/app/tasks/reclaim_space.py\n@@ -1,3 +1,7 @@\n+from logging import getLogger\n+\n+from django.db.models.deletion import ProtectedError\n+\n from pulpcore.app.models import (\n Artifact,\n Content,\n@@ -9,6 +13,8 @@\n )\n from pulpcore.app.util import get_domain\n \n+log = getLogger(__name__)\n+\n \n def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):\n \"\"\"\n@@ -76,10 +82,16 @@\n counter = 0\n interval = 100\n for artifact in artifacts_to_delete.iterator():\n- # we need to manually call delete() because it cleans up the file on the filesystem\n- artifact.delete()\n- progress_bar.done += 1\n- counter += 1\n+ try:\n+ # we need to manually call delete() because it cleans up the file on the filesystem\n+ artifact.delete()\n+ except ProtectedError as e:\n+ # Rarely artifact could be shared between to different content units.\n+ # Just log and skip the artifact deletion in this case\n+ log.info(e)\n+ else:\n+ progress_bar.done += 1\n+ counter += 1\n \n if counter >= interval:\n progress_bar.save()\n", "issue": "Reclaim space for repository fails with Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.\"\n**Version**\r\n3.16, but probably all versions\r\n\r\n**Describe the bug**\r\n\r\n- Reclaim space for repository fails with the following error.\r\n\r\n ~~~\r\n Task paused with error: \"(\"Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.\", {<ContentArtifact: pk=452959ad-c045-4e85-bf9f-6651ba37f57d>})\"\r\n ~~~\r\n\r\n**To Reproduce**\r\nSee BZ\r\n\r\n**Additional context**\r\nhttps://bugzilla.redhat.com/show_bug.cgi?id=2169322\r\n\n", "before_files": [{"content": "from pulpcore.app.models import (\n Artifact,\n Content,\n ContentArtifact,\n ProgressReport,\n PublishedMetadata,\n Repository,\n RepositoryVersion,\n)\nfrom pulpcore.app.util import get_domain\n\n\ndef reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):\n \"\"\"\n This task frees-up disk space by removing Artifact files from the filesystem for Content\n exclusive to the list of provided repos.\n\n Note: content marked as `proctected` will be excluded from the reclaim disk space.\n\n Kwargs:\n repo_pks (list): A list of repo pks the disk reclaim space is performed on.\n keeplist_rv_pks (list): A list of repo version pks that will be excluded from the reclaim\n disk space.\n force (bool): If True, uploaded content will be taken into account.\n\n \"\"\"\n reclaimed_repos = Repository.objects.filter(pk__in=repo_pks)\n for repo in reclaimed_repos:\n repo.invalidate_cache(everything=True)\n\n domain = get_domain()\n rest_of_repos = Repository.objects.filter(pulp_domain=domain).exclude(pk__in=repo_pks)\n c_keep_qs = Content.objects.filter(repositories__in=rest_of_repos)\n c_reclaim_qs = Content.objects.filter(repositories__in=repo_pks)\n c_reclaim_qs = c_reclaim_qs.exclude(\n pk__in=c_keep_qs, pulp_type=PublishedMetadata.get_pulp_type()\n )\n\n if keeplist_rv_pks:\n rv_qs = RepositoryVersion.objects.filter(pk__in=keeplist_rv_pks)\n rv_content = Content.objects.none()\n for rv in rv_qs.iterator():\n rv_content |= rv.content\n c_reclaim_qs = c_reclaim_qs.exclude(pk__in=rv_content)\n\n content_distinct = c_reclaim_qs.distinct(\"pulp_type\")\n unprotected = []\n for content in 
content_distinct:\n if not content.cast().PROTECTED_FROM_RECLAIM:\n unprotected.append(content.pulp_type)\n\n ca_qs = ContentArtifact.objects.select_related(\"content\", \"artifact\").filter(\n content__in=c_reclaim_qs.values(\"pk\"), artifact__isnull=False\n )\n if not force:\n ca_qs = ca_qs.filter(remoteartifact__isnull=False)\n artifact_pks = set()\n ca_to_update = []\n for ca in ca_qs.iterator():\n if ca.content.pulp_type in unprotected:\n artifact_pks.add(ca.artifact.pk)\n ca.artifact = None\n ca_to_update.append(ca)\n\n ContentArtifact.objects.bulk_update(objs=ca_to_update, fields=[\"artifact\"], batch_size=1000)\n artifacts_to_delete = Artifact.objects.filter(pk__in=artifact_pks)\n progress_bar = ProgressReport(\n message=\"Reclaim disk space\",\n total=artifacts_to_delete.count(),\n code=\"reclaim-space.artifact\",\n done=0,\n state=\"running\",\n )\n progress_bar.save()\n\n counter = 0\n interval = 100\n for artifact in artifacts_to_delete.iterator():\n # we need to manually call delete() because it cleans up the file on the filesystem\n artifact.delete()\n progress_bar.done += 1\n counter += 1\n\n if counter >= interval:\n progress_bar.save()\n counter = 0\n\n progress_bar.state = \"completed\"\n progress_bar.save()\n", "path": "pulpcore/app/tasks/reclaim_space.py"}], "after_files": [{"content": "from logging import getLogger\n\nfrom django.db.models.deletion import ProtectedError\n\nfrom pulpcore.app.models import (\n Artifact,\n Content,\n ContentArtifact,\n ProgressReport,\n PublishedMetadata,\n Repository,\n RepositoryVersion,\n)\nfrom pulpcore.app.util import get_domain\n\nlog = getLogger(__name__)\n\n\ndef reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):\n \"\"\"\n This task frees-up disk space by removing Artifact files from the filesystem for Content\n exclusive to the list of provided repos.\n\n Note: content marked as `proctected` will be excluded from the reclaim disk space.\n\n Kwargs:\n repo_pks (list): A list of repo pks the disk reclaim space is performed on.\n keeplist_rv_pks (list): A list of repo version pks that will be excluded from the reclaim\n disk space.\n force (bool): If True, uploaded content will be taken into account.\n\n \"\"\"\n reclaimed_repos = Repository.objects.filter(pk__in=repo_pks)\n for repo in reclaimed_repos:\n repo.invalidate_cache(everything=True)\n\n domain = get_domain()\n rest_of_repos = Repository.objects.filter(pulp_domain=domain).exclude(pk__in=repo_pks)\n c_keep_qs = Content.objects.filter(repositories__in=rest_of_repos)\n c_reclaim_qs = Content.objects.filter(repositories__in=repo_pks)\n c_reclaim_qs = c_reclaim_qs.exclude(\n pk__in=c_keep_qs, pulp_type=PublishedMetadata.get_pulp_type()\n )\n\n if keeplist_rv_pks:\n rv_qs = RepositoryVersion.objects.filter(pk__in=keeplist_rv_pks)\n rv_content = Content.objects.none()\n for rv in rv_qs.iterator():\n rv_content |= rv.content\n c_reclaim_qs = c_reclaim_qs.exclude(pk__in=rv_content)\n\n content_distinct = c_reclaim_qs.distinct(\"pulp_type\")\n unprotected = []\n for content in content_distinct:\n if not content.cast().PROTECTED_FROM_RECLAIM:\n unprotected.append(content.pulp_type)\n\n ca_qs = ContentArtifact.objects.select_related(\"content\", \"artifact\").filter(\n content__in=c_reclaim_qs.values(\"pk\"), artifact__isnull=False\n )\n if not force:\n ca_qs = ca_qs.filter(remoteartifact__isnull=False)\n artifact_pks = set()\n ca_to_update = []\n for ca in ca_qs.iterator():\n if ca.content.pulp_type in unprotected:\n artifact_pks.add(ca.artifact.pk)\n ca.artifact = None\n 
ca_to_update.append(ca)\n\n ContentArtifact.objects.bulk_update(objs=ca_to_update, fields=[\"artifact\"], batch_size=1000)\n artifacts_to_delete = Artifact.objects.filter(pk__in=artifact_pks)\n progress_bar = ProgressReport(\n message=\"Reclaim disk space\",\n total=artifacts_to_delete.count(),\n code=\"reclaim-space.artifact\",\n done=0,\n state=\"running\",\n )\n progress_bar.save()\n\n counter = 0\n interval = 100\n for artifact in artifacts_to_delete.iterator():\n try:\n # we need to manually call delete() because it cleans up the file on the filesystem\n artifact.delete()\n except ProtectedError as e:\n # Rarely artifact could be shared between to different content units.\n # Just log and skip the artifact deletion in this case\n log.info(e)\n else:\n progress_bar.done += 1\n counter += 1\n\n if counter >= interval:\n progress_bar.save()\n counter = 0\n\n progress_bar.state = \"completed\"\n progress_bar.save()\n", "path": "pulpcore/app/tasks/reclaim_space.py"}]} | 1,349 | 322 |
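For context on the pulpcore record above: the accepted fix tolerates Django's ProtectedError when an Artifact is still referenced by another ContentArtifact, logging and skipping it instead of failing the whole task. A minimal, self-contained sketch of that pattern follows (function and variable names are assumptions for illustration, not the actual task code):

```python
# Hypothetical sketch: skip rows protected by a foreign key during a bulk delete.
import logging

from django.db.models.deletion import ProtectedError

log = logging.getLogger(__name__)


def delete_unreferenced(artifacts):
    """Delete artifacts one by one, skipping any still protected by a FK."""
    deleted = 0
    for artifact in artifacts.iterator():
        try:
            # delete() is called per object so the model can clean up its file.
            artifact.delete()
        except ProtectedError as exc:
            # Rarely an artifact is shared by two content units; log and move on.
            log.info("Skipping protected artifact %s: %s", artifact.pk, exc)
        else:
            deleted += 1
    return deleted
```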
gh_patches_debug_43886 | rasdani/github-patches | git_diff | aws__aws-cli-2835 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Profile does not return region or output format if it contains a SPACE character
The AWS CLI tool allows you to create a profile name with a space character in it, and writes the values to the credentials and config file as expected. However, when getting that config using the quoted profile name, it doesn't return the region or default output format.
Setting the profile credentials and config:
$ aws configure --profile 'Foo Bar'
AWS Access Key ID [None]: foo
AWS Secret Access Key [None]: bar
Default region name [None]: eu-west-1
Default output format [None]: json
$ cat .aws/config
[profile Foo Bar]
output = json
region = eu-west-1
$ cat .aws/credentials
[Foo Bar]
aws_access_key_id = foo
aws_secret_access_key = bar
Checking the profile credentials and config:
$ aws configure --profile 'Foo Bar'
AWS Access Key ID [****************foo]:
AWS Secret Access Key [****************bar]:
Default region name [None]:
Default output format [None]:
CLI commands then prompt for a region when using the profile name. The **credentials** do still work, and are retrieved from the profile, but the **config**, e.g. the region, still has to be specified on the command line:
$ aws autoscaling describe-auto-scaling-groups --auto-scaling-group-names group_name --profile 'Foo Bar'
You must specify a region. You can also configure your region by running "aws configure".
When performing the same steps above with a profile name without a space character (Foo_Bar), everything works as expected.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awscli/customizations/configure/set.py`
Content:
```
1 # Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 import os
14
15 from awscli.customizations.commands import BasicCommand
16 from awscli.customizations.configure.writer import ConfigFileWriter
17
18 from . import PREDEFINED_SECTION_NAMES
19
20
21 class ConfigureSetCommand(BasicCommand):
22 NAME = 'set'
23 DESCRIPTION = BasicCommand.FROM_FILE('configure', 'set',
24 '_description.rst')
25 SYNOPSIS = 'aws configure set varname value [--profile profile-name]'
26 EXAMPLES = BasicCommand.FROM_FILE('configure', 'set', '_examples.rst')
27 ARG_TABLE = [
28 {'name': 'varname',
29 'help_text': 'The name of the config value to set.',
30 'action': 'store',
31 'cli_type_name': 'string', 'positional_arg': True},
32 {'name': 'value',
33 'help_text': 'The value to set.',
34 'action': 'store',
35 'no_paramfile': True, # To disable the default paramfile behavior
36 'cli_type_name': 'string', 'positional_arg': True},
37 ]
38 # Any variables specified in this list will be written to
39 # the ~/.aws/credentials file instead of ~/.aws/config.
40 _WRITE_TO_CREDS_FILE = ['aws_access_key_id', 'aws_secret_access_key',
41 'aws_session_token']
42
43 def __init__(self, session, config_writer=None):
44 super(ConfigureSetCommand, self).__init__(session)
45 if config_writer is None:
46 config_writer = ConfigFileWriter()
47 self._config_writer = config_writer
48
49 def _run_main(self, args, parsed_globals):
50 varname = args.varname
51 value = args.value
52 section = 'default'
53 # Before handing things off to the config writer,
54 # we need to find out three things:
55 # 1. What section we're writing to (section).
56 # 2. The name of the config key (varname)
57 # 3. The actual value (value).
58 if '.' not in varname:
59 # unqualified name, scope it to the current
60 # profile (or leave it as the 'default' section if
61 # no profile is set).
62 if self._session.profile is not None:
63 section = 'profile %s' % self._session.profile
64 else:
65 # First figure out if it's been scoped to a profile.
66 parts = varname.split('.')
67 if parts[0] in ('default', 'profile'):
68 # Then we know we're scoped to a profile.
69 if parts[0] == 'default':
70 section = 'default'
71 remaining = parts[1:]
72 else:
73 # [profile, profile_name, ...]
74 section = "profile %s" % parts[1]
75 remaining = parts[2:]
76 varname = remaining[0]
77 if len(remaining) == 2:
78 value = {remaining[1]: value}
79 elif parts[0] not in PREDEFINED_SECTION_NAMES:
80 if self._session.profile is not None:
81 section = 'profile %s' % self._session.profile
82 else:
83 profile_name = self._session.get_config_variable('profile')
84 if profile_name is not None:
85 section = profile_name
86 varname = parts[0]
87 if len(parts) == 2:
88 value = {parts[1]: value}
89 elif len(parts) == 2:
90 # Otherwise it's something like "set preview.service true"
91 # of something in the [plugin] section.
92 section, varname = parts
93 config_filename = os.path.expanduser(
94 self._session.get_config_variable('config_file'))
95 updated_config = {'__section__': section, varname: value}
96 if varname in self._WRITE_TO_CREDS_FILE:
97 config_filename = os.path.expanduser(
98 self._session.get_config_variable('credentials_file'))
99 section_name = updated_config['__section__']
100 if section_name.startswith('profile '):
101 updated_config['__section__'] = section_name[8:]
102 self._config_writer.update_config(updated_config, config_filename)
103
```
Path: `awscli/customizations/configure/__init__.py`
Content:
```
1 # Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 NOT_SET = '<not set>'
14 PREDEFINED_SECTION_NAMES = ('preview', 'plugins')
15
16
17 class ConfigValue(object):
18
19 def __init__(self, value, config_type, config_variable):
20 self.value = value
21 self.config_type = config_type
22 self.config_variable = config_variable
23
24 def mask_value(self):
25 if self.value is NOT_SET:
26 return
27 self.value = mask_value(self.value)
28
29
30 class SectionNotFoundError(Exception):
31 pass
32
33
34 def mask_value(current_value):
35 if current_value is None:
36 return 'None'
37 else:
38 return ('*' * 16) + current_value[-4:]
39
```
Path: `awscli/customizations/configure/configure.py`
Content:
```
1 # Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 import os
14 import logging
15
16 from botocore.exceptions import ProfileNotFound
17
18 from awscli.compat import compat_input
19 from awscli.customizations.commands import BasicCommand
20 from awscli.customizations.configure.addmodel import AddModelCommand
21 from awscli.customizations.configure.set import ConfigureSetCommand
22 from awscli.customizations.configure.get import ConfigureGetCommand
23 from awscli.customizations.configure.list import ConfigureListCommand
24 from awscli.customizations.configure.writer import ConfigFileWriter
25
26 from . import mask_value
27
28
29 logger = logging.getLogger(__name__)
30
31
32 def register_configure_cmd(cli):
33 cli.register('building-command-table.main',
34 ConfigureCommand.add_command)
35
36
37 class InteractivePrompter(object):
38
39 def get_value(self, current_value, config_name, prompt_text=''):
40 if config_name in ('aws_access_key_id', 'aws_secret_access_key'):
41 current_value = mask_value(current_value)
42 response = compat_input("%s [%s]: " % (prompt_text, current_value))
43 if not response:
44 # If the user hits enter, we return a value of None
45 # instead of an empty string. That way we can determine
46 # whether or not a value has changed.
47 response = None
48 return response
49
50
51 class ConfigureCommand(BasicCommand):
52 NAME = 'configure'
53 DESCRIPTION = BasicCommand.FROM_FILE()
54 SYNOPSIS = ('aws configure [--profile profile-name]')
55 EXAMPLES = (
56 'To create a new configuration::\n'
57 '\n'
58 ' $ aws configure\n'
59 ' AWS Access Key ID [None]: accesskey\n'
60 ' AWS Secret Access Key [None]: secretkey\n'
61 ' Default region name [None]: us-west-2\n'
62 ' Default output format [None]:\n'
63 '\n'
64 'To update just the region name::\n'
65 '\n'
66 ' $ aws configure\n'
67 ' AWS Access Key ID [****]:\n'
68 ' AWS Secret Access Key [****]:\n'
69 ' Default region name [us-west-1]: us-west-2\n'
70 ' Default output format [None]:\n'
71 )
72 SUBCOMMANDS = [
73 {'name': 'list', 'command_class': ConfigureListCommand},
74 {'name': 'get', 'command_class': ConfigureGetCommand},
75 {'name': 'set', 'command_class': ConfigureSetCommand},
76 {'name': 'add-model', 'command_class': AddModelCommand}
77 ]
78
79 # If you want to add new values to prompt, update this list here.
80 VALUES_TO_PROMPT = [
81 # (logical_name, config_name, prompt_text)
82 ('aws_access_key_id', "AWS Access Key ID"),
83 ('aws_secret_access_key', "AWS Secret Access Key"),
84 ('region', "Default region name"),
85 ('output', "Default output format"),
86 ]
87
88 def __init__(self, session, prompter=None, config_writer=None):
89 super(ConfigureCommand, self).__init__(session)
90 if prompter is None:
91 prompter = InteractivePrompter()
92 self._prompter = prompter
93 if config_writer is None:
94 config_writer = ConfigFileWriter()
95 self._config_writer = config_writer
96
97 def _run_main(self, parsed_args, parsed_globals):
98 # Called when invoked with no args "aws configure"
99 new_values = {}
100 # This is the config from the config file scoped to a specific
101 # profile.
102 try:
103 config = self._session.get_scoped_config()
104 except ProfileNotFound:
105 config = {}
106 for config_name, prompt_text in self.VALUES_TO_PROMPT:
107 current_value = config.get(config_name)
108 new_value = self._prompter.get_value(current_value, config_name,
109 prompt_text)
110 if new_value is not None and new_value != current_value:
111 new_values[config_name] = new_value
112 config_filename = os.path.expanduser(
113 self._session.get_config_variable('config_file'))
114 if new_values:
115 self._write_out_creds_file_values(new_values,
116 parsed_globals.profile)
117 if parsed_globals.profile is not None:
118 new_values['__section__'] = (
119 'profile %s' % parsed_globals.profile)
120 self._config_writer.update_config(new_values, config_filename)
121
122 def _write_out_creds_file_values(self, new_values, profile_name):
123 # The access_key/secret_key are now *always* written to the shared
124 # credentials file (~/.aws/credentials), see aws/aws-cli#847.
125 # post-conditions: ~/.aws/credentials will have the updated credential
126 # file values and new_values will have the cred vars removed.
127 credential_file_values = {}
128 if 'aws_access_key_id' in new_values:
129 credential_file_values['aws_access_key_id'] = new_values.pop(
130 'aws_access_key_id')
131 if 'aws_secret_access_key' in new_values:
132 credential_file_values['aws_secret_access_key'] = new_values.pop(
133 'aws_secret_access_key')
134 if credential_file_values:
135 if profile_name is not None:
136 credential_file_values['__section__'] = profile_name
137 shared_credentials_filename = os.path.expanduser(
138 self._session.get_config_variable('credentials_file'))
139 self._config_writer.update_config(
140 credential_file_values,
141 shared_credentials_filename)
142
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/awscli/customizations/configure/__init__.py b/awscli/customizations/configure/__init__.py
--- a/awscli/customizations/configure/__init__.py
+++ b/awscli/customizations/configure/__init__.py
@@ -10,8 +10,12 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
+import string
+from botocore.vendored.six.moves import shlex_quote
+
NOT_SET = '<not set>'
PREDEFINED_SECTION_NAMES = ('preview', 'plugins')
+_WHITESPACE = ' \t'
class ConfigValue(object):
@@ -36,3 +40,10 @@
return 'None'
else:
return ('*' * 16) + current_value[-4:]
+
+
+def profile_to_section(profile_name):
+ """Converts a profile name to a section header to be used in the config."""
+ if any(c in _WHITESPACE for c in profile_name):
+ profile_name = shlex_quote(profile_name)
+ return 'profile %s' % profile_name
diff --git a/awscli/customizations/configure/configure.py b/awscli/customizations/configure/configure.py
--- a/awscli/customizations/configure/configure.py
+++ b/awscli/customizations/configure/configure.py
@@ -23,7 +23,7 @@
from awscli.customizations.configure.list import ConfigureListCommand
from awscli.customizations.configure.writer import ConfigFileWriter
-from . import mask_value
+from . import mask_value, profile_to_section
logger = logging.getLogger(__name__)
@@ -115,8 +115,8 @@
self._write_out_creds_file_values(new_values,
parsed_globals.profile)
if parsed_globals.profile is not None:
- new_values['__section__'] = (
- 'profile %s' % parsed_globals.profile)
+ section = profile_to_section(parsed_globals.profile)
+ new_values['__section__'] = section
self._config_writer.update_config(new_values, config_filename)
def _write_out_creds_file_values(self, new_values, profile_name):
diff --git a/awscli/customizations/configure/set.py b/awscli/customizations/configure/set.py
--- a/awscli/customizations/configure/set.py
+++ b/awscli/customizations/configure/set.py
@@ -15,7 +15,7 @@
from awscli.customizations.commands import BasicCommand
from awscli.customizations.configure.writer import ConfigFileWriter
-from . import PREDEFINED_SECTION_NAMES
+from . import PREDEFINED_SECTION_NAMES, profile_to_section
class ConfigureSetCommand(BasicCommand):
@@ -60,7 +60,7 @@
# profile (or leave it as the 'default' section if
# no profile is set).
if self._session.profile is not None:
- section = 'profile %s' % self._session.profile
+ section = profile_to_section(self._session.profile)
else:
# First figure out if it's been scoped to a profile.
parts = varname.split('.')
@@ -71,14 +71,14 @@
remaining = parts[1:]
else:
# [profile, profile_name, ...]
- section = "profile %s" % parts[1]
+ section = profile_to_section(parts[1])
remaining = parts[2:]
varname = remaining[0]
if len(remaining) == 2:
value = {remaining[1]: value}
elif parts[0] not in PREDEFINED_SECTION_NAMES:
if self._session.profile is not None:
- section = 'profile %s' % self._session.profile
+ section = profile_to_section(self._session.profile)
else:
profile_name = self._session.get_config_variable('profile')
if profile_name is not None:
| {"golden_diff": "diff --git a/awscli/customizations/configure/__init__.py b/awscli/customizations/configure/__init__.py\n--- a/awscli/customizations/configure/__init__.py\n+++ b/awscli/customizations/configure/__init__.py\n@@ -10,8 +10,12 @@\n # distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n # ANY KIND, either express or implied. See the License for the specific\n # language governing permissions and limitations under the License.\n+import string\n+from botocore.vendored.six.moves import shlex_quote\n+\n NOT_SET = '<not set>'\n PREDEFINED_SECTION_NAMES = ('preview', 'plugins')\n+_WHITESPACE = ' \\t'\n \n \n class ConfigValue(object):\n@@ -36,3 +40,10 @@\n return 'None'\n else:\n return ('*' * 16) + current_value[-4:]\n+\n+\n+def profile_to_section(profile_name):\n+ \"\"\"Converts a profile name to a section header to be used in the config.\"\"\"\n+ if any(c in _WHITESPACE for c in profile_name):\n+ profile_name = shlex_quote(profile_name)\n+ return 'profile %s' % profile_name\ndiff --git a/awscli/customizations/configure/configure.py b/awscli/customizations/configure/configure.py\n--- a/awscli/customizations/configure/configure.py\n+++ b/awscli/customizations/configure/configure.py\n@@ -23,7 +23,7 @@\n from awscli.customizations.configure.list import ConfigureListCommand\n from awscli.customizations.configure.writer import ConfigFileWriter\n \n-from . import mask_value\n+from . import mask_value, profile_to_section\n \n \n logger = logging.getLogger(__name__)\n@@ -115,8 +115,8 @@\n self._write_out_creds_file_values(new_values,\n parsed_globals.profile)\n if parsed_globals.profile is not None:\n- new_values['__section__'] = (\n- 'profile %s' % parsed_globals.profile)\n+ section = profile_to_section(parsed_globals.profile)\n+ new_values['__section__'] = section\n self._config_writer.update_config(new_values, config_filename)\n \n def _write_out_creds_file_values(self, new_values, profile_name):\ndiff --git a/awscli/customizations/configure/set.py b/awscli/customizations/configure/set.py\n--- a/awscli/customizations/configure/set.py\n+++ b/awscli/customizations/configure/set.py\n@@ -15,7 +15,7 @@\n from awscli.customizations.commands import BasicCommand\n from awscli.customizations.configure.writer import ConfigFileWriter\n \n-from . import PREDEFINED_SECTION_NAMES\n+from . 
import PREDEFINED_SECTION_NAMES, profile_to_section\n \n \n class ConfigureSetCommand(BasicCommand):\n@@ -60,7 +60,7 @@\n # profile (or leave it as the 'default' section if\n # no profile is set).\n if self._session.profile is not None:\n- section = 'profile %s' % self._session.profile\n+ section = profile_to_section(self._session.profile)\n else:\n # First figure out if it's been scoped to a profile.\n parts = varname.split('.')\n@@ -71,14 +71,14 @@\n remaining = parts[1:]\n else:\n # [profile, profile_name, ...]\n- section = \"profile %s\" % parts[1]\n+ section = profile_to_section(parts[1])\n remaining = parts[2:]\n varname = remaining[0]\n if len(remaining) == 2:\n value = {remaining[1]: value}\n elif parts[0] not in PREDEFINED_SECTION_NAMES:\n if self._session.profile is not None:\n- section = 'profile %s' % self._session.profile\n+ section = profile_to_section(self._session.profile)\n else:\n profile_name = self._session.get_config_variable('profile')\n if profile_name is not None:\n", "issue": "Profile does not return region or output format if it contains a SPACE character\nThe AWS CLI tool allows you to create a profile name with a space character in it, and writes the values to the credentials and config file as expected. However when getting that config using the quoted profile name, it doesn't return the region or default output format.\r\n\r\nSetting the profile credentials and config:\r\n\r\n $ aws configure --profile 'Foo Bar'\r\n AWS Access Key ID [None]: foo\r\n AWS Secret Access Key [None]: bar\r\n Default region name [None]: eu-west-1\r\n Default output format [None]: json\r\n\r\n $ cat .aws/config\r\n [profile Foo Bar]\r\n output = json\r\n region = eu-west-1\r\n\r\n $ cat .aws/credentials\r\n [Foo Bar]\r\n aws_access_key_id = foo\r\n aws_secret_access_key = bar\r\n\r\n\r\nChecking the profile credentials and config:\r\n\r\n $ aws configure --profile 'Foo Bar'\r\n AWS Access Key ID [****************foo]:\r\n AWS Secret Access Key [****************bar]:\r\n Default region name [None]:\r\n Default output format [None]:\r\n\r\nCLI commands then prompt for a region when using the profile name. The **credentials** do still work, and are retrieved from the profile, but the **config** ,e.g. the region, has to be specified on the command line still:\r\n\r\n $ aws autoscaling describe-auto-scaling-groups --auto-scaling-group-names group_name --profile 'Foo Bar'\r\n You must specify a region. You can also configure your region by running \"aws configure\".\r\n\r\nWhen performing the same steps above with a profile name without a space character (Foo_Bar), everything works as expected.\n", "before_files": [{"content": "# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport os\n\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.customizations.configure.writer import ConfigFileWriter\n\nfrom . 
import PREDEFINED_SECTION_NAMES\n\n\nclass ConfigureSetCommand(BasicCommand):\n NAME = 'set'\n DESCRIPTION = BasicCommand.FROM_FILE('configure', 'set',\n '_description.rst')\n SYNOPSIS = 'aws configure set varname value [--profile profile-name]'\n EXAMPLES = BasicCommand.FROM_FILE('configure', 'set', '_examples.rst')\n ARG_TABLE = [\n {'name': 'varname',\n 'help_text': 'The name of the config value to set.',\n 'action': 'store',\n 'cli_type_name': 'string', 'positional_arg': True},\n {'name': 'value',\n 'help_text': 'The value to set.',\n 'action': 'store',\n 'no_paramfile': True, # To disable the default paramfile behavior\n 'cli_type_name': 'string', 'positional_arg': True},\n ]\n # Any variables specified in this list will be written to\n # the ~/.aws/credentials file instead of ~/.aws/config.\n _WRITE_TO_CREDS_FILE = ['aws_access_key_id', 'aws_secret_access_key',\n 'aws_session_token']\n\n def __init__(self, session, config_writer=None):\n super(ConfigureSetCommand, self).__init__(session)\n if config_writer is None:\n config_writer = ConfigFileWriter()\n self._config_writer = config_writer\n\n def _run_main(self, args, parsed_globals):\n varname = args.varname\n value = args.value\n section = 'default'\n # Before handing things off to the config writer,\n # we need to find out three things:\n # 1. What section we're writing to (section).\n # 2. The name of the config key (varname)\n # 3. The actual value (value).\n if '.' not in varname:\n # unqualified name, scope it to the current\n # profile (or leave it as the 'default' section if\n # no profile is set).\n if self._session.profile is not None:\n section = 'profile %s' % self._session.profile\n else:\n # First figure out if it's been scoped to a profile.\n parts = varname.split('.')\n if parts[0] in ('default', 'profile'):\n # Then we know we're scoped to a profile.\n if parts[0] == 'default':\n section = 'default'\n remaining = parts[1:]\n else:\n # [profile, profile_name, ...]\n section = \"profile %s\" % parts[1]\n remaining = parts[2:]\n varname = remaining[0]\n if len(remaining) == 2:\n value = {remaining[1]: value}\n elif parts[0] not in PREDEFINED_SECTION_NAMES:\n if self._session.profile is not None:\n section = 'profile %s' % self._session.profile\n else:\n profile_name = self._session.get_config_variable('profile')\n if profile_name is not None:\n section = profile_name\n varname = parts[0]\n if len(parts) == 2:\n value = {parts[1]: value}\n elif len(parts) == 2:\n # Otherwise it's something like \"set preview.service true\"\n # of something in the [plugin] section.\n section, varname = parts\n config_filename = os.path.expanduser(\n self._session.get_config_variable('config_file'))\n updated_config = {'__section__': section, varname: value}\n if varname in self._WRITE_TO_CREDS_FILE:\n config_filename = os.path.expanduser(\n self._session.get_config_variable('credentials_file'))\n section_name = updated_config['__section__']\n if section_name.startswith('profile '):\n updated_config['__section__'] = section_name[8:]\n self._config_writer.update_config(updated_config, config_filename)\n", "path": "awscli/customizations/configure/set.py"}, {"content": "# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. 
This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nNOT_SET = '<not set>'\nPREDEFINED_SECTION_NAMES = ('preview', 'plugins')\n\n\nclass ConfigValue(object):\n\n def __init__(self, value, config_type, config_variable):\n self.value = value\n self.config_type = config_type\n self.config_variable = config_variable\n\n def mask_value(self):\n if self.value is NOT_SET:\n return\n self.value = mask_value(self.value)\n\n\nclass SectionNotFoundError(Exception):\n pass\n\n\ndef mask_value(current_value):\n if current_value is None:\n return 'None'\n else:\n return ('*' * 16) + current_value[-4:]\n", "path": "awscli/customizations/configure/__init__.py"}, {"content": "# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport os\nimport logging\n\nfrom botocore.exceptions import ProfileNotFound\n\nfrom awscli.compat import compat_input\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.customizations.configure.addmodel import AddModelCommand\nfrom awscli.customizations.configure.set import ConfigureSetCommand\nfrom awscli.customizations.configure.get import ConfigureGetCommand\nfrom awscli.customizations.configure.list import ConfigureListCommand\nfrom awscli.customizations.configure.writer import ConfigFileWriter\n\nfrom . import mask_value\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef register_configure_cmd(cli):\n cli.register('building-command-table.main',\n ConfigureCommand.add_command)\n\n\nclass InteractivePrompter(object):\n\n def get_value(self, current_value, config_name, prompt_text=''):\n if config_name in ('aws_access_key_id', 'aws_secret_access_key'):\n current_value = mask_value(current_value)\n response = compat_input(\"%s [%s]: \" % (prompt_text, current_value))\n if not response:\n # If the user hits enter, we return a value of None\n # instead of an empty string. 
That way we can determine\n # whether or not a value has changed.\n response = None\n return response\n\n\nclass ConfigureCommand(BasicCommand):\n NAME = 'configure'\n DESCRIPTION = BasicCommand.FROM_FILE()\n SYNOPSIS = ('aws configure [--profile profile-name]')\n EXAMPLES = (\n 'To create a new configuration::\\n'\n '\\n'\n ' $ aws configure\\n'\n ' AWS Access Key ID [None]: accesskey\\n'\n ' AWS Secret Access Key [None]: secretkey\\n'\n ' Default region name [None]: us-west-2\\n'\n ' Default output format [None]:\\n'\n '\\n'\n 'To update just the region name::\\n'\n '\\n'\n ' $ aws configure\\n'\n ' AWS Access Key ID [****]:\\n'\n ' AWS Secret Access Key [****]:\\n'\n ' Default region name [us-west-1]: us-west-2\\n'\n ' Default output format [None]:\\n'\n )\n SUBCOMMANDS = [\n {'name': 'list', 'command_class': ConfigureListCommand},\n {'name': 'get', 'command_class': ConfigureGetCommand},\n {'name': 'set', 'command_class': ConfigureSetCommand},\n {'name': 'add-model', 'command_class': AddModelCommand}\n ]\n\n # If you want to add new values to prompt, update this list here.\n VALUES_TO_PROMPT = [\n # (logical_name, config_name, prompt_text)\n ('aws_access_key_id', \"AWS Access Key ID\"),\n ('aws_secret_access_key', \"AWS Secret Access Key\"),\n ('region', \"Default region name\"),\n ('output', \"Default output format\"),\n ]\n\n def __init__(self, session, prompter=None, config_writer=None):\n super(ConfigureCommand, self).__init__(session)\n if prompter is None:\n prompter = InteractivePrompter()\n self._prompter = prompter\n if config_writer is None:\n config_writer = ConfigFileWriter()\n self._config_writer = config_writer\n\n def _run_main(self, parsed_args, parsed_globals):\n # Called when invoked with no args \"aws configure\"\n new_values = {}\n # This is the config from the config file scoped to a specific\n # profile.\n try:\n config = self._session.get_scoped_config()\n except ProfileNotFound:\n config = {}\n for config_name, prompt_text in self.VALUES_TO_PROMPT:\n current_value = config.get(config_name)\n new_value = self._prompter.get_value(current_value, config_name,\n prompt_text)\n if new_value is not None and new_value != current_value:\n new_values[config_name] = new_value\n config_filename = os.path.expanduser(\n self._session.get_config_variable('config_file'))\n if new_values:\n self._write_out_creds_file_values(new_values,\n parsed_globals.profile)\n if parsed_globals.profile is not None:\n new_values['__section__'] = (\n 'profile %s' % parsed_globals.profile)\n self._config_writer.update_config(new_values, config_filename)\n\n def _write_out_creds_file_values(self, new_values, profile_name):\n # The access_key/secret_key are now *always* written to the shared\n # credentials file (~/.aws/credentials), see aws/aws-cli#847.\n # post-conditions: ~/.aws/credentials will have the updated credential\n # file values and new_values will have the cred vars removed.\n credential_file_values = {}\n if 'aws_access_key_id' in new_values:\n credential_file_values['aws_access_key_id'] = new_values.pop(\n 'aws_access_key_id')\n if 'aws_secret_access_key' in new_values:\n credential_file_values['aws_secret_access_key'] = new_values.pop(\n 'aws_secret_access_key')\n if credential_file_values:\n if profile_name is not None:\n credential_file_values['__section__'] = profile_name\n shared_credentials_filename = os.path.expanduser(\n self._session.get_config_variable('credentials_file'))\n self._config_writer.update_config(\n credential_file_values,\n shared_credentials_filename)\n", 
"path": "awscli/customizations/configure/configure.py"}], "after_files": [{"content": "# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport os\n\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.customizations.configure.writer import ConfigFileWriter\n\nfrom . import PREDEFINED_SECTION_NAMES, profile_to_section\n\n\nclass ConfigureSetCommand(BasicCommand):\n NAME = 'set'\n DESCRIPTION = BasicCommand.FROM_FILE('configure', 'set',\n '_description.rst')\n SYNOPSIS = 'aws configure set varname value [--profile profile-name]'\n EXAMPLES = BasicCommand.FROM_FILE('configure', 'set', '_examples.rst')\n ARG_TABLE = [\n {'name': 'varname',\n 'help_text': 'The name of the config value to set.',\n 'action': 'store',\n 'cli_type_name': 'string', 'positional_arg': True},\n {'name': 'value',\n 'help_text': 'The value to set.',\n 'action': 'store',\n 'no_paramfile': True, # To disable the default paramfile behavior\n 'cli_type_name': 'string', 'positional_arg': True},\n ]\n # Any variables specified in this list will be written to\n # the ~/.aws/credentials file instead of ~/.aws/config.\n _WRITE_TO_CREDS_FILE = ['aws_access_key_id', 'aws_secret_access_key',\n 'aws_session_token']\n\n def __init__(self, session, config_writer=None):\n super(ConfigureSetCommand, self).__init__(session)\n if config_writer is None:\n config_writer = ConfigFileWriter()\n self._config_writer = config_writer\n\n def _run_main(self, args, parsed_globals):\n varname = args.varname\n value = args.value\n section = 'default'\n # Before handing things off to the config writer,\n # we need to find out three things:\n # 1. What section we're writing to (section).\n # 2. The name of the config key (varname)\n # 3. The actual value (value).\n if '.' 
not in varname:\n # unqualified name, scope it to the current\n # profile (or leave it as the 'default' section if\n # no profile is set).\n if self._session.profile is not None:\n section = profile_to_section(self._session.profile)\n else:\n # First figure out if it's been scoped to a profile.\n parts = varname.split('.')\n if parts[0] in ('default', 'profile'):\n # Then we know we're scoped to a profile.\n if parts[0] == 'default':\n section = 'default'\n remaining = parts[1:]\n else:\n # [profile, profile_name, ...]\n section = profile_to_section(parts[1])\n remaining = parts[2:]\n varname = remaining[0]\n if len(remaining) == 2:\n value = {remaining[1]: value}\n elif parts[0] not in PREDEFINED_SECTION_NAMES:\n if self._session.profile is not None:\n section = profile_to_section(self._session.profile)\n else:\n profile_name = self._session.get_config_variable('profile')\n if profile_name is not None:\n section = profile_name\n varname = parts[0]\n if len(parts) == 2:\n value = {parts[1]: value}\n elif len(parts) == 2:\n # Otherwise it's something like \"set preview.service true\"\n # of something in the [plugin] section.\n section, varname = parts\n config_filename = os.path.expanduser(\n self._session.get_config_variable('config_file'))\n updated_config = {'__section__': section, varname: value}\n if varname in self._WRITE_TO_CREDS_FILE:\n config_filename = os.path.expanduser(\n self._session.get_config_variable('credentials_file'))\n section_name = updated_config['__section__']\n if section_name.startswith('profile '):\n updated_config['__section__'] = section_name[8:]\n self._config_writer.update_config(updated_config, config_filename)\n", "path": "awscli/customizations/configure/set.py"}, {"content": "# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport string\nfrom botocore.vendored.six.moves import shlex_quote\n\nNOT_SET = '<not set>'\nPREDEFINED_SECTION_NAMES = ('preview', 'plugins')\n_WHITESPACE = ' \\t'\n\n\nclass ConfigValue(object):\n\n def __init__(self, value, config_type, config_variable):\n self.value = value\n self.config_type = config_type\n self.config_variable = config_variable\n\n def mask_value(self):\n if self.value is NOT_SET:\n return\n self.value = mask_value(self.value)\n\n\nclass SectionNotFoundError(Exception):\n pass\n\n\ndef mask_value(current_value):\n if current_value is None:\n return 'None'\n else:\n return ('*' * 16) + current_value[-4:]\n\n\ndef profile_to_section(profile_name):\n \"\"\"Converts a profile name to a section header to be used in the config.\"\"\"\n if any(c in _WHITESPACE for c in profile_name):\n profile_name = shlex_quote(profile_name)\n return 'profile %s' % profile_name\n", "path": "awscli/customizations/configure/__init__.py"}, {"content": "# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. 
A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport os\nimport logging\n\nfrom botocore.exceptions import ProfileNotFound\n\nfrom awscli.compat import compat_input\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.customizations.configure.addmodel import AddModelCommand\nfrom awscli.customizations.configure.set import ConfigureSetCommand\nfrom awscli.customizations.configure.get import ConfigureGetCommand\nfrom awscli.customizations.configure.list import ConfigureListCommand\nfrom awscli.customizations.configure.writer import ConfigFileWriter\n\nfrom . import mask_value, profile_to_section\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef register_configure_cmd(cli):\n cli.register('building-command-table.main',\n ConfigureCommand.add_command)\n\n\nclass InteractivePrompter(object):\n\n def get_value(self, current_value, config_name, prompt_text=''):\n if config_name in ('aws_access_key_id', 'aws_secret_access_key'):\n current_value = mask_value(current_value)\n response = compat_input(\"%s [%s]: \" % (prompt_text, current_value))\n if not response:\n # If the user hits enter, we return a value of None\n # instead of an empty string. That way we can determine\n # whether or not a value has changed.\n response = None\n return response\n\n\nclass ConfigureCommand(BasicCommand):\n NAME = 'configure'\n DESCRIPTION = BasicCommand.FROM_FILE()\n SYNOPSIS = ('aws configure [--profile profile-name]')\n EXAMPLES = (\n 'To create a new configuration::\\n'\n '\\n'\n ' $ aws configure\\n'\n ' AWS Access Key ID [None]: accesskey\\n'\n ' AWS Secret Access Key [None]: secretkey\\n'\n ' Default region name [None]: us-west-2\\n'\n ' Default output format [None]:\\n'\n '\\n'\n 'To update just the region name::\\n'\n '\\n'\n ' $ aws configure\\n'\n ' AWS Access Key ID [****]:\\n'\n ' AWS Secret Access Key [****]:\\n'\n ' Default region name [us-west-1]: us-west-2\\n'\n ' Default output format [None]:\\n'\n )\n SUBCOMMANDS = [\n {'name': 'list', 'command_class': ConfigureListCommand},\n {'name': 'get', 'command_class': ConfigureGetCommand},\n {'name': 'set', 'command_class': ConfigureSetCommand},\n {'name': 'add-model', 'command_class': AddModelCommand}\n ]\n\n # If you want to add new values to prompt, update this list here.\n VALUES_TO_PROMPT = [\n # (logical_name, config_name, prompt_text)\n ('aws_access_key_id', \"AWS Access Key ID\"),\n ('aws_secret_access_key', \"AWS Secret Access Key\"),\n ('region', \"Default region name\"),\n ('output', \"Default output format\"),\n ]\n\n def __init__(self, session, prompter=None, config_writer=None):\n super(ConfigureCommand, self).__init__(session)\n if prompter is None:\n prompter = InteractivePrompter()\n self._prompter = prompter\n if config_writer is None:\n config_writer = ConfigFileWriter()\n self._config_writer = config_writer\n\n def _run_main(self, parsed_args, parsed_globals):\n # Called when invoked with no args \"aws configure\"\n new_values = {}\n # This is the config from the config file scoped to a specific\n # profile.\n try:\n config = self._session.get_scoped_config()\n except ProfileNotFound:\n config = {}\n for config_name, prompt_text in self.VALUES_TO_PROMPT:\n current_value = config.get(config_name)\n 
new_value = self._prompter.get_value(current_value, config_name,\n prompt_text)\n if new_value is not None and new_value != current_value:\n new_values[config_name] = new_value\n config_filename = os.path.expanduser(\n self._session.get_config_variable('config_file'))\n if new_values:\n self._write_out_creds_file_values(new_values,\n parsed_globals.profile)\n if parsed_globals.profile is not None:\n section = profile_to_section(parsed_globals.profile)\n new_values['__section__'] = section\n self._config_writer.update_config(new_values, config_filename)\n\n def _write_out_creds_file_values(self, new_values, profile_name):\n # The access_key/secret_key are now *always* written to the shared\n # credentials file (~/.aws/credentials), see aws/aws-cli#847.\n # post-conditions: ~/.aws/credentials will have the updated credential\n # file values and new_values will have the cred vars removed.\n credential_file_values = {}\n if 'aws_access_key_id' in new_values:\n credential_file_values['aws_access_key_id'] = new_values.pop(\n 'aws_access_key_id')\n if 'aws_secret_access_key' in new_values:\n credential_file_values['aws_secret_access_key'] = new_values.pop(\n 'aws_secret_access_key')\n if credential_file_values:\n if profile_name is not None:\n credential_file_values['__section__'] = profile_name\n shared_credentials_filename = os.path.expanduser(\n self._session.get_config_variable('credentials_file'))\n self._config_writer.update_config(\n credential_file_values,\n shared_credentials_filename)\n", "path": "awscli/customizations/configure/configure.py"}]} | 3,793 | 860 |
gh_patches_debug_18769 | rasdani/github-patches | git_diff | streamlit__streamlit-1884 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
New streamlit.beta_set_page_config
# Summary
I called `streamlit.beta_set_page_config` once at the beginning of the code. It works at the start, but when using a slider in the app, it reports the error:
`StreamlitAPIException: beta_set_page_config() can only be called once per app, and must be called as the first Streamlit command in your script.`
# Steps to reproduce
What are the steps we should take to reproduce the bug:
1. Go to 'https://nimani.diplomacy.edu:8502/'
2. Click on the 'Isolation Forest' radio button in the sidebar menu
3. Try to change the 'Contamination' slider in the sidebar menu
## Expected behavior:
It should show the updated Plotly table.
## Actual behavior:
It reports the error:
`StreamlitAPIException: beta_set_page_config() can only be called once per app, and must be called as the first Streamlit command in your script.`
## Is this a regression?
That is, did this use to work the way you expected in the past?
No, this feature did not exist in earlier versions, since it is a new beta feature.
# Debug info
- Streamlit version: 0.65.1
- Python version: 3.7.7
- Using Conda? PipEnv? PyEnv? Pex? No
- OS version: Ubuntu 18.04
- Browser version: Google Chrome, Version 84.0.4147.125
# Additional information
If needed, add any other context about the problem here. For example, did this bug come from https://discuss.streamlit.io or another site? Link the original source here!
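As a minimal reproduction sketch (editorial addition, not part of the original report; the file name and widget values are illustrative), the following script hits the same exception: the initial run succeeds, but moving the slider reruns the script and the very same `beta_set_page_config()` call then raises.

```python
# repro_app.py (hypothetical name); run with: streamlit run repro_app.py
import streamlit as st

# First Streamlit command in the script, as required; works on the initial run.
st.beta_set_page_config(page_title="Repro", layout="wide")

# Interacting with the slider reruns the whole script; in the affected
# version (0.65.1) that rerun raises StreamlitAPIException on the call above.
contamination = st.sidebar.slider("Contamination", 0.0, 0.5, 0.1)
st.write("contamination =", contamination)
```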
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/streamlit/report_thread.py`
Content:
```
1 # Copyright 2018-2020 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import threading
16
17 from streamlit.logger import get_logger
18 from streamlit.errors import StreamlitAPIException
19
20 LOGGER = get_logger(__name__)
21
22
23 class ReportContext(object):
24 def __init__(
25 self,
26 session_id,
27 enqueue,
28 query_string,
29 widgets,
30 widget_ids_this_run,
31 uploaded_file_mgr,
32 ):
33 """Construct a ReportContext.
34
35 Parameters
36 ----------
37 session_id : str
38 The ReportSession's id.
39 enqueue : callable
40 Function that enqueues ForwardMsg protos in the websocket.
41 query_string : str
42 The URL query string for this run.
43 widgets : Widgets
44 The Widgets state object for the report.
45 widget_ids_this_run : _WidgetIDSet
46 The set of widget IDs that have been assigned in the
47 current report run. This set is cleared at the start of each run.
48 uploaded_file_mgr : UploadedFileManager
49 The manager for files uploaded by all users.
50
51 """
52 # (dict) Mapping of container (type str or BlockPath) to top-level
53 # cursor (type AbstractCursor).
54 self.cursors = {}
55 self.session_id = session_id
56 self._enqueue = enqueue
57 self.query_string = query_string
58 self.widgets = widgets
59 self.widget_ids_this_run = widget_ids_this_run
60 self.uploaded_file_mgr = uploaded_file_mgr
61
62 # set_page_config is allowed at most once, as the very first st.command
63 self._set_page_config_allowed = True
64
65 def reset(self, query_string=""):
66 self.cursors = {}
67 self.widget_ids_this_run.clear()
68 self.query_string = query_string
69
70 def enqueue(self, msg):
71 if msg.HasField("page_config_changed") and not self._set_page_config_allowed:
72 raise StreamlitAPIException(
73 "`beta_set_page_config()` can only be called once per app, "
74 + "and must be called as the first Streamlit command in your script.\n\n"
75 + "For more information refer to the [docs]"
76 + "(https://docs.streamlit.io/en/stable/api.html#streamlit.beta_set_page_config)."
77 )
78
79 if msg.HasField("delta") or msg.HasField("page_config_changed"):
80 self._set_page_config_allowed = False
81
82 self._enqueue(msg)
83
84
85 class _WidgetIDSet(object):
86 """Stores a set of widget IDs. Safe to mutate from multiple threads."""
87
88 def __init__(self):
89 self._lock = threading.Lock()
90 self._items = set()
91
92 def clear(self):
93 """Clears all items in the set."""
94 with self._lock:
95 self._items.clear()
96
97 def add(self, item):
98 """Adds an item to the set.
99
100 Parameters
101 ----------
102 item : Any
103 The item to add.
104
105 Returns
106 -------
107 bool
108 True if the item was added, and False if it was already in
109 the set.
110
111 """
112 with self._lock:
113 if item in self._items:
114 return False
115 self._items.add(item)
116 return True
117
118
119 REPORT_CONTEXT_ATTR_NAME = "streamlit_report_ctx"
120
121
122 class ReportThread(threading.Thread):
123 """Extends threading.Thread with a ReportContext member"""
124
125 def __init__(
126 self,
127 session_id,
128 enqueue,
129 query_string,
130 widgets,
131 uploaded_file_mgr=None,
132 target=None,
133 name=None,
134 ):
135 """Construct a ReportThread.
136
137 Parameters
138 ----------
139 session_id : str
140 The ReportSession's id.
141 enqueue : callable
142 Function that enqueues ForwardMsg protos in the websocket.
143 query_string : str
144 The URL query string for this run.
145 widgets : Widgets
146 The Widgets state object for the report.
147 uploaded_file_mgr : UploadedFileManager
148 The manager for files uploaded by all users.
149 target : callable
150 The callable object to be invoked by the run() method.
151 Defaults to None, meaning nothing is called.
152 name : str
153 The thread name. By default, a unique name is constructed of
154 the form "Thread-N" where N is a small decimal number.
155
156 """
157 super(ReportThread, self).__init__(target=target, name=name)
158 self.streamlit_report_ctx = ReportContext(
159 session_id=session_id,
160 enqueue=enqueue,
161 query_string=query_string,
162 widgets=widgets,
163 uploaded_file_mgr=uploaded_file_mgr,
164 widget_ids_this_run=_WidgetIDSet(),
165 )
166
167
168 def add_report_ctx(thread=None, ctx=None):
169 """Adds the current ReportContext to a newly-created thread.
170
171 This should be called from this thread's parent thread,
172 before the new thread starts.
173
174 Parameters
175 ----------
176 thread : threading.Thread
177 The thread to attach the current ReportContext to.
178 ctx : ReportContext or None
179 The ReportContext to add, or None to use the current thread's
180 ReportContext.
181
182 Returns
183 -------
184 threading.Thread
185 The same thread that was passed in, for chaining.
186
187 """
188 if thread is None:
189 thread = threading.current_thread()
190 if ctx is None:
191 ctx = get_report_ctx()
192 if ctx is not None:
193 setattr(thread, REPORT_CONTEXT_ATTR_NAME, ctx)
194 return thread
195
196
197 def get_report_ctx():
198 """
199 Returns
200 -------
201 ReportContext | None
202 The current thread's ReportContext, or None if it doesn't have one.
203
204 """
205 thread = threading.current_thread()
206 ctx = getattr(thread, REPORT_CONTEXT_ATTR_NAME, None)
207 if ctx is None and streamlit._is_running_with_streamlit:
208 # Only warn about a missing ReportContext if we were started
209 # via `streamlit run`. Otherwise, the user is likely running a
210 # script "bare", and doesn't need to be warned about streamlit
211 # bits that are irrelevant when not connected to a report.
212 LOGGER.warning("Thread '%s': missing ReportContext" % thread.name)
213 return ctx
214
215
216 # Needed to avoid circular dependencies while running tests.
217 import streamlit
218
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/streamlit/report_thread.py b/lib/streamlit/report_thread.py
--- a/lib/streamlit/report_thread.py
+++ b/lib/streamlit/report_thread.py
@@ -58,7 +58,6 @@
self.widgets = widgets
self.widget_ids_this_run = widget_ids_this_run
self.uploaded_file_mgr = uploaded_file_mgr
-
# set_page_config is allowed at most once, as the very first st.command
self._set_page_config_allowed = True
@@ -66,6 +65,8 @@
self.cursors = {}
self.widget_ids_this_run.clear()
self.query_string = query_string
+ # Permit set_page_config when the ReportContext is reused on a rerun
+ self._set_page_config_allowed = True
def enqueue(self, msg):
if msg.HasField("page_config_changed") and not self._set_page_config_allowed:
| {"golden_diff": "diff --git a/lib/streamlit/report_thread.py b/lib/streamlit/report_thread.py\n--- a/lib/streamlit/report_thread.py\n+++ b/lib/streamlit/report_thread.py\n@@ -58,7 +58,6 @@\n self.widgets = widgets\n self.widget_ids_this_run = widget_ids_this_run\n self.uploaded_file_mgr = uploaded_file_mgr\n-\n # set_page_config is allowed at most once, as the very first st.command\n self._set_page_config_allowed = True\n \n@@ -66,6 +65,8 @@\n self.cursors = {}\n self.widget_ids_this_run.clear()\n self.query_string = query_string\n+ # Permit set_page_config when the ReportContext is reused on a rerun\n+ self._set_page_config_allowed = True\n \n def enqueue(self, msg):\n if msg.HasField(\"page_config_changed\") and not self._set_page_config_allowed:\n", "issue": "New streamlit.beta_page_config\n# Summary\r\n\r\nI called streamlit.beta_page_config once at the beginning of the code, It works at start, but, when using slider in app, it reports the error:\r\n`StreamlitAPIException: beta_set_page_config() can only be called once per app, and must be called as the first Streamlit command in your script.\r\n`\r\n# Steps to reproduce\r\n\r\nWhat are the steps we should take to reproduce the bug:\r\n\r\n1. Go to 'https://nimani.diplomacy.edu:8502/'\r\n2. Click on 'Isolation Forest ' radio button from sidebar menu\r\n3. try to change slider 'Contamination' on sidebar menu\r\n\r\n## Expected behavior:\r\n\r\nIt should show updated plotly table\r\n\r\n## Actual behavior:\r\n\r\nIt reports error:\r\n`StreamlitAPIException: beta_set_page_config() can only be called once per app, and must be called as the first Streamlit command in your script.\r\n\r\n## Is this a regression?\r\n\r\nThat is, did this use to work the way you expected in the past?\r\nno, feature did not exist in earlier versions, since it is a new beta feature\r\n\r\n# Debug info\r\n\r\n- Streamlit version: 0.65.1\r\n- Python version: 3.7.7\r\n- Using Conda? PipEnv? PyEnv? Pex? No\r\n- OS version: Ubuntu 18.04\r\n- Browser version: Google Chrome, Version 84.0.4147.125\r\n\r\n# Additional information\r\n\r\nIf needed, add any other context about the problem here. For example, did this bug come from https://discuss.streamlit.io or another site? 
Link the original source here!\r\n\n", "before_files": [{"content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport threading\n\nfrom streamlit.logger import get_logger\nfrom streamlit.errors import StreamlitAPIException\n\nLOGGER = get_logger(__name__)\n\n\nclass ReportContext(object):\n def __init__(\n self,\n session_id,\n enqueue,\n query_string,\n widgets,\n widget_ids_this_run,\n uploaded_file_mgr,\n ):\n \"\"\"Construct a ReportContext.\n\n Parameters\n ----------\n session_id : str\n The ReportSession's id.\n enqueue : callable\n Function that enqueues ForwardMsg protos in the websocket.\n query_string : str\n The URL query string for this run.\n widgets : Widgets\n The Widgets state object for the report.\n widget_ids_this_run : _WidgetIDSet\n The set of widget IDs that have been assigned in the\n current report run. This set is cleared at the start of each run.\n uploaded_file_mgr : UploadedFileManager\n The manager for files uploaded by all users.\n\n \"\"\"\n # (dict) Mapping of container (type str or BlockPath) to top-level\n # cursor (type AbstractCursor).\n self.cursors = {}\n self.session_id = session_id\n self._enqueue = enqueue\n self.query_string = query_string\n self.widgets = widgets\n self.widget_ids_this_run = widget_ids_this_run\n self.uploaded_file_mgr = uploaded_file_mgr\n\n # set_page_config is allowed at most once, as the very first st.command\n self._set_page_config_allowed = True\n\n def reset(self, query_string=\"\"):\n self.cursors = {}\n self.widget_ids_this_run.clear()\n self.query_string = query_string\n\n def enqueue(self, msg):\n if msg.HasField(\"page_config_changed\") and not self._set_page_config_allowed:\n raise StreamlitAPIException(\n \"`beta_set_page_config()` can only be called once per app, \"\n + \"and must be called as the first Streamlit command in your script.\\n\\n\"\n + \"For more information refer to the [docs]\"\n + \"(https://docs.streamlit.io/en/stable/api.html#streamlit.beta_set_page_config).\"\n )\n\n if msg.HasField(\"delta\") or msg.HasField(\"page_config_changed\"):\n self._set_page_config_allowed = False\n\n self._enqueue(msg)\n\n\nclass _WidgetIDSet(object):\n \"\"\"Stores a set of widget IDs. 
Safe to mutate from multiple threads.\"\"\"\n\n def __init__(self):\n self._lock = threading.Lock()\n self._items = set()\n\n def clear(self):\n \"\"\"Clears all items in the set.\"\"\"\n with self._lock:\n self._items.clear()\n\n def add(self, item):\n \"\"\"Adds an item to the set.\n\n Parameters\n ----------\n item : Any\n The item to add.\n\n Returns\n -------\n bool\n True if the item was added, and False if it was already in\n the set.\n\n \"\"\"\n with self._lock:\n if item in self._items:\n return False\n self._items.add(item)\n return True\n\n\nREPORT_CONTEXT_ATTR_NAME = \"streamlit_report_ctx\"\n\n\nclass ReportThread(threading.Thread):\n \"\"\"Extends threading.Thread with a ReportContext member\"\"\"\n\n def __init__(\n self,\n session_id,\n enqueue,\n query_string,\n widgets,\n uploaded_file_mgr=None,\n target=None,\n name=None,\n ):\n \"\"\"Construct a ReportThread.\n\n Parameters\n ----------\n session_id : str\n The ReportSession's id.\n enqueue : callable\n Function that enqueues ForwardMsg protos in the websocket.\n query_string : str\n The URL query string for this run.\n widgets : Widgets\n The Widgets state object for the report.\n uploaded_file_mgr : UploadedFileManager\n The manager for files uploaded by all users.\n target : callable\n The callable object to be invoked by the run() method.\n Defaults to None, meaning nothing is called.\n name : str\n The thread name. By default, a unique name is constructed of\n the form \"Thread-N\" where N is a small decimal number.\n\n \"\"\"\n super(ReportThread, self).__init__(target=target, name=name)\n self.streamlit_report_ctx = ReportContext(\n session_id=session_id,\n enqueue=enqueue,\n query_string=query_string,\n widgets=widgets,\n uploaded_file_mgr=uploaded_file_mgr,\n widget_ids_this_run=_WidgetIDSet(),\n )\n\n\ndef add_report_ctx(thread=None, ctx=None):\n \"\"\"Adds the current ReportContext to a newly-created thread.\n\n This should be called from this thread's parent thread,\n before the new thread starts.\n\n Parameters\n ----------\n thread : threading.Thread\n The thread to attach the current ReportContext to.\n ctx : ReportContext or None\n The ReportContext to add, or None to use the current thread's\n ReportContext.\n\n Returns\n -------\n threading.Thread\n The same thread that was passed in, for chaining.\n\n \"\"\"\n if thread is None:\n thread = threading.current_thread()\n if ctx is None:\n ctx = get_report_ctx()\n if ctx is not None:\n setattr(thread, REPORT_CONTEXT_ATTR_NAME, ctx)\n return thread\n\n\ndef get_report_ctx():\n \"\"\"\n Returns\n -------\n ReportContext | None\n The current thread's ReportContext, or None if it doesn't have one.\n\n \"\"\"\n thread = threading.current_thread()\n ctx = getattr(thread, REPORT_CONTEXT_ATTR_NAME, None)\n if ctx is None and streamlit._is_running_with_streamlit:\n # Only warn about a missing ReportContext if we were started\n # via `streamlit run`. 
Otherwise, the user is likely running a\n # script \"bare\", and doesn't need to be warned about streamlit\n # bits that are irrelevant when not connected to a report.\n LOGGER.warning(\"Thread '%s': missing ReportContext\" % thread.name)\n return ctx\n\n\n# Needed to avoid circular dependencies while running tests.\nimport streamlit\n", "path": "lib/streamlit/report_thread.py"}], "after_files": [{"content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport threading\n\nfrom streamlit.logger import get_logger\nfrom streamlit.errors import StreamlitAPIException\n\nLOGGER = get_logger(__name__)\n\n\nclass ReportContext(object):\n def __init__(\n self,\n session_id,\n enqueue,\n query_string,\n widgets,\n widget_ids_this_run,\n uploaded_file_mgr,\n ):\n \"\"\"Construct a ReportContext.\n\n Parameters\n ----------\n session_id : str\n The ReportSession's id.\n enqueue : callable\n Function that enqueues ForwardMsg protos in the websocket.\n query_string : str\n The URL query string for this run.\n widgets : Widgets\n The Widgets state object for the report.\n widget_ids_this_run : _WidgetIDSet\n The set of widget IDs that have been assigned in the\n current report run. This set is cleared at the start of each run.\n uploaded_file_mgr : UploadedFileManager\n The manager for files uploaded by all users.\n\n \"\"\"\n # (dict) Mapping of container (type str or BlockPath) to top-level\n # cursor (type AbstractCursor).\n self.cursors = {}\n self.session_id = session_id\n self._enqueue = enqueue\n self.query_string = query_string\n self.widgets = widgets\n self.widget_ids_this_run = widget_ids_this_run\n self.uploaded_file_mgr = uploaded_file_mgr\n # set_page_config is allowed at most once, as the very first st.command\n self._set_page_config_allowed = True\n\n def reset(self, query_string=\"\"):\n self.cursors = {}\n self.widget_ids_this_run.clear()\n self.query_string = query_string\n # Permit set_page_config when the ReportContext is reused on a rerun\n self._set_page_config_allowed = True\n\n def enqueue(self, msg):\n if msg.HasField(\"page_config_changed\") and not self._set_page_config_allowed:\n raise StreamlitAPIException(\n \"`beta_set_page_config()` can only be called once per app, \"\n + \"and must be called as the first Streamlit command in your script.\\n\\n\"\n + \"For more information refer to the [docs]\"\n + \"(https://docs.streamlit.io/en/stable/api.html#streamlit.beta_set_page_config).\"\n )\n\n if msg.HasField(\"delta\") or msg.HasField(\"page_config_changed\"):\n self._set_page_config_allowed = False\n\n self._enqueue(msg)\n\n\nclass _WidgetIDSet(object):\n \"\"\"Stores a set of widget IDs. 
Safe to mutate from multiple threads.\"\"\"\n\n def __init__(self):\n self._lock = threading.Lock()\n self._items = set()\n\n def clear(self):\n \"\"\"Clears all items in the set.\"\"\"\n with self._lock:\n self._items.clear()\n\n def add(self, item):\n \"\"\"Adds an item to the set.\n\n Parameters\n ----------\n item : Any\n The item to add.\n\n Returns\n -------\n bool\n True if the item was added, and False if it was already in\n the set.\n\n \"\"\"\n with self._lock:\n if item in self._items:\n return False\n self._items.add(item)\n return True\n\n\nREPORT_CONTEXT_ATTR_NAME = \"streamlit_report_ctx\"\n\n\nclass ReportThread(threading.Thread):\n \"\"\"Extends threading.Thread with a ReportContext member\"\"\"\n\n def __init__(\n self,\n session_id,\n enqueue,\n query_string,\n widgets,\n uploaded_file_mgr=None,\n target=None,\n name=None,\n ):\n \"\"\"Construct a ReportThread.\n\n Parameters\n ----------\n session_id : str\n The ReportSession's id.\n enqueue : callable\n Function that enqueues ForwardMsg protos in the websocket.\n query_string : str\n The URL query string for this run.\n widgets : Widgets\n The Widgets state object for the report.\n uploaded_file_mgr : UploadedFileManager\n The manager for files uploaded by all users.\n target : callable\n The callable object to be invoked by the run() method.\n Defaults to None, meaning nothing is called.\n name : str\n The thread name. By default, a unique name is constructed of\n the form \"Thread-N\" where N is a small decimal number.\n\n \"\"\"\n super(ReportThread, self).__init__(target=target, name=name)\n self.streamlit_report_ctx = ReportContext(\n session_id=session_id,\n enqueue=enqueue,\n query_string=query_string,\n widgets=widgets,\n uploaded_file_mgr=uploaded_file_mgr,\n widget_ids_this_run=_WidgetIDSet(),\n )\n\n\ndef add_report_ctx(thread=None, ctx=None):\n \"\"\"Adds the current ReportContext to a newly-created thread.\n\n This should be called from this thread's parent thread,\n before the new thread starts.\n\n Parameters\n ----------\n thread : threading.Thread\n The thread to attach the current ReportContext to.\n ctx : ReportContext or None\n The ReportContext to add, or None to use the current thread's\n ReportContext.\n\n Returns\n -------\n threading.Thread\n The same thread that was passed in, for chaining.\n\n \"\"\"\n if thread is None:\n thread = threading.current_thread()\n if ctx is None:\n ctx = get_report_ctx()\n if ctx is not None:\n setattr(thread, REPORT_CONTEXT_ATTR_NAME, ctx)\n return thread\n\n\ndef get_report_ctx():\n \"\"\"\n Returns\n -------\n ReportContext | None\n The current thread's ReportContext, or None if it doesn't have one.\n\n \"\"\"\n thread = threading.current_thread()\n ctx = getattr(thread, REPORT_CONTEXT_ATTR_NAME, None)\n if ctx is None and streamlit._is_running_with_streamlit:\n # Only warn about a missing ReportContext if we were started\n # via `streamlit run`. Otherwise, the user is likely running a\n # script \"bare\", and doesn't need to be warned about streamlit\n # bits that are irrelevant when not connected to a report.\n LOGGER.warning(\"Thread '%s': missing ReportContext\" % thread.name)\n return ctx\n\n\n# Needed to avoid circular dependencies while running tests.\nimport streamlit\n", "path": "lib/streamlit/report_thread.py"}]} | 2,608 | 195 |
gh_patches_debug_12942 | rasdani/github-patches | git_diff | goauthentik__authentik-9474 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
http-basic-auth headers for external OAuth source
**Describe your question**
Is there a way to have authentik send http-basic-auth headers (as per [RFC6749 2.3.1](https://datatracker.ietf.org/doc/html/rfc6749#section-2.3.1)) for external IdPs? I can't find any settings regarding this and it doesn't seem to do that by default.
**Relevant info**
An external IdP we use as a login source updated their underlying software. They now require applications to send http-basic-auth headers for OIDC client authentication. Before the update, login worked just fine.
Now we receive an error message: "Authentication failed: Could not retrieve token." and the logs show an HTTP 401 Unauthorized error, when trying to reach the IdP's token endpoint.
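For illustration only (not from the original report; the URL and credentials are placeholders), the difference between the two client-authentication styles at the token endpoint looks roughly like this with `requests`. Authentik currently puts the client credentials in the POST body, while an IdP enforcing RFC 6749 section 2.3.1 (`client_secret_basic`) expects an `Authorization: Basic base64(client_id:client_secret)` header:

```python
import requests

token_url = "https://external.idp.example/oauth2/token"  # placeholder
data = {
    "grant_type": "authorization_code",
    "code": "...",
    "redirect_uri": "https://authentik.example/callback",  # placeholder
}

# Credentials in the form body (what the source currently sends):
requests.post(token_url, data={**data, "client_id": "my-client", "client_secret": "s3cret"})

# Credentials as an HTTP Basic auth header (what the updated IdP requires);
# requests builds the Authorization: Basic header from the auth tuple:
requests.post(token_url, data=data, auth=("my-client", "s3cret"))
```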
**Logs**
server-1:
{
"auth_via": "unauthenticated",
"domain_url": "[authentik.idp]",
"event": "Unable to fetch access token",
"exc": "HTTPError('401 Client Error: Unauthorized for url: https://[external.idp]/oauth2/token')",
"host": "[authentik.idp]",
"level": "warning",
"logger": "authentik.sources.oauth.clients.oauth2",
"pid": 55,
"request_id": "51bca021eac7412bb2e54233753761cf",
"response": "401 Client Error: Unauthorized for url: https://[external.idp]/oauth2/token",
"schema_name": "public",
"timestamp": "2024-04-15T11:22:40.705924"
}
Note that [authentik.idp] and [external.idp] are redacted placeholders.
**Version and Deployment:**
- authentik version: 2024.2.2
- Deployment: docker-compose
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `authentik/sources/oauth/clients/oauth2.py`
Content:
```
1 """OAuth 2 Clients"""
2
3 from json import loads
4 from typing import Any
5 from urllib.parse import parse_qsl
6
7 from django.utils.crypto import constant_time_compare, get_random_string
8 from django.utils.translation import gettext as _
9 from requests.exceptions import RequestException
10 from requests.models import Response
11 from structlog.stdlib import get_logger
12
13 from authentik.sources.oauth.clients.base import BaseOAuthClient
14
15 LOGGER = get_logger()
16 SESSION_KEY_OAUTH_PKCE = "authentik/sources/oauth/pkce"
17
18
19 class OAuth2Client(BaseOAuthClient):
20 """OAuth2 Client"""
21
22 _default_headers = {
23 "Accept": "application/json",
24 }
25
26 def get_request_arg(self, key: str, default: Any | None = None) -> Any:
27 """Depending on request type, get data from post or get"""
28 if self.request.method == "POST":
29 return self.request.POST.get(key, default)
30 return self.request.GET.get(key, default)
31
32 def check_application_state(self) -> bool:
33 """Check optional state parameter."""
34 stored = self.request.session.get(self.session_key, None)
35 returned = self.get_request_arg("state", None)
36 check = False
37 if stored is not None:
38 if returned is not None:
39 check = constant_time_compare(stored, returned)
40 else:
41 LOGGER.warning("No state parameter returned by the source.")
42 else:
43 LOGGER.warning("No state stored in the session.")
44 return check
45
46 def get_application_state(self) -> str:
47 """Generate state optional parameter."""
48 return get_random_string(32)
49
50 def get_client_id(self) -> str:
51 """Get client id"""
52 return self.source.consumer_key
53
54 def get_client_secret(self) -> str:
55 """Get client secret"""
56 return self.source.consumer_secret
57
58 def get_access_token(self, **request_kwargs) -> dict[str, Any] | None:
59 """Fetch access token from callback request."""
60 callback = self.request.build_absolute_uri(self.callback or self.request.path)
61 if not self.check_application_state():
62 LOGGER.warning("Application state check failed.")
63 return {"error": "State check failed."}
64 code = self.get_request_arg("code", None)
65 if not code:
66 LOGGER.warning("No code returned by the source")
67 error = self.get_request_arg("error", None)
68 error_desc = self.get_request_arg("error_description", None)
69 return {"error": error_desc or error or _("No token received.")}
70 args = {
71 "client_id": self.get_client_id(),
72 "client_secret": self.get_client_secret(),
73 "redirect_uri": callback,
74 "code": code,
75 "grant_type": "authorization_code",
76 }
77 if SESSION_KEY_OAUTH_PKCE in self.request.session:
78 args["code_verifier"] = self.request.session[SESSION_KEY_OAUTH_PKCE]
79 try:
80 access_token_url = self.source.source_type.access_token_url or ""
81 if self.source.source_type.urls_customizable and self.source.access_token_url:
82 access_token_url = self.source.access_token_url
83 response = self.session.request(
84 "post", access_token_url, data=args, headers=self._default_headers, **request_kwargs
85 )
86 response.raise_for_status()
87 except RequestException as exc:
88 LOGGER.warning(
89 "Unable to fetch access token",
90 exc=exc,
91 response=exc.response.text if exc.response else str(exc),
92 )
93 return None
94 return response.json()
95
96 def get_redirect_args(self) -> dict[str, str]:
97 """Get request parameters for redirect url."""
98 callback = self.request.build_absolute_uri(self.callback)
99 client_id: str = self.get_client_id()
100 args: dict[str, str] = {
101 "client_id": client_id,
102 "redirect_uri": callback,
103 "response_type": "code",
104 }
105 state = self.get_application_state()
106 if state is not None:
107 args["state"] = state
108 self.request.session[self.session_key] = state
109 return args
110
111 def parse_raw_token(self, raw_token: str) -> dict[str, Any]:
112 """Parse token and secret from raw token response."""
113 # Load as json first then parse as query string
114 try:
115 token_data = loads(raw_token)
116 except ValueError:
117 return dict(parse_qsl(raw_token))
118 return token_data
119
120 def do_request(self, method: str, url: str, **kwargs) -> Response:
121 """Build remote url request. Constructs necessary auth."""
122 if "token" in kwargs:
123 token = kwargs.pop("token")
124
125 params = kwargs.get("params", {})
126 params["access_token"] = token["access_token"]
127 kwargs["params"] = params
128
129 headers = kwargs.get("headers", {})
130 headers["Authorization"] = f"{token['token_type']} {token['access_token']}"
131 kwargs["headers"] = headers
132 return super().do_request(method, url, **kwargs)
133
134 @property
135 def session_key(self):
136 return f"oauth-client-{self.source.name}-request-state"
137
138
139 class UserprofileHeaderAuthClient(OAuth2Client):
140 """OAuth client which only sends authentication via header, not querystring"""
141
142 def get_profile_info(self, token: dict[str, str]) -> dict[str, Any] | None:
143 "Fetch user profile information."
144 profile_url = self.source.source_type.profile_url or ""
145 if self.source.source_type.urls_customizable and self.source.profile_url:
146 profile_url = self.source.profile_url
147 response = self.session.request(
148 "get",
149 profile_url,
150 headers={"Authorization": f"{token['token_type']} {token['access_token']}"},
151 )
152 try:
153 response.raise_for_status()
154 except RequestException as exc:
155 LOGGER.warning(
156 "Unable to fetch user profile",
157 exc=exc,
158 response=exc.response.text if exc.response else str(exc),
159 )
160 return None
161 return response.json()
162
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/authentik/sources/oauth/clients/oauth2.py b/authentik/sources/oauth/clients/oauth2.py
--- a/authentik/sources/oauth/clients/oauth2.py
+++ b/authentik/sources/oauth/clients/oauth2.py
@@ -80,7 +80,7 @@
access_token_url = self.source.source_type.access_token_url or ""
if self.source.source_type.urls_customizable and self.source.access_token_url:
access_token_url = self.source.access_token_url
- response = self.session.request(
+ response = self.do_request(
"post", access_token_url, data=args, headers=self._default_headers, **request_kwargs
)
response.raise_for_status()
| {"golden_diff": "diff --git a/authentik/sources/oauth/clients/oauth2.py b/authentik/sources/oauth/clients/oauth2.py\n--- a/authentik/sources/oauth/clients/oauth2.py\n+++ b/authentik/sources/oauth/clients/oauth2.py\n@@ -80,7 +80,7 @@\n access_token_url = self.source.source_type.access_token_url or \"\"\n if self.source.source_type.urls_customizable and self.source.access_token_url:\n access_token_url = self.source.access_token_url\n- response = self.session.request(\n+ response = self.do_request(\n \"post\", access_token_url, data=args, headers=self._default_headers, **request_kwargs\n )\n response.raise_for_status()\n", "issue": "http-basic-auth headers for external OAuth source\n**Describe your question**\r\n Is there a way to have authentik send http-basic-auth headers (as per [RFC6749 2.3.1](https://datatracker.ietf.org/doc/html/rfc6749#section-2.3.1)) for external IdPs? I can't find any settings regarding this and it doesn't seem to do that by default.\r\n\r\n**Relevant info**\r\nAn external IdP we use as a login source updated their underlying software. They now require applications to send http-basic-auth headers for OIDC client authentication. Before the update, login worked just fine.\r\nNow we receive an error message: \"Authentication failed: Could not retrieve token.\" and the logs show an HTTP 401 Unauthorized error, when trying to reach the IdP's token endpoint.\r\n\r\n**Logs**\r\nserver-1:\r\n{\r\n \"auth_via\": \"unauthenticated\",\r\n \"domain_url\": \"[authentik.idp]\", \r\n \"event\": \"Unable to fetch access token\", \r\n \"exc\": \"HTTPError('401 Client Error: Unauthorized for url: https://[external.idp]/oauth2/token')\",\r\n \"host\": \"[authentik.idp]\",\r\n \"level\": \"warning\",\r\n \"logger\": \"authentik.sources.oauth.clients.oauth2\",\r\n \"pid\": 55,\r\n \"request_id\": \"51bca021eac7412bb2e54233753761cf\",\r\n \"response\": \"401 Client Error: Unauthorized for url: https://[external.idp]/oauth2/token\",\r\n \"schema_name\": \"public\",\r\n \"timestamp\": \"2024-04-15T11:22:40.705924\"\r\n}\r\n\r\nNote that [url.idp] is redacted.\r\n\r\n**Version and Deployment:**\r\n\r\n- authentik version: 2024.2.2\r\n- Deployment: docker-compose\n", "before_files": [{"content": "\"\"\"OAuth 2 Clients\"\"\"\n\nfrom json import loads\nfrom typing import Any\nfrom urllib.parse import parse_qsl\n\nfrom django.utils.crypto import constant_time_compare, get_random_string\nfrom django.utils.translation import gettext as _\nfrom requests.exceptions import RequestException\nfrom requests.models import Response\nfrom structlog.stdlib import get_logger\n\nfrom authentik.sources.oauth.clients.base import BaseOAuthClient\n\nLOGGER = get_logger()\nSESSION_KEY_OAUTH_PKCE = \"authentik/sources/oauth/pkce\"\n\n\nclass OAuth2Client(BaseOAuthClient):\n \"\"\"OAuth2 Client\"\"\"\n\n _default_headers = {\n \"Accept\": \"application/json\",\n }\n\n def get_request_arg(self, key: str, default: Any | None = None) -> Any:\n \"\"\"Depending on request type, get data from post or get\"\"\"\n if self.request.method == \"POST\":\n return self.request.POST.get(key, default)\n return self.request.GET.get(key, default)\n\n def check_application_state(self) -> bool:\n \"\"\"Check optional state parameter.\"\"\"\n stored = self.request.session.get(self.session_key, None)\n returned = self.get_request_arg(\"state\", None)\n check = False\n if stored is not None:\n if returned is not None:\n check = constant_time_compare(stored, returned)\n else:\n LOGGER.warning(\"No state parameter returned by the 
source.\")\n else:\n LOGGER.warning(\"No state stored in the session.\")\n return check\n\n def get_application_state(self) -> str:\n \"\"\"Generate state optional parameter.\"\"\"\n return get_random_string(32)\n\n def get_client_id(self) -> str:\n \"\"\"Get client id\"\"\"\n return self.source.consumer_key\n\n def get_client_secret(self) -> str:\n \"\"\"Get client secret\"\"\"\n return self.source.consumer_secret\n\n def get_access_token(self, **request_kwargs) -> dict[str, Any] | None:\n \"\"\"Fetch access token from callback request.\"\"\"\n callback = self.request.build_absolute_uri(self.callback or self.request.path)\n if not self.check_application_state():\n LOGGER.warning(\"Application state check failed.\")\n return {\"error\": \"State check failed.\"}\n code = self.get_request_arg(\"code\", None)\n if not code:\n LOGGER.warning(\"No code returned by the source\")\n error = self.get_request_arg(\"error\", None)\n error_desc = self.get_request_arg(\"error_description\", None)\n return {\"error\": error_desc or error or _(\"No token received.\")}\n args = {\n \"client_id\": self.get_client_id(),\n \"client_secret\": self.get_client_secret(),\n \"redirect_uri\": callback,\n \"code\": code,\n \"grant_type\": \"authorization_code\",\n }\n if SESSION_KEY_OAUTH_PKCE in self.request.session:\n args[\"code_verifier\"] = self.request.session[SESSION_KEY_OAUTH_PKCE]\n try:\n access_token_url = self.source.source_type.access_token_url or \"\"\n if self.source.source_type.urls_customizable and self.source.access_token_url:\n access_token_url = self.source.access_token_url\n response = self.session.request(\n \"post\", access_token_url, data=args, headers=self._default_headers, **request_kwargs\n )\n response.raise_for_status()\n except RequestException as exc:\n LOGGER.warning(\n \"Unable to fetch access token\",\n exc=exc,\n response=exc.response.text if exc.response else str(exc),\n )\n return None\n return response.json()\n\n def get_redirect_args(self) -> dict[str, str]:\n \"\"\"Get request parameters for redirect url.\"\"\"\n callback = self.request.build_absolute_uri(self.callback)\n client_id: str = self.get_client_id()\n args: dict[str, str] = {\n \"client_id\": client_id,\n \"redirect_uri\": callback,\n \"response_type\": \"code\",\n }\n state = self.get_application_state()\n if state is not None:\n args[\"state\"] = state\n self.request.session[self.session_key] = state\n return args\n\n def parse_raw_token(self, raw_token: str) -> dict[str, Any]:\n \"\"\"Parse token and secret from raw token response.\"\"\"\n # Load as json first then parse as query string\n try:\n token_data = loads(raw_token)\n except ValueError:\n return dict(parse_qsl(raw_token))\n return token_data\n\n def do_request(self, method: str, url: str, **kwargs) -> Response:\n \"\"\"Build remote url request. 
Constructs necessary auth.\"\"\"\n if \"token\" in kwargs:\n token = kwargs.pop(\"token\")\n\n params = kwargs.get(\"params\", {})\n params[\"access_token\"] = token[\"access_token\"]\n kwargs[\"params\"] = params\n\n headers = kwargs.get(\"headers\", {})\n headers[\"Authorization\"] = f\"{token['token_type']} {token['access_token']}\"\n kwargs[\"headers\"] = headers\n return super().do_request(method, url, **kwargs)\n\n @property\n def session_key(self):\n return f\"oauth-client-{self.source.name}-request-state\"\n\n\nclass UserprofileHeaderAuthClient(OAuth2Client):\n \"\"\"OAuth client which only sends authentication via header, not querystring\"\"\"\n\n def get_profile_info(self, token: dict[str, str]) -> dict[str, Any] | None:\n \"Fetch user profile information.\"\n profile_url = self.source.source_type.profile_url or \"\"\n if self.source.source_type.urls_customizable and self.source.profile_url:\n profile_url = self.source.profile_url\n response = self.session.request(\n \"get\",\n profile_url,\n headers={\"Authorization\": f\"{token['token_type']} {token['access_token']}\"},\n )\n try:\n response.raise_for_status()\n except RequestException as exc:\n LOGGER.warning(\n \"Unable to fetch user profile\",\n exc=exc,\n response=exc.response.text if exc.response else str(exc),\n )\n return None\n return response.json()\n", "path": "authentik/sources/oauth/clients/oauth2.py"}], "after_files": [{"content": "\"\"\"OAuth 2 Clients\"\"\"\n\nfrom json import loads\nfrom typing import Any\nfrom urllib.parse import parse_qsl\n\nfrom django.utils.crypto import constant_time_compare, get_random_string\nfrom django.utils.translation import gettext as _\nfrom requests.exceptions import RequestException\nfrom requests.models import Response\nfrom structlog.stdlib import get_logger\n\nfrom authentik.sources.oauth.clients.base import BaseOAuthClient\n\nLOGGER = get_logger()\nSESSION_KEY_OAUTH_PKCE = \"authentik/sources/oauth/pkce\"\n\n\nclass OAuth2Client(BaseOAuthClient):\n \"\"\"OAuth2 Client\"\"\"\n\n _default_headers = {\n \"Accept\": \"application/json\",\n }\n\n def get_request_arg(self, key: str, default: Any | None = None) -> Any:\n \"\"\"Depending on request type, get data from post or get\"\"\"\n if self.request.method == \"POST\":\n return self.request.POST.get(key, default)\n return self.request.GET.get(key, default)\n\n def check_application_state(self) -> bool:\n \"\"\"Check optional state parameter.\"\"\"\n stored = self.request.session.get(self.session_key, None)\n returned = self.get_request_arg(\"state\", None)\n check = False\n if stored is not None:\n if returned is not None:\n check = constant_time_compare(stored, returned)\n else:\n LOGGER.warning(\"No state parameter returned by the source.\")\n else:\n LOGGER.warning(\"No state stored in the session.\")\n return check\n\n def get_application_state(self) -> str:\n \"\"\"Generate state optional parameter.\"\"\"\n return get_random_string(32)\n\n def get_client_id(self) -> str:\n \"\"\"Get client id\"\"\"\n return self.source.consumer_key\n\n def get_client_secret(self) -> str:\n \"\"\"Get client secret\"\"\"\n return self.source.consumer_secret\n\n def get_access_token(self, **request_kwargs) -> dict[str, Any] | None:\n \"\"\"Fetch access token from callback request.\"\"\"\n callback = self.request.build_absolute_uri(self.callback or self.request.path)\n if not self.check_application_state():\n LOGGER.warning(\"Application state check failed.\")\n return {\"error\": \"State check failed.\"}\n code = self.get_request_arg(\"code\", 
None)\n if not code:\n LOGGER.warning(\"No code returned by the source\")\n error = self.get_request_arg(\"error\", None)\n error_desc = self.get_request_arg(\"error_description\", None)\n return {\"error\": error_desc or error or _(\"No token received.\")}\n args = {\n \"client_id\": self.get_client_id(),\n \"client_secret\": self.get_client_secret(),\n \"redirect_uri\": callback,\n \"code\": code,\n \"grant_type\": \"authorization_code\",\n }\n if SESSION_KEY_OAUTH_PKCE in self.request.session:\n args[\"code_verifier\"] = self.request.session[SESSION_KEY_OAUTH_PKCE]\n try:\n access_token_url = self.source.source_type.access_token_url or \"\"\n if self.source.source_type.urls_customizable and self.source.access_token_url:\n access_token_url = self.source.access_token_url\n response = self.do_request(\n \"post\", access_token_url, data=args, headers=self._default_headers, **request_kwargs\n )\n response.raise_for_status()\n except RequestException as exc:\n LOGGER.warning(\n \"Unable to fetch access token\",\n exc=exc,\n response=exc.response.text if exc.response else str(exc),\n )\n return None\n return response.json()\n\n def get_redirect_args(self) -> dict[str, str]:\n \"\"\"Get request parameters for redirect url.\"\"\"\n callback = self.request.build_absolute_uri(self.callback)\n client_id: str = self.get_client_id()\n args: dict[str, str] = {\n \"client_id\": client_id,\n \"redirect_uri\": callback,\n \"response_type\": \"code\",\n }\n state = self.get_application_state()\n if state is not None:\n args[\"state\"] = state\n self.request.session[self.session_key] = state\n return args\n\n def parse_raw_token(self, raw_token: str) -> dict[str, Any]:\n \"\"\"Parse token and secret from raw token response.\"\"\"\n # Load as json first then parse as query string\n try:\n token_data = loads(raw_token)\n except ValueError:\n return dict(parse_qsl(raw_token))\n return token_data\n\n def do_request(self, method: str, url: str, **kwargs) -> Response:\n \"\"\"Build remote url request. Constructs necessary auth.\"\"\"\n if \"token\" in kwargs:\n token = kwargs.pop(\"token\")\n\n params = kwargs.get(\"params\", {})\n params[\"access_token\"] = token[\"access_token\"]\n kwargs[\"params\"] = params\n\n headers = kwargs.get(\"headers\", {})\n headers[\"Authorization\"] = f\"{token['token_type']} {token['access_token']}\"\n kwargs[\"headers\"] = headers\n return super().do_request(method, url, **kwargs)\n\n @property\n def session_key(self):\n return f\"oauth-client-{self.source.name}-request-state\"\n\n\nclass UserprofileHeaderAuthClient(OAuth2Client):\n \"\"\"OAuth client which only sends authentication via header, not querystring\"\"\"\n\n def get_profile_info(self, token: dict[str, str]) -> dict[str, Any] | None:\n \"Fetch user profile information.\"\n profile_url = self.source.source_type.profile_url or \"\"\n if self.source.source_type.urls_customizable and self.source.profile_url:\n profile_url = self.source.profile_url\n response = self.session.request(\n \"get\",\n profile_url,\n headers={\"Authorization\": f\"{token['token_type']} {token['access_token']}\"},\n )\n try:\n response.raise_for_status()\n except RequestException as exc:\n LOGGER.warning(\n \"Unable to fetch user profile\",\n exc=exc,\n response=exc.response.text if exc.response else str(exc),\n )\n return None\n return response.json()\n", "path": "authentik/sources/oauth/clients/oauth2.py"}]} | 2,343 | 150 |
gh_patches_debug_5389 | rasdani/github-patches | git_diff | vega__altair-989 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
scatter-matrix example doesn't fit on the screen
https://altair-viz.github.io/gallery/scatter_matrix.html#scatter-matrix

```python
#...
).properties(
width=150, # suggested instead of 250
height=150 # suggested instead of 250
).repeat(
#...
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `altair/vegalite/v2/examples/scatter_matrix.py`
Content:
```
1 """
2 Scatter Matrix
3 --------------
4 An example of using a RepeatChart to construct a multi-panel scatter plot
5 with linked panning and zooming.
6 """
7 # category: scatter plots
8 import altair as alt
9 from vega_datasets import data
10
11 alt.Chart(data.cars.url).mark_circle().encode(
12 alt.X(alt.repeat("column"), type='quantitative'),
13 alt.Y(alt.repeat("row"), type='quantitative'),
14 color='Origin:N'
15 ).properties(
16 width=250,
17 height=250
18 ).repeat(
19 row=['Horsepower', 'Acceleration', 'Miles_per_Gallon'],
20 column=['Miles_per_Gallon', 'Acceleration', 'Horsepower']
21 ).interactive()
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/altair/vegalite/v2/examples/scatter_matrix.py b/altair/vegalite/v2/examples/scatter_matrix.py
--- a/altair/vegalite/v2/examples/scatter_matrix.py
+++ b/altair/vegalite/v2/examples/scatter_matrix.py
@@ -13,8 +13,8 @@
alt.Y(alt.repeat("row"), type='quantitative'),
color='Origin:N'
).properties(
- width=250,
- height=250
+ width=150,
+ height=150
).repeat(
row=['Horsepower', 'Acceleration', 'Miles_per_Gallon'],
column=['Miles_per_Gallon', 'Acceleration', 'Horsepower']
| {"golden_diff": "diff --git a/altair/vegalite/v2/examples/scatter_matrix.py b/altair/vegalite/v2/examples/scatter_matrix.py\n--- a/altair/vegalite/v2/examples/scatter_matrix.py\n+++ b/altair/vegalite/v2/examples/scatter_matrix.py\n@@ -13,8 +13,8 @@\n alt.Y(alt.repeat(\"row\"), type='quantitative'),\n color='Origin:N'\n ).properties(\n- width=250,\n- height=250\n+ width=150,\n+ height=150\n ).repeat(\n row=['Horsepower', 'Acceleration', 'Miles_per_Gallon'],\n column=['Miles_per_Gallon', 'Acceleration', 'Horsepower']\n", "issue": "scatter-matrix example don't fit into screen\nhttps://altair-viz.github.io/gallery/scatter_matrix.html#scatter-matrix\r\n\r\n```python\r\n#...\r\n).properties(\r\n width=150, # suggested instead of 250\r\n height=150 # suggested instead of 250\r\n).repeat(\r\n#...\r\n```\n", "before_files": [{"content": "\"\"\"\nScatter Matrix\n--------------\nAn example of using a RepeatChart to construct a multi-panel scatter plot\nwith linked panning and zooming.\n\"\"\"\n# category: scatter plots\nimport altair as alt\nfrom vega_datasets import data\n\nalt.Chart(data.cars.url).mark_circle().encode(\n alt.X(alt.repeat(\"column\"), type='quantitative'),\n alt.Y(alt.repeat(\"row\"), type='quantitative'),\n color='Origin:N'\n).properties(\n width=250,\n height=250\n).repeat(\n row=['Horsepower', 'Acceleration', 'Miles_per_Gallon'],\n column=['Miles_per_Gallon', 'Acceleration', 'Horsepower']\n).interactive()\n", "path": "altair/vegalite/v2/examples/scatter_matrix.py"}], "after_files": [{"content": "\"\"\"\nScatter Matrix\n--------------\nAn example of using a RepeatChart to construct a multi-panel scatter plot\nwith linked panning and zooming.\n\"\"\"\n# category: scatter plots\nimport altair as alt\nfrom vega_datasets import data\n\nalt.Chart(data.cars.url).mark_circle().encode(\n alt.X(alt.repeat(\"column\"), type='quantitative'),\n alt.Y(alt.repeat(\"row\"), type='quantitative'),\n color='Origin:N'\n).properties(\n width=150,\n height=150\n).repeat(\n row=['Horsepower', 'Acceleration', 'Miles_per_Gallon'],\n column=['Miles_per_Gallon', 'Acceleration', 'Horsepower']\n).interactive()\n", "path": "altair/vegalite/v2/examples/scatter_matrix.py"}]} | 592 | 172 |
gh_patches_debug_27574 | rasdani/github-patches | git_diff | goauthentik__authentik-8644 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
General system exception - Event Log
**Describe the bug**
Getting a "General system exception" in the Event log, after the user logged in
**To Reproduce**
Steps to reproduce the behavior:
1. Default [default-authentication-flow]
2. Between default-authentication-identification and default-authentication-password there is a Prompt Stage with a checkbox ("I'm not a Robot")
3. A user logs in
4. An exception appears in the Event Log
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**


**Logs**
Property Mappings Exception:
ExceptionTraceback (most recent call last):
  File "/authentik/lib/expression/evaluator.py", line 198, in evaluate
    ast_obj = self.compile(expression_source)
              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/authentik/lib/expression/evaluator.py", line 187, in compile
    return compile(self.wrap_expression(expression, param_keys), self._filename, "exec")
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
builtins.IndentationError: expected an indented block after function definition on line 1 (Checkbox, line 3)
--
General system exception:
Traceback (most recent call last):
File "/ak-root/venv/lib/python3.12/site-packages/celery/app/trace.py", line 494, in trace_task
I, R, state, retval = on_error(task_request, exc)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.12/site-packages/celery/app/trace.py", line 399, in on_error
R = I.handle_error_state(
^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.12/site-packages/celery/app/trace.py", line 178, in handle_error_state
return {
^
File "/ak-root/venv/lib/python3.12/site-packages/celery/app/trace.py", line 236, in handle_failure
task.on_failure(exc, req.id, req.args, req.kwargs, einfo)
File "/authentik/events/system_tasks.py", line 106, in on_failure
DBSystemTask.objects.update_or_create(
File "/ak-root/venv/lib/python3.12/site-packages/django/db/models/manager.py", line 87, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.12/site-packages/django/db/models/query.py", line 986, in update_or_create
obj, created = self.select_for_update().get_or_create(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.12/site-packages/django/db/models/query.py", line 955, in get_or_create
return self.create(**params), True
^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.12/site-packages/django/db/models/query.py", line 679, in create
obj.save(force_insert=True, using=self.db)
File "/ak-root/venv/lib/python3.12/site-packages/django/db/models/base.py", line 822, in save
self.save_base(
File "/ak-root/venv/lib/python3.12/site-packages/django/db/models/base.py", line 909, in save_base
updated = self._save_table(
^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.12/site-packages/django/db/models/base.py", line 1067, in _save_table
results = self._do_insert(
^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.12/site-packages/django/db/models/base.py", line 1108, in _do_insert
return manager._insert(
^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.12/site-packages/django/db/models/manager.py", line 87, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.12/site-packages/django/db/models/query.py", line 1847, in _insert
return query.get_compiler(using=using).execute_sql(returning_fields)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.12/site-packages/django/db/models/sql/compiler.py", line 1823, in execute_sql
cursor.execute(sql, params)
File "/ak-root/venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/__init__.py", line 641, in execute
result = real_execute(self, sql, params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.12/site-packages/django/db/backends/utils.py", line 79, in execute
return self._execute_with_wrappers(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.12/site-packages/django/db/backends/utils.py", line 92, in _execute_with_wrappers
return executor(sql, params, many, context)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.12/site-packages/django/db/backends/utils.py", line 105, in _execute
return self.cursor.execute(sql, params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.12/site-packages/django_prometheus/db/common.py", line 69, in execute
return super().execute(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.12/site-packages/psycopg/cursor.py", line 728, in execute
self._conn.wait(
File "/ak-root/venv/lib/python3.12/site-packages/psycopg/connection.py", line 969, in wait
return waiting.wait(gen, self.pgconn.socket, timeout=timeout)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "psycopg_c/_psycopg/waiting.pyx", line 190, in psycopg_c._psycopg.wait_c
File "/ak-root/venv/lib/python3.12/site-packages/psycopg/cursor.py", line 210, in _execute_gen
pgq = self._convert_query(query, params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.12/site-packages/psycopg/client_cursor.py", line 79, in _convert_query
pgq.convert(query, params)
File "/ak-root/venv/lib/python3.12/site-packages/psycopg/_queries.py", line 213, in convert
self.dump(vars)
File "/ak-root/venv/lib/python3.12/site-packages/psycopg/_queries.py", line 223, in dump
self.params = tuple(
^^^^^^
File "/ak-root/venv/lib/python3.12/site-packages/psycopg/_queries.py", line 224, in <genexpr>
self._tx.as_literal(p) if p is not None else b"NULL" for p in params
^^^^^^^^^^^^^^^^^^^^^^
File "psycopg_c/_psycopg/transform.pyx", line 206, in psycopg_c._psycopg.Transformer.as_literal
File "psycopg_c/_psycopg/transform.pyx", line 215, in psycopg_c._psycopg.Transformer.as_literal
File "/ak-root/venv/lib/python3.12/site-packages/psycopg/adapt.py", line 57, in quote
value = self.dump(obj)
^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.12/site-packages/psycopg/types/json.py", line 151, in dump
data = dumps(obj)
^^^^^^^^^^
File "/usr/local/lib/python3.12/json/__init__.py", line 231, in dumps
return _default_encoder.encode(obj)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/json/encoder.py", line 200, in encode
chunks = self.iterencode(o, _one_shot=True)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/json/encoder.py", line 258, in iterencode
return _iterencode(o, 0)
^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/json/encoder.py", line 180, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
builtins.TypeError: Object of type UUID is not JSON serializable
General system exception:
Traceback (most recent call last):
builtins.TypeError: Object of type UUID is not JSON serializable
**Version and Deployment (please complete the following information):**
- authentik version: 2024.2.0
- Deployment: Docker
**Additional context**
Add any other context about the problem here.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `authentik/events/system_tasks.py`
Content:
```
1 """Monitored tasks"""
2
3 from datetime import datetime, timedelta
4 from time import perf_counter
5 from typing import Any, Optional
6
7 from django.utils.timezone import now
8 from django.utils.translation import gettext_lazy as _
9 from structlog.stdlib import get_logger
10 from tenant_schemas_celery.task import TenantTask
11
12 from authentik.events.models import Event, EventAction
13 from authentik.events.models import SystemTask as DBSystemTask
14 from authentik.events.models import TaskStatus
15 from authentik.events.utils import sanitize_item
16 from authentik.lib.utils.errors import exception_to_string
17
18 LOGGER = get_logger()
19
20
21 class SystemTask(TenantTask):
22 """Task which can save its state to the cache"""
23
24 # For tasks that should only be listed if they failed, set this to False
25 save_on_success: bool
26
27 _status: TaskStatus
28 _messages: list[str]
29
30 _uid: Optional[str]
31 # Precise start time from perf_counter
32 _start_precise: Optional[float] = None
33 _start: Optional[datetime] = None
34
35 def __init__(self, *args, **kwargs) -> None:
36 super().__init__(*args, **kwargs)
37 self._status = TaskStatus.SUCCESSFUL
38 self.save_on_success = True
39 self._uid = None
40 self._status = None
41 self._messages = []
42 self.result_timeout_hours = 6
43
44 def set_uid(self, uid: str):
45 """Set UID, so in the case of an unexpected error its saved correctly"""
46 self._uid = uid
47
48 def set_status(self, status: TaskStatus, *messages: str):
49 """Set result for current run, will overwrite previous result."""
50 self._status = status
51 self._messages = messages
52
53 def set_error(self, exception: Exception):
54 """Set result to error and save exception"""
55 self._status = TaskStatus.ERROR
56 self._messages = [exception_to_string(exception)]
57
58 def before_start(self, task_id, args, kwargs):
59 self._start_precise = perf_counter()
60 self._start = now()
61 return super().before_start(task_id, args, kwargs)
62
63 def db(self) -> Optional[DBSystemTask]:
64 """Get DB object for latest task"""
65 return DBSystemTask.objects.filter(
66 name=self.__name__,
67 uid=self._uid,
68 ).first()
69
70 # pylint: disable=too-many-arguments
71 def after_return(self, status, retval, task_id, args: list[Any], kwargs: dict[str, Any], einfo):
72 super().after_return(status, retval, task_id, args, kwargs, einfo=einfo)
73 if not self._status:
74 return
75 if self._status == TaskStatus.SUCCESSFUL and not self.save_on_success:
76 DBSystemTask.objects.filter(
77 name=self.__name__,
78 uid=self._uid,
79 ).delete()
80 return
81 DBSystemTask.objects.update_or_create(
82 name=self.__name__,
83 uid=self._uid,
84 defaults={
85 "description": self.__doc__,
86 "start_timestamp": self._start or now(),
87 "finish_timestamp": now(),
88 "duration": max(perf_counter() - self._start_precise, 0),
89 "task_call_module": self.__module__,
90 "task_call_func": self.__name__,
91 "task_call_args": args,
92 "task_call_kwargs": kwargs,
93 "status": self._status,
94 "messages": sanitize_item(self._messages),
95 "expires": now() + timedelta(hours=self.result_timeout_hours),
96 "expiring": True,
97 },
98 )
99
100 # pylint: disable=too-many-arguments
101 def on_failure(self, exc, task_id, args, kwargs, einfo):
102 super().on_failure(exc, task_id, args, kwargs, einfo=einfo)
103 if not self._status:
104 self._status = TaskStatus.ERROR
105 self._messages = exception_to_string(exc)
106 DBSystemTask.objects.update_or_create(
107 name=self.__name__,
108 uid=self._uid,
109 defaults={
110 "description": self.__doc__,
111 "start_timestamp": self._start or now(),
112 "finish_timestamp": now(),
113 "duration": max(perf_counter() - self._start_precise, 0),
114 "task_call_module": self.__module__,
115 "task_call_func": self.__name__,
116 "task_call_args": args,
117 "task_call_kwargs": kwargs,
118 "status": self._status,
119 "messages": sanitize_item(self._messages),
120 "expires": now() + timedelta(hours=self.result_timeout_hours),
121 "expiring": True,
122 },
123 )
124 Event.new(
125 EventAction.SYSTEM_TASK_EXCEPTION,
126 message=f"Task {self.__name__} encountered an error: {exception_to_string(exc)}",
127 ).save()
128
129 def run(self, *args, **kwargs):
130 raise NotImplementedError
131
132
133 def prefill_task(func):
134 """Ensure a task's details are always in cache, so it can always be triggered via API"""
135 _prefill_tasks.append(
136 DBSystemTask(
137 name=func.__name__,
138 description=func.__doc__,
139 start_timestamp=now(),
140 finish_timestamp=now(),
141 status=TaskStatus.UNKNOWN,
142 messages=sanitize_item([_("Task has not been run yet.")]),
143 task_call_module=func.__module__,
144 task_call_func=func.__name__,
145 expiring=False,
146 duration=0,
147 )
148 )
149 return func
150
151
152 _prefill_tasks = []
153
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/authentik/events/system_tasks.py b/authentik/events/system_tasks.py
--- a/authentik/events/system_tasks.py
+++ b/authentik/events/system_tasks.py
@@ -88,8 +88,8 @@
"duration": max(perf_counter() - self._start_precise, 0),
"task_call_module": self.__module__,
"task_call_func": self.__name__,
- "task_call_args": args,
- "task_call_kwargs": kwargs,
+ "task_call_args": sanitize_item(args),
+ "task_call_kwargs": sanitize_item(kwargs),
"status": self._status,
"messages": sanitize_item(self._messages),
"expires": now() + timedelta(hours=self.result_timeout_hours),
@@ -113,8 +113,8 @@
"duration": max(perf_counter() - self._start_precise, 0),
"task_call_module": self.__module__,
"task_call_func": self.__name__,
- "task_call_args": args,
- "task_call_kwargs": kwargs,
+ "task_call_args": sanitize_item(args),
+ "task_call_kwargs": sanitize_item(kwargs),
"status": self._status,
"messages": sanitize_item(self._messages),
"expires": now() + timedelta(hours=self.result_timeout_hours),
| {"golden_diff": "diff --git a/authentik/events/system_tasks.py b/authentik/events/system_tasks.py\n--- a/authentik/events/system_tasks.py\n+++ b/authentik/events/system_tasks.py\n@@ -88,8 +88,8 @@\n \"duration\": max(perf_counter() - self._start_precise, 0),\n \"task_call_module\": self.__module__,\n \"task_call_func\": self.__name__,\n- \"task_call_args\": args,\n- \"task_call_kwargs\": kwargs,\n+ \"task_call_args\": sanitize_item(args),\n+ \"task_call_kwargs\": sanitize_item(kwargs),\n \"status\": self._status,\n \"messages\": sanitize_item(self._messages),\n \"expires\": now() + timedelta(hours=self.result_timeout_hours),\n@@ -113,8 +113,8 @@\n \"duration\": max(perf_counter() - self._start_precise, 0),\n \"task_call_module\": self.__module__,\n \"task_call_func\": self.__name__,\n- \"task_call_args\": args,\n- \"task_call_kwargs\": kwargs,\n+ \"task_call_args\": sanitize_item(args),\n+ \"task_call_kwargs\": sanitize_item(kwargs),\n \"status\": self._status,\n \"messages\": sanitize_item(self._messages),\n \"expires\": now() + timedelta(hours=self.result_timeout_hours),\n", "issue": "General system exception - Event Log\n**Describe the bug**\r\nGetting a \"General system exception\" in the Event log, after the user logged in\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\n1. Default [default-authentication-flow]\r\n2. between default-authentication-identification and default-authentication-password there is a Prompt Stage with Checkbox (\"I'm not a Robot\")\r\n3. User login\r\n4. Exception in the Event Log\r\n\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n**Screenshots**\r\n\r\n\r\n\r\n\r\n**Logs**\r\nProperty Mappings Exception:\r\n\r\nExceptionTraceback (most recent call last): File \"/authentik/lib/expression/evaluator.py\", line 198, in evaluate ast_obj = self.compile(expression_source) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File \"/authentik/lib/expression/evaluator.py\", line 187, in compile return compile(self.wrap_expression(expression, param_keys), self._filename, \"exec\") ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ builtins.IndentationError: expected an indented block after function definition on line 1 (Checkbox, line 3)\r\n--\r\n\r\n\r\nGeneral system exception:\r\n\r\nTraceback (most recent call last):\r\n File \"/ak-root/venv/lib/python3.12/site-packages/celery/app/trace.py\", line 494, in trace_task\r\n I, R, state, retval = on_error(task_request, exc)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.12/site-packages/celery/app/trace.py\", line 399, in on_error\r\n R = I.handle_error_state(\r\n ^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.12/site-packages/celery/app/trace.py\", line 178, in handle_error_state\r\n return {\r\n ^\r\n File \"/ak-root/venv/lib/python3.12/site-packages/celery/app/trace.py\", line 236, in handle_failure\r\n task.on_failure(exc, req.id, req.args, req.kwargs, einfo)\r\n File \"/authentik/events/system_tasks.py\", line 106, in on_failure\r\n DBSystemTask.objects.update_or_create(\r\n File \"/ak-root/venv/lib/python3.12/site-packages/django/db/models/manager.py\", line 87, in manager_method\r\n return getattr(self.get_queryset(), name)(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.12/site-packages/django/db/models/query.py\", line 986, in update_or_create\r\n obj, created = self.select_for_update().get_or_create(\r\n 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.12/site-packages/django/db/models/query.py\", line 955, in get_or_create\r\n return self.create(**params), True\r\n ^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.12/site-packages/django/db/models/query.py\", line 679, in create\r\n obj.save(force_insert=True, using=self.db)\r\n File \"/ak-root/venv/lib/python3.12/site-packages/django/db/models/base.py\", line 822, in save\r\n self.save_base(\r\n File \"/ak-root/venv/lib/python3.12/site-packages/django/db/models/base.py\", line 909, in save_base\r\n updated = self._save_table(\r\n ^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.12/site-packages/django/db/models/base.py\", line 1067, in _save_table\r\n results = self._do_insert(\r\n ^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.12/site-packages/django/db/models/base.py\", line 1108, in _do_insert\r\n return manager._insert(\r\n ^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.12/site-packages/django/db/models/manager.py\", line 87, in manager_method\r\n return getattr(self.get_queryset(), name)(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.12/site-packages/django/db/models/query.py\", line 1847, in _insert\r\n return query.get_compiler(using=using).execute_sql(returning_fields)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.12/site-packages/django/db/models/sql/compiler.py\", line 1823, in execute_sql\r\n cursor.execute(sql, params)\r\n File \"/ak-root/venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/__init__.py\", line 641, in execute\r\n result = real_execute(self, sql, params)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.12/site-packages/django/db/backends/utils.py\", line 79, in execute\r\n return self._execute_with_wrappers(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.12/site-packages/django/db/backends/utils.py\", line 92, in _execute_with_wrappers\r\n return executor(sql, params, many, context)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.12/site-packages/django/db/backends/utils.py\", line 105, in _execute\r\n return self.cursor.execute(sql, params)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.12/site-packages/django_prometheus/db/common.py\", line 69, in execute\r\n return super().execute(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.12/site-packages/psycopg/cursor.py\", line 728, in execute\r\n self._conn.wait(\r\n File \"/ak-root/venv/lib/python3.12/site-packages/psycopg/connection.py\", line 969, in wait\r\n return waiting.wait(gen, self.pgconn.socket, timeout=timeout)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"psycopg_c/_psycopg/waiting.pyx\", line 190, in psycopg_c._psycopg.wait_c\r\n File \"/ak-root/venv/lib/python3.12/site-packages/psycopg/cursor.py\", line 210, in _execute_gen\r\n pgq = self._convert_query(query, params)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.12/site-packages/psycopg/client_cursor.py\", line 79, in _convert_query\r\n pgq.convert(query, params)\r\n File \"/ak-root/venv/lib/python3.12/site-packages/psycopg/_queries.py\", line 213, in convert\r\n self.dump(vars)\r\n File \"/ak-root/venv/lib/python3.12/site-packages/psycopg/_queries.py\", line 223, in dump\r\n self.params = tuple(\r\n ^^^^^^\r\n File 
\"/ak-root/venv/lib/python3.12/site-packages/psycopg/_queries.py\", line 224, in <genexpr>\r\n self._tx.as_literal(p) if p is not None else b\"NULL\" for p in params\r\n ^^^^^^^^^^^^^^^^^^^^^^\r\n File \"psycopg_c/_psycopg/transform.pyx\", line 206, in psycopg_c._psycopg.Transformer.as_literal\r\n File \"psycopg_c/_psycopg/transform.pyx\", line 215, in psycopg_c._psycopg.Transformer.as_literal\r\n File \"/ak-root/venv/lib/python3.12/site-packages/psycopg/adapt.py\", line 57, in quote\r\n value = self.dump(obj)\r\n ^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.12/site-packages/psycopg/types/json.py\", line 151, in dump\r\n data = dumps(obj)\r\n ^^^^^^^^^^\r\n File \"/usr/local/lib/python3.12/json/__init__.py\", line 231, in dumps\r\n return _default_encoder.encode(obj)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.12/json/encoder.py\", line 200, in encode\r\n chunks = self.iterencode(o, _one_shot=True)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.12/json/encoder.py\", line 258, in iterencode\r\n return _iterencode(o, 0)\r\n ^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.12/json/encoder.py\", line 180, in default\r\n raise TypeError(f'Object of type {o.__class__.__name__} '\r\nbuiltins.TypeError: Object of type UUID is not JSON serializable\r\n\r\n\r\nGeneral system exception:\r\n\r\nTraceback (most recent call last):\r\nbuiltins.TypeError: Object of type UUID is not JSON serializable\r\n\r\n**Version and Deployment (please complete the following information):**\r\n\r\n- authentik version: 2024.2.0\r\n- Deployment: Docker\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "\"\"\"Monitored tasks\"\"\"\n\nfrom datetime import datetime, timedelta\nfrom time import perf_counter\nfrom typing import Any, Optional\n\nfrom django.utils.timezone import now\nfrom django.utils.translation import gettext_lazy as _\nfrom structlog.stdlib import get_logger\nfrom tenant_schemas_celery.task import TenantTask\n\nfrom authentik.events.models import Event, EventAction\nfrom authentik.events.models import SystemTask as DBSystemTask\nfrom authentik.events.models import TaskStatus\nfrom authentik.events.utils import sanitize_item\nfrom authentik.lib.utils.errors import exception_to_string\n\nLOGGER = get_logger()\n\n\nclass SystemTask(TenantTask):\n \"\"\"Task which can save its state to the cache\"\"\"\n\n # For tasks that should only be listed if they failed, set this to False\n save_on_success: bool\n\n _status: TaskStatus\n _messages: list[str]\n\n _uid: Optional[str]\n # Precise start time from perf_counter\n _start_precise: Optional[float] = None\n _start: Optional[datetime] = None\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self._status = TaskStatus.SUCCESSFUL\n self.save_on_success = True\n self._uid = None\n self._status = None\n self._messages = []\n self.result_timeout_hours = 6\n\n def set_uid(self, uid: str):\n \"\"\"Set UID, so in the case of an unexpected error its saved correctly\"\"\"\n self._uid = uid\n\n def set_status(self, status: TaskStatus, *messages: str):\n \"\"\"Set result for current run, will overwrite previous result.\"\"\"\n self._status = status\n self._messages = messages\n\n def set_error(self, exception: Exception):\n \"\"\"Set result to error and save exception\"\"\"\n self._status = TaskStatus.ERROR\n self._messages = [exception_to_string(exception)]\n\n def before_start(self, task_id, args, kwargs):\n 
self._start_precise = perf_counter()\n self._start = now()\n return super().before_start(task_id, args, kwargs)\n\n def db(self) -> Optional[DBSystemTask]:\n \"\"\"Get DB object for latest task\"\"\"\n return DBSystemTask.objects.filter(\n name=self.__name__,\n uid=self._uid,\n ).first()\n\n # pylint: disable=too-many-arguments\n def after_return(self, status, retval, task_id, args: list[Any], kwargs: dict[str, Any], einfo):\n super().after_return(status, retval, task_id, args, kwargs, einfo=einfo)\n if not self._status:\n return\n if self._status == TaskStatus.SUCCESSFUL and not self.save_on_success:\n DBSystemTask.objects.filter(\n name=self.__name__,\n uid=self._uid,\n ).delete()\n return\n DBSystemTask.objects.update_or_create(\n name=self.__name__,\n uid=self._uid,\n defaults={\n \"description\": self.__doc__,\n \"start_timestamp\": self._start or now(),\n \"finish_timestamp\": now(),\n \"duration\": max(perf_counter() - self._start_precise, 0),\n \"task_call_module\": self.__module__,\n \"task_call_func\": self.__name__,\n \"task_call_args\": args,\n \"task_call_kwargs\": kwargs,\n \"status\": self._status,\n \"messages\": sanitize_item(self._messages),\n \"expires\": now() + timedelta(hours=self.result_timeout_hours),\n \"expiring\": True,\n },\n )\n\n # pylint: disable=too-many-arguments\n def on_failure(self, exc, task_id, args, kwargs, einfo):\n super().on_failure(exc, task_id, args, kwargs, einfo=einfo)\n if not self._status:\n self._status = TaskStatus.ERROR\n self._messages = exception_to_string(exc)\n DBSystemTask.objects.update_or_create(\n name=self.__name__,\n uid=self._uid,\n defaults={\n \"description\": self.__doc__,\n \"start_timestamp\": self._start or now(),\n \"finish_timestamp\": now(),\n \"duration\": max(perf_counter() - self._start_precise, 0),\n \"task_call_module\": self.__module__,\n \"task_call_func\": self.__name__,\n \"task_call_args\": args,\n \"task_call_kwargs\": kwargs,\n \"status\": self._status,\n \"messages\": sanitize_item(self._messages),\n \"expires\": now() + timedelta(hours=self.result_timeout_hours),\n \"expiring\": True,\n },\n )\n Event.new(\n EventAction.SYSTEM_TASK_EXCEPTION,\n message=f\"Task {self.__name__} encountered an error: {exception_to_string(exc)}\",\n ).save()\n\n def run(self, *args, **kwargs):\n raise NotImplementedError\n\n\ndef prefill_task(func):\n \"\"\"Ensure a task's details are always in cache, so it can always be triggered via API\"\"\"\n _prefill_tasks.append(\n DBSystemTask(\n name=func.__name__,\n description=func.__doc__,\n start_timestamp=now(),\n finish_timestamp=now(),\n status=TaskStatus.UNKNOWN,\n messages=sanitize_item([_(\"Task has not been run yet.\")]),\n task_call_module=func.__module__,\n task_call_func=func.__name__,\n expiring=False,\n duration=0,\n )\n )\n return func\n\n\n_prefill_tasks = []\n", "path": "authentik/events/system_tasks.py"}], "after_files": [{"content": "\"\"\"Monitored tasks\"\"\"\n\nfrom datetime import datetime, timedelta\nfrom time import perf_counter\nfrom typing import Any, Optional\n\nfrom django.utils.timezone import now\nfrom django.utils.translation import gettext_lazy as _\nfrom structlog.stdlib import get_logger\nfrom tenant_schemas_celery.task import TenantTask\n\nfrom authentik.events.models import Event, EventAction\nfrom authentik.events.models import SystemTask as DBSystemTask\nfrom authentik.events.models import TaskStatus\nfrom authentik.events.utils import sanitize_item\nfrom authentik.lib.utils.errors import exception_to_string\n\nLOGGER = get_logger()\n\n\nclass 
SystemTask(TenantTask):\n \"\"\"Task which can save its state to the cache\"\"\"\n\n # For tasks that should only be listed if they failed, set this to False\n save_on_success: bool\n\n _status: TaskStatus\n _messages: list[str]\n\n _uid: Optional[str]\n # Precise start time from perf_counter\n _start_precise: Optional[float] = None\n _start: Optional[datetime] = None\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self._status = TaskStatus.SUCCESSFUL\n self.save_on_success = True\n self._uid = None\n self._status = None\n self._messages = []\n self.result_timeout_hours = 6\n\n def set_uid(self, uid: str):\n \"\"\"Set UID, so in the case of an unexpected error its saved correctly\"\"\"\n self._uid = uid\n\n def set_status(self, status: TaskStatus, *messages: str):\n \"\"\"Set result for current run, will overwrite previous result.\"\"\"\n self._status = status\n self._messages = messages\n\n def set_error(self, exception: Exception):\n \"\"\"Set result to error and save exception\"\"\"\n self._status = TaskStatus.ERROR\n self._messages = [exception_to_string(exception)]\n\n def before_start(self, task_id, args, kwargs):\n self._start_precise = perf_counter()\n self._start = now()\n return super().before_start(task_id, args, kwargs)\n\n def db(self) -> Optional[DBSystemTask]:\n \"\"\"Get DB object for latest task\"\"\"\n return DBSystemTask.objects.filter(\n name=self.__name__,\n uid=self._uid,\n ).first()\n\n # pylint: disable=too-many-arguments\n def after_return(self, status, retval, task_id, args: list[Any], kwargs: dict[str, Any], einfo):\n super().after_return(status, retval, task_id, args, kwargs, einfo=einfo)\n if not self._status:\n return\n if self._status == TaskStatus.SUCCESSFUL and not self.save_on_success:\n DBSystemTask.objects.filter(\n name=self.__name__,\n uid=self._uid,\n ).delete()\n return\n DBSystemTask.objects.update_or_create(\n name=self.__name__,\n uid=self._uid,\n defaults={\n \"description\": self.__doc__,\n \"start_timestamp\": self._start or now(),\n \"finish_timestamp\": now(),\n \"duration\": max(perf_counter() - self._start_precise, 0),\n \"task_call_module\": self.__module__,\n \"task_call_func\": self.__name__,\n \"task_call_args\": sanitize_item(args),\n \"task_call_kwargs\": sanitize_item(kwargs),\n \"status\": self._status,\n \"messages\": sanitize_item(self._messages),\n \"expires\": now() + timedelta(hours=self.result_timeout_hours),\n \"expiring\": True,\n },\n )\n\n # pylint: disable=too-many-arguments\n def on_failure(self, exc, task_id, args, kwargs, einfo):\n super().on_failure(exc, task_id, args, kwargs, einfo=einfo)\n if not self._status:\n self._status = TaskStatus.ERROR\n self._messages = exception_to_string(exc)\n DBSystemTask.objects.update_or_create(\n name=self.__name__,\n uid=self._uid,\n defaults={\n \"description\": self.__doc__,\n \"start_timestamp\": self._start or now(),\n \"finish_timestamp\": now(),\n \"duration\": max(perf_counter() - self._start_precise, 0),\n \"task_call_module\": self.__module__,\n \"task_call_func\": self.__name__,\n \"task_call_args\": sanitize_item(args),\n \"task_call_kwargs\": sanitize_item(kwargs),\n \"status\": self._status,\n \"messages\": sanitize_item(self._messages),\n \"expires\": now() + timedelta(hours=self.result_timeout_hours),\n \"expiring\": True,\n },\n )\n Event.new(\n EventAction.SYSTEM_TASK_EXCEPTION,\n message=f\"Task {self.__name__} encountered an error: {exception_to_string(exc)}\",\n ).save()\n\n def run(self, *args, **kwargs):\n raise 
NotImplementedError\n\n\ndef prefill_task(func):\n \"\"\"Ensure a task's details are always in cache, so it can always be triggered via API\"\"\"\n _prefill_tasks.append(\n DBSystemTask(\n name=func.__name__,\n description=func.__doc__,\n start_timestamp=now(),\n finish_timestamp=now(),\n status=TaskStatus.UNKNOWN,\n messages=sanitize_item([_(\"Task has not been run yet.\")]),\n task_call_module=func.__module__,\n task_call_func=func.__name__,\n expiring=False,\n duration=0,\n )\n )\n return func\n\n\n_prefill_tasks = []\n", "path": "authentik/events/system_tasks.py"}]} | 4,074 | 288 |
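Note on the fix above: Celery task arguments can contain values such as `UUID` instances, and the raw `args`/`kwargs` were previously written into the JSON columns of `SystemTask` unconverted, which is what triggers the `TypeError` once psycopg serializes the row. The patch routes both through `sanitize_item`, the helper already used for `messages`. A minimal standalone sketch of the underlying stdlib behaviour (illustrative only, not authentik code):

```python
import json
from uuid import uuid4

# The standard JSON encoder has no default handling for UUID objects,
# which is exactly the error reported in the traceback above.
try:
    json.dumps({"pk": uuid4()})
except TypeError as exc:
    print(exc)  # Object of type UUID is not JSON serializable
```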
gh_patches_debug_17737 | rasdani/github-patches | git_diff | unionai-oss__pandera-379 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Validating optional columns with a pandera type causes TypeError in SchemaModel
**Describe the bug**
When I use the class-based API with an optional column of type `pandera.typing.*`, I get the following TypeError:
```
TypeError: type of `pandas_dtype` argument not recognized: typing_extensions.Literal.
Please specify a pandera PandasDtype enum, legal pandas data type, pandas data type
string alias, or numpy data type string alias
```
If a Python type is used instead or when the column is not optional, it works as expected.
#### Code Sample
```python
from typing import Optional

import pandas as pd
import pandera as pa
from pandera.typing import DataFrame, DateTime, Series

class Schema(pa.SchemaModel):
    date: Optional[Series[DateTime]]
df = pd.DataFrame({
'date': pd.date_range('2015-02-24', periods=5, freq='T')
})
Schema.validate(df)
```
```
TypeError Traceback (most recent call last)
<ipython-input-10-1d3df28d227a> in <module>
----> 1 Schema.validate(df)
/cw/dtaijupiter/NoCsBack/dtai/pieterr/Projects/socceraction/.venv/lib/python3.6/site-packages/pandera/model.py in validate(cls, check_obj, head, tail, sample, random_state, lazy)
146 ) -> pd.DataFrame:
147 """%(validate_doc)s"""
--> 148 return cls.to_schema().validate(
149 check_obj, head, tail, sample, random_state, lazy
150 )
/cw/dtaijupiter/NoCsBack/dtai/pieterr/Projects/socceraction/.venv/lib/python3.6/site-packages/pandera/model.py in to_schema(cls)
119 }
120 columns, index = cls._build_columns_index(
--> 121 cls.__fields__, cls.__checks__, **mi_kwargs
122 )
123 cls.__schema__ = DataFrameSchema(
/cw/dtaijupiter/NoCsBack/dtai/pieterr/Projects/socceraction/.venv/lib/python3.6/site-packages/pandera/model.py in _build_columns_index(cls, fields, checks, **multiindex_kwargs)
199 required=not annotation.optional,
200 checks=field_checks,
--> 201 name=field_name,
202 )
203 elif annotation.origin is Index:
/cw/dtaijupiter/NoCsBack/dtai/pieterr/Projects/socceraction/.venv/lib/python3.6/site-packages/pandera/schema_components.py in __init__(self, pandas_dtype, checks, nullable, allow_duplicates, coerce, required, name, regex)
77 """
78 super().__init__(
---> 79 pandas_dtype, checks, nullable, allow_duplicates, coerce
80 )
81 if (
/cw/dtaijupiter/NoCsBack/dtai/pieterr/Projects/socceraction/.venv/lib/python3.6/site-packages/pandera/schemas.py in __init__(self, pandas_dtype, checks, nullable, allow_duplicates, coerce, name)
1453
1454 # make sure pandas dtype is valid
-> 1455 self.dtype # pylint: disable=pointless-statement
1456
1457 # this attribute is not meant to be accessed by users and is explicitly
/cw/dtaijupiter/NoCsBack/dtai/pieterr/Projects/socceraction/.venv/lib/python3.6/site-packages/pandera/schemas.py in dtype(self)
1526 def dtype(self) -> Optional[str]:
1527 """String representation of the dtype."""
-> 1528 return PandasDtype.get_str_dtype(self._pandas_dtype)
1529
1530 @property
/cw/dtaijupiter/NoCsBack/dtai/pieterr/Projects/socceraction/.venv/lib/python3.6/site-packages/pandera/dtypes.py in get_str_dtype(cls, pandas_dtype_arg)
310 return dtype_.str_alias
311 raise TypeError(
--> 312 "type of `pandas_dtype` argument not recognized: "
313 f"{type(pandas_dtype_arg)}. Please specify a pandera PandasDtype "
314 "enum, legal pandas data type, pandas data type string alias, or "
TypeError: type of `pandas_dtype` argument not recognized: typing_extensions.Literal. Please specify a pandera PandasDtype enum, legal pandas data type, pandas data type string alias, or numpy data type string alias
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pandera/typing.py`
Content:
```
1 """Typing definitions and helpers."""
2 # pylint:disable=abstract-method,disable=too-many-ancestors
3 import sys
4 from typing import TYPE_CHECKING, Generic, Type, TypeVar
5
6 import pandas as pd
7 import typing_inspect
8
9 from .dtypes import PandasDtype, PandasExtensionType
10
11 if sys.version_info < (3, 8): # pragma: no cover
12 from typing_extensions import Literal
13 else: # pragma: no cover
14 from typing import Literal # pylint:disable=no-name-in-module
15
16
17 _LEGACY_TYPING = sys.version_info[:3] < (3, 7, 0)
18
19 GenericDtype = TypeVar( # type: ignore
20 "GenericDtype",
21 PandasDtype,
22 PandasExtensionType,
23 bool,
24 int,
25 str,
26 float,
27 Literal[PandasDtype.Bool],
28 Literal[PandasDtype.DateTime],
29 Literal[PandasDtype.Category],
30 Literal[PandasDtype.Float],
31 Literal[PandasDtype.Float16],
32 Literal[PandasDtype.Float32],
33 Literal[PandasDtype.Float64],
34 Literal[PandasDtype.Int],
35 Literal[PandasDtype.Int8],
36 Literal[PandasDtype.Int16],
37 Literal[PandasDtype.Int32],
38 Literal[PandasDtype.Int64],
39 Literal[PandasDtype.UInt8],
40 Literal[PandasDtype.UInt16],
41 Literal[PandasDtype.UInt32],
42 Literal[PandasDtype.UInt64],
43 Literal[PandasDtype.INT8],
44 Literal[PandasDtype.INT16],
45 Literal[PandasDtype.INT32],
46 Literal[PandasDtype.INT64],
47 Literal[PandasDtype.UINT8],
48 Literal[PandasDtype.UINT16],
49 Literal[PandasDtype.UINT32],
50 Literal[PandasDtype.UINT64],
51 Literal[PandasDtype.Object],
52 Literal[PandasDtype.String],
53 Literal[PandasDtype.STRING],
54 Literal[PandasDtype.Timedelta],
55 covariant=True,
56 )
57 Schema = TypeVar("Schema", bound="SchemaModel") # type: ignore
58
59
60 # pylint:disable=too-few-public-methods
61 class Index(pd.Index, Generic[GenericDtype]):
62 """Representation of pandas.Index, only used for type annotation.
63
64 *new in 0.5.0*
65 """
66
67
68 # pylint:disable=too-few-public-methods
69 class Series(pd.Series, Generic[GenericDtype]): # type: ignore
70 """Representation of pandas.Series, only used for type annotation.
71
72 *new in 0.5.0*
73 """
74
75
76 if TYPE_CHECKING: # pragma: no cover
77 # pylint:disable=too-few-public-methods,invalid-name
78 T = TypeVar("T")
79
80 class DataFrame(pd.DataFrame, Generic[T]):
81 """
82 Representation of pandas.DataFrame, only used for type annotation.
83
84 *new in 0.5.0*
85 """
86
87
88 else:
89 # pylint:disable=too-few-public-methods
90 class DataFrame(pd.DataFrame, Generic[Schema]):
91 """
92 Representation of pandas.DataFrame, only used for type annotation.
93
94 *new in 0.5.0*
95 """
96
97
98 class AnnotationInfo: # pylint:disable=too-few-public-methods
99 """Captures extra information about an annotation.
100
101 Attributes:
102 origin: The non-parameterized generic class.
103 arg: The first generic type (SchemaModel does not support more than 1 argument).
104 literal: Whether the annotation is a literal.
105 optional: Whether the annotation is optional.
106 raw_annotation: The raw annotation.
107 """
108
109 def __init__(self, raw_annotation: Type) -> None:
110 self._parse_annotation(raw_annotation)
111
112 @property
113 def is_generic_df(self) -> bool:
114 """True if the annotation is a pandera.typing.DataFrame."""
115 return self.origin is not None and issubclass(self.origin, DataFrame)
116
117 def _parse_annotation(self, raw_annotation: Type) -> None:
118 """Parse key information from annotation.
119
120 :param annotation: A subscripted type.
121 :returns: Annotation
122 """
123 self.raw_annotation = raw_annotation
124
125 self.optional = typing_inspect.is_optional_type(raw_annotation)
126 if self.optional:
127 # e.g: Typing.Union[pandera.typing.Index[str], NoneType]
128 if _LEGACY_TYPING: # pragma: no cover
129 # get_args -> ((pandera.typing.Index, <class 'str'>), <class 'NoneType'>)
130 self.origin, self.arg = typing_inspect.get_args(
131 raw_annotation
132 )[0]
133 return
134 # get_args -> (pandera.typing.Index[str], <class 'NoneType'>)
135 raw_annotation = typing_inspect.get_args(raw_annotation)[0]
136
137 self.origin = typing_inspect.get_origin(raw_annotation)
138 args = typing_inspect.get_args(raw_annotation)
139 self.arg = args[0] if args else args
140
141 self.literal = typing_inspect.is_literal_type(self.arg)
142 if self.literal:
143 self.arg = typing_inspect.get_args(self.arg)[0]
144
145
146 Bool = Literal[PandasDtype.Bool] #: ``"bool"`` numpy dtype
147 DateTime = Literal[PandasDtype.DateTime] #: ``"datetime64[ns]"`` numpy dtype
148 Timedelta = Literal[
149 PandasDtype.Timedelta
150 ] #: ``"timedelta64[ns]"`` numpy dtype
151 Category = Literal[PandasDtype.Category] #: pandas ``"categorical"`` datatype
152 Float = Literal[PandasDtype.Float] #: ``"float"`` numpy dtype
153 Float16 = Literal[PandasDtype.Float16] #: ``"float16"`` numpy dtype
154 Float32 = Literal[PandasDtype.Float32] #: ``"float32"`` numpy dtype
155 Float64 = Literal[PandasDtype.Float64] #: ``"float64"`` numpy dtype
156 Int = Literal[PandasDtype.Int] #: ``"int"`` numpy dtype
157 Int8 = Literal[PandasDtype.Int8] #: ``"int8"`` numpy dtype
158 Int16 = Literal[PandasDtype.Int16] #: ``"int16"`` numpy dtype
159 Int32 = Literal[PandasDtype.Int32] #: ``"int32"`` numpy dtype
160 Int64 = Literal[PandasDtype.Int64] #: ``"int64"`` numpy dtype
161 UInt8 = Literal[PandasDtype.UInt8] #: ``"uint8"`` numpy dtype
162 UInt16 = Literal[PandasDtype.UInt16] #: ``"uint16"`` numpy dtype
163 UInt32 = Literal[PandasDtype.UInt32] #: ``"uint32"`` numpy dtype
164 UInt64 = Literal[PandasDtype.UInt64] #: ``"uint64"`` numpy dtype
165 INT8 = Literal[PandasDtype.INT8] #: ``"Int8"`` pandas dtype:: pandas 0.24.0+
166 INT16 = Literal[PandasDtype.INT16] #: ``"Int16"`` pandas dtype: pandas 0.24.0+
167 INT32 = Literal[PandasDtype.INT32] #: ``"Int32"`` pandas dtype: pandas 0.24.0+
168 INT64 = Literal[PandasDtype.INT64] #: ``"Int64"`` pandas dtype: pandas 0.24.0+
169 UINT8 = Literal[
170 PandasDtype.UINT8
171 ] #: ``"UInt8"`` pandas dtype:: pandas 0.24.0+
172 UINT16 = Literal[
173 PandasDtype.UINT16
174 ] #: ``"UInt16"`` pandas dtype: pandas 0.24.0+
175 UINT32 = Literal[
176 PandasDtype.UINT32
177 ] #: ``"UInt32"`` pandas dtype: pandas 0.24.0+
178 UINT64 = Literal[
179 PandasDtype.UINT64
180 ] #: ``"UInt64"`` pandas dtype: pandas 0.24.0+
181 Object = Literal[PandasDtype.Object] #: ``"object"`` numpy dtype
182
183 String = Literal[PandasDtype.String] #: ``"str"`` numpy dtype
184
185 #: ``"string"`` pandas dtypes: pandas 1.0.0+. For <1.0.0, this enum will
186 #: fall back on the str-as-object-array representation.
187 STRING = Literal[PandasDtype.STRING] #: ``"str"`` numpy dtype
188
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pandera/typing.py b/pandera/typing.py
--- a/pandera/typing.py
+++ b/pandera/typing.py
@@ -130,13 +130,13 @@
self.origin, self.arg = typing_inspect.get_args(
raw_annotation
)[0]
- return
# get_args -> (pandera.typing.Index[str], <class 'NoneType'>)
raw_annotation = typing_inspect.get_args(raw_annotation)[0]
- self.origin = typing_inspect.get_origin(raw_annotation)
- args = typing_inspect.get_args(raw_annotation)
- self.arg = args[0] if args else args
+ if not (self.optional and _LEGACY_TYPING):
+ self.origin = typing_inspect.get_origin(raw_annotation)
+ args = typing_inspect.get_args(raw_annotation)
+ self.arg = args[0] if args else args
self.literal = typing_inspect.is_literal_type(self.arg)
if self.literal:
| {"golden_diff": "diff --git a/pandera/typing.py b/pandera/typing.py\n--- a/pandera/typing.py\n+++ b/pandera/typing.py\n@@ -130,13 +130,13 @@\n self.origin, self.arg = typing_inspect.get_args(\n raw_annotation\n )[0]\n- return\n # get_args -> (pandera.typing.Index[str], <class 'NoneType'>)\n raw_annotation = typing_inspect.get_args(raw_annotation)[0]\n \n- self.origin = typing_inspect.get_origin(raw_annotation)\n- args = typing_inspect.get_args(raw_annotation)\n- self.arg = args[0] if args else args\n+ if not (self.optional and _LEGACY_TYPING):\n+ self.origin = typing_inspect.get_origin(raw_annotation)\n+ args = typing_inspect.get_args(raw_annotation)\n+ self.arg = args[0] if args else args\n \n self.literal = typing_inspect.is_literal_type(self.arg)\n if self.literal:\n", "issue": "Validating optional columns with a pandera type causes TypeError in SchemaModel\n**Describe the bug**\r\nWhen I use the class-based API with an optional column of type `pandera.typing.*`, I get the following TypeError:\r\n\r\n```\r\nTypeError: type of `pandas_dtype` argument not recognized: typing_extensions.Literal. \r\nPlease specify a pandera PandasDtype enum, legal pandas data type, pandas data type \r\nstring alias, or numpy data type string alias\r\n```\r\n\r\nIf a Python type is used instead or when the column is not optional, it works as expected.\r\n\r\n#### Code Sample\r\n\r\n```python\r\nimport pandas as pd\r\nimport pandera as pa\r\nfrom pandera.typing import DataFrame, DateTime, Series\r\n\r\nclass Schema(pa.SchemaModel):\r\n date : Optional[Series[DateTime]]\r\n\r\ndf = pd.DataFrame({ \r\n 'date': pd.date_range('2015-02-24', periods=5, freq='T')\r\n })\r\nSchema.validate(df)\r\n```\r\n\r\n```\r\nTypeError Traceback (most recent call last)\r\n<ipython-input-10-1d3df28d227a> in <module>\r\n----> 1 Schema.validate(df)\r\n\r\n/cw/dtaijupiter/NoCsBack/dtai/pieterr/Projects/socceraction/.venv/lib/python3.6/site-packages/pandera/model.py in validate(cls, check_obj, head, tail, sample, random_state, lazy)\r\n 146 ) -> pd.DataFrame:\r\n 147 \"\"\"%(validate_doc)s\"\"\"\r\n--> 148 return cls.to_schema().validate(\r\n 149 check_obj, head, tail, sample, random_state, lazy\r\n 150 )\r\n\r\n/cw/dtaijupiter/NoCsBack/dtai/pieterr/Projects/socceraction/.venv/lib/python3.6/site-packages/pandera/model.py in to_schema(cls)\r\n 119 }\r\n 120 columns, index = cls._build_columns_index(\r\n--> 121 cls.__fields__, cls.__checks__, **mi_kwargs\r\n 122 )\r\n 123 cls.__schema__ = DataFrameSchema(\r\n\r\n/cw/dtaijupiter/NoCsBack/dtai/pieterr/Projects/socceraction/.venv/lib/python3.6/site-packages/pandera/model.py in _build_columns_index(cls, fields, checks, **multiindex_kwargs)\r\n 199 required=not annotation.optional,\r\n 200 checks=field_checks,\r\n--> 201 name=field_name,\r\n 202 )\r\n 203 elif annotation.origin is Index:\r\n\r\n/cw/dtaijupiter/NoCsBack/dtai/pieterr/Projects/socceraction/.venv/lib/python3.6/site-packages/pandera/schema_components.py in __init__(self, pandas_dtype, checks, nullable, allow_duplicates, coerce, required, name, regex)\r\n 77 \"\"\"\r\n 78 super().__init__(\r\n---> 79 pandas_dtype, checks, nullable, allow_duplicates, coerce\r\n 80 )\r\n 81 if (\r\n\r\n/cw/dtaijupiter/NoCsBack/dtai/pieterr/Projects/socceraction/.venv/lib/python3.6/site-packages/pandera/schemas.py in __init__(self, pandas_dtype, checks, nullable, allow_duplicates, coerce, name)\r\n 1453\r\n 1454 # make sure pandas dtype is valid\r\n-> 1455 self.dtype # pylint: disable=pointless-statement\r\n 1456\r\n 1457 # this attribute is not 
meant to be accessed by users and is explicitly\r\n\r\n/cw/dtaijupiter/NoCsBack/dtai/pieterr/Projects/socceraction/.venv/lib/python3.6/site-packages/pandera/schemas.py in dtype(self)\r\n 1526 def dtype(self) -> Optional[str]:\r\n 1527 \"\"\"String representation of the dtype.\"\"\"\r\n-> 1528 return PandasDtype.get_str_dtype(self._pandas_dtype)\r\n 1529\r\n 1530 @property\r\n\r\n/cw/dtaijupiter/NoCsBack/dtai/pieterr/Projects/socceraction/.venv/lib/python3.6/site-packages/pandera/dtypes.py in get_str_dtype(cls, pandas_dtype_arg)\r\n 310 return dtype_.str_alias\r\n 311 raise TypeError(\r\n--> 312 \"type of `pandas_dtype` argument not recognized: \"\r\n 313 f\"{type(pandas_dtype_arg)}. Please specify a pandera PandasDtype \"\r\n 314 \"enum, legal pandas data type, pandas data type string alias, or \"\r\n\r\nTypeError: type of `pandas_dtype` argument not recognized: typing_extensions.Literal. Please specify a pandera PandasDtype enum, legal pandas data type, pandas data type string alias, or numpy data type string alias\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Typing definitions and helpers.\"\"\"\n# pylint:disable=abstract-method,disable=too-many-ancestors\nimport sys\nfrom typing import TYPE_CHECKING, Generic, Type, TypeVar\n\nimport pandas as pd\nimport typing_inspect\n\nfrom .dtypes import PandasDtype, PandasExtensionType\n\nif sys.version_info < (3, 8): # pragma: no cover\n from typing_extensions import Literal\nelse: # pragma: no cover\n from typing import Literal # pylint:disable=no-name-in-module\n\n\n_LEGACY_TYPING = sys.version_info[:3] < (3, 7, 0)\n\nGenericDtype = TypeVar( # type: ignore\n \"GenericDtype\",\n PandasDtype,\n PandasExtensionType,\n bool,\n int,\n str,\n float,\n Literal[PandasDtype.Bool],\n Literal[PandasDtype.DateTime],\n Literal[PandasDtype.Category],\n Literal[PandasDtype.Float],\n Literal[PandasDtype.Float16],\n Literal[PandasDtype.Float32],\n Literal[PandasDtype.Float64],\n Literal[PandasDtype.Int],\n Literal[PandasDtype.Int8],\n Literal[PandasDtype.Int16],\n Literal[PandasDtype.Int32],\n Literal[PandasDtype.Int64],\n Literal[PandasDtype.UInt8],\n Literal[PandasDtype.UInt16],\n Literal[PandasDtype.UInt32],\n Literal[PandasDtype.UInt64],\n Literal[PandasDtype.INT8],\n Literal[PandasDtype.INT16],\n Literal[PandasDtype.INT32],\n Literal[PandasDtype.INT64],\n Literal[PandasDtype.UINT8],\n Literal[PandasDtype.UINT16],\n Literal[PandasDtype.UINT32],\n Literal[PandasDtype.UINT64],\n Literal[PandasDtype.Object],\n Literal[PandasDtype.String],\n Literal[PandasDtype.STRING],\n Literal[PandasDtype.Timedelta],\n covariant=True,\n)\nSchema = TypeVar(\"Schema\", bound=\"SchemaModel\") # type: ignore\n\n\n# pylint:disable=too-few-public-methods\nclass Index(pd.Index, Generic[GenericDtype]):\n \"\"\"Representation of pandas.Index, only used for type annotation.\n\n *new in 0.5.0*\n \"\"\"\n\n\n# pylint:disable=too-few-public-methods\nclass Series(pd.Series, Generic[GenericDtype]): # type: ignore\n \"\"\"Representation of pandas.Series, only used for type annotation.\n\n *new in 0.5.0*\n \"\"\"\n\n\nif TYPE_CHECKING: # pragma: no cover\n # pylint:disable=too-few-public-methods,invalid-name\n T = TypeVar(\"T\")\n\n class DataFrame(pd.DataFrame, Generic[T]):\n \"\"\"\n Representation of pandas.DataFrame, only used for type annotation.\n\n *new in 0.5.0*\n \"\"\"\n\n\nelse:\n # pylint:disable=too-few-public-methods\n class DataFrame(pd.DataFrame, Generic[Schema]):\n \"\"\"\n Representation of pandas.DataFrame, only used for type annotation.\n\n *new in 0.5.0*\n 
\"\"\"\n\n\nclass AnnotationInfo: # pylint:disable=too-few-public-methods\n \"\"\"Captures extra information about an annotation.\n\n Attributes:\n origin: The non-parameterized generic class.\n arg: The first generic type (SchemaModel does not support more than 1 argument).\n literal: Whether the annotation is a literal.\n optional: Whether the annotation is optional.\n raw_annotation: The raw annotation.\n \"\"\"\n\n def __init__(self, raw_annotation: Type) -> None:\n self._parse_annotation(raw_annotation)\n\n @property\n def is_generic_df(self) -> bool:\n \"\"\"True if the annotation is a pandera.typing.DataFrame.\"\"\"\n return self.origin is not None and issubclass(self.origin, DataFrame)\n\n def _parse_annotation(self, raw_annotation: Type) -> None:\n \"\"\"Parse key information from annotation.\n\n :param annotation: A subscripted type.\n :returns: Annotation\n \"\"\"\n self.raw_annotation = raw_annotation\n\n self.optional = typing_inspect.is_optional_type(raw_annotation)\n if self.optional:\n # e.g: Typing.Union[pandera.typing.Index[str], NoneType]\n if _LEGACY_TYPING: # pragma: no cover\n # get_args -> ((pandera.typing.Index, <class 'str'>), <class 'NoneType'>)\n self.origin, self.arg = typing_inspect.get_args(\n raw_annotation\n )[0]\n return\n # get_args -> (pandera.typing.Index[str], <class 'NoneType'>)\n raw_annotation = typing_inspect.get_args(raw_annotation)[0]\n\n self.origin = typing_inspect.get_origin(raw_annotation)\n args = typing_inspect.get_args(raw_annotation)\n self.arg = args[0] if args else args\n\n self.literal = typing_inspect.is_literal_type(self.arg)\n if self.literal:\n self.arg = typing_inspect.get_args(self.arg)[0]\n\n\nBool = Literal[PandasDtype.Bool] #: ``\"bool\"`` numpy dtype\nDateTime = Literal[PandasDtype.DateTime] #: ``\"datetime64[ns]\"`` numpy dtype\nTimedelta = Literal[\n PandasDtype.Timedelta\n] #: ``\"timedelta64[ns]\"`` numpy dtype\nCategory = Literal[PandasDtype.Category] #: pandas ``\"categorical\"`` datatype\nFloat = Literal[PandasDtype.Float] #: ``\"float\"`` numpy dtype\nFloat16 = Literal[PandasDtype.Float16] #: ``\"float16\"`` numpy dtype\nFloat32 = Literal[PandasDtype.Float32] #: ``\"float32\"`` numpy dtype\nFloat64 = Literal[PandasDtype.Float64] #: ``\"float64\"`` numpy dtype\nInt = Literal[PandasDtype.Int] #: ``\"int\"`` numpy dtype\nInt8 = Literal[PandasDtype.Int8] #: ``\"int8\"`` numpy dtype\nInt16 = Literal[PandasDtype.Int16] #: ``\"int16\"`` numpy dtype\nInt32 = Literal[PandasDtype.Int32] #: ``\"int32\"`` numpy dtype\nInt64 = Literal[PandasDtype.Int64] #: ``\"int64\"`` numpy dtype\nUInt8 = Literal[PandasDtype.UInt8] #: ``\"uint8\"`` numpy dtype\nUInt16 = Literal[PandasDtype.UInt16] #: ``\"uint16\"`` numpy dtype\nUInt32 = Literal[PandasDtype.UInt32] #: ``\"uint32\"`` numpy dtype\nUInt64 = Literal[PandasDtype.UInt64] #: ``\"uint64\"`` numpy dtype\nINT8 = Literal[PandasDtype.INT8] #: ``\"Int8\"`` pandas dtype:: pandas 0.24.0+\nINT16 = Literal[PandasDtype.INT16] #: ``\"Int16\"`` pandas dtype: pandas 0.24.0+\nINT32 = Literal[PandasDtype.INT32] #: ``\"Int32\"`` pandas dtype: pandas 0.24.0+\nINT64 = Literal[PandasDtype.INT64] #: ``\"Int64\"`` pandas dtype: pandas 0.24.0+\nUINT8 = Literal[\n PandasDtype.UINT8\n] #: ``\"UInt8\"`` pandas dtype:: pandas 0.24.0+\nUINT16 = Literal[\n PandasDtype.UINT16\n] #: ``\"UInt16\"`` pandas dtype: pandas 0.24.0+\nUINT32 = Literal[\n PandasDtype.UINT32\n] #: ``\"UInt32\"`` pandas dtype: pandas 0.24.0+\nUINT64 = Literal[\n PandasDtype.UINT64\n] #: ``\"UInt64\"`` pandas dtype: pandas 0.24.0+\nObject = 
Literal[PandasDtype.Object] #: ``\"object\"`` numpy dtype\n\nString = Literal[PandasDtype.String] #: ``\"str\"`` numpy dtype\n\n#: ``\"string\"`` pandas dtypes: pandas 1.0.0+. For <1.0.0, this enum will\n#: fall back on the str-as-object-array representation.\nSTRING = Literal[PandasDtype.STRING] #: ``\"str\"`` numpy dtype\n", "path": "pandera/typing.py"}], "after_files": [{"content": "\"\"\"Typing definitions and helpers.\"\"\"\n# pylint:disable=abstract-method,disable=too-many-ancestors\nimport sys\nfrom typing import TYPE_CHECKING, Generic, Type, TypeVar\n\nimport pandas as pd\nimport typing_inspect\n\nfrom .dtypes import PandasDtype, PandasExtensionType\n\nif sys.version_info < (3, 8): # pragma: no cover\n from typing_extensions import Literal\nelse: # pragma: no cover\n from typing import Literal # pylint:disable=no-name-in-module\n\n\n_LEGACY_TYPING = sys.version_info[:3] < (3, 7, 0)\n\nGenericDtype = TypeVar( # type: ignore\n \"GenericDtype\",\n PandasDtype,\n PandasExtensionType,\n bool,\n int,\n str,\n float,\n Literal[PandasDtype.Bool],\n Literal[PandasDtype.DateTime],\n Literal[PandasDtype.Category],\n Literal[PandasDtype.Float],\n Literal[PandasDtype.Float16],\n Literal[PandasDtype.Float32],\n Literal[PandasDtype.Float64],\n Literal[PandasDtype.Int],\n Literal[PandasDtype.Int8],\n Literal[PandasDtype.Int16],\n Literal[PandasDtype.Int32],\n Literal[PandasDtype.Int64],\n Literal[PandasDtype.UInt8],\n Literal[PandasDtype.UInt16],\n Literal[PandasDtype.UInt32],\n Literal[PandasDtype.UInt64],\n Literal[PandasDtype.INT8],\n Literal[PandasDtype.INT16],\n Literal[PandasDtype.INT32],\n Literal[PandasDtype.INT64],\n Literal[PandasDtype.UINT8],\n Literal[PandasDtype.UINT16],\n Literal[PandasDtype.UINT32],\n Literal[PandasDtype.UINT64],\n Literal[PandasDtype.Object],\n Literal[PandasDtype.String],\n Literal[PandasDtype.STRING],\n Literal[PandasDtype.Timedelta],\n covariant=True,\n)\nSchema = TypeVar(\"Schema\", bound=\"SchemaModel\") # type: ignore\n\n\n# pylint:disable=too-few-public-methods\nclass Index(pd.Index, Generic[GenericDtype]):\n \"\"\"Representation of pandas.Index, only used for type annotation.\n\n *new in 0.5.0*\n \"\"\"\n\n\n# pylint:disable=too-few-public-methods\nclass Series(pd.Series, Generic[GenericDtype]): # type: ignore\n \"\"\"Representation of pandas.Series, only used for type annotation.\n\n *new in 0.5.0*\n \"\"\"\n\n\nif TYPE_CHECKING: # pragma: no cover\n # pylint:disable=too-few-public-methods,invalid-name\n T = TypeVar(\"T\")\n\n class DataFrame(pd.DataFrame, Generic[T]):\n \"\"\"\n Representation of pandas.DataFrame, only used for type annotation.\n\n *new in 0.5.0*\n \"\"\"\n\n\nelse:\n # pylint:disable=too-few-public-methods\n class DataFrame(pd.DataFrame, Generic[Schema]):\n \"\"\"\n Representation of pandas.DataFrame, only used for type annotation.\n\n *new in 0.5.0*\n \"\"\"\n\n\nclass AnnotationInfo: # pylint:disable=too-few-public-methods\n \"\"\"Captures extra information about an annotation.\n\n Attributes:\n origin: The non-parameterized generic class.\n arg: The first generic type (SchemaModel does not support more than 1 argument).\n literal: Whether the annotation is a literal.\n optional: Whether the annotation is optional.\n raw_annotation: The raw annotation.\n \"\"\"\n\n def __init__(self, raw_annotation: Type) -> None:\n self._parse_annotation(raw_annotation)\n\n @property\n def is_generic_df(self) -> bool:\n \"\"\"True if the annotation is a pandera.typing.DataFrame.\"\"\"\n return self.origin is not None and issubclass(self.origin, 
DataFrame)\n\n def _parse_annotation(self, raw_annotation: Type) -> None:\n \"\"\"Parse key information from annotation.\n\n :param annotation: A subscripted type.\n :returns: Annotation\n \"\"\"\n self.raw_annotation = raw_annotation\n\n self.optional = typing_inspect.is_optional_type(raw_annotation)\n if self.optional:\n # e.g: Typing.Union[pandera.typing.Index[str], NoneType]\n if _LEGACY_TYPING: # pragma: no cover\n # get_args -> ((pandera.typing.Index, <class 'str'>), <class 'NoneType'>)\n self.origin, self.arg = typing_inspect.get_args(\n raw_annotation\n )[0]\n # get_args -> (pandera.typing.Index[str], <class 'NoneType'>)\n raw_annotation = typing_inspect.get_args(raw_annotation)[0]\n\n if not (self.optional and _LEGACY_TYPING):\n self.origin = typing_inspect.get_origin(raw_annotation)\n args = typing_inspect.get_args(raw_annotation)\n self.arg = args[0] if args else args\n\n self.literal = typing_inspect.is_literal_type(self.arg)\n if self.literal:\n self.arg = typing_inspect.get_args(self.arg)[0]\n\n\nBool = Literal[PandasDtype.Bool] #: ``\"bool\"`` numpy dtype\nDateTime = Literal[PandasDtype.DateTime] #: ``\"datetime64[ns]\"`` numpy dtype\nTimedelta = Literal[\n PandasDtype.Timedelta\n] #: ``\"timedelta64[ns]\"`` numpy dtype\nCategory = Literal[PandasDtype.Category] #: pandas ``\"categorical\"`` datatype\nFloat = Literal[PandasDtype.Float] #: ``\"float\"`` numpy dtype\nFloat16 = Literal[PandasDtype.Float16] #: ``\"float16\"`` numpy dtype\nFloat32 = Literal[PandasDtype.Float32] #: ``\"float32\"`` numpy dtype\nFloat64 = Literal[PandasDtype.Float64] #: ``\"float64\"`` numpy dtype\nInt = Literal[PandasDtype.Int] #: ``\"int\"`` numpy dtype\nInt8 = Literal[PandasDtype.Int8] #: ``\"int8\"`` numpy dtype\nInt16 = Literal[PandasDtype.Int16] #: ``\"int16\"`` numpy dtype\nInt32 = Literal[PandasDtype.Int32] #: ``\"int32\"`` numpy dtype\nInt64 = Literal[PandasDtype.Int64] #: ``\"int64\"`` numpy dtype\nUInt8 = Literal[PandasDtype.UInt8] #: ``\"uint8\"`` numpy dtype\nUInt16 = Literal[PandasDtype.UInt16] #: ``\"uint16\"`` numpy dtype\nUInt32 = Literal[PandasDtype.UInt32] #: ``\"uint32\"`` numpy dtype\nUInt64 = Literal[PandasDtype.UInt64] #: ``\"uint64\"`` numpy dtype\nINT8 = Literal[PandasDtype.INT8] #: ``\"Int8\"`` pandas dtype:: pandas 0.24.0+\nINT16 = Literal[PandasDtype.INT16] #: ``\"Int16\"`` pandas dtype: pandas 0.24.0+\nINT32 = Literal[PandasDtype.INT32] #: ``\"Int32\"`` pandas dtype: pandas 0.24.0+\nINT64 = Literal[PandasDtype.INT64] #: ``\"Int64\"`` pandas dtype: pandas 0.24.0+\nUINT8 = Literal[\n PandasDtype.UINT8\n] #: ``\"UInt8\"`` pandas dtype:: pandas 0.24.0+\nUINT16 = Literal[\n PandasDtype.UINT16\n] #: ``\"UInt16\"`` pandas dtype: pandas 0.24.0+\nUINT32 = Literal[\n PandasDtype.UINT32\n] #: ``\"UInt32\"`` pandas dtype: pandas 0.24.0+\nUINT64 = Literal[\n PandasDtype.UINT64\n] #: ``\"UInt64\"`` pandas dtype: pandas 0.24.0+\nObject = Literal[PandasDtype.Object] #: ``\"object\"`` numpy dtype\n\nString = Literal[PandasDtype.String] #: ``\"str\"`` numpy dtype\n\n#: ``\"string\"`` pandas dtypes: pandas 1.0.0+. For <1.0.0, this enum will\n#: fall back on the str-as-object-array representation.\nSTRING = Literal[PandasDtype.STRING] #: ``\"str\"`` numpy dtype\n", "path": "pandera/typing.py"}]} | 3,739 | 223 |
gh_patches_debug_21774 | rasdani/github-patches | git_diff | bentoml__BentoML-825 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add --print-location option to `bentoml get` command
**Is your feature request related to a problem? Please describe.**
See related discussions here: https://github.com/bentoml/BentoML/pull/810#pullrequestreview-432731488
Currently, we've been using `jq` to do this in our documentation:
```bash
$ bentoml get IrisClassifier:latest -q | jq -r ".uri.uri"
/Users/chaoyu/bentoml/repository/IrisClassifier/20200616144911_7CE204
```
But this approach requires installing `jq`
**Describe the solution you'd like**
Would be great to allow the user to get just the saved location, without jq installed, from the CLI:
```bash
$ bentoml get IrisClassifier:latest --print-location
/Users/chaoyu/bentoml/repository/IrisClassifier/20200616144911_7CE204
```
**Describe alternatives you've considered**
A general json selector syntax that works for all JSON CLI output and allows the user to select other fields in the JSON output?
e.g.
```
$ bentoml get IrisClassifier:latest --json-output ".uri.uri"
/Users/chaoyu/bentoml/repository/IrisClassifier/20200616144911_7CE204
```
It can use the same jq syntax, with something like https://github.com/doloopwhile/pyjq
**Additional context**
Add any other context or screenshots about the feature request here.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bentoml/cli/bento.py`
Content:
```
1 # Copyright 2019 Atalaya Tech, Inc.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import click
15 import os
16 from google.protobuf.json_format import MessageToJson
17 from tabulate import tabulate
18
19 from bentoml.cli.click_utils import (
20 CLI_COLOR_ERROR,
21 _echo,
22 parse_bento_tag_list_callback,
23 )
24 from bentoml.cli.utils import humanfriendly_age_from_datetime
25 from bentoml.yatai.proto import status_pb2
26 from bentoml.utils import pb_to_yaml, status_pb_to_error_code_and_message
27 from bentoml.utils.usage_stats import track_cli
28 from bentoml.yatai.client import YataiClient
29 from bentoml.saved_bundle import safe_retrieve
30
31
32 def _print_bento_info(bento, output_type):
33 if output_type == 'yaml':
34 _echo(pb_to_yaml(bento))
35 else:
36 _echo(MessageToJson(bento))
37
38
39 def _print_bento_table(bentos, wide=False):
40 table = []
41 if wide:
42 headers = ['BENTO_SERVICE', 'CREATED_AT', 'APIS', 'ARTIFACTS', 'URI']
43 else:
44 headers = ['BENTO_SERVICE', 'AGE', 'APIS', 'ARTIFACTS']
45
46 for bento in bentos:
47 artifacts = [
48 f'{artifact.name}<{artifact.artifact_type}>'
49 for artifact in bento.bento_service_metadata.artifacts
50 ]
51 apis = [
52 f'{api.name}<{api.input_type}:{api.output_type}>'
53 for api in bento.bento_service_metadata.apis
54 ]
55 if wide:
56 created_at = bento.bento_service_metadata.created_at.ToDatetime().strftime(
57 "%Y-%m-%d %H:%M"
58 )
59 else:
60 created_at = humanfriendly_age_from_datetime(
61 bento.bento_service_metadata.created_at.ToDatetime()
62 )
63 row = [
64 f'{bento.name}:{bento.version}',
65 created_at,
66 ', '.join(apis),
67 ', '.join(artifacts),
68 ]
69 if wide:
70 row.append(bento.uri.uri)
71 table.append(row)
72
73 table_display = tabulate(table, headers, tablefmt='plain')
74 _echo(table_display)
75
76
77 def _print_bentos_info(bentos, output_type):
78 if output_type == 'table':
79 _print_bento_table(bentos)
80 elif output_type == 'wide':
81 _print_bento_table(bentos, wide=True)
82 else:
83 for bento in bentos:
84 _print_bento_info(bento, output_type)
85
86
87 def add_bento_sub_command(cli):
88 # pylint: disable=unused-variable
89 @cli.command(help='Get BentoService information')
90 @click.argument('bento', type=click.STRING)
91 @click.option(
92 '--limit', type=click.INT, help='Limit how many resources will be retrieved'
93 )
94 @click.option('--ascending-order', is_flag=True)
95 @click.option(
96 '-o', '--output', type=click.Choice(['json', 'yaml', 'table', 'wide'])
97 )
98 def get(bento, limit, ascending_order, output):
99 if ':' in bento:
100 name, version = bento.split(':')
101 else:
102 name = bento
103 version = None
104 yatai_client = YataiClient()
105
106 if name and version:
107 track_cli('bento-get')
108 output = output or 'json'
109 get_bento_result = yatai_client.repository.get(name, version)
110 if get_bento_result.status.status_code != status_pb2.Status.OK:
111 error_code, error_message = status_pb_to_error_code_and_message(
112 get_bento_result.status
113 )
114 _echo(
115 f'BentoService {name}:{version} not found - '
116 f'{error_code}:{error_message}',
117 CLI_COLOR_ERROR,
118 )
119 return
120 _print_bento_info(get_bento_result.bento, output)
121 return
122 elif name:
123 track_cli('bento-list')
124 output = output or 'table'
125 list_bento_versions_result = yatai_client.repository.list(
126 bento_name=name, limit=limit, ascending_order=ascending_order
127 )
128 if list_bento_versions_result.status.status_code != status_pb2.Status.OK:
129 error_code, error_message = status_pb_to_error_code_and_message(
130 list_bento_versions_result.status
131 )
132 _echo(
133 f'Failed to list versions for BentoService {name} '
134 f'{error_code}:{error_message}',
135 CLI_COLOR_ERROR,
136 )
137 return
138
139 _print_bentos_info(list_bento_versions_result.bentos, output)
140
141 @cli.command(name='list', help='List BentoServices information')
142 @click.option(
143 '--limit', type=click.INT, help='Limit how many BentoServices will be retrieved'
144 )
145 @click.option(
146 '--offset', type=click.INT, help='How many BentoServices will be skipped'
147 )
148 @click.option(
149 '--order-by', type=click.Choice(['created_at', 'name']), default='created_at',
150 )
151 @click.option('--ascending-order', is_flag=True)
152 @click.option(
153 '-o',
154 '--output',
155 type=click.Choice(['json', 'yaml', 'table', 'wide']),
156 default='table',
157 )
158 def list_bentos(limit, offset, order_by, ascending_order, output):
159 yatai_client = YataiClient()
160 track_cli('bento-list')
161 list_bentos_result = yatai_client.repository.list(
162 limit=limit,
163 offset=offset,
164 order_by=order_by,
165 ascending_order=ascending_order,
166 )
167 if list_bentos_result.status.status_code != status_pb2.Status.OK:
168 error_code, error_message = status_pb_to_error_code_and_message(
169 list_bentos_result.status
170 )
171 _echo(
172 f'Failed to list BentoServices ' f'{error_code}:{error_message}',
173 CLI_COLOR_ERROR,
174 )
175 return
176
177 _print_bentos_info(list_bentos_result.bentos, output)
178
179 @cli.command()
180 @click.argument("bentos", type=click.STRING, callback=parse_bento_tag_list_callback)
181 @click.option(
182 '-y', '--yes', '--assume-yes', is_flag=True, help='Automatic yes to prompts'
183 )
184 def delete(bentos, yes):
185 """Delete saved BentoService.
186
187 BENTO is the target BentoService to be deleted, referenced by its name and
188 version in format of name:version. For example: "iris_classifier:v1.2.0"
189
190 `bentoml delete` command also supports deleting multiple saved BentoService at
191 once, by providing name version tag separated by ",", for example:
192
193 `bentoml delete iris_classifier:v1.2.0,my_svc:v1,my_svc2:v3`
194 """
195 yatai_client = YataiClient()
196 for bento in bentos:
197 name, version = bento.split(':')
198 if not name and not version:
199 _echo(
200 'BentoService name or version is missing. Please provide in the '
201 'format of name:version',
202 CLI_COLOR_ERROR,
203 )
204 return
205 if not yes and not click.confirm(
206 f'Are you sure about delete {bento}? This will delete the BentoService '
207 f'saved bundle files permanently'
208 ):
209 return
210 result = yatai_client.repository.dangerously_delete_bento(
211 name=name, version=version
212 )
213 if result.status.status_code != status_pb2.Status.OK:
214 error_code, error_message = status_pb_to_error_code_and_message(
215 result.status
216 )
217 _echo(
218 f'Failed to delete Bento {name}:{version} '
219 f'{error_code}:{error_message}',
220 CLI_COLOR_ERROR,
221 )
222 _echo(f'BentoService {name}:{version} deleted')
223
224 @cli.command(
225 help='Retrieves BentoService artifacts into a target directory',
226 short_help="Retrieves BentoService artifacts into a target directory",
227 )
228 @click.argument("bento", type=click.STRING)
229 @click.option(
230 '--target_dir',
231 help="Directory to put artifacts into. Defaults to pwd.",
232 default=os.getcwd(),
233 )
234 def retrieve(bento, target_dir):
235 if ':' not in bento:
236 _echo(f'BentoService {bento} invalid - specify name:version')
237 return
238 name, version = bento.split(':')
239
240 yatai_client = YataiClient()
241
242 track_cli('bento-retrieve')
243 get_bento_result = yatai_client.repository.get(name, version)
244 if get_bento_result.status.status_code != status_pb2.Status.OK:
245 error_code, error_message = status_pb_to_error_code_and_message(
246 get_bento_result.status
247 )
248 _echo(
249 f'BentoService {name}:{version} not found - '
250 f'{error_code}:{error_message}',
251 CLI_COLOR_ERROR,
252 )
253 return
254
255 if get_bento_result.bento.uri.s3_presigned_url:
256 bento_service_bundle_path = get_bento_result.bento.uri.s3_presigned_url
257 else:
258 bento_service_bundle_path = get_bento_result.bento.uri.uri
259
260 safe_retrieve(bento_service_bundle_path, target_dir)
261
262 click.echo('Service %s artifact directory => %s' % (name, target_dir))
263
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bentoml/cli/bento.py b/bentoml/cli/bento.py
--- a/bentoml/cli/bento.py
+++ b/bentoml/cli/bento.py
@@ -92,10 +92,11 @@
'--limit', type=click.INT, help='Limit how many resources will be retrieved'
)
@click.option('--ascending-order', is_flag=True)
+ @click.option('--print-location', is_flag=True)
@click.option(
'-o', '--output', type=click.Choice(['json', 'yaml', 'table', 'wide'])
)
- def get(bento, limit, ascending_order, output):
+ def get(bento, limit, ascending_order, print_location, output):
if ':' in bento:
name, version = bento.split(':')
else:
@@ -117,6 +118,9 @@
CLI_COLOR_ERROR,
)
return
+ if print_location:
+ _echo(get_bento_result.bento.uri.uri)
+ return
_print_bento_info(get_bento_result.bento, output)
return
elif name:
| {"golden_diff": "diff --git a/bentoml/cli/bento.py b/bentoml/cli/bento.py\n--- a/bentoml/cli/bento.py\n+++ b/bentoml/cli/bento.py\n@@ -92,10 +92,11 @@\n '--limit', type=click.INT, help='Limit how many resources will be retrieved'\n )\n @click.option('--ascending-order', is_flag=True)\n+ @click.option('--print-location', is_flag=True)\n @click.option(\n '-o', '--output', type=click.Choice(['json', 'yaml', 'table', 'wide'])\n )\n- def get(bento, limit, ascending_order, output):\n+ def get(bento, limit, ascending_order, print_location, output):\n if ':' in bento:\n name, version = bento.split(':')\n else:\n@@ -117,6 +118,9 @@\n CLI_COLOR_ERROR,\n )\n return\n+ if print_location:\n+ _echo(get_bento_result.bento.uri.uri)\n+ return\n _print_bento_info(get_bento_result.bento, output)\n return\n elif name:\n", "issue": "Add --print-location option to `bentoml get` command\n**Is your feature request related to a problem? Please describe.**\r\nSee related discussions here: https://github.com/bentoml/BentoML/pull/810#pullrequestreview-432731488\r\n\r\nCurrently, we've been using `jq` to do this in our documentation:\r\n```bash\r\n$ bentoml get IrisClassifier:latest -q | jq -r \".uri.uri\"\r\n\r\n/Users/chaoyu/bentoml/repository/IrisClassifier/20200616144911_7CE204\r\n```\r\nBut this approach requires installing `jq`\r\n\r\n**Describe the solution you'd like**\r\n\r\nWould be great to allow the user to get just the saved location, without jq installed, from the CLI:\r\n```bash\r\n$ bentoml get IrisClassifier:latest --print-location\r\n\r\n/Users/chaoyu/bentoml/repository/IrisClassifier/20200616144911_7CE204\r\n```\r\n\r\n**Describe alternatives you've considered**\r\n\r\nA general json selector syntax that works for all JSON CLI output and allow the user to select other fields in the JSON output?\r\n\r\ne.g.\r\n```\r\n$ bentoml get IrisClassifier:latest --json-output \".uri.uri\"\r\n\r\n/Users/chaoyu/bentoml/repository/IrisClassifier/20200616144911_7CE204\r\n```\r\n\r\nIt can use the same jq syntax, with something like https://github.com/doloopwhile/pyjq\r\n\r\n**Additional context**\r\nAdd any other context or screenshots about the feature request here.\r\n\n", "before_files": [{"content": "# Copyright 2019 Atalaya Tech, Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport click\nimport os\nfrom google.protobuf.json_format import MessageToJson\nfrom tabulate import tabulate\n\nfrom bentoml.cli.click_utils import (\n CLI_COLOR_ERROR,\n _echo,\n parse_bento_tag_list_callback,\n)\nfrom bentoml.cli.utils import humanfriendly_age_from_datetime\nfrom bentoml.yatai.proto import status_pb2\nfrom bentoml.utils import pb_to_yaml, status_pb_to_error_code_and_message\nfrom bentoml.utils.usage_stats import track_cli\nfrom bentoml.yatai.client import YataiClient\nfrom bentoml.saved_bundle import safe_retrieve\n\n\ndef _print_bento_info(bento, output_type):\n if output_type == 'yaml':\n _echo(pb_to_yaml(bento))\n else:\n _echo(MessageToJson(bento))\n\n\ndef _print_bento_table(bentos, wide=False):\n table = 
[]\n if wide:\n headers = ['BENTO_SERVICE', 'CREATED_AT', 'APIS', 'ARTIFACTS', 'URI']\n else:\n headers = ['BENTO_SERVICE', 'AGE', 'APIS', 'ARTIFACTS']\n\n for bento in bentos:\n artifacts = [\n f'{artifact.name}<{artifact.artifact_type}>'\n for artifact in bento.bento_service_metadata.artifacts\n ]\n apis = [\n f'{api.name}<{api.input_type}:{api.output_type}>'\n for api in bento.bento_service_metadata.apis\n ]\n if wide:\n created_at = bento.bento_service_metadata.created_at.ToDatetime().strftime(\n \"%Y-%m-%d %H:%M\"\n )\n else:\n created_at = humanfriendly_age_from_datetime(\n bento.bento_service_metadata.created_at.ToDatetime()\n )\n row = [\n f'{bento.name}:{bento.version}',\n created_at,\n ', '.join(apis),\n ', '.join(artifacts),\n ]\n if wide:\n row.append(bento.uri.uri)\n table.append(row)\n\n table_display = tabulate(table, headers, tablefmt='plain')\n _echo(table_display)\n\n\ndef _print_bentos_info(bentos, output_type):\n if output_type == 'table':\n _print_bento_table(bentos)\n elif output_type == 'wide':\n _print_bento_table(bentos, wide=True)\n else:\n for bento in bentos:\n _print_bento_info(bento, output_type)\n\n\ndef add_bento_sub_command(cli):\n # pylint: disable=unused-variable\n @cli.command(help='Get BentoService information')\n @click.argument('bento', type=click.STRING)\n @click.option(\n '--limit', type=click.INT, help='Limit how many resources will be retrieved'\n )\n @click.option('--ascending-order', is_flag=True)\n @click.option(\n '-o', '--output', type=click.Choice(['json', 'yaml', 'table', 'wide'])\n )\n def get(bento, limit, ascending_order, output):\n if ':' in bento:\n name, version = bento.split(':')\n else:\n name = bento\n version = None\n yatai_client = YataiClient()\n\n if name and version:\n track_cli('bento-get')\n output = output or 'json'\n get_bento_result = yatai_client.repository.get(name, version)\n if get_bento_result.status.status_code != status_pb2.Status.OK:\n error_code, error_message = status_pb_to_error_code_and_message(\n get_bento_result.status\n )\n _echo(\n f'BentoService {name}:{version} not found - '\n f'{error_code}:{error_message}',\n CLI_COLOR_ERROR,\n )\n return\n _print_bento_info(get_bento_result.bento, output)\n return\n elif name:\n track_cli('bento-list')\n output = output or 'table'\n list_bento_versions_result = yatai_client.repository.list(\n bento_name=name, limit=limit, ascending_order=ascending_order\n )\n if list_bento_versions_result.status.status_code != status_pb2.Status.OK:\n error_code, error_message = status_pb_to_error_code_and_message(\n list_bento_versions_result.status\n )\n _echo(\n f'Failed to list versions for BentoService {name} '\n f'{error_code}:{error_message}',\n CLI_COLOR_ERROR,\n )\n return\n\n _print_bentos_info(list_bento_versions_result.bentos, output)\n\n @cli.command(name='list', help='List BentoServices information')\n @click.option(\n '--limit', type=click.INT, help='Limit how many BentoServices will be retrieved'\n )\n @click.option(\n '--offset', type=click.INT, help='How many BentoServices will be skipped'\n )\n @click.option(\n '--order-by', type=click.Choice(['created_at', 'name']), default='created_at',\n )\n @click.option('--ascending-order', is_flag=True)\n @click.option(\n '-o',\n '--output',\n type=click.Choice(['json', 'yaml', 'table', 'wide']),\n default='table',\n )\n def list_bentos(limit, offset, order_by, ascending_order, output):\n yatai_client = YataiClient()\n track_cli('bento-list')\n list_bentos_result = yatai_client.repository.list(\n limit=limit,\n offset=offset,\n 
order_by=order_by,\n ascending_order=ascending_order,\n )\n if list_bentos_result.status.status_code != status_pb2.Status.OK:\n error_code, error_message = status_pb_to_error_code_and_message(\n list_bentos_result.status\n )\n _echo(\n f'Failed to list BentoServices ' f'{error_code}:{error_message}',\n CLI_COLOR_ERROR,\n )\n return\n\n _print_bentos_info(list_bentos_result.bentos, output)\n\n @cli.command()\n @click.argument(\"bentos\", type=click.STRING, callback=parse_bento_tag_list_callback)\n @click.option(\n '-y', '--yes', '--assume-yes', is_flag=True, help='Automatic yes to prompts'\n )\n def delete(bentos, yes):\n \"\"\"Delete saved BentoService.\n\n BENTO is the target BentoService to be deleted, referenced by its name and\n version in format of name:version. For example: \"iris_classifier:v1.2.0\"\n\n `bentoml delete` command also supports deleting multiple saved BentoService at\n once, by providing name version tag separated by \",\", for example:\n\n `bentoml delete iris_classifier:v1.2.0,my_svc:v1,my_svc2:v3`\n \"\"\"\n yatai_client = YataiClient()\n for bento in bentos:\n name, version = bento.split(':')\n if not name and not version:\n _echo(\n 'BentoService name or version is missing. Please provide in the '\n 'format of name:version',\n CLI_COLOR_ERROR,\n )\n return\n if not yes and not click.confirm(\n f'Are you sure about delete {bento}? This will delete the BentoService '\n f'saved bundle files permanently'\n ):\n return\n result = yatai_client.repository.dangerously_delete_bento(\n name=name, version=version\n )\n if result.status.status_code != status_pb2.Status.OK:\n error_code, error_message = status_pb_to_error_code_and_message(\n result.status\n )\n _echo(\n f'Failed to delete Bento {name}:{version} '\n f'{error_code}:{error_message}',\n CLI_COLOR_ERROR,\n )\n _echo(f'BentoService {name}:{version} deleted')\n\n @cli.command(\n help='Retrieves BentoService artifacts into a target directory',\n short_help=\"Retrieves BentoService artifacts into a target directory\",\n )\n @click.argument(\"bento\", type=click.STRING)\n @click.option(\n '--target_dir',\n help=\"Directory to put artifacts into. 
Defaults to pwd.\",\n default=os.getcwd(),\n )\n def retrieve(bento, target_dir):\n if ':' not in bento:\n _echo(f'BentoService {bento} invalid - specify name:version')\n return\n name, version = bento.split(':')\n\n yatai_client = YataiClient()\n\n track_cli('bento-retrieve')\n get_bento_result = yatai_client.repository.get(name, version)\n if get_bento_result.status.status_code != status_pb2.Status.OK:\n error_code, error_message = status_pb_to_error_code_and_message(\n get_bento_result.status\n )\n _echo(\n f'BentoService {name}:{version} not found - '\n f'{error_code}:{error_message}',\n CLI_COLOR_ERROR,\n )\n return\n\n if get_bento_result.bento.uri.s3_presigned_url:\n bento_service_bundle_path = get_bento_result.bento.uri.s3_presigned_url\n else:\n bento_service_bundle_path = get_bento_result.bento.uri.uri\n\n safe_retrieve(bento_service_bundle_path, target_dir)\n\n click.echo('Service %s artifact directory => %s' % (name, target_dir))\n", "path": "bentoml/cli/bento.py"}], "after_files": [{"content": "# Copyright 2019 Atalaya Tech, Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport click\nimport os\nfrom google.protobuf.json_format import MessageToJson\nfrom tabulate import tabulate\n\nfrom bentoml.cli.click_utils import (\n CLI_COLOR_ERROR,\n _echo,\n parse_bento_tag_list_callback,\n)\nfrom bentoml.cli.utils import humanfriendly_age_from_datetime\nfrom bentoml.yatai.proto import status_pb2\nfrom bentoml.utils import pb_to_yaml, status_pb_to_error_code_and_message\nfrom bentoml.utils.usage_stats import track_cli\nfrom bentoml.yatai.client import YataiClient\nfrom bentoml.saved_bundle import safe_retrieve\n\n\ndef _print_bento_info(bento, output_type):\n if output_type == 'yaml':\n _echo(pb_to_yaml(bento))\n else:\n _echo(MessageToJson(bento))\n\n\ndef _print_bento_table(bentos, wide=False):\n table = []\n if wide:\n headers = ['BENTO_SERVICE', 'CREATED_AT', 'APIS', 'ARTIFACTS', 'URI']\n else:\n headers = ['BENTO_SERVICE', 'AGE', 'APIS', 'ARTIFACTS']\n\n for bento in bentos:\n artifacts = [\n f'{artifact.name}<{artifact.artifact_type}>'\n for artifact in bento.bento_service_metadata.artifacts\n ]\n apis = [\n f'{api.name}<{api.input_type}:{api.output_type}>'\n for api in bento.bento_service_metadata.apis\n ]\n if wide:\n created_at = bento.bento_service_metadata.created_at.ToDatetime().strftime(\n \"%Y-%m-%d %H:%M\"\n )\n else:\n created_at = humanfriendly_age_from_datetime(\n bento.bento_service_metadata.created_at.ToDatetime()\n )\n row = [\n f'{bento.name}:{bento.version}',\n created_at,\n ', '.join(apis),\n ', '.join(artifacts),\n ]\n if wide:\n row.append(bento.uri.uri)\n table.append(row)\n\n table_display = tabulate(table, headers, tablefmt='plain')\n _echo(table_display)\n\n\ndef _print_bentos_info(bentos, output_type):\n if output_type == 'table':\n _print_bento_table(bentos)\n elif output_type == 'wide':\n _print_bento_table(bentos, wide=True)\n else:\n for bento in bentos:\n _print_bento_info(bento, output_type)\n\n\ndef add_bento_sub_command(cli):\n # pylint: 
disable=unused-variable\n @cli.command(help='Get BentoService information')\n @click.argument('bento', type=click.STRING)\n @click.option(\n '--limit', type=click.INT, help='Limit how many resources will be retrieved'\n )\n @click.option('--ascending-order', is_flag=True)\n @click.option('--print-location', is_flag=True)\n @click.option(\n '-o', '--output', type=click.Choice(['json', 'yaml', 'table', 'wide'])\n )\n def get(bento, limit, ascending_order, print_location, output):\n if ':' in bento:\n name, version = bento.split(':')\n else:\n name = bento\n version = None\n yatai_client = YataiClient()\n\n if name and version:\n track_cli('bento-get')\n output = output or 'json'\n get_bento_result = yatai_client.repository.get(name, version)\n if get_bento_result.status.status_code != status_pb2.Status.OK:\n error_code, error_message = status_pb_to_error_code_and_message(\n get_bento_result.status\n )\n _echo(\n f'BentoService {name}:{version} not found - '\n f'{error_code}:{error_message}',\n CLI_COLOR_ERROR,\n )\n return\n if print_location:\n _echo(get_bento_result.bento.uri.uri)\n return\n _print_bento_info(get_bento_result.bento, output)\n return\n elif name:\n track_cli('bento-list')\n output = output or 'table'\n list_bento_versions_result = yatai_client.repository.list(\n bento_name=name, limit=limit, ascending_order=ascending_order\n )\n if list_bento_versions_result.status.status_code != status_pb2.Status.OK:\n error_code, error_message = status_pb_to_error_code_and_message(\n list_bento_versions_result.status\n )\n _echo(\n f'Failed to list versions for BentoService {name} '\n f'{error_code}:{error_message}',\n CLI_COLOR_ERROR,\n )\n return\n\n _print_bentos_info(list_bento_versions_result.bentos, output)\n\n @cli.command(name='list', help='List BentoServices information')\n @click.option(\n '--limit', type=click.INT, help='Limit how many BentoServices will be retrieved'\n )\n @click.option(\n '--offset', type=click.INT, help='How many BentoServices will be skipped'\n )\n @click.option(\n '--order-by', type=click.Choice(['created_at', 'name']), default='created_at',\n )\n @click.option('--ascending-order', is_flag=True)\n @click.option(\n '-o',\n '--output',\n type=click.Choice(['json', 'yaml', 'table', 'wide']),\n default='table',\n )\n def list_bentos(limit, offset, order_by, ascending_order, output):\n yatai_client = YataiClient()\n track_cli('bento-list')\n list_bentos_result = yatai_client.repository.list(\n limit=limit,\n offset=offset,\n order_by=order_by,\n ascending_order=ascending_order,\n )\n if list_bentos_result.status.status_code != status_pb2.Status.OK:\n error_code, error_message = status_pb_to_error_code_and_message(\n list_bentos_result.status\n )\n _echo(\n f'Failed to list BentoServices ' f'{error_code}:{error_message}',\n CLI_COLOR_ERROR,\n )\n return\n\n _print_bentos_info(list_bentos_result.bentos, output)\n\n @cli.command()\n @click.argument(\"bentos\", type=click.STRING, callback=parse_bento_tag_list_callback)\n @click.option(\n '-y', '--yes', '--assume-yes', is_flag=True, help='Automatic yes to prompts'\n )\n def delete(bentos, yes):\n \"\"\"Delete saved BentoService.\n\n BENTO is the target BentoService to be deleted, referenced by its name and\n version in format of name:version. 
For example: \"iris_classifier:v1.2.0\"\n\n `bentoml delete` command also supports deleting multiple saved BentoService at\n once, by providing name version tag separated by \",\", for example:\n\n `bentoml delete iris_classifier:v1.2.0,my_svc:v1,my_svc2:v3`\n \"\"\"\n yatai_client = YataiClient()\n for bento in bentos:\n name, version = bento.split(':')\n if not name and not version:\n _echo(\n 'BentoService name or version is missing. Please provide in the '\n 'format of name:version',\n CLI_COLOR_ERROR,\n )\n return\n if not yes and not click.confirm(\n f'Are you sure about delete {bento}? This will delete the BentoService '\n f'saved bundle files permanently'\n ):\n return\n result = yatai_client.repository.dangerously_delete_bento(\n name=name, version=version\n )\n if result.status.status_code != status_pb2.Status.OK:\n error_code, error_message = status_pb_to_error_code_and_message(\n result.status\n )\n _echo(\n f'Failed to delete Bento {name}:{version} '\n f'{error_code}:{error_message}',\n CLI_COLOR_ERROR,\n )\n _echo(f'BentoService {name}:{version} deleted')\n\n @cli.command(\n help='Retrieves BentoService artifacts into a target directory',\n short_help=\"Retrieves BentoService artifacts into a target directory\",\n )\n @click.argument(\"bento\", type=click.STRING)\n @click.option(\n '--target_dir',\n help=\"Directory to put artifacts into. Defaults to pwd.\",\n default=os.getcwd(),\n )\n def retrieve(bento, target_dir):\n if ':' not in bento:\n _echo(f'BentoService {bento} invalid - specify name:version')\n return\n name, version = bento.split(':')\n\n yatai_client = YataiClient()\n\n track_cli('bento-retrieve')\n get_bento_result = yatai_client.repository.get(name, version)\n if get_bento_result.status.status_code != status_pb2.Status.OK:\n error_code, error_message = status_pb_to_error_code_and_message(\n get_bento_result.status\n )\n _echo(\n f'BentoService {name}:{version} not found - '\n f'{error_code}:{error_message}',\n CLI_COLOR_ERROR,\n )\n return\n\n if get_bento_result.bento.uri.s3_presigned_url:\n bento_service_bundle_path = get_bento_result.bento.uri.s3_presigned_url\n else:\n bento_service_bundle_path = get_bento_result.bento.uri.uri\n\n safe_retrieve(bento_service_bundle_path, target_dir)\n\n click.echo('Service %s artifact directory => %s' % (name, target_dir))\n", "path": "bentoml/cli/bento.py"}]} | 3,497 | 255 |
gh_patches_debug_16489 | rasdani/github-patches | git_diff | fossasia__open-event-server-6754 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error in migrations
**Describe the bug**
Due to recent merging of PRs https://github.com/fossasia/open-event-server/pull/6744/ https://github.com/fossasia/open-event-server/pull/6748
The PR https://github.com/fossasia/open-event-server/pull/6744 has an invalid migration to remove the 'secret' column from the db, which was unrelated to the PR. Hence we now have two migrations for the same column `secret` from `settings`, which
drop the column on upgrade and create it on downgrade
**Additional context**
@iamareebjamal - Please check
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `migrations/versions/rev-2020-01-17-18:09:31-6ebafb385765_.py`
Content:
```
1 """empty message
2
3 Revision ID: 6ebafb385765
4 Revises: 30a490ad1609
5 Create Date: 2020-01-17 18:09:31.897988
6
7 """
8
9 from alembic import op
10 import sqlalchemy as sa
11 import sqlalchemy_utils
12
13
14 # revision identifiers, used by Alembic.
15 revision = '6ebafb385765'
16 down_revision = '30a490ad1609'
17
18
19 def upgrade():
20 # ### commands auto generated by Alembic - please adjust! ###
21 op.drop_column('settings', 'secret')
22 op.add_column('ticket_holders', sa.Column('age_group', sa.String(), nullable=True))
23 # ### end Alembic commands ###
24
25
26 def downgrade():
27 # ### commands auto generated by Alembic - please adjust! ###
28 op.drop_column('ticket_holders', 'age_group')
29 op.add_column('settings', sa.Column('secret', sa.VARCHAR(), autoincrement=False, nullable=True))
30 # ### end Alembic commands ###
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/migrations/versions/rev-2020-01-17-18:09:31-6ebafb385765_.py b/migrations/versions/rev-2020-01-17-18:09:31-6ebafb385765_.py
--- a/migrations/versions/rev-2020-01-17-18:09:31-6ebafb385765_.py
+++ b/migrations/versions/rev-2020-01-17-18:09:31-6ebafb385765_.py
@@ -18,7 +18,6 @@
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
- op.drop_column('settings', 'secret')
op.add_column('ticket_holders', sa.Column('age_group', sa.String(), nullable=True))
# ### end Alembic commands ###
@@ -26,5 +25,4 @@
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('ticket_holders', 'age_group')
- op.add_column('settings', sa.Column('secret', sa.VARCHAR(), autoincrement=False, nullable=True))
# ### end Alembic commands ###
| {"golden_diff": "diff --git a/migrations/versions/rev-2020-01-17-18:09:31-6ebafb385765_.py b/migrations/versions/rev-2020-01-17-18:09:31-6ebafb385765_.py\n--- a/migrations/versions/rev-2020-01-17-18:09:31-6ebafb385765_.py\n+++ b/migrations/versions/rev-2020-01-17-18:09:31-6ebafb385765_.py\n@@ -18,7 +18,6 @@\n \n def upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n- op.drop_column('settings', 'secret')\n op.add_column('ticket_holders', sa.Column('age_group', sa.String(), nullable=True))\n # ### end Alembic commands ###\n \n@@ -26,5 +25,4 @@\n def downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('ticket_holders', 'age_group')\n- op.add_column('settings', sa.Column('secret', sa.VARCHAR(), autoincrement=False, nullable=True))\n # ### end Alembic commands ###\n", "issue": "Error in migrations\n**Describe the bug**\r\nDue to recent merging of PRs https://github.com/fossasia/open-event-server/pull/6744/ https://github.com/fossasia/open-event-server/pull/6748\r\n\r\nThe PR https://github.com/fossasia/open-event-server/pull/6744 has an invalid migration to remove the 'secret' column from the db which was unrelated to PR. Hence now we have two migration for the same column `secret` from `settings` which\r\n drops column on upgrade and creates on downgrade\r\n\r\n\r\n**Additional context**\r\n@iamareebjamal - Please chck\r\n\n", "before_files": [{"content": "\"\"\"empty message\n\nRevision ID: 6ebafb385765\nRevises: 30a490ad1609\nCreate Date: 2020-01-17 18:09:31.897988\n\n\"\"\"\n\nfrom alembic import op\nimport sqlalchemy as sa\nimport sqlalchemy_utils\n\n\n# revision identifiers, used by Alembic.\nrevision = '6ebafb385765'\ndown_revision = '30a490ad1609'\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('settings', 'secret')\n op.add_column('ticket_holders', sa.Column('age_group', sa.String(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('ticket_holders', 'age_group')\n op.add_column('settings', sa.Column('secret', sa.VARCHAR(), autoincrement=False, nullable=True))\n # ### end Alembic commands ###\n", "path": "migrations/versions/rev-2020-01-17-18:09:31-6ebafb385765_.py"}], "after_files": [{"content": "\"\"\"empty message\n\nRevision ID: 6ebafb385765\nRevises: 30a490ad1609\nCreate Date: 2020-01-17 18:09:31.897988\n\n\"\"\"\n\nfrom alembic import op\nimport sqlalchemy as sa\nimport sqlalchemy_utils\n\n\n# revision identifiers, used by Alembic.\nrevision = '6ebafb385765'\ndown_revision = '30a490ad1609'\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('ticket_holders', sa.Column('age_group', sa.String(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('ticket_holders', 'age_group')\n # ### end Alembic commands ###\n", "path": "migrations/versions/rev-2020-01-17-18:09:31-6ebafb385765_.py"}]} | 734 | 313 |
gh_patches_debug_13042 | rasdani/github-patches | git_diff | e-valuation__EvaP-688 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Exporter only uses first contributor per type
The exporter only uses the results of the first contributor per type.
It should instead compute the average of all contributors per type.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evap/results/exporters.py`
Content:
```
1 from evap.evaluation.models import Questionnaire
2 from evap.evaluation.tools import calculate_results, calculate_average_grades_and_deviation, get_grade_color, get_deviation_color
3
4 from django.utils.translation import ugettext as _
5
6 from collections import OrderedDict
7 from collections import defaultdict
8 import datetime
9 import xlwt
10
11
12 class ExcelExporter(object):
13
14 def __init__(self, semester):
15 self.semester = semester
16 self.styles = dict()
17
18 self.CUSTOM_COLOR_START = 8
19 self.NUM_GRADE_COLORS = 21 # 1.0 to 5.0 in 0.2 steps
20 self.NUM_DEVIATION_COLORS = 13 # 0.0 to 2.4 in 0.2 steps
21 self.STEP = 0.2 # we only have a limited number of custom colors
22
23 def normalize_number(self, number):
24 """ floors 'number' to a multiply of self.STEP """
25 rounded_number = round(number, 1) # see #302
26 return round(int(rounded_number / self.STEP + 0.0001) * self.STEP, 1)
27
28 def create_style(self, workbook, base_style, style_name, palette_index, color):
29 color_name = style_name + "_color"
30 xlwt.add_palette_colour(color_name, palette_index)
31 workbook.set_colour_RGB(palette_index, *color)
32 self.styles[style_name] = xlwt.easyxf(base_style.format(color_name), num_format_str="0.0")
33
34 def init_styles(self, workbook):
35 self.styles = {
36 'default': xlwt.Style.default_style,
37 'avg': xlwt.easyxf('alignment: horiz centre; font: bold on; borders: left medium, top medium, bottom medium'),
38 'headline': xlwt.easyxf('font: bold on, height 400; alignment: horiz centre, vert centre, wrap on', num_format_str="0.0"),
39 'course': xlwt.easyxf('alignment: horiz centre, wrap on, rota 90; borders: left medium, top medium'),
40 'course_unfinished': xlwt.easyxf('alignment: horiz centre, wrap on, rota 90; borders: left medium, top medium; font: italic on'),
41 'total_voters': xlwt.easyxf('alignment: horiz centre; borders: left medium, bottom medium, right medium'),
42 'bold': xlwt.easyxf('font: bold on'),
43 'border_left': xlwt.easyxf('borders: left medium'),
44 'border_right': xlwt.easyxf('borders: right medium'),
45 'border_top_bottom_right': xlwt.easyxf('borders: top medium, bottom medium, right medium')}
46
47
48
49 grade_base_style = 'pattern: pattern solid, fore_colour {}; alignment: horiz centre; font: bold on; borders: left medium'
50 for i in range(0, self.NUM_GRADE_COLORS):
51 grade = 1 + i*self.STEP
52 color = get_grade_color(grade)
53 palette_index = self.CUSTOM_COLOR_START + i
54 style_name = self.grade_to_style(grade)
55 self.create_style(workbook, grade_base_style, style_name, palette_index, color)
56
57 deviation_base_style = 'pattern: pattern solid, fore_colour {}; alignment: horiz centre; borders: right medium'
58 for i in range(0, self.NUM_DEVIATION_COLORS):
59 deviation = i * self.STEP
60 color = get_deviation_color(deviation)
61 palette_index = self.CUSTOM_COLOR_START + self.NUM_GRADE_COLORS + i
62 style_name = self.deviation_to_style(deviation)
63 self.create_style(workbook, deviation_base_style, style_name, palette_index, color)
64
65
66 def grade_to_style(self, grade):
67 return 'grade_' + str(self.normalize_number(grade))
68
69 def deviation_to_style(self, deviation):
70 return 'deviation_' + str(self.normalize_number(deviation))
71
72 def export(self, response, ignore_not_enough_answers=False):
73 courses_with_results = list()
74 for course in self.semester.course_set.filter(state="published").all():
75 if course.is_single_result():
76 continue
77 results = OrderedDict()
78 for questionnaire, contributor, label, data, section_warning in calculate_results(course):
79 results.setdefault(questionnaire.id, []).extend(data)
80 courses_with_results.append((course, results))
81
82 courses_with_results.sort(key=lambda cr: cr[0].type)
83
84 qn_frequencies = defaultdict(int)
85 for course, results in courses_with_results:
86 for questionnaire, results in results.items():
87 qn_frequencies[questionnaire] += 1
88
89 qn_relevant = list(qn_frequencies.items())
90 qn_relevant.sort(key=lambda t: -t[1])
91
92 questionnaires = [Questionnaire.objects.get(id=t[0]) for t in qn_relevant]
93
94 self.workbook = xlwt.Workbook()
95 self.sheet = self.workbook.add_sheet(_("Results"))
96 self.row = 0
97 self.col = 0
98
99
100 self.init_styles(self.workbook)
101
102 writec(self, _("Evaluation {0} - created on {1}").format(self.semester.name, datetime.date.today()), "headline")
103 for course, results in courses_with_results:
104 if course.state == "published":
105 writec(self, course.name, "course", cols=2)
106 else:
107 writec(self, course.name, "course_unfinished", cols=2)
108
109 writen(self)
110 for course, results in courses_with_results:
111 writec(self, "Average", "avg")
112 writec(self, "Deviation", "border_top_bottom_right")
113
114 for questionnaire in questionnaires:
115 writen(self, questionnaire.name, "bold")
116 for course, results in courses_with_results:
117 self.write_two_empty_cells_with_borders()
118
119 for question in questionnaire.question_set.all():
120 if question.is_text_question:
121 continue
122
123 writen(self, question.text)
124
125 for course, results in courses_with_results:
126 qn_results = results.get(questionnaire.id, None)
127 if qn_results:
128 values = []
129 deviations = []
130 for grade_result in qn_results:
131 if grade_result.question.id == question.id:
132 if grade_result.average:
133 values.append(grade_result.average)
134 deviations.append(grade_result.deviation)
135 break
136 enough_answers = course.can_publish_grades
137 if values and (enough_answers or ignore_not_enough_answers):
138 avg = sum(values) / len(values)
139 writec(self, avg, self.grade_to_style(avg))
140
141 dev = sum(deviations) / len(deviations)
142 writec(self, dev, self.deviation_to_style(dev))
143 else:
144 self.write_two_empty_cells_with_borders()
145 else:
146 self.write_two_empty_cells_with_borders()
147 writen(self, None)
148 for course, results in courses_with_results:
149 self.write_two_empty_cells_with_borders()
150
151 writen(self, _("Overall Average Grade"), "bold")
152 for course, results in courses_with_results:
153 avg, dev = calculate_average_grades_and_deviation(course)
154 if avg:
155 writec(self, avg, self.grade_to_style(avg), cols=2)
156 else:
157 self.write_two_empty_cells_with_borders()
158
159 writen(self, _("Overall Average Standard Deviation"), "bold")
160 for course, results in courses_with_results:
161 avg, dev = calculate_average_grades_and_deviation(course)
162 if dev is not None:
163 writec(self, dev, self.deviation_to_style(dev), cols=2)
164 else:
165 self.write_two_empty_cells_with_borders()
166
167 writen(self, _("Total Voters/Total Participants"), "bold")
168 for course, results in courses_with_results:
169 percent_participants = float(course.num_voters)/float(course.num_participants) if course.num_participants > 0 else 0
170 writec(self, "{}/{} ({:.0%})".format(course.num_voters, course.num_participants, percent_participants), "total_voters", cols=2)
171
172 self.workbook.save(response)
173
174
175 def write_two_empty_cells_with_borders(self):
176 writec(self, None, "border_left")
177 writec(self, None, "border_right")
178
179
180 def writen(exporter, label="", style_name="default"):
181 """Write the cell at the beginning of the next row."""
182 exporter.col = 0
183 exporter.row += 1
184 writec(exporter, label, style_name)
185
186 def writec(exporter, label, style_name, rows=1, cols=1):
187 """Write the cell in the next column of the current line."""
188 _write(exporter, label, exporter.styles[style_name], rows, cols)
189 exporter.col += 1
190
191 def _write(exporter, label, style, rows, cols):
192 if rows > 1 or cols > 1:
193 exporter.sheet.write_merge(exporter.row, exporter.row+rows-1, exporter.col, exporter.col+cols-1, label, style)
194 exporter.col += cols - 1
195 else:
196 exporter.sheet.write(exporter.row, exporter.col, label, style)
197
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/evap/results/exporters.py b/evap/results/exporters.py
--- a/evap/results/exporters.py
+++ b/evap/results/exporters.py
@@ -132,7 +132,6 @@
if grade_result.average:
values.append(grade_result.average)
deviations.append(grade_result.deviation)
- break
enough_answers = course.can_publish_grades
if values and (enough_answers or ignore_not_enough_answers):
avg = sum(values) / len(values)
| {"golden_diff": "diff --git a/evap/results/exporters.py b/evap/results/exporters.py\n--- a/evap/results/exporters.py\n+++ b/evap/results/exporters.py\n@@ -132,7 +132,6 @@\n if grade_result.average:\n values.append(grade_result.average)\n deviations.append(grade_result.deviation)\n- break\n enough_answers = course.can_publish_grades\n if values and (enough_answers or ignore_not_enough_answers):\n avg = sum(values) / len(values)\n", "issue": "Exporter only uses first contributor per type\nThe exporter only uses the results of the first contributor per type.\nIt should instead compute the average of all contributors per type.\n\n", "before_files": [{"content": "from evap.evaluation.models import Questionnaire\nfrom evap.evaluation.tools import calculate_results, calculate_average_grades_and_deviation, get_grade_color, get_deviation_color\n\nfrom django.utils.translation import ugettext as _\n\nfrom collections import OrderedDict\nfrom collections import defaultdict\nimport datetime\nimport xlwt\n\n\nclass ExcelExporter(object):\n\n def __init__(self, semester):\n self.semester = semester\n self.styles = dict()\n\n self.CUSTOM_COLOR_START = 8\n self.NUM_GRADE_COLORS = 21 # 1.0 to 5.0 in 0.2 steps\n self.NUM_DEVIATION_COLORS = 13 # 0.0 to 2.4 in 0.2 steps\n self.STEP = 0.2 # we only have a limited number of custom colors\n\n def normalize_number(self, number):\n \"\"\" floors 'number' to a multiply of self.STEP \"\"\"\n rounded_number = round(number, 1) # see #302\n return round(int(rounded_number / self.STEP + 0.0001) * self.STEP, 1)\n\n def create_style(self, workbook, base_style, style_name, palette_index, color):\n color_name = style_name + \"_color\"\n xlwt.add_palette_colour(color_name, palette_index)\n workbook.set_colour_RGB(palette_index, *color)\n self.styles[style_name] = xlwt.easyxf(base_style.format(color_name), num_format_str=\"0.0\")\n\n def init_styles(self, workbook):\n self.styles = {\n 'default': xlwt.Style.default_style,\n 'avg': xlwt.easyxf('alignment: horiz centre; font: bold on; borders: left medium, top medium, bottom medium'),\n 'headline': xlwt.easyxf('font: bold on, height 400; alignment: horiz centre, vert centre, wrap on', num_format_str=\"0.0\"),\n 'course': xlwt.easyxf('alignment: horiz centre, wrap on, rota 90; borders: left medium, top medium'),\n 'course_unfinished': xlwt.easyxf('alignment: horiz centre, wrap on, rota 90; borders: left medium, top medium; font: italic on'),\n 'total_voters': xlwt.easyxf('alignment: horiz centre; borders: left medium, bottom medium, right medium'),\n 'bold': xlwt.easyxf('font: bold on'),\n 'border_left': xlwt.easyxf('borders: left medium'),\n 'border_right': xlwt.easyxf('borders: right medium'),\n 'border_top_bottom_right': xlwt.easyxf('borders: top medium, bottom medium, right medium')}\n\n\n\n grade_base_style = 'pattern: pattern solid, fore_colour {}; alignment: horiz centre; font: bold on; borders: left medium'\n for i in range(0, self.NUM_GRADE_COLORS):\n grade = 1 + i*self.STEP\n color = get_grade_color(grade)\n palette_index = self.CUSTOM_COLOR_START + i\n style_name = self.grade_to_style(grade)\n self.create_style(workbook, grade_base_style, style_name, palette_index, color)\n\n deviation_base_style = 'pattern: pattern solid, fore_colour {}; alignment: horiz centre; borders: right medium'\n for i in range(0, self.NUM_DEVIATION_COLORS):\n deviation = i * self.STEP\n color = get_deviation_color(deviation)\n palette_index = self.CUSTOM_COLOR_START + self.NUM_GRADE_COLORS + i\n style_name = 
self.deviation_to_style(deviation)\n self.create_style(workbook, deviation_base_style, style_name, palette_index, color)\n\n\n def grade_to_style(self, grade):\n return 'grade_' + str(self.normalize_number(grade))\n\n def deviation_to_style(self, deviation):\n return 'deviation_' + str(self.normalize_number(deviation))\n\n def export(self, response, ignore_not_enough_answers=False):\n courses_with_results = list()\n for course in self.semester.course_set.filter(state=\"published\").all():\n if course.is_single_result():\n continue\n results = OrderedDict()\n for questionnaire, contributor, label, data, section_warning in calculate_results(course):\n results.setdefault(questionnaire.id, []).extend(data)\n courses_with_results.append((course, results))\n\n courses_with_results.sort(key=lambda cr: cr[0].type)\n\n qn_frequencies = defaultdict(int)\n for course, results in courses_with_results:\n for questionnaire, results in results.items():\n qn_frequencies[questionnaire] += 1\n\n qn_relevant = list(qn_frequencies.items())\n qn_relevant.sort(key=lambda t: -t[1])\n\n questionnaires = [Questionnaire.objects.get(id=t[0]) for t in qn_relevant]\n\n self.workbook = xlwt.Workbook()\n self.sheet = self.workbook.add_sheet(_(\"Results\"))\n self.row = 0\n self.col = 0\n\n\n self.init_styles(self.workbook)\n\n writec(self, _(\"Evaluation {0} - created on {1}\").format(self.semester.name, datetime.date.today()), \"headline\")\n for course, results in courses_with_results:\n if course.state == \"published\":\n writec(self, course.name, \"course\", cols=2)\n else:\n writec(self, course.name, \"course_unfinished\", cols=2)\n\n writen(self)\n for course, results in courses_with_results:\n writec(self, \"Average\", \"avg\")\n writec(self, \"Deviation\", \"border_top_bottom_right\")\n\n for questionnaire in questionnaires:\n writen(self, questionnaire.name, \"bold\")\n for course, results in courses_with_results:\n self.write_two_empty_cells_with_borders()\n\n for question in questionnaire.question_set.all():\n if question.is_text_question:\n continue\n\n writen(self, question.text)\n\n for course, results in courses_with_results:\n qn_results = results.get(questionnaire.id, None)\n if qn_results:\n values = []\n deviations = []\n for grade_result in qn_results:\n if grade_result.question.id == question.id:\n if grade_result.average:\n values.append(grade_result.average)\n deviations.append(grade_result.deviation)\n break\n enough_answers = course.can_publish_grades\n if values and (enough_answers or ignore_not_enough_answers):\n avg = sum(values) / len(values)\n writec(self, avg, self.grade_to_style(avg))\n\n dev = sum(deviations) / len(deviations)\n writec(self, dev, self.deviation_to_style(dev))\n else:\n self.write_two_empty_cells_with_borders()\n else:\n self.write_two_empty_cells_with_borders()\n writen(self, None)\n for course, results in courses_with_results:\n self.write_two_empty_cells_with_borders()\n\n writen(self, _(\"Overall Average Grade\"), \"bold\")\n for course, results in courses_with_results:\n avg, dev = calculate_average_grades_and_deviation(course)\n if avg:\n writec(self, avg, self.grade_to_style(avg), cols=2)\n else:\n self.write_two_empty_cells_with_borders()\n\n writen(self, _(\"Overall Average Standard Deviation\"), \"bold\")\n for course, results in courses_with_results:\n avg, dev = calculate_average_grades_and_deviation(course)\n if dev is not None:\n writec(self, dev, self.deviation_to_style(dev), cols=2)\n else:\n self.write_two_empty_cells_with_borders()\n\n writen(self, 
_(\"Total Voters/Total Participants\"), \"bold\")\n for course, results in courses_with_results:\n percent_participants = float(course.num_voters)/float(course.num_participants) if course.num_participants > 0 else 0\n writec(self, \"{}/{} ({:.0%})\".format(course.num_voters, course.num_participants, percent_participants), \"total_voters\", cols=2)\n\n self.workbook.save(response)\n\n\n def write_two_empty_cells_with_borders(self):\n writec(self, None, \"border_left\")\n writec(self, None, \"border_right\")\n\n\ndef writen(exporter, label=\"\", style_name=\"default\"):\n \"\"\"Write the cell at the beginning of the next row.\"\"\"\n exporter.col = 0\n exporter.row += 1\n writec(exporter, label, style_name)\n\ndef writec(exporter, label, style_name, rows=1, cols=1):\n \"\"\"Write the cell in the next column of the current line.\"\"\"\n _write(exporter, label, exporter.styles[style_name], rows, cols)\n exporter.col += 1\n\ndef _write(exporter, label, style, rows, cols):\n if rows > 1 or cols > 1:\n exporter.sheet.write_merge(exporter.row, exporter.row+rows-1, exporter.col, exporter.col+cols-1, label, style)\n exporter.col += cols - 1\n else:\n exporter.sheet.write(exporter.row, exporter.col, label, style)\n", "path": "evap/results/exporters.py"}], "after_files": [{"content": "from evap.evaluation.models import Questionnaire\nfrom evap.evaluation.tools import calculate_results, calculate_average_grades_and_deviation, get_grade_color, get_deviation_color\n\nfrom django.utils.translation import ugettext as _\n\nfrom collections import OrderedDict\nfrom collections import defaultdict\nimport datetime\nimport xlwt\n\n\nclass ExcelExporter(object):\n\n def __init__(self, semester):\n self.semester = semester\n self.styles = dict()\n\n self.CUSTOM_COLOR_START = 8\n self.NUM_GRADE_COLORS = 21 # 1.0 to 5.0 in 0.2 steps\n self.NUM_DEVIATION_COLORS = 13 # 0.0 to 2.4 in 0.2 steps\n self.STEP = 0.2 # we only have a limited number of custom colors\n\n def normalize_number(self, number):\n \"\"\" floors 'number' to a multiply of self.STEP \"\"\"\n rounded_number = round(number, 1) # see #302\n return round(int(rounded_number / self.STEP + 0.0001) * self.STEP, 1)\n\n def create_style(self, workbook, base_style, style_name, palette_index, color):\n color_name = style_name + \"_color\"\n xlwt.add_palette_colour(color_name, palette_index)\n workbook.set_colour_RGB(palette_index, *color)\n self.styles[style_name] = xlwt.easyxf(base_style.format(color_name), num_format_str=\"0.0\")\n\n def init_styles(self, workbook):\n self.styles = {\n 'default': xlwt.Style.default_style,\n 'avg': xlwt.easyxf('alignment: horiz centre; font: bold on; borders: left medium, top medium, bottom medium'),\n 'headline': xlwt.easyxf('font: bold on, height 400; alignment: horiz centre, vert centre, wrap on', num_format_str=\"0.0\"),\n 'course': xlwt.easyxf('alignment: horiz centre, wrap on, rota 90; borders: left medium, top medium'),\n 'course_unfinished': xlwt.easyxf('alignment: horiz centre, wrap on, rota 90; borders: left medium, top medium; font: italic on'),\n 'total_voters': xlwt.easyxf('alignment: horiz centre; borders: left medium, bottom medium, right medium'),\n 'bold': xlwt.easyxf('font: bold on'),\n 'border_left': xlwt.easyxf('borders: left medium'),\n 'border_right': xlwt.easyxf('borders: right medium'),\n 'border_top_bottom_right': xlwt.easyxf('borders: top medium, bottom medium, right medium')}\n\n\n\n grade_base_style = 'pattern: pattern solid, fore_colour {}; alignment: horiz centre; font: bold on; borders: left 
medium'\n for i in range(0, self.NUM_GRADE_COLORS):\n grade = 1 + i*self.STEP\n color = get_grade_color(grade)\n palette_index = self.CUSTOM_COLOR_START + i\n style_name = self.grade_to_style(grade)\n self.create_style(workbook, grade_base_style, style_name, palette_index, color)\n\n deviation_base_style = 'pattern: pattern solid, fore_colour {}; alignment: horiz centre; borders: right medium'\n for i in range(0, self.NUM_DEVIATION_COLORS):\n deviation = i * self.STEP\n color = get_deviation_color(deviation)\n palette_index = self.CUSTOM_COLOR_START + self.NUM_GRADE_COLORS + i\n style_name = self.deviation_to_style(deviation)\n self.create_style(workbook, deviation_base_style, style_name, palette_index, color)\n\n\n def grade_to_style(self, grade):\n return 'grade_' + str(self.normalize_number(grade))\n\n def deviation_to_style(self, deviation):\n return 'deviation_' + str(self.normalize_number(deviation))\n\n def export(self, response, ignore_not_enough_answers=False):\n courses_with_results = list()\n for course in self.semester.course_set.filter(state=\"published\").all():\n if course.is_single_result():\n continue\n results = OrderedDict()\n for questionnaire, contributor, label, data, section_warning in calculate_results(course):\n results.setdefault(questionnaire.id, []).extend(data)\n courses_with_results.append((course, results))\n\n courses_with_results.sort(key=lambda cr: cr[0].type)\n\n qn_frequencies = defaultdict(int)\n for course, results in courses_with_results:\n for questionnaire, results in results.items():\n qn_frequencies[questionnaire] += 1\n\n qn_relevant = list(qn_frequencies.items())\n qn_relevant.sort(key=lambda t: -t[1])\n\n questionnaires = [Questionnaire.objects.get(id=t[0]) for t in qn_relevant]\n\n self.workbook = xlwt.Workbook()\n self.sheet = self.workbook.add_sheet(_(\"Results\"))\n self.row = 0\n self.col = 0\n\n\n self.init_styles(self.workbook)\n\n writec(self, _(\"Evaluation {0} - created on {1}\").format(self.semester.name, datetime.date.today()), \"headline\")\n for course, results in courses_with_results:\n if course.state == \"published\":\n writec(self, course.name, \"course\", cols=2)\n else:\n writec(self, course.name, \"course_unfinished\", cols=2)\n\n writen(self)\n for course, results in courses_with_results:\n writec(self, \"Average\", \"avg\")\n writec(self, \"Deviation\", \"border_top_bottom_right\")\n\n for questionnaire in questionnaires:\n writen(self, questionnaire.name, \"bold\")\n for course, results in courses_with_results:\n self.write_two_empty_cells_with_borders()\n\n for question in questionnaire.question_set.all():\n if question.is_text_question:\n continue\n\n writen(self, question.text)\n\n for course, results in courses_with_results:\n qn_results = results.get(questionnaire.id, None)\n if qn_results:\n values = []\n deviations = []\n for grade_result in qn_results:\n if grade_result.question.id == question.id:\n if grade_result.average:\n values.append(grade_result.average)\n deviations.append(grade_result.deviation)\n enough_answers = course.can_publish_grades\n if values and (enough_answers or ignore_not_enough_answers):\n avg = sum(values) / len(values)\n writec(self, avg, self.grade_to_style(avg))\n\n dev = sum(deviations) / len(deviations)\n writec(self, dev, self.deviation_to_style(dev))\n else:\n self.write_two_empty_cells_with_borders()\n else:\n self.write_two_empty_cells_with_borders()\n writen(self, None)\n for course, results in courses_with_results:\n self.write_two_empty_cells_with_borders()\n\n writen(self, 
_(\"Overall Average Grade\"), \"bold\")\n for course, results in courses_with_results:\n avg, dev = calculate_average_grades_and_deviation(course)\n if avg:\n writec(self, avg, self.grade_to_style(avg), cols=2)\n else:\n self.write_two_empty_cells_with_borders()\n\n writen(self, _(\"Overall Average Standard Deviation\"), \"bold\")\n for course, results in courses_with_results:\n avg, dev = calculate_average_grades_and_deviation(course)\n if dev is not None:\n writec(self, dev, self.deviation_to_style(dev), cols=2)\n else:\n self.write_two_empty_cells_with_borders()\n\n writen(self, _(\"Total Voters/Total Participants\"), \"bold\")\n for course, results in courses_with_results:\n percent_participants = float(course.num_voters)/float(course.num_participants) if course.num_participants > 0 else 0\n writec(self, \"{}/{} ({:.0%})\".format(course.num_voters, course.num_participants, percent_participants), \"total_voters\", cols=2)\n\n self.workbook.save(response)\n\n\n def write_two_empty_cells_with_borders(self):\n writec(self, None, \"border_left\")\n writec(self, None, \"border_right\")\n\n\ndef writen(exporter, label=\"\", style_name=\"default\"):\n \"\"\"Write the cell at the beginning of the next row.\"\"\"\n exporter.col = 0\n exporter.row += 1\n writec(exporter, label, style_name)\n\ndef writec(exporter, label, style_name, rows=1, cols=1):\n \"\"\"Write the cell in the next column of the current line.\"\"\"\n _write(exporter, label, exporter.styles[style_name], rows, cols)\n exporter.col += 1\n\ndef _write(exporter, label, style, rows, cols):\n if rows > 1 or cols > 1:\n exporter.sheet.write_merge(exporter.row, exporter.row+rows-1, exporter.col, exporter.col+cols-1, label, style)\n exporter.col += cols - 1\n else:\n exporter.sheet.write(exporter.row, exporter.col, label, style)\n", "path": "evap/results/exporters.py"}]} | 2,784 | 116 |
gh_patches_debug_6122 | rasdani/github-patches | git_diff | huggingface__accelerate-2730 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Logging does not respect the "%(lineno)s" and "%(filename)s".
### System Info
```Shell
Copy-and-paste the text below in your GitHub issue
- `Accelerate` version: 0.24.1
- Platform: Linux-6.2.0-35-generic-x86_64-with-glibc2.35
- Python version: 3.11.5
- Numpy version: 1.26.1
- PyTorch version (GPU?): 2.1.0+cu121 (True)
- PyTorch XPU available: False
- PyTorch NPU available: False
- System RAM: 125.48 GB
- GPU type: NVIDIA GeForce RTX 3090
- `Accelerate` default config:
- compute_environment: LOCAL_MACHINE
- distributed_type: NO
- mixed_precision: bf16
- use_cpu: False
- debug: False
- num_processes: 1
- machine_rank: 0
- num_machines: 1
- gpu_ids: all
- rdzv_backend: static
- same_network: True
- main_training_function: main
- downcast_bf16: no
- tpu_use_cluster: False
- tpu_use_sudo: False
- tpu_env: []
```
### Information
- [ ] The official example scripts
- [X] My own modified scripts
### Tasks
- [ ] One of the scripts in the examples/ folder of Accelerate or an officially supported `no_trainer` script in the `examples` folder of the `transformers` repo (such as `run_no_trainer_glue.py`)
- [X] My own task or dataset (give details below)
### Reproduction
When using `accelerate.logging.get_logger` to log, all of the logs say they came from `logging.py` at line 60.
Interestingly, it respects the current module's name correctly.
```python
import logging
from accelerate.logging import get_logger
from rich.logging import RichHandler
logger = get_logger(__name__)
if __name__ == "__main__":
logging.basicConfig(
format="%(name)s: %(lineno)s - %(message)s",
datefmt="%m/%d %H:%M:%S",
level=logging.INFO,
handlers=[RichHandler(show_time=True, show_level=True, show_path=True)],
)
logger.info("Test")
```
The output looks something like this.
```
11/02 16:45:19 INFO __main__: 60 - Test logging.py:60
```
### Expected behavior
The line number and filename should respect where the `logger.info()` is called.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/accelerate/logging.py`
Content:
```
1 # Copyright 2022 The HuggingFace Team. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import functools
16 import logging
17 import os
18
19 from .state import PartialState
20
21
22 class MultiProcessAdapter(logging.LoggerAdapter):
23 """
24 An adapter to assist with logging in multiprocess.
25
26 `log` takes in an additional `main_process_only` kwarg, which dictates whether it should be called on all processes
27 or only the main executed one. Default is `main_process_only=True`.
28
29 Does not require an `Accelerator` object to be created first.
30 """
31
32 @staticmethod
33 def _should_log(main_process_only):
34 "Check if log should be performed"
35 state = PartialState()
36 return not main_process_only or (main_process_only and state.is_main_process)
37
38 def log(self, level, msg, *args, **kwargs):
39 """
40 Delegates logger call after checking if we should log.
41
42 Accepts a new kwarg of `main_process_only`, which will dictate whether it will be logged across all processes
43 or only the main executed one. Default is `True` if not passed
44
45 Also accepts "in_order", which if `True` makes the processes log one by one, in order. This is much easier to
46 read, but comes at the cost of sometimes needing to wait for the other processes. Default is `False` to not
47 break with the previous behavior.
48
49 `in_order` is ignored if `main_process_only` is passed.
50 """
51 if PartialState._shared_state == {}:
52 raise RuntimeError(
53 "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
54 )
55 main_process_only = kwargs.pop("main_process_only", True)
56 in_order = kwargs.pop("in_order", False)
57
58 if self.isEnabledFor(level):
59 if self._should_log(main_process_only):
60 msg, kwargs = self.process(msg, kwargs)
61 self.logger.log(level, msg, *args, **kwargs)
62
63 elif in_order:
64 state = PartialState()
65 for i in range(state.num_processes):
66 if i == state.process_index:
67 msg, kwargs = self.process(msg, kwargs)
68 self.logger.log(level, msg, *args, **kwargs)
69 state.wait_for_everyone()
70
71 @functools.lru_cache(None)
72 def warning_once(self, *args, **kwargs):
73 """
74 This method is identical to `logger.warning()`, but will emit the warning with the same message only once
75
76 Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the
77 cache. The assumption here is that all warning messages are unique across the code. If they aren't then need to
78 switch to another type of cache that includes the caller frame information in the hashing function.
79 """
80 self.warning(*args, **kwargs)
81
82
83 def get_logger(name: str, log_level: str = None):
84 """
85 Returns a `logging.Logger` for `name` that can handle multiprocessing.
86
87 If a log should be called on all processes, pass `main_process_only=False` If a log should be called on all
88 processes and in order, also pass `in_order=True`
89
90 Args:
91 name (`str`):
92 The name for the logger, such as `__file__`
93 log_level (`str`, *optional*):
94 The log level to use. If not passed, will default to the `LOG_LEVEL` environment variable, or `INFO` if not
95
96 Example:
97
98 ```python
99 >>> from accelerate.logging import get_logger
100 >>> from accelerate import Accelerator
101
102 >>> logger = get_logger(__name__)
103
104 >>> accelerator = Accelerator()
105 >>> logger.info("My log", main_process_only=False)
106 >>> logger.debug("My log", main_process_only=True)
107
108 >>> logger = get_logger(__name__, log_level="DEBUG")
109 >>> logger.info("My log")
110 >>> logger.debug("My second log")
111
112 >>> array = ["a", "b", "c", "d"]
113 >>> letter_at_rank = array[accelerator.process_index]
114 >>> logger.info(letter_at_rank, in_order=True)
115 ```
116 """
117 if log_level is None:
118 log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
119 logger = logging.getLogger(name)
120 if log_level is not None:
121 logger.setLevel(log_level.upper())
122 logger.root.setLevel(log_level.upper())
123 return MultiProcessAdapter(logger, {})
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/accelerate/logging.py b/src/accelerate/logging.py
--- a/src/accelerate/logging.py
+++ b/src/accelerate/logging.py
@@ -54,6 +54,8 @@
)
main_process_only = kwargs.pop("main_process_only", True)
in_order = kwargs.pop("in_order", False)
+ # set `stacklevel` to exclude ourself in `Logger.findCaller()` while respecting user's choice
+ kwargs.setdefault("stacklevel", 2)
if self.isEnabledFor(level):
if self._should_log(main_process_only):
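
Why this works: since Python 3.8, `Logger.log()` accepts a `stacklevel` keyword that tells `Logger.findCaller()` how many non-logging stack frames to skip when filling in `%(filename)s`, `%(lineno)s`, and `%(funcName)s`. Because `MultiProcessAdapter.log` adds one wrapper frame before delegating to `self.logger.log`, defaulting `stacklevel` to `2` makes the record point at the user's call site instead of `logging.py:60`, while `setdefault` still respects an explicit `stacklevel` passed by the caller. The snippet below is a minimal, self-contained sketch of that mechanism; the wrapper name is illustrative and not taken from the accelerate codebase.

```python
# Minimal sketch of the stacklevel mechanism (Python 3.8+); names here are
# illustrative and not from the accelerate codebase.
import logging

logging.basicConfig(format="%(filename)s:%(lineno)d - %(message)s", level=logging.INFO)
logger = logging.getLogger(__name__)


def wrapped_info(msg):
    # Default stacklevel=1: the record reports this line inside the wrapper.
    logger.info(msg)
    # stacklevel=2: findCaller() skips this wrapper frame and reports the caller's line.
    logger.info(msg, stacklevel=2)


wrapped_info("test")
```

Running this, the first record points at the line inside `wrapped_info`, while the second points at the `wrapped_info("test")` call site, which is the behavior the issue asks for.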
| {"golden_diff": "diff --git a/src/accelerate/logging.py b/src/accelerate/logging.py\n--- a/src/accelerate/logging.py\n+++ b/src/accelerate/logging.py\n@@ -54,6 +54,8 @@\n )\n main_process_only = kwargs.pop(\"main_process_only\", True)\n in_order = kwargs.pop(\"in_order\", False)\n+ # set `stacklevel` to exclude ourself in `Logger.findCaller()` while respecting user's choice\n+ kwargs.setdefault(\"stacklevel\", 2)\n \n if self.isEnabledFor(level):\n if self._should_log(main_process_only):\n", "issue": "Logging does not respect the \"%(lineno)s\" and \"%(filename)s\".\n### System Info\n\n```Shell\nCopy-and-paste the text below in your GitHub issue\r\n\r\n- `Accelerate` version: 0.24.1\r\n- Platform: Linux-6.2.0-35-generic-x86_64-with-glibc2.35\r\n- Python version: 3.11.5\r\n- Numpy version: 1.26.1\r\n- PyTorch version (GPU?): 2.1.0+cu121 (True)\r\n- PyTorch XPU available: False\r\n- PyTorch NPU available: False\r\n- System RAM: 125.48 GB\r\n- GPU type: NVIDIA GeForce RTX 3090\r\n- `Accelerate` default config:\r\n - compute_environment: LOCAL_MACHINE\r\n - distributed_type: NO\r\n - mixed_precision: bf16\r\n - use_cpu: False\r\n - debug: False\r\n - num_processes: 1\r\n - machine_rank: 0\r\n - num_machines: 1\r\n - gpu_ids: all\r\n - rdzv_backend: static\r\n - same_network: True\r\n - main_training_function: main\r\n - downcast_bf16: no\r\n - tpu_use_cluster: False\r\n - tpu_use_sudo: False\r\n - tpu_env: []\n```\n\n\n### Information\n\n- [ ] The official example scripts\n- [X] My own modified scripts\n\n### Tasks\n\n- [ ] One of the scripts in the examples/ folder of Accelerate or an officially supported `no_trainer` script in the `examples` folder of the `transformers` repo (such as `run_no_trainer_glue.py`)\n- [X] My own task or dataset (give details below)\n\n### Reproduction\n\nWhen using `accelerate.logging.get_logger` to log, all of the logs says it came from `logging.py` at line 60.\r\n\r\nInterestingly, it respects the current module's name correctly.\r\n\r\n```python\r\nimport logging\r\n\r\nfrom accelerate.logging import get_logger\r\nfrom rich.logging import RichHandler\r\n\r\nlogger = get_logger(__name__)\r\n\r\nif __name__ == \"__main__\":\r\n logging.basicConfig(\r\n format=\"%(name)s: %(lineno)s - %(message)s\",\r\n datefmt=\"%m/%d %H:%M:%S\",\r\n level=logging.INFO,\r\n handlers=[RichHandler(show_time=True, show_level=True, show_path=True)],\r\n )\r\n logger.info(\"Test\")\r\n```\r\n\r\nThe output looks something like this.\r\n\r\n```\r\n11/02 16:45:19 INFO __main__: 60 - Test logging.py:60\r\n```\n\n### Expected behavior\n\nThe line number and filename should respect where the `logger.info()` is called.\n", "before_files": [{"content": "# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\nimport logging\nimport os\n\nfrom .state import PartialState\n\n\nclass MultiProcessAdapter(logging.LoggerAdapter):\n \"\"\"\n An adapter to assist with logging in multiprocess.\n\n `log` takes in an additional `main_process_only` kwarg, which dictates whether it should be called on all processes\n or only the main executed one. Default is `main_process_only=True`.\n\n Does not require an `Accelerator` object to be created first.\n \"\"\"\n\n @staticmethod\n def _should_log(main_process_only):\n \"Check if log should be performed\"\n state = PartialState()\n return not main_process_only or (main_process_only and state.is_main_process)\n\n def log(self, level, msg, *args, **kwargs):\n \"\"\"\n Delegates logger call after checking if we should log.\n\n Accepts a new kwarg of `main_process_only`, which will dictate whether it will be logged across all processes\n or only the main executed one. Default is `True` if not passed\n\n Also accepts \"in_order\", which if `True` makes the processes log one by one, in order. This is much easier to\n read, but comes at the cost of sometimes needing to wait for the other processes. Default is `False` to not\n break with the previous behavior.\n\n `in_order` is ignored if `main_process_only` is passed.\n \"\"\"\n if PartialState._shared_state == {}:\n raise RuntimeError(\n \"You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.\"\n )\n main_process_only = kwargs.pop(\"main_process_only\", True)\n in_order = kwargs.pop(\"in_order\", False)\n\n if self.isEnabledFor(level):\n if self._should_log(main_process_only):\n msg, kwargs = self.process(msg, kwargs)\n self.logger.log(level, msg, *args, **kwargs)\n\n elif in_order:\n state = PartialState()\n for i in range(state.num_processes):\n if i == state.process_index:\n msg, kwargs = self.process(msg, kwargs)\n self.logger.log(level, msg, *args, **kwargs)\n state.wait_for_everyone()\n\n @functools.lru_cache(None)\n def warning_once(self, *args, **kwargs):\n \"\"\"\n This method is identical to `logger.warning()`, but will emit the warning with the same message only once\n\n Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the\n cache. The assumption here is that all warning messages are unique across the code. 
If they aren't then need to\n switch to another type of cache that includes the caller frame information in the hashing function.\n \"\"\"\n self.warning(*args, **kwargs)\n\n\ndef get_logger(name: str, log_level: str = None):\n \"\"\"\n Returns a `logging.Logger` for `name` that can handle multiprocessing.\n\n If a log should be called on all processes, pass `main_process_only=False` If a log should be called on all\n processes and in order, also pass `in_order=True`\n\n Args:\n name (`str`):\n The name for the logger, such as `__file__`\n log_level (`str`, *optional*):\n The log level to use. If not passed, will default to the `LOG_LEVEL` environment variable, or `INFO` if not\n\n Example:\n\n ```python\n >>> from accelerate.logging import get_logger\n >>> from accelerate import Accelerator\n\n >>> logger = get_logger(__name__)\n\n >>> accelerator = Accelerator()\n >>> logger.info(\"My log\", main_process_only=False)\n >>> logger.debug(\"My log\", main_process_only=True)\n\n >>> logger = get_logger(__name__, log_level=\"DEBUG\")\n >>> logger.info(\"My log\")\n >>> logger.debug(\"My second log\")\n\n >>> array = [\"a\", \"b\", \"c\", \"d\"]\n >>> letter_at_rank = array[accelerator.process_index]\n >>> logger.info(letter_at_rank, in_order=True)\n ```\n \"\"\"\n if log_level is None:\n log_level = os.environ.get(\"ACCELERATE_LOG_LEVEL\", None)\n logger = logging.getLogger(name)\n if log_level is not None:\n logger.setLevel(log_level.upper())\n logger.root.setLevel(log_level.upper())\n return MultiProcessAdapter(logger, {})\n", "path": "src/accelerate/logging.py"}], "after_files": [{"content": "# Copyright 2022 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\nimport logging\nimport os\n\nfrom .state import PartialState\n\n\nclass MultiProcessAdapter(logging.LoggerAdapter):\n \"\"\"\n An adapter to assist with logging in multiprocess.\n\n `log` takes in an additional `main_process_only` kwarg, which dictates whether it should be called on all processes\n or only the main executed one. Default is `main_process_only=True`.\n\n Does not require an `Accelerator` object to be created first.\n \"\"\"\n\n @staticmethod\n def _should_log(main_process_only):\n \"Check if log should be performed\"\n state = PartialState()\n return not main_process_only or (main_process_only and state.is_main_process)\n\n def log(self, level, msg, *args, **kwargs):\n \"\"\"\n Delegates logger call after checking if we should log.\n\n Accepts a new kwarg of `main_process_only`, which will dictate whether it will be logged across all processes\n or only the main executed one. Default is `True` if not passed\n\n Also accepts \"in_order\", which if `True` makes the processes log one by one, in order. This is much easier to\n read, but comes at the cost of sometimes needing to wait for the other processes. 
Default is `False` to not\n break with the previous behavior.\n\n `in_order` is ignored if `main_process_only` is passed.\n \"\"\"\n if PartialState._shared_state == {}:\n raise RuntimeError(\n \"You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.\"\n )\n main_process_only = kwargs.pop(\"main_process_only\", True)\n in_order = kwargs.pop(\"in_order\", False)\n # set `stacklevel` to exclude ourself in `Logger.findCaller()` while respecting user's choice\n kwargs.setdefault(\"stacklevel\", 2)\n\n if self.isEnabledFor(level):\n if self._should_log(main_process_only):\n msg, kwargs = self.process(msg, kwargs)\n self.logger.log(level, msg, *args, **kwargs)\n\n elif in_order:\n state = PartialState()\n for i in range(state.num_processes):\n if i == state.process_index:\n msg, kwargs = self.process(msg, kwargs)\n self.logger.log(level, msg, *args, **kwargs)\n state.wait_for_everyone()\n\n @functools.lru_cache(None)\n def warning_once(self, *args, **kwargs):\n \"\"\"\n This method is identical to `logger.warning()`, but will emit the warning with the same message only once\n\n Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the\n cache. The assumption here is that all warning messages are unique across the code. If they aren't then need to\n switch to another type of cache that includes the caller frame information in the hashing function.\n \"\"\"\n self.warning(*args, **kwargs)\n\n\ndef get_logger(name: str, log_level: str = None):\n \"\"\"\n Returns a `logging.Logger` for `name` that can handle multiprocessing.\n\n If a log should be called on all processes, pass `main_process_only=False` If a log should be called on all\n processes and in order, also pass `in_order=True`\n\n Args:\n name (`str`):\n The name for the logger, such as `__file__`\n log_level (`str`, *optional*):\n The log level to use. If not passed, will default to the `LOG_LEVEL` environment variable, or `INFO` if not\n\n Example:\n\n ```python\n >>> from accelerate.logging import get_logger\n >>> from accelerate import Accelerator\n\n >>> logger = get_logger(__name__)\n\n >>> accelerator = Accelerator()\n >>> logger.info(\"My log\", main_process_only=False)\n >>> logger.debug(\"My log\", main_process_only=True)\n\n >>> logger = get_logger(__name__, log_level=\"DEBUG\")\n >>> logger.info(\"My log\")\n >>> logger.debug(\"My second log\")\n\n >>> array = [\"a\", \"b\", \"c\", \"d\"]\n >>> letter_at_rank = array[accelerator.process_index]\n >>> logger.info(letter_at_rank, in_order=True)\n ```\n \"\"\"\n if log_level is None:\n log_level = os.environ.get(\"ACCELERATE_LOG_LEVEL\", None)\n logger = logging.getLogger(name)\n if log_level is not None:\n logger.setLevel(log_level.upper())\n logger.root.setLevel(log_level.upper())\n return MultiProcessAdapter(logger, {})\n", "path": "src/accelerate/logging.py"}]} | 2,220 | 129 |
gh_patches_debug_59305 | rasdani/github-patches | git_diff | LMFDB__lmfdb-2961 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Favicon color should be updated
If we are changing the color scheme, the background color of the favicon (small image that appears on a tab) should be updated to match.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lmfdb/utils/config.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # LMFDB - L-function and Modular Forms Database web-site - www.lmfdb.org
3 # Copyright (C) 2010-2012 by the LMFDB authors
4 #
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Library General Public
7 # License as published by the Free Software Foundation; either
8 # version 2 of the License, or (at your option) any later version.
9
10 """
11 This file must not depend on other files from this project.
12 It's purpose is to parse a config file (create a default one if none
13 is present) and replace values stored within it with those given
14 via optional command-line arguments.
15 """
16 import argparse
17 import sys
18 import os
19
20 class Configuration(object):
21
22 def __init__(self, writeargstofile = False):
23 default_config_file = "config.ini"
24 root_lmfdb_path = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..'))
25 if root_lmfdb_path != os.path.abspath(os.getcwd()):
26 default_config_file = os.path.relpath(os.path.join(root_lmfdb_path, default_config_file),os.getcwd())
27
28 # 1: parsing command-line arguments
29 parser = argparse.ArgumentParser(description = 'LMFDB - The L-functions and modular forms database')
30 parser.add_argument('-c', '--config-file',
31 dest = "config_file",
32 metavar = "FILE",
33 help = 'configuration file [default: %(default)s]',
34 default = default_config_file)
35
36 parser.add_argument(
37 '-d', '--debug',
38 action = "store_true",
39 dest = 'core_debug',
40 help = 'enable debug mode')
41
42 parser.add_argument(
43 '--color',
44 dest = 'core_color',
45 metavar = "COLOR",
46 help = 'color template (see lmfdb/utils/color.py)',
47 default = 0,
48 type = int)
49
50 parser.add_argument('-p','--port',
51 dest = 'web_port',
52 metavar = 'PORT',
53 help = 'the LMFDB server will be running on PORT [default: %(default)d]',
54 type = int,
55 default = 37777)
56 parser.add_argument('-b', '--bind_ip',
57 dest = 'web_bindip',
58 metavar ='HOST',
59 help = 'the LMFDB server will be listening to HOST [default: %(default)s]',
60 default = '127.0.0.1')
61
62 logginggroup = parser.add_argument_group('Logging options:')
63 logginggroup.add_argument('--logfile',
64 help = 'logfile for flask [default: %(default)s]',
65 dest = 'logging_logfile',
66 metavar = 'FILE',
67 default = 'flasklog')
68
69 logginggroup.add_argument('--logfocus',
70 help = 'name of a logger to focus on',
71 default = argparse.SUPPRESS)
72
73
74 logginggroup.add_argument(
75 '--slowcutoff',
76 dest = 'logging_slowcutoff',
77 metavar = "SLOWCUTOFF",
78 help = 'threshold to log slow queries [default: %(default)s]',
79 default = 0.1,
80 type = float)
81
82 logginggroup.add_argument('--slowlogfile',
83 help = 'logfile for slow queries [default: %(default)s]',
84 dest = 'logging_slowlogfile',
85 metavar = 'FILE',
86 default = 'slow_queries.log')
87
88
89 # PostgresSQL options
90 postgresqlgroup = parser.add_argument_group('PostgreSQL options')
91 postgresqlgroup.add_argument('--postgresql-host',
92 dest = 'postgresql_host',
93 metavar = 'HOST',
94 help = 'PostgreSQL server host or socket directory [default: %(default)s]',
95 default = 'devmirror.lmfdb.xyz')
96 postgresqlgroup.add_argument('--postgresql-port',
97 dest = 'postgresql_port',
98 metavar = 'PORT',
99 type = int,
100 help = 'PostgreSQL server port [default: %(default)d]',
101 default = 5432)
102
103 postgresqlgroup.add_argument('--postgresql-user',
104 dest = 'postgresql_user',
105 metavar = 'USER',
106 help = 'PostgreSQL username [default: %(default)s]',
107 default = "lmfdb")
108
109 postgresqlgroup.add_argument('--postgresql-pass',
110 dest = 'postgresql_password',
111 metavar = 'PASS',
112 help = 'PostgreSQL password [default: %(default)s]',
113 default = "lmfdb")
114
115 # undocumented options
116 parser.add_argument('--enable-profiler',
117 dest = 'profiler',
118 help=argparse.SUPPRESS,
119 action='store_true',
120 default=argparse.SUPPRESS)
121
122 # undocumented flask options
123 parser.add_argument('--enable-reloader',
124 dest='use_reloader',
125 help=argparse.SUPPRESS,
126 action='store_true',
127 default=argparse.SUPPRESS)
128
129 parser.add_argument('--disable-reloader',
130 dest='use_reloader',
131 help=argparse.SUPPRESS,
132 action='store_false',
133 default=argparse.SUPPRESS)
134
135 parser.add_argument('--enable-debugger',
136 dest='use_debugger',
137 help=argparse.SUPPRESS,
138 action = 'store_true',
139 default=argparse.SUPPRESS)
140
141 parser.add_argument('--disable-debugger',
142 dest='use_debugger',
143 help=argparse.SUPPRESS,
144 action='store_false',
145 default=argparse.SUPPRESS)
146 if os.path.split(sys.argv[0])[-1] == "start-lmfdb.py" or writeargstofile:
147 args = parser.parse_args()
148 else:
149 # only read config file
150 args = parser.parse_args([])
151 args_dict = vars(args)
152 default_arguments_dict = vars(parser.parse_args([]))
153 if writeargstofile:
154 default_arguments_dict = dict(args_dict)
155
156 del default_arguments_dict['config_file']
157
158 self.default_args = {}
159 for key, val in default_arguments_dict.iteritems():
160 sec, opt = key.split('_', 1)
161 if sec not in self.default_args:
162 self.default_args[sec] = {}
163 self.default_args[sec][opt] = str(val)
164
165
166
167 from ConfigParser import ConfigParser
168
169 # reading the config file, creating it if necessary
170 # 2/1: does config file exist?
171 if not os.path.exists(args.config_file):
172 if not writeargstofile:
173 print("Config file: %s not found, creating it with the default values" % args.config_file )
174 else:
175 print("Config file: %s not found, creating it with the passed values" % args.config_file )
176 _cfgp = ConfigParser()
177
178 # create sections
179 _cfgp.add_section('core')
180 _cfgp.add_section('web')
181 _cfgp.add_section('postgresql')
182 _cfgp.add_section('logging')
183
184
185 for sec, options in self.default_args.iteritems():
186 for opt, val in options.iteritems():
187 _cfgp.set(sec, opt, str(val))
188
189 with open(args.config_file, 'wb') as configfile:
190 _cfgp.write(configfile)
191
192 # 2/2: reading the config file
193 _cfgp = ConfigParser()
194 _cfgp.read(args.config_file)
195
196
197 # 3: override specific settings
198 def all(sep = '_'):
199 ret = {}
200 for s in _cfgp.sections():
201 for k, v in _cfgp.items(s):
202 ret['%s%s%s' % (s, sep, k)] = v
203 return ret
204
205 all_set = all()
206
207 for key, val in default_arguments_dict.iteritems():
208 # if a nondefault value was passed through command line arguments set it
209 # or if a default value was not set in the config file
210 if args_dict[key] != val or key not in all_set:
211 sec, opt = key.split('_')
212 _cfgp.set(sec, opt, str(args_dict[key]))
213
214
215 # some generic functions
216 def get(section, key):
217 return _cfgp.get(section, key)
218
219 def getint(section, key):
220 return _cfgp.getint(section, key)
221
222 def getboolean(section, key):
223 return _cfgp.getboolean(section, key)
224
225
226
227 self.flask_options = {
228 "port": getint('web', 'port'),
229 "host": get('web', 'bindip'),
230 "debug": getboolean('core', 'debug')
231 }
232 for opt in ['use_debugger', 'use_reloader', 'profiler']:
233 if opt in args_dict:
234 self.flask_options[opt] = args_dict[opt]
235
236 self.color = getint('core', 'color')
237
238 self.postgresql_options = {
239 "port": getint("postgresql", "port"),
240 "host": get("postgresql", "host"),
241 "dbname": "lmfdb"}
242
243 # optional items
244 for elt in ['user','password']:
245 if _cfgp.has_option("postgresql", elt):
246 self.postgresql_options[elt] = get("postgresql", elt)
247
248 self.logging_options = {'logfile': get('logging', 'logfile'), 'slowcutoff': float(get('logging', 'slowcutoff')), 'slowlogfile': get('logging', 'slowlogfile') }
249 if "logfocus" in args_dict:
250 self.logging_options["logfocus"] = args_dict["logfocus"]
251 if _cfgp.has_option("logging", "editor"):
252 self.logging_options["editor"] = get("logging", "editor")
253
254 def get_all(self):
255 return { 'flask_options' : self.flask_options, 'postgresql_options' : self.postgresql_options, 'logging_options' : self.logging_options}
256
257 def get_flask(self):
258 return self.flask_options
259
260 def get_color(self):
261 return self.color
262
263 def get_postgresql(self):
264 return self.postgresql_options
265
266 def get_postgresql_default(self):
267 res = dict(self.default_args["postgresql"])
268 res["port"] = int(res["port"])
269 return res
270
271 def get_logging(self):
272 return self.logging_options
273
274
275 if __name__ == '__main__':
276 Configuration(writeargstofile = True)
277
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lmfdb/utils/config.py b/lmfdb/utils/config.py
--- a/lmfdb/utils/config.py
+++ b/lmfdb/utils/config.py
@@ -44,7 +44,7 @@
dest = 'core_color',
metavar = "COLOR",
help = 'color template (see lmfdb/utils/color.py)',
- default = 0,
+ default = 19,
type = int)
parser.add_argument('-p','--port',
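
Note that this patch only changes the fallback template index: an existing `config.ini` keeps whatever `color` value it already stores, because the override loop in `Configuration.__init__` only rewrites a setting when a non-default CLI value is passed or the key is missing from the file. A quick way to check which template index is in effect is sketched below; this is a hedged example, and instantiating `Configuration` reads (or creates) `config.ini`, so it should be run from the LMFDB root.

```python
# Hedged sketch: confirm the color template index the app will use.
from lmfdb.utils.config import Configuration

cfg = Configuration()
print(cfg.get_color())  # 19 for a fresh config after this patch, unless overridden
```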
| {"golden_diff": "diff --git a/lmfdb/utils/config.py b/lmfdb/utils/config.py\n--- a/lmfdb/utils/config.py\n+++ b/lmfdb/utils/config.py\n@@ -44,7 +44,7 @@\n dest = 'core_color',\n metavar = \"COLOR\",\n help = 'color template (see lmfdb/utils/color.py)',\n- default = 0,\n+ default = 19,\n type = int)\n \n parser.add_argument('-p','--port',\n", "issue": "Favicon color should be updated\nIf we are changing the color scheme, the background color of the favicon (small image that appears on a tab) should be updated to match.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# LMFDB - L-function and Modular Forms Database web-site - www.lmfdb.org\n# Copyright (C) 2010-2012 by the LMFDB authors\n#\n# This library is free software; you can redistribute it and/or\n# modify it under the terms of the GNU Library General Public\n# License as published by the Free Software Foundation; either\n# version 2 of the License, or (at your option) any later version.\n\n\"\"\"\nThis file must not depend on other files from this project.\nIt's purpose is to parse a config file (create a default one if none\nis present) and replace values stored within it with those given\nvia optional command-line arguments.\n\"\"\"\nimport argparse\nimport sys\nimport os\n\nclass Configuration(object):\n\n def __init__(self, writeargstofile = False):\n default_config_file = \"config.ini\"\n root_lmfdb_path = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..'))\n if root_lmfdb_path != os.path.abspath(os.getcwd()):\n default_config_file = os.path.relpath(os.path.join(root_lmfdb_path, default_config_file),os.getcwd())\n\n # 1: parsing command-line arguments\n parser = argparse.ArgumentParser(description = 'LMFDB - The L-functions and modular forms database')\n parser.add_argument('-c', '--config-file',\n dest = \"config_file\",\n metavar = \"FILE\",\n help = 'configuration file [default: %(default)s]',\n default = default_config_file)\n\n parser.add_argument(\n '-d', '--debug',\n action = \"store_true\",\n dest = 'core_debug',\n help = 'enable debug mode')\n\n parser.add_argument(\n '--color',\n dest = 'core_color',\n metavar = \"COLOR\",\n help = 'color template (see lmfdb/utils/color.py)',\n default = 0,\n type = int)\n\n parser.add_argument('-p','--port',\n dest = 'web_port',\n metavar = 'PORT',\n help = 'the LMFDB server will be running on PORT [default: %(default)d]',\n type = int,\n default = 37777)\n parser.add_argument('-b', '--bind_ip',\n dest = 'web_bindip',\n metavar ='HOST',\n help = 'the LMFDB server will be listening to HOST [default: %(default)s]',\n default = '127.0.0.1')\n\n logginggroup = parser.add_argument_group('Logging options:')\n logginggroup.add_argument('--logfile',\n help = 'logfile for flask [default: %(default)s]',\n dest = 'logging_logfile',\n metavar = 'FILE',\n default = 'flasklog')\n\n logginggroup.add_argument('--logfocus',\n help = 'name of a logger to focus on',\n default = argparse.SUPPRESS)\n\n\n logginggroup.add_argument(\n '--slowcutoff',\n dest = 'logging_slowcutoff',\n metavar = \"SLOWCUTOFF\",\n help = 'threshold to log slow queries [default: %(default)s]',\n default = 0.1,\n type = float)\n\n logginggroup.add_argument('--slowlogfile',\n help = 'logfile for slow queries [default: %(default)s]',\n dest = 'logging_slowlogfile',\n metavar = 'FILE',\n default = 'slow_queries.log')\n\n\n # PostgresSQL options\n postgresqlgroup = parser.add_argument_group('PostgreSQL options')\n postgresqlgroup.add_argument('--postgresql-host',\n dest = 
'postgresql_host',\n metavar = 'HOST',\n help = 'PostgreSQL server host or socket directory [default: %(default)s]',\n default = 'devmirror.lmfdb.xyz')\n postgresqlgroup.add_argument('--postgresql-port',\n dest = 'postgresql_port',\n metavar = 'PORT',\n type = int,\n help = 'PostgreSQL server port [default: %(default)d]',\n default = 5432)\n\n postgresqlgroup.add_argument('--postgresql-user',\n dest = 'postgresql_user',\n metavar = 'USER',\n help = 'PostgreSQL username [default: %(default)s]',\n default = \"lmfdb\")\n\n postgresqlgroup.add_argument('--postgresql-pass',\n dest = 'postgresql_password',\n metavar = 'PASS',\n help = 'PostgreSQL password [default: %(default)s]',\n default = \"lmfdb\")\n\n # undocumented options\n parser.add_argument('--enable-profiler',\n dest = 'profiler',\n help=argparse.SUPPRESS,\n action='store_true',\n default=argparse.SUPPRESS)\n\n # undocumented flask options\n parser.add_argument('--enable-reloader',\n dest='use_reloader',\n help=argparse.SUPPRESS,\n action='store_true',\n default=argparse.SUPPRESS)\n\n parser.add_argument('--disable-reloader',\n dest='use_reloader',\n help=argparse.SUPPRESS,\n action='store_false',\n default=argparse.SUPPRESS)\n\n parser.add_argument('--enable-debugger',\n dest='use_debugger',\n help=argparse.SUPPRESS,\n action = 'store_true',\n default=argparse.SUPPRESS)\n\n parser.add_argument('--disable-debugger',\n dest='use_debugger',\n help=argparse.SUPPRESS,\n action='store_false',\n default=argparse.SUPPRESS)\n if os.path.split(sys.argv[0])[-1] == \"start-lmfdb.py\" or writeargstofile:\n args = parser.parse_args()\n else:\n # only read config file\n args = parser.parse_args([])\n args_dict = vars(args)\n default_arguments_dict = vars(parser.parse_args([]))\n if writeargstofile:\n default_arguments_dict = dict(args_dict)\n\n del default_arguments_dict['config_file']\n\n self.default_args = {}\n for key, val in default_arguments_dict.iteritems():\n sec, opt = key.split('_', 1)\n if sec not in self.default_args:\n self.default_args[sec] = {}\n self.default_args[sec][opt] = str(val)\n\n\n\n from ConfigParser import ConfigParser\n\n # reading the config file, creating it if necessary\n # 2/1: does config file exist?\n if not os.path.exists(args.config_file):\n if not writeargstofile:\n print(\"Config file: %s not found, creating it with the default values\" % args.config_file )\n else:\n print(\"Config file: %s not found, creating it with the passed values\" % args.config_file )\n _cfgp = ConfigParser()\n\n # create sections\n _cfgp.add_section('core')\n _cfgp.add_section('web')\n _cfgp.add_section('postgresql')\n _cfgp.add_section('logging')\n\n\n for sec, options in self.default_args.iteritems():\n for opt, val in options.iteritems():\n _cfgp.set(sec, opt, str(val))\n\n with open(args.config_file, 'wb') as configfile:\n _cfgp.write(configfile)\n\n # 2/2: reading the config file\n _cfgp = ConfigParser()\n _cfgp.read(args.config_file)\n\n\n # 3: override specific settings\n def all(sep = '_'):\n ret = {}\n for s in _cfgp.sections():\n for k, v in _cfgp.items(s):\n ret['%s%s%s' % (s, sep, k)] = v\n return ret\n\n all_set = all()\n\n for key, val in default_arguments_dict.iteritems():\n # if a nondefault value was passed through command line arguments set it\n # or if a default value was not set in the config file\n if args_dict[key] != val or key not in all_set:\n sec, opt = key.split('_')\n _cfgp.set(sec, opt, str(args_dict[key]))\n\n\n # some generic functions\n def get(section, key):\n return _cfgp.get(section, key)\n\n def 
getint(section, key):\n return _cfgp.getint(section, key)\n\n def getboolean(section, key):\n return _cfgp.getboolean(section, key)\n\n\n\n self.flask_options = {\n \"port\": getint('web', 'port'),\n \"host\": get('web', 'bindip'),\n \"debug\": getboolean('core', 'debug')\n }\n for opt in ['use_debugger', 'use_reloader', 'profiler']:\n if opt in args_dict:\n self.flask_options[opt] = args_dict[opt]\n\n self.color = getint('core', 'color')\n\n self.postgresql_options = {\n \"port\": getint(\"postgresql\", \"port\"),\n \"host\": get(\"postgresql\", \"host\"),\n \"dbname\": \"lmfdb\"}\n\n # optional items\n for elt in ['user','password']:\n if _cfgp.has_option(\"postgresql\", elt):\n self.postgresql_options[elt] = get(\"postgresql\", elt)\n\n self.logging_options = {'logfile': get('logging', 'logfile'), 'slowcutoff': float(get('logging', 'slowcutoff')), 'slowlogfile': get('logging', 'slowlogfile') }\n if \"logfocus\" in args_dict:\n self.logging_options[\"logfocus\"] = args_dict[\"logfocus\"]\n if _cfgp.has_option(\"logging\", \"editor\"):\n self.logging_options[\"editor\"] = get(\"logging\", \"editor\")\n\n def get_all(self):\n return { 'flask_options' : self.flask_options, 'postgresql_options' : self.postgresql_options, 'logging_options' : self.logging_options}\n\n def get_flask(self):\n return self.flask_options\n\n def get_color(self):\n return self.color\n\n def get_postgresql(self):\n return self.postgresql_options\n\n def get_postgresql_default(self):\n res = dict(self.default_args[\"postgresql\"])\n res[\"port\"] = int(res[\"port\"])\n return res\n\n def get_logging(self):\n return self.logging_options\n\n\nif __name__ == '__main__':\n Configuration(writeargstofile = True)\n", "path": "lmfdb/utils/config.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# LMFDB - L-function and Modular Forms Database web-site - www.lmfdb.org\n# Copyright (C) 2010-2012 by the LMFDB authors\n#\n# This library is free software; you can redistribute it and/or\n# modify it under the terms of the GNU Library General Public\n# License as published by the Free Software Foundation; either\n# version 2 of the License, or (at your option) any later version.\n\n\"\"\"\nThis file must not depend on other files from this project.\nIt's purpose is to parse a config file (create a default one if none\nis present) and replace values stored within it with those given\nvia optional command-line arguments.\n\"\"\"\nimport argparse\nimport sys\nimport os\n\nclass Configuration(object):\n\n def __init__(self, writeargstofile = False):\n default_config_file = \"config.ini\"\n root_lmfdb_path = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..'))\n if root_lmfdb_path != os.path.abspath(os.getcwd()):\n default_config_file = os.path.relpath(os.path.join(root_lmfdb_path, default_config_file),os.getcwd())\n\n # 1: parsing command-line arguments\n parser = argparse.ArgumentParser(description = 'LMFDB - The L-functions and modular forms database')\n parser.add_argument('-c', '--config-file',\n dest = \"config_file\",\n metavar = \"FILE\",\n help = 'configuration file [default: %(default)s]',\n default = default_config_file)\n\n parser.add_argument(\n '-d', '--debug',\n action = \"store_true\",\n dest = 'core_debug',\n help = 'enable debug mode')\n\n parser.add_argument(\n '--color',\n dest = 'core_color',\n metavar = \"COLOR\",\n help = 'color template (see lmfdb/utils/color.py)',\n default = 19,\n type = int)\n\n parser.add_argument('-p','--port',\n dest = 'web_port',\n metavar = 
'PORT',\n help = 'the LMFDB server will be running on PORT [default: %(default)d]',\n type = int,\n default = 37777)\n parser.add_argument('-b', '--bind_ip',\n dest = 'web_bindip',\n metavar ='HOST',\n help = 'the LMFDB server will be listening to HOST [default: %(default)s]',\n default = '127.0.0.1')\n\n logginggroup = parser.add_argument_group('Logging options:')\n logginggroup.add_argument('--logfile',\n help = 'logfile for flask [default: %(default)s]',\n dest = 'logging_logfile',\n metavar = 'FILE',\n default = 'flasklog')\n\n logginggroup.add_argument('--logfocus',\n help = 'name of a logger to focus on',\n default = argparse.SUPPRESS)\n\n\n logginggroup.add_argument(\n '--slowcutoff',\n dest = 'logging_slowcutoff',\n metavar = \"SLOWCUTOFF\",\n help = 'threshold to log slow queries [default: %(default)s]',\n default = 0.1,\n type = float)\n\n logginggroup.add_argument('--slowlogfile',\n help = 'logfile for slow queries [default: %(default)s]',\n dest = 'logging_slowlogfile',\n metavar = 'FILE',\n default = 'slow_queries.log')\n\n\n # PostgresSQL options\n postgresqlgroup = parser.add_argument_group('PostgreSQL options')\n postgresqlgroup.add_argument('--postgresql-host',\n dest = 'postgresql_host',\n metavar = 'HOST',\n help = 'PostgreSQL server host or socket directory [default: %(default)s]',\n default = 'devmirror.lmfdb.xyz')\n postgresqlgroup.add_argument('--postgresql-port',\n dest = 'postgresql_port',\n metavar = 'PORT',\n type = int,\n help = 'PostgreSQL server port [default: %(default)d]',\n default = 5432)\n\n postgresqlgroup.add_argument('--postgresql-user',\n dest = 'postgresql_user',\n metavar = 'USER',\n help = 'PostgreSQL username [default: %(default)s]',\n default = \"lmfdb\")\n\n postgresqlgroup.add_argument('--postgresql-pass',\n dest = 'postgresql_password',\n metavar = 'PASS',\n help = 'PostgreSQL password [default: %(default)s]',\n default = \"lmfdb\")\n\n # undocumented options\n parser.add_argument('--enable-profiler',\n dest = 'profiler',\n help=argparse.SUPPRESS,\n action='store_true',\n default=argparse.SUPPRESS)\n\n # undocumented flask options\n parser.add_argument('--enable-reloader',\n dest='use_reloader',\n help=argparse.SUPPRESS,\n action='store_true',\n default=argparse.SUPPRESS)\n\n parser.add_argument('--disable-reloader',\n dest='use_reloader',\n help=argparse.SUPPRESS,\n action='store_false',\n default=argparse.SUPPRESS)\n\n parser.add_argument('--enable-debugger',\n dest='use_debugger',\n help=argparse.SUPPRESS,\n action = 'store_true',\n default=argparse.SUPPRESS)\n\n parser.add_argument('--disable-debugger',\n dest='use_debugger',\n help=argparse.SUPPRESS,\n action='store_false',\n default=argparse.SUPPRESS)\n if os.path.split(sys.argv[0])[-1] == \"start-lmfdb.py\" or writeargstofile:\n args = parser.parse_args()\n else:\n # only read config file\n args = parser.parse_args([])\n args_dict = vars(args)\n default_arguments_dict = vars(parser.parse_args([]))\n if writeargstofile:\n default_arguments_dict = dict(args_dict)\n\n del default_arguments_dict['config_file']\n\n self.default_args = {}\n for key, val in default_arguments_dict.iteritems():\n sec, opt = key.split('_', 1)\n if sec not in self.default_args:\n self.default_args[sec] = {}\n self.default_args[sec][opt] = str(val)\n\n\n\n from ConfigParser import ConfigParser\n\n # reading the config file, creating it if necessary\n # 2/1: does config file exist?\n if not os.path.exists(args.config_file):\n if not writeargstofile:\n print(\"Config file: %s not found, creating it with the default 
values\" % args.config_file )\n else:\n print(\"Config file: %s not found, creating it with the passed values\" % args.config_file )\n _cfgp = ConfigParser()\n\n # create sections\n _cfgp.add_section('core')\n _cfgp.add_section('web')\n _cfgp.add_section('postgresql')\n _cfgp.add_section('logging')\n\n\n for sec, options in self.default_args.iteritems():\n for opt, val in options.iteritems():\n _cfgp.set(sec, opt, str(val))\n\n with open(args.config_file, 'wb') as configfile:\n _cfgp.write(configfile)\n\n # 2/2: reading the config file\n _cfgp = ConfigParser()\n _cfgp.read(args.config_file)\n\n\n # 3: override specific settings\n def all(sep = '_'):\n ret = {}\n for s in _cfgp.sections():\n for k, v in _cfgp.items(s):\n ret['%s%s%s' % (s, sep, k)] = v\n return ret\n\n all_set = all()\n\n for key, val in default_arguments_dict.iteritems():\n # if a nondefault value was passed through command line arguments set it\n # or if a default value was not set in the config file\n if args_dict[key] != val or key not in all_set:\n sec, opt = key.split('_')\n _cfgp.set(sec, opt, str(args_dict[key]))\n\n\n # some generic functions\n def get(section, key):\n return _cfgp.get(section, key)\n\n def getint(section, key):\n return _cfgp.getint(section, key)\n\n def getboolean(section, key):\n return _cfgp.getboolean(section, key)\n\n\n\n self.flask_options = {\n \"port\": getint('web', 'port'),\n \"host\": get('web', 'bindip'),\n \"debug\": getboolean('core', 'debug')\n }\n for opt in ['use_debugger', 'use_reloader', 'profiler']:\n if opt in args_dict:\n self.flask_options[opt] = args_dict[opt]\n\n self.color = getint('core', 'color')\n\n self.postgresql_options = {\n \"port\": getint(\"postgresql\", \"port\"),\n \"host\": get(\"postgresql\", \"host\"),\n \"dbname\": \"lmfdb\"}\n\n # optional items\n for elt in ['user','password']:\n if _cfgp.has_option(\"postgresql\", elt):\n self.postgresql_options[elt] = get(\"postgresql\", elt)\n\n self.logging_options = {'logfile': get('logging', 'logfile'), 'slowcutoff': float(get('logging', 'slowcutoff')), 'slowlogfile': get('logging', 'slowlogfile') }\n if \"logfocus\" in args_dict:\n self.logging_options[\"logfocus\"] = args_dict[\"logfocus\"]\n if _cfgp.has_option(\"logging\", \"editor\"):\n self.logging_options[\"editor\"] = get(\"logging\", \"editor\")\n\n def get_all(self):\n return { 'flask_options' : self.flask_options, 'postgresql_options' : self.postgresql_options, 'logging_options' : self.logging_options}\n\n def get_flask(self):\n return self.flask_options\n\n def get_color(self):\n return self.color\n\n def get_postgresql(self):\n return self.postgresql_options\n\n def get_postgresql_default(self):\n res = dict(self.default_args[\"postgresql\"])\n res[\"port\"] = int(res[\"port\"])\n return res\n\n def get_logging(self):\n return self.logging_options\n\n\nif __name__ == '__main__':\n Configuration(writeargstofile = True)\n", "path": "lmfdb/utils/config.py"}]} | 3,239 | 107 |
gh_patches_debug_13555 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-1461 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Content: Clarify that a domain that appears "available" is not guaranteed
### Story
As an applicant
I want to understand that my requested domain is not guaranteed
so that I can set appropriate expectations for myself and my team.
### Acceptance Criteria
On the ".gov Domain" step in the application process, language appears that clarifies a requested domain is not guaranteed.
### Additional Context
During user testing, most people understood what to input, but it could’ve been more explicit that the domain they entered was not guaranteed, even if available. When prompted, most participants understood the process they would go through to get the domain, but the language could be more explicit.
### Issue Links
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/api/views.py`
Content:
```
1 """Internal API views"""
2 from django.apps import apps
3 from django.views.decorators.http import require_http_methods
4 from django.http import HttpResponse, JsonResponse
5 from django.utils.safestring import mark_safe
6
7 from registrar.templatetags.url_helpers import public_site_url
8 from registrar.utility.errors import GenericError, GenericErrorCodes
9
10 import requests
11
12 from login_required import login_not_required
13
14 from cachetools.func import ttl_cache
15
16 from registrar.utility.s3_bucket import S3ClientError, S3ClientHelper
17
18
19 DOMAIN_FILE_URL = "https://raw.githubusercontent.com/cisagov/dotgov-data/main/current-full.csv"
20
21
22 DOMAIN_API_MESSAGES = {
23 "required": "Enter the .gov domain you want. Don’t include “www” or “.gov.”"
24 " For example, if you want www.city.gov, you would enter “city”"
25 " (without the quotes).",
26 "extra_dots": "Enter the .gov domain you want without any periods.",
27 # message below is considered safe; no user input can be inserted into the message
28 # body; public_site_url() function reads from local app settings and therefore safe
29 "unavailable": mark_safe( # nosec
30 "That domain isn’t available. "
31 "<a class='usa-link' href='{}' target='_blank'>"
32 "Read more about choosing your .gov domain.</a>".format(public_site_url("domains/choosing"))
33 ),
34 "invalid": "Enter a domain using only letters, numbers, or hyphens (though we don't recommend using hyphens).",
35 "success": "That domain is available!",
36 "error": GenericError.get_error_message(GenericErrorCodes.CANNOT_CONTACT_REGISTRY),
37 }
38
39
40 # this file doesn't change that often, nor is it that big, so cache the result
41 # in memory for ten minutes
42 @ttl_cache(ttl=600)
43 def _domains():
44 """Return a list of the current .gov domains.
45
46 Fetch a file from DOMAIN_FILE_URL, parse the CSV for the domain,
47 lowercase everything and return the list.
48 """
49 DraftDomain = apps.get_model("registrar.DraftDomain")
50 # 5 second timeout
51 file_contents = requests.get(DOMAIN_FILE_URL, timeout=5).text
52 domains = set()
53 # skip the first line
54 for line in file_contents.splitlines()[1:]:
55 # get the domain before the first comma
56 domain = line.split(",", 1)[0]
57 # sanity-check the string we got from the file here
58 if DraftDomain.string_could_be_domain(domain):
59 # lowercase everything when we put it in domains
60 domains.add(domain.lower())
61 return domains
62
63
64 def check_domain_available(domain):
65 """Return true if the given domain is available.
66
67 The given domain is lowercased to match against the domains list. If the
68 given domain doesn't end with .gov, ".gov" is added when looking for
69 a match. If check fails, throws a RegistryError.
70 """
71 Domain = apps.get_model("registrar.Domain")
72 if domain.endswith(".gov"):
73 return Domain.available(domain)
74 else:
75 # domain search string doesn't end with .gov, add it on here
76 return Domain.available(domain + ".gov")
77
78
79 @require_http_methods(["GET"])
80 @login_not_required
81 def available(request, domain=""):
82 """Is a given domain available or not.
83
84 Response is a JSON dictionary with the key "available" and value true or
85 false.
86 """
87 domain = request.GET.get("domain", "")
88 DraftDomain = apps.get_model("registrar.DraftDomain")
89 # validate that the given domain could be a domain name and fail early if
90 # not.
91 if not (DraftDomain.string_could_be_domain(domain) or DraftDomain.string_could_be_domain(domain + ".gov")):
92 return JsonResponse({"available": False, "message": DOMAIN_API_MESSAGES["invalid"]})
93 # a domain is available if it is NOT in the list of current domains
94 try:
95 if check_domain_available(domain):
96 return JsonResponse({"available": True, "message": DOMAIN_API_MESSAGES["success"]})
97 else:
98 return JsonResponse({"available": False, "message": DOMAIN_API_MESSAGES["unavailable"]})
99 except Exception:
100 return JsonResponse({"available": False, "message": DOMAIN_API_MESSAGES["error"]})
101
102
103 @require_http_methods(["GET"])
104 @login_not_required
105 def get_current_full(request, file_name="current-full.csv"):
106 """This will return the file content of current-full.csv which is the command
107 output of generate_current_full_report.py. This command iterates through each Domain
108 and returns a CSV representation."""
109 return serve_file(file_name)
110
111
112 @require_http_methods(["GET"])
113 @login_not_required
114 def get_current_federal(request, file_name="current-federal.csv"):
115 """This will return the file content of current-federal.csv which is the command
116 output of generate_current_federal_report.py. This command iterates through each Domain
117 and returns a CSV representation."""
118 return serve_file(file_name)
119
120
121 def serve_file(file_name):
122 """Downloads a file based on a given filepath. Returns a 500 if not found."""
123 s3_client = S3ClientHelper()
124 # Serve the CSV file. If not found, an exception will be thrown.
125 # This will then be caught by flat, causing it to not read it - which is what we want.
126 try:
127 file = s3_client.get_file(file_name, decode_to_utf=True)
128 except S3ClientError as err:
129 # TODO - #1317: Notify operations when auto report generation fails
130 raise err
131
132 response = HttpResponse(file)
133 return response
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/api/views.py b/src/api/views.py
--- a/src/api/views.py
+++ b/src/api/views.py
@@ -32,7 +32,9 @@
"Read more about choosing your .gov domain.</a>".format(public_site_url("domains/choosing"))
),
"invalid": "Enter a domain using only letters, numbers, or hyphens (though we don't recommend using hyphens).",
- "success": "That domain is available!",
+ "success": "That domain is available! We’ll try to give you the domain you want, \
+ but it's not guaranteed. After you complete this form, we’ll \
+ evaluate whether your request meets our requirements.",
"error": GenericError.get_error_message(GenericErrorCodes.CANNOT_CONTACT_REGISTRY),
}
| {"golden_diff": "diff --git a/src/api/views.py b/src/api/views.py\n--- a/src/api/views.py\n+++ b/src/api/views.py\n@@ -32,7 +32,9 @@\n \"Read more about choosing your .gov domain.</a>\".format(public_site_url(\"domains/choosing\"))\n ),\n \"invalid\": \"Enter a domain using only letters, numbers, or hyphens (though we don't recommend using hyphens).\",\n- \"success\": \"That domain is available!\",\n+ \"success\": \"That domain is available! We\u2019ll try to give you the domain you want, \\\n+ but it's not guaranteed. After you complete this form, we\u2019ll \\\n+ evaluate whether your request meets our requirements.\",\n \"error\": GenericError.get_error_message(GenericErrorCodes.CANNOT_CONTACT_REGISTRY),\n }\n", "issue": "Content: Clarify that a domain that appears \"available\" is not guaranteed\n### Story\nAs an applicant\nI want to understand that my requested domain is not guaranteed\nso that I can set appropriate expectations for myself and my team.\n\n\n### Acceptance Criteria\nOn the \".gov Domain\" step in the application process, language appears that clarifies a requested domain is not guaranteed.\n\n### Additional Context\nDuring user testing, most people understood what to input, but it could\u2019ve been more explicit that the domain they entered was not guaranteed, even if available. When prompted, most participants understood the process they would go through to get the domain, but the language could be more explicit.\n\n### Issue Links\n_No response_\n", "before_files": [{"content": "\"\"\"Internal API views\"\"\"\nfrom django.apps import apps\nfrom django.views.decorators.http import require_http_methods\nfrom django.http import HttpResponse, JsonResponse\nfrom django.utils.safestring import mark_safe\n\nfrom registrar.templatetags.url_helpers import public_site_url\nfrom registrar.utility.errors import GenericError, GenericErrorCodes\n\nimport requests\n\nfrom login_required import login_not_required\n\nfrom cachetools.func import ttl_cache\n\nfrom registrar.utility.s3_bucket import S3ClientError, S3ClientHelper\n\n\nDOMAIN_FILE_URL = \"https://raw.githubusercontent.com/cisagov/dotgov-data/main/current-full.csv\"\n\n\nDOMAIN_API_MESSAGES = {\n \"required\": \"Enter the .gov domain you want. Don\u2019t include \u201cwww\u201d or \u201c.gov.\u201d\"\n \" For example, if you want www.city.gov, you would enter \u201ccity\u201d\"\n \" (without the quotes).\",\n \"extra_dots\": \"Enter the .gov domain you want without any periods.\",\n # message below is considered safe; no user input can be inserted into the message\n # body; public_site_url() function reads from local app settings and therefore safe\n \"unavailable\": mark_safe( # nosec\n \"That domain isn\u2019t available. 
\"\n \"<a class='usa-link' href='{}' target='_blank'>\"\n \"Read more about choosing your .gov domain.</a>\".format(public_site_url(\"domains/choosing\"))\n ),\n \"invalid\": \"Enter a domain using only letters, numbers, or hyphens (though we don't recommend using hyphens).\",\n \"success\": \"That domain is available!\",\n \"error\": GenericError.get_error_message(GenericErrorCodes.CANNOT_CONTACT_REGISTRY),\n}\n\n\n# this file doesn't change that often, nor is it that big, so cache the result\n# in memory for ten minutes\n@ttl_cache(ttl=600)\ndef _domains():\n \"\"\"Return a list of the current .gov domains.\n\n Fetch a file from DOMAIN_FILE_URL, parse the CSV for the domain,\n lowercase everything and return the list.\n \"\"\"\n DraftDomain = apps.get_model(\"registrar.DraftDomain\")\n # 5 second timeout\n file_contents = requests.get(DOMAIN_FILE_URL, timeout=5).text\n domains = set()\n # skip the first line\n for line in file_contents.splitlines()[1:]:\n # get the domain before the first comma\n domain = line.split(\",\", 1)[0]\n # sanity-check the string we got from the file here\n if DraftDomain.string_could_be_domain(domain):\n # lowercase everything when we put it in domains\n domains.add(domain.lower())\n return domains\n\n\ndef check_domain_available(domain):\n \"\"\"Return true if the given domain is available.\n\n The given domain is lowercased to match against the domains list. If the\n given domain doesn't end with .gov, \".gov\" is added when looking for\n a match. If check fails, throws a RegistryError.\n \"\"\"\n Domain = apps.get_model(\"registrar.Domain\")\n if domain.endswith(\".gov\"):\n return Domain.available(domain)\n else:\n # domain search string doesn't end with .gov, add it on here\n return Domain.available(domain + \".gov\")\n\n\n@require_http_methods([\"GET\"])\n@login_not_required\ndef available(request, domain=\"\"):\n \"\"\"Is a given domain available or not.\n\n Response is a JSON dictionary with the key \"available\" and value true or\n false.\n \"\"\"\n domain = request.GET.get(\"domain\", \"\")\n DraftDomain = apps.get_model(\"registrar.DraftDomain\")\n # validate that the given domain could be a domain name and fail early if\n # not.\n if not (DraftDomain.string_could_be_domain(domain) or DraftDomain.string_could_be_domain(domain + \".gov\")):\n return JsonResponse({\"available\": False, \"message\": DOMAIN_API_MESSAGES[\"invalid\"]})\n # a domain is available if it is NOT in the list of current domains\n try:\n if check_domain_available(domain):\n return JsonResponse({\"available\": True, \"message\": DOMAIN_API_MESSAGES[\"success\"]})\n else:\n return JsonResponse({\"available\": False, \"message\": DOMAIN_API_MESSAGES[\"unavailable\"]})\n except Exception:\n return JsonResponse({\"available\": False, \"message\": DOMAIN_API_MESSAGES[\"error\"]})\n\n\n@require_http_methods([\"GET\"])\n@login_not_required\ndef get_current_full(request, file_name=\"current-full.csv\"):\n \"\"\"This will return the file content of current-full.csv which is the command\n output of generate_current_full_report.py. This command iterates through each Domain\n and returns a CSV representation.\"\"\"\n return serve_file(file_name)\n\n\n@require_http_methods([\"GET\"])\n@login_not_required\ndef get_current_federal(request, file_name=\"current-federal.csv\"):\n \"\"\"This will return the file content of current-federal.csv which is the command\n output of generate_current_federal_report.py. 
This command iterates through each Domain\n and returns a CSV representation.\"\"\"\n return serve_file(file_name)\n\n\ndef serve_file(file_name):\n \"\"\"Downloads a file based on a given filepath. Returns a 500 if not found.\"\"\"\n s3_client = S3ClientHelper()\n # Serve the CSV file. If not found, an exception will be thrown.\n # This will then be caught by flat, causing it to not read it - which is what we want.\n try:\n file = s3_client.get_file(file_name, decode_to_utf=True)\n except S3ClientError as err:\n # TODO - #1317: Notify operations when auto report generation fails\n raise err\n\n response = HttpResponse(file)\n return response\n", "path": "src/api/views.py"}], "after_files": [{"content": "\"\"\"Internal API views\"\"\"\nfrom django.apps import apps\nfrom django.views.decorators.http import require_http_methods\nfrom django.http import HttpResponse, JsonResponse\nfrom django.utils.safestring import mark_safe\n\nfrom registrar.templatetags.url_helpers import public_site_url\nfrom registrar.utility.errors import GenericError, GenericErrorCodes\n\nimport requests\n\nfrom login_required import login_not_required\n\nfrom cachetools.func import ttl_cache\n\nfrom registrar.utility.s3_bucket import S3ClientError, S3ClientHelper\n\n\nDOMAIN_FILE_URL = \"https://raw.githubusercontent.com/cisagov/dotgov-data/main/current-full.csv\"\n\n\nDOMAIN_API_MESSAGES = {\n \"required\": \"Enter the .gov domain you want. Don\u2019t include \u201cwww\u201d or \u201c.gov.\u201d\"\n \" For example, if you want www.city.gov, you would enter \u201ccity\u201d\"\n \" (without the quotes).\",\n \"extra_dots\": \"Enter the .gov domain you want without any periods.\",\n # message below is considered safe; no user input can be inserted into the message\n # body; public_site_url() function reads from local app settings and therefore safe\n \"unavailable\": mark_safe( # nosec\n \"That domain isn\u2019t available. \"\n \"<a class='usa-link' href='{}' target='_blank'>\"\n \"Read more about choosing your .gov domain.</a>\".format(public_site_url(\"domains/choosing\"))\n ),\n \"invalid\": \"Enter a domain using only letters, numbers, or hyphens (though we don't recommend using hyphens).\",\n \"success\": \"That domain is available! We\u2019ll try to give you the domain you want, \\\n but it's not guaranteed. After you complete this form, we\u2019ll \\\n evaluate whether your request meets our requirements.\",\n \"error\": GenericError.get_error_message(GenericErrorCodes.CANNOT_CONTACT_REGISTRY),\n}\n\n\n# this file doesn't change that often, nor is it that big, so cache the result\n# in memory for ten minutes\n@ttl_cache(ttl=600)\ndef _domains():\n \"\"\"Return a list of the current .gov domains.\n\n Fetch a file from DOMAIN_FILE_URL, parse the CSV for the domain,\n lowercase everything and return the list.\n \"\"\"\n DraftDomain = apps.get_model(\"registrar.DraftDomain\")\n # 5 second timeout\n file_contents = requests.get(DOMAIN_FILE_URL, timeout=5).text\n domains = set()\n # skip the first line\n for line in file_contents.splitlines()[1:]:\n # get the domain before the first comma\n domain = line.split(\",\", 1)[0]\n # sanity-check the string we got from the file here\n if DraftDomain.string_could_be_domain(domain):\n # lowercase everything when we put it in domains\n domains.add(domain.lower())\n return domains\n\n\ndef check_domain_available(domain):\n \"\"\"Return true if the given domain is available.\n\n The given domain is lowercased to match against the domains list. 
If the\n given domain doesn't end with .gov, \".gov\" is added when looking for\n a match. If check fails, throws a RegistryError.\n \"\"\"\n Domain = apps.get_model(\"registrar.Domain\")\n if domain.endswith(\".gov\"):\n return Domain.available(domain)\n else:\n # domain search string doesn't end with .gov, add it on here\n return Domain.available(domain + \".gov\")\n\n\n@require_http_methods([\"GET\"])\n@login_not_required\ndef available(request, domain=\"\"):\n \"\"\"Is a given domain available or not.\n\n Response is a JSON dictionary with the key \"available\" and value true or\n false.\n \"\"\"\n domain = request.GET.get(\"domain\", \"\")\n DraftDomain = apps.get_model(\"registrar.DraftDomain\")\n # validate that the given domain could be a domain name and fail early if\n # not.\n if not (DraftDomain.string_could_be_domain(domain) or DraftDomain.string_could_be_domain(domain + \".gov\")):\n return JsonResponse({\"available\": False, \"message\": DOMAIN_API_MESSAGES[\"invalid\"]})\n # a domain is available if it is NOT in the list of current domains\n try:\n if check_domain_available(domain):\n return JsonResponse({\"available\": True, \"message\": DOMAIN_API_MESSAGES[\"success\"]})\n else:\n return JsonResponse({\"available\": False, \"message\": DOMAIN_API_MESSAGES[\"unavailable\"]})\n except Exception:\n return JsonResponse({\"available\": False, \"message\": DOMAIN_API_MESSAGES[\"error\"]})\n\n\n@require_http_methods([\"GET\"])\n@login_not_required\ndef get_current_full(request, file_name=\"current-full.csv\"):\n \"\"\"This will return the file content of current-full.csv which is the command\n output of generate_current_full_report.py. This command iterates through each Domain\n and returns a CSV representation.\"\"\"\n return serve_file(file_name)\n\n\n@require_http_methods([\"GET\"])\n@login_not_required\ndef get_current_federal(request, file_name=\"current-federal.csv\"):\n \"\"\"This will return the file content of current-federal.csv which is the command\n output of generate_current_federal_report.py. This command iterates through each Domain\n and returns a CSV representation.\"\"\"\n return serve_file(file_name)\n\n\ndef serve_file(file_name):\n \"\"\"Downloads a file based on a given filepath. Returns a 500 if not found.\"\"\"\n s3_client = S3ClientHelper()\n # Serve the CSV file. If not found, an exception will be thrown.\n # This will then be caught by flat, causing it to not read it - which is what we want.\n try:\n file = s3_client.get_file(file_name, decode_to_utf=True)\n except S3ClientError as err:\n # TODO - #1317: Notify operations when auto report generation fails\n raise err\n\n response = HttpResponse(file)\n return response\n", "path": "src/api/views.py"}]} | 1,925 | 175 |
gh_patches_debug_42612 | rasdani/github-patches | git_diff | common-workflow-language__cwltool-1228 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Replace galaxy-lib requirement with galaxy-tool-util
I'm working on Planemo to replace its dependency on galaxy-lib (which will be basically deprecated with Galaxy release 19.09) with [galaxy-tool-util](https://pypi.org/project/galaxy-tool-util/). Unfortunately, CWL support in Planemo via cwltool would break unless cwltool also moves to galaxy-tool-util.
I can do the necessary changes in cwltool when I'm back from holidays; in the meantime I wanted to check that this is fine with you all.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cwltool/software_requirements.py`
Content:
```
1 """This module handles resolution of SoftwareRequirement hints.
2
3 This is accomplished mainly by adapting cwltool internals to galaxy-lib's
4 concept of "dependencies". Despite the name, galaxy-lib is a light weight
5 library that can be used to map SoftwareRequirements in all sorts of ways -
6 Homebrew, Conda, custom scripts, environment modules. We'd be happy to find
7 ways to adapt new packages managers and such as well.
8 """
9 from __future__ import absolute_import
10
11 import argparse # pylint: disable=unused-import
12 import os
13 import string
14 from typing import Dict, List, MutableSequence, Optional
15
16 from typing_extensions import Text # pylint: disable=unused-import
17 # move to a regular typing import when Python 3.3-3.6 is no longer supported
18
19 from .builder import Builder, HasReqsHints
20 try:
21 from galaxy.tools.deps.requirements import ToolRequirement, ToolRequirements
22 from galaxy.tools import deps
23 except ImportError:
24 ToolRequirement = None # type: ignore
25 ToolRequirements = None # type: ignore
26 deps = None # type: ignore
27
28
29 SOFTWARE_REQUIREMENTS_ENABLED = deps is not None
30
31 COMMAND_WITH_DEPENDENCIES_TEMPLATE = string.Template("""#!/bin/bash
32 $handle_dependencies
33 python "run_job.py" "job.json"
34 """)
35
36
37 class DependenciesConfiguration(object):
38
39 def __init__(self, args):
40 # type: (argparse.Namespace) -> None
41 """Initialize."""
42 conf_file = getattr(args, "beta_dependency_resolvers_configuration", None)
43 tool_dependency_dir = getattr(args, "beta_dependencies_directory", None)
44 conda_dependencies = getattr(args, "beta_conda_dependencies", None)
45 if conf_file is not None and os.path.exists(conf_file):
46 self.use_tool_dependencies = True
47 if tool_dependency_dir is None:
48 tool_dependency_dir = os.path.abspath(os.path.dirname(conf_file))
49 self.tool_dependency_dir = tool_dependency_dir
50 self.dependency_resolvers_config_file = os.path.abspath(conf_file)
51 elif conda_dependencies is not None:
52 if not tool_dependency_dir is not None:
53 tool_dependency_dir = os.path.abspath("./cwltool_deps")
54 self.tool_dependency_dir = tool_dependency_dir
55 self.use_tool_dependencies = True
56 self.dependency_resolvers_config_file = None
57 else:
58 self.use_tool_dependencies = False
59
60 @property
61 def config_dict(self): # type: () -> Dict[Text, bool]
62 return {
63 'conda_auto_install': True,
64 'conda_auto_init': True,
65 }
66
67 def build_job_script(self, builder, command):
68 # type: (Builder, List[str]) -> Text
69 ensure_galaxy_lib_available()
70 tool_dependency_manager = deps.build_dependency_manager(self) # type: deps.DependencyManager
71 dependencies = get_dependencies(builder)
72 handle_dependencies = "" # str
73 if dependencies:
74 handle_dependencies = "\n".join(
75 tool_dependency_manager.dependency_shell_commands(
76 dependencies, job_directory=builder.tmpdir))
77
78 template_kwds = dict(handle_dependencies=handle_dependencies) # type: Dict[str, str]
79 job_script = COMMAND_WITH_DEPENDENCIES_TEMPLATE.substitute(template_kwds)
80 return job_script
81
82
83 def get_dependencies(builder): # type: (HasReqsHints) -> ToolRequirements
84 (software_requirement, _) = builder.get_requirement("SoftwareRequirement")
85 dependencies = [] # type: List[ToolRequirement]
86 if software_requirement and software_requirement.get("packages"):
87 packages = software_requirement.get("packages")
88 for package in packages:
89 version = package.get("version", None)
90 if isinstance(version, MutableSequence):
91 if version:
92 version = version[0]
93 else:
94 version = None
95 specs = [{"uri": s} for s in package.get("specs", [])]
96 dependencies.append(ToolRequirement.from_dict(dict(
97 name=package["package"].split("#")[-1],
98 version=version,
99 type="package",
100 specs=specs,
101 )))
102
103 return ToolRequirements.from_list(dependencies)
104
105
106 def get_container_from_software_requirements(use_biocontainers, builder):
107 # type: (bool, HasReqsHints) -> Optional[Text]
108 if use_biocontainers:
109 ensure_galaxy_lib_available()
110 from galaxy.tools.deps.containers import ContainerRegistry, AppInfo, ToolInfo, DOCKER_CONTAINER_TYPE
111 app_info = AppInfo(
112 involucro_auto_init=True,
113 enable_beta_mulled_containers=True,
114 container_image_cache_path=".",
115 ) # type: AppInfo
116 container_registry = ContainerRegistry(app_info) # type: ContainerRegistry
117 requirements = get_dependencies(builder)
118 tool_info = ToolInfo(requirements=requirements) # type: ToolInfo
119 container_description = container_registry.find_best_container_description([DOCKER_CONTAINER_TYPE], tool_info)
120 if container_description:
121 return container_description.identifier
122
123 return None
124
125
126 def ensure_galaxy_lib_available():
127 # type: () -> None
128 if not SOFTWARE_REQUIREMENTS_ENABLED:
129 raise Exception("Optional Python library galaxy-lib not available, it is required for this configuration.")
130
```
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 """Setup for the reference implementation of the CWL standards."""
3 from __future__ import print_function
4 import os
5 import sys
6
7 import setuptools.command.egg_info as egg_info_cmd
8 from setuptools import setup
9
10 SETUP_DIR = os.path.dirname(__file__)
11 README = os.path.join(SETUP_DIR, 'README.rst')
12
13 try:
14 import gittaggers
15
16 Tagger = gittaggers.EggInfoFromGit
17 except ImportError:
18 Tagger = egg_info_cmd.egg_info
19
20 NEEDS_PYTEST = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
21 PYTEST_RUNNER = ['pytest-runner', 'pytest-cov'] if NEEDS_PYTEST else []
22
23 if sys.version_info < (3, 0):
24 print("""
25 DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020.
26 Please upgrade your Python as the Python 2.7 version of cwltool won't be
27 maintained after that date.
28 """, file=sys.stderr)
29
30 setup(name='cwltool',
31 version='1.0',
32 description='Common workflow language reference implementation',
33 long_description=open(README).read(),
34 long_description_content_type="text/x-rst",
35 author='Common workflow language working group',
36 author_email='common-workflow-language@googlegroups.com',
37 url="https://github.com/common-workflow-language/cwltool",
38 download_url="https://github.com/common-workflow-language/cwltool",
39 # platforms='', # empty as is conveyed by the classifier below
40 # license='', # empty as is conveyed by the classifier below
41 packages=["cwltool", 'cwltool.tests'],
42 package_dir={'cwltool.tests': 'tests'},
43 package_data={'cwltool': [
44 'schemas/v1.0/*.yml',
45 'schemas/v1.0/*.md',
46 'schemas/v1.0/salad/schema_salad/metaschema/*.yml',
47 'schemas/v1.0/salad/schema_salad/metaschema/*.md',
48 'schemas/v1.1.0-dev1/*.yml',
49 'schemas/v1.1.0-dev1/*.md',
50 'schemas/v1.1.0-dev1/salad/schema_salad/metaschema/*.yml',
51 'schemas/v1.1.0-dev1/salad/schema_salad/metaschema/*.md',
52 'schemas/v1.1/*.yml',
53 'schemas/v1.1/*.md',
54 'schemas/v1.1/salad/schema_salad/metaschema/*.yml',
55 'schemas/v1.1/salad/schema_salad/metaschema/*.md',
56 'cwlNodeEngine.js',
57 'cwlNodeEngineJSConsole.js',
58 'extensions.yml',
59 'hello.simg']},
60 include_package_data=True,
61 install_requires=[
62 'setuptools',
63 'requests >= 2.6.1', # >= 2.6.1 to workaround
64 # https://github.com/ionrock/cachecontrol/issues/137
65 'ruamel.yaml >= 0.12.4, <= 0.16',
66 'rdflib >= 4.2.2, < 4.3.0',
67 'shellescape >= 3.4.1, < 3.5',
68 'schema-salad >= 4.5, < 5',
69 'mypy-extensions',
70 'six >= 1.9.0', # >= 1.9.0 required by prov
71 'psutil',
72 'scandir',
73 'prov == 1.5.1',
74 'bagit >= 1.6.4',
75 'typing-extensions',
76 'coloredlogs',
77 'future >= 0.16',
78 'pathlib2 != 2.3.1'
79 ],
80 extras_require={
81 ':os.name=="posix" and python_version<"3.5"': ['subprocess32 >= 3.5.0'],
82 ':python_version<"3.6"': ['typing >= 3.5.3'],
83 'deps': ["galaxy-lib >= 17.09.9, <= 18.9.2 "],
84 'docs': ["sphinx >= 2.2", "sphinx-rtd-theme"],
85 },
86 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4',
87 setup_requires=PYTEST_RUNNER,
88 test_suite='tests',
89 tests_require=['pytest < 4.3.0', 'mock >= 2.0.0', 'pytest-mock >= 1.10.0',
90 'arcp >= 0.2.0', 'rdflib-jsonld >= 0.4.0'],
91 entry_points={
92 'console_scripts': ["cwltool=cwltool.main:run"]
93 },
94 zip_safe=True,
95 cmdclass={'egg_info': Tagger},
96 classifiers=[
97 'Development Status :: 5 - Production/Stable',
98 'Environment :: Console',
99 'Intended Audience :: Developers',
100 'Intended Audience :: Science/Research',
101 'Intended Audience :: Healthcare Industry',
102 'License :: OSI Approved :: Apache Software License',
103 'Natural Language :: English',
104 'Operating System :: MacOS :: MacOS X',
105 'Operating System :: POSIX',
106 'Operating System :: POSIX :: Linux',
107 'Operating System :: OS Independent',
108 'Operating System :: Microsoft :: Windows',
109 'Operating System :: Microsoft :: Windows :: Windows 10',
110 'Operating System :: Microsoft :: Windows :: Windows 8.1',
111 # 'Operating System :: Microsoft :: Windows :: Windows 8', # not tested
112 # 'Operating System :: Microsoft :: Windows :: Windows 7', # not tested
113 'Programming Language :: Python :: 2',
114 'Programming Language :: Python :: 2.7',
115 'Programming Language :: Python :: 3',
116 'Programming Language :: Python :: 3.5',
117 'Programming Language :: Python :: 3.6',
118 'Programming Language :: Python :: 3.7',
119 'Programming Language :: Python :: 3.8',
120 'Topic :: Scientific/Engineering',
121 'Topic :: Scientific/Engineering :: Bio-Informatics',
122 'Topic :: Scientific/Engineering :: Astronomy',
123 'Topic :: Scientific/Engineering :: Atmospheric Science',
124 'Topic :: Scientific/Engineering :: Information Analysis',
125 'Topic :: Scientific/Engineering :: Medical Science Apps.',
126 'Topic :: System :: Distributed Computing',
127 'Topic :: Utilities',
128 ]
129 )
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cwltool/software_requirements.py b/cwltool/software_requirements.py
--- a/cwltool/software_requirements.py
+++ b/cwltool/software_requirements.py
@@ -18,8 +18,8 @@
from .builder import Builder, HasReqsHints
try:
- from galaxy.tools.deps.requirements import ToolRequirement, ToolRequirements
- from galaxy.tools import deps
+ from galaxy.tool_util.deps.requirements import ToolRequirement, ToolRequirements
+ from galaxy.tool_util import deps
except ImportError:
ToolRequirement = None # type: ignore
ToolRequirements = None # type: ignore
@@ -49,7 +49,7 @@
self.tool_dependency_dir = tool_dependency_dir
self.dependency_resolvers_config_file = os.path.abspath(conf_file)
elif conda_dependencies is not None:
- if not tool_dependency_dir is not None:
+ if tool_dependency_dir is None:
tool_dependency_dir = os.path.abspath("./cwltool_deps")
self.tool_dependency_dir = tool_dependency_dir
self.use_tool_dependencies = True
@@ -57,17 +57,22 @@
else:
self.use_tool_dependencies = False
- @property
- def config_dict(self): # type: () -> Dict[Text, bool]
- return {
- 'conda_auto_install': True,
- 'conda_auto_init': True,
- }
-
def build_job_script(self, builder, command):
# type: (Builder, List[str]) -> Text
ensure_galaxy_lib_available()
- tool_dependency_manager = deps.build_dependency_manager(self) # type: deps.DependencyManager
+ resolution_config_dict = {
+ 'use': self.use_tool_dependencies,
+ 'default_base_path': self.tool_dependency_dir,
+ }
+ app_config = {
+ 'conda_auto_install': True,
+ 'conda_auto_init': True,
+ }
+ tool_dependency_manager = deps.build_dependency_manager(
+ app_config_dict=app_config,
+ resolution_config_dict=resolution_config_dict,
+ conf_file=self.dependency_resolvers_config_file,
+ ) # type: deps.DependencyManager
dependencies = get_dependencies(builder)
handle_dependencies = "" # str
if dependencies:
@@ -107,10 +112,11 @@
# type: (bool, HasReqsHints) -> Optional[Text]
if use_biocontainers:
ensure_galaxy_lib_available()
- from galaxy.tools.deps.containers import ContainerRegistry, AppInfo, ToolInfo, DOCKER_CONTAINER_TYPE
+ from galaxy.tool_util.deps.dependencies import AppInfo, ToolInfo
+ from galaxy.tool_util.deps.containers import ContainerRegistry, DOCKER_CONTAINER_TYPE
app_info = AppInfo(
involucro_auto_init=True,
- enable_beta_mulled_containers=True,
+ enable_mulled_containers=True,
container_image_cache_path=".",
) # type: AppInfo
container_registry = ContainerRegistry(app_info) # type: ContainerRegistry
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -80,7 +80,7 @@
extras_require={
':os.name=="posix" and python_version<"3.5"': ['subprocess32 >= 3.5.0'],
':python_version<"3.6"': ['typing >= 3.5.3'],
- 'deps': ["galaxy-lib >= 17.09.9, <= 18.9.2 "],
+ 'deps': ["galaxy-tool-util"],
'docs': ["sphinx >= 2.2", "sphinx-rtd-theme"],
},
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4',
| {"golden_diff": "diff --git a/cwltool/software_requirements.py b/cwltool/software_requirements.py\n--- a/cwltool/software_requirements.py\n+++ b/cwltool/software_requirements.py\n@@ -18,8 +18,8 @@\n \n from .builder import Builder, HasReqsHints\n try:\n- from galaxy.tools.deps.requirements import ToolRequirement, ToolRequirements\n- from galaxy.tools import deps\n+ from galaxy.tool_util.deps.requirements import ToolRequirement, ToolRequirements\n+ from galaxy.tool_util import deps\n except ImportError:\n ToolRequirement = None # type: ignore\n ToolRequirements = None # type: ignore\n@@ -49,7 +49,7 @@\n self.tool_dependency_dir = tool_dependency_dir\n self.dependency_resolvers_config_file = os.path.abspath(conf_file)\n elif conda_dependencies is not None:\n- if not tool_dependency_dir is not None:\n+ if tool_dependency_dir is None:\n tool_dependency_dir = os.path.abspath(\"./cwltool_deps\")\n self.tool_dependency_dir = tool_dependency_dir\n self.use_tool_dependencies = True\n@@ -57,17 +57,22 @@\n else:\n self.use_tool_dependencies = False\n \n- @property\n- def config_dict(self): # type: () -> Dict[Text, bool]\n- return {\n- 'conda_auto_install': True,\n- 'conda_auto_init': True,\n- }\n-\n def build_job_script(self, builder, command):\n # type: (Builder, List[str]) -> Text\n ensure_galaxy_lib_available()\n- tool_dependency_manager = deps.build_dependency_manager(self) # type: deps.DependencyManager\n+ resolution_config_dict = {\n+ 'use': self.use_tool_dependencies,\n+ 'default_base_path': self.tool_dependency_dir,\n+ }\n+ app_config = {\n+ 'conda_auto_install': True,\n+ 'conda_auto_init': True,\n+ }\n+ tool_dependency_manager = deps.build_dependency_manager(\n+ app_config_dict=app_config,\n+ resolution_config_dict=resolution_config_dict,\n+ conf_file=self.dependency_resolvers_config_file,\n+ ) # type: deps.DependencyManager\n dependencies = get_dependencies(builder)\n handle_dependencies = \"\" # str\n if dependencies:\n@@ -107,10 +112,11 @@\n # type: (bool, HasReqsHints) -> Optional[Text]\n if use_biocontainers:\n ensure_galaxy_lib_available()\n- from galaxy.tools.deps.containers import ContainerRegistry, AppInfo, ToolInfo, DOCKER_CONTAINER_TYPE\n+ from galaxy.tool_util.deps.dependencies import AppInfo, ToolInfo\n+ from galaxy.tool_util.deps.containers import ContainerRegistry, DOCKER_CONTAINER_TYPE\n app_info = AppInfo(\n involucro_auto_init=True,\n- enable_beta_mulled_containers=True,\n+ enable_mulled_containers=True,\n container_image_cache_path=\".\",\n ) # type: AppInfo\n container_registry = ContainerRegistry(app_info) # type: ContainerRegistry\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -80,7 +80,7 @@\n extras_require={\n ':os.name==\"posix\" and python_version<\"3.5\"': ['subprocess32 >= 3.5.0'],\n ':python_version<\"3.6\"': ['typing >= 3.5.3'],\n- 'deps': [\"galaxy-lib >= 17.09.9, <= 18.9.2 \"],\n+ 'deps': [\"galaxy-tool-util\"],\n 'docs': [\"sphinx >= 2.2\", \"sphinx-rtd-theme\"],\n },\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4',\n", "issue": "Replace galaxy-lib requirement with galaxy-tool-util\nI'm working on Planemo to replace its dependency on galaxy-lib (which will be basically deprecated with Galaxy release 19.09) with [galaxy-tool-util](https://pypi.org/project/galaxy-tool-util/). 
Unfortunately CWL support in Planemo via cwltool would break unless also cwltool moves to galaxy-tool-util.\r\n\r\nI can do the necessary changes in cwltool when I'm back from holidays, in the mean time I wanted to check that's fine with you all.\n", "before_files": [{"content": "\"\"\"This module handles resolution of SoftwareRequirement hints.\n\nThis is accomplished mainly by adapting cwltool internals to galaxy-lib's\nconcept of \"dependencies\". Despite the name, galaxy-lib is a light weight\nlibrary that can be used to map SoftwareRequirements in all sorts of ways -\nHomebrew, Conda, custom scripts, environment modules. We'd be happy to find\nways to adapt new packages managers and such as well.\n\"\"\"\nfrom __future__ import absolute_import\n\nimport argparse # pylint: disable=unused-import\nimport os\nimport string\nfrom typing import Dict, List, MutableSequence, Optional\n\nfrom typing_extensions import Text # pylint: disable=unused-import\n# move to a regular typing import when Python 3.3-3.6 is no longer supported\n\nfrom .builder import Builder, HasReqsHints\ntry:\n from galaxy.tools.deps.requirements import ToolRequirement, ToolRequirements\n from galaxy.tools import deps\nexcept ImportError:\n ToolRequirement = None # type: ignore\n ToolRequirements = None # type: ignore\n deps = None # type: ignore\n\n\nSOFTWARE_REQUIREMENTS_ENABLED = deps is not None\n\nCOMMAND_WITH_DEPENDENCIES_TEMPLATE = string.Template(\"\"\"#!/bin/bash\n$handle_dependencies\npython \"run_job.py\" \"job.json\"\n\"\"\")\n\n\nclass DependenciesConfiguration(object):\n\n def __init__(self, args):\n # type: (argparse.Namespace) -> None\n \"\"\"Initialize.\"\"\"\n conf_file = getattr(args, \"beta_dependency_resolvers_configuration\", None)\n tool_dependency_dir = getattr(args, \"beta_dependencies_directory\", None)\n conda_dependencies = getattr(args, \"beta_conda_dependencies\", None)\n if conf_file is not None and os.path.exists(conf_file):\n self.use_tool_dependencies = True\n if tool_dependency_dir is None:\n tool_dependency_dir = os.path.abspath(os.path.dirname(conf_file))\n self.tool_dependency_dir = tool_dependency_dir\n self.dependency_resolvers_config_file = os.path.abspath(conf_file)\n elif conda_dependencies is not None:\n if not tool_dependency_dir is not None:\n tool_dependency_dir = os.path.abspath(\"./cwltool_deps\")\n self.tool_dependency_dir = tool_dependency_dir\n self.use_tool_dependencies = True\n self.dependency_resolvers_config_file = None\n else:\n self.use_tool_dependencies = False\n\n @property\n def config_dict(self): # type: () -> Dict[Text, bool]\n return {\n 'conda_auto_install': True,\n 'conda_auto_init': True,\n }\n\n def build_job_script(self, builder, command):\n # type: (Builder, List[str]) -> Text\n ensure_galaxy_lib_available()\n tool_dependency_manager = deps.build_dependency_manager(self) # type: deps.DependencyManager\n dependencies = get_dependencies(builder)\n handle_dependencies = \"\" # str\n if dependencies:\n handle_dependencies = \"\\n\".join(\n tool_dependency_manager.dependency_shell_commands(\n dependencies, job_directory=builder.tmpdir))\n\n template_kwds = dict(handle_dependencies=handle_dependencies) # type: Dict[str, str]\n job_script = COMMAND_WITH_DEPENDENCIES_TEMPLATE.substitute(template_kwds)\n return job_script\n\n\ndef get_dependencies(builder): # type: (HasReqsHints) -> ToolRequirements\n (software_requirement, _) = builder.get_requirement(\"SoftwareRequirement\")\n dependencies = [] # type: List[ToolRequirement]\n if software_requirement and 
software_requirement.get(\"packages\"):\n packages = software_requirement.get(\"packages\")\n for package in packages:\n version = package.get(\"version\", None)\n if isinstance(version, MutableSequence):\n if version:\n version = version[0]\n else:\n version = None\n specs = [{\"uri\": s} for s in package.get(\"specs\", [])]\n dependencies.append(ToolRequirement.from_dict(dict(\n name=package[\"package\"].split(\"#\")[-1],\n version=version,\n type=\"package\",\n specs=specs,\n )))\n\n return ToolRequirements.from_list(dependencies)\n\n\ndef get_container_from_software_requirements(use_biocontainers, builder):\n # type: (bool, HasReqsHints) -> Optional[Text]\n if use_biocontainers:\n ensure_galaxy_lib_available()\n from galaxy.tools.deps.containers import ContainerRegistry, AppInfo, ToolInfo, DOCKER_CONTAINER_TYPE\n app_info = AppInfo(\n involucro_auto_init=True,\n enable_beta_mulled_containers=True,\n container_image_cache_path=\".\",\n ) # type: AppInfo\n container_registry = ContainerRegistry(app_info) # type: ContainerRegistry\n requirements = get_dependencies(builder)\n tool_info = ToolInfo(requirements=requirements) # type: ToolInfo\n container_description = container_registry.find_best_container_description([DOCKER_CONTAINER_TYPE], tool_info)\n if container_description:\n return container_description.identifier\n\n return None\n\n\ndef ensure_galaxy_lib_available():\n # type: () -> None\n if not SOFTWARE_REQUIREMENTS_ENABLED:\n raise Exception(\"Optional Python library galaxy-lib not available, it is required for this configuration.\")\n", "path": "cwltool/software_requirements.py"}, {"content": "#!/usr/bin/env python\n\"\"\"Setup for the reference implementation of the CWL standards.\"\"\"\nfrom __future__ import print_function\nimport os\nimport sys\n\nimport setuptools.command.egg_info as egg_info_cmd\nfrom setuptools import setup\n\nSETUP_DIR = os.path.dirname(__file__)\nREADME = os.path.join(SETUP_DIR, 'README.rst')\n\ntry:\n import gittaggers\n\n Tagger = gittaggers.EggInfoFromGit\nexcept ImportError:\n Tagger = egg_info_cmd.egg_info\n\nNEEDS_PYTEST = {'pytest', 'test', 'ptr'}.intersection(sys.argv)\nPYTEST_RUNNER = ['pytest-runner', 'pytest-cov'] if NEEDS_PYTEST else []\n\nif sys.version_info < (3, 0):\n print(\"\"\"\nDEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020.\nPlease upgrade your Python as the Python 2.7 version of cwltool won't be\nmaintained after that date.\n\"\"\", file=sys.stderr)\n\nsetup(name='cwltool',\n version='1.0',\n description='Common workflow language reference implementation',\n long_description=open(README).read(),\n long_description_content_type=\"text/x-rst\",\n author='Common workflow language working group',\n author_email='common-workflow-language@googlegroups.com',\n url=\"https://github.com/common-workflow-language/cwltool\",\n download_url=\"https://github.com/common-workflow-language/cwltool\",\n # platforms='', # empty as is conveyed by the classifier below\n # license='', # empty as is conveyed by the classifier below\n packages=[\"cwltool\", 'cwltool.tests'],\n package_dir={'cwltool.tests': 'tests'},\n package_data={'cwltool': [\n 'schemas/v1.0/*.yml',\n 'schemas/v1.0/*.md',\n 'schemas/v1.0/salad/schema_salad/metaschema/*.yml',\n 'schemas/v1.0/salad/schema_salad/metaschema/*.md',\n 'schemas/v1.1.0-dev1/*.yml',\n 'schemas/v1.1.0-dev1/*.md',\n 'schemas/v1.1.0-dev1/salad/schema_salad/metaschema/*.yml',\n 'schemas/v1.1.0-dev1/salad/schema_salad/metaschema/*.md',\n 'schemas/v1.1/*.yml',\n 'schemas/v1.1/*.md',\n 
'schemas/v1.1/salad/schema_salad/metaschema/*.yml',\n 'schemas/v1.1/salad/schema_salad/metaschema/*.md',\n 'cwlNodeEngine.js',\n 'cwlNodeEngineJSConsole.js',\n 'extensions.yml',\n 'hello.simg']},\n include_package_data=True,\n install_requires=[\n 'setuptools',\n 'requests >= 2.6.1', # >= 2.6.1 to workaround\n # https://github.com/ionrock/cachecontrol/issues/137\n 'ruamel.yaml >= 0.12.4, <= 0.16',\n 'rdflib >= 4.2.2, < 4.3.0',\n 'shellescape >= 3.4.1, < 3.5',\n 'schema-salad >= 4.5, < 5',\n 'mypy-extensions',\n 'six >= 1.9.0', # >= 1.9.0 required by prov\n 'psutil',\n 'scandir',\n 'prov == 1.5.1',\n 'bagit >= 1.6.4',\n 'typing-extensions',\n 'coloredlogs',\n 'future >= 0.16',\n 'pathlib2 != 2.3.1'\n ],\n extras_require={\n ':os.name==\"posix\" and python_version<\"3.5\"': ['subprocess32 >= 3.5.0'],\n ':python_version<\"3.6\"': ['typing >= 3.5.3'],\n 'deps': [\"galaxy-lib >= 17.09.9, <= 18.9.2 \"],\n 'docs': [\"sphinx >= 2.2\", \"sphinx-rtd-theme\"],\n },\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4',\n setup_requires=PYTEST_RUNNER,\n test_suite='tests',\n tests_require=['pytest < 4.3.0', 'mock >= 2.0.0', 'pytest-mock >= 1.10.0',\n 'arcp >= 0.2.0', 'rdflib-jsonld >= 0.4.0'],\n entry_points={\n 'console_scripts': [\"cwltool=cwltool.main:run\"]\n },\n zip_safe=True,\n cmdclass={'egg_info': Tagger},\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Healthcare Industry',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: OS Independent',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: Microsoft :: Windows :: Windows 10',\n 'Operating System :: Microsoft :: Windows :: Windows 8.1',\n # 'Operating System :: Microsoft :: Windows :: Windows 8', # not tested\n # 'Operating System :: Microsoft :: Windows :: Windows 7', # not tested\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Bio-Informatics',\n 'Topic :: Scientific/Engineering :: Astronomy',\n 'Topic :: Scientific/Engineering :: Atmospheric Science',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Scientific/Engineering :: Medical Science Apps.',\n 'Topic :: System :: Distributed Computing',\n 'Topic :: Utilities',\n ]\n )\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"This module handles resolution of SoftwareRequirement hints.\n\nThis is accomplished mainly by adapting cwltool internals to galaxy-lib's\nconcept of \"dependencies\". Despite the name, galaxy-lib is a light weight\nlibrary that can be used to map SoftwareRequirements in all sorts of ways -\nHomebrew, Conda, custom scripts, environment modules. 
We'd be happy to find\nways to adapt new packages managers and such as well.\n\"\"\"\nfrom __future__ import absolute_import\n\nimport argparse # pylint: disable=unused-import\nimport os\nimport string\nfrom typing import Dict, List, MutableSequence, Optional\n\nfrom typing_extensions import Text # pylint: disable=unused-import\n# move to a regular typing import when Python 3.3-3.6 is no longer supported\n\nfrom .builder import Builder, HasReqsHints\ntry:\n from galaxy.tool_util.deps.requirements import ToolRequirement, ToolRequirements\n from galaxy.tool_util import deps\nexcept ImportError:\n ToolRequirement = None # type: ignore\n ToolRequirements = None # type: ignore\n deps = None # type: ignore\n\n\nSOFTWARE_REQUIREMENTS_ENABLED = deps is not None\n\nCOMMAND_WITH_DEPENDENCIES_TEMPLATE = string.Template(\"\"\"#!/bin/bash\n$handle_dependencies\npython \"run_job.py\" \"job.json\"\n\"\"\")\n\n\nclass DependenciesConfiguration(object):\n\n def __init__(self, args):\n # type: (argparse.Namespace) -> None\n \"\"\"Initialize.\"\"\"\n conf_file = getattr(args, \"beta_dependency_resolvers_configuration\", None)\n tool_dependency_dir = getattr(args, \"beta_dependencies_directory\", None)\n conda_dependencies = getattr(args, \"beta_conda_dependencies\", None)\n if conf_file is not None and os.path.exists(conf_file):\n self.use_tool_dependencies = True\n if tool_dependency_dir is None:\n tool_dependency_dir = os.path.abspath(os.path.dirname(conf_file))\n self.tool_dependency_dir = tool_dependency_dir\n self.dependency_resolvers_config_file = os.path.abspath(conf_file)\n elif conda_dependencies is not None:\n if tool_dependency_dir is None:\n tool_dependency_dir = os.path.abspath(\"./cwltool_deps\")\n self.tool_dependency_dir = tool_dependency_dir\n self.use_tool_dependencies = True\n self.dependency_resolvers_config_file = None\n else:\n self.use_tool_dependencies = False\n\n def build_job_script(self, builder, command):\n # type: (Builder, List[str]) -> Text\n ensure_galaxy_lib_available()\n resolution_config_dict = {\n 'use': self.use_tool_dependencies,\n 'default_base_path': self.tool_dependency_dir,\n }\n app_config = {\n 'conda_auto_install': True,\n 'conda_auto_init': True,\n }\n tool_dependency_manager = deps.build_dependency_manager(\n app_config_dict=app_config,\n resolution_config_dict=resolution_config_dict,\n conf_file=self.dependency_resolvers_config_file,\n ) # type: deps.DependencyManager\n dependencies = get_dependencies(builder)\n handle_dependencies = \"\" # str\n if dependencies:\n handle_dependencies = \"\\n\".join(\n tool_dependency_manager.dependency_shell_commands(\n dependencies, job_directory=builder.tmpdir))\n\n template_kwds = dict(handle_dependencies=handle_dependencies) # type: Dict[str, str]\n job_script = COMMAND_WITH_DEPENDENCIES_TEMPLATE.substitute(template_kwds)\n return job_script\n\n\ndef get_dependencies(builder): # type: (HasReqsHints) -> ToolRequirements\n (software_requirement, _) = builder.get_requirement(\"SoftwareRequirement\")\n dependencies = [] # type: List[ToolRequirement]\n if software_requirement and software_requirement.get(\"packages\"):\n packages = software_requirement.get(\"packages\")\n for package in packages:\n version = package.get(\"version\", None)\n if isinstance(version, MutableSequence):\n if version:\n version = version[0]\n else:\n version = None\n specs = [{\"uri\": s} for s in package.get(\"specs\", [])]\n dependencies.append(ToolRequirement.from_dict(dict(\n name=package[\"package\"].split(\"#\")[-1],\n version=version,\n 
type=\"package\",\n specs=specs,\n )))\n\n return ToolRequirements.from_list(dependencies)\n\n\ndef get_container_from_software_requirements(use_biocontainers, builder):\n # type: (bool, HasReqsHints) -> Optional[Text]\n if use_biocontainers:\n ensure_galaxy_lib_available()\n from galaxy.tool_util.deps.dependencies import AppInfo, ToolInfo\n from galaxy.tool_util.deps.containers import ContainerRegistry, DOCKER_CONTAINER_TYPE\n app_info = AppInfo(\n involucro_auto_init=True,\n enable_mulled_containers=True,\n container_image_cache_path=\".\",\n ) # type: AppInfo\n container_registry = ContainerRegistry(app_info) # type: ContainerRegistry\n requirements = get_dependencies(builder)\n tool_info = ToolInfo(requirements=requirements) # type: ToolInfo\n container_description = container_registry.find_best_container_description([DOCKER_CONTAINER_TYPE], tool_info)\n if container_description:\n return container_description.identifier\n\n return None\n\n\ndef ensure_galaxy_lib_available():\n # type: () -> None\n if not SOFTWARE_REQUIREMENTS_ENABLED:\n raise Exception(\"Optional Python library galaxy-lib not available, it is required for this configuration.\")\n", "path": "cwltool/software_requirements.py"}, {"content": "#!/usr/bin/env python\n\"\"\"Setup for the reference implementation of the CWL standards.\"\"\"\nfrom __future__ import print_function\nimport os\nimport sys\n\nimport setuptools.command.egg_info as egg_info_cmd\nfrom setuptools import setup\n\nSETUP_DIR = os.path.dirname(__file__)\nREADME = os.path.join(SETUP_DIR, 'README.rst')\n\ntry:\n import gittaggers\n\n Tagger = gittaggers.EggInfoFromGit\nexcept ImportError:\n Tagger = egg_info_cmd.egg_info\n\nNEEDS_PYTEST = {'pytest', 'test', 'ptr'}.intersection(sys.argv)\nPYTEST_RUNNER = ['pytest-runner', 'pytest-cov'] if NEEDS_PYTEST else []\n\nif sys.version_info < (3, 0):\n print(\"\"\"\nDEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020.\nPlease upgrade your Python as the Python 2.7 version of cwltool won't be\nmaintained after that date.\n\"\"\", file=sys.stderr)\n\nsetup(name='cwltool',\n version='1.0',\n description='Common workflow language reference implementation',\n long_description=open(README).read(),\n long_description_content_type=\"text/x-rst\",\n author='Common workflow language working group',\n author_email='common-workflow-language@googlegroups.com',\n url=\"https://github.com/common-workflow-language/cwltool\",\n download_url=\"https://github.com/common-workflow-language/cwltool\",\n # platforms='', # empty as is conveyed by the classifier below\n # license='', # empty as is conveyed by the classifier below\n packages=[\"cwltool\", 'cwltool.tests'],\n package_dir={'cwltool.tests': 'tests'},\n package_data={'cwltool': [\n 'schemas/v1.0/*.yml',\n 'schemas/v1.0/*.md',\n 'schemas/v1.0/salad/schema_salad/metaschema/*.yml',\n 'schemas/v1.0/salad/schema_salad/metaschema/*.md',\n 'schemas/v1.1.0-dev1/*.yml',\n 'schemas/v1.1.0-dev1/*.md',\n 'schemas/v1.1.0-dev1/salad/schema_salad/metaschema/*.yml',\n 'schemas/v1.1.0-dev1/salad/schema_salad/metaschema/*.md',\n 'schemas/v1.1/*.yml',\n 'schemas/v1.1/*.md',\n 'schemas/v1.1/salad/schema_salad/metaschema/*.yml',\n 'schemas/v1.1/salad/schema_salad/metaschema/*.md',\n 'cwlNodeEngine.js',\n 'cwlNodeEngineJSConsole.js',\n 'extensions.yml',\n 'hello.simg']},\n include_package_data=True,\n install_requires=[\n 'setuptools',\n 'requests >= 2.6.1', # >= 2.6.1 to workaround\n # https://github.com/ionrock/cachecontrol/issues/137\n 'ruamel.yaml >= 0.12.4, <= 0.16',\n 
'rdflib >= 4.2.2, < 4.3.0',\n 'shellescape >= 3.4.1, < 3.5',\n 'schema-salad >= 4.5, < 5',\n 'mypy-extensions',\n 'six >= 1.9.0', # >= 1.9.0 required by prov\n 'psutil',\n 'scandir',\n 'prov == 1.5.1',\n 'bagit >= 1.6.4',\n 'typing-extensions',\n 'coloredlogs',\n 'future >= 0.16',\n 'pathlib2 != 2.3.1'\n ],\n extras_require={\n ':os.name==\"posix\" and python_version<\"3.5\"': ['subprocess32 >= 3.5.0'],\n ':python_version<\"3.6\"': ['typing >= 3.5.3'],\n 'deps': [\"galaxy-tool-util\"],\n 'docs': [\"sphinx >= 2.2\", \"sphinx-rtd-theme\"],\n },\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4',\n setup_requires=PYTEST_RUNNER,\n test_suite='tests',\n tests_require=['pytest < 4.3.0', 'mock >= 2.0.0', 'pytest-mock >= 1.10.0',\n 'arcp >= 0.2.0', 'rdflib-jsonld >= 0.4.0'],\n entry_points={\n 'console_scripts': [\"cwltool=cwltool.main:run\"]\n },\n zip_safe=True,\n cmdclass={'egg_info': Tagger},\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Healthcare Industry',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: OS Independent',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: Microsoft :: Windows :: Windows 10',\n 'Operating System :: Microsoft :: Windows :: Windows 8.1',\n # 'Operating System :: Microsoft :: Windows :: Windows 8', # not tested\n # 'Operating System :: Microsoft :: Windows :: Windows 7', # not tested\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Bio-Informatics',\n 'Topic :: Scientific/Engineering :: Astronomy',\n 'Topic :: Scientific/Engineering :: Atmospheric Science',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Scientific/Engineering :: Medical Science Apps.',\n 'Topic :: System :: Distributed Computing',\n 'Topic :: Utilities',\n ]\n )\n", "path": "setup.py"}]} | 3,487 | 845 |
gh_patches_debug_5180 | rasdani/github-patches | git_diff | oppia__oppia-7075 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add the practice session experience to the topic viewer
Add an option in the topic viewer page that can start the practice session, based on the skills that are part of the topic.
Add the practice session experience to the topic viewer
Add an option in the topic viewer page that can start the practice session, based on the skills that are part of the topic.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/controllers/topic_viewer.py`
Content:
```
1 # Copyright 2018 The Oppia Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS-IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Controllers for the topic viewer page."""
16
17 from constants import constants
18 from core.controllers import acl_decorators
19 from core.controllers import base
20 from core.domain import story_services
21 from core.domain import topic_services
22 import feconf
23
24
25 class TopicViewerPage(base.BaseHandler):
26 """Renders the topic viewer page."""
27
28 @acl_decorators.can_access_topic_viewer_page
29 def get(self, _):
30 """Handles GET requests."""
31
32 if not constants.ENABLE_NEW_STRUCTURE_PLAYERS:
33 raise self.PageNotFoundException
34
35 self.render_template('dist/topic-viewer-page.mainpage.html')
36
37
38 class TopicPageDataHandler(base.BaseHandler):
39 """Manages the data that needs to be displayed to a learner on the topic
40 viewer page.
41 """
42 GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
43
44 @acl_decorators.can_access_topic_viewer_page
45 def get(self, topic_name):
46 """Handles GET requests."""
47
48 if not constants.ENABLE_NEW_STRUCTURE_PLAYERS:
49 raise self.PageNotFoundException
50
51 topic = topic_services.get_topic_by_name(topic_name)
52
53 canonical_story_summaries = [
54 story_services.get_story_summary_by_id(
55 canonical_story_id) for canonical_story_id
56 in topic.canonical_story_ids]
57
58 additional_story_summaries = [
59 story_services.get_story_summary_by_id(
60 additional_story_id) for additional_story_id
61 in topic.additional_story_ids]
62
63 canonical_story_dicts = [
64 summary.to_human_readable_dict() for summary
65 in canonical_story_summaries]
66
67 additional_story_dicts = [
68 summary.to_human_readable_dict() for summary
69 in additional_story_summaries]
70
71 uncategorized_skill_ids = topic.get_all_uncategorized_skill_ids()
72 subtopics = topic.get_all_subtopics()
73
74 self.values.update({
75 'topic_name': topic.name,
76 'canonical_story_dicts': canonical_story_dicts,
77 'additional_story_dicts': additional_story_dicts,
78 'uncategorized_skill_ids': uncategorized_skill_ids,
79 'subtopics': subtopics
80 })
81 self.render_json(self.values)
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/core/controllers/topic_viewer.py b/core/controllers/topic_viewer.py
--- a/core/controllers/topic_viewer.py
+++ b/core/controllers/topic_viewer.py
@@ -73,6 +73,7 @@
self.values.update({
'topic_name': topic.name,
+ 'topic_id': topic.id,
'canonical_story_dicts': canonical_story_dicts,
'additional_story_dicts': additional_story_dicts,
'uncategorized_skill_ids': uncategorized_skill_ids,
| {"golden_diff": "diff --git a/core/controllers/topic_viewer.py b/core/controllers/topic_viewer.py\n--- a/core/controllers/topic_viewer.py\n+++ b/core/controllers/topic_viewer.py\n@@ -73,6 +73,7 @@\n \n self.values.update({\n 'topic_name': topic.name,\n+ 'topic_id': topic.id,\n 'canonical_story_dicts': canonical_story_dicts,\n 'additional_story_dicts': additional_story_dicts,\n 'uncategorized_skill_ids': uncategorized_skill_ids,\n", "issue": "Add the practice session experience to the topic viewer\nAdd an option in the topic viewer page that can start the practice session, based on the skills that are part of the topic.\nAdd the practice session experience to the topic viewer\nAdd an option in the topic viewer page that can start the practice session, based on the skills that are part of the topic.\n", "before_files": [{"content": "# Copyright 2018 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Controllers for the topic viewer page.\"\"\"\n\nfrom constants import constants\nfrom core.controllers import acl_decorators\nfrom core.controllers import base\nfrom core.domain import story_services\nfrom core.domain import topic_services\nimport feconf\n\n\nclass TopicViewerPage(base.BaseHandler):\n \"\"\"Renders the topic viewer page.\"\"\"\n\n @acl_decorators.can_access_topic_viewer_page\n def get(self, _):\n \"\"\"Handles GET requests.\"\"\"\n\n if not constants.ENABLE_NEW_STRUCTURE_PLAYERS:\n raise self.PageNotFoundException\n\n self.render_template('dist/topic-viewer-page.mainpage.html')\n\n\nclass TopicPageDataHandler(base.BaseHandler):\n \"\"\"Manages the data that needs to be displayed to a learner on the topic\n viewer page.\n \"\"\"\n GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON\n\n @acl_decorators.can_access_topic_viewer_page\n def get(self, topic_name):\n \"\"\"Handles GET requests.\"\"\"\n\n if not constants.ENABLE_NEW_STRUCTURE_PLAYERS:\n raise self.PageNotFoundException\n\n topic = topic_services.get_topic_by_name(topic_name)\n\n canonical_story_summaries = [\n story_services.get_story_summary_by_id(\n canonical_story_id) for canonical_story_id\n in topic.canonical_story_ids]\n\n additional_story_summaries = [\n story_services.get_story_summary_by_id(\n additional_story_id) for additional_story_id\n in topic.additional_story_ids]\n\n canonical_story_dicts = [\n summary.to_human_readable_dict() for summary\n in canonical_story_summaries]\n\n additional_story_dicts = [\n summary.to_human_readable_dict() for summary\n in additional_story_summaries]\n\n uncategorized_skill_ids = topic.get_all_uncategorized_skill_ids()\n subtopics = topic.get_all_subtopics()\n\n self.values.update({\n 'topic_name': topic.name,\n 'canonical_story_dicts': canonical_story_dicts,\n 'additional_story_dicts': additional_story_dicts,\n 'uncategorized_skill_ids': uncategorized_skill_ids,\n 'subtopics': subtopics\n })\n self.render_json(self.values)\n", "path": "core/controllers/topic_viewer.py"}], "after_files": [{"content": "# Copyright 2018 The 
Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Controllers for the topic viewer page.\"\"\"\n\nfrom constants import constants\nfrom core.controllers import acl_decorators\nfrom core.controllers import base\nfrom core.domain import story_services\nfrom core.domain import topic_services\nimport feconf\n\n\nclass TopicViewerPage(base.BaseHandler):\n \"\"\"Renders the topic viewer page.\"\"\"\n\n @acl_decorators.can_access_topic_viewer_page\n def get(self, _):\n \"\"\"Handles GET requests.\"\"\"\n\n if not constants.ENABLE_NEW_STRUCTURE_PLAYERS:\n raise self.PageNotFoundException\n\n self.render_template('dist/topic-viewer-page.mainpage.html')\n\n\nclass TopicPageDataHandler(base.BaseHandler):\n \"\"\"Manages the data that needs to be displayed to a learner on the topic\n viewer page.\n \"\"\"\n GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON\n\n @acl_decorators.can_access_topic_viewer_page\n def get(self, topic_name):\n \"\"\"Handles GET requests.\"\"\"\n\n if not constants.ENABLE_NEW_STRUCTURE_PLAYERS:\n raise self.PageNotFoundException\n\n topic = topic_services.get_topic_by_name(topic_name)\n\n canonical_story_summaries = [\n story_services.get_story_summary_by_id(\n canonical_story_id) for canonical_story_id\n in topic.canonical_story_ids]\n\n additional_story_summaries = [\n story_services.get_story_summary_by_id(\n additional_story_id) for additional_story_id\n in topic.additional_story_ids]\n\n canonical_story_dicts = [\n summary.to_human_readable_dict() for summary\n in canonical_story_summaries]\n\n additional_story_dicts = [\n summary.to_human_readable_dict() for summary\n in additional_story_summaries]\n\n uncategorized_skill_ids = topic.get_all_uncategorized_skill_ids()\n subtopics = topic.get_all_subtopics()\n\n self.values.update({\n 'topic_name': topic.name,\n 'topic_id': topic.id,\n 'canonical_story_dicts': canonical_story_dicts,\n 'additional_story_dicts': additional_story_dicts,\n 'uncategorized_skill_ids': uncategorized_skill_ids,\n 'subtopics': subtopics\n })\n self.render_json(self.values)\n", "path": "core/controllers/topic_viewer.py"}]} | 1,058 | 99 |
gh_patches_debug_26429 | rasdani/github-patches | git_diff | mkdocs__mkdocs-2070 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deprecated --theme-dir is still listed as CLI option
Hey, I am busy migrating my project from v0.16.3 to v1.1 and it seems that the command line argument `-e / --theme-dir PATH` has been removed, I am defining a custom theme using the theme block and the `custom_dir` but the way we have things setup require us to use use the `--theme-dir` config as well, I don't suppose there is a replacement for that argument is there?
Thanks!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mkdocs/__main__.py`
Content:
```
1 #!/usr/bin/env python
2
3 import os
4 import sys
5 import logging
6 import click
7
8 # TODO: Remove this check at some point in the future.
9 # (also remove flake8's 'ignore E402' comments below)
10 if sys.version_info[0] < 3: # pragma: no cover
11 raise ImportError('A recent version of Python 3 is required.')
12
13 from mkdocs import __version__ # noqa: E402
14 from mkdocs import utils # noqa: E402
15 from mkdocs import exceptions # noqa: E402
16 from mkdocs import config # noqa: E402
17 from mkdocs.commands import build, gh_deploy, new, serve # noqa: E402
18
19 log = logging.getLogger(__name__)
20
21
22 class State:
23 ''' Maintain logging level.'''
24
25 def __init__(self, log_name='mkdocs', level=logging.INFO):
26 self.logger = logging.getLogger(log_name)
27 self.logger.propagate = False
28 stream = logging.StreamHandler()
29 formatter = logging.Formatter("%(levelname)-7s - %(message)s ")
30 stream.setFormatter(formatter)
31 self.logger.addHandler(stream)
32
33 self.logger.setLevel(level)
34
35
36 pass_state = click.make_pass_decorator(State, ensure=True)
37
38 clean_help = "Remove old files from the site_dir before building (the default)."
39 config_help = "Provide a specific MkDocs config"
40 dev_addr_help = ("IP address and port to serve documentation locally (default: "
41 "localhost:8000)")
42 strict_help = ("Enable strict mode. This will cause MkDocs to abort the build "
43 "on any warnings.")
44 theme_dir_help = "The theme directory to use when building your documentation."
45 theme_help = "The theme to use when building your documentation."
46 theme_choices = utils.get_theme_names()
47 site_dir_help = "The directory to output the result of the documentation build."
48 use_directory_urls_help = "Use directory URLs when building pages (the default)."
49 reload_help = "Enable the live reloading in the development server (this is the default)"
50 no_reload_help = "Disable the live reloading in the development server."
51 dirty_reload_help = "Enable the live reloading in the development server, but only re-build files that have changed"
52 commit_message_help = ("A commit message to use when committing to the "
53 "Github Pages remote branch. Commit {sha} and MkDocs {version} are available as expansions")
54 remote_branch_help = ("The remote branch to commit to for Github Pages. This "
55 "overrides the value specified in config")
56 remote_name_help = ("The remote name to commit to for Github Pages. This "
57 "overrides the value specified in config")
58 force_help = "Force the push to the repository."
59 ignore_version_help = "Ignore check that build is not being deployed with an older version of MkDocs."
60
61
62 def add_options(opts):
63 def inner(f):
64 for i in reversed(opts):
65 f = i(f)
66 return f
67
68 return inner
69
70
71 def verbose_option(f):
72 def callback(ctx, param, value):
73 state = ctx.ensure_object(State)
74 if value:
75 state.logger.setLevel(logging.DEBUG)
76 return click.option('-v', '--verbose',
77 is_flag=True,
78 expose_value=False,
79 help='Enable verbose output',
80 callback=callback)(f)
81
82
83 def quiet_option(f):
84 def callback(ctx, param, value):
85 state = ctx.ensure_object(State)
86 if value:
87 state.logger.setLevel(logging.ERROR)
88 return click.option('-q', '--quiet',
89 is_flag=True,
90 expose_value=False,
91 help='Silence warnings',
92 callback=callback)(f)
93
94
95 common_options = add_options([quiet_option, verbose_option])
96 common_config_options = add_options([
97 click.option('-f', '--config-file', type=click.File('rb'), help=config_help),
98 # Don't override config value if user did not specify --strict flag
99 # Conveniently, load_config drops None values
100 click.option('-s', '--strict', is_flag=True, default=None, help=strict_help),
101 click.option('-t', '--theme', type=click.Choice(theme_choices), help=theme_help),
102 click.option('-e', '--theme-dir', type=click.Path(), help=theme_dir_help),
103 # As with --strict, set the default to None so that this doesn't incorrectly
104 # override the config file
105 click.option('--use-directory-urls/--no-directory-urls', is_flag=True, default=None, help=use_directory_urls_help)
106 ])
107
108 pgk_dir = os.path.dirname(os.path.abspath(__file__))
109
110
111 @click.group(context_settings={'help_option_names': ['-h', '--help']})
112 @click.version_option(
113 '{} from {} (Python {})'.format(__version__, pgk_dir, sys.version[:3]),
114 '-V', '--version')
115 @common_options
116 def cli():
117 """
118 MkDocs - Project documentation with Markdown.
119 """
120
121
122 @cli.command(name="serve")
123 @click.option('-a', '--dev-addr', help=dev_addr_help, metavar='<IP:PORT>')
124 @click.option('--livereload', 'livereload', flag_value='livereload', help=reload_help, default=True)
125 @click.option('--no-livereload', 'livereload', flag_value='no-livereload', help=no_reload_help)
126 @click.option('--dirtyreload', 'livereload', flag_value='dirty', help=dirty_reload_help)
127 @common_config_options
128 @common_options
129 def serve_command(dev_addr, livereload, **kwargs):
130 """Run the builtin development server"""
131
132 logging.getLogger('tornado').setLevel(logging.WARNING)
133
134 try:
135 serve.serve(
136 dev_addr=dev_addr,
137 livereload=livereload,
138 **kwargs
139 )
140 except (exceptions.ConfigurationError, OSError) as e: # pragma: no cover
141 # Avoid ugly, unhelpful traceback
142 raise SystemExit('\n' + str(e))
143
144
145 @cli.command(name="build")
146 @click.option('-c', '--clean/--dirty', is_flag=True, default=True, help=clean_help)
147 @common_config_options
148 @click.option('-d', '--site-dir', type=click.Path(), help=site_dir_help)
149 @common_options
150 def build_command(clean, **kwargs):
151 """Build the MkDocs documentation"""
152
153 try:
154 build.build(config.load_config(**kwargs), dirty=not clean)
155 except exceptions.ConfigurationError as e: # pragma: no cover
156 # Avoid ugly, unhelpful traceback
157 raise SystemExit('\n' + str(e))
158
159
160 @cli.command(name="gh-deploy")
161 @click.option('-c', '--clean/--dirty', is_flag=True, default=True, help=clean_help)
162 @click.option('-m', '--message', help=commit_message_help)
163 @click.option('-b', '--remote-branch', help=remote_branch_help)
164 @click.option('-r', '--remote-name', help=remote_name_help)
165 @click.option('--force', is_flag=True, help=force_help)
166 @click.option('--ignore-version', is_flag=True, help=ignore_version_help)
167 @common_config_options
168 @click.option('-d', '--site-dir', type=click.Path(), help=site_dir_help)
169 @common_options
170 def gh_deploy_command(clean, message, remote_branch, remote_name, force, ignore_version, **kwargs):
171 """Deploy your documentation to GitHub Pages"""
172 try:
173 cfg = config.load_config(
174 remote_branch=remote_branch,
175 remote_name=remote_name,
176 **kwargs
177 )
178 build.build(cfg, dirty=not clean)
179 gh_deploy.gh_deploy(cfg, message=message, force=force, ignore_version=ignore_version)
180 except exceptions.ConfigurationError as e: # pragma: no cover
181 # Avoid ugly, unhelpful traceback
182 raise SystemExit('\n' + str(e))
183
184
185 @cli.command(name="new")
186 @click.argument("project_directory")
187 @common_options
188 def new_command(project_directory):
189 """Create a new MkDocs project"""
190 new.new(project_directory)
191
192
193 if __name__ == '__main__': # pragma: no cover
194 cli()
195
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mkdocs/__main__.py b/mkdocs/__main__.py
--- a/mkdocs/__main__.py
+++ b/mkdocs/__main__.py
@@ -41,7 +41,6 @@
"localhost:8000)")
strict_help = ("Enable strict mode. This will cause MkDocs to abort the build "
"on any warnings.")
-theme_dir_help = "The theme directory to use when building your documentation."
theme_help = "The theme to use when building your documentation."
theme_choices = utils.get_theme_names()
site_dir_help = "The directory to output the result of the documentation build."
@@ -99,7 +98,6 @@
# Conveniently, load_config drops None values
click.option('-s', '--strict', is_flag=True, default=None, help=strict_help),
click.option('-t', '--theme', type=click.Choice(theme_choices), help=theme_help),
- click.option('-e', '--theme-dir', type=click.Path(), help=theme_dir_help),
# As with --strict, set the default to None so that this doesn't incorrectly
# override the config file
click.option('--use-directory-urls/--no-directory-urls', is_flag=True, default=None, help=use_directory_urls_help)
| {"golden_diff": "diff --git a/mkdocs/__main__.py b/mkdocs/__main__.py\n--- a/mkdocs/__main__.py\n+++ b/mkdocs/__main__.py\n@@ -41,7 +41,6 @@\n \"localhost:8000)\")\n strict_help = (\"Enable strict mode. This will cause MkDocs to abort the build \"\n \"on any warnings.\")\n-theme_dir_help = \"The theme directory to use when building your documentation.\"\n theme_help = \"The theme to use when building your documentation.\"\n theme_choices = utils.get_theme_names()\n site_dir_help = \"The directory to output the result of the documentation build.\"\n@@ -99,7 +98,6 @@\n # Conveniently, load_config drops None values\n click.option('-s', '--strict', is_flag=True, default=None, help=strict_help),\n click.option('-t', '--theme', type=click.Choice(theme_choices), help=theme_help),\n- click.option('-e', '--theme-dir', type=click.Path(), help=theme_dir_help),\n # As with --strict, set the default to None so that this doesn't incorrectly\n # override the config file\n click.option('--use-directory-urls/--no-directory-urls', is_flag=True, default=None, help=use_directory_urls_help)\n", "issue": "Deprecated --theme-dir is still listed as CLI option\nHey, I am busy migrating my project from v0.16.3 to v1.1 and it seems that the command line argument `-e / --theme-dir PATH` has been removed, I am defining a custom theme using the theme block and the `custom_dir` but the way we have things setup require us to use use the `--theme-dir` config as well, I don't suppose there is a replacement for that argument is there?\r\n\r\nThanks!\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nimport sys\nimport logging\nimport click\n\n# TODO: Remove this check at some point in the future.\n# (also remove flake8's 'ignore E402' comments below)\nif sys.version_info[0] < 3: # pragma: no cover\n raise ImportError('A recent version of Python 3 is required.')\n\nfrom mkdocs import __version__ # noqa: E402\nfrom mkdocs import utils # noqa: E402\nfrom mkdocs import exceptions # noqa: E402\nfrom mkdocs import config # noqa: E402\nfrom mkdocs.commands import build, gh_deploy, new, serve # noqa: E402\n\nlog = logging.getLogger(__name__)\n\n\nclass State:\n ''' Maintain logging level.'''\n\n def __init__(self, log_name='mkdocs', level=logging.INFO):\n self.logger = logging.getLogger(log_name)\n self.logger.propagate = False\n stream = logging.StreamHandler()\n formatter = logging.Formatter(\"%(levelname)-7s - %(message)s \")\n stream.setFormatter(formatter)\n self.logger.addHandler(stream)\n\n self.logger.setLevel(level)\n\n\npass_state = click.make_pass_decorator(State, ensure=True)\n\nclean_help = \"Remove old files from the site_dir before building (the default).\"\nconfig_help = \"Provide a specific MkDocs config\"\ndev_addr_help = (\"IP address and port to serve documentation locally (default: \"\n \"localhost:8000)\")\nstrict_help = (\"Enable strict mode. 
This will cause MkDocs to abort the build \"\n \"on any warnings.\")\ntheme_dir_help = \"The theme directory to use when building your documentation.\"\ntheme_help = \"The theme to use when building your documentation.\"\ntheme_choices = utils.get_theme_names()\nsite_dir_help = \"The directory to output the result of the documentation build.\"\nuse_directory_urls_help = \"Use directory URLs when building pages (the default).\"\nreload_help = \"Enable the live reloading in the development server (this is the default)\"\nno_reload_help = \"Disable the live reloading in the development server.\"\ndirty_reload_help = \"Enable the live reloading in the development server, but only re-build files that have changed\"\ncommit_message_help = (\"A commit message to use when committing to the \"\n \"Github Pages remote branch. Commit {sha} and MkDocs {version} are available as expansions\")\nremote_branch_help = (\"The remote branch to commit to for Github Pages. This \"\n \"overrides the value specified in config\")\nremote_name_help = (\"The remote name to commit to for Github Pages. This \"\n \"overrides the value specified in config\")\nforce_help = \"Force the push to the repository.\"\nignore_version_help = \"Ignore check that build is not being deployed with an older version of MkDocs.\"\n\n\ndef add_options(opts):\n def inner(f):\n for i in reversed(opts):\n f = i(f)\n return f\n\n return inner\n\n\ndef verbose_option(f):\n def callback(ctx, param, value):\n state = ctx.ensure_object(State)\n if value:\n state.logger.setLevel(logging.DEBUG)\n return click.option('-v', '--verbose',\n is_flag=True,\n expose_value=False,\n help='Enable verbose output',\n callback=callback)(f)\n\n\ndef quiet_option(f):\n def callback(ctx, param, value):\n state = ctx.ensure_object(State)\n if value:\n state.logger.setLevel(logging.ERROR)\n return click.option('-q', '--quiet',\n is_flag=True,\n expose_value=False,\n help='Silence warnings',\n callback=callback)(f)\n\n\ncommon_options = add_options([quiet_option, verbose_option])\ncommon_config_options = add_options([\n click.option('-f', '--config-file', type=click.File('rb'), help=config_help),\n # Don't override config value if user did not specify --strict flag\n # Conveniently, load_config drops None values\n click.option('-s', '--strict', is_flag=True, default=None, help=strict_help),\n click.option('-t', '--theme', type=click.Choice(theme_choices), help=theme_help),\n click.option('-e', '--theme-dir', type=click.Path(), help=theme_dir_help),\n # As with --strict, set the default to None so that this doesn't incorrectly\n # override the config file\n click.option('--use-directory-urls/--no-directory-urls', is_flag=True, default=None, help=use_directory_urls_help)\n])\n\npgk_dir = os.path.dirname(os.path.abspath(__file__))\n\n\n@click.group(context_settings={'help_option_names': ['-h', '--help']})\n@click.version_option(\n '{} from {} (Python {})'.format(__version__, pgk_dir, sys.version[:3]),\n '-V', '--version')\n@common_options\ndef cli():\n \"\"\"\n MkDocs - Project documentation with Markdown.\n \"\"\"\n\n\n@cli.command(name=\"serve\")\n@click.option('-a', '--dev-addr', help=dev_addr_help, metavar='<IP:PORT>')\n@click.option('--livereload', 'livereload', flag_value='livereload', help=reload_help, default=True)\n@click.option('--no-livereload', 'livereload', flag_value='no-livereload', help=no_reload_help)\n@click.option('--dirtyreload', 'livereload', flag_value='dirty', help=dirty_reload_help)\n@common_config_options\n@common_options\ndef 
serve_command(dev_addr, livereload, **kwargs):\n \"\"\"Run the builtin development server\"\"\"\n\n logging.getLogger('tornado').setLevel(logging.WARNING)\n\n try:\n serve.serve(\n dev_addr=dev_addr,\n livereload=livereload,\n **kwargs\n )\n except (exceptions.ConfigurationError, OSError) as e: # pragma: no cover\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n\n\n@cli.command(name=\"build\")\n@click.option('-c', '--clean/--dirty', is_flag=True, default=True, help=clean_help)\n@common_config_options\n@click.option('-d', '--site-dir', type=click.Path(), help=site_dir_help)\n@common_options\ndef build_command(clean, **kwargs):\n \"\"\"Build the MkDocs documentation\"\"\"\n\n try:\n build.build(config.load_config(**kwargs), dirty=not clean)\n except exceptions.ConfigurationError as e: # pragma: no cover\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n\n\n@cli.command(name=\"gh-deploy\")\n@click.option('-c', '--clean/--dirty', is_flag=True, default=True, help=clean_help)\n@click.option('-m', '--message', help=commit_message_help)\n@click.option('-b', '--remote-branch', help=remote_branch_help)\n@click.option('-r', '--remote-name', help=remote_name_help)\n@click.option('--force', is_flag=True, help=force_help)\n@click.option('--ignore-version', is_flag=True, help=ignore_version_help)\n@common_config_options\n@click.option('-d', '--site-dir', type=click.Path(), help=site_dir_help)\n@common_options\ndef gh_deploy_command(clean, message, remote_branch, remote_name, force, ignore_version, **kwargs):\n \"\"\"Deploy your documentation to GitHub Pages\"\"\"\n try:\n cfg = config.load_config(\n remote_branch=remote_branch,\n remote_name=remote_name,\n **kwargs\n )\n build.build(cfg, dirty=not clean)\n gh_deploy.gh_deploy(cfg, message=message, force=force, ignore_version=ignore_version)\n except exceptions.ConfigurationError as e: # pragma: no cover\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n\n\n@cli.command(name=\"new\")\n@click.argument(\"project_directory\")\n@common_options\ndef new_command(project_directory):\n \"\"\"Create a new MkDocs project\"\"\"\n new.new(project_directory)\n\n\nif __name__ == '__main__': # pragma: no cover\n cli()\n", "path": "mkdocs/__main__.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport os\nimport sys\nimport logging\nimport click\n\n# TODO: Remove this check at some point in the future.\n# (also remove flake8's 'ignore E402' comments below)\nif sys.version_info[0] < 3: # pragma: no cover\n raise ImportError('A recent version of Python 3 is required.')\n\nfrom mkdocs import __version__ # noqa: E402\nfrom mkdocs import utils # noqa: E402\nfrom mkdocs import exceptions # noqa: E402\nfrom mkdocs import config # noqa: E402\nfrom mkdocs.commands import build, gh_deploy, new, serve # noqa: E402\n\nlog = logging.getLogger(__name__)\n\n\nclass State:\n ''' Maintain logging level.'''\n\n def __init__(self, log_name='mkdocs', level=logging.INFO):\n self.logger = logging.getLogger(log_name)\n self.logger.propagate = False\n stream = logging.StreamHandler()\n formatter = logging.Formatter(\"%(levelname)-7s - %(message)s \")\n stream.setFormatter(formatter)\n self.logger.addHandler(stream)\n\n self.logger.setLevel(level)\n\n\npass_state = click.make_pass_decorator(State, ensure=True)\n\nclean_help = \"Remove old files from the site_dir before building (the default).\"\nconfig_help = \"Provide a specific MkDocs config\"\ndev_addr_help = (\"IP address and port to serve documentation locally 
(default: \"\n \"localhost:8000)\")\nstrict_help = (\"Enable strict mode. This will cause MkDocs to abort the build \"\n \"on any warnings.\")\ntheme_help = \"The theme to use when building your documentation.\"\ntheme_choices = utils.get_theme_names()\nsite_dir_help = \"The directory to output the result of the documentation build.\"\nuse_directory_urls_help = \"Use directory URLs when building pages (the default).\"\nreload_help = \"Enable the live reloading in the development server (this is the default)\"\nno_reload_help = \"Disable the live reloading in the development server.\"\ndirty_reload_help = \"Enable the live reloading in the development server, but only re-build files that have changed\"\ncommit_message_help = (\"A commit message to use when committing to the \"\n \"Github Pages remote branch. Commit {sha} and MkDocs {version} are available as expansions\")\nremote_branch_help = (\"The remote branch to commit to for Github Pages. This \"\n \"overrides the value specified in config\")\nremote_name_help = (\"The remote name to commit to for Github Pages. This \"\n \"overrides the value specified in config\")\nforce_help = \"Force the push to the repository.\"\nignore_version_help = \"Ignore check that build is not being deployed with an older version of MkDocs.\"\n\n\ndef add_options(opts):\n def inner(f):\n for i in reversed(opts):\n f = i(f)\n return f\n\n return inner\n\n\ndef verbose_option(f):\n def callback(ctx, param, value):\n state = ctx.ensure_object(State)\n if value:\n state.logger.setLevel(logging.DEBUG)\n return click.option('-v', '--verbose',\n is_flag=True,\n expose_value=False,\n help='Enable verbose output',\n callback=callback)(f)\n\n\ndef quiet_option(f):\n def callback(ctx, param, value):\n state = ctx.ensure_object(State)\n if value:\n state.logger.setLevel(logging.ERROR)\n return click.option('-q', '--quiet',\n is_flag=True,\n expose_value=False,\n help='Silence warnings',\n callback=callback)(f)\n\n\ncommon_options = add_options([quiet_option, verbose_option])\ncommon_config_options = add_options([\n click.option('-f', '--config-file', type=click.File('rb'), help=config_help),\n # Don't override config value if user did not specify --strict flag\n # Conveniently, load_config drops None values\n click.option('-s', '--strict', is_flag=True, default=None, help=strict_help),\n click.option('-t', '--theme', type=click.Choice(theme_choices), help=theme_help),\n # As with --strict, set the default to None so that this doesn't incorrectly\n # override the config file\n click.option('--use-directory-urls/--no-directory-urls', is_flag=True, default=None, help=use_directory_urls_help)\n])\n\npgk_dir = os.path.dirname(os.path.abspath(__file__))\n\n\n@click.group(context_settings={'help_option_names': ['-h', '--help']})\n@click.version_option(\n '{} from {} (Python {})'.format(__version__, pgk_dir, sys.version[:3]),\n '-V', '--version')\n@common_options\ndef cli():\n \"\"\"\n MkDocs - Project documentation with Markdown.\n \"\"\"\n\n\n@cli.command(name=\"serve\")\n@click.option('-a', '--dev-addr', help=dev_addr_help, metavar='<IP:PORT>')\n@click.option('--livereload', 'livereload', flag_value='livereload', help=reload_help, default=True)\n@click.option('--no-livereload', 'livereload', flag_value='no-livereload', help=no_reload_help)\n@click.option('--dirtyreload', 'livereload', flag_value='dirty', help=dirty_reload_help)\n@common_config_options\n@common_options\ndef serve_command(dev_addr, livereload, **kwargs):\n \"\"\"Run the builtin development server\"\"\"\n\n 
logging.getLogger('tornado').setLevel(logging.WARNING)\n\n try:\n serve.serve(\n dev_addr=dev_addr,\n livereload=livereload,\n **kwargs\n )\n except (exceptions.ConfigurationError, OSError) as e: # pragma: no cover\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n\n\n@cli.command(name=\"build\")\n@click.option('-c', '--clean/--dirty', is_flag=True, default=True, help=clean_help)\n@common_config_options\n@click.option('-d', '--site-dir', type=click.Path(), help=site_dir_help)\n@common_options\ndef build_command(clean, **kwargs):\n \"\"\"Build the MkDocs documentation\"\"\"\n\n try:\n build.build(config.load_config(**kwargs), dirty=not clean)\n except exceptions.ConfigurationError as e: # pragma: no cover\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n\n\n@cli.command(name=\"gh-deploy\")\n@click.option('-c', '--clean/--dirty', is_flag=True, default=True, help=clean_help)\n@click.option('-m', '--message', help=commit_message_help)\n@click.option('-b', '--remote-branch', help=remote_branch_help)\n@click.option('-r', '--remote-name', help=remote_name_help)\n@click.option('--force', is_flag=True, help=force_help)\n@click.option('--ignore-version', is_flag=True, help=ignore_version_help)\n@common_config_options\n@click.option('-d', '--site-dir', type=click.Path(), help=site_dir_help)\n@common_options\ndef gh_deploy_command(clean, message, remote_branch, remote_name, force, ignore_version, **kwargs):\n \"\"\"Deploy your documentation to GitHub Pages\"\"\"\n try:\n cfg = config.load_config(\n remote_branch=remote_branch,\n remote_name=remote_name,\n **kwargs\n )\n build.build(cfg, dirty=not clean)\n gh_deploy.gh_deploy(cfg, message=message, force=force, ignore_version=ignore_version)\n except exceptions.ConfigurationError as e: # pragma: no cover\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n\n\n@cli.command(name=\"new\")\n@click.argument(\"project_directory\")\n@common_options\ndef new_command(project_directory):\n \"\"\"Create a new MkDocs project\"\"\"\n new.new(project_directory)\n\n\nif __name__ == '__main__': # pragma: no cover\n cli()\n", "path": "mkdocs/__main__.py"}]} | 2,578 | 279 |