title | diff | body | url | created_at | closed_at | merged_at | updated_at | diff_len | repo_name | __index_level_0__ |
---|---|---|---|---|---|---|---|---|---|---|
shell calls sys.__interactivehook__ | diff --git a/CHANGES.rst b/CHANGES.rst
index 8c615d5fb8..2657dae910 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -70,6 +70,8 @@ Unreleased
- Support async views, error handlers, before and after request, and
teardown functions. :pr:`3412`
- Support nesting blueprints. :issue:`593, 1548`, :pr:`3923`
+- ``flask shell`` sets up tab and history completion like the default
+ ``python`` shell if ``readline`` is installed. :issue:`3941`
Version 1.1.2
diff --git a/src/flask/cli.py b/src/flask/cli.py
index c7b8813508..a5971d6065 100644
--- a/src/flask/cli.py
+++ b/src/flask/cli.py
@@ -887,6 +887,24 @@ def shell_command():
ctx.update(app.make_shell_context())
+ # Site, customize, or startup script can set a hook to call when
+ # entering interactive mode. The default one sets up readline with
+ # tab and history completion.
+ interactive_hook = getattr(sys, "__interactivehook__", None)
+
+ if interactive_hook is not None:
+ try:
+ import readline
+ from rlcompleter import Completer
+ except ImportError:
+ pass
+ else:
+ # rlcompleter uses __main__.__dict__ by default, which is
+ # flask.__main__. Use the shell context instead.
+ readline.set_completer(Completer(ctx).complete)
+
+ interactive_hook()
+
code.interact(banner=banner, local=ctx)
| This will set up readline tab and history completion by default.
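For readers outside the diff, a minimal standalone sketch of the mechanism the patch relies on; the dict stands in for Flask's shell context, and `parse_and_bind` stands in for what `sys.__interactivehook__` normally configures:

```python
# Sketch, not part of the PR: point rlcompleter at a custom namespace so tab
# completion works on the shell context instead of __main__.__dict__.
import code

try:
    import readline
    from rlcompleter import Completer
except ImportError:
    readline = None  # e.g. on Windows without a readline implementation

ctx = {"app": object()}  # stand-in for Flask's shell context

if readline is not None:
    # Completer(namespace) completes names from that namespace.
    readline.set_completer(Completer(ctx).complete)
    # Normally set up by sys.__interactivehook__ via site.py.
    readline.parse_and_bind("tab: complete")

code.interact(local=ctx)
```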
- fixes #3941
Checklist:
- [x] Add tests that demonstrate the correct behavior of the change. Tests should fail without the change.
- [x] Add or update relevant docs, in the docs folder and in code.
- [x] Add an entry in `CHANGES.rst` summarizing the change and linking to the issue.
- [x] Add `.. versionchanged::` entries in any relevant code docs.
- [x] Run `pre-commit` hooks and fix any issues.
- [x] Run `pytest` and `tox`, no tests failed.
| https://api.github.com/repos/pallets/flask/pulls/3960 | 2021-04-14T17:03:05Z | 2021-04-14T17:06:50Z | 2021-04-14T17:06:50Z | 2021-04-29T00:06:11Z | 398 | pallets/flask | 20,866 |
[toutv] fix info extraction(closes #1792)(closes #2082) | diff --git a/youtube_dl/extractor/radiocanada.py b/youtube_dl/extractor/radiocanada.py
index 4f05bbddc6d..4a3f40ee5c2 100644
--- a/youtube_dl/extractor/radiocanada.py
+++ b/youtube_dl/extractor/radiocanada.py
@@ -22,13 +22,13 @@ class RadioCanadaIE(InfoExtractor):
'url': 'http://ici.radio-canada.ca/widgets/mediaconsole/medianet/7184272',
'info_dict': {
'id': '7184272',
- 'ext': 'flv',
+ 'ext': 'mp4',
'title': 'Le parcours du tireur capté sur vidéo',
'description': 'Images des caméras de surveillance fournies par la GRC montrant le parcours du tireur d\'Ottawa',
'upload_date': '20141023',
},
'params': {
- # rtmp download
+ # m3u8 download
'skip_download': True,
},
}
@@ -36,11 +36,14 @@ class RadioCanadaIE(InfoExtractor):
def _real_extract(self, url):
app_code, video_id = re.match(self._VALID_URL, url).groups()
+ device_types = ['ipad']
+ if app_code != 'toutv':
+ device_types.append('flash')
+
formats = []
- # TODO: extract m3u8 and f4m formats
- # m3u8 formats can be extracted using ipad device_type return 403 error code when ffmpeg try to download segements
+ # TODO: extract f4m formats
# f4m formats can be extracted using flashhd device_type but they produce unplayable file
- for device_type in ('flash',):
+ for device_type in device_types:
v_data = self._download_xml(
'http://api.radio-canada.ca/validationMedia/v1/Validation.ashx',
video_id, note='Downloading %s XML' % device_type, query={
@@ -115,13 +118,13 @@ class RadioCanadaAudioVideoIE(InfoExtractor):
'url': 'http://ici.radio-canada.ca/audio-video/media-7527184/barack-obama-au-vietnam',
'info_dict': {
'id': '7527184',
- 'ext': 'flv',
+ 'ext': 'mp4',
'title': 'Barack Obama au Vietnam',
'description': 'Les États-Unis lèvent l\'embargo sur la vente d\'armes qui datait de la guerre du Vietnam',
'upload_date': '20160523',
},
'params': {
- # rtmp download
+ # m3u8 download
'skip_download': True,
},
}
diff --git a/youtube_dl/extractor/toutv.py b/youtube_dl/extractor/toutv.py
index 4797d1310aa..54c2d0aa6c0 100644
--- a/youtube_dl/extractor/toutv.py
+++ b/youtube_dl/extractor/toutv.py
@@ -1,74 +1,41 @@
# coding: utf-8
from __future__ import unicode_literals
-import re
-
from .common import InfoExtractor
-from ..utils import (
- ExtractorError,
- unified_strdate,
-)
+from ..utils import int_or_none
class TouTvIE(InfoExtractor):
IE_NAME = 'tou.tv'
- _VALID_URL = r'https?://www\.tou\.tv/(?P<id>[a-zA-Z0-9_-]+(?:/(?P<episode>S[0-9]+E[0-9]+)))'
+ _VALID_URL = r'https?://ici\.tou\.tv/(?P<id>[a-zA-Z0-9_-]+/S[0-9]+E[0-9]+)'
_TEST = {
- 'url': 'http://www.tou.tv/30-vies/S04E41',
+ 'url': 'http://ici.tou.tv/garfield-tout-court/S2015E17',
'info_dict': {
- 'id': '30-vies_S04E41',
+ 'id': '122017',
'ext': 'mp4',
- 'title': '30 vies Saison 4 / Épisode 41',
- 'description': 'md5:da363002db82ccbe4dafeb9cab039b09',
- 'age_limit': 8,
- 'uploader': 'Groupe des Nouveaux Médias',
- 'duration': 1296,
- 'upload_date': '20131118',
- 'thumbnail': 'http://static.tou.tv/medias/images/2013-11-18_19_00_00_30VIES_0341_01_L.jpeg',
+ 'title': 'Saison 2015 Épisode 17',
+ 'description': 'La photo de famille 2',
+ 'upload_date': '20100717',
},
'params': {
- 'skip_download': True, # Requires rtmpdump
+ # m3u8 download
+ 'skip_download': True,
},
- 'skip': 'Only available in Canada'
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
- webpage = self._download_webpage(url, video_id)
-
- mediaId = self._search_regex(
- r'"idMedia":\s*"([^"]+)"', webpage, 'media ID')
-
- streams_url = 'http://release.theplatform.com/content.select?pid=' + mediaId
- streams_doc = self._download_xml(
- streams_url, video_id, note='Downloading stream list')
-
- video_url = next(n.text
- for n in streams_doc.findall('.//choice/url')
- if '//ad.doubleclick' not in n.text)
- if video_url.endswith('/Unavailable.flv'):
- raise ExtractorError(
- 'Access to this video is blocked from outside of Canada',
- expected=True)
-
- duration_str = self._html_search_meta(
- 'video:duration', webpage, 'duration')
- duration = int(duration_str) if duration_str else None
- upload_date_str = self._html_search_meta(
- 'video:release_date', webpage, 'upload date')
- upload_date = unified_strdate(upload_date_str) if upload_date_str else None
+ path = self._match_id(url)
+ metadata = self._download_json('http://ici.tou.tv/presentation/%s' % path, path)
+ video_id = metadata['IdMedia']
+ details = metadata['Details']
+ title = details['OriginalTitle']
return {
+ '_type': 'url_transparent',
+ 'url': 'radiocanada:%s:%s' % (metadata.get('AppCode', 'toutv'), video_id),
'id': video_id,
- 'title': self._og_search_title(webpage),
- 'url': video_url,
- 'description': self._og_search_description(webpage),
- 'uploader': self._dc_search_uploader(webpage),
- 'thumbnail': self._og_search_thumbnail(webpage),
- 'age_limit': self._media_rating_search(webpage),
- 'duration': duration,
- 'upload_date': upload_date,
- 'ext': 'mp4',
+ 'title': title,
+ 'thumbnail': details.get('ImageUrl'),
+ 'duration': int_or_none(details.get('LengthInSeconds')),
}
| There is a problem with the RTMP download; I don't really know what's missing.
I created the PR even though it has this problem, so it's easier for others to modify and test.
| https://api.github.com/repos/ytdl-org/youtube-dl/pulls/9597 | 2016-05-24T15:17:25Z | 2016-07-07T00:51:01Z | 2016-07-07T00:51:01Z | 2016-07-07T00:51:05Z | 1,731 | ytdl-org/youtube-dl | 50,243 |
update save model func | diff --git a/PPOCRLabel/libs/shape.py b/PPOCRLabel/libs/shape.py
index 18cc4a8e76..97e2eb7238 100644
--- a/PPOCRLabel/libs/shape.py
+++ b/PPOCRLabel/libs/shape.py
@@ -48,6 +48,7 @@ class Shape(object):
def __init__(self, label=None, line_color=None, difficult=False, key_cls="None", paintLabel=False):
self.label = label
+ self.idx = 0
self.points = []
self.fill = False
self.selected = False
diff --git "a/applications/\350\275\273\351\207\217\347\272\247\350\275\246\347\211\214\350\257\206\345\210\253.md" "b/applications/\350\275\273\351\207\217\347\272\247\350\275\246\347\211\214\350\257\206\345\210\253.md"
index 7012c7f4bb..1a63091b92 100644
--- "a/applications/\350\275\273\351\207\217\347\272\247\350\275\246\347\211\214\350\257\206\345\210\253.md"
+++ "b/applications/\350\275\273\351\207\217\347\272\247\350\275\246\347\211\214\350\257\206\345\210\253.md"
@@ -311,7 +311,6 @@ python tools/train.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_student.yml -o
在上述命令中,通过`-o`的方式修改了配置文件中的参数。
-训练好的模型地址为: [det_ppocr_v3_finetune.tar](https://paddleocr.bj.bcebos.com/fanliku/license_plate_recognition/det_ppocr_v3_finetune.tar)
**评估**
@@ -354,8 +353,6 @@ python3.7 deploy/slim/quantization/quant.py -c configs/det/ch_PP-OCRv3/ch_PP-OCR
Eval.dataset.label_file_list=[/home/aistudio/data/CCPD2020/PPOCR/test/det.txt]
```
-训练好的模型地址为: [det_ppocr_v3_quant.tar](https://paddleocr.bj.bcebos.com/fanliku/license_plate_recognition/det_ppocr_v3_quant.tar)
-
量化后指标对比如下
|方案|hmeans| 模型大小 | 预测速度(lite) |
@@ -436,6 +433,12 @@ python tools/eval.py -c configs/rec/PP-OCRv3/ch_PP-OCRv3_rec.yml -o \
Eval.dataset.label_file_list=[/home/aistudio/data/CCPD2020/PPOCR/test/rec.txt]
```
+如需获取已训练模型,请扫码填写问卷,加入PaddleOCR官方交流群获取全部OCR垂类模型下载链接、《动手学OCR》电子书等全套OCR学习资料🎁
+<div align="left">
+<img src="https://ai-studio-static-online.cdn.bcebos.com/dd721099bd50478f9d5fb13d8dd00fad69c22d6848244fd3a1d3980d7fefc63e" width = "150" height = "150" />
+</div>
+
+
评估部分日志如下:
```bash
[2022/05/12 19:52:02] ppocr INFO: load pretrain successful from models/ch_PP-OCRv3_rec_train/best_accuracy
@@ -528,7 +531,6 @@ python tools/train.py -c configs/rec/PP-OCRv3/ch_PP-OCRv3_rec.yml -o \
Eval.dataset.data_dir=/home/aistudio/data/CCPD2020/PPOCR \
Eval.dataset.label_file_list=[/home/aistudio/data/CCPD2020/PPOCR/test/rec.txt]
```
-训练好的模型地址为: [rec_ppocr_v3_finetune.tar](https://paddleocr.bj.bcebos.com/fanliku/license_plate_recognition/rec_ppocr_v3_finetune.tar)
**评估**
@@ -570,7 +572,6 @@ python3.7 deploy/slim/quantization/quant.py -c configs/rec/PP-OCRv3/ch_PP-OCRv3_
Eval.dataset.data_dir=/home/aistudio/data/CCPD2020/PPOCR \
Eval.dataset.label_file_list=[/home/aistudio/data/CCPD2020/PPOCR/test/rec.txt]
```
-训练好的模型地址为: [rec_ppocr_v3_quant.tar](https://paddleocr.bj.bcebos.com/fanliku/license_plate_recognition/rec_ppocr_v3_quant.tar)
量化后指标对比如下
diff --git a/ppocr/data/imaug/fce_targets.py b/ppocr/data/imaug/fce_targets.py
index 4d1903c0a7..8c64276e26 100644
--- a/ppocr/data/imaug/fce_targets.py
+++ b/ppocr/data/imaug/fce_targets.py
@@ -107,17 +107,20 @@ def resample_line(self, line, n):
for i in range(1, n):
current_line_len = i * delta_length
- while current_line_len >= length_cumsum[current_edge_ind + 1]:
+ while current_edge_ind + 1 < len(length_cumsum) and current_line_len >= length_cumsum[current_edge_ind + 1]:
current_edge_ind += 1
+
current_edge_end_shift = current_line_len - length_cumsum[
current_edge_ind]
+
+ if current_edge_ind >= len(length_list):
+ break
end_shift_ratio = current_edge_end_shift / length_list[
current_edge_ind]
current_point = line[current_edge_ind] + (line[current_edge_ind + 1]
- line[current_edge_ind]
) * end_shift_ratio
resampled_line.append(current_point)
-
resampled_line.append(line[-1])
resampled_line = np.array(resampled_line)
@@ -328,6 +331,8 @@ def generate_center_region_mask(self, img_size, text_polys):
resampled_top_line, resampled_bot_line = self.resample_sidelines(
top_line, bot_line, self.resample_step)
resampled_bot_line = resampled_bot_line[::-1]
+ if len(resampled_top_line) != len(resampled_bot_line):
+ continue
center_line = (resampled_top_line + resampled_bot_line) / 2
line_head_shrink_len = norm(resampled_top_line[0] -
diff --git a/ppocr/utils/save_load.py b/ppocr/utils/save_load.py
index b09f1db6e9..3647111fdd 100644
--- a/ppocr/utils/save_load.py
+++ b/ppocr/utils/save_load.py
@@ -177,9 +177,9 @@ def save_model(model,
model.backbone.model.save_pretrained(model_prefix)
metric_prefix = os.path.join(model_prefix, 'metric')
# save metric and config
+ with open(metric_prefix + '.states', 'wb') as f:
+ pickle.dump(kwargs, f, protocol=2)
if is_best:
- with open(metric_prefix + '.states', 'wb') as f:
- pickle.dump(kwargs, f, protocol=2)
logger.info('save best model is to {}'.format(model_prefix))
else:
logger.info("save model in {}".format(model_prefix))
| https://api.github.com/repos/PaddlePaddle/PaddleOCR/pulls/6693 | 2022-06-24T07:48:02Z | 2022-06-26T16:49:00Z | 2022-06-26T16:49:00Z | 2023-04-12T07:55:30Z | 1,719 | PaddlePaddle/PaddleOCR | 41,994 |
|
Update docs | diff --git a/.gitignore b/.gitignore
index 277e719ea4..c904eeeeea 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,3 +13,6 @@ backend/openapi.json
# ignore jupyter notebook checkpoints
.ipynb_checkpoints
+
+# edit docs using obsidian.md, these files should not appear in the repo
+.obsidian/
diff --git a/docs/docs/data/README.md b/docs/docs/data/README.md
index 55676d3f56..b313a59c97 100644
--- a/docs/docs/data/README.md
+++ b/docs/docs/data/README.md
@@ -1,3 +1,8 @@
# Data
-Resources related to data.
+Resources related to data:
+
+- [Data schemas](https://projects.laion.ai/Open-Assistant/docs/data/schemas)
+- [Datasets](https://projects.laion.ai/Open-Assistant/docs/data/datasets)
+- [Data augmentation](https://projects.laion.ai/Open-Assistant/docs/data/augmentation)
+- [Supervised datasets](https://projects.laion.ai/Open-Assistant/docs/data/supervised-datasets)
diff --git a/docs/docs/guides/README.md b/docs/docs/guides/README.md
index 2ecfec2942..499c056052 100644
--- a/docs/docs/guides/README.md
+++ b/docs/docs/guides/README.md
@@ -1,3 +1,7 @@
# Guides
-Useful guides to using [Open-Assistant](https://open-assistant.io/).
+Useful guides for Open Assistant:
+
+- [General guidelines for using open-assistant.io](https://projects.laion.ai/Open-Assistant/docs/guides/guidelines)
+- [Example responses](https://projects.laion.ai/Open-Assistant/docs/guides/examples)
+- [Developer guide, contains a lot of technical info](https://projects.laion.ai/Open-Assistant/docs/guides/developers)
diff --git a/docs/docs/intro.md b/docs/docs/intro.md
index eed891c0bd..ff6f8243a4 100644
--- a/docs/docs/intro.md
+++ b/docs/docs/intro.md
@@ -1,15 +1,85 @@
# Introduction
-OpenAssistant is a chat-based assistant that understands tasks, can interact
-with third-party systems, and retrieve information dynamically to do so.
+> The FAQ page is available at
+> [here](https://projects.laion.ai/Open-Assistant/docs/faq).
-It can be extended and personalized easily and is developed as free, open-source
-software.
+Open Assistant (abbreviated as OA) is a chat-based and open-source assistant.
+The vision of the project is to make a large language model that can run on a
+single high-end consumer GPU. With some modifications, Open Assistant should
+also be able to interface with other third-party applications easily as well as
+retrieve information from databases and the Internet.
-## Our Vision
+You should join the
+[Open Assistant discord server](https://ykilcher.com/open-assistant-discord)
+and/or comment on Github issues before making any major changes. Most dev
+communcations take place on the Discord server. There are four main areas that
+you can work on:
-We want OpenAssistant to be the single, unifying platform that all other systems
-use to interface with humans.
+1. Ranking, labelling and making responses in
+ [open-assistant.io](https://www.open-assistant.io). You can take a look at
+ [tasks docs section](https://projects.laion.ai/Open-Assistant/docs/tasks) for
+ more information.
+2. Curating datasets and performing data augmentation. This includes scraping,
+ gathering other public datasets, etc. Most of these efforts will be
+ concentrated at
+ [`/data/datasets`](https://github.com/LAION-AI/Open-Assistant/tree/main/data/datasets)
+ and are documented at
+ [here](https://projects.laion.ai/Open-Assistant/docs/data/datasets).
+3. Creating and fine-tuning Open Assistant itself. For that, you should pay
+ special attention to
+ [`/model`](https://github.com/LAION-AI/Open-Assistant/tree/main/model).
+4. [open-assistant.io](https://www.open-assistant.io) dev. Take a close look at
+ [`/website`](https://github.com/LAION-AI/Open-Assistant/tree/main/website) as
+ well as
+ [`/backend`](https://github.com/LAION-AI/Open-Assistant/tree/main/backend).
+
+## GitHub folders explanation
+
+> Do read the
+> [developer guide](https://projects.laion.ai/Open-Assistant/docs/guides/developers)
+> for further information.
+
+Here's a list of first-level folders at
+[Open Assistant's Github page](https://github.com/LAION-AI/Open-Assistant/).
+
+- [`/ansible`](https://github.com/LAION-AI/Open-Assistant/tree/main/ansible) -
+ for managing the full stack using
+ [Ansible](<https://en.wikipedia.org/wiki/Ansible_(software)>)
+- [`/assets`](https://github.com/LAION-AI/Open-Assistant/tree/main/assets) -
+ contains logos
+- [`/backend`](https://github.com/LAION-AI/Open-Assistant/tree/main/backend) -
+ backend for open-assistant.io and discord bots, maybe helpful for locally test
+ API calls
+- [`/copilot`](https://github.com/LAION-AI/Open-Assistant/tree/main/copilot) -
+ read more at AWS's [Copilot](https://aws.github.io/copilot-cli/). And no, this
+ is not a folder that contains something similar to OpenAI's Codex.
+- [`/data`](https://github.com/LAION-AI/Open-Assistant/tree/main/data) -
+ contains
+ [`/data/datasets`](https://github.com/LAION-AI/Open-Assistant/tree/main/data/datasets)
+ that contains data scraping code and links to datasets on Hugging Face
+- [`/deploy`](https://github.com/LAION-AI/Open-Assistant/tree/main/deploy)
+- [`/discord-bot`](https://github.com/LAION-AI/Open-Assistant/tree/main/discord-bots) -
+ frontend as discord bots for volunteer data collection
+- [`/docker`](https://github.com/LAION-AI/Open-Assistant/tree/main/docker)
+- [`/docs`](https://github.com/LAION-AI/Open-Assistant/tree/main/docs) - this
+ website!
+- [`/inference`](https://github.com/LAION-AI/Open-Assistant/tree/main/inference) -
+ inference pipeline for Open Assistant model
+- [`/model`](https://github.com/LAION-AI/Open-Assistant/tree/main/inference) -
+ currently contains scripts and tools for training/fine-tuning Open Assistant
+ and other neural networks
+- [\*`/notebooks`](https://github.com/LAION-AI/Open-Assistant/tree/main/inference) -
+ DEPRECATED in favor of\*
+ [`/data/datasets`](https://github.com/LAION-AI/Open-Assistant/tree/main/data/datasets).
+ Contains jupyter notebooks for data scraping and augmentation
+- [`/oasst-shared`](https://github.com/LAION-AI/Open-Assistant/tree/main/oasst-shared) -
+ shared Python code for Open Assistant
+- [`/scripts`](https://github.com/LAION-AI/Open-Assistant/tree/main/scripts) -
+ contains various scripts for things
+- [`/text-frontend`](https://github.com/LAION-AI/Open-Assistant/tree/main/text-frontend)
+- [`/website`](https://github.com/LAION-AI/Open-Assistant/tree/main/website) -
+ everything in [open-assistant.io](https://www.open-assistant.io), including
+ gamification
## Principles
@@ -21,14 +91,3 @@ use to interface with humans.
hardware
- We rapidly validate our ML experiments on a small scale, before going to a
supercluster
-
-## Main Efforts
-
-- Data Collection Code → Backend, website, and discord bot to collect data
-- Instruction Dataset Gathering → Scraping & cleaning web data
-- Gamification → Leaderboards & more, to make data collection more fun
-- Model Training → Experiments on pseudo- and real-data
-- Infrastructure → Collection, training, and inference
-- Data Collection → This is the bulk of the work
-- Data Augmentation → Making more data from little data
-- Privacy and Safety → Protecting sensitive data
diff --git a/docs/docs/presentations/README.md b/docs/docs/presentations/README.md
index d231498bf7..d5da9b8fd6 100644
--- a/docs/docs/presentations/README.md
+++ b/docs/docs/presentations/README.md
@@ -1,3 +1,8 @@
# Presentations
Useful presentations that have been published about the project.
+
+- [OpenAssistant Roadmap](https://docs.google.com/presentation/d/1n7IrAOVOqwdYgiYrXc8Sj0He8krn5MVZO_iLkCjTtu0/edit?usp=sharing):
+ High level vison and roadmap (December 2022).
+- [OpenAssistant MVP](https://docs.google.com/presentation/d/1MXH5kJcew7h1aA9PBx2MirkEkjCBLnABbbrPsgbcyQg/edit?usp=sharing):
+ Goal: Crowd-Sourced Training Data Collection (January 2023).
diff --git a/docs/docs/presentations/list.md b/docs/docs/presentations/list.md
deleted file mode 100644
index 6208fd55b9..0000000000
--- a/docs/docs/presentations/list.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# List
-
-- [OpenAssistant Roadmap](https://docs.google.com/presentation/d/1n7IrAOVOqwdYgiYrXc8Sj0He8krn5MVZO_iLkCjTtu0/edit?usp=sharing):
- High level vison and roadmap (December 2022).
-- [OpenAssistant MVP](https://docs.google.com/presentation/d/1MXH5kJcew7h1aA9PBx2MirkEkjCBLnABbbrPsgbcyQg/edit?usp=sharing):
- Goal: Crowd-Sourced Training Data Collection (January 2023).
diff --git a/docs/docs/research/README.md b/docs/docs/research/README.md
index 5d6b45c70c..1dc7a3fb34 100644
--- a/docs/docs/research/README.md
+++ b/docs/docs/research/README.md
@@ -1,3 +1,6 @@
# Research
-Useful research material.
+Useful research materials:
+
+- [General](https://projects.laion.ai/Open-Assistant/docs/research/general)
+- [Cohere Grounded QA](https://projects.laion.ai/Open-Assistant/docs/research/search-based-qa)
diff --git a/docs/sidebars.js b/docs/sidebars.js
index 27421182e1..e77194e908 100644
--- a/docs/sidebars.js
+++ b/docs/sidebars.js
@@ -71,13 +71,9 @@ const sidebars = {
items: ["research/general", "research/search-based-qa"],
},
{
- type: "category",
+ type: "doc",
label: "Presentations",
- link: {
- type: "doc",
- id: "presentations/README",
- },
- items: ["presentations/list"],
+ id: "presentations/README",
},
{
type: "doc",
| Address issue raised in #2008, made a new PR because I couldn't run pre-commit for some reason | https://api.github.com/repos/LAION-AI/Open-Assistant/pulls/2015 | 2023-03-08T07:42:42Z | 2023-03-11T16:31:17Z | 2023-03-11T16:31:17Z | 2023-03-11T16:31:18Z | 2,640 | LAION-AI/Open-Assistant | 37,120 |
01 | diff --git a/README.md b/README.md
index c8a7960ae..ef727d122 100644
--- a/README.md
+++ b/README.md
@@ -9,7 +9,7 @@
<img src="https://img.shields.io/static/v1?label=license&message=AGPL&color=white&style=flat" alt="License"/>
<br>
<br>
- <strong>Let language models run code.</strong><br>
+ <strong>Today we launched a new computer (the 01) with Open Interpreter at the center. <a href="https://openinterpreter.com/01">Star the repo →</a></strong><br>
<br><a href="https://openinterpreter.com">Get early access to the desktop app</a> | <a href="https://docs.openinterpreter.com/">Documentation</a><br>
</p>
|
| https://api.github.com/repos/OpenInterpreter/open-interpreter/pulls/1104 | 2024-03-21T17:03:09Z | 2024-03-21T17:03:19Z | 2024-03-21T17:03:19Z | 2024-03-21T17:03:19Z | 205 | OpenInterpreter/open-interpreter | 40,885 |
Add support for tuple input on MultiBinary space | diff --git a/gym/spaces/multi_binary.py b/gym/spaces/multi_binary.py
index f1876ee8611..e3b37890d27 100644
--- a/gym/spaces/multi_binary.py
+++ b/gym/spaces/multi_binary.py
@@ -4,9 +4,9 @@
class MultiBinary(Space):
'''
- An n-dimensional binary space.
+ An n-shape binary space.
- The argument to MultiBinary defines n.
+ The argument to MultiBinary defines n, which could be a number or a `list` of numbers.
Example Usage:
@@ -16,18 +16,31 @@ class MultiBinary(Space):
array([0,1,0,1,0], dtype =int8)
+ >> self.observation_space = spaces.MultiBinary([3,2])
+
+ >> self.observation_space.sample()
+
+ array([[0, 0],
+ [0, 1],
+ [1, 1]], dtype=int8)
+
'''
-
def __init__(self, n):
self.n = n
- super(MultiBinary, self).__init__((self.n,), np.int8)
+ if type(n) in [tuple, list, np.ndarray]:
+ input_n = n
+ else:
+ input_n = (n, )
+ super(MultiBinary, self).__init__(input_n, np.int8)
def sample(self):
return self.np_random.randint(low=0, high=2, size=self.n, dtype=self.dtype)
def contains(self, x):
- if isinstance(x, list):
+ if isinstance(x, list) or isinstance(x, tuple):
x = np.array(x) # Promote list to array for contains check
+ if self.shape != x.shape:
+ return False
return ((x==0) | (x==1)).all()
def to_jsonable(self, sample_n):
| While using [ray-project/ray](https://github.com/ray-project/ray), I found that passing a multi-dimensional shape as the input n of `MultiBinary` causes a problem at init time. After debugging, it turned out that the way n is handled does not support multi-dimensional input.
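A short usage sketch of the fixed behavior (shape values are illustrative):

```python
import numpy as np
from gym.spaces import MultiBinary

space = MultiBinary([3, 2])  # n may now be an int or a list/tuple/ndarray
sample = space.sample()

assert sample.shape == (3, 2) and sample.dtype == np.int8
assert space.contains(sample)           # shape is checked as well as values
assert not space.contains([[0, 1, 0]])  # wrong shape is rejected
```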
Here's the issue in [ray-project/ray](https://github.com/ray-project/ray): [#10024 on ray](https://github.com/ray-project/ray/issues/10024). | https://api.github.com/repos/openai/gym/pulls/2023 | 2020-08-12T14:35:12Z | 2020-09-25T22:43:16Z | 2020-09-25T22:43:15Z | 2020-09-25T22:43:16Z | 442 | openai/gym | 5,222 |
clarify brushfire description | diff --git a/README.md b/README.md
index 204a126c..bf497fa5 100644
--- a/README.md
+++ b/README.md
@@ -861,7 +861,7 @@ on MNIST digits[DEEP LEARNING]
#### General-Purpose Machine Learning
* [Conjecture](https://github.com/etsy/Conjecture) - Scalable Machine Learning in Scalding
-* [brushfire](https://github.com/avibryant/brushfire) - decision trees for scalding
+* [brushfire](https://github.com/avibryant/brushfire) - decision trees and random forests for scalding
* [ganitha](https://github.com/tresata/ganitha) - scalding powered machine learning
* [adam](https://github.com/bigdatagenomics/adam) - A genomics processing engine and specialized file format built using Apache Avro, Apache Spark and Parquet. Apache 2 licensed.
* [bioscala](https://github.com/bioscala/bioscala) - Bioinformatics for the Scala programming language
| A very small edit to clarify that brushfire provides random forests as well as single decision trees.
| https://api.github.com/repos/josephmisiti/awesome-machine-learning/pulls/122 | 2015-03-22T01:40:50Z | 2015-03-22T16:56:51Z | 2015-03-22T16:56:51Z | 2015-03-22T16:56:55Z | 243 | josephmisiti/awesome-machine-learning | 52,394 |
[3.11] gh-101975: Fixed a potential SegFault on garbage collection (GH-102803) | diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-03-18-02-36-39.gh-issue-101975.HwMR1d.rst b/Misc/NEWS.d/next/Core and Builtins/2023-03-18-02-36-39.gh-issue-101975.HwMR1d.rst
new file mode 100644
index 00000000000000..28c9a8465180db
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-03-18-02-36-39.gh-issue-101975.HwMR1d.rst
@@ -0,0 +1 @@
+Fixed ``stacktop`` value on tracing entries to avoid corruption on garbage collection.
diff --git a/Python/ceval.c b/Python/ceval.c
index 96d215aba80058..72f9c8375d072a 100644
--- a/Python/ceval.c
+++ b/Python/ceval.c
@@ -1536,6 +1536,7 @@ eval_frame_handle_pending(PyThreadState *tstate)
_PyFrame_SetStackPointer(frame, stack_pointer); \
int err = trace_function_entry(tstate, frame); \
stack_pointer = _PyFrame_GetStackPointer(frame); \
+ frame->stacktop = -1; \
if (err) { \
goto error; \
} \
| Backport of #102803
* Issue: gh-101975
| https://api.github.com/repos/python/cpython/pulls/102807 | 2023-03-18T11:18:16Z | 2023-03-20T14:42:16Z | 2023-03-20T14:42:16Z | 2023-09-26T12:50:47Z | 321 | python/cpython | 4,677 |
cartpole fix | diff --git a/gym/core.py b/gym/core.py
index 21b6f1e6ee8..feca2890e7d 100644
--- a/gym/core.py
+++ b/gym/core.py
@@ -311,9 +311,7 @@ def __init__(self, env=None):
self._update_wrapper_stack()
if env and env._configured:
- # TODO: CartPole currently calls configure on `make`. Fix this so that we can use WrapAfterConfigureError
- # raise error.WrapAfterConfigureError("Attempted to wrap env {} after .configure() was called. All wrappers must be applied before calling .configure()".format(env))
- logger.warn("Attempted to wrap env {} after .configure() was called. All wrappers must be applied before calling .configure()".format(env))
+ logger.warning("Attempted to wrap env %s after .configure() was called.", env)
def _update_wrapper_stack(self):
"""
diff --git a/gym/envs/classic_control/cartpole.py b/gym/envs/classic_control/cartpole.py
index 03dd0df5858..cade791e5d7 100644
--- a/gym/envs/classic_control/cartpole.py
+++ b/gym/envs/classic_control/cartpole.py
@@ -43,17 +43,10 @@ def __init__(self):
self.observation_space = spaces.Box(-high, high)
self._seed()
- self.reset()
self.viewer = None
self.steps_beyond_done = None
- # Just need to initialize the relevant attributes
- self._configure()
-
- def _configure(self, display=None):
- self.display = display
-
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
@@ -87,7 +80,7 @@ def _step(self, action):
reward = 1.0
else:
if self.steps_beyond_done == 0:
- logger.warn("You are calling 'step()' even though this environment has already returned done = True. You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior.")
+ logger.warning("You are calling 'step()' even though this environment has already returned done = True. You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior.")
self.steps_beyond_done += 1
reward = 0.0
@@ -118,7 +111,7 @@ def _render(self, mode='human', close=False):
if self.viewer is None:
from gym.envs.classic_control import rendering
- self.viewer = rendering.Viewer(screen_width, screen_height, display=self.display)
+ self.viewer = rendering.Viewer(screen_width, screen_height)
l,r,t,b = -cartwidth/2, cartwidth/2, cartheight/2, -cartheight/2
axleoffset =cartheight/4.0
cart = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])
| Get rid of the _configure() call in the CartPole constructor. | https://api.github.com/repos/openai/gym/pulls/457 | 2017-01-04T06:14:01Z | 2017-01-04T07:19:15Z | 2017-01-04T07:19:15Z | 2017-01-04T07:19:20Z | 690 | openai/gym | 5,392 |
fix: function name typo | diff --git a/ciphers/rsa_cipher.py b/ciphers/rsa_cipher.py
index 3bc2ebe5fc74..ac9782a49fff 100644
--- a/ciphers/rsa_cipher.py
+++ b/ciphers/rsa_cipher.py
@@ -102,8 +102,7 @@ def read_from_file_and_decrypt(message_filename: str, key_filename: str) -> str:
sys.exit(
f"ERROR: Block size is {block_size * 8} bits and key size is {key_size} "
"bits. The RSA cipher requires the block size to be equal to or greater "
- "than the key size. Did you specify the correct key file and encrypted "
- "file?"
+ "than the key size. Were the correct key file and encrypted file specified?"
)
encrypted_blocks = []
diff --git a/machine_learning/astar.py b/machine_learning/astar.py
index 7a60ed225a2d..ff5208266343 100644
--- a/machine_learning/astar.py
+++ b/machine_learning/astar.py
@@ -57,7 +57,7 @@ def __init__(self, world_size=(5, 5)):
def show(self):
print(self.w)
- def get_neigbours(self, cell):
+ def get_neighbours(self, cell):
"""
Return the neighbours of cell
"""
@@ -110,7 +110,7 @@ def astar(world, start, goal):
_closed.append(_open.pop(min_f))
if current == goal:
break
- for n in world.get_neigbours(current):
+ for n in world.get_neighbours(current):
for c in _closed:
if c == n:
continue
| ### Describe your change:
* [ ] Add an algorithm?
* [X] Fix a bug or typo in an existing algorithm?
* [ ] Add or change doctests? -- Note: Please avoid changing both code and tests in a single pull request.
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [ ] All new Python files are placed inside an existing directory.
* [ ] All filenames are in all lowercase characters with no spaces or dashes.
* [ ] All functions and variable names follow Python naming conventions.
* [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [ ] All new algorithms include at least one URL that points to Wikipedia or another similar explanation.
* [ ] If this pull request resolves one or more open issues then the description above includes the issue number(s) with a [closing keyword](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue): "Fixes #ISSUE-NUMBER".
| https://api.github.com/repos/TheAlgorithms/Python/pulls/11319 | 2024-03-11T13:18:16Z | 2024-03-12T08:40:32Z | 2024-03-12T08:40:32Z | 2024-03-12T08:46:04Z | 388 | TheAlgorithms/Python | 29,715 |
fix(native): Pass scope correctly to DIF assemble | diff --git a/src/sentry/tasks/assemble.py b/src/sentry/tasks/assemble.py
index f6c8f2940bf564..4616112a9eef79 100644
--- a/src/sentry/tasks/assemble.py
+++ b/src/sentry/tasks/assemble.py
@@ -94,7 +94,7 @@ def assemble_dif(project_id, name, checksum, chunks, **kwargs):
set_assemble_status(AssembleTask.DIF, project.id, checksum, ChunkFileState.ASSEMBLING)
# Assemble the chunks into a temporary file
- rv = assemble_file(AssembleTask.DIF, project.id, name, checksum, chunks,
+ rv = assemble_file(AssembleTask.DIF, project, name, checksum, chunks,
file_type='project.dif')
# If not file has been created this means that the file failed to
| Fixes SENTRY-AY5 | https://api.github.com/repos/getsentry/sentry/pulls/13480 | 2019-05-31T09:19:40Z | 2019-05-31T10:09:17Z | 2019-05-31T10:09:17Z | 2020-12-20T10:38:25Z | 195 | getsentry/sentry | 44,093 |
DOC: bug in user guide after new method | diff --git a/doc/source/user_guide/style.ipynb b/doc/source/user_guide/style.ipynb
index 67cf07a718877..1c83b0d3d048b 100644
--- a/doc/source/user_guide/style.ipynb
+++ b/doc/source/user_guide/style.ipynb
@@ -1288,7 +1288,7 @@
"outputs": [],
"source": [
"df2.style.format('{:.3f}', na_rep=\"\")\\\n",
- " .bar(align=0, vmin=-2.5, vmax=2.5, color=mpl.cm.get_cmap(\"bwr\"), height=50,\n",
+ " .bar(align=0, vmin=-2.5, vmax=2.5, cmap=\"bwr\", height=50,\n",
" width=60, props=\"width: 120px; border-right: 1px solid black;\")\\\n",
" .text_gradient(cmap=\"bwr\", vmin=-2.5, vmax=2.5)"
]
| Just updating something that I missed in a previous PR when the argument name was changed.
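A self-contained reproduction of the corrected call; the DataFrame is made up, and `cmap=` is the renamed argument the fix targets (matplotlib is needed for the colormap):

```python
import numpy as np
import pandas as pd

df2 = pd.DataFrame(np.random.randn(4, 3) * 2, columns=list("abc"))

styler = (
    df2.style.format("{:.3f}", na_rep="")
    .bar(align=0, vmin=-2.5, vmax=2.5, cmap="bwr", height=50,
         width=60, props="width: 120px; border-right: 1px solid black;")
    .text_gradient(cmap="bwr", vmin=-2.5, vmax=2.5)
)
html = styler.to_html()  # renders the bars and the gradient text
```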
| https://api.github.com/repos/pandas-dev/pandas/pulls/43976 | 2021-10-11T16:11:30Z | 2021-10-14T17:07:13Z | 2021-10-14T17:07:13Z | 2021-10-15T16:48:20Z | 237 | pandas-dev/pandas | 44,838 |
Fixed #29008 -- Fixed crash of 404 debug page when URL path converter raises Http404. | diff --git a/django/views/debug.py b/django/views/debug.py
index cb3e8c7ec10bc..86da47ee2057a 100644
--- a/django/views/debug.py
+++ b/django/views/debug.py
@@ -5,10 +5,10 @@
from pathlib import Path
from django.conf import settings
-from django.http import HttpResponse, HttpResponseNotFound
+from django.http import Http404, HttpResponse, HttpResponseNotFound
from django.template import Context, Engine, TemplateDoesNotExist
from django.template.defaultfilters import pprint
-from django.urls import Resolver404, resolve
+from django.urls import resolve
from django.utils import timezone
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str
@@ -483,7 +483,7 @@ def technical_404_response(request, exception):
caller = ''
try:
resolver_match = resolve(request.path)
- except Resolver404:
+ except Http404:
pass
else:
obj = resolver_match.func
diff --git a/tests/view_tests/tests/test_debug.py b/tests/view_tests/tests/test_debug.py
index 46589c82ba55c..73430178d7142 100644
--- a/tests/view_tests/tests/test_debug.py
+++ b/tests/view_tests/tests/test_debug.py
@@ -12,11 +12,13 @@
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import DatabaseError, connection
+from django.http import Http404
from django.shortcuts import render
from django.template import TemplateDoesNotExist
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import LoggingCaptureMixin
from django.urls import path, reverse
+from django.urls.converters import IntConverter
from django.utils.functional import SimpleLazyObject
from django.utils.safestring import mark_safe
from django.views.debug import (
@@ -237,6 +239,11 @@ def test_template_encoding(self):
technical_404_response(mock.MagicMock(), mock.Mock())
m.assert_called_once_with(encoding='utf-8')
+ def test_technical_404_converter_raise_404(self):
+ with mock.patch.object(IntConverter, 'to_python', side_effect=Http404):
+ response = self.client.get('/path-post/1/')
+ self.assertContains(response, 'Page not found', status_code=404)
+
class DebugViewQueriesAllowedTests(SimpleTestCase):
# May need a query to initialize MySQL connection
| I wasn't sure how to write a test for this, since `Resolver404` is an instance of `Http404`, so when it is raised the handler simply passes. | https://api.github.com/repos/django/django/pulls/11620 | 2019-08-02T13:56:08Z | 2019-08-09T21:22:56Z | 2019-08-09T21:22:56Z | 2019-08-09T21:22:56Z | 529 | django/django | 51,139 |
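For the fix above, a hedged sketch (converter name and validation rule are hypothetical) of a path converter whose `to_python()` raises `Http404`, the case the technical 404 page must now tolerate:

```python
from django.http import Http404
from django.urls import path, register_converter

class FourDigitYearConverter:
    regex = "[0-9]{4}"

    def to_python(self, value):
        year = int(value)
        if year < 1900:  # illustrative rule
            # Raising Http404 here used to crash the debug 404 page.
            raise Http404("Year out of range")
        return year

    def to_url(self, value):
        return "%04d" % int(value)

register_converter(FourDigitYearConverter, "yyyy")

def archive(request, year):
    ...

urlpatterns = [path("archive/<yyyy:year>/", archive)]
```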
Update default Albumentations | diff --git a/utils/augmentations.py b/utils/augmentations.py
index 49f957e6fd6..04192d1ec5c 100644
--- a/utils/augmentations.py
+++ b/utils/augmentations.py
@@ -23,9 +23,13 @@ def __init__(self):
check_version(A.__version__, '1.0.3') # version requirement
self.transform = A.Compose([
- A.Blur(p=0.1),
- A.MedianBlur(p=0.1),
- A.ToGray(p=0.01)],
+ A.Blur(p=0.01),
+ A.MedianBlur(p=0.01),
+ A.ToGray(p=0.01),
+ A.CLAHE(p=0.01),
+ A.RandomBrightnessContrast(p=0.0),
+ A.RandomGamma(p=0.0),
+ A.ImageCompression(quality_lower=75, p=0.0)],
bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels']))
logging.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p))
|
## 🛠️ PR Summary
<sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)</sub>
### 🌟 Summary
Optimized image augmentation probabilities in the YOLOv5 model.
### 📊 Key Changes
- Reduced the probability of applying the Blur and MedianBlur effects from 0.1 to 0.01.
- Added new augmentations with a probability of 0.01 for CLAHE (Contrast Limited Adaptive Histogram Equalization).
- Introduced RandomBrightnessContrast and RandomGamma augmentations with a disabled state (probability set to 0.0).
- Included ImageCompression with a default quality setting of 75 but disabled it (probability set to 0.0).
### 🎯 Purpose & Impact
- 🎨 **Enhanced Data Augmentation:** Adding new transformations like CLAHE enriches the data augmentation pipeline and can potentially improve model robustness to various lighting conditions.
- 🔍 **Fine-Tuning Probabilities:** Adjusting the probabilities of existing augmentations aims to create a more balanced augmentation strategy that may lead to better generalization and prevent overfitting.
- 🚀 **Experimentation Ready:** Including new augmentations with a zero probability allows for quick experimentation. Developers can easily enable these augmentations in the future for further research and testing.
- 📈 **Potential Impact:** These changes could lead to more effective training and possibly improved accuracy without changing the current model behavior since new augmentations are initially set to not apply. | https://api.github.com/repos/ultralytics/yolov5/pulls/4931 | 2021-09-26T19:06:27Z | 2021-09-26T19:10:33Z | 2021-09-26T19:10:33Z | 2024-01-19T15:28:06Z | 270 | ultralytics/yolov5 | 25,576 |
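For the Albumentations pipeline in the YOLOv5 PR above, a hedged usage sketch (image and boxes are synthetic) of applying the composed transform with YOLO-format boxes:

```python
import albumentations as A
import numpy as np

transform = A.Compose(
    [A.Blur(p=0.01), A.MedianBlur(p=0.01), A.ToGray(p=0.01), A.CLAHE(p=0.01)],
    bbox_params=A.BboxParams(format="yolo", label_fields=["class_labels"]),
)

image = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8)
boxes = [(0.5, 0.5, 0.2, 0.3)]  # normalized (x_center, y_center, width, height)

out = transform(image=image, bboxes=boxes, class_labels=[0])
augmented, aug_boxes = out["image"], out["bboxes"]
```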
feat: remove OPTIONS | diff --git a/metagpt/actions/rebuild_class_view.py b/metagpt/actions/rebuild_class_view.py
index d25d9e49b..2140ad874 100644
--- a/metagpt/actions/rebuild_class_view.py
+++ b/metagpt/actions/rebuild_class_view.py
@@ -20,7 +20,6 @@
GENERALIZATION,
GRAPH_REPO_FILE_REPO,
)
-from metagpt.context import CONTEXT
from metagpt.logs import logger
from metagpt.repo_parser import RepoParser
from metagpt.schema import ClassAttribute, ClassMethod, ClassView
@@ -31,7 +30,7 @@
class RebuildClassView(Action):
async def run(self, with_messages=None, format=config.prompt_schema):
- graph_repo_pathname = CONTEXT.git_repo.workdir / GRAPH_REPO_FILE_REPO / CONTEXT.git_repo.workdir.name
+ graph_repo_pathname = self.context.git_repo.workdir / GRAPH_REPO_FILE_REPO / self.context.git_repo.workdir.name
graph_db = await DiGraphRepository.load_from(str(graph_repo_pathname.with_suffix(".json")))
repo_parser = RepoParser(base_directory=Path(self.i_context))
# use pylint
@@ -49,9 +48,9 @@ async def run(self, with_messages=None, format=config.prompt_schema):
await graph_db.save()
async def _create_mermaid_class_views(self, graph_db):
- path = Path(CONTEXT.git_repo.workdir) / DATA_API_DESIGN_FILE_REPO
+ path = Path(self.context.git_repo.workdir) / DATA_API_DESIGN_FILE_REPO
path.mkdir(parents=True, exist_ok=True)
- pathname = path / CONTEXT.git_repo.workdir.name
+ pathname = path / self.context.git_repo.workdir.name
async with aiofiles.open(str(pathname.with_suffix(".mmd")), mode="w", encoding="utf-8") as writer:
content = "classDiagram\n"
logger.debug(content)
diff --git a/metagpt/actions/rebuild_sequence_view.py b/metagpt/actions/rebuild_sequence_view.py
index b701e66de..777dde8ce 100644
--- a/metagpt/actions/rebuild_sequence_view.py
+++ b/metagpt/actions/rebuild_sequence_view.py
@@ -14,7 +14,6 @@
from metagpt.actions import Action
from metagpt.config2 import config
from metagpt.const import GRAPH_REPO_FILE_REPO
-from metagpt.context import CONTEXT
from metagpt.logs import logger
from metagpt.utils.common import aread, list_files
from metagpt.utils.di_graph_repository import DiGraphRepository
@@ -23,7 +22,7 @@
class RebuildSequenceView(Action):
async def run(self, with_messages=None, format=config.prompt_schema):
- graph_repo_pathname = CONTEXT.git_repo.workdir / GRAPH_REPO_FILE_REPO / CONTEXT.git_repo.workdir.name
+ graph_repo_pathname = self.context.git_repo.workdir / GRAPH_REPO_FILE_REPO / self.context.git_repo.workdir.name
graph_db = await DiGraphRepository.load_from(str(graph_repo_pathname.with_suffix(".json")))
entries = await RebuildSequenceView._search_main_entry(graph_db)
for entry in entries:
@@ -43,6 +42,8 @@ async def _search_main_entry(graph_db) -> List:
async def _rebuild_sequence_view(self, entry, graph_db):
filename = entry.subject.split(":", 1)[0]
src_filename = RebuildSequenceView._get_full_filename(root=self.i_context, pathname=filename)
+ if not src_filename:
+ return
content = await aread(filename=src_filename, encoding="utf-8")
content = f"```python\n{content}\n```\n\n---\nTranslate the code above into Mermaid Sequence Diagram."
data = await self.llm.aask(
diff --git a/metagpt/const.py b/metagpt/const.py
index f917ee90d..0ae425a47 100644
--- a/metagpt/const.py
+++ b/metagpt/const.py
@@ -9,7 +9,6 @@
@Modified By: mashenquan, 2023-11-27. Defines file repository paths according to Section 2.2.3.4 of RFC 135.
@Modified By: mashenquan, 2023/12/5. Add directories for code summarization..
"""
-import contextvars
import os
from pathlib import Path
@@ -17,8 +16,6 @@
import metagpt
-OPTIONS = contextvars.ContextVar("OPTIONS", default={})
-
def get_metagpt_package_root():
"""Get the root directory of the installed package."""
@@ -71,12 +68,10 @@ def get_metagpt_root():
PROMPT_PATH = SOURCE_ROOT / "prompts"
SKILL_DIRECTORY = SOURCE_ROOT / "skills"
-
# REAL CONSTS
MEM_TTL = 24 * 30 * 3600
-
MESSAGE_ROUTE_FROM = "sent_from"
MESSAGE_ROUTE_TO = "send_to"
MESSAGE_ROUTE_CAUSE_BY = "cause_by"
| **Features**
- remove OPTIONS
- Replace global context with local context | https://api.github.com/repos/geekan/MetaGPT/pulls/740 | 2024-01-12T13:59:25Z | 2024-01-13T05:49:07Z | 2024-01-13T05:49:07Z | 2024-01-13T05:49:07Z | 1,095 | geekan/MetaGPT | 16,814 |
Fix issue when deleting stack contains role with inline policy | diff --git a/localstack/utils/cloudformation/template_deployer.py b/localstack/utils/cloudformation/template_deployer.py
index 3267443af8794..5b8291f94dae5 100644
--- a/localstack/utils/cloudformation/template_deployer.py
+++ b/localstack/utils/cloudformation/template_deployer.py
@@ -1086,9 +1086,22 @@ def deploy_resource(resource_id, resources, stack_name):
def delete_resource(resource_id, resources, stack_name):
- for res_id, res in resources.items():
- if res['ResourceType'] == 'AWS::S3::Bucket':
- s3_listener.remove_bucket_notification(res['PhysicalResourceId'])
+ res = resources[resource_id]
+ if res['ResourceType'] == 'AWS::S3::Bucket':
+ s3_listener.remove_bucket_notification(res['PhysicalResourceId'])
+
+ if res['ResourceType'] == 'AWS::IAM::Role':
+ role_name = res['PhysicalResourceId']
+
+ iam_client = aws_stack.connect_to_service('iam')
+ rs = iam_client.list_role_policies(
+ RoleName=role_name
+ )
+ for policy in rs['PolicyNames']:
+ iam_client.delete_role_policy(
+ RoleName=role_name,
+ PolicyName=policy
+ )
return execute_resource_action(resource_id, resources, stack_name, ACTION_DELETE)
@@ -1246,7 +1259,6 @@ def delete_stack(stack_name, stack_resources):
# --------
# Util methods for analyzing resource dependencies
# --------
-
def is_deployable_resource(resource):
resource_type = get_resource_type(resource)
entry = RESOURCE_TO_FUNCTION.get(resource_type)
diff --git a/tests/integration/test_cloudformation.py b/tests/integration/test_cloudformation.py
index a6999bd26c9e4..2dd068a091453 100644
--- a/tests/integration/test_cloudformation.py
+++ b/tests/integration/test_cloudformation.py
@@ -604,6 +604,31 @@ def handler(event, context):
Ref: AWS::Region
"""
+TEST_DEPLOY_BODY_4 = """
+AWSTemplateFormatVersion: '2010-09-09'
+
+Resources:
+ # IAM role for running the step function
+ ExecutionRole:
+ Type: "AWS::IAM::Role"
+ Properties:
+ AssumeRolePolicyDocument:
+ Version: "2012-10-17"
+ Statement:
+ - Effect: "Allow"
+ Principal:
+ Service: !Sub states.${AWS::Region}.amazonaws.com
+ Action: "sts:AssumeRole"
+ Policies:
+ - PolicyName: StatesExecutionPolicy
+ PolicyDocument:
+ Version: "2012-10-17"
+ Statement:
+ - Effect: Allow
+ Action: "lambda:InvokeFunction"
+ Resource: "*"
+"""
+
TEST_TEMPLATE_19 = """
Conditions:
IsPRD:
@@ -1228,6 +1253,60 @@ def test_deploy_stack_with_dynamodb_table(self):
rs = ddb_client.list_tables()
self.assertNotIn(ddb_table_name, rs['TableNames'])
+ def test_deploy_stack_with_iam_nested_policy(self):
+ stack_name = 'stack-%s' % short_uid()
+ change_set_name = 'change-set-%s' % short_uid()
+
+ cloudformation = aws_stack.connect_to_service('cloudformation')
+
+ rs = cloudformation.create_change_set(
+ StackName=stack_name,
+ ChangeSetName=change_set_name,
+ TemplateBody=TEST_DEPLOY_BODY_4
+ )
+
+ self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200)
+ change_set_id = rs['Id']
+
+ rs = cloudformation.describe_change_set(
+ StackName=stack_name,
+ ChangeSetName=change_set_id
+ )
+ self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200)
+ self.assertEqual(rs['ChangeSetId'], change_set_id)
+ self.assertEqual(rs['Status'], 'CREATE_COMPLETE')
+
+ iam_client = aws_stack.connect_to_service('iam')
+ rs = iam_client.list_roles()
+ number_of_roles = len(rs['Roles'])
+
+ rs = cloudformation.execute_change_set(
+ StackName=stack_name,
+ ChangeSetName=change_set_name
+ )
+ self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200)
+
+ rs = cloudformation.describe_stacks(
+ StackName=stack_name
+ )
+ self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200)
+
+ rs = iam_client.list_roles()
+ # 1 role was created
+ self.assertEqual(number_of_roles + 1, len(rs['Roles']))
+
+ # clean up
+ cloudformation.delete_change_set(
+ StackName=stack_name,
+ ChangeSetName=change_set_name
+ )
+ cloudformation.delete_stack(
+ StackName=stack_name
+ )
+ rs = iam_client.list_roles()
+ # role was removed
+ self.assertEqual(number_of_roles, len(rs['Roles']))
+
def test_cfn_handle_s3_bucket_resources(self):
stack_name = 'stack-%s' % short_uid()
bucket_name = 's3-bucket-%s' % short_uid()
| * Delete role policies before deleting the IAM::Role resource (see the sketch after this list)
* Add integration test
Issue fixed:
#2790 CloudFormation: Delete stack containing IAM role with inline policy fails
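A sketch of the cleanup order the change enforces; the endpoint and role name are illustrative. Inline policies must be deleted before the role itself can be removed:

```python
import boto3

# LocalStack edge endpoint; adjust to your setup.
iam = boto3.client("iam", endpoint_url="http://localhost:4566")

role_name = "ExecutionRole"  # hypothetical role, e.g. from the test template
for policy_name in iam.list_role_policies(RoleName=role_name)["PolicyNames"]:
    iam.delete_role_policy(RoleName=role_name, PolicyName=policy_name)

iam.delete_role(RoleName=role_name)
```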
| https://api.github.com/repos/localstack/localstack/pulls/2804 | 2020-08-03T14:06:35Z | 2020-08-05T10:09:47Z | 2020-08-05T10:09:47Z | 2020-08-05T10:09:52Z | 1,164 | localstack/localstack | 29,257 |
Removed note in tutorial about bypassing manage.py. | diff --git a/docs/intro/tutorial02.txt b/docs/intro/tutorial02.txt
index 5eca45ee0985d..e9b2aaa2a3bd1 100644
--- a/docs/intro/tutorial02.txt
+++ b/docs/intro/tutorial02.txt
@@ -381,28 +381,6 @@ We're using this instead of simply typing "python", because :file:`manage.py`
sets the ``DJANGO_SETTINGS_MODULE`` environment variable, which gives Django
the Python import path to your :file:`mysite/settings.py` file.
-.. admonition:: Bypassing manage.py
-
- If you'd rather not use :file:`manage.py`, no problem. Just set the
- :envvar:`DJANGO_SETTINGS_MODULE` environment variable to
- ``mysite.settings``, start a plain Python shell, and set up Django:
-
- .. code-block:: pycon
-
- >>> import django
- >>> django.setup()
-
- If this raises an :exc:`AttributeError`, you're probably using
- a version of Django that doesn't match this tutorial version. You'll want
- to either switch to the older tutorial or the newer Django version.
-
- You must run ``python`` from the same directory :file:`manage.py` is in,
- or ensure that directory is on the Python path, so that ``import mysite``
- works.
-
- For more information on all of this, see the :doc:`django-admin
- documentation </ref/django-admin>`.
-
Once you're in the shell, explore the :doc:`database API </topics/db/queries>`::
>>> from polls.models import Question, Choice # Import the model classes we just wrote.
| This isn't something I would recommend, even for non-beginners. | https://api.github.com/repos/django/django/pulls/9573 | 2018-01-11T17:04:01Z | 2018-01-12T00:06:00Z | 2018-01-12T00:06:00Z | 2018-01-12T00:12:05Z | 377 | django/django | 51,135 |
Add --disable=locally-enabled to .pylintrc. | diff --git a/.pylintrc b/.pylintrc
index 36d8c286ff7..1d3f0ac4f0c 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -41,7 +41,7 @@ load-plugins=linter_plugin
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
-disable=fixme,locally-disabled,abstract-class-not-used,abstract-class-little-used,bad-continuation,too-few-public-methods,no-self-use,invalid-name,too-many-instance-attributes,cyclic-import,duplicate-code
+disable=fixme,locally-disabled,locally-enabled,abstract-class-not-used,abstract-class-little-used,bad-continuation,too-few-public-methods,no-self-use,invalid-name,too-many-instance-attributes,cyclic-import,duplicate-code
# abstract-class-not-used cannot be disabled locally (at least in
# pylint 1.4.1), same for abstract-class-little-used
| Fixes #6060. | https://api.github.com/repos/certbot/certbot/pulls/6159 | 2018-06-28T20:50:48Z | 2018-06-28T22:06:53Z | 2018-06-28T22:06:52Z | 2018-06-28T22:08:19Z | 250 | certbot/certbot | 261 |
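For the `.pylintrc` change above, `locally-enabled` (historically message ID I0012) is reported when a message is re-enabled inline, as in this illustrative snippet; adding it to `disable=` silences those reports:

```python
# pylint: disable=invalid-name
x = 1  # short name permitted while the check is disabled
# pylint: enable=invalid-name
# The re-enable pragma above is what emits the locally-enabled message.
y_value = 2
```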
Reset deanonymizer mapping | diff --git a/libs/experimental/langchain_experimental/data_anonymizer/base.py b/libs/experimental/langchain_experimental/data_anonymizer/base.py
index 292d2a2a0f6903..0a7d201b5f2d39 100644
--- a/libs/experimental/langchain_experimental/data_anonymizer/base.py
+++ b/libs/experimental/langchain_experimental/data_anonymizer/base.py
@@ -30,3 +30,7 @@ def deanonymize(self, text: str) -> str:
@abstractmethod
def _deanonymize(self, text: str) -> str:
"""Abstract method to deanonymize text"""
+
+ @abstractmethod
+ def reset_deanonymizer_mapping(self) -> None:
+ """Abstract method to reset deanonymizer mapping"""
diff --git a/libs/experimental/langchain_experimental/data_anonymizer/presidio.py b/libs/experimental/langchain_experimental/data_anonymizer/presidio.py
index 6f102fdd62b552..a9c3f80aad9225 100644
--- a/libs/experimental/langchain_experimental/data_anonymizer/presidio.py
+++ b/libs/experimental/langchain_experimental/data_anonymizer/presidio.py
@@ -312,6 +312,10 @@ def _deanonymize(
return text_to_deanonymize
+ def reset_deanonymizer_mapping(self) -> None:
+ """Reset the deanonymizer mapping"""
+ self._deanonymizer_mapping = DeanonymizerMapping()
+
def save_deanonymizer_mapping(self, file_path: Union[Path, str]) -> None:
"""Save the deanonymizer mapping to a JSON or YAML file.
| @hwchase17 @baskaryan | https://api.github.com/repos/langchain-ai/langchain/pulls/11559 | 2023-10-09T15:49:56Z | 2023-10-09T18:11:05Z | 2023-10-09T18:11:05Z | 2023-10-09T18:11:05Z | 383 | langchain-ai/langchain | 43,065 |
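A hedged usage sketch of the new method; it requires the optional Presidio dependencies, and the sample text is made up:

```python
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer

anonymizer = PresidioReversibleAnonymizer()
masked = anonymizer.anonymize("Call John Doe at 313-666-7440")

print(anonymizer.deanonymizer_mapping)   # populated by the anonymize() call
anonymizer.reset_deanonymizer_mapping()  # the method added in this PR
print(anonymizer.deanonymizer_mapping)   # now empty
```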
Add simple tests for pipelines. | diff --git a/tests/test_pipelines.py b/tests/test_pipelines.py
new file mode 100644
index 00000000000..bc53f5427f1
--- /dev/null
+++ b/tests/test_pipelines.py
@@ -0,0 +1,71 @@
+from twisted.internet import defer
+from twisted.internet.defer import Deferred
+from twisted.trial import unittest
+
+from scrapy import Spider, signals, Request
+from scrapy.utils.test import get_crawler
+
+from tests.mockserver import MockServer
+
+
+class SimplePipeline:
+ def process_item(self, item, spider):
+ item['pipeline_passed'] = True
+ return item
+
+
+class DeferredPipeline:
+ def cb(self, item):
+ item['pipeline_passed'] = True
+ return item
+
+ def process_item(self, item, spider):
+ d = Deferred()
+ d.addCallback(self.cb)
+ d.callback(item)
+ return d
+
+
+class ItemSpider(Spider):
+ name = 'itemspider'
+
+ def start_requests(self):
+ yield Request(self.mockserver.url('/status?n=200'))
+
+ def parse(self, response):
+ return {'field': 42}
+
+
+class PipelineTestCase(unittest.TestCase):
+ def setUp(self):
+ self.mockserver = MockServer()
+ self.mockserver.__enter__()
+
+ def tearDown(self):
+ self.mockserver.__exit__(None, None, None)
+
+ def _on_item_scraped(self, item):
+ self.assertIsInstance(item, dict)
+ self.assertTrue(item.get('pipeline_passed'))
+ self.items.append(item)
+
+ def _create_crawler(self, pipeline_class):
+ settings = {
+ 'ITEM_PIPELINES': {__name__ + '.' + pipeline_class.__name__: 1},
+ }
+ crawler = get_crawler(ItemSpider, settings)
+ crawler.signals.connect(self._on_item_scraped, signals.item_scraped)
+ self.items = []
+ return crawler
+
+ @defer.inlineCallbacks
+ def test_simple_pipeline(self):
+ crawler = self._create_crawler(SimplePipeline)
+ yield crawler.crawl(mockserver=self.mockserver)
+ self.assertEqual(len(self.items), 1)
+
+ @defer.inlineCallbacks
+ def test_deferred_pipeline(self):
+ crawler = self._create_crawler(DeferredPipeline)
+ yield crawler.crawl(mockserver=self.mockserver)
+ self.assertEqual(len(self.items), 1)
| This is another small PR that first adds simple tests for stuff where I needed asyncio tests. | https://api.github.com/repos/scrapy/scrapy/pulls/4236 | 2019-12-16T17:40:51Z | 2019-12-17T14:38:07Z | 2019-12-17T14:38:07Z | 2019-12-17T14:38:11Z | 560 | scrapy/scrapy | 34,984 |
✨ Re-export Starlette's `WebSocketException` and add it to docs | diff --git a/docs/en/docs/advanced/websockets.md b/docs/en/docs/advanced/websockets.md
index 0e9bc5b06b378..3cf840819fdcc 100644
--- a/docs/en/docs/advanced/websockets.md
+++ b/docs/en/docs/advanced/websockets.md
@@ -112,17 +112,15 @@ In WebSocket endpoints you can import from `fastapi` and use:
They work the same way as for other FastAPI endpoints/*path operations*:
-```Python hl_lines="58-65 68-83"
+```Python hl_lines="66-77 76-91"
{!../../../docs_src/websockets/tutorial002.py!}
```
!!! info
- In a WebSocket it doesn't really make sense to raise an `HTTPException`. So it's better to close the WebSocket connection directly.
+ As this is a WebSocket it doesn't really make sense to raise an `HTTPException`, instead we raise a `WebSocketException`.
You can use a closing code from the <a href="https://tools.ietf.org/html/rfc6455#section-7.4.1" class="external-link" target="_blank">valid codes defined in the specification</a>.
- In the future, there will be a `WebSocketException` that you will be able to `raise` from anywhere, and add exception handlers for it. It depends on the <a href="https://github.com/encode/starlette/pull/527" class="external-link" target="_blank">PR #527</a> in Starlette.
-
### Try the WebSockets with dependencies
If your file is named `main.py`, run your application with:
diff --git a/docs_src/websockets/tutorial002.py b/docs_src/websockets/tutorial002.py
index cf5c7e805af86..cab749e4db7fa 100644
--- a/docs_src/websockets/tutorial002.py
+++ b/docs_src/websockets/tutorial002.py
@@ -1,6 +1,14 @@
from typing import Union
-from fastapi import Cookie, Depends, FastAPI, Query, WebSocket, status
+from fastapi import (
+ Cookie,
+ Depends,
+ FastAPI,
+ Query,
+ WebSocket,
+ WebSocketException,
+ status,
+)
from fastapi.responses import HTMLResponse
app = FastAPI()
@@ -61,7 +69,7 @@ async def get_cookie_or_token(
token: Union[str, None] = Query(default=None),
):
if session is None and token is None:
- await websocket.close(code=status.WS_1008_POLICY_VIOLATION)
+ raise WebSocketException(code=status.WS_1008_POLICY_VIOLATION)
return session or token
diff --git a/fastapi/__init__.py b/fastapi/__init__.py
index a5c7aeb17600e..70f363c89c121 100644
--- a/fastapi/__init__.py
+++ b/fastapi/__init__.py
@@ -8,6 +8,7 @@
from .background import BackgroundTasks as BackgroundTasks
from .datastructures import UploadFile as UploadFile
from .exceptions import HTTPException as HTTPException
+from .exceptions import WebSocketException as WebSocketException
from .param_functions import Body as Body
from .param_functions import Cookie as Cookie
from .param_functions import Depends as Depends
diff --git a/fastapi/exceptions.py b/fastapi/exceptions.py
index 0f50acc6c58d0..ca097b1cef5f8 100644
--- a/fastapi/exceptions.py
+++ b/fastapi/exceptions.py
@@ -3,6 +3,7 @@
from pydantic import BaseModel, ValidationError, create_model
from pydantic.error_wrappers import ErrorList
from starlette.exceptions import HTTPException as StarletteHTTPException
+from starlette.exceptions import WebSocketException as WebSocketException # noqa: F401
class HTTPException(StarletteHTTPException):
| ✨ Re-export Starlette's `WebSocketException` and add it to docs | https://api.github.com/repos/tiangolo/fastapi/pulls/5629 | 2022-11-13T16:01:56Z | 2022-11-13T16:10:54Z | 2022-11-13T16:10:54Z | 2022-11-13T18:47:50Z | 856 | tiangolo/fastapi | 22,952 |
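A minimal usage sketch of the newly re-exported exception, assuming a FastAPI version that includes this PR; the endpoint and parameter names are hypothetical:

```python
from typing import Union

from fastapi import FastAPI, Query, WebSocket, WebSocketException, status

app = FastAPI()


@app.websocket("/items")
async def items_ws(websocket: WebSocket, token: Union[str, None] = Query(default=None)):
    if token is None:
        # FastAPI catches this and closes the socket with the given code,
        # replacing the old explicit websocket.close(...) call.
        raise WebSocketException(code=status.WS_1008_POLICY_VIOLATION)
    await websocket.accept()
    await websocket.send_text(f"token: {token}")
```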
Refs #27685 -- Logged unexpected Watchman autoreloader errors. | diff --git a/django/utils/autoreload.py b/django/utils/autoreload.py
index 559812d9c3ac9..7ca0d68fae7aa 100644
--- a/django/utils/autoreload.py
+++ b/django/utils/autoreload.py
@@ -509,6 +509,7 @@ def tick(self):
except pywatchman.SocketTimeout:
pass
except pywatchman.WatchmanError as ex:
+ logger.debug('Watchman error: %s, checking server status.', ex)
self.check_server_status(ex)
else:
for sub in list(self.client.subs.keys()):
| https://api.github.com/repos/django/django/pulls/11305 | 2019-04-29T21:15:46Z | 2019-05-15T05:34:20Z | 2019-05-15T05:34:20Z | 2019-05-15T09:36:28Z | 145 | django/django | 51,520 |
|
add 3 template engines + add lang in menu | diff --git a/Server Side Template Injection/README.md b/Server Side Template Injection/README.md
index c911119029..4aeb0ab63e 100644
--- a/Server Side Template Injection/README.md
+++ b/Server Side Template Injection/README.md
@@ -15,7 +15,7 @@
- [Expression Language EL - Basic injection](#expression-language-el---basic-injection)
- [Expression Language EL - One-Liner injections not including code execution](#expression-language-el---one-liner-injections-not-including-code-execution)
- [Expression Language EL - Code Execution](#expression-language-el---code-execution)
- - [Freemarker](#freemarker)
+ - [Java - Freemarker](#freemarker)
- [Freemarker - Basic injection](#freemarker---basic-injection)
- [Freemarker - Read File](#freemarker---read-file)
- [Freemarker - Code execution](#freemarker---code-execution)
@@ -26,7 +26,7 @@
- [Groovy - HTTP request:](#groovy---http-request)
- [Groovy - Command Execution](#groovy---command-execution)
- [Groovy - Sandbox Bypass](#groovy---sandbox-bypass)
- - [Handlebars](#handlebars)
+ - [JavaScript - Handlebars](#handlebars)
- [Handlebars - Command Execution](#handlebars---command-execution)
- [Jade / Codepen](#jade--codepen)
- [Java](#java)
@@ -34,7 +34,7 @@
- [Java - Retrieve the system’s environment variables](#java---retrieve-the-systems-environment-variables)
- [Java - Retrieve /etc/passwd](#java---retrieve-etcpasswd)
- [Django Template](#django-template)
- - [Jinja2](#jinja2)
+ - [Python - Jinja2](#jinja2)
- [Jinja2 - Basic injection](#jinja2---basic-injection)
- [Jinja2 - Template format](#jinja2---template-format)
- [Jinja2 - Debug Statement](#jinja2---debug-statement)
@@ -48,16 +48,16 @@
- [Exploit the SSTI by calling Popen without guessing the offset](#exploit-the-ssti-by-calling-popen-without-guessing-the-offset)
- [Exploit the SSTI by writing an evil config file.](#exploit-the-ssti-by-writing-an-evil-config-file)
- [Jinja2 - Filter bypass](#jinja2---filter-bypass)
- - [Jinjava](#jinjava)
+ - [Java - Jinjava](#jinjava)
- [Jinjava - Basic injection](#jinjava---basic-injection)
- [Jinjava - Command execution](#jinjava---command-execution)
- - [Lessjs](#lessjs)
+ - [JavaScript - Lessjs](#lessjs)
- [Lessjs - SSRF / LFI](#lessjs---ssrf--lfi)
- [Lessjs < v3 - Command Execution](#lessjs--v3---command-execution)
- [Plugins](#plugins)
- - [Mako](#mako)
+ - [Python - Mako](#mako)
- [Direct access to os from TemplateNamespace:](#direct-access-to-os-from-templatenamespace)
- - [Pebble](#pebble)
+ - [Java - Pebble](#pebble)
- [Pebble - Basic injection](#pebble---basic-injection)
- [Pebble - Code execution](#pebble---code-execution)
- [Ruby](#ruby)
@@ -65,13 +65,16 @@
- [Ruby - Retrieve /etc/passwd](#ruby---retrieve-etcpasswd)
- [Ruby - List files and directories](#ruby---list-files-and-directories)
- [Ruby - Code execution](#ruby---code-execution)
- - [Smarty](#smarty)
- - [Twig](#twig)
+ - [PHP - Smarty](#smarty)
+ - [PHP - Twig](#twig)
- [Twig - Basic injection](#twig---basic-injection)
- [Twig - Template format](#twig---template-format)
- [Twig - Arbitrary File Reading](#twig---arbitrary-file-reading)
- [Twig - Code execution](#twig---code-execution)
- - [Velocity](#velocity)
+ - [Java - Velocity](#velocity)
+ - [PHP - patTemplate](#pattemplate)
+ - [PHP - PHPlib](#phplib-and-html_template_phplib)
+ - [PHP - Plates](#plates)
- [References](#references)
## Tools
@@ -945,6 +948,126 @@ $str.valueOf($chr.toChars($out.read()))
---
+## patTemplate
+
+> [patTemplate](https://github.com/wernerwa/pat-template) non-compiling PHP templating engine, that uses XML tags to divide a document into different parts
+
+```xml
+<patTemplate:tmpl name="page">
+ This is the main page.
+ <patTemplate:tmpl name="foo">
+ It contains another template.
+ </patTemplate:tmpl>
+ <patTemplate:tmpl name="hello">
+ Hello {NAME}.<br/>
+ </patTemplate:tmpl>
+</patTemplate:tmpl>
+```
+
+---
+
+## PHPlib and HTML_Template_PHPLIB
+
+[HTML_Template_PHPLIB](https://github.com/pear/HTML_Template_PHPLIB) is the same as PHPlib but ported to Pear.
+
+`authors.tpl`
+
+```html
+<html>
+ <head><title>{PAGE_TITLE}</title></head>
+ <body>
+ <table>
+ <caption>Authors</caption>
+ <thead>
+ <tr><th>Name</th><th>Email</th></tr>
+ </thead>
+ <tfoot>
+ <tr><td colspan="2">{NUM_AUTHORS}</td></tr>
+ </tfoot>
+ <tbody>
+<!-- BEGIN authorline -->
+ <tr><td>{AUTHOR_NAME}</td><td>{AUTHOR_EMAIL}</td></tr>
+<!-- END authorline -->
+ </tbody>
+ </table>
+ </body>
+</html>
+```
+
+`authors.php`
+
+```php
+<?php
+//we want to display this author list
+$authors = array(
+ 'Christian Weiske' => 'cweiske@php.net',
+ 'Bjoern Schotte' => 'schotte@mayflower.de'
+);
+
+require_once 'HTML/Template/PHPLIB.php';
+//create template object
+$t =& new HTML_Template_PHPLIB(dirname(__FILE__), 'keep');
+//load file
+$t->setFile('authors', 'authors.tpl');
+//set block
+$t->setBlock('authors', 'authorline', 'authorline_ref');
+
+//set some variables
+$t->setVar('NUM_AUTHORS', count($authors));
+$t->setVar('PAGE_TITLE', 'Code authors as of ' . date('Y-m-d'));
+
+//display the authors
+foreach ($authors as $name => $email) {
+ $t->setVar('AUTHOR_NAME', $name);
+ $t->setVar('AUTHOR_EMAIL', $email);
+ $t->parse('authorline_ref', 'authorline', true);
+}
+
+//finish and echo
+echo $t->finish($t->parse('OUT', 'authors'));
+?>
+```
+
+---
+
+## Plates
+
+Plates is inspired by Twig but a native PHP template engine instead of a compiled template engine.
+
+controller:
+
+```php
+// Create new Plates instance
+$templates = new League\Plates\Engine('/path/to/templates');
+
+// Render a template
+echo $templates->render('profile', ['name' => 'Jonathan']);
+```
+
+page template:
+
+```php
+<?php $this->layout('template', ['title' => 'User Profile']) ?>
+
+<h1>User Profile</h1>
+<p>Hello, <?=$this->e($name)?></p>
+```
+
+layout template:
+
+```php
+<html>
+ <head>
+ <title><?=$this->e($title)?></title>
+ </head>
+ <body>
+ <?=$this->section('content')?>
+ </body>
+</html>
+```
+
+---
+
## References
* [https://nvisium.com/blog/2016/03/11/exploring-ssti-in-flask-jinja2-part-ii/](https://nvisium.com/blog/2016/03/11/exploring-ssti-in-flask-jinja2-part-ii/)
| https://api.github.com/repos/swisskyrepo/PayloadsAllTheThings/pulls/543 | 2022-09-21T09:29:04Z | 2022-09-21T09:42:33Z | 2022-09-21T09:42:33Z | 2022-09-21T21:18:19Z | 2,014 | swisskyrepo/PayloadsAllTheThings | 8,732 |
|
community: Implement lazy_load() for UnstructuredBaseLoader | diff --git a/libs/community/langchain_community/document_loaders/unstructured.py b/libs/community/langchain_community/document_loaders/unstructured.py
index b7ee7717056eda..2878be257506e5 100644
--- a/libs/community/langchain_community/document_loaders/unstructured.py
+++ b/libs/community/langchain_community/document_loaders/unstructured.py
@@ -1,7 +1,7 @@
"""Loader that uses unstructured to load files."""
import collections
from abc import ABC, abstractmethod
-from typing import IO, Any, Callable, Dict, List, Optional, Sequence, Union
+from typing import IO, Any, Callable, Dict, Iterator, List, Optional, Sequence, Union
from langchain_core.documents import Document
@@ -82,12 +82,11 @@ def _post_process_elements(self, elements: list) -> list:
element.apply(post_processor)
return elements
- def load(self) -> List[Document]:
+ def lazy_load(self) -> Iterator[Document]:
"""Load file."""
elements = self._get_elements()
self._post_process_elements(elements)
if self.mode == "elements":
- docs: List[Document] = list()
for element in elements:
metadata = self._get_metadata()
# NOTE(MthwRobinson) - the attribute check is for backward compatibility
@@ -96,7 +95,7 @@ def load(self) -> List[Document]:
metadata.update(element.metadata.to_dict())
if hasattr(element, "category"):
metadata["category"] = element.category
- docs.append(Document(page_content=str(element), metadata=metadata))
+ yield Document(page_content=str(element), metadata=metadata)
elif self.mode == "paged":
text_dict: Dict[int, str] = {}
meta_dict: Dict[int, Dict] = {}
@@ -118,17 +117,14 @@ def load(self) -> List[Document]:
meta_dict[page_number].update(metadata)
# Convert the dict to a list of Document objects
- docs = [
- Document(page_content=text_dict[key], metadata=meta_dict[key])
- for key in text_dict.keys()
- ]
+ for key in text_dict.keys():
+ yield Document(page_content=text_dict[key], metadata=meta_dict[key])
elif self.mode == "single":
metadata = self._get_metadata()
text = "\n\n".join([str(el) for el in elements])
- docs = [Document(page_content=text, metadata=metadata)]
+ yield Document(page_content=text, metadata=metadata)
else:
raise ValueError(f"mode of {self.mode} not supported.")
- return docs
class UnstructuredFileLoader(UnstructuredBaseLoader):
| Covered by a number of existing subclass tests: `test_csv_loader.py`, `test_xml.py`, ... | https://api.github.com/repos/langchain-ai/langchain/pulls/18647 | 2024-03-06T10:19:50Z | 2024-03-06T18:13:10Z | 2024-03-06T18:13:10Z | 2024-03-06T21:53:27Z | 578 | langchain-ai/langchain | 43,596 |
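A sketch of the same generator pattern in a custom loader (not from the PR), assuming a `langchain_core` version where `BaseLoader.load()` simply collects `lazy_load()`:

```python
from typing import Iterator

from langchain_core.document_loaders import BaseLoader
from langchain_core.documents import Document


class LineLoader(BaseLoader):
    """Hypothetical loader yielding one Document per line of a text file."""

    def __init__(self, path: str) -> None:
        self.path = path

    def lazy_load(self) -> Iterator[Document]:
        with open(self.path, encoding="utf8") as f:
            for i, line in enumerate(f):
                # Yielded one at a time, so the whole file is never buffered.
                yield Document(page_content=line, metadata={"line": i})
```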
Fix api extension duplicating | diff --git a/extensions/api/script.py b/extensions/api/script.py
index 1774c345fc..bd7c19002a 100644
--- a/extensions/api/script.py
+++ b/extensions/api/script.py
@@ -1,8 +1,9 @@
+import json
from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
from threading import Thread
+
from modules import shared
-from modules.text_generation import generate_reply, encode
-import json
+from modules.text_generation import encode, generate_reply
params = {
'port': 5000,
@@ -87,5 +88,5 @@ def run_server():
print(f'Starting KoboldAI compatible api at http://{server_addr[0]}:{server_addr[1]}/api')
server.serve_forever()
-def ui():
+def setup():
Thread(target=run_server, daemon=True).start()
diff --git a/modules/extensions.py b/modules/extensions.py
index dbc938404b..c55dc97882 100644
--- a/modules/extensions.py
+++ b/modules/extensions.py
@@ -7,6 +7,7 @@
state = {}
available_extensions = []
+setup_called = False
def load_extensions():
global state
@@ -39,6 +40,8 @@ def apply_extensions(text, typ):
return text
def create_extensions_block():
+ global setup_called
+
# Updating the default values
for extension, name in iterator():
if hasattr(extension, 'params'):
@@ -47,8 +50,19 @@ def create_extensions_block():
if _id in shared.settings:
extension.params[param] = shared.settings[_id]
+ should_display_ui = False
+
+ # Running setup function
+ if not setup_called:
+ for extension, name in iterator():
+ if hasattr(extension, "setup"):
+ extension.setup()
+ if hasattr(extension, "ui"):
+ should_display_ui = True
+ setup_called = True
+
# Creating the extension ui elements
- if len(state) > 0:
+ if should_display_ui:
with gr.Box(elem_id="extensions"):
gr.Markdown("Extensions")
for extension, name in iterator():
| Prevent the server in the `api` extension from running multiple times, which caused a "Port already taken" error.
I added a new `setup()` method for extensions that runs once when the app is first loaded, so an extension can initialize itself as soon as the app is ready.
It also avoids rendering an empty "Extensions" block when no extension provides an actual UI.
| https://api.github.com/repos/oobabooga/text-generation-webui/pulls/433 | 2023-03-19T13:35:52Z | 2023-03-24T19:56:11Z | 2023-03-24T19:56:11Z | 2023-03-28T15:01:20Z | 472 | oobabooga/text-generation-webui | 26,008 |
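A hypothetical extension module sketching the `setup()`-versus-`ui()` split described in the PR body (the module and its helper are invented; only the hook names come from the diff):

```python
from threading import Thread

_started = False


def _serve():
    """Placeholder for the blocking server loop an extension might run."""


def setup():
    # Called once when the app first loads, so the port is bound a
    # single time even if the Gradio UI is rebuilt later.
    global _started
    if not _started:
        Thread(target=_serve, daemon=True).start()
        _started = True

# No ui() defined: with this PR, the "Extensions" block is only drawn
# when at least one extension actually provides a ui() hook.
```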
Tweak diskcache limits | diff --git a/modules/cache.py b/modules/cache.py
index 9df248d7b84..f4e5f702b42 100644
--- a/modules/cache.py
+++ b/modules/cache.py
@@ -20,6 +20,14 @@ def dump_cache():
pass
+def make_cache(subsection: str) -> diskcache.Cache:
+ return diskcache.Cache(
+ os.path.join(cache_dir, subsection),
+ size_limit=2**32, # 4 GB, culling oldest first
+ disk_min_file_size=2**18, # keep up to 256KB in Sqlite
+ )
+
+
def convert_old_cached_data():
try:
with open(cache_filename, "r", encoding="utf8") as file:
@@ -37,7 +45,7 @@ def convert_old_cached_data():
for subsection, keyvalues in data.items():
cache_obj = caches.get(subsection)
if cache_obj is None:
- cache_obj = diskcache.Cache(os.path.join(cache_dir, subsection))
+ cache_obj = make_cache(subsection)
caches[subsection] = cache_obj
for key, value in keyvalues.items():
@@ -64,7 +72,7 @@ def cache(subsection):
cache_obj = caches.get(subsection)
if not cache_obj:
- cache_obj = diskcache.Cache(os.path.join(cache_dir, subsection))
+ cache_obj = make_cache(subsection)
caches[subsection] = cache_obj
return cache_obj
| ## Description
By default, Diskcache has a size limit of 1 GB, after which it starts discarding the oldest objects; this bumps the limit up to 4 GB. It could be made effectively unlimited with a larger number.
Also, it is probably okay to keep objects of up to 256 KB in the SQLite database.
Nb: I couldn't test this locally right now, and I just missed adding these as a review comment in #15287, sorry.
## Checklist:
- [x] I have read [contributing wiki page](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing)
- [x] I have performed a self-review of my own code
- [x] My code follows the [style guidelines](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing#code-style)
- [ ] My code passes [tests](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Tests)
| https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/pulls/15299 | 2024-03-17T20:27:48Z | 2024-03-17T20:59:12Z | 2024-03-17T20:59:12Z | 2024-03-17T20:59:15Z | 332 | AUTOMATIC1111/stable-diffusion-webui | 40,515 |
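A standalone sketch of the limits set in `make_cache()`, calling the python-diskcache API directly; the directory path is a placeholder and the values mirror the diff:

```python
import diskcache

cache = diskcache.Cache(
    "/tmp/example-cache",
    size_limit=2**32,          # ~4 GB; oldest entries are culled first
    disk_min_file_size=2**18,  # values under 256 KB stay in SQLite
)
cache["small"] = b"x" * 1024   # stored inline in the database
cache["large"] = b"x" * 2**20  # spilled to a file on disk
```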
fix return type for `Table.expand()` getter | diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md
index 9b5f9b6c7..cd2eb646e 100644
--- a/CONTRIBUTORS.md
+++ b/CONTRIBUTORS.md
@@ -8,6 +8,7 @@ The following people have contributed to the development of Rich:
- [Oleksis Fraga](https://github.com/oleksis)
- [Finn Hughes](https://github.com/finnhughes)
- [Josh Karpel](https://github.com/JoshKarpel)
+- [Andrew Kettmann](https://github.com/akettmann)
- [Hedy Li](https://github.com/hedythedev)
- [Alexander Mancevice](https://github.com/amancevice)
- [Will McGugan](https://github.com/willmcgugan)
diff --git a/rich/table.py b/rich/table.py
index b6a9c1ae6..05bf3ab62 100644
--- a/rich/table.py
+++ b/rich/table.py
@@ -247,7 +247,7 @@ def grid(
)
@property
- def expand(self) -> int:
+ def expand(self) -> bool:
"""Setting a non-None self.width implies expand."""
return self._expand or self.width is not None
| The return type for `Table.expand()` was `int`; it should be `bool`, based on the type annotation of the `expand` argument to `Table.__init__`.
## Type of changes
- [x] Bug fix
- [ ] New feature
- [x] Documentation / docstrings
- [ ] Tests
- [ ] Other
## Checklist
- [x] I've run the latest [black](https://github.com/psf/black) with default args on new code.
- [x] I've updated CHANGELOG.md and CONTRIBUTORS.md where appropriate.
- [ ] I've added tests for new code.
- [x] I accept that @willmcgugan may be pedantic in the code review.
## Description
Fix type annotation for Table.expand getter.
| https://api.github.com/repos/Textualize/rich/pulls/1586 | 2021-10-14T15:50:37Z | 2021-10-16T09:34:28Z | 2021-10-16T09:34:28Z | 2021-10-16T09:34:28Z | 303 | Textualize/rich | 48,110 |
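A generic sketch of the pattern being annotated — a boolean property whose getter derives its value from a second attribute; `Box` is an invented class, not Rich's `Table`:

```python
from typing import Optional


class Box:
    def __init__(self, expand: bool = False, width: Optional[int] = None) -> None:
        self._expand = expand
        self.width = width

    @property
    def expand(self) -> bool:  # annotating this as int would mislead type checkers
        """Setting a non-None width implies expand."""
        return self._expand or self.width is not None

    @expand.setter
    def expand(self, value: bool) -> None:
        self._expand = value
```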
Bump lupupy to 0.2.1 | diff --git a/homeassistant/components/lupusec/manifest.json b/homeassistant/components/lupusec/manifest.json
index cb526b004de6..5792f186798a 100644
--- a/homeassistant/components/lupusec/manifest.json
+++ b/homeassistant/components/lupusec/manifest.json
@@ -2,7 +2,7 @@
"domain": "lupusec",
"name": "Lupus Electronics LUPUSEC",
"documentation": "https://www.home-assistant.io/integrations/lupusec",
- "requirements": ["lupupy==0.1.9"],
+ "requirements": ["lupupy==0.2.1"],
"codeowners": ["@majuss"],
"iot_class": "local_polling",
"loggers": ["lupupy"]
diff --git a/requirements_all.txt b/requirements_all.txt
index 3d9fb38cb266..c052349f42b7 100644
--- a/requirements_all.txt
+++ b/requirements_all.txt
@@ -1052,7 +1052,7 @@ london-tube-status==0.5
luftdaten==0.7.4
# homeassistant.components.lupusec
-lupupy==0.1.9
+lupupy==0.2.1
# homeassistant.components.lw12wifi
lw12==0.9.2
| <!--
You are amazing! Thanks for contributing to our project!
Please, DO NOT DELETE ANY TEXT from this template! (unless instructed).
-->
<!--
If your PR contains a breaking change for existing users, it is important
to tell them what breaks, how to make it work again and why we did this.
This piece of text is published with the release notes, so it helps if you
write it towards our users, not us.
Note: Remove this section if this PR is NOT a breaking change.
-->
## Proposed change
- lupupy changes: https://github.com/majuss/lupupy/compare/1a115f630de36ec02afc3d7ea59e2ac84bd2bb21...4dbd888581950b119c16a27045dc9c0d4a69455a
- bump lupupy to resolve issue: https://github.com/home-assistant/core/issues/81598
<!--
Describe the big picture of your changes here to communicate to the
maintainers why we should accept this pull request. If it fixes a bug
or resolves a feature request, be sure to link to that issue in the
additional information section.
-->
## Type of change
<!--
What type of change does your PR introduce to Home Assistant?
NOTE: Please, check only 1! box!
If your PR requires multiple boxes to be checked, you'll most likely need to
split it into multiple PRs. This makes things easier and faster to code review.
-->
- [x] Dependency upgrade
- [ ] Bugfix (non-breaking change which fixes an issue)
- [ ] New integration (thank you!)
- [ ] New feature (which adds functionality to an existing integration)
- [ ] Deprecation (breaking change to happen in the future)
- [ ] Breaking change (fix/feature causing existing functionality to break)
- [ ] Code quality improvements to existing code or addition of tests
## Additional information
<!--
Details are important, and help maintainers processing your PR.
Please be sure to fill out additional details, if applicable.
-->
- This PR fixes or closes issue: https://github.com/home-assistant/core/issues/81598
## Checklist
<!--
Put an `x` in the boxes that apply. You can also fill these out after
creating the PR. If you're unsure about any of them, don't hesitate to ask.
We're here to help! This is simply a reminder of what we are going to look
for before merging your code.
-->
- [x] The code change is tested and works locally.
- [ ] Local tests pass. **Your PR cannot be merged unless tests pass**
- [x] There is no commented out code in this PR.
- [x] I have followed the [development checklist][dev-checklist]
- [x] The code has been formatted using Black (`black --fast homeassistant tests`)
- [ ] Tests have been added to verify that the new code works.
If user exposed functionality or configuration variables are added/changed:
- [ ] Documentation added/updated for [www.home-assistant.io][docs-repository]
If the code communicates with devices, web services, or third-party tools:
- [x] The [manifest file][manifest-docs] has all fields filled out correctly.
Updated and included derived files by running: `python3 -m script.hassfest`.
- [ ] New or updated dependencies have been added to `requirements_all.txt`.
Updated by running `python3 -m script.gen_requirements_all`.
- [ ] For the updated dependencies - a link to the changelog, or at minimum a diff between library versions is added to the PR description.
- [ ] Untested files have been added to `.coveragerc`.
<!--
This project is very active and we have a high turnover of pull requests.
Unfortunately, the number of incoming pull requests is higher than what our
reviewers can review and merge so there is a long backlog of pull requests
waiting for review. You can help here!
By reviewing another pull request, you will help raise the code quality of
that pull request and the final review will be faster. This way the general
pace of pull request reviews will go up and your wait time will go down.
When picking a pull request to review, try to choose one that hasn't yet
been reviewed.
Thanks for helping out!
-->
To help with the load of incoming pull requests:
- [ ] I have reviewed two other [open pull requests][prs] in this repository.
[prs]: https://github.com/home-assistant/core/pulls?q=is%3Aopen+is%3Apr+-author%3A%40me+-draft%3Atrue+-label%3Awaiting-for-upstream+sort%3Acreated-desc+review%3Anone+-status%3Afailure
<!--
Thank you for contributing <3
Below, some useful links you could explore:
-->
[dev-checklist]: https://developers.home-assistant.io/docs/en/development_checklist.html
[manifest-docs]: https://developers.home-assistant.io/docs/en/creating_integration_manifest.html
[quality-scale]: https://developers.home-assistant.io/docs/en/next/integration_quality_scale_index.html
[docs-repository]: https://github.com/home-assistant/home-assistant.io
| https://api.github.com/repos/home-assistant/core/pulls/83071 | 2022-12-01T20:35:51Z | 2022-12-02T16:30:25Z | 2022-12-02T16:30:25Z | 2022-12-19T17:16:55Z | 315 | home-assistant/core | 39,470 |
pass steps_per_print to tput timer | diff --git a/deepspeed/pt/deepspeed_light.py b/deepspeed/pt/deepspeed_light.py
index 1b46e6ac6d29..6c335754bf50 100755
--- a/deepspeed/pt/deepspeed_light.py
+++ b/deepspeed/pt/deepspeed_light.py
@@ -160,6 +160,7 @@ def __init__(self,
self.tput_timer = ThroughputTimer(
batch_size=self.train_micro_batch_size_per_gpu(),
num_workers=self.dp_world_size,
+ steps_per_output=self.steps_per_print(),
monitor_memory=False)
self.training_dataloader = self.deepspeed_io(
| I noticed that our throughput timer was hardcoded to print stats every 50 steps rather than being connected to the `steps_per_print` setting in the DeepSpeed config. | https://api.github.com/repos/microsoft/DeepSpeed/pulls/299 | 2020-07-23T18:54:37Z | 2020-07-24T02:20:47Z | 2020-07-24T02:20:47Z | 2020-07-24T02:20:54Z | 148 | microsoft/DeepSpeed | 10,321 |
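An illustrative timer with a configurable print interval — a simplified stand-in for the behavior described, not DeepSpeed's actual `ThroughputTimer`:

```python
import time


class SimpleThroughputTimer:
    def __init__(self, batch_size, num_workers, steps_per_output=50):
        self.samples_per_step = batch_size * num_workers
        self.steps_per_output = steps_per_output  # previously hardcoded to 50
        self.step_count = 0
        self.start = time.time()

    def step(self):
        self.step_count += 1
        if self.step_count % self.steps_per_output == 0:
            elapsed = time.time() - self.start
            rate = self.step_count * self.samples_per_step / elapsed
            print(f"step {self.step_count}: {rate:.1f} samples/s")
```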
Toyota: use torque control for Japanese-made 2023 RAV4 | diff --git a/selfdrive/car/tests/routes.py b/selfdrive/car/tests/routes.py
index c8701e8f0a5278..69a935fecec036 100755
--- a/selfdrive/car/tests/routes.py
+++ b/selfdrive/car/tests/routes.py
@@ -25,7 +25,6 @@
HONDA.ODYSSEY_CHN,
VOLKSWAGEN.CRAFTER_MK2, # need a route from an ACC-equipped Crafter
TOYOTA.RAV4_TSS2_2023,
- TOYOTA.RAV4H_TSS2_2023,
SUBARU.FORESTER_HYBRID,
]
@@ -184,6 +183,7 @@ class CarTestRoute(NamedTuple):
CarTestRoute("a5c341bb250ca2f0|2022-05-18--16-05-17", TOYOTA.RAV4_TSS2_2022),
CarTestRoute("7e34a988419b5307|2019-12-18--19-13-30", TOYOTA.RAV4H_TSS2),
CarTestRoute("2475fb3eb2ffcc2e|2022-04-29--12-46-23", TOYOTA.RAV4H_TSS2_2022),
+ CarTestRoute("49e041422a032273|2023-09-14--09-21-32", TOYOTA.RAV4H_TSS2_2023),
CarTestRoute("7a31f030957b9c85|2023-04-01--14-12-51", TOYOTA.LEXUS_ES),
CarTestRoute("e6a24be49a6cd46e|2019-10-29--10-52-42", TOYOTA.LEXUS_ES_TSS2),
CarTestRoute("da23c367491f53e2|2021-05-21--09-09-11", TOYOTA.LEXUS_CTH, segment=3),
diff --git a/selfdrive/car/torque_data/override.yaml b/selfdrive/car/torque_data/override.yaml
index fc39cd76cbe5b5..61ed8a5a7736a9 100644
--- a/selfdrive/car/torque_data/override.yaml
+++ b/selfdrive/car/torque_data/override.yaml
@@ -12,10 +12,6 @@ SUBARU FORESTER 2022: [.nan, 3.0, .nan]
SUBARU OUTBACK 7TH GEN: [.nan, 3.0, .nan]
SUBARU ASCENT 2023: [.nan, 3.0, .nan]
-# Toyota LTA also has torque
-TOYOTA RAV4 2023: [.nan, 3.0, .nan]
-TOYOTA RAV4 HYBRID 2023: [.nan, 3.0, .nan]
-
# Tesla has high torque
TESLA AP1 MODEL S: [.nan, 2.5, .nan]
TESLA AP2 MODEL S: [.nan, 2.5, .nan]
@@ -61,6 +57,8 @@ HYUNDAI KONA ELECTRIC 2ND GEN: [2.5, 2.5, 0.1]
HYUNDAI IONIQ 6 2023: [2.5, 2.5, 0.1]
HYUNDAI AZERA 6TH GEN: [1.8, 1.8, 0.1]
KIA K8 HYBRID 1ST GEN: [2.5, 2.5, 0.1]
+TOYOTA RAV4 2023: [2.5, 2.5, 0.1]
+TOYOTA RAV4 HYBRID 2023: [2.5, 2.5, 0.1]
# Dashcam or fallback configured as ideal car
mock: [10.0, 10, 0.0]
diff --git a/selfdrive/car/toyota/interface.py b/selfdrive/car/toyota/interface.py
index d6f428ab1decd6..d51026b547718f 100644
--- a/selfdrive/car/toyota/interface.py
+++ b/selfdrive/car/toyota/interface.py
@@ -27,7 +27,15 @@ def _get_params(ret, candidate, fingerprint, car_fw, experimental_long, docs):
if DBC[candidate]["pt"] == "toyota_new_mc_pt_generated":
ret.safetyConfigs[0].safetyParam |= Panda.FLAG_TOYOTA_ALT_BRAKE
- if candidate in ANGLE_CONTROL_CAR:
+ # Allow angle control cars with whitelisted EPSs to use torque control (made in Japan)
+ # So far only hybrid RAV4 2023 has been seen with this FW version
+ angle_car_torque_fw = any(fw.ecu == "eps" and fw.fwVersion == b'8965B42371\x00\x00\x00\x00\x00\x00' for fw in car_fw)
+ if candidate not in ANGLE_CONTROL_CAR or (angle_car_torque_fw and candidate == CAR.RAV4H_TSS2_2023):
+ CarInterfaceBase.configure_torque_tune(candidate, ret.lateralTuning)
+
+ ret.steerActuatorDelay = 0.12 # Default delay, Prius has larger delay
+ ret.steerLimitTimer = 0.4
+ else:
ret.dashcamOnly = True
ret.steerControlType = SteerControlType.angle
ret.safetyConfigs[0].safetyParam |= Panda.FLAG_TOYOTA_LTA
@@ -35,11 +43,6 @@ def _get_params(ret, candidate, fingerprint, car_fw, experimental_long, docs):
# LTA control can be more delayed and winds up more often
ret.steerActuatorDelay = 0.25
ret.steerLimitTimer = 0.8
- else:
- CarInterfaceBase.configure_torque_tune(candidate, ret.lateralTuning)
-
- ret.steerActuatorDelay = 0.12 # Default delay, Prius has larger delay
- ret.steerLimitTimer = 0.4
ret.stoppingControl = False # Toyota starts braking more when it thinks you want to stop
@@ -121,21 +124,25 @@ def _get_params(ret, candidate, fingerprint, car_fw, experimental_long, docs):
ret.steerRatio = 14.3
ret.tireStiffnessFactor = 0.7933
ret.mass = 3585. * CV.LB_TO_KG # Average between ICE and Hybrid
- ret.lateralTuning.init('pid')
- ret.lateralTuning.pid.kiBP = [0.0]
- ret.lateralTuning.pid.kpBP = [0.0]
- ret.lateralTuning.pid.kpV = [0.6]
- ret.lateralTuning.pid.kiV = [0.1]
- ret.lateralTuning.pid.kf = 0.00007818594
-
- # 2019+ RAV4 TSS2 uses two different steering racks and specific tuning seems to be necessary.
- # See https://github.com/commaai/openpilot/pull/21429#issuecomment-873652891
- for fw in car_fw:
- if fw.ecu == "eps" and (fw.fwVersion.startswith(b'\x02') or fw.fwVersion in [b'8965B42181\x00\x00\x00\x00\x00\x00']):
- ret.lateralTuning.pid.kpV = [0.15]
- ret.lateralTuning.pid.kiV = [0.05]
- ret.lateralTuning.pid.kf = 0.00004
- break
+
+ # Only specific EPS FW accept torque on 2023 RAV4, so they likely are all the same
+ # TODO: revisit this disparity if there is a divide for 2023
+ if candidate not in (CAR.RAV4_TSS2_2023, CAR.RAV4H_TSS2_2023):
+ ret.lateralTuning.init('pid')
+ ret.lateralTuning.pid.kiBP = [0.0]
+ ret.lateralTuning.pid.kpBP = [0.0]
+ ret.lateralTuning.pid.kpV = [0.6]
+ ret.lateralTuning.pid.kiV = [0.1]
+ ret.lateralTuning.pid.kf = 0.00007818594
+
+ # 2019+ RAV4 TSS2 uses two different steering racks and specific tuning seems to be necessary.
+ # See https://github.com/commaai/openpilot/pull/21429#issuecomment-873652891
+ for fw in car_fw:
+ if fw.ecu == "eps" and (fw.fwVersion.startswith(b'\x02') or fw.fwVersion in [b'8965B42181\x00\x00\x00\x00\x00\x00']):
+ ret.lateralTuning.pid.kpV = [0.15]
+ ret.lateralTuning.pid.kiV = [0.05]
+ ret.lateralTuning.pid.kf = 0.00004
+ break
elif candidate in (CAR.COROLLA_TSS2, CAR.COROLLAH_TSS2):
ret.wheelbase = 2.67 # Average between 2.70 for sedan and 2.64 for hatchback
diff --git a/selfdrive/car/toyota/values.py b/selfdrive/car/toyota/values.py
index bb9672fb0e3a4c..fd0a54b349ff54 100644
--- a/selfdrive/car/toyota/values.py
+++ b/selfdrive/car/toyota/values.py
@@ -2389,7 +2389,8 @@ def get_platform_codes(fw_versions: List[bytes]) -> Set[Tuple[bytes, bytes]]:
# these cars have a radar which sends ACC messages instead of the camera
RADAR_ACC_CAR = {CAR.RAV4H_TSS2_2022, CAR.RAV4_TSS2_2022, CAR.RAV4H_TSS2_2023, CAR.RAV4_TSS2_2023, CAR.CHR_TSS2, CAR.CHRH_TSS2}
-# these cars use the Lane Tracing Assist (LTA) message for lateral control
+# these cars manufactured in U.S., Canada have EPSs that reject Lane Keep Assist (LKA, torque) messages and require
+# Lane Tracing Assist (LTA, angle) to steer properly. cars manufactured in Japan still work with the older LKA messages which is detected
ANGLE_CONTROL_CAR = {CAR.RAV4H_TSS2_2023, CAR.RAV4_TSS2_2023}
EV_HYBRID_CAR = {CAR.AVALONH_2019, CAR.AVALONH_TSS2, CAR.CAMRYH, CAR.CAMRYH_TSS2, CAR.CHRH, CAR.CHRH_TSS2, CAR.COROLLAH_TSS2,
| https://api.github.com/repos/commaai/openpilot/pulls/29896 | 2023-09-13T05:17:22Z | 2023-09-14T21:48:13Z | 2023-09-14T21:48:13Z | 2023-09-14T21:48:14Z | 2,504 | commaai/openpilot | 9,764 |
|
Fix typo in efficientformer_v2 | diff --git a/timm/models/efficientformer_v2.py b/timm/models/efficientformer_v2.py
index 9388131ed6..357b258dec 100644
--- a/timm/models/efficientformer_v2.py
+++ b/timm/models/efficientformer_v2.py
@@ -232,7 +232,7 @@ def __init__(
self.attention_biases = nn.Parameter(torch.zeros(num_heads, self.N))
k_pos = torch.stack(torch.meshgrid(torch.arange(
- self.resolution[1]),
+ self.resolution[0]),
torch.arange(self.resolution[1]))).flatten(1)
q_pos = torch.stack(torch.meshgrid(
torch.arange(0, self.resolution[0], step=2),
| Typo fix in efficientformer_v2.py
`torch.arange(self.resolution[1]), torch.arange(self.resolution[1])`
-> `torch.arange(self.resolution[0]), torch.arange(self.resolution[1])` | https://api.github.com/repos/huggingface/pytorch-image-models/pulls/1918 | 2023-08-15T18:39:45Z | 2023-08-16T03:04:36Z | 2023-08-16T03:04:36Z | 2024-03-22T12:32:11Z | 161 | huggingface/pytorch-image-models | 16,310 |
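A small PyTorch check of why the typo matters: for a non-square resolution the two meshgrid ranges must differ, or the position grid silently has the wrong number of entries (the 8x6 resolution is hypothetical):

```python
import torch

h, w = 8, 6
wrong = torch.stack(torch.meshgrid(torch.arange(w), torch.arange(w))).flatten(1)
right = torch.stack(torch.meshgrid(torch.arange(h), torch.arange(w))).flatten(1)
print(wrong.shape)  # torch.Size([2, 36]) -- w*w positions, too few
print(right.shape)  # torch.Size([2, 48]) -- h*w, one per pixel
```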
pin pydantic api ref build | diff --git a/docs/api_reference/requirements.txt b/docs/api_reference/requirements.txt
index 568cafa427ddd8..d2a4e1cd7f017f 100644
--- a/docs/api_reference/requirements.txt
+++ b/docs/api_reference/requirements.txt
@@ -1,5 +1,6 @@
-e libs/langchain
-e libs/experimental
+pydantic<2
autodoc_pydantic==1.8.0
myst_parser
nbsphinx==0.8.9
| https://api.github.com/repos/langchain-ai/langchain/pulls/9556 | 2023-08-21T18:36:11Z | 2023-08-21T19:11:49Z | 2023-08-21T19:11:49Z | 2023-08-21T19:11:50Z | 116 | langchain-ai/langchain | 42,907 |
|
Build mypyc wheels for CPython 3.11 | diff --git a/.github/workflows/pypi_upload.yml b/.github/workflows/pypi_upload.yml
index d52f41a493..ae26a814c9 100644
--- a/.github/workflows/pypi_upload.yml
+++ b/.github/workflows/pypi_upload.yml
@@ -58,7 +58,7 @@ jobs:
- uses: actions/checkout@v3
- name: Build wheels via cibuildwheel
- uses: pypa/cibuildwheel@v2.8.1
+ uses: pypa/cibuildwheel@v2.10.0
env:
CIBW_ARCHS_MACOS: "${{ matrix.macos_arch }}"
# This isn't supported in pyproject.toml which makes sense (but is annoying).
diff --git a/CHANGES.md b/CHANGES.md
index 147100c301..0fa80ad812 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -25,6 +25,8 @@
<!-- Changes to how Black is packaged, such as dependency requirements -->
+- Faster compiled wheels are now available for CPython 3.11 (#3276)
+
### Parser
<!-- Changes to the parser or to version autodetection -->
diff --git a/pyproject.toml b/pyproject.toml
index a4c9c69208..122a49e004 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -55,6 +55,9 @@ MYPYC_DEBUG_LEVEL = "0"
# The dependencies required to build wheels with mypyc aren't specified in
# [build-system].requires so we'll have to manage the build environment ourselves.
PIP_NO_BUILD_ISOLATION = "no"
+# CPython 3.11 wheels aren't available for aiohttp and building a Cython extension
+# from source also doesn't work.
+AIOHTTP_NO_EXTENSIONS = "1"
[tool.cibuildwheel.linux]
before-build = [
@@ -69,6 +72,7 @@ MYPYC_DEBUG_LEVEL = "0"
PIP_NO_BUILD_ISOLATION = "no"
# Black needs Clang to compile successfully on Linux.
CC = "clang"
+AIOHTTP_NO_EXTENSIONS = "1"
[tool.cibuildwheel.windows]
# For some reason, (compiled) mypyc is failing to start up with "ImportError: DLL load
diff --git a/src/black/files.py b/src/black/files.py
index d51c1bc7a9..ed503f5fec 100644
--- a/src/black/files.py
+++ b/src/black/files.py
@@ -26,7 +26,8 @@
import tomllib
except ImportError:
# Help users on older alphas
- import tomli as tomllib
+ if not TYPE_CHECKING:
+ import tomli as tomllib
else:
import tomli as tomllib
| ### Description
Bumps cibuildwheel from 2.8.1 to 2.10.0 which has 3.11 building enabled by default. Unfortunately mypyc errors out on 3.11:
```
src/black/files.py:29:9: error: Name "tomllib" already defined (by an import) [no-redef]
```
... so we have to also hide the fallback import of tomli on older 3.11 alphas from mypy[c].
Closes #3224. Towards #3230.
### Checklist - did you ...
- [x] Add a CHANGELOG entry if necessary?
- [x] Add / update tests if necessary?
- [x] Add new / update outdated documentation?
### Review notes
To verify this works, I ran a test cibuildwheel run which passed: https://github.com/ichard26/black-mypyc-wheels/actions/runs/3075208256
I'd appreciate it if someone with an ARM Mac could check that the ARM Mac wheels don't crash. [You can download them here](https://github.com/ichard26/black-mypyc-wheels/suites/8342284497/artifacts/367142266). | https://api.github.com/repos/psf/black/pulls/3276 | 2022-09-18T01:33:34Z | 2022-09-23T03:11:56Z | 2022-09-23T03:11:56Z | 2022-09-23T03:12:05Z | 637 | psf/black | 24,416 |
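The fallback-import pattern from the diff, shown standalone as a sketch (Black's real `files.py` has more surrounding context than this):

```python
import sys
from typing import TYPE_CHECKING

if sys.version_info >= (3, 11):
    try:
        import tomllib
    except ImportError:
        # Help users on older 3.11 alphas; hidden from mypy[c] so it
        # never sees tomllib defined twice.
        if not TYPE_CHECKING:
            import tomli as tomllib
else:
    import tomli as tomllib
```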
devcontainer: use VirtualGL on mac hosts | diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
index 0e5e97316d53db..a24c524047c345 100644
--- a/.devcontainer/Dockerfile
+++ b/.devcontainer/Dockerfile
@@ -1,8 +1,13 @@
FROM ghcr.io/commaai/openpilot-base:latest
-RUN apt update && apt install -y vim net-tools usbutils htop ripgrep tmux
+RUN apt update && apt install -y vim net-tools usbutils htop ripgrep tmux wget mesa-utils xvfb libxtst6 libxv1 libglu1-mesa libegl1-mesa
RUN pip install ipython jupyter jupyterlab
+RUN cd /tmp && \
+ ARCH=$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/) && \
+ curl -L -o virtualgl.deb "https://downloads.sourceforge.net/project/virtualgl/3.1/virtualgl_3.1_$ARCH.deb" && \
+ dpkg -i virtualgl.deb
+
USER batman
RUN cd $HOME && \
diff --git a/.devcontainer/container_post_create.sh b/.devcontainer/container_post_create.sh
index b6202b3cbdef11..dee1c611018bad 100755
--- a/.devcontainer/container_post_create.sh
+++ b/.devcontainer/container_post_create.sh
@@ -3,17 +3,34 @@
TARGET_USER=batman
source .devcontainer/.host/.env
-# override display flag for mac
+# override display flag for mac hosts
if [[ $HOST_OS == darwin ]]; then
echo "Setting up DISPLAY override for macOS..."
cat <<EOF >> /home/$TARGET_USER/.bashrc
-if [ -n "\$DISPLAY" ]; then
- DISPLAY_NUM=\$(echo "\$DISPLAY" | awk -F: '{print \$NF}')
+source .devcontainer/.host/.env
+if [ -n "\$HOST_DISPLAY" ]; then
+ DISPLAY_NUM=\$(echo "\$HOST_DISPLAY" | awk -F: '{print \$NF}')
export DISPLAY=host.docker.internal:\$DISPLAY_NUM
fi
EOF
fi
+# setup virtualgl for mac hosts
+if [[ $HOST_OS == darwin ]]; then
+ echo "Setting up virtualgl for macOS..."
+ cat <<EOF >> /home/$TARGET_USER/.bashrc
+if [ -n "\$HOST_DISPLAY" ]; then
+ export VGL_PORT=10000
+ export VGL_CLIENT=host.docker.internal
+ export VGL_COMPRESS=rgb
+ export VGL_DISPLAY=:99
+ export VGL_FPS=60
+ # prevent vglrun from running exec
+ alias exec=:; source vglrun :; unalias exec
+fi
+EOF
+fi
+
# These lines are temporary, to remain backwards compatible with old devcontainers
# that were running as root and therefore had their caches written as root
sudo chown -R $TARGET_USER: /tmp/scons_cache
diff --git a/.devcontainer/container_post_start.sh b/.devcontainer/container_post_start.sh
index 4404f6a9a99973..1521b9c3fb4c30 100755
--- a/.devcontainer/container_post_start.sh
+++ b/.devcontainer/container_post_start.sh
@@ -1,7 +1,15 @@
#!/usr/bin/env bash
+source .devcontainer/.host/.env
+
# setup safe directories for submodules
SUBMODULE_DIRS=$(git config --file .gitmodules --get-regexp path | awk '{ print $2 }')
for DIR in $SUBMODULE_DIRS; do
git config --global --add safe.directory "$PWD/$DIR"
done
+
+# virtual display for virtualgl
+if [[ "$HOST_OS" == "darwin" ]] && [[ -n "$HOST_DISPLAY" ]]; then
+ echo "Starting virtual display at :99 ..."
+ tmux new-session -d -s fakedisplay Xvfb :99 -screen 0 1920x1080x24
+fi
diff --git a/.devcontainer/host_setup.sh b/.devcontainer/host_setup.sh
index b3119185a464c0..8ff81ebe421b6b 100755
--- a/.devcontainer/host_setup.sh
+++ b/.devcontainer/host_setup.sh
@@ -2,6 +2,7 @@
# pull base image
if [[ -z $USE_LOCAL_IMAGE ]]; then
+ echo "Updating openpilot_base image if needed..."
docker pull ghcr.io/commaai/openpilot-base:latest
fi
@@ -27,3 +28,20 @@ fi
HOST_INFO_FILE=".devcontainer/.host/.env"
SYSTEM=$(uname -s | tr '[:upper:]' '[:lower:]')
echo "HOST_OS=\"$SYSTEM\"" > $HOST_INFO_FILE
+echo "HOST_DISPLAY=\"$DISPLAY\"" >> $HOST_INFO_FILE
+
+# run virtualgl if macos
+if [[ $SYSTEM == "darwin" ]]; then
+ echo
+ if [[ -f /opt/VirtualGL/bin/vglclient ]]; then
+ echo "Starting VirtualGL client at port 10000..."
+ VGL_LOG_FILE=".devcontainer/.host/.vgl/vglclient.log"
+ mkdir -p "$(dirname $VGL_LOG_FILE)"
+ /opt/VirtualGL/bin/vglclient -l "$VGL_LOG_FILE" -display "$DISPLAY" -port 10000 -detach
+ else
+ echo "VirtualGL not found. GUI tools may not work properly. Some GUI tools require OpenGL to work properly. To use them with XQuartz on mac, VirtualGL needs to be installed. To install it run:"
+ echo
+ echo " brew install --cask virtualgl"
+ echo
+ fi
+fi
| Use VirtualGL to force direct rendering of OpenGL (as opposed to the indirect rendering that happens by default while forwarding X with XQuartz). This should fix the camera view in selfdrive/ui/ui and tools like plotjuggler/cabana. | https://api.github.com/repos/commaai/openpilot/pulls/30090 | 2023-09-28T23:20:33Z | 2023-09-30T00:45:07Z | 2023-09-30T00:45:07Z | 2023-09-30T00:45:08Z | 1,285 | commaai/openpilot | 9,481 |
Added List Database Administrator Accounts | diff --git a/SQL Injection/PostgreSQL Injection.md b/SQL Injection/PostgreSQL Injection.md
index d923c5a1ea..2bf952cede 100644
--- a/SQL Injection/PostgreSQL Injection.md
+++ b/SQL Injection/PostgreSQL Injection.md
@@ -7,6 +7,7 @@
* [PostgreSQL Current User](#postgresql-current-user)
* [PostgreSQL List Users](#postgresql-list-users)
* [PostgreSQL List Password Hashes](#postgresql-list-password-hashes)
+* [PostgreSQL List Database Administrator Accounts](#postgresql-list-database-administrator-accounts)
* [PostgreSQL List Privileges](#postgresql-list-privileges)
* [PostgreSQL database name](#postgresql-database-name)
* [PostgreSQL List databases](#postgresql-list-database)
@@ -57,7 +58,10 @@ SELECT usename FROM pg_user
```sql
SELECT usename, passwd FROM pg_shadow
```
-
+## PostgreSQL List Database Administrator Accounts
+```sql
+SELECT usename FROM pg_user WHERE usesuper IS TRUE
+```
## PostgreSQL List Privileges
```sql
| ## Added List Database Administrator Accounts
```sql
SELECT usename FROM pg_user WHERE usesuper IS TRUE
``` | https://api.github.com/repos/swisskyrepo/PayloadsAllTheThings/pulls/115 | 2019-10-29T05:03:16Z | 2019-10-29T07:36:02Z | 2019-10-29T07:36:01Z | 2019-10-29T07:36:02Z | 241 | swisskyrepo/PayloadsAllTheThings | 8,338 |
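A hedged Python sketch of running the same check from a client, assuming psycopg2 and placeholder connection parameters:

```python
import psycopg2

conn = psycopg2.connect("dbname=postgres user=postgres")  # placeholders
with conn.cursor() as cur:
    cur.execute("SELECT usename FROM pg_user WHERE usesuper IS TRUE")
    admins = [row[0] for row in cur.fetchall()]
print(admins)
```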
Adds citation to Baird94 | diff --git a/gym/scoreboard/__init__.py b/gym/scoreboard/__init__.py
index 93873b16244..bee54305719 100644
--- a/gym/scoreboard/__init__.py
+++ b/gym/scoreboard/__init__.py
@@ -1223,9 +1223,12 @@
Prior work has explored learning algorithms for human training scenarios of this flavor [Lopes11]_.
+Additionally, Baird and others have noted the relationship between update noise, timestep size, and convergence rate for Q-learners [Baird94]_.
+
Robustness to noisy rewards may aid scalable oversight in settings where evaluating
the true reward signal is expensive or impossible but a noisy approximation is available [Amodei16]_, [Christiano15]_.
+.. [Baird94] Baird, Leemon C. "Reinforcement learning in continuous time: Advantage updating." Neural Networks, 1994. IEEE World Congress on Computational Intelligence., 1994 IEEE International Conference on. Vol. 4. IEEE, 1994.
.. [Amodei16] Amodei, Olah, et al. `"Concrete Problems in AI safety" Arxiv. 2016. <https://arxiv.org/pdf/1606.06565v1.pdf>`_
.. [Lopes11] Lopes, Manuel, Thomas Cederbourg, and Pierre-Yves Oudeyer. "Simultaneous acquisition of task and feedback models." Development and Learning (ICDL), 2011 IEEE International Conference on. Vol. 2. IEEE, 2011.
.. [Christiano15] `AI Control <https://medium.com/ai-control/>`_
| https://api.github.com/repos/openai/gym/pulls/256 | 2016-07-20T05:39:31Z | 2016-07-21T01:02:53Z | 2016-07-21T01:02:53Z | 2016-07-21T01:02:53Z | 368 | openai/gym | 5,663 |
|
Bump eslint from 8.40.0 to 8.41.0 in /website | diff --git a/website/package-lock.json b/website/package-lock.json
index a13e31f19b..ab41279edb 100644
--- a/website/package-lock.json
+++ b/website/package-lock.json
@@ -33,7 +33,7 @@
"chart.js": "^4.3.0",
"clsx": "^1.2.1",
"date-fns": "^2.30.0",
- "eslint": "^8.40.0",
+ "eslint": "^8.41.0",
"eslint-config-next": "^13.4.2",
"eslint-plugin-simple-import-sort": "^10.0.0",
"focus-visible": "^5.2.0",
@@ -4107,9 +4107,9 @@
}
},
"node_modules/@eslint/js": {
- "version": "8.40.0",
- "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.40.0.tgz",
- "integrity": "sha512-ElyB54bJIhXQYVKjDSvCkPO1iU1tSAeVQJbllWJq1XQSmmA4dgFk8CbiBGpiOPxleE48vDogxCtmMYku4HSVLA==",
+ "version": "8.41.0",
+ "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.41.0.tgz",
+ "integrity": "sha512-LxcyMGxwmTh2lY9FwHPGWOHmYFCZvbrFCBZL4FzSSsxsRPuhrYUg/49/0KDfW8tnIEaEHtfmn6+NPN+1DqaNmA==",
"engines": {
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
}
@@ -17117,14 +17117,14 @@
}
},
"node_modules/eslint": {
- "version": "8.40.0",
- "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.40.0.tgz",
- "integrity": "sha512-bvR+TsP9EHL3TqNtj9sCNJVAFK3fBN8Q7g5waghxyRsPLIMwL73XSKnZFK0hk/O2ANC+iAoq6PWMQ+IfBAJIiQ==",
+ "version": "8.41.0",
+ "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.41.0.tgz",
+ "integrity": "sha512-WQDQpzGBOP5IrXPo4Hc0814r4/v2rrIsB0rhT7jtunIalgg6gYXWhRMOejVO8yH21T/FGaxjmFjBMNqcIlmH1Q==",
"dependencies": {
"@eslint-community/eslint-utils": "^4.2.0",
"@eslint-community/regexpp": "^4.4.0",
"@eslint/eslintrc": "^2.0.3",
- "@eslint/js": "8.40.0",
+ "@eslint/js": "8.41.0",
"@humanwhocodes/config-array": "^0.11.8",
"@humanwhocodes/module-importer": "^1.0.1",
"@nodelib/fs.walk": "^1.2.8",
@@ -17144,13 +17144,12 @@
"find-up": "^5.0.0",
"glob-parent": "^6.0.2",
"globals": "^13.19.0",
- "grapheme-splitter": "^1.0.4",
+ "graphemer": "^1.4.0",
"ignore": "^5.2.0",
"import-fresh": "^3.0.0",
"imurmurhash": "^0.1.4",
"is-glob": "^4.0.0",
"is-path-inside": "^3.0.3",
- "js-sdsl": "^4.1.4",
"js-yaml": "^4.1.0",
"json-stable-stringify-without-jsonify": "^1.0.1",
"levn": "^0.4.1",
@@ -19102,7 +19101,13 @@
"node_modules/grapheme-splitter": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/grapheme-splitter/-/grapheme-splitter-1.0.4.tgz",
- "integrity": "sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ=="
+ "integrity": "sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ==",
+ "dev": true
+ },
+ "node_modules/graphemer": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz",
+ "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag=="
},
"node_modules/graphql": {
"version": "16.6.0",
@@ -22831,15 +22836,6 @@
"node": ">=0.10.0"
}
},
- "node_modules/js-sdsl": {
- "version": "4.4.0",
- "resolved": "https://registry.npmjs.org/js-sdsl/-/js-sdsl-4.4.0.tgz",
- "integrity": "sha512-FfVSdx6pJ41Oa+CF7RDaFmTnCaFhua+SNYQX74riGOpl96x+2jQCqEfQ2bnXu/5DPCqlRuiqyvTJM0Qjz26IVg==",
- "funding": {
- "type": "opencollective",
- "url": "https://opencollective.com/js-sdsl"
- }
- },
"node_modules/js-tokens": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
diff --git a/website/package.json b/website/package.json
index e580a5112e..5bb4327f4c 100644
--- a/website/package.json
+++ b/website/package.json
@@ -51,7 +51,7 @@
"chart.js": "^4.3.0",
"clsx": "^1.2.1",
"date-fns": "^2.30.0",
- "eslint": "^8.40.0",
+ "eslint": "^8.41.0",
"eslint-config-next": "^13.4.2",
"eslint-plugin-simple-import-sort": "^10.0.0",
"focus-visible": "^5.2.0",
| Bumps [eslint](https://github.com/eslint/eslint) from 8.40.0 to 8.41.0.
<details>
<summary>Release notes</summary>
<p><em>Sourced from <a href="https://github.com/eslint/eslint/releases">eslint's releases</a>.</em></p>
<blockquote>
<h2>v8.41.0</h2>
<h2>Features</h2>
<ul>
<li><a href="https://github.com/eslint/eslint/commit/880a4317b949e575a4a6c5e8baaba1eea7674cc6"><code>880a431</code></a> feat: change default ignore pattern to <code>**/node_modules/</code> in flat config (<a href="https://redirect.github.com/eslint/eslint/issues/17184">#17184</a>) (Milos Djermanovic)</li>
<li><a href="https://github.com/eslint/eslint/commit/8bf550594fca6d29fab1a3453e701c1a457767e1"><code>8bf5505</code></a> feat: expose <code>shouldUseFlatConfig</code> (<a href="https://redirect.github.com/eslint/eslint/issues/17169">#17169</a>) (Connor Prussin)</li>
</ul>
<h2>Bug Fixes</h2>
<ul>
<li><a href="https://github.com/eslint/eslint/commit/4f5440db631707b17140c4e5cc7beb223afbd2b9"><code>4f5440d</code></a> fix: incorrect warning message for ignored dotfiles (<a href="https://redirect.github.com/eslint/eslint/issues/17196">#17196</a>) (Milos Djermanovic)</li>
<li><a href="https://github.com/eslint/eslint/commit/94da96cbf0fb2bb6694fa2e757eb1b3e74c40db7"><code>94da96c</code></a> fix: unify <code>LintMessage</code> type (<a href="https://redirect.github.com/eslint/eslint/issues/17076">#17076</a>) (Brandon Mills)</li>
<li><a href="https://github.com/eslint/eslint/commit/0c415cda5d76dbe5120ab9f3c4c81320538e35f0"><code>0c415cd</code></a> fix: validate <code>ignorePatterns</code> constructor option in <code>FlatESLint</code> class (<a href="https://redirect.github.com/eslint/eslint/issues/17139">#17139</a>) (Milos Djermanovic)</li>
<li><a href="https://github.com/eslint/eslint/commit/9682d669e4ee8641293914e21679f40fee8bc354"><code>9682d66</code></a> fix: switch <code>grapheme-splitter</code> to <code>graphemer</code> (<a href="https://redirect.github.com/eslint/eslint/issues/17160">#17160</a>) (fisker Cheung)</li>
</ul>
<h2>Documentation</h2>
<ul>
<li><a href="https://github.com/eslint/eslint/commit/7709b14e18ad4e11c1119ed6575454243b8e7084"><code>7709b14</code></a> docs: Update README (GitHub Actions Bot)</li>
<li><a href="https://github.com/eslint/eslint/commit/7f183e020579380fa57473caaf9ed154470c25b3"><code>7f183e0</code></a> docs: Update triage process description (<a href="https://redirect.github.com/eslint/eslint/issues/17157">#17157</a>) (Nicholas C. Zakas)</li>
<li><a href="https://github.com/eslint/eslint/commit/b68346b290d55324e73868ca42b3854157b27375"><code>b68346b</code></a> docs: fix license to reflect relicensing of jshint (<a href="https://redirect.github.com/eslint/eslint/issues/17165">#17165</a>) (Stefan Bischof)</li>
</ul>
<h2>Chores</h2>
<ul>
<li><a href="https://github.com/eslint/eslint/commit/f43216a8c77ab6cf1d0823978e8c728786b4cba7"><code>f43216a</code></a> chore: upgrade <code>@eslint/js</code><a href="https://github.com/8"><code>@8</code></a>.41.0 (<a href="https://redirect.github.com/eslint/eslint/issues/17200">#17200</a>) (Milos Djermanovic)</li>
<li><a href="https://github.com/eslint/eslint/commit/95c300780a1cfd9ad680bc78850542eb55d7fbf4"><code>95c3007</code></a> chore: package.json update for <code>@eslint/js</code> release (ESLint Jenkins)</li>
<li><a href="https://github.com/eslint/eslint/commit/ddc5291debd90ff476e17c532af7577e26720b91"><code>ddc5291</code></a> chore: don't use deprecated <code>context</code> methods in <code>ast-utils</code> tests (<a href="https://redirect.github.com/eslint/eslint/issues/17194">#17194</a>) (Milos Djermanovic)</li>
<li><a href="https://github.com/eslint/eslint/commit/b1516db51514032ed06e1425c4b1f955238dc682"><code>b1516db</code></a> chore: Fix return type of <code>findFlatConfigFile</code> (<a href="https://redirect.github.com/eslint/eslint/issues/17161">#17161</a>) (Milos Djermanovic)</li>
<li><a href="https://github.com/eslint/eslint/commit/918b0fd21723e84bd7acb17942a36606f1d8360a"><code>918b0fd</code></a> perf: Store indent descriptors in a plain array (<a href="https://redirect.github.com/eslint/eslint/issues/17148">#17148</a>) (Francesco Trotta)</li>
<li><a href="https://github.com/eslint/eslint/commit/4caa34449555d8a680222ec2049d97c59476c11e"><code>4caa344</code></a> refactor: locateConfigFileToUse returns an Error object (<a href="https://redirect.github.com/eslint/eslint/issues/17159">#17159</a>) (唯然)</li>
</ul>
</blockquote>
</details>
<details>
<summary>Changelog</summary>
<p><em>Sourced from <a href="https://github.com/eslint/eslint/blob/main/CHANGELOG.md">eslint's changelog</a>.</em></p>
<blockquote>
<p>v8.41.0 - May 19, 2023</p>
<ul>
<li><a href="https://github.com/eslint/eslint/commit/f43216a8c77ab6cf1d0823978e8c728786b4cba7"><code>f43216a</code></a> chore: upgrade <code>@eslint/js</code><a href="https://github.com/8"><code>@8</code></a>.41.0 (<a href="https://redirect.github.com/eslint/eslint/issues/17200">#17200</a>) (Milos Djermanovic)</li>
<li><a href="https://github.com/eslint/eslint/commit/95c300780a1cfd9ad680bc78850542eb55d7fbf4"><code>95c3007</code></a> chore: package.json update for <code>@eslint/js</code> release (ESLint Jenkins)</li>
<li><a href="https://github.com/eslint/eslint/commit/4f5440db631707b17140c4e5cc7beb223afbd2b9"><code>4f5440d</code></a> fix: incorrect warning message for ignored dotfiles (<a href="https://redirect.github.com/eslint/eslint/issues/17196">#17196</a>) (Milos Djermanovic)</li>
<li><a href="https://github.com/eslint/eslint/commit/ddc5291debd90ff476e17c532af7577e26720b91"><code>ddc5291</code></a> chore: don't use deprecated <code>context</code> methods in <code>ast-utils</code> tests (<a href="https://redirect.github.com/eslint/eslint/issues/17194">#17194</a>) (Milos Djermanovic)</li>
<li><a href="https://github.com/eslint/eslint/commit/880a4317b949e575a4a6c5e8baaba1eea7674cc6"><code>880a431</code></a> feat: change default ignore pattern to <code>**/node_modules/</code> in flat config (<a href="https://redirect.github.com/eslint/eslint/issues/17184">#17184</a>) (Milos Djermanovic)</li>
<li><a href="https://github.com/eslint/eslint/commit/94da96cbf0fb2bb6694fa2e757eb1b3e74c40db7"><code>94da96c</code></a> fix: unify <code>LintMessage</code> type (<a href="https://redirect.github.com/eslint/eslint/issues/17076">#17076</a>) (Brandon Mills)</li>
<li><a href="https://github.com/eslint/eslint/commit/7709b14e18ad4e11c1119ed6575454243b8e7084"><code>7709b14</code></a> docs: Update README (GitHub Actions Bot)</li>
<li><a href="https://github.com/eslint/eslint/commit/8bf550594fca6d29fab1a3453e701c1a457767e1"><code>8bf5505</code></a> feat: expose <code>shouldUseFlatConfig</code> (<a href="https://redirect.github.com/eslint/eslint/issues/17169">#17169</a>) (Connor Prussin)</li>
<li><a href="https://github.com/eslint/eslint/commit/7f183e020579380fa57473caaf9ed154470c25b3"><code>7f183e0</code></a> docs: Update triage process description (<a href="https://redirect.github.com/eslint/eslint/issues/17157">#17157</a>) (Nicholas C. Zakas)</li>
<li><a href="https://github.com/eslint/eslint/commit/0c415cda5d76dbe5120ab9f3c4c81320538e35f0"><code>0c415cd</code></a> fix: validate <code>ignorePatterns</code> constructor option in <code>FlatESLint</code> class (<a href="https://redirect.github.com/eslint/eslint/issues/17139">#17139</a>) (Milos Djermanovic)</li>
<li><a href="https://github.com/eslint/eslint/commit/b1516db51514032ed06e1425c4b1f955238dc682"><code>b1516db</code></a> chore: Fix return type of <code>findFlatConfigFile</code> (<a href="https://redirect.github.com/eslint/eslint/issues/17161">#17161</a>) (Milos Djermanovic)</li>
<li><a href="https://github.com/eslint/eslint/commit/b68346b290d55324e73868ca42b3854157b27375"><code>b68346b</code></a> docs: fix license to reflect relicensing of jshint (<a href="https://redirect.github.com/eslint/eslint/issues/17165">#17165</a>) (Stefan Bischof)</li>
<li><a href="https://github.com/eslint/eslint/commit/9682d669e4ee8641293914e21679f40fee8bc354"><code>9682d66</code></a> fix: switch <code>grapheme-splitter</code> to <code>graphemer</code> (<a href="https://redirect.github.com/eslint/eslint/issues/17160">#17160</a>) (fisker Cheung)</li>
<li><a href="https://github.com/eslint/eslint/commit/918b0fd21723e84bd7acb17942a36606f1d8360a"><code>918b0fd</code></a> perf: Store indent descriptors in a plain array (<a href="https://redirect.github.com/eslint/eslint/issues/17148">#17148</a>) (Francesco Trotta)</li>
<li><a href="https://github.com/eslint/eslint/commit/4caa34449555d8a680222ec2049d97c59476c11e"><code>4caa344</code></a> refactor: locateConfigFileToUse returns an Error object (<a href="https://redirect.github.com/eslint/eslint/issues/17159">#17159</a>) (唯然)</li>
</ul>
</blockquote>
</details>
<details>
<summary>Commits</summary>
<ul>
<li><a href="https://github.com/eslint/eslint/commit/7a2a0bed1bcded9461e8432aa09d754431d8725b"><code>7a2a0be</code></a> 8.41.0</li>
<li><a href="https://github.com/eslint/eslint/commit/acd615cd1b4604115070e855ed5ee62322b8edb0"><code>acd615c</code></a> Build: changelog update for 8.41.0</li>
<li><a href="https://github.com/eslint/eslint/commit/f43216a8c77ab6cf1d0823978e8c728786b4cba7"><code>f43216a</code></a> chore: upgrade <code>@eslint/js</code><a href="https://github.com/8"><code>@8</code></a>.41.0 (<a href="https://redirect.github.com/eslint/eslint/issues/17200">#17200</a>)</li>
<li><a href="https://github.com/eslint/eslint/commit/95c300780a1cfd9ad680bc78850542eb55d7fbf4"><code>95c3007</code></a> chore: package.json update for <code>@eslint/js</code> release</li>
<li><a href="https://github.com/eslint/eslint/commit/4f5440db631707b17140c4e5cc7beb223afbd2b9"><code>4f5440d</code></a> fix: incorrect warning message for ignored dotfiles (<a href="https://redirect.github.com/eslint/eslint/issues/17196">#17196</a>)</li>
<li><a href="https://github.com/eslint/eslint/commit/ddc5291debd90ff476e17c532af7577e26720b91"><code>ddc5291</code></a> chore: don't use deprecated <code>context</code> methods in <code>ast-utils</code> tests (<a href="https://redirect.github.com/eslint/eslint/issues/17194">#17194</a>)</li>
<li><a href="https://github.com/eslint/eslint/commit/880a4317b949e575a4a6c5e8baaba1eea7674cc6"><code>880a431</code></a> feat: change default ignore pattern to <code>**/node_modules/</code> in flat config (<a href="https://redirect.github.com/eslint/eslint/issues/17">#17</a>...</li>
<li><a href="https://github.com/eslint/eslint/commit/94da96cbf0fb2bb6694fa2e757eb1b3e74c40db7"><code>94da96c</code></a> fix: unify <code>LintMessage</code> type (<a href="https://redirect.github.com/eslint/eslint/issues/17076">#17076</a>)</li>
<li><a href="https://github.com/eslint/eslint/commit/7709b14e18ad4e11c1119ed6575454243b8e7084"><code>7709b14</code></a> docs: Update README</li>
<li><a href="https://github.com/eslint/eslint/commit/8bf550594fca6d29fab1a3453e701c1a457767e1"><code>8bf5505</code></a> feat: expose <code>shouldUseFlatConfig</code> (<a href="https://redirect.github.com/eslint/eslint/issues/17169">#17169</a>)</li>
<li>Additional commits viewable in <a href="https://github.com/eslint/eslint/compare/v8.40.0...v8.41.0">compare view</a></li>
</ul>
</details>
<br />
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=eslint&package-manager=npm_and_yarn&previous-version=8.40.0&new-version=8.41.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
<details>
<summary>Dependabot commands and options</summary>
<br />
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
</details> | https://api.github.com/repos/LAION-AI/Open-Assistant/pulls/3257 | 2023-05-29T19:07:17Z | 2023-05-29T20:03:23Z | 2023-05-29T20:03:23Z | 2023-05-29T20:03:24Z | 1,769 | LAION-AI/Open-Assistant | 37,137 |
feat(integrations): Block published SentryApps from dropdown | diff --git a/src/sentry/incidents/endpoints/organization_alert_rule_available_action_index.py b/src/sentry/incidents/endpoints/organization_alert_rule_available_action_index.py
index 55abfdbee493d..732b2c39181d6 100644
--- a/src/sentry/incidents/endpoints/organization_alert_rule_available_action_index.py
+++ b/src/sentry/incidents/endpoints/organization_alert_rule_available_action_index.py
@@ -89,7 +89,9 @@ def get(self, request, organization):
):
actions += [
build_action_response(registered_type, sentry_app=app)
- for app in get_alertable_sentry_apps(organization.id)
+ for app in get_alertable_sentry_apps(
+ organization.id, with_metric_alerts=True
+ )
]
else:
diff --git a/src/sentry/incidents/logic.py b/src/sentry/incidents/logic.py
index e502cfecad4db..d4e03d244b26b 100644
--- a/src/sentry/incidents/logic.py
+++ b/src/sentry/incidents/logic.py
@@ -11,7 +11,7 @@
from sentry import analytics
from sentry.api.event_search import get_filter, resolve_field
-from sentry.constants import SentryAppInstallationStatus
+from sentry.constants import SentryAppInstallationStatus, SentryAppStatus
from sentry.incidents import tasks
from sentry.incidents.models import (
AlertRule,
@@ -1206,12 +1206,16 @@ def get_available_action_integrations_for_org(organization):
return Integration.objects.filter(organizations=organization, provider__in=providers)
-def get_alertable_sentry_apps(organization_id):
- return SentryApp.objects.filter(
+def get_alertable_sentry_apps(organization_id, with_metric_alerts=False):
+ query = SentryApp.objects.filter(
installations__organization_id=organization_id,
is_alertable=True,
installations__status=SentryAppInstallationStatus.INSTALLED,
- ).distinct()
+ )
+
+ if with_metric_alerts:
+ query = query.exclude(status=SentryAppStatus.PUBLISHED)
+ return query.distinct()
def get_pagerduty_services(organization, integration_id):
diff --git a/tests/sentry/incidents/endpoints/test_organization_alert_rule_available_action_index.py b/tests/sentry/incidents/endpoints/test_organization_alert_rule_available_action_index.py
index 68de74f8e47e7..2569a5cab184b 100644
--- a/tests/sentry/incidents/endpoints/test_organization_alert_rule_available_action_index.py
+++ b/tests/sentry/incidents/endpoints/test_organization_alert_rule_available_action_index.py
@@ -29,6 +29,16 @@ def setUp(self):
super(OrganizationAlertRuleAvailableActionIndexEndpointTest, self).setUp()
self.login_as(self.user)
+ def install_new_sentry_app(self, name, **kwargs):
+ kwargs.update(
+ name=name, organization=self.organization, is_alertable=True, verify_install=False
+ )
+ sentry_app = self.create_sentry_app(**kwargs)
+ self.create_sentry_app_installation(
+ slug=sentry_app.slug, organization=self.organization, user=self.user
+ )
+ return sentry_app
+
def test_build_action_response_email(self):
data = build_action_response(self.email)
@@ -127,12 +137,7 @@ def test_no_feature(self):
assert resp.status_code == 404
def test_sentry_apps(self):
- sentry_app = self.create_sentry_app(
- name="foo", organization=self.organization, is_alertable=True, verify_install=False
- )
- self.create_sentry_app_installation(
- slug=sentry_app.slug, organization=self.organization, user=self.user
- )
+ sentry_app = self.install_new_sentry_app("foo")
with self.feature(
["organizations:incidents", "organizations:integrations-sentry-app-metric-alerts"]
@@ -146,3 +151,21 @@ def test_sentry_apps(self):
),
build_action_response(self.email),
]
+
+ def test_blocked_sentry_apps(self):
+ internal_sentry_app = self.install_new_sentry_app("internal")
+ # Should not show up in available actions.
+ self.install_new_sentry_app("published", published=True)
+
+ with self.feature(
+ ["organizations:incidents", "organizations:integrations-sentry-app-metric-alerts"]
+ ):
+ resp = self.get_valid_response(self.organization.slug)
+
+ assert resp.data == [
+ build_action_response(
+ AlertRuleTriggerAction.get_registered_type(AlertRuleTriggerAction.Type.SENTRY_APP),
+ sentry_app=internal_sentry_app,
+ ),
+ build_action_response(self.email),
+ ]
| When we turn on Metric Alert Integrations, we don't want to surprise anyone with new webhook types. To prevent this, temporarily block public SentryApps from showing up in the dropdown. Rather than blocking the one app by DB ID, we're excluding it by `status`. Once we have an established metric alerts webhook format, then we can unblock them. | https://api.github.com/repos/getsentry/sentry/pulls/20506 | 2020-08-31T18:54:44Z | 2020-09-01T19:54:56Z | 2020-09-01T19:54:56Z | 2024-03-05T19:37:13Z | 1,050 | getsentry/sentry | 44,096 |
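A minimal, hedged sketch of the exclude-by-status idea from the PR above — the `SentryAppStatus` values here are stand-ins for Sentry's real constants, and the function takes any Django queryset rather than the real `SentryApp` model:

```python
# Stand-ins for sentry.constants; the real values may differ.
from enum import IntEnum


class SentryAppStatus(IntEnum):
    UNPUBLISHED = 0
    PUBLISHED = 1
    INTERNAL = 2


def alertable_apps(queryset, with_metric_alerts=False):
    """Given a queryset of installed, alertable apps, optionally drop
    the published ones for metric-alert contexts."""
    if with_metric_alerts:
        # Exclude by column value instead of a hard-coded primary key,
        # so every published app is blocked, not just one known row.
        queryset = queryset.exclude(status=SentryAppStatus.PUBLISHED)
    return queryset.distinct()
```

Filtering on the status column means any app that later becomes published is blocked automatically, with no per-app bookkeeping.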
coinmetro header update | diff --git a/ts/src/coinmetro.ts b/ts/src/coinmetro.ts
index dbd4f14b6630..ed83f4454329 100644
--- a/ts/src/coinmetro.ts
+++ b/ts/src/coinmetro.ts
@@ -1877,11 +1877,11 @@ export default class coinmetro extends Exchange {
const endpoint = '/' + this.implodeParams (path, params);
let url = this.urls['api'][api] + endpoint;
const query = this.urlencode (request);
+ if (headers === undefined) {
+ headers = {};
+ }
+ headers['CCXT'] = 'true';
if (api === 'private') {
- if (headers === undefined) {
- headers = {};
- }
- headers['CCXT'] = true;
if ((this.uid === undefined) && (this.apiKey !== undefined)) {
this.uid = this.apiKey;
}
| https://api.github.com/repos/ccxt/ccxt/pulls/21023 | 2024-01-30T20:51:35Z | 2024-01-30T22:13:49Z | 2024-01-30T22:13:49Z | 2024-03-11T16:34:17Z | 207 | ccxt/ccxt | 13,695 |
|
Update merge-main-into-prs.yml | diff --git a/.github/workflows/merge-main-into-prs.yml b/.github/workflows/merge-main-into-prs.yml
index 06757d3f194..9ed945c7897 100644
--- a/.github/workflows/merge-main-into-prs.yml
+++ b/.github/workflows/merge-main-into-prs.yml
@@ -45,12 +45,10 @@ jobs:
for pr in open_pulls:
# Compare PR head with main to see if it's behind
try:
- comparison = repo.compare(pr.base.ref, pr.head.ref) # Ensure correct order of base and head
- if comparison.behind_by > 0:
- # Merge main into the PR branch
- success = pr.update_branch()
- assert success, "Branch update failed"
- print(f"Merged 'master' into PR #{pr.number} ({pr.head.ref}) successfully.")
+ # Merge main into the PR branch
+ success = pr.update_branch()
+ assert success, "Branch update failed"
+ print(f"Merged 'master' into PR #{pr.number} ({pr.head.ref}) successfully.")
except Exception as e:
print(f"Could not merge 'master' into PR #{pr.number} ({pr.head.ref}): {e}")
env:
| <!--
Thank you 🙏 for your contribution to [Ultralytics](https://ultralytics.com) 🚀! Your effort in enhancing our repositories is greatly appreciated. To streamline the process and assist us in integrating your Pull Request (PR) effectively, please follow these steps:
1. **Check for Existing Contributions**: Before submitting, kindly explore existing PRs to ensure your contribution is unique and complementary.
2. **Link Related Issues**: If your PR addresses an open issue, please link it in your submission. This helps us better understand the context and impact of your contribution.
3. **Elaborate Your Changes**: Clearly articulate the purpose of your PR. Whether it's a bug fix or a new feature, a detailed description aids in a smoother integration process.
4. **Ultralytics Contributor License Agreement (CLA)**: To uphold the quality and integrity of our project, we require all contributors to sign the CLA. Please confirm your agreement by commenting below:
_I have read the CLA Document and I sign the CLA_
For more detailed guidance and best practices on contributing, refer to our ✅ [Contributing Guide](https://docs.ultralytics.com/help/contributing). Your adherence to these guidelines ensures a faster and more effective review process.
--->
## 🛠️ PR Summary
<sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)</sub>
### 🌟 Summary
Streamlining GitHub Actions to Automatically Update PR Branches 🚀
### 📊 Key Changes
- **Simplified the process** for merging the main branch into PR branches directly, without checking if the PR is behind the main branch first.
- **Removed comparison checks** between PR head and main, which previously determined if a PR was lagging behind the main branch.
### 🎯 Purpose & Impact
- **Smoother Workflow:** By always attempting to merge the main branch into open PRs, this change ensures that PRs are consistently up-to-date, making the integration process smoother.
- **Increased Efficiency:** Removes the need to compare branches, potentially speeding up the workflow execution time.
- **Potential Risk:** There's a slight risk of merge conflicts or issues in PR branches if they're automatically updated without prior checks, but this approach keeps all contributions aligned with the latest main branch changes.
Overall, this update aims to make developers' lives easier by automating housekeeping tasks, though users should be aware and manage potential merge conflicts proactively. | https://api.github.com/repos/ultralytics/yolov5/pulls/12922 | 2024-04-14T14:36:21Z | 2024-04-14T14:36:45Z | 2024-04-14T14:36:45Z | 2024-04-14T14:36:57Z | 285 | ultralytics/yolov5 | 25,455 |
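For context, a hedged standalone sketch of the simplified loop, assuming PyGithub (`pip install PyGithub`), a `GITHUB_TOKEN` environment variable with push access, and a placeholder repository name:

```python
import os

from github import Github

g = Github(os.environ["GITHUB_TOKEN"])
repo = g.get_repo("ultralytics/yolov5")  # placeholder repository

for pr in repo.get_pulls(state="open"):
    try:
        # No behind-by comparison any more: just ask GitHub to merge
        # the base branch into the PR head. update_branch() returns a
        # bool, mirroring the assert in the workflow script.
        assert pr.update_branch(), "Branch update failed"
        print(f"Merged base into PR #{pr.number} ({pr.head.ref}).")
    except Exception as e:
        print(f"Could not update PR #{pr.number} ({pr.head.ref}): {e}")
```

Dropping the `compare()` call trades a few redundant `update_branch()` requests for a simpler loop; the conflict risk the summary mentions surfaces in the `except` branch instead.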
typos | diff --git a/README.md b/README.md
index da4806adf7..0a63da9592 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,9 @@
-We got a takedown request by openai's legal team...
+We got a takedown request by openAI's legal team...
-discord sever for updates / support:
+discord server for updates / support:
- https://discord.gg/gpt4free
-here is a lil poem you can read in the meantime, while I am investigating it:
+here is a lil' poem you can read in the meantime, while I am investigating it:
```
There once was a time, in a land full of code,
| https://api.github.com/repos/xtekky/gpt4free/pulls/266 | 2023-04-28T22:37:22Z | 2023-04-28T22:48:04Z | 2023-04-28T22:48:04Z | 2023-04-28T22:48:04Z | 163 | xtekky/gpt4free | 38,300 |
|
Add new snippet: Half quoted triple strings | diff --git a/README.md b/README.md
index 84cb86d..3a91d66 100755
--- a/README.md
+++ b/README.md
@@ -1774,6 +1774,26 @@ a, b = a[b] = {}, 5
print(dis.dis(f))
```
+* Half triple-quoted strings (suggested by @asottile in [this](https://github.com/satwikkansal/wtfpython/issues/40) issue).
+ ```py
+ >>> print('wtfpython''')
+ wtfpython
+ >>> print("wtfpython""")
+ wtfpython
+ >>> # The following statements raise `SyntaxError`
+ >>> # print('''wtfpython')
+ >>> # print("""wtfpython")
+ ```
+ **💡 Explanation:**
+ + Python support implicit [string literal concatenation](https://docs.python.org/2/reference/lexical_analysis.html#string-literal-concatenation), Example,
+ ```
+ >>> print("wtf" "python")
+ wtfpython
+ >>> print("wtf" "") # or "wtf"""
+ wtf
+ ```
+ + `'''` and `"""` are also string delimiters in Python which causes a SyntaxError because the Python interpreter was expecting a terminating triple quote as delimiter while scanning the currently encountered triple quoted string literal.
+
* Multiple Python threads won't run your *Python code* concurrently (yes you heard it right!). It may seem intuitive to spawn several threads and let them execute your Python code concurrently, but, because of the [Global Interpreter Lock](https://wiki.python.org/moin/GlobalInterpreterLock) in Python, all you're doing is making your threads execute on the same core turn by turn. Python threads are good for IO-bound tasks, but to achieve actual parallelization in Python for CPU-bound tasks, you might want to use the Python [multiprocessing](https://docs.python.org/2/library/multiprocessing.html) module.
* List slicing with out of the bounds indices throws no errors
| Closes https://github.com/satwikkansal/wtfpython/issues/40 | https://api.github.com/repos/satwikkansal/wtfpython/pulls/47 | 2017-10-11T17:17:49Z | 2017-10-11T17:18:00Z | 2017-10-11T17:18:00Z | 2017-10-11T17:18:00Z | 455 | satwikkansal/wtfpython | 25,806 |
Fix vss path | diff --git a/build/vss.js b/build/vss.js
index 63af5f86e7cb..86116ea282ff 100644
--- a/build/vss.js
+++ b/build/vss.js
@@ -36,7 +36,7 @@ function vssEverything () {
vss ('./php/async/Exchange.php', "VERSION = '{version}'", version)
vss ('./php/async/Exchange.php', "$version = '{version}'", version)
vss ('./php/Exchange.php', "VERSION = '{version}'", version)
- // vss ('./python/ccxt/rest/__init__.py', "__version__ = '{version}'", version)
+ vss ('./python/ccxt/__init__.py', "__version__ = '{version}'", version)
vss ('./python/ccxt/base/exchange.py', "__version__ = '{version}'", version)
vss ('./python/ccxt/async_support/__init__.py', "__version__ = '{version}'", version)
vss ('./python/ccxt/async_support/base/exchange.py', "__version__ = '{version}'", version)
| https://api.github.com/repos/ccxt/ccxt/pulls/15172 | 2022-10-03T00:34:48Z | 2022-10-03T00:35:52Z | 2022-10-03T00:35:52Z | 2022-10-03T00:35:53Z | 258 | ccxt/ccxt | 13,518 |
|
Clarify documentation for json parsing | diff --git a/flask/wrappers.py b/flask/wrappers.py
index 04bdcb5d1b..5a49132e7e 100644
--- a/flask/wrappers.py
+++ b/flask/wrappers.py
@@ -97,8 +97,9 @@ def blueprint(self):
@property
def json(self):
- """If the mimetype is :mimetype:`application/json` this will contain the
- parsed JSON data. Otherwise this will be ``None``.
+ """If the request has a JSON mimetype like :mimetype:`application/json`
+ (see :meth:`is_json`), this will contain the parsed JSON data.
+ Otherwise this will be ``None``.
The :meth:`get_json` method should be used instead.
"""
@@ -109,7 +110,7 @@ def json(self):
@property
def is_json(self):
- """Indicates if this request is JSON or not. By default a request
+ """Indicates if this request is JSON or not. By default a request
is considered to include JSON data if the mimetype is
:mimetype:`application/json` or :mimetype:`application/*+json`.
@@ -123,18 +124,18 @@ def is_json(self):
return False
def get_json(self, force=False, silent=False, cache=True):
- """Parses the incoming JSON request data and returns it. By default
- this function will return ``None`` if the mimetype is not
- :mimetype:`application/json` but this can be overridden by the
- ``force`` parameter. If parsing fails the
- :meth:`on_json_loading_failed` method on the request object will be
+ """Parses the incoming JSON request data and returns it. By default
+ this function will return ``None`` if the request does not use a JSON
+ mimetype like :mimetype:`application/json`. See :meth:`is_json`. This
+ can be overridden by the ``force`` parameter. If parsing fails,
+ the :meth:`on_json_loading_failed` method on the request object will be
invoked.
:param force: if set to ``True`` the mimetype is ignored.
:param silent: if set to ``True`` this method will fail silently
- and return ``None``.
+ and return ``None``.
:param cache: if set to ``True`` the parsed JSON data is remembered
- on the request.
+ on the request.
"""
rv = getattr(self, '_cached_json', _missing)
# We return cached JSON only when the cache is enabled.
| Documentation does not currently mention that is_json accepts mimetypes that are not strictly application/json. | https://api.github.com/repos/pallets/flask/pulls/2353 | 2017-06-02T16:08:37Z | 2017-06-02T17:23:52Z | 2017-06-02T17:23:52Z | 2020-11-14T03:53:08Z | 590 | pallets/flask | 20,255 |
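To make the documented behaviour concrete, a small hedged sketch (Flask assumed installed; the `/echo` route name is made up):

```python
from flask import Flask, jsonify, request

app = Flask(__name__)


@app.route("/echo", methods=["POST"])
def echo():
    # None unless the client sent a JSON mimetype such as
    # application/json or application/*+json (see Request.is_json);
    # silent=True also swallows parse errors instead of raising.
    data = request.get_json(silent=True)
    return jsonify(received=data)
```

Passing `force=True` instead would ignore the mimetype check entirely.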
performance optimization of primelib.isPrime(number) | diff --git a/primelib/primelib.py b/primelib/primelib.py
index 4267cd516b..4e9536134a 100644
--- a/primelib/primelib.py
+++ b/primelib/primelib.py
@@ -73,15 +73,15 @@ def isPrime(number):
# 0 and 1 are none primes.
if number <= 1:
- status = False
-
- for divisor in range(2,int(round(math.sqrt(number)))+1):
+ return False
+
+ # all even numbers except 2 are not primes.
+ if number % 2 == 0 and number > 2:
+ return False
- # if 'number' divisible by 'divisor' then sets 'status'
- # of false and break up the loop.
- if number % divisor == 0:
- status = False
- break
+ # if 'number' is divisible by 'divisor', 'status' is set to false.
+ # lazy evaluation makes all() stop at the first such divisor.
+ status = all(number % divisor for divisor in range(3, int(math.sqrt(number)) + 1, 2))
# precondition
assert isinstance(status,bool), "'status' must been from type bool"
| Only loop over the odd numbers when testing divisors.
## performance test:
### old version:
In [1]: from primelib import isPrime
In [2]: %timeit isPrime(99999999999999997)
50.9 s ± 31.1 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
### new version:
In [1]: from primelib import isPrime
In [2]: %timeit isPrime(99999999999999997)
32.4 s ± 149 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) | https://api.github.com/repos/geekcomputers/Python/pulls/430 | 2018-11-14T11:01:58Z | 2018-11-15T06:36:36Z | 2018-11-15T06:36:36Z | 2018-11-15T06:36:40Z | 295 | geekcomputers/Python | 31,517 |
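A standalone, hedged version of the optimized check — the same odd-divisor loop and short-circuiting `all()` as the diff, plus a tiny self-test:

```python
import math


def is_prime(number: int) -> bool:
    if number <= 1:
        return False
    if number % 2 == 0:
        return number == 2  # 2 is the only even prime
    # all() short-circuits on the first divisor that divides number.
    return all(number % divisor
               for divisor in range(3, int(math.sqrt(number)) + 1, 2))


assert [n for n in range(20) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19]
```

Skipping even divisors halves the loop's work, which matches the roughly one-third wall-clock improvement reported above for a large prime.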
debug(msteams): needs more logging | diff --git a/src/sentry/integrations/msteams/webhook.py b/src/sentry/integrations/msteams/webhook.py
index 48b4bb9614b1b..41997576a40ed 100644
--- a/src/sentry/integrations/msteams/webhook.py
+++ b/src/sentry/integrations/msteams/webhook.py
@@ -220,6 +220,8 @@ def post(self, request: HttpRequest) -> HttpResponse:
"conversation_type": conversation_type,
"event_type": event_type,
}
+
+ response = None
# only care about conversationUpdate and message
if event_type == "message":
# the only message events we care about are those which
@@ -228,12 +230,13 @@ def post(self, request: HttpRequest) -> HttpResponse:
if data.get("value", {}).get("payload", {}).get("actionType"):
# Processing card actions can only occur in the Region silo.
if SiloMode.get_current_mode() == SiloMode.CONTROL:
- return self.respond(status=400)
- return self.handle_action_submitted(request)
+ response = self.respond(status=400)
+ else:
+ response = self.handle_action_submitted(request)
elif conversation_type == "channel":
- return self.handle_channel_message(request)
+ response = self.handle_channel_message(request)
else:
- return self.handle_personal_message(request)
+ response = self.handle_personal_message(request)
elif event_type == "conversationUpdate":
channel_data = data["channelData"]
event = channel_data.get("eventType")
@@ -242,18 +245,19 @@ def post(self, request: HttpRequest) -> HttpResponse:
log_params["event"] = event
# TODO: Handle other events
if event == "teamMemberAdded":
- return self.handle_team_member_added(request)
+ response = self.handle_team_member_added(request)
elif event == "teamMemberRemoved":
if SiloMode.get_current_mode() == SiloMode.CONTROL:
- return self.respond(status=400)
- return self.handle_team_member_removed(request)
+ response = self.respond(status=400)
+ else:
+ response = self.handle_team_member_removed(request)
elif (
data.get("membersAdded") and conversation_type == "personal"
): # no explicit event for user adding app unfortunately
- return self.handle_personal_member_add(request)
+ response = self.handle_personal_member_add(request)
logger.info("sentry.integrations.msteams.webhook", extra=log_params)
- return self.respond(status=204)
+ return response if response else self.respond(status=204)
def verify_webhook_request(self, request: HttpRequest) -> bool:
return verify_signature(request)
| Needs more logging, as I'm not seeing enough events. It's starting to look like we're not handling the webhook events properly from MSTeams | https://api.github.com/repos/getsentry/sentry/pulls/67391 | 2024-03-20T22:26:24Z | 2024-03-20T23:20:23Z | 2024-03-20T23:20:23Z | 2024-04-05T00:02:06Z | 601 | getsentry/sentry | 43,951 |
Add kmatch to Data Validation | diff --git a/README.md b/README.md
index e751080e8..4392ccde4 100644
--- a/README.md
+++ b/README.md
@@ -582,6 +582,7 @@ A curated list of awesome Python frameworks, libraries and software. Inspired by
* [dictshield](https://github.com/exfm/dictshield) - A fast way to validate and trim the values in a dictionary.
* [colander](http://docs.pylonsproject.org/projects/colander/) - A system for validating and deserializing data obtained via XML, JSON, an HTML form post or any other equally simple data serialization.
* [Schematics](https://github.com/schematics/schematics) - Data Structure Validation.
+* [kmatch](https://github.com/ambitioninc/kmatch) - A language for matching/validating/filtering Python dictionaries.
## Anti-spam
| https://github.com/ambitioninc/kmatch - A language for matching/validating/filtering Python dictionaries.
| https://api.github.com/repos/vinta/awesome-python/pulls/177 | 2014-07-31T07:16:36Z | 2014-07-31T20:16:17Z | 2014-07-31T20:16:17Z | 2014-07-31T20:16:17Z | 199 | vinta/awesome-python | 27,329 |
added --yeah as an alternative arg to -y and --yes; updated README.md | diff --git a/README.md b/README.md
index 5bf72e018..d3591a6ae 100644
--- a/README.md
+++ b/README.md
@@ -146,10 +146,10 @@ eval $(thefuck --alias FUCK)
Changes are only available in a new shell session. To make changes immediately
available, run `source ~/.bashrc` (or your shell config file like `.zshrc`).
-To run fixed commands without confirmation, use the `-y` option:
+To run fixed commands without confirmation, use the `--yeah` option (or just `-y` for short):
```bash
-fuck -y
+fuck --yeah
```
To fix commands recursively until succeeding, use the `-r` option:
diff --git a/thefuck/argument_parser.py b/thefuck/argument_parser.py
index 8e79fc0a2..b7b97837d 100644
--- a/thefuck/argument_parser.py
+++ b/thefuck/argument_parser.py
@@ -55,7 +55,7 @@ def _add_conflicting_arguments(self):
"""It's too dangerous to use `-y` and `-r` together."""
group = self._parser.add_mutually_exclusive_group()
group.add_argument(
- '-y', '--yes',
+ '-y', '--yes', '--yeah',
action='store_true',
help='execute fixed command without confirmation')
group.add_argument(
| Love this little app -- I use it a little too often...
In any case, I just thought you missed a good opportunity to do `fuck --yeah`, so I'm adding it in :) | https://api.github.com/repos/nvbn/thefuck/pulls/822 | 2018-07-05T18:26:51Z | 2018-07-09T22:50:11Z | 2018-07-09T22:50:11Z | 2018-07-09T22:50:11Z | 320 | nvbn/thefuck | 30,737 |
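The whole trick in a standalone argparse sketch — `add_argument` accepts any number of option strings, so `--yeah` is a free alias sharing a destination with `--yes`:

```python
import argparse

parser = argparse.ArgumentParser(prog="fuck")
group = parser.add_mutually_exclusive_group()
group.add_argument("-y", "--yes", "--yeah", action="store_true",
                   help="execute fixed command without confirmation")
group.add_argument("-r", "--repeat", action="store_true",
                   help="fix commands recursively until succeeding")

# The dest is derived from the first long option, i.e. "yes".
print(parser.parse_args(["--yeah"]).yes)  # True
```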
bpo-25461: Update os.walk() docstring to match the online docs | diff --git a/Lib/os.py b/Lib/os.py
index 7741c7580d0e3b..3dc0a2f5347067 100644
--- a/Lib/os.py
+++ b/Lib/os.py
@@ -300,10 +300,11 @@ def walk(top, topdown=True, onerror=None, followlinks=False):
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune the
search, or to impose a specific order of visiting. Modifying dirnames when
- topdown is false is ineffective, since the directories in dirnames have
- already been generated by the time dirnames itself is generated. No matter
- the value of topdown, the list of subdirectories is retrieved before the
- tuples for the directory and its subdirectories are generated.
+ topdown is false has no effect on the behavior of os.walk(), since the
+ directories in dirnames have already been generated by the time dirnames
+ itself is generated. No matter the value of topdown, the list of
+ subdirectories is retrieved before the tuples for the directory and its
+ subdirectories are generated.
By default errors from the os.scandir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
|
<!-- issue-number: [bpo-25461](https://bugs.python.org/issue25461) -->
https://bugs.python.org/issue25461
<!-- /issue-number -->
| https://api.github.com/repos/python/cpython/pulls/11836 | 2019-02-13T08:53:27Z | 2019-09-10T12:43:59Z | 2019-09-10T12:43:59Z | 2019-09-10T12:44:16Z | 314 | python/cpython | 3,900 |
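A short hedged demo of why the docstring wording matters — pruning via in-place edits to `dirnames` only works with `topdown=True`, because with `topdown=False` the subdirectory triples have already been yielded:

```python
import os

for dirpath, dirnames, filenames in os.walk(".", topdown=True):
    # Slice assignment edits the list os.walk() will recurse into,
    # pruning the search; rebinding the name would have no effect.
    dirnames[:] = [d for d in dirnames
                   if d not in (".git", "node_modules")]
    print(dirpath, filenames)
```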
Doc fix: URLError doesn't exist anymore | diff --git a/requests/models.py b/requests/models.py
index 898ed66a10..faa0cbe477 100644
--- a/requests/models.py
+++ b/requests/models.py
@@ -646,7 +646,7 @@ def links(self):
return l
def raise_for_status(self):
- """Raises stored :class:`HTTPError` or :class:`URLError`, if one occurred."""
+ """Raises stored :class:`HTTPError`, if one occurred."""
http_error_msg = ''
| https://api.github.com/repos/psf/requests/pulls/1229 | 2013-03-01T19:23:51Z | 2013-03-02T20:59:41Z | 2013-03-02T20:59:41Z | 2021-09-08T23:06:29Z | 117 | psf/requests | 32,945 |
|
[CLIPTokenizer] Improve warning | diff --git a/src/transformers/models/clip/tokenization_clip.py b/src/transformers/models/clip/tokenization_clip.py
index 8b769ea9d064f..ef8da45cda2af 100644
--- a/src/transformers/models/clip/tokenization_clip.py
+++ b/src/transformers/models/clip/tokenization_clip.py
@@ -315,7 +315,7 @@ def __init__(
self.fix_text = ftfy.fix_text
except ImportError:
- logger.warning("ftfy or spacy is not installed using BERT BasicTokenizer instead of ftfy.")
+ logger.info("ftfy or spacy is not installed using custom BasicTokenizer instead of ftfy.")
self.nlp = BasicTokenizer(do_lower_case=True)
self.fix_text = None
| # What does this PR do?
<!--
Congratulations! You've made it this far! You're not quite done yet though.
Once merged, your PR is going to appear in the release notes with the title you set, so make sure it's a great title that fully reflects the extent of your awesome contribution.
Then, please replace this with a description of the change and which issue is fixed (if applicable). Please also include relevant motivation and context. List any dependencies (if any) that are required for this change.
Once you're done, someone will review your PR shortly (see the section "Who can review?" below to tag some potential reviewers). They may suggest changes to make the code even better. If no one reviewed your PR after a week has passed, don't hesitate to post a new comment @-mentioning the same persons---sometimes notifications get lost.
-->
<!-- Remove if not applicable -->
As can be seen in this thread, the warning is a bit confusing for libraries built on top of `transformers`: https://github.com/huggingface/diffusers/issues/1388#issuecomment-1327760610
Could we maybe downgrade it to an "info" statement and remove the mention of BERT?
## Before submitting
- [x] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
- [ ] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests),
Pull Request section?
- [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link
to it if that's the case.
- [ ] Did you make sure to update the documentation with your changes? Here are the
[documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and
[here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation).
- [ ] Did you write any new necessary tests?
## Who can review?
Anyone in the community is free to review the PR once the tests have passed. Feel free to tag
members/contributors who may be interested in your PR.
<!-- Your PR will be replied to more quickly if you can figure out the right person to tag with @
If you know how to use git blame, that is the easiest way, otherwise, here is a rough guide of **who to tag**.
Please tag fewer than 3 people.
Models:
- albert, bert, xlm: @LysandreJik
- blenderbot, bart, marian, pegasus, encoderdecoder, t5: @patrickvonplaten, @patil-suraj
- longformer, reformer, transfoxl, xlnet: @patrickvonplaten
- fsmt: @stas00
- funnel: @sgugger
- gpt2: @patrickvonplaten, @LysandreJik
- rag: @patrickvonplaten, @lhoestq
- tensorflow: @LysandreJik
Library:
- benchmarks: @patrickvonplaten
- deepspeed: @stas00
- ray/raytune: @richardliaw, @amogkam
- text generation: @patrickvonplaten
- tokenizers: @n1t0, @LysandreJik
- trainer: @sgugger
- pipelines: @LysandreJik
Documentation: @sgugger
HF projects:
- datasets: [different repo](https://github.com/huggingface/datasets)
- rust tokenizers: [different repo](https://github.com/huggingface/tokenizers)
Examples:
- maintained examples (not research project or legacy): @sgugger, @patil-suraj
- research_projects/bert-loses-patience: @JetRunner
- research_projects/distillation: @VictorSanh
-->
| https://api.github.com/repos/huggingface/transformers/pulls/20458 | 2022-11-25T19:00:38Z | 2022-11-28T14:20:14Z | 2022-11-28T14:20:14Z | 2022-11-28T14:45:14Z | 176 | huggingface/transformers | 11,955 |
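The generic shape of the change, sketched standalone — an optional dependency with a quieter `logger.info` fallback; `fix_text = None` stands in for whatever local substitute a project ships:

```python
import logging

logger = logging.getLogger(__name__)

try:
    import ftfy

    fix_text = ftfy.fix_text
except ImportError:
    # Expected situation, not an error: log at info level and fall
    # back to the project's own tokenizer/normalizer.
    logger.info("ftfy is not installed, using a custom BasicTokenizer "
                "instead of ftfy.")
    fix_text = None  # callers check for None and take the fallback path
```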
Document mitmweb architecture | diff --git a/web/README.md b/web/README.md
index ae558bd6e6..7b412a5f1b 100644
--- a/web/README.md
+++ b/web/README.md
@@ -6,3 +6,11 @@ and activate your virtualenv environment before proceeding.**
- Run `yarn` to install dependencies
- Run `yarn run gulp` to start live-compilation.
- Run `mitmweb` and open http://localhost:8081/
+
+## Architecture
+
+There are two components:
+
+- Server: [`mitmproxy/tools/web`](../mitmproxy/tools/web)
+
+- Client: `web`
| Bug: https://github.com/mitmproxy/mitmproxy/issues/3925 | https://api.github.com/repos/mitmproxy/mitmproxy/pulls/4006 | 2020-05-18T18:21:34Z | 2020-06-12T11:18:50Z | 2020-06-12T11:18:50Z | 2020-06-12T11:50:19Z | 146 | mitmproxy/mitmproxy | 27,507 |
Fix typos in os_quota.py | diff --git a/lib/ansible/modules/cloud/openstack/os_quota.py b/lib/ansible/modules/cloud/openstack/os_quota.py
index 64d9c805ba374d..0de7d1d322f2f4 100644
--- a/lib/ansible/modules/cloud/openstack/os_quota.py
+++ b/lib/ansible/modules/cloud/openstack/os_quota.py
@@ -32,7 +32,7 @@
author: "Michael Gale (gale.michael@gmail.com)"
description:
- Manage OpenStack Quotas. Quotas can be created,
- updated or deleted using this module. A auota will be updated
+ updated or deleted using this module. A quota will be updated
if matches an existing project and is present.
options:
name:
@@ -115,7 +115,7 @@
rbac_policy:
required: False
default: None
- description: Number of polcies to allow.
+ description: Number of policies to allow.
router:
required: False
default: None
| ##### ISSUE TYPE
- Docs Pull Request
##### COMPONENT NAME
os_quota.py
##### ANSIBLE VERSION
<!--- Paste verbatim output from “ansible --version” between quotes below -->
```
ansible 2.3.0 (typo 5438e0463c) last updated 2017/01/18 11:21:22 (GMT -500)
config file =
configured module search path = Default w/o overrides
```
##### SUMMARY
Fix typos in os_quota.py
* Replace 'auota' with 'quota'
* Replace 'polcies' with 'policies' | https://api.github.com/repos/ansible/ansible/pulls/20411 | 2017-01-18T17:24:01Z | 2017-01-18T17:55:41Z | 2017-01-18T17:55:41Z | 2019-04-26T20:13:27Z | 223 | ansible/ansible | 49,092 |
[2.7] bpo-1104: msilib.SummaryInfo.GetProperty() truncates the string by one character (GH-4517) | diff --git a/Lib/test/test_msilib.py b/Lib/test/test_msilib.py
index 5d5b0c48fea6f6..a2f3943e228119 100644
--- a/Lib/test/test_msilib.py
+++ b/Lib/test/test_msilib.py
@@ -1,8 +1,48 @@
""" Test suite for the code in msilib """
import unittest
-import os
-from test_support import run_unittest, import_module
+from test_support import TESTFN, import_module, run_unittest, unlink
msilib = import_module('msilib')
+import msilib.schema
+
+
+def init_database():
+ path = TESTFN + '.msi'
+ db = msilib.init_database(
+ path,
+ msilib.schema,
+ 'Python Tests',
+ 'product_code',
+ '1.0',
+ 'PSF',
+ )
+ return db, path
+
+
+class MsiDatabaseTestCase(unittest.TestCase):
+
+ def test_summaryinfo_getproperty_issue1104(self):
+ db, db_path = init_database()
+ try:
+ sum_info = db.GetSummaryInformation(99)
+ title = sum_info.GetProperty(msilib.PID_TITLE)
+ self.assertEqual(title, b"Installation Database")
+
+ sum_info.SetProperty(msilib.PID_TITLE, "a" * 999)
+ title = sum_info.GetProperty(msilib.PID_TITLE)
+ self.assertEqual(title, b"a" * 999)
+
+ sum_info.SetProperty(msilib.PID_TITLE, "a" * 1000)
+ title = sum_info.GetProperty(msilib.PID_TITLE)
+ self.assertEqual(title, b"a" * 1000)
+
+ sum_info.SetProperty(msilib.PID_TITLE, "a" * 1001)
+ title = sum_info.GetProperty(msilib.PID_TITLE)
+ self.assertEqual(title, b"a" * 1001)
+ finally:
+ db = None
+ sum_info = None
+ unlink(db_path)
+
class Test_make_id(unittest.TestCase):
#http://msdn.microsoft.com/en-us/library/aa369212(v=vs.85).aspx
@@ -35,12 +75,13 @@ def test_invalid_first_char(self):
def test_invalid_any_char(self):
self.assertEqual(
msilib.make_id(".s\x82ort"), "_.s_ort")
- self.assertEqual (
+ self.assertEqual(
msilib.make_id(".s\x82o?*+rt"), "_.s_o___rt")
def test_main():
run_unittest(__name__)
+
if __name__ == '__main__':
test_main()
diff --git a/Misc/NEWS.d/next/Windows/2017-11-24-12-53-54.bpo-1104.1CWSZp.rst b/Misc/NEWS.d/next/Windows/2017-11-24-12-53-54.bpo-1104.1CWSZp.rst
new file mode 100644
index 00000000000000..a4043496bc2463
--- /dev/null
+++ b/Misc/NEWS.d/next/Windows/2017-11-24-12-53-54.bpo-1104.1CWSZp.rst
@@ -0,0 +1,2 @@
+Correctly handle string length in ``msilib.SummaryInfo.GetProperty()`` to
+prevent it from truncating the last character.
diff --git a/PC/_msi.c b/PC/_msi.c
index 68c4e79e294549..4000f00c763bc3 100644
--- a/PC/_msi.c
+++ b/PC/_msi.c
@@ -539,7 +539,7 @@ summary_getproperty(msiobj* si, PyObject *args)
FILETIME fval;
char sbuf[1000];
char *sval = sbuf;
- DWORD ssize = sizeof(sval);
+ DWORD ssize = sizeof(sbuf);
if (!PyArg_ParseTuple(args, "i:GetProperty", &field))
return NULL;
@@ -547,6 +547,7 @@ summary_getproperty(msiobj* si, PyObject *args)
status = MsiSummaryInfoGetProperty(si->h, field, &type, &ival,
&fval, sval, &ssize);
if (status == ERROR_MORE_DATA) {
+ ssize++;
sval = malloc(ssize);
if (sval == NULL) {
return PyErr_NoMemory();
@@ -556,19 +557,29 @@ summary_getproperty(msiobj* si, PyObject *args)
}
switch(type) {
- case VT_I2: case VT_I4:
- return PyInt_FromLong(ival);
+ case VT_I2:
+ case VT_I4:
+ result = PyLong_FromLong(ival);
+ break;
case VT_FILETIME:
PyErr_SetString(PyExc_NotImplementedError, "FILETIME result");
- return NULL;
+ result = NULL;
+ break;
case VT_LPSTR:
- result = PyString_FromStringAndSize(sval, ssize);
- if (sval != sbuf)
- free(sval);
- return result;
+ result = PyBytes_FromStringAndSize(sval, ssize);
+ break;
+ case VT_EMPTY:
+ Py_INCREF(Py_None);
+ result = Py_None;
+ break;
+ default:
+ PyErr_Format(PyExc_NotImplementedError, "result of type %d", type);
+ result = NULL;
+ break;
}
- PyErr_Format(PyExc_NotImplementedError, "result of type %d", type);
- return NULL;
+ if (sval != sbuf)
+ free(sval);
+ return result;
}
static PyObject*
| Add one char to MsiSummaryInfoGetProperty() output
Based on the patch in [bpo-1104](https://bugs.python.org/issue1104) by Anthony Tuininga (atuining) and Mark McMahon (markm)
(cherry picked from commit 2de576e16d42ce43698d384d0dd46ba6cf165424)
Co-authored-by: Tzu-ping Chung <uranusjr@gmail.com>
<!--
Thanks for your contribution!
Please read this comment in its entirety. It's quite important.
# Pull Request title
It should be in the following format:
```
bpo-NNNN: Summary of the changes made
```
Where: bpo-NNNN refers to the issue number in the https://bugs.python.org.
Most PRs will require an issue number. Trivial changes, like fixing a typo, do not need an issue.
# Backport Pull Request title
If this is a backport PR (PR made against branches other than `master`),
please ensure that the PR title is in the following format:
```
[X.Y] <title from the original PR> (GH-NNNN)
```
Where: [X.Y] is the branch name, e.g. [3.6].
GH-NNNN refers to the PR number from `master`.
-->
<!-- issue-number: [bpo-1104](https://bugs.python.org/issue1104) -->
https://bugs.python.org/issue1104
<!-- /issue-number -->
| https://api.github.com/repos/python/cpython/pulls/11749 | 2019-02-03T06:09:36Z | 2019-02-19T03:06:11Z | 2019-02-19T03:06:11Z | 2019-11-27T16:39:57Z | 1,289 | python/cpython | 3,981 |
Update contributing guides | diff --git a/README.md b/README.md
index cd73f91d..c2e752e9 100644
--- a/README.md
+++ b/README.md
@@ -79,22 +79,31 @@ Contributing
When an implementation is added or modified, please review the following guidelines:
##### Output
-All files with example patterns have `### OUTPUT ###` section at the bottom.
+All files with example patterns have `### OUTPUT ###` section at the bottom
+(migration to OUTPUT = """...""" is in progress).
Run `append_output.sh` (e.g. `./append_output.sh borg.py`) to generate/update it.
##### Docstrings
Add module level description in form of a docstring with links to corresponding references or other useful information.
-[strategy.py](patterns/behavioral/strategy.py) has a good example of detailed description,
+Add "Examples in Python ecosystem" section if you know some. It shows how patterns could be applied to real-world problems.
+
+[facade.py](patterns/structural/facade.py) has a good example of detailed description,
but sometimes the shorter one as in [template.py](patterns/behavioral/template.py) would suffice.
In some cases class-level docstring with doctest would also help (see [adapter.py](patterns/structural/adapter.py))
+but readable OUTPUT section is much better.
+
##### Python2/3 compatibility
Try to keep it (discussion is held in [issue #208](https://github.com/faif/python-patterns/issues/208))
- use new style classes (inherit from `object`)
-- use `from future import print`
+- use `from __future__ import print_function`
##### Update README
When everything else is done - update corresponding part of README.
+
+
+##### Travis CI
+Please run the `flake8` and `pytest` commands locally to be sure your changes will pass CI.
diff --git a/patterns/behavioral/visitor.py b/patterns/behavioral/visitor.py
index 7235ce4c..cee9fdaf 100644
--- a/patterns/behavioral/visitor.py
+++ b/patterns/behavioral/visitor.py
@@ -13,7 +13,9 @@
- "8.22. Implementing the Visitor Pattern Without Recursion"
*Examples in Python ecosystem:
-Python's ast.NodeVisitor: https://github.com/python/cpython/blob/master/Lib/ast.py#L250
+- Python's ast.NodeVisitor: https://github.com/python/cpython/blob/master/Lib/ast.py#L250
+which is then being used e.g. in tools like `pyflakes`.
+- `Black` formatter tool implements it's own: https://github.com/ambv/black/blob/master/black.py#L718
"""
| https://api.github.com/repos/faif/python-patterns/pulls/274 | 2019-01-25T14:57:39Z | 2019-01-25T19:40:10Z | 2019-01-25T19:40:10Z | 2019-01-25T19:40:10Z | 599 | faif/python-patterns | 33,646 |
|
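As a quick illustration of the contributing guidelines in the diff above, a hedged skeleton of a pattern module (the pattern itself is a placeholder):

```python
"""*What is this pattern about?
A one-line description, with links to references.

*Examples in Python ecosystem:
- ...
"""

from __future__ import print_function


class Example(object):  # new-style class for Python 2 compatibility
    def run(self):
        print("hello from the pattern")


def main():
    Example().run()


if __name__ == "__main__":
    main()


OUTPUT = """
hello from the pattern
"""
```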
mgtv.com 2017/1/17 change api address and stream domain | diff --git a/src/you_get/extractors/mgtv.py b/src/you_get/extractors/mgtv.py
index 3ce62efebc..1656ac3c60 100644
--- a/src/you_get/extractors/mgtv.py
+++ b/src/you_get/extractors/mgtv.py
@@ -21,7 +21,7 @@ class MGTV(VideoExtractor):
id_dic = {i['video_profile']:(i['id']) for i in stream_types}
- api_endpoint = 'http://v.api.mgtv.com/player/video?video_id={video_id}'
+ api_endpoint = 'http://pcweb.api.mgtv.com/player/video?video_id={video_id}'
@staticmethod
def get_vid_from_url(url):
@@ -63,6 +63,7 @@ def prepare(self, **kwargs):
content = get_content(self.api_endpoint.format(video_id = self.vid))
content = loads(content)
self.title = content['data']['info']['title']
+ domain = content['data']['stream_domain'][0]
#stream_avalable = [i['name'] for i in content['data']['stream']]
stream_available = {}
@@ -73,7 +74,7 @@ def prepare(self, **kwargs):
if s['video_profile'] in stream_available.keys():
quality_id = self.id_dic[s['video_profile']]
url = stream_available[s['video_profile']]
- url = re.sub( r'(\&arange\=\d+)', '', url) #Un-Hum
+ url = domain + re.sub( r'(\&arange\=\d+)', '', url) #Un-Hum
m3u8_url, m3u8_size, segment_list_this = self.get_mgtv_real_url(url)
stream_fileid_list = []
@@ -144,8 +145,8 @@ def download(self, **kwargs):
else:
download_urls(stream_info['src'], self.title, stream_info['container'], stream_info['size'],
output_dir=kwargs['output_dir'],
- merge=kwargs['merge'],
- av=stream_id in self.dash_streams)
+ merge=kwargs.get('merge', True))
+ # av=stream_id in self.dash_streams)
site = MGTV()
download = site.download_by_url
diff --git a/src/you_get/json_output.py b/src/you_get/json_output.py
index 86a42abccb..3e1bac9f82 100644
--- a/src/you_get/json_output.py
+++ b/src/you_get/json_output.py
@@ -31,6 +31,11 @@ def print_info(site_info=None, title=None, type=None, size=None):
def download_urls(urls=None, title=None, ext=None, total_size=None, refer=None):
ve = last_info
+ if not ve:
+ ve = VideoExtractor()
+ ve.name = ''
+ ve.url = urls
+ ve.title=title
# save download info in streams
stream = {}
stream['container'] = ext
| On 2017/1/17, mgtv.com changed its video info API address and stream domain; this updates the extractor to match.
<!-- Reviewable:start -->
---
This change is [<img src="https://reviewable.io/review_button.svg" height="34" align="absmiddle" alt="Reviewable"/>](https://reviewable.io/reviews/soimort/you-get/1628)
<!-- Reviewable:end -->
| https://api.github.com/repos/soimort/you-get/pulls/1628 | 2017-01-19T03:19:08Z | 2017-02-19T20:15:11Z | 2017-02-19T20:15:11Z | 2017-02-19T20:15:14Z | 678 | soimort/you-get | 21,159 |
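A hedged sketch of the fix described in the record above, assuming the (since-changed) mgtv player API returned JSON with `data.stream_domain` and relative per-stream URLs, as the diff suggests; `requests` is used here in place of you-get's `get_content` helper:

```python
import re

import requests

API = "http://pcweb.api.mgtv.com/player/video?video_id={video_id}"


def resolve_stream_url(video_id, stream):
    data = requests.get(API.format(video_id=video_id)).json()["data"]
    domain = data["stream_domain"][0]
    # Strip the '&arange=...' parameter, then prefix the stream domain
    # onto the relative URL, as the patched prepare() does.
    return domain + re.sub(r"(\&arange\=\d+)", "", stream["url"])
```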
Auth documentation. | diff --git a/docs/index.rst b/docs/index.rst
index 64929d1781..cd0a0f86c5 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -90,6 +90,7 @@ instructions for getting the most out of Requests.
user/install
user/quickstart
user/advanced
+ user/authentication
Community Guide
diff --git a/docs/user/authentication.rst b/docs/user/authentication.rst
new file mode 100644
index 0000000000..a3b5b56d78
--- /dev/null
+++ b/docs/user/authentication.rst
@@ -0,0 +1,84 @@
+.. _authentication:
+
+Authentication
+==============
+
+This document discusses using various kinds of authentication with Requests.
+
+Many web services require authentication, and there are many different types.
+Below, we outline various forms of authentication available in Requests, from
+the simple to the complex.
+
+
+Basic Authentication
+--------------------
+
+Many web services that require authentication accept HTTP Basic Auth. This is
+the simplest kind, and Requests supports it straight out of the box.
+
+Making requests with HTTP Basic Auth is very simple::
+
+ >>> from requests.auth import HTTPBasicAuth
+ >>> requests.get('https://api.github.com/user', auth=HTTPBasicAuth('user', 'pass'))
+ <Response [200]>
+
+In fact, HTTP Basic Auth is so common that Requests provides a handy shorthand
+for using it::
+
+ >>> requests.get('https://api.github.com/user', auth=('user', 'pass'))
+ <Response [200]>
+
+Providing the credentials in a tuple like this is exactly the same as the
+``HTTPBasicAuth`` example above.
+
+
+Digest Authentication
+---------------------
+
+Another very popular form of HTTP Authentication is Digest Authentication,
+and Requests supports this out of the box as well::
+
+ >>> from requests.auth import HTTPDigestAuth
+ >>> url = 'http://httpbin.org/digest-auth/auth/user/pass'
+ >>> requests.get(url, auth=HTTPDigestAuth('user', 'pass'))
+ <Response [200]>
+
+
+Other Authentication
+--------------------
+
+Requests is designed to allow other forms of authentication to be easily and
+quickly plugged in. Members of the open-source community frequently write
+authentication handlers for more complicated or less commonly-used forms of
+authentication. Some of the best have been brought together under the
+`Requests organization`_, including:
+
+- OAuth_
+- Kerberos_
+- NTLM_
+
+If you want to use any of these forms of authentication, go straight to their
+Github page and follow the instructions.
+
+
+New Forms of Authentication
+---------------------------
+
+If you can't find a good implementation of the form of authentication you
+want, you can implement it yourself. Requests makes it easy to add your own
+forms of authentication.
+
+To do so, subclass :class:`requests.auth.AuthBase` and implement the
+``__call__()`` method. When an authentication handler is attached to a request,
+it is called during request setup. The ``__call__`` method must therefore do
+whatever is required to make the authentication work. Some forms of
+authentication will additionally add hooks to provide further functionality.
+
+Examples can be found under the `Requests organization`_ and in the
+``auth.py`` file.
+
+.. _OAuth: https://github.com/requests/requests-oauthlib
+.. _Kerberos: https://github.com/requests/requests-kerberos
+.. _NTLM: https://github.com/requests/requests-ntlm
+.. _Requests organization: https://github.com/requests
+
diff --git a/docs/user/quickstart.rst b/docs/user/quickstart.rst
index beb05a5c2c..1731b29588 100644
--- a/docs/user/quickstart.rst
+++ b/docs/user/quickstart.rst
@@ -337,80 +337,6 @@ parameter::
'{"cookies": {"cookies_are": "working"}}'
-Basic Authentication
---------------------
-
-Many web services require authentication. There are many different types of
-authentication, but the most common is HTTP Basic Auth.
-
-Making requests with Basic Auth is extremely simple::
-
- >>> from requests.auth import HTTPBasicAuth
- >>> requests.get('https://api.github.com/user', auth=HTTPBasicAuth('user', 'pass'))
- <Response [200]>
-
-Due to the prevalence of HTTP Basic Auth, requests provides a shorthand for
-this authentication method::
-
- >>> requests.get('https://api.github.com/user', auth=('user', 'pass'))
- <Response [200]>
-
-Providing the credentials as a tuple in this fashion is functionally equivalent
-to the ``HTTPBasicAuth`` example above.
-
-
-Digest Authentication
----------------------
-
-Another popular form of web service protection is Digest Authentication::
-
- >>> from requests.auth import HTTPDigestAuth
- >>> url = 'http://httpbin.org/digest-auth/auth/user/pass'
- >>> requests.get(url, auth=HTTPDigestAuth('user', 'pass'))
- <Response [200]>
-
-
-OAuth Authentication
---------------------
-
-Requests features robust, built-in OAuth support!
-
-OAuth takes many forms, so let's take a look at a few different forms::
-
- import requests
- from requests.auth import OAuth1
-
- url = u'https://api.twitter.com/1/account/settings.json'
-
- client_key = u'...'
- client_secret = u'...'
- resource_owner_key = u'...'
- resource_owner_secret = u'...'
-
-
-Query signing::
-
- queryoauth = OAuth1(client_key, client_secret,
- resource_owner_key, resource_owner_secret,
- signature_type='query')
- r = requests.get(url, auth=queryoauth)
-
-Header signing::
-
- headeroauth = OAuth1(client_key, client_secret,
- resource_owner_key, resource_owner_secret,
- signature_type='auth_header')
- r = requests.get(url, auth=headeroauth)
-
-Body signing::
-
- bodyoauth = OAuth1(client_key, client_secret,
- resource_owner_key, resource_owner_secret,
- signature_type='body')
-
- r = requests.post(url, auth=bodyoauth)
-
-
Redirection and History
-----------------------
| Thoughts? Not sure whether we want a separate section on the Requests org in general, but we can add that as a separate change if we do.
| https://api.github.com/repos/psf/requests/pulls/1009 | 2012-12-15T13:20:21Z | 2012-12-17T19:27:36Z | 2012-12-17T19:27:36Z | 2021-09-08T18:01:07Z | 1,402 | psf/requests | 32,747 |
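A minimal custom handler in the style the new docs describe — `TokenAuth` and the `X-Token` header are made-up names for illustration:

```python
import requests
from requests.auth import AuthBase


class TokenAuth(AuthBase):
    def __init__(self, token):
        self.token = token

    def __call__(self, r):
        # Modify and return the outgoing PreparedRequest.
        r.headers["X-Token"] = self.token
        return r


requests.get("https://httpbin.org/headers", auth=TokenAuth("secret"))
```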
ref(tests) Move tests for MutedBox to RTL | diff --git a/tests/js/spec/components/mutedBox.spec.jsx b/tests/js/spec/components/mutedBox.spec.jsx
index ab503b6d116822..3deab491199f26 100644
--- a/tests/js/spec/components/mutedBox.spec.jsx
+++ b/tests/js/spec/components/mutedBox.spec.jsx
@@ -1,38 +1,56 @@
-import {mountWithTheme} from 'sentry-test/enzyme';
+import {mountWithTheme, screen} from 'sentry-test/reactTestingLibrary';
import MutedBox from 'app/components/mutedBox';
describe('MutedBox', function () {
describe('render()', function () {
it('handles ignoreUntil', function () {
- const wrapper = mountWithTheme(
+ const {container} = mountWithTheme(
<MutedBox statusDetails={{ignoreUntil: '2017-06-21T19:45:10Z'}} />
);
- expect(wrapper).toSnapshot();
+ expect(screen.getByText(/This issue has been ignored until/)).toBeInTheDocument();
+ expect(container).toSnapshot();
});
it('handles ignoreCount', function () {
- const wrapper = mountWithTheme(<MutedBox statusDetails={{ignoreUserCount: 100}} />);
- expect(wrapper).toSnapshot();
+ const {container} = mountWithTheme(
+ <MutedBox statusDetails={{ignoreUserCount: 100}} />
+ );
+ expect(
+ screen.getByText(/This issue has been ignored until it affects/)
+ ).toBeInTheDocument();
+ expect(container).toSnapshot();
});
it('handles ignoreCount with ignoreWindow', function () {
- const wrapper = mountWithTheme(
+ const {container} = mountWithTheme(
<MutedBox statusDetails={{ignoreCount: 100, ignoreWindow: 1}} />
);
- expect(wrapper).toSnapshot();
+ expect(
+ screen.getByText(/This issue has been ignored until it occurs/)
+ ).toBeInTheDocument();
+ expect(container).toSnapshot();
});
it('handles ignoreUserCount', function () {
- const wrapper = mountWithTheme(<MutedBox statusDetails={{ignoreUserCount: 100}} />);
- expect(wrapper).toSnapshot();
+ const {container} = mountWithTheme(
+ <MutedBox statusDetails={{ignoreUserCount: 100}} />
+ );
+ expect(
+ screen.getByText(/This issue has been ignored until it affects/)
+ ).toBeInTheDocument();
+ expect(container).toSnapshot();
});
it('handles ignoreUserCount with ignoreUserWindow', function () {
- const wrapper = mountWithTheme(
+ const {container} = mountWithTheme(
<MutedBox statusDetails={{ignoreUserCount: 100, ignoreUserWindow: 1}} />
);
- expect(wrapper).toSnapshot();
+ expect(
+ screen.getByText(/This issue has been ignored until it affects/)
+ ).toBeInTheDocument();
+ expect(container).toSnapshot();
});
it('handles default', function () {
- const wrapper = mountWithTheme(<MutedBox statusDetails={{}} />);
- expect(wrapper).toSnapshot();
+ const {container} = mountWithTheme(<MutedBox statusDetails={{}} />);
+ expect(screen.getByText(/This issue has been ignored/)).toBeInTheDocument();
+ expect(container).toSnapshot();
});
});
});
| Add some text assertions so we aren't entirely reliant on snapshots which don't run locally. | https://api.github.com/repos/getsentry/sentry/pulls/29967 | 2021-11-11T22:23:05Z | 2021-11-12T15:15:33Z | 2021-11-12T15:15:33Z | 2021-11-28T00:02:38Z | 732 | getsentry/sentry | 44,374 |
closes bpo-43278: remove unnecessary leading '\n' from COMPILER when build with GCC/Clang | diff --git a/Misc/NEWS.d/next/C API/2021-02-21-15-30-38.bpo-43278.DMPaWH.rst b/Misc/NEWS.d/next/C API/2021-02-21-15-30-38.bpo-43278.DMPaWH.rst
new file mode 100644
index 00000000000000..7df9295aeccf04
--- /dev/null
+++ b/Misc/NEWS.d/next/C API/2021-02-21-15-30-38.bpo-43278.DMPaWH.rst
@@ -0,0 +1 @@
+Always put compiler and system information on the first line of the REPL welcome message.
diff --git a/Python/getcompiler.c b/Python/getcompiler.c
index 59c0dbf92aebf4..a5d26239e8772e 100644
--- a/Python/getcompiler.c
+++ b/Python/getcompiler.c
@@ -8,9 +8,9 @@
// Note the __clang__ conditional has to come before the __GNUC__ one because
// clang pretends to be GCC.
#if defined(__clang__)
-#define COMPILER "\n[Clang " __clang_version__ "]"
+#define COMPILER "[Clang " __clang_version__ "]"
#elif defined(__GNUC__)
-#define COMPILER "\n[GCC " __VERSION__ "]"
+#define COMPILER "[GCC " __VERSION__ "]"
// Generic fallbacks.
#elif defined(__cplusplus)
#define COMPILER "[C++]"
| <!--
Thanks for your contribution!
Please read this comment in its entirety. It's quite important.
# Pull Request title
It should be in the following format:
```
bpo-NNNN: Summary of the changes made
```
Where: bpo-NNNN refers to the issue number in the https://bugs.python.org.
Most PRs will require an issue number. Trivial changes, like fixing a typo, do not need an issue.
# Backport Pull Request title
If this is a backport PR (PR made against branches other than `master`),
please ensure that the PR title is in the following format:
```
[X.Y] <title from the original PR> (GH-NNNN)
```
Where: [X.Y] is the branch name, e.g. [3.6].
GH-NNNN refers to the PR number from `master`.
-->
<!-- issue-number: [bpo-43278](https://bugs.python.org/issue43278) -->
https://bugs.python.org/issue43278
<!-- /issue-number -->
Automerge-Triggered-By: GH:benjaminp | https://api.github.com/repos/python/cpython/pulls/24606 | 2021-02-21T07:39:05Z | 2021-02-26T04:24:21Z | 2021-02-26T04:24:21Z | 2021-02-26T04:24:23Z | 346 | python/cpython | 4,785 |
Bump actions/checkout from 2 to 4 | diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index 0729ea9ef..2a973c4c2 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -29,7 +29,7 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml
index 2f230307f..a3bc97174 100644
--- a/.github/workflows/codespell.yml
+++ b/.github/workflows/codespell.yml
@@ -4,7 +4,7 @@ jobs:
codespell:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- run: python3 -m pip install codespell
- run: codespell --ignore-words-list="ba,fo,hel,revered,womens"
--skip="./README.*.md,*.svg,*.ai,./benchmarks/snippets.py,./tests,./tools"
diff --git a/.github/workflows/newissue.yml b/.github/workflows/newissue.yml
index bd8b5a526..2b6080399 100644
--- a/.github/workflows/newissue.yml
+++ b/.github/workflows/newissue.yml
@@ -8,7 +8,7 @@ jobs:
permissions:
issues: write
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
with:
ref: master
- name: Install FAQtory
diff --git a/.github/workflows/pythonpackage.yml b/.github/workflows/pythonpackage.yml
index 79ad51e67..3839e2996 100644
--- a/.github/workflows/pythonpackage.yml
+++ b/.github/workflows/pythonpackage.yml
@@ -13,7 +13,7 @@ jobs:
run:
shell: bash
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
diff --git a/.github/workflows/readmechanged.yml b/.github/workflows/readmechanged.yml
index e4729f6a7..7511dcd4b 100644
--- a/.github/workflows/readmechanged.yml
+++ b/.github/workflows/readmechanged.yml
@@ -11,7 +11,7 @@ jobs:
send_notification:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v4
- name: Send notification to README Authors
env:
GITHUB_TOKEN: ${{ secrets.GHP_README_WORKFLOW }}
| Bumps [actions/checkout](https://github.com/actions/checkout) from 2 to 4.
<details>
<summary>Release notes</summary>
<p><em>Sourced from <a href="https://github.com/actions/checkout/releases">actions/checkout's releases</a>.</em></p>
<blockquote>
<h2>v4.0.0</h2>
<h2>What's Changed</h2>
<ul>
<li>Update default runtime to node20 by <a href="https://github.com/takost"><code>@takost</code></a> in <a href="https://redirect.github.com/actions/checkout/pull/1436">actions/checkout#1436</a></li>
<li>Support fetching without the --progress option by <a href="https://github.com/simonbaird"><code>@simonbaird</code></a> in <a href="https://redirect.github.com/actions/checkout/pull/1067">actions/checkout#1067</a></li>
<li>Release 4.0.0 by <a href="https://github.com/takost"><code>@takost</code></a> in <a href="https://redirect.github.com/actions/checkout/pull/1447">actions/checkout#1447</a></li>
</ul>
<h2>New Contributors</h2>
<ul>
<li><a href="https://github.com/takost"><code>@takost</code></a> made their first contribution in <a href="https://redirect.github.com/actions/checkout/pull/1436">actions/checkout#1436</a></li>
<li><a href="https://github.com/simonbaird"><code>@simonbaird</code></a> made their first contribution in <a href="https://redirect.github.com/actions/checkout/pull/1067">actions/checkout#1067</a></li>
</ul>
<p><strong>Full Changelog</strong>: <a href="https://github.com/actions/checkout/compare/v3...v4.0.0">https://github.com/actions/checkout/compare/v3...v4.0.0</a></p>
<h2>v3.6.0</h2>
<h2>What's Changed</h2>
<ul>
<li>Mark test scripts with Bash'isms to be run via Bash by <a href="https://github.com/dscho"><code>@dscho</code></a> in <a href="https://redirect.github.com/actions/checkout/pull/1377">actions/checkout#1377</a></li>
<li>Add option to fetch tags even if fetch-depth > 0 by <a href="https://github.com/RobertWieczoreck"><code>@RobertWieczoreck</code></a> in <a href="https://redirect.github.com/actions/checkout/pull/579">actions/checkout#579</a></li>
<li>Release 3.6.0 by <a href="https://github.com/luketomlinson"><code>@luketomlinson</code></a> in <a href="https://redirect.github.com/actions/checkout/pull/1437">actions/checkout#1437</a></li>
</ul>
<h2>New Contributors</h2>
<ul>
<li><a href="https://github.com/RobertWieczoreck"><code>@RobertWieczoreck</code></a> made their first contribution in <a href="https://redirect.github.com/actions/checkout/pull/579">actions/checkout#579</a></li>
<li><a href="https://github.com/luketomlinson"><code>@luketomlinson</code></a> made their first contribution in <a href="https://redirect.github.com/actions/checkout/pull/1437">actions/checkout#1437</a></li>
</ul>
<p><strong>Full Changelog</strong>: <a href="https://github.com/actions/checkout/compare/v3.5.3...v3.6.0">https://github.com/actions/checkout/compare/v3.5.3...v3.6.0</a></p>
<h2>v3.5.3</h2>
<h2>What's Changed</h2>
<ul>
<li>Fix: Checkout Issue in self hosted runner due to faulty submodule check-ins by <a href="https://github.com/megamanics"><code>@megamanics</code></a> in <a href="https://redirect.github.com/actions/checkout/pull/1196">actions/checkout#1196</a></li>
<li>Fix typos found by codespell by <a href="https://github.com/DimitriPapadopoulos"><code>@DimitriPapadopoulos</code></a> in <a href="https://redirect.github.com/actions/checkout/pull/1287">actions/checkout#1287</a></li>
<li>Add support for sparse checkouts by <a href="https://github.com/dscho"><code>@dscho</code></a> and <a href="https://github.com/dfdez"><code>@dfdez</code></a> in <a href="https://redirect.github.com/actions/checkout/pull/1369">actions/checkout#1369</a></li>
<li>Release v3.5.3 by <a href="https://github.com/TingluoHuang"><code>@TingluoHuang</code></a> in <a href="https://redirect.github.com/actions/checkout/pull/1376">actions/checkout#1376</a></li>
</ul>
<h2>New Contributors</h2>
<ul>
<li><a href="https://github.com/megamanics"><code>@megamanics</code></a> made their first contribution in <a href="https://redirect.github.com/actions/checkout/pull/1196">actions/checkout#1196</a></li>
<li><a href="https://github.com/DimitriPapadopoulos"><code>@DimitriPapadopoulos</code></a> made their first contribution in <a href="https://redirect.github.com/actions/checkout/pull/1287">actions/checkout#1287</a></li>
<li><a href="https://github.com/dfdez"><code>@dfdez</code></a> made their first contribution in <a href="https://redirect.github.com/actions/checkout/pull/1369">actions/checkout#1369</a></li>
</ul>
<p><strong>Full Changelog</strong>: <a href="https://github.com/actions/checkout/compare/v3...v3.5.3">https://github.com/actions/checkout/compare/v3...v3.5.3</a></p>
<h2>v3.5.2</h2>
<h2>What's Changed</h2>
<ul>
<li>Fix: Use correct API url / endpoint in GHES by <a href="https://github.com/fhammerl"><code>@fhammerl</code></a> in <a href="https://redirect.github.com/actions/checkout/pull/1289">actions/checkout#1289</a> based on <a href="https://redirect.github.com/actions/checkout/issues/1286">#1286</a> by <a href="https://github.com/1newsr"><code>@1newsr</code></a></li>
</ul>
<p><strong>Full Changelog</strong>: <a href="https://github.com/actions/checkout/compare/v3.5.1...v3.5.2">https://github.com/actions/checkout/compare/v3.5.1...v3.5.2</a></p>
<h2>v3.5.1</h2>
<h2>What's Changed</h2>
<ul>
<li>Improve checkout performance on Windows runners by upgrading <code>@actions/github</code> dependency by <a href="https://github.com/BrettDong"><code>@BrettDong</code></a> in <a href="https://redirect.github.com/actions/checkout/pull/1246">actions/checkout#1246</a></li>
</ul>
<h2>New Contributors</h2>
<ul>
<li><a href="https://github.com/BrettDong"><code>@BrettDong</code></a> made their first contribution in <a href="https://redirect.github.com/actions/checkout/pull/1246">actions/checkout#1246</a></li>
</ul>
<!-- raw HTML omitted -->
</blockquote>
<p>... (truncated)</p>
</details>
<details>
<summary>Changelog</summary>
<p><em>Sourced from <a href="https://github.com/actions/checkout/blob/main/CHANGELOG.md">actions/checkout's changelog</a>.</em></p>
<blockquote>
<h1>Changelog</h1>
<h2>v4.0.0</h2>
<ul>
<li><a href="https://redirect.github.com/actions/checkout/pull/1067">Support fetching without the --progress option</a></li>
<li><a href="https://redirect.github.com/actions/checkout/pull/1436">Update to node20</a></li>
</ul>
<h2>v3.6.0</h2>
<ul>
<li><a href="https://redirect.github.com/actions/checkout/pull/1377">Fix: Mark test scripts with Bash'isms to be run via Bash</a></li>
<li><a href="https://redirect.github.com/actions/checkout/pull/579">Add option to fetch tags even if fetch-depth > 0</a></li>
</ul>
<h2>v3.5.3</h2>
<ul>
<li><a href="https://redirect.github.com/actions/checkout/pull/1196">Fix: Checkout fail in self-hosted runners when faulty submodule are checked-in</a></li>
<li><a href="https://redirect.github.com/actions/checkout/pull/1287">Fix typos found by codespell</a></li>
<li><a href="https://redirect.github.com/actions/checkout/pull/1369">Add support for sparse checkouts</a></li>
</ul>
<h2>v3.5.2</h2>
<ul>
<li><a href="https://redirect.github.com/actions/checkout/pull/1289">Fix api endpoint for GHES</a></li>
</ul>
<h2>v3.5.1</h2>
<ul>
<li><a href="https://redirect.github.com/actions/checkout/pull/1246">Fix slow checkout on Windows</a></li>
</ul>
<h2>v3.5.0</h2>
<ul>
<li><a href="https://redirect.github.com/actions/checkout/pull/1237">Add new public key for known_hosts</a></li>
</ul>
<h2>v3.4.0</h2>
<ul>
<li><a href="https://redirect.github.com/actions/checkout/pull/1209">Upgrade codeql actions to v2</a></li>
<li><a href="https://redirect.github.com/actions/checkout/pull/1210">Upgrade dependencies</a></li>
<li><a href="https://redirect.github.com/actions/checkout/pull/1225">Upgrade <code>@actions/io</code></a></li>
</ul>
<h2>v3.3.0</h2>
<ul>
<li><a href="https://redirect.github.com/actions/checkout/pull/1045">Implement branch list using callbacks from exec function</a></li>
<li><a href="https://redirect.github.com/actions/checkout/pull/1050">Add in explicit reference to private checkout options</a></li>
<li>[Fix comment typos (that got added in <a href="https://redirect.github.com/actions/checkout/issues/770">#770</a>)](<a href="https://redirect.github.com/actions/checkout/pull/1057">actions/checkout#1057</a>)</li>
</ul>
<h2>v3.2.0</h2>
<ul>
<li><a href="https://redirect.github.com/actions/checkout/pull/942">Add GitHub Action to perform release</a></li>
<li><a href="https://redirect.github.com/actions/checkout/pull/967">Fix status badge</a></li>
<li><a href="https://redirect.github.com/actions/checkout/pull/1002">Replace datadog/squid with ubuntu/squid Docker image</a></li>
<li><a href="https://redirect.github.com/actions/checkout/pull/964">Wrap pipeline commands for submoduleForeach in quotes</a></li>
<li><a href="https://redirect.github.com/actions/checkout/pull/1029">Update <code>@actions/io</code> to 1.1.2</a></li>
<li><a href="https://redirect.github.com/actions/checkout/pull/1039">Upgrading version to 3.2.0</a></li>
</ul>
<h2>v3.1.0</h2>
<ul>
<li><a href="https://redirect.github.com/actions/checkout/pull/939">Use <code>@actions/core</code> <code>saveState</code> and <code>getState</code></a></li>
<li><a href="https://redirect.github.com/actions/checkout/pull/922">Add <code>github-server-url</code> input</a></li>
</ul>
<h2>v3.0.2</h2>
<ul>
<li><a href="https://redirect.github.com/actions/checkout/pull/770">Add input <code>set-safe-directory</code></a></li>
</ul>
<h2>v3.0.1</h2>
<!-- raw HTML omitted -->
</blockquote>
<p>... (truncated)</p>
</details>
<details>
<summary>Commits</summary>
<ul>
<li><a href="https://github.com/actions/checkout/commit/3df4ab11eba7bda6032a0b82a6bb43b11571feac"><code>3df4ab1</code></a> Release 4.0.0 (<a href="https://redirect.github.com/actions/checkout/issues/1447">#1447</a>)</li>
<li><a href="https://github.com/actions/checkout/commit/8b5e8b768746b50394015010d25e690bfab9dfbc"><code>8b5e8b7</code></a> Support fetching without the --progress option (<a href="https://redirect.github.com/actions/checkout/issues/1067">#1067</a>)</li>
<li><a href="https://github.com/actions/checkout/commit/97a652b80035363df47baee5031ec8670b8878ac"><code>97a652b</code></a> Update default runtime to node20 (<a href="https://redirect.github.com/actions/checkout/issues/1436">#1436</a>)</li>
<li><a href="https://github.com/actions/checkout/commit/f43a0e5ff2bd294095638e18286ca9a3d1956744"><code>f43a0e5</code></a> Release 3.6.0 (<a href="https://redirect.github.com/actions/checkout/issues/1437">#1437</a>)</li>
<li><a href="https://github.com/actions/checkout/commit/7739b9ba2efcda9dde65ad1e3c2dbe65b41dfba7"><code>7739b9b</code></a> Add option to fetch tags even if fetch-depth > 0 (<a href="https://redirect.github.com/actions/checkout/issues/579">#579</a>)</li>
<li><a href="https://github.com/actions/checkout/commit/96f53100ba2a5449eb71d2e6604bbcd94b9449b5"><code>96f5310</code></a> Mark test scripts with Bash'isms to be run via Bash (<a href="https://redirect.github.com/actions/checkout/issues/1377">#1377</a>)</li>
<li><a href="https://github.com/actions/checkout/commit/c85c95e3d7251135ab7dc9ce3241c5835cc595a9"><code>c85c95e</code></a> Release v3.5.3 (<a href="https://redirect.github.com/actions/checkout/issues/1376">#1376</a>)</li>
<li><a href="https://github.com/actions/checkout/commit/d106d4669b3bfcb17f11f83f98e1cab478e9f635"><code>d106d46</code></a> Add support for sparse checkouts (<a href="https://redirect.github.com/actions/checkout/issues/1369">#1369</a>)</li>
<li><a href="https://github.com/actions/checkout/commit/f095bcc56b7c2baf48f3ac70d6d6782f4f553222"><code>f095bcc</code></a> Fix typos found by codespell (<a href="https://redirect.github.com/actions/checkout/issues/1287">#1287</a>)</li>
<li><a href="https://github.com/actions/checkout/commit/47fbe2df0ad0e27efb67a70beac3555f192b062f"><code>47fbe2d</code></a> Fix: Checkout fail in self-hosted runners when faulty submodule are checked-i...</li>
<li>Additional commits viewable in <a href="https://github.com/actions/checkout/compare/v2...v4">compare view</a></li>
</ul>
</details>
<br />
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=actions/checkout&package-manager=github_actions&previous-version=2&new-version=4)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
You can trigger a rebase of this PR by commenting `@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
<details>
<summary>Dependabot commands and options</summary>
<br />
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show <dependency name> ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
</details>
> **Note**
> Automatic rebases have been disabled on this pull request as it has been open for over 30 days.
| https://api.github.com/repos/Textualize/rich/pulls/3122 | 2023-09-04T13:43:27Z | 2023-11-07T12:03:59Z | 2023-11-07T12:03:59Z | 2023-11-07T12:04:09Z | 692 | Textualize/rich | 48,027 |
Add support for resuming downloads | diff --git a/download-model.py b/download-model.py
index 7e5f61b2b7..434732156f 100644
--- a/download-model.py
+++ b/download-model.py
@@ -9,6 +9,7 @@
import argparse
import base64
import datetime
+import hashlib
import json
import re
import sys
@@ -24,11 +25,28 @@
parser.add_argument('--threads', type=int, default=1, help='Number of files to download simultaneously.')
parser.add_argument('--text-only', action='store_true', help='Only download text files (txt/json).')
parser.add_argument('--output', type=str, default=None, help='The folder where the model should be saved.')
+parser.add_argument('--clean', action='store_true', help='Does not resume the previous download.')
+parser.add_argument('--check', action='store_true', help='Validates the checksums of model files.')
args = parser.parse_args()
def get_file(url, output_folder):
- r = requests.get(url, stream=True)
- with open(output_folder / Path(url.rsplit('/', 1)[1]), 'wb') as f:
+ filename = Path(url.rsplit('/', 1)[1])
+ output_path = output_folder / filename
+ if output_path.exists() and not args.clean:
+ # Check if the file has already been downloaded completely
+ r = requests.get(url, stream=True)
+ total_size = int(r.headers.get('content-length', 0))
+ if output_path.stat().st_size >= total_size:
+ return
+ # Otherwise, resume the download from where it left off
+ headers = {'Range': f'bytes={output_path.stat().st_size}-'}
+ mode = 'ab'
+ else:
+ headers = {}
+ mode = 'wb'
+
+ r = requests.get(url, stream=True, headers=headers)
+ with open(output_path, mode) as f:
total_size = int(r.headers.get('content-length', 0))
block_size = 1024
with tqdm.tqdm(total=total_size, unit='iB', unit_scale=True, bar_format='{l_bar}{bar}| {n_fmt:6}/{total_fmt:6} {rate_fmt:6}') as t:
@@ -149,7 +167,7 @@ def get_download_links_from_huggingface(model, branch):
return links, sha256, is_lora
def download_files(file_list, output_folder, num_threads=8):
- thread_map(lambda url: get_file(url, output_folder), file_list, max_workers=num_threads)
+ thread_map(lambda url: get_file(url, output_folder), file_list, max_workers=num_threads, disable=True)
if __name__ == '__main__':
model = args.MODEL
@@ -179,22 +197,48 @@ def download_files(file_list, output_folder, num_threads=8):
output_folder = f"{'_'.join(model.split('/')[-2:])}"
if branch != 'main':
output_folder += f'_{branch}'
-
- # Creating the folder and writing the metadata
output_folder = Path(base_folder) / output_folder
- if not output_folder.exists():
- output_folder.mkdir()
- with open(output_folder / 'huggingface-metadata.txt', 'w') as f:
- f.write(f'url: https://huggingface.co/{model}\n')
- f.write(f'branch: {branch}\n')
- f.write(f'download date: {str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))}\n')
- sha256_str = ''
+
+ if args.check:
+ # Validate the checksums
+ validated = True
for i in range(len(sha256)):
- sha256_str += f' {sha256[i][1]} {sha256[i][0]}\n'
- if sha256_str != '':
- f.write(f'sha256sum:\n{sha256_str}')
+ fpath = (output_folder / sha256[i][0])
+
+ if not fpath.exists():
+ print(f"The following file is missing: {fpath}")
+ validated = False
+ continue
+
+ with open(output_folder / sha256[i][0], "rb") as f:
+ bytes = f.read()
+ file_hash = hashlib.sha256(bytes).hexdigest()
+ if file_hash != sha256[i][1]:
+ print(f'Checksum failed: {sha256[i][0]} {sha256[i][1]}')
+ validated = False
+ else:
+ print(f'Checksum validated: {sha256[i][0]} {sha256[i][1]}')
+
+ if validated:
+ print('[+] Validated checksums of all model files!')
+ else:
+ print('[-] Invalid checksums. Rerun download-model.py with the --clean flag.')
- # Downloading the files
- print(f"Downloading the model to {output_folder}")
- download_files(links, output_folder, args.threads)
- print()
+ else:
+
+ # Creating the folder and writing the metadata
+ if not output_folder.exists():
+ output_folder.mkdir()
+ with open(output_folder / 'huggingface-metadata.txt', 'w') as f:
+ f.write(f'url: https://huggingface.co/{model}\n')
+ f.write(f'branch: {branch}\n')
+ f.write(f'download date: {str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))}\n')
+ sha256_str = ''
+ for i in range(len(sha256)):
+ sha256_str += f' {sha256[i][1]} {sha256[i][0]}\n'
+ if sha256_str != '':
+ f.write(f'sha256sum:\n{sha256_str}')
+
+ # Downloading the files
+ print(f"Downloading the model to {output_folder}")
+ download_files(links, output_folder, args.threads)
\ No newline at end of file
| This pull request adds the ability to resume interrupted downloads by modifying the `get_file` function. The function now uses the HTTP `Range` header to fetch only the remaining part of a file that wasn't downloaded yet.
In the testing I have done, resuming a partial download still produces the same checksum (maybe an automatic checksum check would be useful later).
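The core of the resume logic reduces to a conditional `Range` request — a minimal standalone sketch (assumes the server supports range requests and replies with `206 Partial Content`):

```python
import requests
from pathlib import Path

def resume_download(url: str, output_path: Path, block_size: int = 1024) -> None:
    # If a partial file exists, request only the bytes we are missing.
    existing = output_path.stat().st_size if output_path.exists() else 0
    headers = {"Range": f"bytes={existing}-"} if existing else {}
    mode = "ab" if existing else "wb"  # append to the partial file
    with requests.get(url, stream=True, headers=headers, timeout=30) as r:
        r.raise_for_status()
        with open(output_path, mode) as f:
            for chunk in r.iter_content(chunk_size=block_size):
                f.write(chunk)
```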
Note: at the moment, when running the download script with multiple threads, keyboard interrupts stop individual threads rather than the program itself. | https://api.github.com/repos/oobabooga/text-generation-webui/pulls/654 | 2023-03-30T07:35:40Z | 2023-04-01T01:55:56Z | 2023-04-01T01:55:56Z | 2023-04-01T02:01:17Z | 1,337 | oobabooga/text-generation-webui | 25,988 |
[Dashboard] Make loading screen not block out the entire page. | diff --git a/dashboard/client/src/components/Loading.tsx b/dashboard/client/src/components/Loading.tsx
index 6c1cb1e8f0eaa..edca6bb2063e2 100644
--- a/dashboard/client/src/components/Loading.tsx
+++ b/dashboard/client/src/components/Loading.tsx
@@ -1,10 +1,7 @@
-import { Backdrop, CircularProgress } from "@material-ui/core";
+import { CircularProgress } from "@material-ui/core";
import React from "react";
-const Loading = ({ loading }: { loading: boolean }) => (
- <Backdrop open={loading} style={{ zIndex: 100 }}>
- <CircularProgress color="primary" />
- </Backdrop>
-);
+const Loading = ({ loading }: { loading: boolean }) =>
+ loading ? <CircularProgress color="primary" /> : null;
export default Loading;
diff --git a/dashboard/client/src/pages/actor/ActorDetail.tsx b/dashboard/client/src/pages/actor/ActorDetail.tsx
index 80783d6b1baa1..b0517766fe3c0 100644
--- a/dashboard/client/src/pages/actor/ActorDetail.tsx
+++ b/dashboard/client/src/pages/actor/ActorDetail.tsx
@@ -37,12 +37,12 @@ const useStyle = makeStyles((theme) => ({
const ActorDetailPage = () => {
const classes = useStyle();
const { ipLogMap } = useContext(GlobalContext);
- const { params, actorDetail, msg } = useActorDetail();
+ const { params, actorDetail, msg, isLoading } = useActorDetail();
if (!actorDetail) {
return (
<div className={classes.root}>
- <Loading loading={msg.startsWith("Loading")} />
+ <Loading loading={isLoading} />
<TitleCard title={`JOB - ${params.id}`}>
<StatusChip type="job" status="LOADING" />
<br />
diff --git a/dashboard/client/src/pages/actor/hook/useActorDetail.ts b/dashboard/client/src/pages/actor/hook/useActorDetail.ts
index 271f903e076c1..86aad3aaa9594 100644
--- a/dashboard/client/src/pages/actor/hook/useActorDetail.ts
+++ b/dashboard/client/src/pages/actor/hook/useActorDetail.ts
@@ -10,7 +10,7 @@ export const useActorDetail = () => {
const [msg, setMsg] = useState("Loading the actor infos...");
const { namespaceMap } = useContext(GlobalContext);
- const { data: actorDetail } = useSWR(
+ const { data: actorDetail, isLoading } = useSWR(
["useActorDetail", params.id],
async ([_, actorId]) => {
const actor_resp = await getActor(actorId);
@@ -35,6 +35,7 @@ export const useActorDetail = () => {
params,
actorDetail,
msg,
+ isLoading,
namespaceMap,
};
};
diff --git a/dashboard/client/src/pages/job/JobDetail.tsx b/dashboard/client/src/pages/job/JobDetail.tsx
index 888ae5eeb48db..e3721403dbc7e 100644
--- a/dashboard/client/src/pages/job/JobDetail.tsx
+++ b/dashboard/client/src/pages/job/JobDetail.tsx
@@ -31,7 +31,7 @@ const useStyle = makeStyles((theme) => ({
export const JobDetailChartsPage = () => {
const classes = useStyle();
- const { job, msg, params } = useJobDetail();
+ const { job, msg, isLoading, params } = useJobDetail();
const jobId = params.id;
const [taskListFilter, setTaskListFilter] = useState<string>();
@@ -99,7 +99,7 @@ export const JobDetailChartsPage = () => {
if (!job) {
return (
<div className={classes.root}>
- <Loading loading={msg.startsWith("Loading")} />
+ <Loading loading={isLoading} />
<TitleCard title={`JOB - ${params.id}`}>
<StatusChip type="job" status="LOADING" />
<br />
diff --git a/dashboard/client/src/pages/job/JobDetailInfoPage.tsx b/dashboard/client/src/pages/job/JobDetailInfoPage.tsx
index 24bb85b23e17c..b8345e8d44e49 100644
--- a/dashboard/client/src/pages/job/JobDetailInfoPage.tsx
+++ b/dashboard/client/src/pages/job/JobDetailInfoPage.tsx
@@ -20,7 +20,7 @@ export const JobDetailInfoPage = () => {
// TODO(aguo): Add more content to this page!
const classes = useStyle();
- const { job, msg, params } = useJobDetail();
+ const { job, msg, isLoading, params } = useJobDetail();
if (!job) {
return (
@@ -32,7 +32,7 @@ export const JobDetailInfoPage = () => {
path: undefined,
}}
/>
- <Loading loading={msg.startsWith("Loading")} />
+ <Loading loading={isLoading} />
<TitleCard title={`JOB - ${params.id}`}>
<StatusChip type="job" status="LOADING" />
<br />
diff --git a/dashboard/client/src/pages/job/hook/useJobDetail.ts b/dashboard/client/src/pages/job/hook/useJobDetail.ts
index 7dda767a297e2..d473eb3cba314 100644
--- a/dashboard/client/src/pages/job/hook/useJobDetail.ts
+++ b/dashboard/client/src/pages/job/hook/useJobDetail.ts
@@ -10,7 +10,7 @@ export const useJobDetail = () => {
const [msg, setMsg] = useState("Loading the job detail");
const [refreshing, setRefresh] = useState(true);
const { ipLogMap } = useContext(GlobalContext);
- const { data: job } = useSWR(
+ const { data: job, isLoading } = useSWR(
"useJobDetail",
async () => {
try {
@@ -26,6 +26,7 @@ export const useJobDetail = () => {
return {
job,
+ isLoading,
msg,
params,
ipLogMap,
diff --git a/dashboard/client/src/pages/job/hook/useJobList.ts b/dashboard/client/src/pages/job/hook/useJobList.ts
index ba4beedb6b6c8..8ed6079be5a49 100644
--- a/dashboard/client/src/pages/job/hook/useJobList.ts
+++ b/dashboard/client/src/pages/job/hook/useJobList.ts
@@ -30,7 +30,7 @@ export const useJobList = () => {
};
refreshRef.current = isRefreshing;
- const { data } = useSWR(
+ const { data, isLoading } = useSWR(
"useJobList",
async () => {
const rsp = await getJobList();
@@ -52,6 +52,7 @@ export const useJobList = () => {
filter.every((f) => node[f.key] && (node[f.key] ?? "").includes(f.val)),
),
msg,
+ isLoading,
isRefreshing,
onSwitchChange,
changeFilter,
diff --git a/dashboard/client/src/pages/job/index.tsx b/dashboard/client/src/pages/job/index.tsx
index 4fe82836ab34a..1ff5eb6346f6e 100644
--- a/dashboard/client/src/pages/job/index.tsx
+++ b/dashboard/client/src/pages/job/index.tsx
@@ -68,6 +68,7 @@ const JobList = () => {
const classes = useStyles();
const {
msg,
+ isLoading,
isRefreshing,
onSwitchChange,
jobList,
@@ -78,7 +79,7 @@ const JobList = () => {
return (
<div className={classes.root}>
- <Loading loading={msg.startsWith("Loading")} />
+ <Loading loading={isLoading} />
<TitleCard title="JOBS">
Auto Refresh:
<Switch
diff --git a/dashboard/client/src/pages/node/ClusterDetailInfoPage.tsx b/dashboard/client/src/pages/node/ClusterDetailInfoPage.tsx
index 06a69ea404b7b..8c8ecb211afc6 100644
--- a/dashboard/client/src/pages/node/ClusterDetailInfoPage.tsx
+++ b/dashboard/client/src/pages/node/ClusterDetailInfoPage.tsx
@@ -18,7 +18,7 @@ export const ClusterDetailInfoPage = () => {
// TODO(aguo): Add more content to this page!
const classes = useStyle();
- const { clusterDetail, msg } = useClusterDetail();
+ const { clusterDetail, msg, isLoading } = useClusterDetail();
if (!clusterDetail) {
return (
@@ -30,7 +30,7 @@ export const ClusterDetailInfoPage = () => {
path: undefined,
}}
/>
- <Loading loading={msg.startsWith("Loading")} />
+ <Loading loading={isLoading} />
<TitleCard title={`CLUSTER`}>
<StatusChip type="cluster" status="LOADING" />
<br />
diff --git a/dashboard/client/src/pages/node/NodeDetail.tsx b/dashboard/client/src/pages/node/NodeDetail.tsx
index 6f357e44fc2e1..b54e41cd9ff18 100644
--- a/dashboard/client/src/pages/node/NodeDetail.tsx
+++ b/dashboard/client/src/pages/node/NodeDetail.tsx
@@ -43,6 +43,7 @@ const NodeDetailPage = () => {
selectedTab,
nodeDetail,
msg,
+ isLoading,
isRefreshing,
onRefreshChange,
raylet,
@@ -58,7 +59,7 @@ const NodeDetailPage = () => {
path: `/cluster/nodes/${params.id}`,
}}
/>
- <Loading loading={msg.startsWith("Loading")} />
+ <Loading loading={isLoading} />
<TitleCard title={`NODE - ${params.id}`}>
<StatusChip
type="node"
diff --git a/dashboard/client/src/pages/node/hook/useClusterDetail.ts b/dashboard/client/src/pages/node/hook/useClusterDetail.ts
index 7d0abc5d6ef60..8bfe35c44085f 100644
--- a/dashboard/client/src/pages/node/hook/useClusterDetail.ts
+++ b/dashboard/client/src/pages/node/hook/useClusterDetail.ts
@@ -6,7 +6,7 @@ import { getClusterMetadata } from "../../../service/global";
export const useClusterDetail = () => {
const [msg, setMsg] = useState("Loading the job detail");
const [refreshing, setRefresh] = useState(true);
- const { data: clusterDetail } = useSWR(
+ const { data: clusterDetail, isLoading } = useSWR(
"useClusterDetail",
async () => {
try {
@@ -23,5 +23,6 @@ export const useClusterDetail = () => {
return {
clusterDetail,
msg,
+ isLoading,
};
};
diff --git a/dashboard/client/src/pages/node/hook/useNodeDetail.ts b/dashboard/client/src/pages/node/hook/useNodeDetail.ts
index 3dd00e01818b0..5d06fba25446a 100644
--- a/dashboard/client/src/pages/node/hook/useNodeDetail.ts
+++ b/dashboard/client/src/pages/node/hook/useNodeDetail.ts
@@ -15,7 +15,7 @@ export const useNodeDetail = () => {
setRefresh(event.target.checked);
};
- const { data: nodeDetail } = useSWR(
+ const { data: nodeDetail, isLoading } = useSWR(
["useNodeDetail", params.id],
async ([_, nodeId]) => {
const { data } = await getNodeDetail(nodeId);
@@ -47,6 +47,7 @@ export const useNodeDetail = () => {
selectedTab,
nodeDetail,
msg,
+ isLoading,
isRefreshing,
onRefreshChange,
raylet,
diff --git a/dashboard/client/src/pages/node/hook/useNodeList.ts b/dashboard/client/src/pages/node/hook/useNodeList.ts
index 3409138ad7122..ce9c764cf1a53 100644
--- a/dashboard/client/src/pages/node/hook/useNodeList.ts
+++ b/dashboard/client/src/pages/node/hook/useNodeList.ts
@@ -26,7 +26,7 @@ export const useNodeList = () => {
const onSwitchChange = (event: React.ChangeEvent<HTMLInputElement>) => {
setRefresh(event.target.checked);
};
- const { data } = useSWR(
+ const { data, isLoading } = useSWR(
"useNodeList",
async () => {
const { data } = await getNodeList();
@@ -62,6 +62,7 @@ export const useNodeList = () => {
filter.every((f) => node[f.key] && node[f.key].includes(f.val)),
),
msg,
+ isLoading,
isRefreshing,
onSwitchChange,
changeFilter,
diff --git a/dashboard/client/src/pages/node/index.tsx b/dashboard/client/src/pages/node/index.tsx
index 9f2f4bf8fc8ec..3753af2d22139 100644
--- a/dashboard/client/src/pages/node/index.tsx
+++ b/dashboard/client/src/pages/node/index.tsx
@@ -158,6 +158,7 @@ const Nodes = () => {
const classes = useStyles();
const {
msg,
+ isLoading,
isRefreshing,
onSwitchChange,
nodeList,
@@ -172,7 +173,7 @@ const Nodes = () => {
return (
<div className={classes.root}>
- <Loading loading={msg.startsWith("Loading")} />
+ <Loading loading={isLoading} />
<TitleCard title="NODES">
Auto Refresh:
<Switch
|
<!-- Thank you for your contribution! Please review https://github.com/ray-project/ray/blob/master/CONTRIBUTING.rst before opening a pull request. -->
<!-- Please add a reviewer to the assignee section when you create a PR. If you don't have the access to it, we will shortly find a reviewer and assign them to your PR. -->
## Why are these changes needed?
Previously, if a dashboard page was loading, it would grey out the whole screen and buttons would not be pressable. Now, we don't block out the whole page.
Also, don't show the loading bar if the data is already loaded from the in-memory cache.
<!-- Please give a short summary of the change and the problem this solves. -->
## Related issue number
<!-- For example: "Closes #1234" -->
## Checks
- [x] I've signed off every commit(by using the -s flag, i.e., `git commit -s`) in this PR.
- [x] I've run `scripts/format.sh` to lint the changes in this PR.
- [ ] I've included any doc changes needed for https://docs.ray.io/en/master/.
- [ ] I've added any new APIs to the API Reference. For example, if I added a
method in Tune, I've added it in `doc/source/tune/api/` under the
corresponding `.rst` file.
- [ ] I've made sure the tests are passing. Note that there might be a few flaky tests, see the recent failures at https://flakey-tests.ray.io/
- Testing Strategy
- [ ] Unit tests
- [ ] Release tests
- [ ] This PR is not tested :(
| https://api.github.com/repos/ray-project/ray/pulls/34515 | 2023-04-18T04:17:39Z | 2023-04-19T23:23:36Z | 2023-04-19T23:23:36Z | 2023-04-20T16:14:27Z | 3,045 | ray-project/ray | 18,954 |
Add Instatus API to Business | diff --git a/README.md b/README.md
index aa684c5568..705312aa1a 100644
--- a/README.md
+++ b/README.md
@@ -238,6 +238,7 @@ API | Description | Auth | HTTPS | CORS |
| [Gmail](https://developers.google.com/gmail/api/) | Flexible, RESTful access to the user's inbox | `OAuth` | Yes | Unknown |
| [Google Analytics](https://developers.google.com/analytics/) | Collect, configure and analyze your data to reach the right audience | `OAuth` | Yes | Unknown |
| [ImprovMX](https://improvmx.com/api) | API for free email forwarding service | `apiKey` | Yes | Unknown |
+| [Instatus](https://instatus.com/help/api) | Post to and update maintenance and incidents on your status page through an HTTP REST API | `apiKey` | Yes | Unknown |
| [mail.tm](https://docs.mail.tm) | Temporary Email Service | No | Yes | Yes |
| [MailboxValidator](https://www.mailboxvalidator.com/api-email-free) | Validate email address to improve deliverability | `apiKey` | Yes | Unknown |
| [mailgun](https://www.mailgun.com/) | Email Service | `apiKey` | Yes | Unknown |
| <!-- Thank you for taking the time to work on a Pull Request for this project! -->
<!-- To ensure your PR is dealt with swiftly please check the following: -->
- [x] My submission is formatted according to the guidelines in the [contributing guide](/CONTRIBUTING.md)
- [x] My addition is ordered alphabetically
- [x] My submission has a useful description
- [x] The description does not have more than 100 characters
- [x] The description does not end with punctuation
- [x] Each table column is padded with one space on either side
- [x] I have searched the repository for any relevant issues or pull requests
- [x] Any category I am creating has the minimum requirement of 3 items
- [x] All changes have been [squashed][squash-link] into a single commit
[squash-link]: <https://github.com/todotxt/todo.txt-android/wiki/Squash-All-Commits-Related-to-a-Single-Issue-into-a-Single-Commit>
I wasn't sure what category to list Instatus in, but it's a status page used for communicating incidents and maintenance to your users, so I thought it'd best fit within Business. | https://api.github.com/repos/public-apis/public-apis/pulls/2463 | 2021-10-11T05:10:07Z | 2021-10-24T05:08:28Z | 2021-10-24T05:08:28Z | 2021-10-24T05:08:28Z | 281 | public-apis/public-apis | 35,889 |
Bump moto-ext to 4.2.7.post1 | diff --git a/localstack/services/ec2/provider.py b/localstack/services/ec2/provider.py
index 21db8ab2acde2..7448d8e4ab750 100644
--- a/localstack/services/ec2/provider.py
+++ b/localstack/services/ec2/provider.py
@@ -15,7 +15,6 @@
)
from moto.ec2.models.launch_templates import LaunchTemplate as MotoLaunchTemplate
from moto.ec2.models.subnets import Subnet
-from moto.ec2.models.vpcs import VPCEndPoint
from localstack.aws.api import RequestContext, handler
from localstack.aws.api.ec2 import (
@@ -496,8 +495,3 @@ def delete_transit_gateway_vpc_attachment(fn, self, transit_gateway_attachment_i
transit_gateway_attachment = self.transit_gateway_attachments.get(transit_gateway_attachment_id)
transit_gateway_attachment.state = "deleted"
return transit_gateway_attachment
-
-
-# fix a bug in upstream moto where a space is encoded in the "Statement" key - TODO remove once fixed upstream
-if "Statement " in VPCEndPoint.DEFAULT_POLICY:
- VPCEndPoint.DEFAULT_POLICY["Statement"] = VPCEndPoint.DEFAULT_POLICY.pop("Statement ")
diff --git a/localstack/services/sns/provider.py b/localstack/services/sns/provider.py
index 0191c6ef3269d..e155cc2332034 100644
--- a/localstack/services/sns/provider.py
+++ b/localstack/services/sns/provider.py
@@ -342,21 +342,7 @@ def create_platform_endpoint(
) -> CreateEndpointResponse:
# TODO: support mobile app events
# see https://docs.aws.amazon.com/sns/latest/dg/application-event-notifications.html
- try:
- result: CreateEndpointResponse = call_moto(context)
- except CommonServiceException as e:
- if "DuplicateEndpoint" in e.code:
- moto_sns_backend = self.get_moto_backend(context.account_id, context.region)
- for e in moto_sns_backend.platform_endpoints.values():
- if e.token == token:
- if custom_user_data and custom_user_data != e.custom_user_data:
- raise InvalidParameterException(
- f"Endpoint {e.arn} already exists with the same Token, but different attributes."
- )
- else:
- return CreateEndpointResponse(EndpointArn=e.arn)
- raise
- return result
+ return call_moto(context)
def unsubscribe(self, context: RequestContext, subscription_arn: subscriptionARN) -> None:
count = len(subscription_arn.split(":"))
diff --git a/setup.cfg b/setup.cfg
index e5d0842c247e7..a8aa02ba0bf79 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -80,7 +80,7 @@ runtime =
# to be removed when https://github.com/python-openapi/openapi-schema-validator/issues/131 is resolved
jsonschema<=4.19.0
localstack-client>=2.0
- moto-ext[all]==4.2.6.post2
+ moto-ext[all]==4.2.7.post1
opensearch-py>=2.3.2
pymongo>=4.2.0
pyopenssl>=23.0.0
diff --git a/tests/aws/services/sns/test_sns.py b/tests/aws/services/sns/test_sns.py
index 13ae7c3ef8cf6..553d04351ae80 100644
--- a/tests/aws/services/sns/test_sns.py
+++ b/tests/aws/services/sns/test_sns.py
@@ -3303,8 +3303,10 @@ def check_message():
retry(check_message, retries=PUBLICATION_RETRIES, sleep=PUBLICATION_TIMEOUT)
@markers.aws.needs_fixing
+ @pytest.mark.skip(reason="Test asserts wrong behaviour")
# AWS validating this is hard because we need real credentials for a GCM/Apple mobile app
- # Error responses are from reported https://github.com/spulec/moto/issues/2333
+ # TODO: AWS validate this test
+ # See https://github.com/getmoto/moto/pull/6953 where Moto updated errors.
def test_create_platform_endpoint_check_idempotency(
self, sns_create_platform_application, aws_client
):
@@ -3314,6 +3316,10 @@ def test_create_platform_endpoint_check_idempotency(
Attributes={"PlatformCredential": "123"},
)
token = "test1"
+ # TODO: As per AWS docs:
+ # > The CreatePlatformEndpoint action is idempotent, so if the requester already owns an endpoint
+ # > with the same device token and attributes, that endpoint's ARN is returned without creating a new endpoint.
+ # The 'Token' and 'Attributes' are critical to idempotent behaviour.
kwargs_list = [
{"Token": token, "CustomUserData": "test-data"},
{"Token": token, "CustomUserData": "test-data"},
| This is the final moto-ext bump ahead of LocalStack v3.0.0 release.
Brings in:
- https://github.com/getmoto/moto/pull/7003
- https://github.com/getmoto/moto/pull/7004 | https://api.github.com/repos/localstack/localstack/pulls/9596 | 2023-11-10T09:25:35Z | 2023-11-13T05:50:27Z | 2023-11-13T05:50:27Z | 2023-11-13T05:50:28Z | 1,094 | localstack/localstack | 29,324 |
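The SNS change above relies on Moto now matching the idempotency AWS documents for `CreatePlatformEndpoint`; roughly, against a boto3 client (the application ARN below is hypothetical, and the platform application must already exist):

```python
import boto3

sns = boto3.client("sns", region_name="us-east-1")
app_arn = "arn:aws:sns:us-east-1:000000000000:app/GCM/test-app"  # hypothetical

first = sns.create_platform_endpoint(
    PlatformApplicationArn=app_arn, Token="device-token", CustomUserData="test-data"
)
# Same Token and attributes: the existing EndpointArn is returned again.
second = sns.create_platform_endpoint(
    PlatformApplicationArn=app_arn, Token="device-token", CustomUserData="test-data"
)
assert first["EndpointArn"] == second["EndpointArn"]
# The same Token with *different* attributes raises InvalidParameterException.
```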
Bump ansi-regex from 4.1.0 to 4.1.1 in /component-lib | diff --git a/component-lib/yarn.lock b/component-lib/yarn.lock
index 6262338804f6..d2d60c0d0458 100644
--- a/component-lib/yarn.lock
+++ b/component-lib/yarn.lock
@@ -94,9 +94,9 @@
"@types/yargs-parser" "*"
ansi-regex@^4.0.0:
- version "4.1.0"
- resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-4.1.0.tgz#8b9f8f08cf1acb843756a839ca8c7e3168c51997"
- integrity sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==
+ version "4.1.1"
+ resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-4.1.1.tgz#164daac87ab2d6f6db3a29875e2d1766582dabed"
+ integrity sha512-ILlv4k/3f6vfQ4OoP2AGvirOktlQ98ZEL1k9FaQjxa3L1abBgbuTDAdPOpvbGncC0BTVQrl+OM8xZGK6tWXt7g==
ansi-styles@^3.2.0, ansi-styles@^3.2.1:
version "3.2.1"
| Bumps [ansi-regex](https://github.com/chalk/ansi-regex) from 4.1.0 to 4.1.1.
<details>
<summary>Commits</summary>
<ul>
<li><a href="https://github.com/chalk/ansi-regex/commit/64735d25eb839b55bc9fae3877edb702b4c92ca2"><code>64735d2</code></a> v4.1.1</li>
<li><a href="https://github.com/chalk/ansi-regex/commit/75a657da7af875b2e2724fd6331bf0a4b23d3c9a"><code>75a657d</code></a> Fix potential ReDoS (<a href="https://github-redirect.dependabot.com/chalk/ansi-regex/issues/37">#37</a>)</li>
<li>See full diff in <a href="https://github.com/chalk/ansi-regex/compare/v4.1.0...v4.1.1">compare view</a></li>
</ul>
</details>
<br />
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=ansi-regex&package-manager=npm_and_yarn&previous-version=4.1.0&new-version=4.1.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
<details>
<summary>Dependabot commands and options</summary>
<br />
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/streamlit/streamlit/network/alerts).
</details> | https://api.github.com/repos/streamlit/streamlit/pulls/4558 | 2022-03-28T21:53:21Z | 2022-03-31T17:05:42Z | 2022-03-31T17:05:42Z | 2022-03-31T17:05:46Z | 392 | streamlit/streamlit | 21,947 |
[3.9] bpo-42598: Fix implicit function declarations in configure (GH-23690) | diff --git a/Misc/NEWS.d/next/Build/2020-12-13-14-43-10.bpo-42598.7ipr5H.rst b/Misc/NEWS.d/next/Build/2020-12-13-14-43-10.bpo-42598.7ipr5H.rst
new file mode 100644
index 00000000000000..7dafc105c45ea9
--- /dev/null
+++ b/Misc/NEWS.d/next/Build/2020-12-13-14-43-10.bpo-42598.7ipr5H.rst
@@ -0,0 +1,2 @@
+Fix implicit function declarations in configure which could have resulted in
+incorrect configuration checks. Patch contributed by Joshua Root.
diff --git a/configure b/configure
index 2d379feb4b7bf0..ed969c55b35ab6 100755
--- a/configure
+++ b/configure
@@ -11072,10 +11072,10 @@ else
main() {
pthread_attr_t attr;
pthread_t id;
- if (pthread_attr_init(&attr)) exit(-1);
- if (pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM)) exit(-1);
- if (pthread_create(&id, &attr, foo, NULL)) exit(-1);
- exit(0);
+ if (pthread_attr_init(&attr)) return (-1);
+ if (pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM)) return (-1);
+ if (pthread_create(&id, &attr, foo, NULL)) return (-1);
+ return (0);
}
_ACEOF
if ac_fn_c_try_run "$LINENO"; then :
@@ -15083,7 +15083,7 @@ else
int main()
{
/* Success: exit code 0 */
- exit((((wchar_t) -1) < ((wchar_t) 0)) ? 0 : 1);
+ return ((((wchar_t) -1) < ((wchar_t) 0)) ? 0 : 1);
}
_ACEOF
@@ -15464,7 +15464,7 @@ else
int main()
{
- exit(((-1)>>3 == -1) ? 0 : 1);
+ return (((-1)>>3 == -1) ? 0 : 1);
}
_ACEOF
@@ -15934,6 +15934,7 @@ else
/* end confdefs.h. */
#include <poll.h>
+#include <unistd.h>
int main()
{
diff --git a/configure.ac b/configure.ac
index c968d149c22d44..c74e348e077fb7 100644
--- a/configure.ac
+++ b/configure.ac
@@ -3313,10 +3313,10 @@ if test "$posix_threads" = "yes"; then
main() {
pthread_attr_t attr;
pthread_t id;
- if (pthread_attr_init(&attr)) exit(-1);
- if (pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM)) exit(-1);
- if (pthread_create(&id, &attr, foo, NULL)) exit(-1);
- exit(0);
+ if (pthread_attr_init(&attr)) return (-1);
+ if (pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM)) return (-1);
+ if (pthread_create(&id, &attr, foo, NULL)) return (-1);
+ return (0);
}]])],
[ac_cv_pthread_system_supported=yes],
[ac_cv_pthread_system_supported=no],
@@ -4725,7 +4725,7 @@ then
int main()
{
/* Success: exit code 0 */
- exit((((wchar_t) -1) < ((wchar_t) 0)) ? 0 : 1);
+ return ((((wchar_t) -1) < ((wchar_t) 0)) ? 0 : 1);
}
]])],
[ac_cv_wchar_t_signed=yes],
@@ -4847,7 +4847,7 @@ AC_CACHE_VAL(ac_cv_rshift_extends_sign, [
AC_RUN_IFELSE([AC_LANG_SOURCE([[
int main()
{
- exit(((-1)>>3 == -1) ? 0 : 1);
+ return (((-1)>>3 == -1) ? 0 : 1);
}
]])],
[ac_cv_rshift_extends_sign=yes],
@@ -4994,6 +4994,7 @@ AC_MSG_CHECKING(for broken poll())
AC_CACHE_VAL(ac_cv_broken_poll,
AC_RUN_IFELSE([AC_LANG_SOURCE([[
#include <poll.h>
+#include <unistd.h>
int main()
{
| This is invalid in C99 and later and is an error with some compilers
(e.g. clang in Xcode 12), and can thus cause configure checks to
produce incorrect results.
(cherry picked from commit 674fa0a740151e0416c9383f127b16014e805990)
Co-authored-by: Joshua Root <jmr@macports.org>
<!-- issue-number: [bpo-42598](https://bugs.python.org/issue42598) -->
https://bugs.python.org/issue42598
<!-- /issue-number -->
| https://api.github.com/repos/python/cpython/pulls/23756 | 2020-12-13T21:09:46Z | 2020-12-13T22:01:01Z | 2020-12-13T22:01:01Z | 2020-12-13T22:01:06Z | 1,046 | python/cpython | 4,190 |
Added `--nsfw` flag in documentation | diff --git a/README.md b/README.md
index 3914b68b6..52e6d7e1d 100644
--- a/README.md
+++ b/README.md
@@ -43,7 +43,7 @@ usage: sherlock [-h] [--version] [--verbose] [--folderoutput FOLDEROUTPUT]
[--output OUTPUT] [--tor] [--unique-tor] [--csv]
[--site SITE_NAME] [--proxy PROXY_URL] [--json JSON_FILE]
[--timeout TIMEOUT] [--print-all] [--print-found] [--no-color]
- [--browse] [--local]
+ [--browse] [--local] [--nsfw]
USERNAMES [USERNAMES ...]
Sherlock: Find Usernames Across Social Networks (Version 0.14.2)
@@ -83,6 +83,7 @@ optional arguments:
--no-color Don't color terminal output
--browse, -b Browse to all results on default browser.
--local, -l Force the use of the local data.json file.
+ --nsfw Include checking of NSFW sites from default list.
```
To search for only one user:
| Added the `--nsfw` flag to the `Usage` prompt, with a description matching the `help` text of the parser's `--nsfw` flag | https://api.github.com/repos/sherlock-project/sherlock/pulls/1751 | 2023-03-17T09:07:37Z | 2023-03-17T20:50:27Z | 2023-03-17T20:50:27Z | 2023-03-17T20:50:28Z | 255 | sherlock-project/sherlock | 36,396 |
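A minimal sketch of how a flag like `--nsfw` above is typically declared with `argparse` (not the project's exact parser code):

```python
import argparse

parser = argparse.ArgumentParser(
    description="Sherlock: Find Usernames Across Social Networks"
)
parser.add_argument(
    "--nsfw",
    action="store_true",  # defaults to False when the flag is absent
    help="Include checking of NSFW sites from default list.",
)

args = parser.parse_args(["--nsfw"])
print(args.nsfw)  # True
```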
Updating the docstring disable_env_checker | diff --git a/gym/envs/registration.py b/gym/envs/registration.py
index 8b7a2ac83b8..5b9c8a4fd30 100644
--- a/gym/envs/registration.py
+++ b/gym/envs/registration.py
@@ -126,7 +126,7 @@ class EnvSpec:
* max_episode_steps: The max number of steps that the environment can take before truncation
* order_enforce: If to enforce the order of `reset` before `step` and `render` functions
* autoreset: If to automatically reset the environment on episode end
- * disable_env_checker: If to disable the environment checker wrapper by default in `gym.make`
+ * disable_env_checker: If to disable the environment checker wrapper in `gym.make`, by default False (runs the environment checker)
* kwargs: Additional keyword arguments passed to the environments through `gym.make`
"""
@@ -558,8 +558,9 @@ def make(
max_episode_steps: Maximum length of an episode (TimeLimit wrapper).
autoreset: Whether to automatically reset the environment after each episode (AutoResetWrapper).
new_step_api: Whether to use old or new step API (StepAPICompatibility wrapper). Will be removed at v1.0
- disable_env_checker: If to run the env checker, None will default to the environment `spec.disable_env_checker`
- (that is by default True), otherwise will run according to the parameter (True = not run, False = run)
+ disable_env_checker: If to run the env checker, None will default to the environment specification `disable_env_checker`
+ (which is by default False, running the environment checker),
+ otherwise will run according to this parameter (`True` = not run, `False` = run)
kwargs: Additional arguments to pass to the environment constructor.
Returns:
diff --git a/gym/vector/__init__.py b/gym/vector/__init__.py
index 185082715c8..3dc4998fa8d 100644
--- a/gym/vector/__init__.py
+++ b/gym/vector/__init__.py
@@ -35,9 +35,10 @@ def make(
num_envs: Number of copies of the environment.
asynchronous: If `True`, wraps the environments in an :class:`AsyncVectorEnv` (which uses `multiprocessing`_ to run the environments in parallel). If ``False``, wraps the environments in a :class:`SyncVectorEnv`.
wrappers: If not ``None``, then apply the wrappers to each internal environment during creation.
- disable_env_checker: If to disable the env checker, if True it will only run on the first environment created.
+ disable_env_checker: If to run the env checker for the first environment only. None will default to the environment spec `disable_env_checker` parameter
+ (that is by default False), otherwise will run according to this argument (True = not run, False = run)
new_step_api: If True, the vector environment's step method outputs two booleans `terminated`, `truncated` instead of one `done`.
- **kwargs: Keywords arguments applied during gym.make
+ **kwargs: Keywords arguments applied during `gym.make`
Returns:
The vectorized environment.
diff --git a/gym/vector/vector_env.py b/gym/vector/vector_env.py
index ad2710e02ae..3ca4663d822 100644
--- a/gym/vector/vector_env.py
+++ b/gym/vector/vector_env.py
@@ -36,7 +36,7 @@ def __init__(
num_envs: Number of environments in the vectorized environment.
observation_space: Observation space of a single environment.
action_space: Action space of a single environment.
- new_step_api (bool): Whether the vector env's step method outputs two boolean arrays (new API) or one boolean array (old API)
+ new_step_api (bool): Whether the vector environment's step method outputs two boolean arrays (new API) or one boolean array (old API)
"""
self.num_envs = num_envs
self.is_vector_env = True
@@ -54,8 +54,7 @@ def __init__(
self.new_step_api = new_step_api
if not self.new_step_api:
deprecation(
- "Initializing vector env in old step API which returns one bool array instead of two. "
- "It is recommended to set `new_step_api=True` to use new step API. This will be the default behaviour in future. "
+ "Initializing vector env in old step API which returns one bool array instead of two. It is recommended to set `new_step_api=True` to use new step API. This will be the default behaviour in future."
)
def reset_async(
@@ -147,7 +146,7 @@ def step(self, actions):
actions: element of :attr:`action_space` Batch of actions.
Returns:
- Batch of (observations, rewards, terminateds, truncateds, infos) or (observations, rewards, dones, infos)
+ Batch of (observations, rewards, terminated, truncated, infos) or (observations, rewards, dones, infos)
"""
self.step_async(actions)
return self.step_wait()
| Address the comment in https://github.com/openai/gym/commit/519dfd9117e98e4f52d38064d2b0f79974fb676d | https://api.github.com/repos/openai/gym/pulls/2967 | 2022-07-14T21:59:09Z | 2022-07-17T20:50:40Z | 2022-07-17T20:50:40Z | 2022-07-17T20:50:40Z | 1,144 | openai/gym | 5,597 |
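A short usage sketch of the behaviour documented above (assuming a registered environment such as `CartPole-v1`):

```python
import gym

# disable_env_checker defaults to None, which falls back to the value in
# the environment spec (False by default), so the checker wrapper runs.
env = gym.make("CartPole-v1")

# Passing True skips the env-checker wrapper entirely.
env_unchecked = gym.make("CartPole-v1", disable_env_checker=True)
```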
service account fix on gcp_compute_instance | diff --git a/lib/ansible/modules/cloud/google/gcp_compute_instance.py b/lib/ansible/modules/cloud/google/gcp_compute_instance.py
index cb48c1deca1af0..b53a679dff40bd 100644
--- a/lib/ansible/modules/cloud/google/gcp_compute_instance.py
+++ b/lib/ansible/modules/cloud/google/gcp_compute_instance.py
@@ -319,7 +319,6 @@
description:
- Email address of the service account.
required: false
- type: bool
scopes:
description:
- The list of scopes to be made available for this service account.
@@ -862,7 +861,7 @@ def main():
preemptible=dict(type='bool')
)),
service_accounts=dict(type='list', elements='dict', options=dict(
- email=dict(type='bool'),
+ email=dict(type='str'),
scopes=dict(type='list', elements='str')
)),
tags=dict(type='dict', options=dict(
| ##### SUMMARY
service account fix on gcp_compute_instance
<!--- If you are fixing an existing issue, please include "Fixes #nnn" in your
commit message and your description; but you should still explain what
the change does.-->
##### ISSUE TYPE
<!--- Pick one below and delete the rest: -->
- Bugfix Pull Request
##### COMPONENT NAME
<!--- Name of the module, plugin, module or task -->
gcp_compute_instance
##### ANSIBLE VERSION
<!--- Paste verbatim output from "ansible --version" between quotes below -->
```
2.6
```
##### ADDITIONAL INFORMATION
<!--- Include additional information to help people understand the change here.
For bugs that don't have a linked bug report, a step-by-step reproduction
of the problem is helpful. -->
<!--- Paste verbatim command output below, e.g. before and after your change -->
```
```
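For context, the corrected `argument_spec` fragment — with `type='bool'`, Ansible coerces the value through its boolean parser and rejects a real address such as `sa@project.iam.gserviceaccount.com`; `type='str'` passes it through unchanged:

```python
# Fragment mirroring the fix in gcp_compute_instance's argument spec.
service_accounts = dict(
    type="list",
    elements="dict",
    options=dict(
        email=dict(type="str"),  # was type='bool', which broke real emails
        scopes=dict(type="list", elements="str"),
    ),
)
```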
| https://api.github.com/repos/ansible/ansible/pulls/41806 | 2018-06-21T18:18:08Z | 2018-06-21T20:11:28Z | 2018-06-21T20:11:28Z | 2019-06-21T18:23:23Z | 210 | ansible/ansible | 49,359 |
Created problem_39 in project_euler | diff --git a/project_euler/problem_39/__init__.py b/project_euler/problem_39/__init__.py
new file mode 100644
index 000000000000..792d6005489e
--- /dev/null
+++ b/project_euler/problem_39/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/project_euler/problem_39/sol1.py b/project_euler/problem_39/sol1.py
new file mode 100644
index 000000000000..5c21d4beca8c
--- /dev/null
+++ b/project_euler/problem_39/sol1.py
@@ -0,0 +1,39 @@
+"""
+If p is the perimeter of a right angle triangle with integral length sides,
+{a,b,c}, there are exactly three solutions for p = 120.
+{20,48,52}, {24,45,51}, {30,40,50}
+
+For which value of p ≤ 1000, is the number of solutions maximised?
+"""
+
+from typing import Dict
+from collections import Counter
+
+
+def pythagorean_triple(max_perimeter: int) -> Dict:
+ """
+ Returns a dictionary with keys as the perimeter of a right angled triangle
+ and value as the number of corresponding triplets.
+ >>> pythagorean_triple(15)
+ Counter({12: 1})
+ >>> pythagorean_triple(40)
+ Counter({12: 1, 30: 1, 24: 1, 40: 1, 36: 1})
+ >>> pythagorean_triple(50)
+ Counter({12: 1, 30: 1, 24: 1, 40: 1, 36: 1, 48: 1})
+ """
+ triplets = Counter()
+ for base in range(1, max_perimeter + 1):
+ for perpendicular in range(base, max_perimeter + 1):
+ hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
+ if hypotenuse == int((hypotenuse)):
+ perimeter = int(base + perpendicular + hypotenuse)
+ if perimeter > max_perimeter:
+ continue
+ else:
+ triplets[perimeter] += 1
+ return triplets
+
+
+if __name__ == "__main__":
+ triplets = pythagorean_triple(1000)
+ print(f"{triplets.most_common()[0][0] = }")
| ### **Describe your change:**
* [x] Add an algorithm?
* [ ] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### **Checklist:**
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| https://api.github.com/repos/TheAlgorithms/Python/pulls/2330 | 2020-08-18T07:59:53Z | 2020-08-18T10:49:03Z | 2020-08-18T10:49:03Z | 2020-08-18T10:57:13Z | 582 | TheAlgorithms/Python | 30,005 |
Fixed #29421 -- Improved Romanian locale formats | diff --git a/AUTHORS b/AUTHORS
index ff7763b2fd7c5..d5c3e1b2b96bd 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -114,6 +114,7 @@ answer newbie questions, and generally made Django that much better:
Bill Fenner <fenner@gmail.com>
Bjørn Stabell <bjorn@exoweb.net>
Bo Marchman <bo.marchman@gmail.com>
+ Bogdan Mateescu
Bojan Mihelac <bmihelac@mihelac.org>
Bouke Haarsma <bouke@haarsma.eu>
Božidar Benko <bbenko@gmail.com>
diff --git a/django/conf/locale/ro/formats.py b/django/conf/locale/ro/formats.py
index ba3fd73b4a4c4..11f4e2e9fc0f5 100644
--- a/django/conf/locale/ro/formats.py
+++ b/django/conf/locale/ro/formats.py
@@ -9,13 +9,27 @@
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y, H:i'
-# FIRST_DAY_OF_WEEK =
+FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
-# DATE_INPUT_FORMATS =
-# TIME_INPUT_FORMATS =
-# DATETIME_INPUT_FORMATS =
+DATE_INPUT_FORMATS = [
+ '%d.%m.%Y',
+ '%d.%b.%Y',
+ '%d %B %Y',
+ '%A, %d %B %Y',
+]
+TIME_INPUT_FORMATS = [
+ '%H:%M',
+ '%H:%M:%S',
+ '%H:%M:%S.%f',
+]
+DATETIME_INPUT_FORMATS = [
+ '%d.%m.%Y, %H:%M',
+ '%d.%m.%Y, %H:%M:%S',
+ '%d.%B.%Y, %H:%M',
+ '%d.%B.%Y, %H:%M:%S',
+]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
-# NUMBER_GROUPING =
+NUMBER_GROUPING = 3
| https://api.github.com/repos/django/django/pulls/9969 | 2018-05-20T06:28:03Z | 2018-05-20T15:12:14Z | 2018-05-20T15:12:14Z | 2018-05-20T15:36:59Z | 531 | django/django | 51,360 |
|
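The `%`-directives added above use Python's strftime/strptime syntax, as the diff's comment notes. A minimal, Django-independent illustration of how two of the new input formats parse:

```python
from datetime import datetime

# '%d.%m.%Y' is the first entry of the new DATE_INPUT_FORMATS.
print(datetime.strptime("20.05.2018", "%d.%m.%Y").date())         # 2018-05-20
# '%d.%m.%Y, %H:%M' is the first entry of the new DATETIME_INPUT_FORMATS.
print(datetime.strptime("20.05.2018, 15:12", "%d.%m.%Y, %H:%M"))  # 2018-05-20 15:12:00
```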
Add README files 1/7 | diff --git a/arithmetic_analysis/README.md b/arithmetic_analysis/README.md
new file mode 100644
index 000000000000..45cf321eb6ad
--- /dev/null
+++ b/arithmetic_analysis/README.md
@@ -0,0 +1,7 @@
+# Arithmetic analysis
+
+Arithmetic analysis is a branch of mathematics that deals with solving linear equations.
+
+* <https://en.wikipedia.org/wiki/System_of_linear_equations>
+* <https://en.wikipedia.org/wiki/Gaussian_elimination>
+* <https://en.wikipedia.org/wiki/Root-finding_algorithms>
diff --git a/audio_filters/README.md b/audio_filters/README.md
new file mode 100644
index 000000000000..4419bd8bdbf9
--- /dev/null
+++ b/audio_filters/README.md
@@ -0,0 +1,9 @@
+# Audio Filter
+
+Audio filters work on the frequency of an audio signal to attenuate unwanted frequency and amplify wanted ones.
+They are used within anything related to sound, whether it is radio communication or a hi-fi system.
+
+* <https://www.masteringbox.com/filter-types/>
+* <http://ethanwiner.com/filters.html>
+* <https://en.wikipedia.org/wiki/Audio_filter>
+* <https://en.wikipedia.org/wiki/Electronic_filter>
diff --git a/backtracking/README.md b/backtracking/README.md
new file mode 100644
index 000000000000..d4975dfb5ad7
--- /dev/null
+++ b/backtracking/README.md
@@ -0,0 +1,8 @@
+# Backtracking
+
+Backtracking is a way to speed up the search process by removing candidates when they can't be the solution of a problem.
+
+* <https://en.wikipedia.org/wiki/Backtracking>
+* <https://en.wikipedia.org/wiki/Decision_tree_pruning>
+* <https://medium.com/@priyankmistry1999/backtracking-sudoku-6e4439e4825c>
+* <https://www.geeksforgeeks.org/sudoku-backtracking-7/>
diff --git a/bit_manipulation/README.md b/bit_manipulation/README.md
index e5f82a270e28..3f5e028beb8e 100644
--- a/bit_manipulation/README.md
+++ b/bit_manipulation/README.md
@@ -1,6 +1,11 @@
-* https://docs.python.org/3/reference/expressions.html#binary-bitwise-operations
-* https://docs.python.org/3/reference/expressions.html#unary-arithmetic-and-bitwise-operations
-* https://docs.python.org/3/library/stdtypes.html#bitwise-operations-on-integer-types
-* https://wiki.python.org/moin/BitManipulation
-* https://wiki.python.org/moin/BitwiseOperators
-* https://www.tutorialspoint.com/python3/bitwise_operators_example.htm
+# Bit manipulation
+
+Bit manipulation is the act of manipulating bits to detect errors (hamming code), encrypts and decrypts messages (more on that in the 'ciphers' folder) or just do anything at the lowest level of your computer.
+
+* <https://en.wikipedia.org/wiki/Bit_manipulation>
+* <https://docs.python.org/3/reference/expressions.html#binary-bitwise-operations>
+* <https://docs.python.org/3/reference/expressions.html#unary-arithmetic-and-bitwise-operations>
+* <https://docs.python.org/3/library/stdtypes.html#bitwise-operations-on-integer-types>
+* <https://wiki.python.org/moin/BitManipulation>
+* <https://wiki.python.org/moin/BitwiseOperators>
+* <https://www.tutorialspoint.com/python3/bitwise_operators_example.htm>
diff --git a/boolean_algebra/README.md b/boolean_algebra/README.md
new file mode 100644
index 000000000000..45969c855f9c
--- /dev/null
+++ b/boolean_algebra/README.md
@@ -0,0 +1,7 @@
+# Boolean Algebra
+
+Boolean algebra is used to do arithmetic with bits of values True (1) or False (0).
+There are three basic operations: 'and', 'or' and 'not'.
+
+* <https://en.wikipedia.org/wiki/Boolean_algebra>
+* <https://plato.stanford.edu/entries/boolalg-math/>
| ### Describe your change:
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [x] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| https://api.github.com/repos/TheAlgorithms/Python/pulls/5754 | 2021-11-02T21:34:27Z | 2021-11-04T10:49:37Z | 2021-11-04T10:49:37Z | 2021-11-04T10:49:37Z | 978 | TheAlgorithms/Python | 29,798 |
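The new boolean-algebra and bit-manipulation READMEs name the basic operations without code; in Python they correspond to the bitwise operators (a tiny sketch, not part of the PR):

```python
a, b = 0b1100, 0b1010
print(bin(a & b))        # 0b1000 (AND)
print(bin(a | b))        # 0b1110 (OR)
print(bin(a ^ b))        # 0b110  (XOR)
print(bin(~b & 0b1111))  # 0b101  (NOT, masked to 4 bits)
```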
Add W&B tracing user guide | diff --git a/docs/index.rst b/docs/index.rst
index a3c23a6b5d..f0f65bff8e 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -21,6 +21,7 @@ Welcome to GPT-ENGINEER's Documentation
readme_link
windows_readme_link
open_models.md
+ tracing_debugging.md
.. toctree::
diff --git a/docs/tracing_debugging.md b/docs/tracing_debugging.md
new file mode 100644
index 0000000000..745b561098
--- /dev/null
+++ b/docs/tracing_debugging.md
@@ -0,0 +1,21 @@
+Tracing and Debugging with Weights and Biases
+============================
+
+## **[How to store results in Weights & Biases]()**
+
+W&B Prompts is a suite of LLMOps tools built for the development of LLM-powered applications. Use W&B Prompts to visualize and inspect the execution flow of your LLMs, analyze the inputs and outputs of your LLMs, view the intermediate results and securely store and manage your prompts and LLM chain configurations. Read more at https://docs.wandb.ai/guides/prompts
+
+```shell
+ $ export WANDB_API_KEY="YOUR-KEY"
+ $ export LANGCHAIN_WANDB_TRACING=true
+ ```
+
+Sign up for free at https://wandb.ai
+
+
+Debug and trace the execution of the AI generated code to compare across different experiments with `gpt-engineer` and related prompts
+![](https://drive.google.com/uc?id=10wuLwyPbH00CoESsS2Q2q6mkdrtS91jd)
+
+
+Automatically capture and save terminal `stdout` to one easily accessible and shareable webpage
+![](https://drive.google.com/uc?id=1gVva7ZfpwbTSBsnNvId6iq09Gw5ETOks)
| An updated form of https://github.com/AntonOsika/gpt-engineer/pull/701 where the tracing/debugging how to would be placed in the `user guides` | https://api.github.com/repos/gpt-engineer-org/gpt-engineer/pulls/752 | 2023-10-02T14:41:21Z | 2023-10-05T11:43:57Z | 2023-10-05T11:43:57Z | 2023-10-05T11:43:57Z | 443 | gpt-engineer-org/gpt-engineer | 33,250 |
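The two `export` lines in the new guide can equally be set from Python before launching a LangChain-backed run (a sketch; `YOUR-KEY` is a placeholder, not a real key):

```python
import os

os.environ["WANDB_API_KEY"] = "YOUR-KEY"        # placeholder
os.environ["LANGCHAIN_WANDB_TRACING"] = "true"  # routes LangChain traces to W&B

# ...then invoke gpt-engineer (or any LangChain pipeline) from this process.
```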
Fix numbers in sections to be consistent with TOC | diff --git a/CppCoreGuidelines.md b/CppCoreGuidelines.md
index 5ecf48334..611be45b7 100644
--- a/CppCoreGuidelines.md
+++ b/CppCoreGuidelines.md
@@ -6010,7 +6010,7 @@ The `make_shared()` version mentions `X` only once, so it is usually shorter (as
<a name ="Rr-weak_ptr"></a>
-### R.30: Use `std::weak_ptr` to break cycles of `shared_ptr`s
+### R.24: Use `std::weak_ptr` to break cycles of `shared_ptr`s
**Reason**: `shared_ptr's rely on use counting and the use count for a cyclic structure never goes to zero, so we need a mechanism to
be able to destroy a cyclic structure.
@@ -6026,40 +6026,10 @@ You could "temporarily share ownership simply by using another `stared_ptr`.]]
**Enforcement**: ???probably impossible. If we could statically detect cycles, we wouldn't need `weak_ptr`
-<a name="Rr-smart"></a>
-### R.31: If you have non-`std` smart pointers, follow the basic pattern from `std`
-
-**Reason**: The rules in the following section also work for other kinds of third-party and custom smart pointers and are very useful for diagnosing common smart pointer errors that cause performance and correctness problems.
-You want the rules to work on all the smart pointers you use.
-
-Any type (including primary template or specialization) that overloads unary `*` and `->` is considered a smart pointer:
-
-* If it is copyable, it is recognized as a reference-counted `Shared_ptr`.
-* If it not copyable, it is recognized as a unique `Unique_ptr`.
-
-**Example**:
-
- // use Boost's intrusive_ptr
- #include <boost/intrusive_ptr.hpp>
- void f(boost::intrusive_ptr<widget> p) { // error under rule 'sharedptrparam'
- p->foo();
- }
-
- // use Microsoft's CComPtr
- #include <atlbase.h>
- void f(CComPtr<widget> p) { // error under rule 'sharedptrparam'
- p->foo();
- }
-
-Both cases are an error under the [`sharedptrparam` guideline](#Rr-smartptrparam):
-`p` is a `Shared_ptr`, but nothing about its sharedness is used here and passing it by value is a silent pessimization;
-these functions should accept a smart pointer only if they need to participate in the widget's lifetime management. Otherwise they should accept a `widget*`, if it can be `nullptr`. Otherwise, and ideally, the function should accept a `widget&`.
-These smart pointers match the `Shared_ptr` concept,
-so these guideline enforcement rules work on them out of the box and expose this common pessimization.
<a name="Rr-smartptrparam"></a>
-### R.32: Take smart pointers as parameters only to explicitly express lifetime semantics
+### R.30: Take smart pointers as parameters only to explicitly express lifetime semantics
**Reason**: Accepting a smart pointer to a `widget` is wrong if the function just needs the `widget` itself.
It should be able to accept any `widget` object, not just ones whose lifetimes are managed by a particular kind of smart pointer.
@@ -6103,8 +6073,39 @@ A function that does not manipulate lifetime should take raw pointers or referen
Suggest using a `T*` or `T&` instead.
+<a name="Rr-smart"></a>
+### R.31: If you have non-`std` smart pointers, follow the basic pattern from `std`
+
+**Reason**: The rules in the following section also work for other kinds of third-party and custom smart pointers and are very useful for diagnosing common smart pointer errors that cause performance and correctness problems.
+You want the rules to work on all the smart pointers you use.
+
+Any type (including primary template or specialization) that overloads unary `*` and `->` is considered a smart pointer:
+
+* If it is copyable, it is recognized as a reference-counted `Shared_ptr`.
+* If it not copyable, it is recognized as a unique `Unique_ptr`.
+
+**Example**:
+
+ // use Boost's intrusive_ptr
+ #include <boost/intrusive_ptr.hpp>
+ void f(boost::intrusive_ptr<widget> p) { // error under rule 'sharedptrparam'
+ p->foo();
+ }
+
+ // use Microsoft's CComPtr
+ #include <atlbase.h>
+ void f(CComPtr<widget> p) { // error under rule 'sharedptrparam'
+ p->foo();
+ }
+
+Both cases are an error under the [`sharedptrparam` guideline](#Rr-smartptrparam):
+`p` is a `Shared_ptr`, but nothing about its sharedness is used here and passing it by value is a silent pessimization;
+these functions should accept a smart pointer only if they need to participate in the widget's lifetime management. Otherwise they should accept a `widget*`, if it can be `nullptr`. Otherwise, and ideally, the function should accept a `widget&`.
+These smart pointers match the `Shared_ptr` concept,
+so these guideline enforcement rules work on them out of the box and expose this common pessimization.
+
<a name="Rr-uniqueptrparam"></a>
-### R.33: Take a `unique_ptr<widget>` parameter to express that a function assumes ownership of a `widget`
+### R.32: Take a `unique_ptr<widget>` parameter to express that a function assumes ownership of a `widget`
**Reason**: Using `unique_ptr` in this way both documents and enforces the function call's ownership transfer.
@@ -6127,7 +6128,7 @@ A function that does not manipulate lifetime should take raw pointers or referen
<a name="Rr-reseat"></a>
-### R.34: Take a `unique_ptr<widget>&` parameter to express that a function reseats the`widget`
+### R.33: Take a `unique_ptr<widget>&` parameter to express that a function reseats the`widget`
**Reason**: Using `unique_ptr` in this way both documents and enforces the function call's reseating semantics.
@@ -6150,7 +6151,7 @@ A function that does not manipulate lifetime should take raw pointers or referen
<a name="Rr-sharedptrparam-owner"></a>
-### R.35: Take a `shared_ptr<widget>` parameter to express that a function is part owner
+### R.34: Take a `shared_ptr<widget>` parameter to express that a function is part owner
**Reason**: This makes the function's ownership sharing explicit.
@@ -6170,7 +6171,7 @@ A function that does not manipulate lifetime should take raw pointers or referen
<a name="Rr-sharedptrparam"></a>
-### R.36: Take a `shared_ptr<widget>&` parameter to express that a function might reseat the shared pointer
+### R.35: Take a `shared_ptr<widget>&` parameter to express that a function might reseat the shared pointer
**Reason**: This makes the function's reseating explicit.
@@ -6192,7 +6193,7 @@ A function that does not manipulate lifetime should take raw pointers or referen
<a name="Rr-sharedptrparam-const&"></a>
-### R.37: Take a `const shared_ptr<widget>&` parameter to express that it might retain a reference count to the object ???
+### R.36: Take a `const shared_ptr<widget>&` parameter to express that it might retain a reference count to the object ???
**Reason**: This makes the function's ??? explicit.
@@ -6212,7 +6213,7 @@ A function that does not manipulate lifetime should take raw pointers or referen
<a name="Rr-smartptrget"></a>
-### R.38: Do not pass a pointer or reference obtained from an aliased smart pointer
+### R.37: Do not pass a pointer or reference obtained from an aliased smart pointer
**Reason**: Violating this rule is the number one cause of losing reference counts and finding yourself with a dangling pointer.
Functions should prefer to pass raw pointers and references down call chains.
| The sections R.24, R.30 - R.37 had incorrect numbers in the text. Also R.30 was
after R.31. This is now fixed.
| https://api.github.com/repos/isocpp/CppCoreGuidelines/pulls/160 | 2015-09-27T01:20:16Z | 2015-09-27T17:58:42Z | 2015-09-27T17:58:42Z | 2015-09-27T19:57:05Z | 1,874 | isocpp/CppCoreGuidelines | 15,836 |
doc: add a section about debugging in docker-compose with PyCharm | diff --git a/docs/apache-airflow/howto/docker-compose/index.rst b/docs/apache-airflow/howto/docker-compose/index.rst
index 2cd92fa04824d..aaff211d2f13b 100644
--- a/docs/apache-airflow/howto/docker-compose/index.rst
+++ b/docs/apache-airflow/howto/docker-compose/index.rst
@@ -358,6 +358,49 @@ Networking
In general, if you want to use Airflow locally, your DAGs may try to connect to servers which are running on the host. In order to achieve that, an extra configuration must be added in ``docker-compose.yaml``. For example, on Linux the configuration must be in the section ``services: airflow-worker`` adding ``extra_hosts: - "host.docker.internal:host-gateway"``; and use ``host.docker.internal`` instead of ``localhost``. This configuration vary in different platforms. Please check the Docker documentation for `Windows <https://docs.docker.com/desktop/windows/networking/#use-cases-and-workarounds>`_ and `Mac <https://docs.docker.com/desktop/mac/networking/#use-cases-and-workarounds>`_ for further information.
+Debug Airflow inside docker container using PyCharm
+===================================================
+.. jinja:: quick_start_ctx
+
+ Prerequisites: Create a project in **PyCharm** and download the (`docker-compose.yaml <{{ doc_root_url }}docker-compose.yaml>`__).
+
+Steps:
+
+1) Modify ``docker-compose.yaml``
+
+ Add the following section under the ``services`` section:
+
+.. code-block:: yaml
+
+ airflow-python:
+ <<: *airflow-common
+ profiles:
+ - debug
+ environment:
+ <<: *airflow-common-env
+ user: "50000:0"
+ entrypoint: ["bash"]
+
+.. note::
+
+ This code snippet creates a new service named **"airflow-python"** specifically for PyCharm's Python interpreter.
+ On a Linux system, if you have executed the command ``echo -e "AIRFLOW_UID=$(id -u)" > .env``, you need to set ``user: "50000:0"`` in ``airflow-python`` service to avoid PyCharm's ``Unresolved reference 'airflow'`` error.
+
+2) Configure PyCharm Interpreter
+
+ * Open PyCharm and navigate to **Settings** (or **Preferences** on macOS) > **Project: <Your Project Name>** > **Python Interpreter**.
+ * Click the **"Add Interpreter"** button and choose **"On Docker Compose"**.
+ * In the **Configuration file** field, select your ``docker-compose.yaml`` file.
+ * In the **Service field**, choose the newly added ``airflow-python`` service.
+ * Click **"Next"** and follow the prompts to complete the configuration.
+
+.. image:: /img/add_container_python_interpreter.png
+ :alt: Configuring the container's Python interpreter in PyCharm, step diagram
+
+Building the interpreter index might take some time.
+Once configured, you can debug your Airflow code within the container environment, mimicking your local setup.
+
+
FAQ: Frequently asked questions
===============================
diff --git a/docs/apache-airflow/img/add_container_python_interpreter.png b/docs/apache-airflow/img/add_container_python_interpreter.png
new file mode 100644
index 0000000000000..c0bd2d7701ab0
Binary files /dev/null and b/docs/apache-airflow/img/add_container_python_interpreter.png differ
| <!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<!--
Thank you for contributing! Please make sure that your code changes
are covered with tests. And in case of new features or big changes
remember to adjust the documentation.
Feel free to ping committers for the review!
In case of an existing issue, reference it using one of the following:
closes: #ISSUE
related: #ISSUE
How to write a good git commit message:
http://chris.beams.io/posts/git-commit/
-->
Proposed Documentation Pull Request for Airflow
Background:
In a discussion with @potiuk about [Seeking Help: Missing Essential Python Packages during PyCharm Debugging on Docker Compose Interpreter](https://github.com/apache/airflow/discussions/37864). I proposed adding configuration instructions for this functionality to the Airflow documentation.
Changes:
This pull request adds a new section to the documentation that covers:
**Debug Airflow inside docker container using Pycharm**
I would appreciate it if you could review this pull request and provide any feedback.
<!-- Please keep an empty line above the dashes. -->
---
**^ Add meaningful description above**
Read the **[Pull Request Guidelines](https://github.com/apache/airflow/blob/main/contributing-docs/05_pull_requests.rst#pull-request-guidelines)** for more information.
In case of fundamental code changes, an Airflow Improvement Proposal ([AIP](https://cwiki.apache.org/confluence/display/AIRFLOW/Airflow+Improvement+Proposals)) is needed.
In case of a new dependency, check compliance with the [ASF 3rd Party License Policy](https://www.apache.org/legal/resolved.html#category-x).
In case of backwards incompatible changes please leave a note in a newsfragment file, named `{pr_number}.significant.rst` or `{issue_number}.significant.rst`, in [newsfragments](https://github.com/apache/airflow/tree/main/newsfragments).
| https://api.github.com/repos/apache/airflow/pulls/37940 | 2024-03-06T15:52:10Z | 2024-03-08T00:44:18Z | 2024-03-08T00:44:18Z | 2024-03-19T17:34:16Z | 780 | apache/airflow | 14,463 |
Mark stdin warning related tests with `requires_external_processes` | diff --git a/pytest.ini b/pytest.ini
index 7c6209701b..ced65979b1 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -1,3 +1,7 @@
[pytest]
markers =
+ # If you want to run tests without a full HTTPie installation
+ # we advise you to disable the markers below, e.g:
+ # pytest -m 'not requires_installation and not requires_external_processes'
requires_installation
+ requires_external_processes
diff --git a/tests/test_uploads.py b/tests/test_uploads.py
index d9de3ac984..5695d0c8c2 100644
--- a/tests/test_uploads.py
+++ b/tests/test_uploads.py
@@ -121,6 +121,7 @@ def stdin_processes(httpbin, *args):
@pytest.mark.parametrize("wait", (True, False))
+@pytest.mark.requires_external_processes
@pytest.mark.skipif(is_windows, reason="Windows doesn't support select() calls into files")
def test_reading_from_stdin(httpbin, wait):
with stdin_processes(httpbin) as (process_1, process_2):
@@ -138,6 +139,7 @@ def test_reading_from_stdin(httpbin, wait):
assert b'> warning: no stdin data read in 0.1s' not in errs
+@pytest.mark.requires_external_processes
@pytest.mark.skipif(is_windows, reason="Windows doesn't support select() calls into files")
def test_stdin_read_warning(httpbin):
with stdin_processes(httpbin) as (process_1, process_2):
@@ -153,6 +155,7 @@ def test_stdin_read_warning(httpbin):
assert b'> warning: no stdin data read in 0.1s' in errs
+@pytest.mark.requires_external_processes
@pytest.mark.skipif(is_windows, reason="Windows doesn't support select() calls into files")
def test_stdin_read_warning_with_quiet(httpbin):
with stdin_processes(httpbin, "-qq") as (process_1, process_2):
| Cherry-picks and closes #1287 | https://api.github.com/repos/httpie/cli/pulls/1289 | 2022-02-01T09:49:50Z | 2022-02-01T09:52:08Z | 2022-02-01T09:52:08Z | 2022-02-01T09:52:08Z | 454 | httpie/cli | 33,808 |
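For readers new to custom pytest markers, the mechanism above reduces to registering a name in `pytest.ini` and tagging tests with it (a minimal standalone sketch):

```python
import pytest

@pytest.mark.requires_external_processes  # registered in pytest.ini above
def test_spawns_child_processes():
    ...

# Deselect the heavyweight tests, as the new pytest.ini comment advises:
#   pytest -m 'not requires_installation and not requires_external_processes'
```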
Fix Dockerfile (#172, #156) | diff --git a/Dockerfile b/Dockerfile
index 95c26a29..e78be409 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,13 +1,19 @@
-FROM alpine:latest
+FROM alpine:3.12
+# fetching cheat sheets
+## installing dependencies
+RUN apk add --update --no-cache git py3-six py3-pygments py3-yaml py3-gevent \
+ libstdc++ py3-colorama py3-requests py3-icu py3-redis
+## building missing python packages
+RUN apk add --no-cache --virtual build-deps py3-pip g++ python3-dev \
+ && pip3 install --no-cache-dir rapidfuzz colored polyglot pycld2 \
+ && apk del build-deps
+## copying
WORKDIR /app
COPY . /app
-RUN apk add --update --no-cache python2 py2-pip py2-gevent \
- py2-flask py2-requests py2-pygments py2-redis \
- py2-cffi py2-icu bash vim gawk sed \
- && apk add --no-cache --virtual build-deps python2-dev \
- build-base git \
- && pip install -r requirements.txt \
- && sh share/scripts/get-sheets.sh \
- && apk del build-deps
-ENTRYPOINT ["python2"]
+RUN mkdir -p /root/.cheat.sh/log/ \
+ && python3 lib/fetch.py fetch-all
+
+# installing server dependencies
+RUN apk add --update --no-cache py3-jinja2 py3-flask bash gawk
+ENTRYPOINT ["python3"]
CMD ["bin/srv.py"]
diff --git a/bin/srv.py b/bin/srv.py
index 8f623a8a..52aaff11 100644
--- a/bin/srv.py
+++ b/bin/srv.py
@@ -96,9 +96,9 @@ def log_query(ip_addr, found, topic, user_agent):
"""
Log processed query and some internal data
"""
- log_entry = "%s %s %s %s" % (ip_addr, found, topic, user_agent)
- with open(CONFIG["path.log.queries"], 'a') as my_file:
- my_file.write(log_entry.encode('utf-8')+"\n")
+ log_entry = "%s %s %s %s\n" % (ip_addr, found, topic, user_agent)
+ with open(CONFIG["path.log.queries"], 'ab') as my_file:
+ my_file.write(log_entry.encode('utf-8'))
def get_request_ip(req):
"""
@@ -274,9 +274,13 @@ def answer(topic=None):
return result
return Response(result, mimetype='text/plain')
+
+if '--debug' in sys.argv:
+ app.debug = True
if 'CHEATSH_PORT' in os.environ:
- SRV = WSGIServer((CONFIG['server.bind'], int(os.environ.get('CHEATSH_PORT'))), app) # log=None)
- SRV.serve_forever()
+ PORT = int(os.environ.get('CHEATSH_PORT'))
else:
- SRV = WSGIServer((CONFIG['server.bind'], CONFIG['server.port']), app) # log=None)
- SRV.serve_forever()
+ PORT = CONFIG['server.port']
+SRV = WSGIServer((CONFIG['server.bind'], PORT), app) # log=None)
+print("Starting server on {}:{}".format(SRV.address[0], SRV.address[1]))
+SRV.serve_forever()
diff --git a/lib/adapter/git_adapter.py b/lib/adapter/git_adapter.py
index e91a0d7d..b8a8d296 100644
--- a/lib/adapter/git_adapter.py
+++ b/lib/adapter/git_adapter.py
@@ -79,7 +79,7 @@ def fetch_command(cls):
if not local_repository_dir:
return None
- return ['git', 'clone', cls._repository_url, local_repository_dir]
+ return ['git', 'clone', '--depth=1', cls._repository_url, local_repository_dir]
@classmethod
def update_command(cls):
diff --git a/lib/fetch.py b/lib/fetch.py
index fb8c9511..75a03042 100644
--- a/lib/fetch.py
+++ b/lib/fetch.py
@@ -56,7 +56,11 @@ def _fetch_locations(known_location):
sys.stdout.write("Fetching %s..." % (adptr))
sys.stdout.flush()
- process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ try:
+ process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ print("\nERROR: %s" % cmd)
+ raise
output = process.communicate()[0]
if process.returncode != 0:
sys.stdout.write("\nERROR:\n---\n" + output)
diff --git a/lib/frontend/html.py b/lib/frontend/html.py
index c639e04f..c73e96c9 100644
--- a/lib/frontend/html.py
+++ b/lib/frontend/html.py
@@ -75,13 +75,16 @@ def _html_wrapper(data):
"""
Convert ANSI text `data` to HTML
"""
- proc = Popen(
- ["bash", CONFIG['path.internal.ansi2html'], "--palette=solarized", "--bg=dark"],
- stdin=PIPE, stdout=PIPE, stderr=PIPE)
+ cmd = ["bash", CONFIG['path.internal.ansi2html'], "--palette=solarized", "--bg=dark"]
+ try:
+ proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
+ except FileNotFoundError:
+ print("ERROR: %s" % cmd)
+ raise
data = data.encode('utf-8')
stdout, stderr = proc.communicate(data)
if proc.returncode != 0:
- error(stdout + stderr)
+ error((stdout + stderr).decode('utf-8'))
return stdout.decode('utf-8')
| This still doesn't work as expected, because `cht.sh.txt` runs in
interactive mode and shows the prompt:
Where should cheat.sh be installed [/root/.cheat.sh]? | https://api.github.com/repos/chubin/cheat.sh/pulls/221 | 2020-07-27T07:08:20Z | 2020-07-29T12:37:46Z | 2020-07-29T12:37:46Z | 2020-07-29T13:25:29Z | 1,370 | chubin/cheat.sh | 15,241 |
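The `srv.py` hunk above is essentially a Python 2 → 3 bytes fix: once the log entry is encoded, the file must be opened in binary append mode. The pattern in isolation (hypothetical values and path):

```python
log_entry = "%s %s %s %s\n" % ("203.0.113.7", True, "python/copy", "curl")
with open("/tmp/queries.log", "ab") as fh:  # binary append mode...
    fh.write(log_entry.encode("utf-8"))     # ...because the payload is bytes
```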
Fix for st.metric spacing without delta - #3720 | diff --git a/frontend/src/components/elements/Metric/Metric.test.tsx b/frontend/src/components/elements/Metric/Metric.test.tsx
index d4ffc2da1fc7..40ad86891829 100644
--- a/frontend/src/components/elements/Metric/Metric.test.tsx
+++ b/frontend/src/components/elements/Metric/Metric.test.tsx
@@ -24,6 +24,7 @@ const getProps = (elementProps: Partial<MetricProto> = {}): MetricProps => ({
element: MetricProto.create({
color: MetricProto.MetricColor.RED,
direction: MetricProto.MetricDirection.UP,
+ delta: "test",
...elementProps,
}),
})
@@ -54,21 +55,10 @@ describe("Metric element", () => {
const props = getProps({
color: MetricProto.MetricColor.GRAY,
direction: MetricProto.MetricDirection.NONE,
+ delta: "",
})
const wrapper = mount(<Metric {...props} />)
- expect(
- wrapper
- .find("StyledMetricDeltaText")
- .find("div")
- .at(1)
- .text()
- ).toBe(" ")
- expect(
- wrapper
- .find("StyledMetricDeltaText")
- .find("span")
- .text()
- ).toBe("")
+ expect(wrapper.find("StyledMetricDeltaText").exists()).toBe(false)
})
it("renders correct gray based on props", () => {
diff --git a/frontend/src/components/elements/Metric/Metric.tsx b/frontend/src/components/elements/Metric/Metric.tsx
index 1535f348b5a2..24a6314d0dba 100644
--- a/frontend/src/components/elements/Metric/Metric.tsx
+++ b/frontend/src/components/elements/Metric/Metric.tsx
@@ -67,6 +67,8 @@ export default function Metric({ element }: MetricProps): ReactElement {
const arrowMargin = "0 threeXS 0 0"
const deltaStyle = { color }
+ const deltaExists = element.delta !== ""
+
return (
<div data-testid="metric-container">
<StyledMetricLabelText data-testid="stMetricLabel">
@@ -75,10 +77,12 @@ export default function Metric({ element }: MetricProps): ReactElement {
<StyledMetricValueText data-testid="stMetricValue">
<StyledTruncateText> {element.body} </StyledTruncateText>
</StyledMetricValueText>
- <StyledMetricDeltaText data-testid="stMetricDelta" style={deltaStyle}>
- <Icon content={direction} size="lg" margin={arrowMargin} />
- <StyledTruncateText> {element.delta} </StyledTruncateText>
- </StyledMetricDeltaText>
+ {deltaExists && (
+ <StyledMetricDeltaText data-testid="stMetricDelta" style={deltaStyle}>
+ <Icon content={direction} size="lg" margin={arrowMargin} />
+ <StyledTruncateText> {element.delta} </StyledTruncateText>
+ </StyledMetricDeltaText>
+ )}
</div>
)
}
| Fix for st.metric spacing bug - when there is no delta, unnecessary spacing is caused by rendering an empty StyledMetricDeltaText component. Now, the StyledMetricDeltaText component is only rendered when there is a delta.
**After:**
<img width="240" alt="Screen Shot 2021-09-14 at 5 51 50 PM" src="https://user-images.githubusercontent.com/63436329/133352792-02bd6cd2-b51a-4c50-b79e-efd168982b5f.png">
close #3720 | https://api.github.com/repos/streamlit/streamlit/pulls/3799 | 2021-09-15T00:54:16Z | 2021-09-15T15:19:21Z | 2021-09-15T15:19:21Z | 2021-09-15T15:19:48Z | 689 | streamlit/streamlit | 22,111 |
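On the user-facing side, the case being fixed is simply a metric rendered without a delta (sketch assumes a Streamlit version that ships `st.metric`, i.e. 0.88 or later):

```python
import streamlit as st

st.metric(label="Temperature", value="70 °F")            # no delta: no empty spacer anymore
st.metric(label="Humidity", value="45 %", delta="-3 %")  # delta: arrow plus colored text
```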
[hotfix] fix 3d plugin test | diff --git a/tests/test_booster/test_plugin/test_3d_plugin.py b/tests/test_booster/test_plugin/test_3d_plugin.py
index 6f2fc104fc07..67b0bef50594 100644
--- a/tests/test_booster/test_plugin/test_3d_plugin.py
+++ b/tests/test_booster/test_plugin/test_3d_plugin.py
@@ -8,13 +8,14 @@
from torch.utils.data import Dataset
import colossalai
+from colossalai.accelerator import get_accelerator
from colossalai.booster import Booster
from colossalai.booster.plugin import HybridParallelPlugin
from colossalai.fx import is_compatible_with_meta
from colossalai.lazy.lazy_init import LazyInitContext
from colossalai.nn.optimizer import HybridAdam
from colossalai.testing import clear_cache_before_run, parameterize, rerun_if_address_is_in_use, spawn
-from colossalai.utils import get_current_device, set_seed
+from colossalai.utils import set_seed
from tests.kit.model_zoo import model_zoo
@@ -23,7 +24,9 @@ def __init__(self, num_samples: int = 100, max_length: int = 512, vocab_size: in
self.num_samples = num_samples
self.max_length = max_length
set_seed(42)
- self.input_ids = torch.randint(0, vocab_size, (num_samples, max_length), device=get_current_device())
+ self.input_ids = torch.randint(
+ 0, vocab_size, (num_samples, max_length), device=get_accelerator().get_current_device()
+ )
self.attention_mask = torch.ones_like(self.input_ids)
def __len__(self):
| ## 📌 Checklist before creating the PR
- [ ] I have created an issue for this PR for traceability
- [ ] The title follows the standard format: `[doc/gemini/tensor/...]: A concise description`
- [ ] I have added relevant tags if possible for us to better distinguish different PRs
## 🚨 Issue number
> Link this PR to your issue with words like fixed to automatically close the linked issue upon merge
>
> e.g. `fixed #1234`, `closed #1234`, `resolved #1234`
## 📝 What does this PR do?
> Summarize your work here.
> if you have any plots/diagrams/screenshots/tables, please attach them here.
## 💥 Checklist before requesting a review
- [ ] I have linked my PR to an issue ([instruction](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue))
- [ ] My issue clearly describes the problem/feature/proposal, with diagrams/charts/table/code if possible
- [ ] I have performed a self-review of my code
- [ ] I have added thorough tests.
- [ ] I have added docstrings for all the functions/methods I implemented
## ⭐️ Do you enjoy contributing to Colossal-AI?
- [ ] 🌝 Yes, I do.
- [ ] 🌚 No, I don't.
Tell us more if you don't enjoy contributing to Colossal-AI.
| https://api.github.com/repos/hpcaitech/ColossalAI/pulls/5292 | 2024-01-22T04:17:54Z | 2024-01-22T07:19:04Z | 2024-01-22T07:19:04Z | 2024-01-22T07:19:08Z | 362 | hpcaitech/ColossalAI | 11,237 |
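The change above swaps the deprecated utility for the accelerator-agnostic lookup; the new call pattern, as used in the test, is just (assumes a ColossalAI version that provides `colossalai.accelerator`):

```python
import torch
from colossalai.accelerator import get_accelerator

device = get_accelerator().get_current_device()  # backend-agnostic device handle
input_ids = torch.randint(0, 32000, (4, 512), device=device)
```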
Fix full labelling task display | diff --git a/website/src/components/FlaggableElement.tsx b/website/src/components/FlaggableElement.tsx
index f98ca171bf..05b0b9233e 100644
--- a/website/src/components/FlaggableElement.tsx
+++ b/website/src/components/FlaggableElement.tsx
@@ -146,7 +146,7 @@ export const FlaggableElement = (props: FlaggableElementProps) => {
isLazy
lazyBehavior="keepMounted"
>
- <Grid display="flex" alignItems="center" gap="1">
+ <Grid display="flex" alignItems="center" gap="2">
<PopoverAnchor>{props.children}</PopoverAnchor>
<Tooltip label="Report" bg="red.500" aria-label="A tooltip">
<div>
diff --git a/website/src/components/Messages/MessageTable.tsx b/website/src/components/Messages/MessageTable.tsx
index d82583f61b..45a13d2f21 100644
--- a/website/src/components/Messages/MessageTable.tsx
+++ b/website/src/components/Messages/MessageTable.tsx
@@ -11,7 +11,7 @@ export function MessageTable({ messages, enableLink }: MessageTableProps) {
return (
<Stack spacing="3">
{messages.map((item) => (
- <MessageTableEntry enabled={enableLink} item={item} key={item.id || item.frontend_message_id} />
+ <MessageTableEntry enabled={enableLink} item={item} key={item.id + item.frontend_message_id} />
))}
</Stack>
);
diff --git a/website/src/components/Messages/MessageTableEntry.tsx b/website/src/components/Messages/MessageTableEntry.tsx
index 64bb332fec..22fee71c9a 100644
--- a/website/src/components/Messages/MessageTableEntry.tsx
+++ b/website/src/components/Messages/MessageTableEntry.tsx
@@ -18,8 +18,8 @@ export function MessageTableEntry(props: MessageTableEntryProps) {
const borderColor = useColorModeValue("blackAlpha.200", "whiteAlpha.200");
return (
- <FlaggableElement message={item} key={`flag_${item.id || item.frontend_message_id}`}>
- <HStack>
+ <FlaggableElement message={item}>
+ <HStack w="100%" gap={2}>
<Box borderRadius="full" border="solid" borderWidth="1px" borderColor={borderColor} bg={avatarColor}>
<Avatar
size="sm"
@@ -31,7 +31,7 @@ export function MessageTableEntry(props: MessageTableEntryProps) {
<Box maxWidth="xl">
<Link href={`/messages/${item.id}`}>
<LinkBox
- bg={item.is_assistant ? `${backgroundColor}` : `${backgroundColor2}`}
+ bg={item.is_assistant ? backgroundColor : backgroundColor2}
className={`p-4 rounded-md whitespace-pre-wrap w-full`}
>
{item.text}
@@ -39,13 +39,12 @@ export function MessageTableEntry(props: MessageTableEntryProps) {
</Link>
</Box>
) : (
- <Box maxWidth="xl">
- <Box
- bg={item.is_assistant ? `${backgroundColor}` : `${backgroundColor2}`}
- className={`p-4 rounded-md whitespace-pre-wrap w-full`}
- >
- {item.text}
- </Box>
+ <Box
+ maxWidth="xl"
+ bg={item.is_assistant ? backgroundColor : backgroundColor2}
+ className={`p-4 rounded-md whitespace-pre-wrap w-full`}
+ >
+ {item.text}
</Box>
)}
</HStack>
diff --git a/website/src/components/Survey/LabelSliderGroup.tsx b/website/src/components/Survey/LabelSliderGroup.tsx
index cf3a13cb2e..f65cfb8021 100644
--- a/website/src/components/Survey/LabelSliderGroup.tsx
+++ b/website/src/components/Survey/LabelSliderGroup.tsx
@@ -1,4 +1,4 @@
-import { Box, Grid, Slider, SliderFilledTrack, SliderThumb, SliderTrack, useColorMode } from "@chakra-ui/react";
+import { Grid, Slider, SliderFilledTrack, SliderThumb, SliderTrack, useColorMode } from "@chakra-ui/react";
import { useId, useState } from "react";
import { colors } from "styles/Theme/colors";
@@ -13,7 +13,7 @@ export const LabelSliderGroup = ({ labelIDs, onChange, isEditable }: LabelSlider
const [sliderValues, setSliderValues] = useState<number[]>(Array.from({ length: labelIDs.length }).map(() => 0));
return (
- <Grid templateColumns="auto 1fr" rowGap={1} columnGap={3}>
+ <Grid templateColumns="auto 1fr" rowGap={1} columnGap={4}>
{labelIDs.map((labelId, idx) => (
<CheckboxSliderItem
key={idx}
@@ -44,12 +44,14 @@ function CheckboxSliderItem(props: {
const labelTextClass = colorMode === "light" ? `text-${colors.light.text}` : `text-${colors.dark.text}`;
return (
- <Box data-cy="label-group-item" data-label-type="slider">
+ <>
<label className="text-sm" htmlFor={id}>
{/* TODO: display real text instead of just the id */}
<span className={labelTextClass}>{props.labelId}</span>
</label>
<Slider
+ data-cy="label-group-item"
+ data-label-type="slider"
aria-roledescription="slider"
defaultValue={0}
isDisabled={!props.isEditable}
@@ -60,6 +62,6 @@ function CheckboxSliderItem(props: {
<SliderThumb />
</SliderTrack>
</Slider>
- </Box>
+ </>
);
}
diff --git a/website/src/components/Tasks/LabelTask.tsx b/website/src/components/Tasks/LabelTask.tsx
index b5b00cc419..758ee12774 100644
--- a/website/src/components/Tasks/LabelTask.tsx
+++ b/website/src/components/Tasks/LabelTask.tsx
@@ -14,13 +14,13 @@ export const LabelTask = ({
taskType,
onReplyChanged,
isEditable,
-}: TaskSurveyProps<{ text: string; labels: { [k: string]: number }; message_id: string }>) => {
+}: TaskSurveyProps<{ text: string; labels: Record<string, number>; message_id: string }>) => {
const valid_labels = task.valid_labels;
const [sliderValues, setSliderValues] = useState<number[]>(new Array(valid_labels.length).fill(0));
useEffect(() => {
onReplyChanged({ content: { labels: {}, text: task.reply, message_id: task.message_id }, state: "DEFAULT" });
- }, [task.reply, task.message_id, onReplyChanged]);
+ }, [task, onReplyChanged]);
const onSliderChange = (values: number[]) => {
console.assert(valid_labels.length === sliderValues.length);
@@ -51,7 +51,7 @@ export const LabelTask = ({
<Box mt="4" p="6" borderRadius="lg" bg={cardColor}>
<MessageTable
messages={[
- ...(task.conversation ? task.conversation.messages : []),
+ ...(task.conversation?.messages ?? []),
{
text: task.reply,
is_assistant: task.type === TaskType.label_assistant_reply,
diff --git a/website/src/pages/api/new_task/[task_type].ts b/website/src/pages/api/new_task/[task_type].ts
index 58ac84fa02..8fd2b26baf 100644
--- a/website/src/pages/api/new_task/[task_type].ts
+++ b/website/src/pages/api/new_task/[task_type].ts
@@ -13,7 +13,14 @@ import prisma from "src/lib/prismadb";
const handler = withoutRole("banned", async (req, res, token) => {
// Fetch the new task.
const { task_type } = req.query;
- const task = await oasstApiClient.fetchTask(task_type as string, token);
+
+ let task;
+ try {
+ task = await oasstApiClient.fetchTask(task_type as string, token);
+ } catch (err) {
+ console.error(err);
+ res.status(500).json(err);
+ }
// Store the task and link it to the user..
const registeredTask = await prisma.registeredTask.create({
diff --git a/website/src/types/Tasks.ts b/website/src/types/Tasks.ts
index 50c251bbb0..a791916ee4 100644
--- a/website/src/types/Tasks.ts
+++ b/website/src/types/Tasks.ts
@@ -39,13 +39,8 @@ export interface LabelAssistantReplyTask extends BaseTask {
conversation: Conversation;
reply: string;
valid_labels: string[];
-}
-
-export interface LabelInitialPromptTask extends BaseTask {
- type: TaskType.label_initial_prompt;
- message_id: string;
- valid_labels: string[];
- prompt: string;
+ mode: "simple" | "full";
+ mandatory_labels?: string[];
}
export interface LabelPrompterReplyTask extends BaseTask {
@@ -54,4 +49,13 @@ export interface LabelPrompterReplyTask extends BaseTask {
conversation: Conversation;
reply: string;
valid_labels: string[];
+ mode: "simple" | "full";
+ mandatory_labels?: string[];
+}
+
+export interface LabelInitialPromptTask extends BaseTask {
+ type: TaskType.label_initial_prompt;
+ message_id: string;
+ valid_labels: string[];
+ prompt: string;
}
| Before:
![image](https://user-images.githubusercontent.com/24505302/212462900-f82da4c0-292f-4336-8223-ba31f6f22793.png)
After:
![image](https://user-images.githubusercontent.com/24505302/212462909-f651c46e-0870-4ac0-ab6d-7d4d49ed22f3.png)
| https://api.github.com/repos/LAION-AI/Open-Assistant/pulls/702 | 2023-01-14T08:14:23Z | 2023-01-14T09:14:33Z | 2023-01-14T09:14:33Z | 2023-01-14T09:14:34Z | 2,180 | LAION-AI/Open-Assistant | 37,587 |
Fixed #34125 -- Fixed sliced QuerySet.union() crash on a single non-empty queryset. | diff --git a/django/db/models/sql/compiler.py b/django/db/models/sql/compiler.py
index 8d6b66782862c..318e6b8707168 100644
--- a/django/db/models/sql/compiler.py
+++ b/django/db/models/sql/compiler.py
@@ -531,7 +531,6 @@ def get_combinator_sql(self, combinator, all):
compilers = [
query.get_compiler(self.using, self.connection, self.elide_empty)
for query in self.query.combined_queries
- if not query.is_empty()
]
if not features.supports_slicing_ordering_in_compound:
for compiler in compilers:
@@ -546,6 +545,11 @@ def get_combinator_sql(self, combinator, all):
elif self.query.is_sliced and combinator == "union":
limit = (self.query.low_mark, self.query.high_mark)
for compiler in compilers:
+ # A sliced union cannot have its parts elided as some of them
+ # might be sliced as well and in the event where only a single
+ # part produces a non-empty resultset it might be impossible to
+ # generate valid SQL.
+ compiler.elide_empty = False
if not compiler.query.is_sliced:
compiler.query.set_limits(*limit)
parts = ()
diff --git a/tests/queries/test_qs_combinators.py b/tests/queries/test_qs_combinators.py
index 97b3f97b68f7f..865e172816274 100644
--- a/tests/queries/test_qs_combinators.py
+++ b/tests/queries/test_qs_combinators.py
@@ -61,6 +61,32 @@ def test_union_none(self):
self.assertSequenceEqual(qs3.none(), [])
self.assertNumbersEqual(qs3, [0, 1, 8, 9], ordered=False)
+ def test_union_none_slice(self):
+ qs1 = Number.objects.filter(num__lte=0)
+ qs2 = Number.objects.none()
+ qs3 = qs1.union(qs2)
+ self.assertNumbersEqual(qs3[:1], [0])
+
+ def test_union_empty_filter_slice(self):
+ qs1 = Number.objects.filter(num__lte=0)
+ qs2 = Number.objects.filter(pk__in=[])
+ qs3 = qs1.union(qs2)
+ self.assertNumbersEqual(qs3[:1], [0])
+
+ @skipUnlessDBFeature("supports_slicing_ordering_in_compound")
+ def test_union_slice_compound_empty(self):
+ qs1 = Number.objects.filter(num__lte=0)[:1]
+ qs2 = Number.objects.none()
+ qs3 = qs1.union(qs2)
+ self.assertNumbersEqual(qs3[:1], [0])
+
+ @skipUnlessDBFeature("supports_slicing_ordering_in_compound")
+ def test_union_combined_slice_compound_empty(self):
+ qs1 = Number.objects.filter(num__lte=2)[:3]
+ qs2 = Number.objects.none()
+ qs3 = qs1.union(qs2)
+ self.assertNumbersEqual(qs3.order_by("num")[2:3], [2])
+
def test_union_order_with_null_first_last(self):
Number.objects.filter(other_num=5).update(other_num=None)
qs1 = Number.objects.filter(num__lte=1)
The bug has existed since sliced query unions were added, but it was extended to slices of query unions when the `.exists()` optimization moved to the compiler in 3d734c09.
Thanks Stefan Hammer for the report. | https://api.github.com/repos/django/django/pulls/16232 | 2022-10-28T04:49:29Z | 2022-10-29T07:21:25Z | 2022-10-29T07:21:25Z | 2022-10-29T07:21:25Z | 743 | django/django | 51,600 |
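The regression is easiest to see from the new tests: a union in which one side yields no rows, then sliced (sketch reuses the `Number` model from Django's `queries` test app):

```python
qs1 = Number.objects.filter(num__lte=0)  # produces rows
qs2 = Number.objects.none()              # empty part, previously elided by the compiler
combined = qs1.union(qs2)
first = list(combined[:1])               # sliced union: used to emit invalid SQL
```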
bpo-43916: _md5.md5 uses Py_TPFLAGS_DISALLOW_INSTANTIATION | diff --git a/Lib/test/test_hashlib.py b/Lib/test/test_hashlib.py
index 1236aa723b1995..c7c128e75568e2 100644
--- a/Lib/test/test_hashlib.py
+++ b/Lib/test/test_hashlib.py
@@ -901,8 +901,39 @@ def test_get_fips_mode(self):
if fips_mode is not None:
self.assertIsInstance(fips_mode, int)
+ def test_disallow_instanciation(self):
+ constructors = []
+ try:
+ import _md5
+ constructors.append(_md5.md5)
+ except ImportError:
+ pass
+ try:
+ import _sha1
+ constructors.append(_sha1.sha1)
+ except ImportError:
+ pass
+ try:
+ import _sha256
+ constructors.append(_sha256.sha224)
+ constructors.append(_sha256.sha256)
+ except ImportError:
+ pass
+ try:
+ import _sha512
+ constructors.append(_sha512.sha384)
+ constructors.append(_sha512.sha512)
+ except ImportError:
+ pass
+
+ for constructor in constructors:
+ h = constructor()
+ with self.subTest(constructor=constructor):
+ hash_type = type(h)
+ self.assertRaises(TypeError, hash_type)
+
@unittest.skipUnless(HASH is not None, 'need _hashlib')
- def test_internal_types(self):
+ def test_hash_disallow_instanciation(self):
# internal types like _hashlib.HASH are not constructable
with self.assertRaisesRegex(
TypeError, "cannot create '_hashlib.HASH' instance"
diff --git a/Modules/md5module.c b/Modules/md5module.c
index b2e65a0a5ffd22..2ae94a456fdb35 100644
--- a/Modules/md5module.c
+++ b/Modules/md5module.c
@@ -484,7 +484,7 @@ static PyType_Slot md5_type_slots[] = {
static PyType_Spec md5_type_spec = {
.name = "_md5.md5",
.basicsize = sizeof(MD5object),
- .flags = Py_TPFLAGS_DEFAULT,
+ .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_DISALLOW_INSTANTIATION,
.slots = md5_type_slots
};
diff --git a/Modules/sha1module.c b/Modules/sha1module.c
index 7126db93b1a3fc..9ac46c58a7f348 100644
--- a/Modules/sha1module.c
+++ b/Modules/sha1module.c
@@ -462,7 +462,7 @@ static PyType_Slot sha1_type_slots[] = {
static PyType_Spec sha1_type_spec = {
.name = "_sha1.sha1",
.basicsize = sizeof(SHA1object),
- .flags = Py_TPFLAGS_DEFAULT,
+ .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_DISALLOW_INSTANTIATION,
.slots = sha1_type_slots
};
@@ -554,7 +554,7 @@ _sha1_exec(PyObject *module)
}
Py_INCREF(st->sha1_type);
- if (PyModule_AddObject(module,
+ if (PyModule_AddObject(module,
"SHA1Type",
(PyObject *)st->sha1_type) < 0) {
Py_DECREF(st->sha1_type);
diff --git a/Modules/sha256module.c b/Modules/sha256module.c
index b90e5df7826740..ccb1862a99f198 100644
--- a/Modules/sha256module.c
+++ b/Modules/sha256module.c
@@ -544,14 +544,14 @@ static PyType_Slot sha256_types_slots[] = {
static PyType_Spec sha224_type_spec = {
.name = "_sha256.sha224",
.basicsize = sizeof(SHAobject),
- .flags = Py_TPFLAGS_DEFAULT,
+ .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_DISALLOW_INSTANTIATION,
.slots = sha256_types_slots
};
static PyType_Spec sha256_type_spec = {
.name = "_sha256.sha256",
.basicsize = sizeof(SHAobject),
- .flags = Py_TPFLAGS_DEFAULT,
+ .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_DISALLOW_INSTANTIATION,
.slots = sha256_types_slots
};
diff --git a/Modules/sha512module.c b/Modules/sha512module.c
index 0d8f51e5ae5e00..5e8572caf55184 100644
--- a/Modules/sha512module.c
+++ b/Modules/sha512module.c
@@ -602,7 +602,7 @@ static PyType_Slot sha512_sha384_type_slots[] = {
static PyType_Spec sha512_sha384_type_spec = {
.name = "_sha512.sha384",
.basicsize = sizeof(SHAobject),
- .flags = Py_TPFLAGS_DEFAULT,
+ .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_DISALLOW_INSTANTIATION,
.slots = sha512_sha384_type_slots
};
@@ -619,7 +619,7 @@ static PyType_Slot sha512_sha512_type_slots[] = {
static PyType_Spec sha512_sha512_type_spec = {
.name = "_sha512.sha512",
.basicsize = sizeof(SHAobject),
- .flags = Py_TPFLAGS_DEFAULT,
+ .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_DISALLOW_INSTANTIATION,
.slots = sha512_sha512_type_slots
};
| The following types use Py_TPFLAGS_DISALLOW_INSTANTIATION flag:
* _md5.md5
* _sha1.sha1
* _sha256.sha224
* _sha256.sha256
* _sha512.sha384
* _sha512.sha512
<!--
Thanks for your contribution!
Please read this comment in its entirety. It's quite important.
# Pull Request title
It should be in the following format:
```
bpo-NNNN: Summary of the changes made
```
Where: bpo-NNNN refers to the issue number in the https://bugs.python.org.
Most PRs will require an issue number. Trivial changes, like fixing a typo, do not need an issue.
# Backport Pull Request title
If this is a backport PR (PR made against branches other than `master`),
please ensure that the PR title is in the following format:
```
[X.Y] <title from the original PR> (GH-NNNN)
```
Where: [X.Y] is the branch name, e.g. [3.6].
GH-NNNN refers to the PR number from `master`.
-->
<!-- issue-number: [bpo-43916](https://bugs.python.org/issue43916) -->
https://bugs.python.org/issue43916
<!-- /issue-number -->
| https://api.github.com/repos/python/cpython/pulls/25753 | 2021-04-30T15:58:06Z | 2021-04-30T16:40:30Z | 2021-04-30T16:40:30Z | 2021-04-30T22:08:46Z | 1,263 | python/cpython | 4,470 |
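The user-visible effect of `Py_TPFLAGS_DISALLOW_INSTANTIATION` matches the new test: the concrete hash type can no longer be instantiated directly (CPython 3.10+; the exact type depends on whether the OpenSSL-backed `_hashlib` module is available):

```python
import hashlib

h = hashlib.md5()
hash_type = type(h)  # the concrete C-level hash type
try:
    hash_type()      # direct instantiation is rejected
except TypeError as exc:
    print(exc)
```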
cryptocom: update safeNetwork | diff --git a/js/cryptocom.js b/js/cryptocom.js
index f257e91c5ef7..c9d020fe8bf3 100644
--- a/js/cryptocom.js
+++ b/js/cryptocom.js
@@ -116,6 +116,7 @@ module.exports = class cryptocom extends Exchange {
'private/get-cancel-on-disconnect': 10 / 3,
'private/create-withdrawal': 10 / 3,
'private/get-withdrawal-history': 10 / 3,
+ 'private/get-currency-networks': 10 / 3,
'private/get-deposit-history': 10 / 3,
'private/get-deposit-address': 10 / 3,
'private/get-account-summary': 10 / 3,
@@ -1422,9 +1423,16 @@ module.exports = class cryptocom extends Exchange {
}
safeNetwork (networkId) {
- // stub for now
- // TODO: figure out which networks are supported on cryptocom
- return networkId;
+ const networksById = {
+ 'BTC': 'BTC',
+ 'ETH': 'ETH',
+ 'SOL': 'SOL',
+ 'BNB': 'BNB',
+ 'CRONOS': 'CRONOS',
+ 'MATIC': 'MATIC',
+ 'OP': 'OP',
+ };
+ return this.safeString (networksById, networkId, networkId);
}
async fetchDeposits (code = undefined, since = undefined, limit = undefined, params = {}) {
| https://api.github.com/repos/ccxt/ccxt/pulls/14511 | 2022-07-28T10:14:33Z | 2022-08-05T10:57:40Z | 2022-08-05T10:57:40Z | 2022-08-05T10:57:40Z | 354 | ccxt/ccxt | 13,390 |
|
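The JS call `this.safeString (networksById, networkId, networkId)` is a defaulting lookup; the same idiom in Python is a plain `dict.get` (illustrative sketch, not ccxt source):

```python
NETWORKS_BY_ID = {"BTC": "BTC", "ETH": "ETH", "CRONOS": "CRONOS"}

def safe_network(network_id: str) -> str:
    # Known ids map through the table; unknown ids pass through unchanged.
    return NETWORKS_BY_ID.get(network_id, network_id)

assert safe_network("ETH") == "ETH"
assert safe_network("SOMETHING_NEW") == "SOMETHING_NEW"
```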
Add Matt as the TensorFlow reference | diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md
index 214f19ee2ef4a..279140cfdc0d1 100644
--- a/.github/ISSUE_TEMPLATE/bug-report.md
+++ b/.github/ISSUE_TEMPLATE/bug-report.md
@@ -34,7 +34,7 @@ Models:
- funnel: @sgugger
- gpt2: @patrickvonplaten, @LysandreJik
- rag: @patrickvonplaten, @lhoestq
-- tensorflow: @LysandreJik
+- tensorflow: @Rocketknight1
Library:
| https://api.github.com/repos/huggingface/transformers/pulls/11212 | 2021-04-12T20:59:42Z | 2021-04-13T12:52:30Z | 2021-04-13T12:52:30Z | 2021-04-13T12:52:30Z | 150 | huggingface/transformers | 12,756 |
|
mageia bootstrap [needs revision] | diff --git a/bootstrap/_mageia_common.sh b/bootstrap/_mageia_common.sh
new file mode 100755
index 00000000000..9a4606c9df6
--- /dev/null
+++ b/bootstrap/_mageia_common.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+# Tested on mageia 5 x86_64
+if ! urpmi --force \
+ python \
+ libpython-devel \
+ python-virtualenv
+then
+ echo "Could not install Python dependencies. Aborting bootstrap!"
+ exit 1
+fi
+
+if ! urpmi --force \
+ git \
+ gcc \
+ cdialog \
+ python-augeas \
+ libopenssl-devel \
+ libffi-devel \
+ rootcerts
+then
+ echo "Could not install additional dependencies. Aborting bootstrap!"
+ exit 1
+fi
diff --git a/letsencrypt-auto b/letsencrypt-auto
index 44c71883c7a..13a966a8734 100755
--- a/letsencrypt-auto
+++ b/letsencrypt-auto
@@ -122,6 +122,9 @@ then
if [ -f /etc/debian_version ] ; then
echo "Bootstrapping dependencies for Debian-based OSes..."
$SUDO $BOOTSTRAP/_deb_common.sh
+ elif [ -f /etc/mageia-release ] ; then
+ echo "Bootstrapping dependencies for mageia..."
+ $SUDO $BOOTSTRAP/_mageia_common.sh
elif [ -f /etc/redhat-release ] ; then
echo "Bootstrapping dependencies for RedHat-based OSes..."
$SUDO $BOOTSTRAP/_rpm_common.sh
| I added a simple script in bootstrap to use letsencrypt-auto with Mageia.
I tested it in Mageia 5 with the webroot plugin.
| https://api.github.com/repos/certbot/certbot/pulls/1742 | 2015-12-04T15:18:15Z | 2016-06-13T21:26:07Z | 2016-06-13T21:26:07Z | 2016-06-15T01:24:39Z | 395 | certbot/certbot | 499 |
Enable the pull target to work in vvvv mode | diff --git a/test/integration/targets/pull/runme.sh b/test/integration/targets/pull/runme.sh
index e03edf7d040291..7334df93ca5537 100755
--- a/test/integration/targets/pull/runme.sh
+++ b/test/integration/targets/pull/runme.sh
@@ -30,7 +30,12 @@ if ! grep MAGICKEYWORD "${temp_log}"; then
fi
# test for https://github.com/ansible/ansible/issues/13681
-if grep '127\.0\.0\.1' "${temp_log}"; then
+if egrep '127\.0\.0\.1.*ok' "${temp_log}"; then
echo "Found host 127.0.0.1 in output. Only localhost should be present."
exit 1
fi
+# make sure one host was run
+if ! egrep 'localhost.*ok' "${temp_log}"; then
+ echo "Did not find host localhost in output."
+ exit 1
+fi
| ##### SUMMARY
The original grep format was insufficient.
##### ISSUE TYPE
- Bugfix Pull Request
##### COMPONENT NAME
pull test target
##### ANSIBLE VERSION
```
2.5
```
| https://api.github.com/repos/ansible/ansible/pulls/33755 | 2017-12-11T02:39:51Z | 2017-12-11T15:09:21Z | 2017-12-11T15:09:21Z | 2019-04-26T23:29:40Z | 230 | ansible/ansible | 49,287 |
fix typos | diff --git a/CppCoreGuidelines.md b/CppCoreGuidelines.md
index 10afca865..db3b3627b 100644
--- a/CppCoreGuidelines.md
+++ b/CppCoreGuidelines.md
@@ -8114,7 +8114,7 @@ Hard. At best a heuristic. Look for an uninitialized variable followed by a loop
Macros are a major source of bugs.
Macros don't obey the usual scope and type rules.
-Macros ensure that the human reader see something different from whet the compiler sees.
+Macros ensure that the human reader see something different from what the compiler sees.
Macros complicates tool building.
##### Example, bad
@@ -8245,7 +8245,7 @@ Readability. Error prevention. Efficiency.
cout << x << '\n';
for (int i = 1; i < v.size(); ++i) // touches two elements: can't be a range-for
- cout << v[i] + v[-1] << '\n';
+ cout << v[i] + v[i-1] << '\n';
for (int i = 1; i < v.size(); ++i) // possible side-effect: can't be a range-for
cout << f(&v[i]) << '\n';
| https://api.github.com/repos/isocpp/CppCoreGuidelines/pulls/403 | 2015-11-24T16:01:02Z | 2015-11-24T16:03:11Z | 2015-11-24T16:03:11Z | 2021-12-18T12:14:31Z | 282 | isocpp/CppCoreGuidelines | 16,089 |
|
Add llava 34b template | diff --git a/fastchat/conversation.py b/fastchat/conversation.py
index 0ee4c6f519..95576536c4 100644
--- a/fastchat/conversation.py
+++ b/fastchat/conversation.py
@@ -172,6 +172,9 @@ def get_prompt(self) -> str:
ret = "" if system_prompt == "" else system_prompt + self.sep + "\n"
for role, message in self.messages:
if message:
+ if type(message) is tuple:
+ message, images = message
+ message = IMAGE_PLACEHOLDER_STR * len(images) + message
ret += role + "\n" + message + self.sep + "\n"
else:
ret += role + "\n"
@@ -1562,6 +1565,21 @@ def get_conv_template(name: str) -> Conversation:
)
)
+# Llava-chatml
+# reference: https://github.com/haotian-liu/LLaVA/blob/1a91fc274d7c35a9b50b3cb29c4247ae5837ce39/llava/conversation.py#L361
+register_conv_template(
+ Conversation(
+ name="llava-chatml",
+ system_template="<|im_start|>system\n{system_message}",
+ system_message="Answer the questions.",
+ roles=("<|im_start|>user", "<|im_start|>assistant"),
+ sep_style=SeparatorStyle.CHATML,
+ sep="<|im_end|>",
+ stop_str="<|im_end|>",
+ )
+)
+
+
if __name__ == "__main__":
from fastchat.conversation import get_conv_template
diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py
index da98237d01..6d17aea73e 100644
--- a/fastchat/model/model_adapter.py
+++ b/fastchat/model/model_adapter.py
@@ -2217,6 +2217,10 @@ def match(self, model_path: str):
return "llava" in model_path.lower()
def get_default_conv_template(self, model_path: str) -> Conversation:
+ model_path = model_path.lower()
+ if "34b" in model_path:
+ return get_conv_template("llava-chatml")
+
return get_conv_template("vicuna_v1.1")
| https://api.github.com/repos/lm-sys/FastChat/pulls/3034 | 2024-02-11T15:44:26Z | 2024-02-11T15:44:48Z | 2024-02-11T15:44:48Z | 2024-02-11T15:44:51Z | 517 | lm-sys/FastChat | 41,030 |
|
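Once registered, the template is used like any other FastChat conversation (sketch assumes a FastChat build that includes this PR):

```python
from fastchat.conversation import get_conv_template

conv = get_conv_template("llava-chatml")
conv.append_message(conv.roles[0], "What is shown in the image?")
conv.append_message(conv.roles[1], None)
print(conv.get_prompt())  # ChatML framing, ending with "<|im_start|>assistant\n"
```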
add --no-reuse-key | diff --git a/certbot-ci/certbot_integration_tests/certbot_tests/test_main.py b/certbot-ci/certbot_integration_tests/certbot_tests/test_main.py
index 18bc243e4db..12c45088fe9 100644
--- a/certbot-ci/certbot_integration_tests/certbot_tests/test_main.py
+++ b/certbot-ci/certbot_integration_tests/certbot_tests/test_main.py
@@ -433,6 +433,21 @@ def test_reuse_key(context):
privkey3 = file.read()
assert privkey2 != privkey3
+ context.certbot(['--cert-name', certname, '--domains', certname,
+ '--reuse-key','--force-renewal'])
+ context.certbot(['renew', '--cert-name', certname, '--no-reuse-key', '--force-renewal'])
+ context.certbot(['renew', '--cert-name', certname, '--force-renewal'])
+
+ with open(join(context.config_dir, 'archive/{0}/privkey4.pem').format(certname), 'r') as file:
+ privkey4 = file.read()
+ with open(join(context.config_dir, 'archive/{0}/privkey5.pem').format(certname), 'r') as file:
+ privkey5 = file.read()
+ with open(join(context.config_dir, 'archive/{0}/privkey6.pem').format(certname), 'r') as file:
+ privkey6 = file.read()
+ assert privkey3 == privkey4
+ assert privkey4 != privkey5
+ assert privkey5 != privkey6
+
with open(join(context.config_dir, 'archive/{0}/cert1.pem').format(certname), 'r') as file:
cert1 = file.read()
with open(join(context.config_dir, 'archive/{0}/cert2.pem').format(certname), 'r') as file:
diff --git a/certbot/CHANGELOG.md b/certbot/CHANGELOG.md
index fb2348bd108..ff40b3f1a2d 100644
--- a/certbot/CHANGELOG.md
+++ b/certbot/CHANGELOG.md
@@ -6,7 +6,8 @@ Certbot adheres to [Semantic Versioning](https://semver.org/).
### Added
-*
+* Added `--no-reuse-key`. This remains the default behavior, but the flag may be
+ useful to unset the `--reuse-key` option on existing certificates.
### Changed
diff --git a/certbot/certbot/_internal/cli/__init__.py b/certbot/certbot/_internal/cli/__init__.py
index 2c8b7b81df0..e7a1de49b4b 100644
--- a/certbot/certbot/_internal/cli/__init__.py
+++ b/certbot/certbot/_internal/cli/__init__.py
@@ -212,6 +212,13 @@ def prepare_and_parse_args(plugins, args, detect_defaults=False):
action="store_true", default=flag_default("reuse_key"),
help="When renewing, use the same private key as the existing "
"certificate.")
+ helpful.add(
+ "automation", "--no-reuse-key", dest="reuse_key",
+ action="store_false", default=flag_default("reuse_key"),
+ help="When renewing, do not use the same private key as the existing "
+ "certificate. Not reusing private keys is the default behavior of "
+ "Certbot. This option may be used to unset --reuse-key on an "
+ "existing certificate.")
helpful.add(
["automation", "renew", "certonly"],
| Fixes #9002. | https://api.github.com/repos/certbot/certbot/pulls/9029 | 2021-09-10T01:27:32Z | 2021-09-10T19:27:54Z | 2021-09-10T19:27:54Z | 2021-09-10T19:27:54Z | 817 | certbot/certbot | 3,017 |
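The mechanism behind the new flag is the standard argparse pattern of two options writing to one destination — an illustrative standalone sketch, not Certbot's actual parser setup:

```python
# Paired flags sharing dest="reuse_key": --reuse-key stores True,
# --no-reuse-key stores False, so a later flag can unset an earlier setting.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--reuse-key", dest="reuse_key", action="store_true", default=False)
parser.add_argument("--no-reuse-key", dest="reuse_key", action="store_false")

print(parser.parse_args([]).reuse_key)                  # False (default)
print(parser.parse_args(["--reuse-key"]).reuse_key)     # True
print(parser.parse_args(["--no-reuse-key"]).reuse_key)  # False (explicit unset)
```

On `certbot renew`, passing `--no-reuse-key` is how a `--reuse-key` setting stored for an existing certificate gets unset.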
Simplify check for symlinks that resolve outside root | diff --git a/src/black/__init__.py b/src/black/__init__.py
index 8ab5b47f974..2d4c7f655ad 100644
--- a/src/black/__init__.py
+++ b/src/black/__init__.py
@@ -50,9 +50,9 @@
gen_python_files,
get_gitignore,
get_root_relative_path,
- normalize_path_maybe_ignore,
parse_pyproject_toml,
path_is_excluded,
+ resolves_outside_root_or_cannot_stat,
wrap_stream_for_windows,
)
from black.handle_ipynb_magics import (
@@ -763,12 +763,9 @@ def get_sources(
)
continue
- normalized_path: Optional[str] = normalize_path_maybe_ignore(
- path, root, report
- )
- if normalized_path is None:
+ if resolves_outside_root_or_cannot_stat(path, root, report):
if verbose:
- out(f'Skipping invalid source: "{normalized_path}"', fg="red")
+ out(f'Skipping invalid source: "{path}"', fg="red")
continue
if is_stdin:
@@ -780,7 +777,7 @@ def get_sources(
continue
if verbose:
- out(f'Found input source: "{normalized_path}"', fg="blue")
+ out(f'Found input source: "{path}"', fg="blue")
sources.add(path)
elif path.is_dir():
path = root / (path.resolve().relative_to(root))
diff --git a/src/black/files.py b/src/black/files.py
index 960f13ee270..6c05105450c 100644
--- a/src/black/files.py
+++ b/src/black/files.py
@@ -254,26 +254,24 @@ def get_gitignore(root: Path) -> PathSpec:
raise
-def normalize_path_maybe_ignore(
+def resolves_outside_root_or_cannot_stat(
path: Path,
root: Path,
report: Optional[Report] = None,
-) -> Optional[str]:
- """Normalize `path`. May return `None` if `path` was ignored.
-
- `report` is where "path ignored" output goes.
+) -> bool:
+ """
+ Returns whether the path is a symbolic link that points outside the
+ root directory. Also returns True if we failed to resolve the path.
"""
try:
- abspath = path if path.is_absolute() else Path.cwd() / path
- normalized_path = abspath.resolve()
- root_relative_path = get_root_relative_path(normalized_path, root, report)
-
+ if sys.version_info < (3, 8, 6):
+ path = path.absolute() # https://bugs.python.org/issue33660
+ resolved_path = path.resolve()
+ return get_root_relative_path(resolved_path, root, report) is None
except OSError as e:
if report:
report.path_ignored(path, f"cannot be read because {e}")
- return None
-
- return root_relative_path
+ return True
def get_root_relative_path(
@@ -369,8 +367,7 @@ def gen_python_files(
report.path_ignored(child, "matches the --force-exclude regular expression")
continue
- normalized_path = normalize_path_maybe_ignore(child, root, report)
- if normalized_path is None:
+ if resolves_outside_root_or_cannot_stat(child, root, report):
continue
if child.is_dir():
diff --git a/tests/test_black.py b/tests/test_black.py
index f876d365b12..5c6920c2b77 100644
--- a/tests/test_black.py
+++ b/tests/test_black.py
@@ -1760,12 +1760,15 @@ def test_bpo_33660_workaround(self) -> None:
return
# https://bugs.python.org/issue33660
+ # Can be removed when we drop support for Python 3.8.5
root = Path("/")
with change_directory(root):
path = Path("workspace") / "project"
report = black.Report(verbose=True)
- normalized_path = black.normalize_path_maybe_ignore(path, root, report)
- self.assertEqual(normalized_path, "workspace/project")
+ resolves_outside = black.resolves_outside_root_or_cannot_stat(
+ path, root, report
+ )
+ self.assertIs(resolves_outside, False)
def test_normalize_path_ignore_windows_junctions_outside_of_root(self) -> None:
if system() != "Windows":
@@ -1778,13 +1781,13 @@ def test_normalize_path_ignore_windows_junctions_outside_of_root(self) -> None:
os.system(f"mklink /J {junction_dir} {junction_target_outside_of_root}")
report = black.Report(verbose=True)
- normalized_path = black.normalize_path_maybe_ignore(
+ resolves_outside = black.resolves_outside_root_or_cannot_stat(
junction_dir, root, report
)
# Manually delete for Python < 3.8
os.system(f"rmdir {junction_dir}")
- self.assertEqual(normalized_path, None)
+ self.assertIs(resolves_outside, True)
def test_newline_comment_interaction(self) -> None:
source = "class A:\\\r\n# type: ignore\n pass\n"
| This should not change any behaviour.
This would have made the #4015 / #4161 bug much more obvious. It makes the case where we resolve a path more explicit and prevents a resolved path from leaking out via the return. | https://api.github.com/repos/psf/black/pulls/4221 | 2024-02-11T02:01:32Z | 2024-02-11T07:55:01Z | 2024-02-11T07:55:01Z | 2024-02-11T07:55:03Z | 1,180 | psf/black | 23,722 |
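For readers outside the diff context, here is a self-contained sketch of the renamed check — an assumed simplification of Black's internals, not the exact implementation:

```python
# A path "resolves outside root" when its symlink-resolved form is not under
# the project root; failing to stat the path is treated the same way.
from pathlib import Path

def resolves_outside_root(path: Path, root: Path) -> bool:
    try:
        path.resolve().relative_to(root.resolve())
        return False
    except ValueError:   # resolved path is not relative to root
        return True
    except OSError:      # path cannot be read / statted
        return True
```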
reverse output for rtl languages has bug. | diff --git a/ppocr/postprocess/rec_postprocess.py b/ppocr/postprocess/rec_postprocess.py
index fbf8b93e3d..ebf7d4e3c6 100644
--- a/ppocr/postprocess/rec_postprocess.py
+++ b/ppocr/postprocess/rec_postprocess.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from bidi.algorithm import get_display
import numpy as np
import paddle
from paddle.nn import functional as F
@@ -49,20 +50,7 @@ def __init__(self, character_dict_path=None, use_space_char=False):
self.character = dict_character
def pred_reverse(self, pred):
- pred_re = []
- c_current = ''
- for c in pred:
- if not bool(re.search('[a-zA-Z0-9 :*./%+-]', c)):
- if c_current != '':
- pred_re.append(c_current)
- pred_re.append(c)
- c_current = ''
- else:
- c_current += c
- if c_current != '':
- pred_re.append(c_current)
-
- return ''.join(pred_re[::-1])
+ return get_display(pred)
def add_special_char(self, dict_character):
return dict_character
| Paddle's main code for reversing labels in RTL languages has a bug; using [python-bidi](https://pypi.org/project/python-bidi/) instead works fine. | https://api.github.com/repos/PaddlePaddle/PaddleOCR/pulls/10418 | 2023-07-17T09:34:40Z | 2024-01-03T02:34:41Z | 2024-01-03T02:34:41Z | 2024-01-03T02:34:48Z | 291 | PaddlePaddle/PaddleOCR | 42,340 |
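A minimal demonstration of the replacement logic — requires `pip install python-bidi`; the sample string is only illustrative:

```python
# get_display reorders a logical-order string into display order,
# handling mixed RTL/LTR runs correctly instead of the regex-based reversal.
from bidi.algorithm import get_display

logical = "مرحبا ABC 123"  # mixed Arabic/Latin text in logical order
print(get_display(logical))
```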
Include cublas error details when getting cublas handle fails | diff --git a/csrc/includes/context.h b/csrc/includes/context.h
index 3a9067dc3b9f..cd80f8fbeebe 100644
--- a/csrc/includes/context.h
+++ b/csrc/includes/context.h
@@ -50,8 +50,12 @@ class TrainingContext {
{
curandCreateGenerator(&_gen, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(_gen, 123);
- if (cublasCreate(&_cublasHandle) != CUBLAS_STATUS_SUCCESS) {
- auto message = std::string("Fail to create cublas handle.");
+ cublasStatus_t stat = cublasCreate(&_cublasHandle);
+ if (stat != CUBLAS_STATUS_SUCCESS) {
+ // It would be nice to use cublasGetStatusName and
+ // cublasGetStatusString, but they were only added in CUDA 11.4.2.
+ auto message = std::string("Failed to create cublas handle: cublasStatus_t was ") +
+ std::to_string(stat);
std::cerr << message << std::endl;
throw std::runtime_error(message);
}
diff --git a/csrc/transformer/inference/includes/inference_context.h b/csrc/transformer/inference/includes/inference_context.h
index 66596c9bfb82..aaf568553560 100644
--- a/csrc/transformer/inference/includes/inference_context.h
+++ b/csrc/transformer/inference/includes/inference_context.h
@@ -60,8 +60,13 @@ class InferenceContext {
{
_workSpaceSize = 0;
_workspace = 0;
- if (cublasCreate(&_cublasHandle) != CUBLAS_STATUS_SUCCESS) {
- auto message = std::string("Fail to create cublas handle.");
+
+ cublasStatus_t stat = cublasCreate(&_cublasHandle);
+ if (stat != CUBLAS_STATUS_SUCCESS) {
+ // It would be nice to use cublasGetStatusName and
+ // cublasGetStatusString, but they were only added in CUDA 11.4.2.
+ auto message = std::string("Failed to create cublas handle: cublasStatus_t was ") +
+ std::to_string(stat);
std::cerr << message << std::endl;
throw std::runtime_error(message);
}
| I've been getting hard-to-debug errors in some DeepSpeed runs. During initialization, one of the worker processes raises `RuntimeError: Fail to create cublas handle.` with no further details, which feels pretty mysterious.
This change includes details of the failure status by using https://docs.nvidia.com/cuda/cublas/#cublasgetstatusname and https://docs.nvidia.com/cuda/cublas/#cublasgetstatusstring
---
**original error message** (using deepspeed 0.9.2): `RuntimeError: Fail to create cublas handle.`
<img width="1135" alt="image" src="https://github.com/microsoft/DeepSpeed/assets/133466/f3c4f14b-b820-463b-bf1b-a85b1e0b2399">
**new error message** with this change: `RuntimeError: Failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED the library was not initialized`
<img width="874" alt="image" src="https://github.com/microsoft/DeepSpeed/assets/133466/3f04bec0-5922-44aa-a309-7bf4e0429c1d">
This is still not a great error message, but it has better search results (most results suggest that it's due to running out of GPU memory; bizarrely [some people also report removing `~/.nv` fixes it...](https://github.com/tensorflow/tensorflow/issues/9489#issuecomment-405213443)). | https://api.github.com/repos/microsoft/DeepSpeed/pulls/3695 | 2023-06-06T20:08:03Z | 2023-06-13T20:12:27Z | 2023-06-13T20:12:27Z | 2023-06-13T20:12:27Z | 535 | microsoft/DeepSpeed | 10,015 |
mysql_replication: add resetmaster choice to mode param | diff --git a/changelogs/fragments/63321-mysql_replication_add_resetmaster_to_mode.yml b/changelogs/fragments/63321-mysql_replication_add_resetmaster_to_mode.yml
new file mode 100644
index 00000000000000..c4c112a1448045
--- /dev/null
+++ b/changelogs/fragments/63321-mysql_replication_add_resetmaster_to_mode.yml
@@ -0,0 +1,2 @@
+minor_changes:
+- mysql_replication - add support of ``resetmaster`` choice to ``mode`` parameter (https://github.com/ansible/ansible/issues/42870).
diff --git a/lib/ansible/modules/database/mysql/mysql_replication.py b/lib/ansible/modules/database/mysql/mysql_replication.py
index 364b3a2b43df40..9c261a435083a6 100644
--- a/lib/ansible/modules/database/mysql/mysql_replication.py
+++ b/lib/ansible/modules/database/mysql/mysql_replication.py
@@ -34,6 +34,7 @@
C(getslave) (SHOW SLAVE STATUS),
C(startslave) (START SLAVE),
C(stopslave) (STOP SLAVE),
+ C(resetmaster) (RESET MASTER) - supported from Ansible 2.10,
C(resetslave) (RESET SLAVE),
C(resetslaveall) (RESET SLAVE ALL).
type: str
@@ -43,6 +44,7 @@
- getslave
- startslave
- stopslave
+ - resetmaster
- resetslave
- resetslaveall
default: getslave
@@ -200,6 +202,12 @@
mysql_replication:
mode: stopslave
channel: master-1
+
+- name: >
+ Run RESET MASTER command which will delete all existing binary log files
+ and reset the binary log index file on the master
+ mysql_replication:
+ mode: resetmaster
'''
RETURN = r'''
@@ -295,6 +303,17 @@ def reset_slave_all(cursor, connection_name='', channel=''):
return reset
+def reset_master(cursor):
+ query = 'RESET MASTER'
+ try:
+ executed_queries.append(query)
+ cursor.execute(query)
+ reset = True
+ except Exception:
+ reset = False
+ return reset
+
+
def start_slave(cursor, connection_name='', channel=''):
if connection_name:
query = "START SLAVE '%s'" % connection_name
@@ -335,7 +354,8 @@ def main():
login_port=dict(type='int', default=3306),
login_unix_socket=dict(type='str'),
mode=dict(type='str', default='getslave', choices=[
- 'getmaster', 'getslave', 'changemaster', 'stopslave', 'startslave', 'resetslave', 'resetslaveall']),
+ 'getmaster', 'getslave', 'changemaster', 'stopslave',
+ 'startslave', 'resetmaster', 'resetslave', 'resetslaveall']),
master_auto_position=dict(type='bool', default=False),
master_host=dict(type='str'),
master_user=dict(type='str'),
@@ -490,6 +510,12 @@ def main():
module.exit_json(msg="Slave stopped", changed=True, queries=executed_queries)
else:
module.exit_json(msg="Slave already stopped", changed=False, queries=executed_queries)
+ elif mode in "resetmaster":
+ reset = reset_master(cursor)
+ if reset is True:
+ module.exit_json(msg="Master reset", changed=True, queries=executed_queries)
+ else:
+ module.exit_json(msg="Master already reset", changed=False, queries=executed_queries)
elif mode in "resetslave":
reset = reset_slave(cursor, connection_name, channel)
if reset is True:
diff --git a/test/integration/targets/mysql_replication/tasks/main.yml b/test/integration/targets/mysql_replication/tasks/main.yml
index 3922fd10d7fbcc..9f5c76a7014195 100644
--- a/test/integration/targets/mysql_replication/tasks/main.yml
+++ b/test/integration/targets/mysql_replication/tasks/main.yml
@@ -12,3 +12,7 @@
# Tests of channel parameter:
- import_tasks: mysql_replication_channel.yml
when: ansible_distribution == 'CentOS' and ansible_distribution_major_version >= '7'
+
+# Tests of resetmaster mode:
+- import_tasks: mysql_replication_resetmaster_mode.yml
+ when: ansible_distribution == 'CentOS' and ansible_distribution_major_version >= '7'
diff --git a/test/integration/targets/mysql_replication/tasks/mysql_replication_resetmaster_mode.yml b/test/integration/targets/mysql_replication/tasks/mysql_replication_resetmaster_mode.yml
new file mode 100644
index 00000000000000..19ea4ab4d3aa69
--- /dev/null
+++ b/test/integration/targets/mysql_replication/tasks/mysql_replication_resetmaster_mode.yml
@@ -0,0 +1,48 @@
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Needed for further tests:
+- name: Stop slave
+ mysql_replication:
+ login_host: 127.0.0.1
+ login_port: "{{ standby_port }}"
+ mode: stopslave
+
+- name: Reset slave all
+ mysql_replication:
+ login_host: 127.0.0.1
+ login_port: "{{ standby_port }}"
+ mode: resetslaveall
+
+# Get master initial status:
+- name: Get master status
+ mysql_replication:
+ login_host: 127.0.0.1
+ login_port: "{{ master_port }}"
+ mode: getmaster
+ register: master_initial_status
+
+# Test resetmaster mode:
+- name: Reset master
+ mysql_replication:
+ login_host: 127.0.0.1
+ login_port: "{{ master_port }}"
+ mode: resetmaster
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["RESET MASTER"]
+
+# Get master final status:
+- name: Get master status
+ mysql_replication:
+ login_host: 127.0.0.1
+ login_port: "{{ master_port }}"
+ mode: getmaster
+ register: master_final_status
+
+- assert:
+ that:
+ - master_initial_status.File != master_final_status.File
| ##### SUMMARY
mysql_replication: add resetmaster choice to mode param
fixes: https://github.com/ansible/ansible/issues/42870
##### ISSUE TYPE
- Feature Pull Request
##### COMPONENT NAME
```lib/ansible/modules/database/mysql/mysql_replication.py```
##### EXAMPLE
```
- name: >
Run RESET MASTER command which will delete all existing binary log files
and reset the binary log index file on the master
mysql_replication:
mode: resetmaster
```
| https://api.github.com/repos/ansible/ansible/pulls/63321 | 2019-10-10T06:50:37Z | 2019-10-10T11:02:58Z | 2019-10-10T11:02:57Z | 2019-11-13T20:00:29Z | 1,486 | ansible/ansible | 49,633 |
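Under the hood the new mode issues a single statement, as the `reset_master` helper in the diff shows. A rough standalone equivalent — pymysql is used here only for illustration, and the connection details are placeholders:

```python
import pymysql

# Placeholder credentials; RESET MASTER requires the RELOAD privilege.
conn = pymysql.connect(host="127.0.0.1", port=3306, user="root", password="secret")
with conn.cursor() as cursor:
    cursor.execute("RESET MASTER")  # deletes all binary logs and resets the index file
conn.close()
```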
Allow specifying custom input/output schemas for runnables with .with_types() | diff --git a/libs/langchain/langchain/schema/runnable/base.py b/libs/langchain/langchain/schema/runnable/base.py
index 818bcf3c38a453..9968595677f439 100644
--- a/libs/langchain/langchain/schema/runnable/base.py
+++ b/libs/langchain/langchain/schema/runnable/base.py
@@ -585,6 +585,22 @@ def with_config(
kwargs={},
)
+ def with_types(
+ self,
+ *,
+ input_type: Optional[Type[Input]] = None,
+ output_type: Optional[Type[Output]] = None,
+ ) -> Runnable[Input, Output]:
+ """
+ Bind input and output types to a Runnable, returning a new Runnable.
+ """
+ return RunnableBinding(
+ bound=self,
+ custom_input_type=input_type,
+ custom_output_type=output_type,
+ kwargs={},
+ )
+
def with_retry(
self,
*,
@@ -2277,6 +2293,11 @@ def get_lc_namespace(cls) -> List[str]:
def bind(self, **kwargs: Any) -> RunnableEach[Input, Output]:
return RunnableEach(bound=self.bound.bind(**kwargs))
+ def with_config(
+ self, config: Optional[RunnableConfig] = None, **kwargs: Any
+ ) -> RunnableEach[Input, Output]:
+ return RunnableEach(bound=self.bound.with_config(config, **kwargs))
+
def _invoke(
self,
inputs: List[Input],
@@ -2321,6 +2342,10 @@ class RunnableBinding(RunnableSerializable[Input, Output]):
config: RunnableConfig = Field(default_factory=dict)
+ custom_input_type: Optional[Union[Type[Input], BaseModel]] = None
+
+ custom_output_type: Optional[Union[Type[Output], BaseModel]] = None
+
class Config:
arbitrary_types_allowed = True
@@ -2330,6 +2355,8 @@ def __init__(
bound: Runnable[Input, Output],
kwargs: Mapping[str, Any],
config: Optional[RunnableConfig] = None,
+ custom_input_type: Optional[Union[Type[Input], BaseModel]] = None,
+ custom_output_type: Optional[Union[Type[Output], BaseModel]] = None,
**other_kwargs: Any,
) -> None:
config = config or {}
@@ -2342,24 +2369,43 @@ def __init__(
f"Configurable key '{key}' not found in runnable with"
f" config keys: {allowed_keys}"
)
- super().__init__(bound=bound, kwargs=kwargs, config=config, **other_kwargs)
+ super().__init__(
+ bound=bound,
+ kwargs=kwargs,
+ config=config,
+ custom_input_type=custom_input_type,
+ custom_output_type=custom_output_type,
+ **other_kwargs,
+ )
@property
def InputType(self) -> Type[Input]:
- return self.bound.InputType
+ return (
+ cast(Type[Input], self.custom_input_type)
+ if self.custom_input_type is not None
+ else self.bound.InputType
+ )
@property
def OutputType(self) -> Type[Output]:
- return self.bound.OutputType
+ return (
+ cast(Type[Output], self.custom_output_type)
+ if self.custom_output_type is not None
+ else self.bound.OutputType
+ )
def get_input_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
+ if self.custom_input_type is not None:
+ return super().get_input_schema(config)
return self.bound.get_input_schema(merge_configs(self.config, config))
def get_output_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
+ if self.custom_output_type is not None:
+ return super().get_output_schema(config)
return self.bound.get_output_schema(merge_configs(self.config, config))
@property
@@ -2394,6 +2440,23 @@ def with_config(
config=cast(RunnableConfig, {**self.config, **(config or {}), **kwargs}),
)
+ def with_types(
+ self,
+ input_type: Optional[Union[Type[Input], BaseModel]] = None,
+ output_type: Optional[Union[Type[Output], BaseModel]] = None,
+ ) -> Runnable[Input, Output]:
+ return self.__class__(
+ bound=self.bound,
+ kwargs=self.kwargs,
+ config=self.config,
+ custom_input_type=input_type
+ if input_type is not None
+ else self.custom_input_type,
+ custom_output_type=output_type
+ if output_type is not None
+ else self.custom_output_type,
+ )
+
def with_retry(self, **kwargs: Any) -> Runnable[Input, Output]:
return self.__class__(
bound=self.bound.with_retry(**kwargs),
diff --git a/libs/langchain/tests/unit_tests/schema/runnable/__snapshots__/test_runnable.ambr b/libs/langchain/tests/unit_tests/schema/runnable/__snapshots__/test_runnable.ambr
index e78280380bf353..c26a88e57ead2a 100644
--- a/libs/langchain/tests/unit_tests/schema/runnable/__snapshots__/test_runnable.ambr
+++ b/libs/langchain/tests/unit_tests/schema/runnable/__snapshots__/test_runnable.ambr
@@ -3747,7 +3747,9 @@
"Thought:"
]
},
- "config": {}
+ "config": {},
+ "custom_input_type": null,
+ "custom_output_type": null
}
},
"llm": {
diff --git a/libs/langchain/tests/unit_tests/schema/runnable/test_runnable.py b/libs/langchain/tests/unit_tests/schema/runnable/test_runnable.py
index 5bc55cca31ccaf..5632a453952a58 100644
--- a/libs/langchain/tests/unit_tests/schema/runnable/test_runnable.py
+++ b/libs/langchain/tests/unit_tests/schema/runnable/test_runnable.py
@@ -39,6 +39,7 @@
MessagesPlaceholder,
SystemMessagePromptTemplate,
)
+from langchain.pydantic_v1 import BaseModel
from langchain.schema.document import Document
from langchain.schema.messages import (
AIMessage,
@@ -587,6 +588,26 @@ def test_schema_complex_seq() -> None:
"type": "string",
}
+ assert chain2.with_types(input_type=str).input_schema.schema() == {
+ "title": "RunnableBindingInput",
+ "type": "string",
+ }
+
+ assert chain2.with_types(input_type=int).output_schema.schema() == {
+ "title": "StrOutputParserOutput",
+ "type": "string",
+ }
+
+ class InputType(BaseModel):
+ person: str
+
+ assert chain2.with_types(input_type=InputType).input_schema.schema() == {
+ "title": "InputType",
+ "type": "object",
+ "properties": {"person": {"title": "Person", "type": "string"}},
+ "required": ["person"],
+ }
+
def test_schema_chains() -> None:
model = FakeListChatModel(responses=[""])
|
| https://api.github.com/repos/langchain-ai/langchain/pulls/12083 | 2023-10-20T16:13:22Z | 2023-10-22T16:26:49Z | 2023-10-22T16:26:49Z | 2023-10-22T16:26:49Z | 1,628 | langchain-ai/langchain | 42,824 |
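A usage sketch of the new method, modeled on the tests in this diff — `RunnableLambda` stands in for any chain:

```python
# Override a chain's inferred input schema with a custom pydantic model.
from langchain.pydantic_v1 import BaseModel
from langchain.schema.runnable import RunnableLambda

class InputType(BaseModel):
    person: str

chain = RunnableLambda(lambda x: f"Tell me about {x['person']}")
typed = chain.with_types(input_type=InputType)
print(typed.input_schema.schema())  # now reports InputType's fields
```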
Add `device` argument to PyTorch Hub models | diff --git a/hubconf.py b/hubconf.py
index 3b3dfe0e9e2..f74e70c85a6 100644
--- a/hubconf.py
+++ b/hubconf.py
@@ -8,7 +8,7 @@
import torch
-def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True):
+def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
"""Creates a specified YOLOv5 model
Arguments:
@@ -18,6 +18,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo
classes (int): number of model classes
autoshape (bool): apply YOLOv5 .autoshape() wrapper to model
verbose (bool): print all information to screen
+ device (str, torch.device, None): device to use for model parameters
Returns:
YOLOv5 pytorch model
@@ -50,7 +51,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo
model.names = ckpt['model'].names # set class names attribute
if autoshape:
model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS
- device = select_device('0' if torch.cuda.is_available() else 'cpu') # default to GPU if available
+ device = select_device('0' if torch.cuda.is_available() else 'cpu') if device is None else torch.device(device)
return model.to(device)
except Exception as e:
@@ -59,49 +60,49 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo
raise Exception(s) from e
-def custom(path='path/to/model.pt', autoshape=True, verbose=True):
+def custom(path='path/to/model.pt', autoshape=True, verbose=True, device=None):
# YOLOv5 custom or local model
- return _create(path, autoshape=autoshape, verbose=verbose)
+ return _create(path, autoshape=autoshape, verbose=verbose, device=device)
-def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True):
+def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
# YOLOv5-small model https://github.com/ultralytics/yolov5
- return _create('yolov5s', pretrained, channels, classes, autoshape, verbose)
+ return _create('yolov5s', pretrained, channels, classes, autoshape, verbose, device)
-def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True):
+def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
# YOLOv5-medium model https://github.com/ultralytics/yolov5
- return _create('yolov5m', pretrained, channels, classes, autoshape, verbose)
+ return _create('yolov5m', pretrained, channels, classes, autoshape, verbose, device)
-def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True):
+def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
# YOLOv5-large model https://github.com/ultralytics/yolov5
- return _create('yolov5l', pretrained, channels, classes, autoshape, verbose)
+ return _create('yolov5l', pretrained, channels, classes, autoshape, verbose, device)
-def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True):
+def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
# YOLOv5-xlarge model https://github.com/ultralytics/yolov5
- return _create('yolov5x', pretrained, channels, classes, autoshape, verbose)
+ return _create('yolov5x', pretrained, channels, classes, autoshape, verbose, device)
-def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True):
+def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
# YOLOv5-small-P6 model https://github.com/ultralytics/yolov5
- return _create('yolov5s6', pretrained, channels, classes, autoshape, verbose)
+ return _create('yolov5s6', pretrained, channels, classes, autoshape, verbose, device)
-def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True):
+def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
# YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5
- return _create('yolov5m6', pretrained, channels, classes, autoshape, verbose)
+ return _create('yolov5m6', pretrained, channels, classes, autoshape, verbose, device)
-def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True):
+def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
# YOLOv5-large-P6 model https://github.com/ultralytics/yolov5
- return _create('yolov5l6', pretrained, channels, classes, autoshape, verbose)
+ return _create('yolov5l6', pretrained, channels, classes, autoshape, verbose, device)
-def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True):
+def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
# YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5
- return _create('yolov5x6', pretrained, channels, classes, autoshape, verbose)
+ return _create('yolov5x6', pretrained, channels, classes, autoshape, verbose, device)
if __name__ == '__main__':
| For my use case I would like to load the pretrained models from torch.hub onto the CPU even if CUDA is available.
This merge request adds an optional parameter `device` to the `hubconf.py` functions to allow manual selection of the target device.
## 🛠️ PR Summary
<sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)</sub>
### 🌟 Summary
Added model device selection support to YOLOv5 model creation functions.
### 📊 Key Changes
- Introduced an additional `device` parameter to the `_create()` function and all model creator functions (e.g., `yolov5s`, `yolov5m`, etc.).
- The `device` parameter allows explicit selection of the computing device (CPU or GPU) where the model parameters will be loaded.
### 🎯 Purpose & Impact
- **Flexibility**: Users can now specify the device where they want to load the model, improving usability for systems with multiple GPUs or special hardware configurations.
- **Convenience**: This change makes it easier to deploy models in different environments (e.g., servers, laptops with or without GPU) by simplifying device management in code.
- **Control**: Advanced users gain finer control over resource allocation, which can improve performance and efficiency when running models. | https://api.github.com/repos/ultralytics/yolov5/pulls/3104 | 2021-05-10T15:22:24Z | 2021-05-16T15:41:27Z | 2021-05-16T15:41:27Z | 2024-01-19T18:23:41Z | 1,579 | ultralytics/yolov5 | 25,537 |
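Usage sketch: `torch.hub.load` forwards extra keyword arguments to the `hubconf.py` entrypoints, so the new parameter can be passed straight through (network access is needed for the hub download):

```python
import torch

# Force CPU even when CUDA is available:
model_cpu = torch.hub.load("ultralytics/yolov5", "yolov5s", device="cpu")

# Or pin to a specific GPU:
model_gpu = torch.hub.load("ultralytics/yolov5", "yolov5s", device="cuda:0")
```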
unit tests & legacy codes | diff --git a/metagpt/strategy/task_type.py b/metagpt/strategy/task_type.py
index 9eeeb79ce..7c88817cc 100644
--- a/metagpt/strategy/task_type.py
+++ b/metagpt/strategy/task_type.py
@@ -54,6 +54,20 @@ class TaskType(Enum):
)
OTHER = TaskTypeDef(name="other", desc="Any tasks not in the defined categories")
+ # Legacy TaskType to support tool recommendation using type match. You don't need to define task types if you have no human priors to inject.
+ TEXT2IMAGE = TaskTypeDef(
+ name="text2image",
+ desc="Related to text2image, image2image using stable diffusion model.",
+ )
+ WEBSCRAPING = TaskTypeDef(
+ name="web scraping",
+ desc="For scraping data from web pages.",
+ )
+ EMAIL_LOGIN = TaskTypeDef(
+ name="email login",
+ desc="For logging to an email.",
+ )
+
@property
def type_name(self):
return self.value.name
diff --git a/metagpt/tools/libs/email_login.py b/metagpt/tools/libs/email_login.py
index 757ac2b87..32626ac55 100644
--- a/metagpt/tools/libs/email_login.py
+++ b/metagpt/tools/libs/email_login.py
@@ -23,7 +23,7 @@
}
-@register_tool()
+@register_tool(tags=["email login"])
def email_login_imap(email_address, email_password):
"""
Use imap_tools package to log in to your email (the email that supports IMAP protocol) to verify and return the account object.
diff --git a/metagpt/tools/libs/gpt_v_generator.py b/metagpt/tools/libs/gpt_v_generator.py
index 0e9f34770..4eba3d5ee 100644
--- a/metagpt/tools/libs/gpt_v_generator.py
+++ b/metagpt/tools/libs/gpt_v_generator.py
@@ -28,7 +28,7 @@
Now, please generate the corresponding webpage code including HTML, CSS and JavaScript:"""
-@register_tool(include_functions=["__init__", "generate_webpages", "save_webpages"])
+@register_tool(tags=["image2webpage"], include_functions=["__init__", "generate_webpages", "save_webpages"])
class GPTvGenerator:
"""Class for generating webpage code from a given webpage screenshot.
diff --git a/metagpt/tools/tool_convert.py b/metagpt/tools/tool_convert.py
index 3d6a49769..42c65b9e7 100644
--- a/metagpt/tools/tool_convert.py
+++ b/metagpt/tools/tool_convert.py
@@ -12,6 +12,8 @@ def convert_code_to_tool_schema(obj, include: list[str] = None):
if inspect.isclass(obj):
schema = {"type": "class", "description": remove_spaces(docstring), "methods": {}}
for name, method in inspect.getmembers(obj, inspect.isfunction):
+ if name.startswith("_") and name != "__init__": # skip private methods
+ continue
if include and name not in include:
continue
# method_doc = inspect.getdoc(method)
diff --git a/tests/metagpt/actions/di/test_write_analysis_code.py b/tests/metagpt/actions/di/test_write_analysis_code.py
index b1e51d8ce..2996f31f7 100644
--- a/tests/metagpt/actions/di/test_write_analysis_code.py
+++ b/tests/metagpt/actions/di/test_write_analysis_code.py
@@ -5,7 +5,7 @@
@pytest.mark.asyncio
-async def test_write_code():
+async def test_write_code_with_plan():
write_code = WriteAnalysisCode()
user_requirement = "Run data analysis on sklearn Iris dataset, include a plot"
@@ -16,9 +16,29 @@ async def test_write_code():
assert "sklearn" in code
+@pytest.mark.asyncio
+async def test_write_code_with_tools():
+ write_code = WriteAnalysisCode()
+
+ user_requirement = "Preprocess sklearn Wine recognition dataset and train a model to predict wine class (20% as validation), and show validation accuracy."
+ tool_info = """
+ ## Capabilities
+ - You can utilize pre-defined tools in any code lines from 'Available Tools' in the form of Python class or function.
+ - You can freely combine the use of any other public packages, like sklearn, numpy, pandas, etc..
+
+ ## Available Tools:
+ Each tool is described in JSON format. When you call a tool, import the tool from its path first.
+ {'FillMissingValue': {'type': 'class', 'description': 'Completing missing values with simple strategies.', 'methods': {'__init__': {'type': 'function', 'description': 'Initialize self. ', 'signature': '(self, features: \'list\', strategy: "Literal[\'mean\', \'median\', \'most_frequent\', \'constant\']" = \'mean\', fill_value=None)', 'parameters': 'Args: features (list): Columns to be processed. strategy (Literal["mean", "median", "most_frequent", "constant"], optional): The imputation strategy, notice \'mean\' and \'median\' can only be used for numeric features. Defaults to \'mean\'. fill_value (int, optional): Fill_value is used to replace all occurrences of missing_values. Defaults to None.'}, 'fit': {'type': 'function', 'description': 'Fit a model to be used in subsequent transform. ', 'signature': "(self, df: 'pd.DataFrame')", 'parameters': 'Args: df (pd.DataFrame): The input DataFrame.'}, 'fit_transform': {'type': 'function', 'description': 'Fit and transform the input DataFrame. ', 'signature': "(self, df: 'pd.DataFrame') -> 'pd.DataFrame'", 'parameters': 'Args: df (pd.DataFrame): The input DataFrame. Returns: pd.DataFrame: The transformed DataFrame.'}, 'transform': {'type': 'function', 'description': 'Transform the input DataFrame with the fitted model. ', 'signature': "(self, df: 'pd.DataFrame') -> 'pd.DataFrame'", 'parameters': 'Args: df (pd.DataFrame): The input DataFrame. Returns: pd.DataFrame: The transformed DataFrame.'}}, 'tool_path': 'metagpt/tools/libs/data_preprocess.py'}
+ """
+
+ code = await write_code.run(user_requirement=user_requirement, tool_info=tool_info)
+ assert len(code) > 0
+ assert "metagpt.tools.libs" in code
+
+
@pytest.mark.asyncio
async def test_debug_with_reflection():
- user_requirement = "Run data analysis on sklearn Iris dataset, include a plot"
+ user_requirement = "read a dataset test.csv and print its head"
plan_status = """
## Finished Tasks
| https://api.github.com/repos/geekan/MetaGPT/pulls/1000 | 2024-03-13T08:32:23Z | 2024-03-13T08:37:54Z | 2024-03-13T08:37:54Z | 2024-03-13T08:37:54Z | 1,513 | geekan/MetaGPT | 16,668 |
|
Refine S3FD post-processing: NMS box voting | diff --git a/plugins/extract/detect/s3fd.py b/plugins/extract/detect/s3fd.py
index 469db1715a..398719e88b 100644
--- a/plugins/extract/detect/s3fd.py
+++ b/plugins/extract/detect/s3fd.py
@@ -220,28 +220,27 @@ def __init__(self, model_path, model_kwargs, allow_growth, confidence):
super().__init__("S3FD", model_path, model_kwargs=model_kwargs, allow_growth=allow_growth)
self.load_model()
self.confidence = confidence
+ self.average_img = np.array([104.0, 117.0, 123.0])
logger.debug("Initialized: %s", self.__class__.__name__)
- @staticmethod
- def prepare_batch(batch):
+ def prepare_batch(self, batch):
""" Prepare a batch for prediction """
- batch = batch - np.array([104.0, 117.0, 123.0])
+ batch = batch - self.average_img
batch = batch.transpose(0, 3, 1, 2)
return batch
- def finalize_predictions(self, bboxlists):
+ def finalize_predictions(self, bounding_boxes_scales):
""" Detect faces """
ret = list()
- for i in range(bboxlists[0].shape[0]):
- bboxlist = [x[i:i+1, ...] for x in bboxlists]
- bboxlist = self.post_process(bboxlist)
- keep = self.nms(bboxlist, 0.3)
- bboxlist = bboxlist[keep, :]
- bboxlist = [x for x in bboxlist if x[-1] >= self.confidence]
- ret.append(np.array(bboxlist))
+ batch_size = range(bounding_boxes_scales[0].shape[0])
+ for img in batch_size:
+ bboxlist = [scale[img:img+1] for scale in bounding_boxes_scales]
+ boxes = self._post_process(bboxlist)
+ bboxlist = self._nms(boxes, 0.5)
+ ret.append(bboxlist)
return ret
- def post_process(self, bboxlist):
+ def _post_process(self, bboxlist):
""" Perform post processing on output
TODO: do this on the batch.
"""
@@ -255,16 +254,14 @@ def post_process(self, bboxlist):
for _, hindex, windex in poss:
axc, ayc = stride / 2 + windex * stride, stride / 2 + hindex * stride
score = ocls[0, 1, hindex, windex]
- loc = np.ascontiguousarray(oreg[0, :, hindex, windex]).reshape((1, 4))
- priors = np.array([[axc / 1.0, ayc / 1.0, stride * 4 / 1.0, stride * 4 / 1.0]])
- variances = [0.1, 0.2]
- box = self.decode(loc, priors, variances)
- x_1, y_1, x_2, y_2 = box[0] * 1.0
- retval.append([x_1, y_1, x_2, y_2, score])
- retval = np.array(retval)
- if len(retval) == 0:
- retval = np.zeros((1, 5))
- return retval
+ if score >= self.confidence:
+ loc = np.ascontiguousarray(oreg[0, :, hindex, windex]).reshape((1, 4))
+ priors = np.array([[axc / 1.0, ayc / 1.0, stride * 4 / 1.0, stride * 4 / 1.0]])
+ box = self.decode(loc, priors)
+ x_1, y_1, x_2, y_2 = box[0] * 1.0
+ retval.append([x_1, y_1, x_2, y_2, score])
+ return_numpy = np.array(retval) if len(retval) != 0 else np.zeros((1, 5))
+ return return_numpy
@staticmethod
def softmax(inp, axis):
@@ -272,7 +269,7 @@ def softmax(inp, axis):
return np.exp(inp - logsumexp(inp, axis=axis, keepdims=True))
@staticmethod
- def decode(loc, priors, variances):
+ def decode(loc, priors):
"""Decode locations from predictions using priors to undo
the encoding we did for offset regression at train time.
Args:
@@ -284,36 +281,36 @@ def decode(loc, priors, variances):
Return:
decoded bounding box predictions
"""
+ variances = [0.1, 0.2]
boxes = np.concatenate((priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
- priors[:, 2:] * np.exp(loc[:, 2:] * variances[1])),
- 1)
+ priors[:, 2:] * np.exp(loc[:, 2:] * variances[1])), axis=1)
boxes[:, :2] -= boxes[:, 2:] / 2
boxes[:, 2:] += boxes[:, :2]
return boxes
- @staticmethod
- def nms(dets, thresh):
- # pylint:disable=too-many-locals
+ def _nms(self, boxes, threshold):
""" Perform Non-Maximum Suppression """
- keep = list()
- if len(dets) == 0:
- return keep
-
- x_1, y_1, x_2, y_2, scores = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3], dets[:, 4]
- areas = (x_2 - x_1 + 1) * (y_2 - y_1 + 1)
- order = scores.argsort()[::-1]
-
- keep = []
- while order.size > 0:
- i = order[0]
- keep.append(i)
- xx_1, yy_1 = np.maximum(x_1[i], x_1[order[1:]]), np.maximum(y_1[i], y_1[order[1:]])
- xx_2, yy_2 = np.minimum(x_2[i], x_2[order[1:]]), np.minimum(y_2[i], y_2[order[1:]])
-
- width, height = np.maximum(0.0, xx_2 - xx_1 + 1), np.maximum(0.0, yy_2 - yy_1 + 1)
- ovr = width * height / (areas[i] + areas[order[1:]] - width * height)
-
- inds = np.where(ovr <= thresh)[0]
- order = order[inds + 1]
-
- return keep
+ retained_box_indices = list()
+
+ areas = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
+ ranked_indices = boxes[:, 4].argsort()[::-1]
+ while ranked_indices.size > 0:
+ best = ranked_indices[0]
+ rest = ranked_indices[1:]
+
+ max_of_xy = np.maximum(boxes[best, :2], boxes[rest, :2])
+ min_of_xy = np.minimum(boxes[best, 2:4], boxes[rest, 2:4])
+ width_height = np.maximum(0, min_of_xy - max_of_xy + 1)
+ intersection_areas = width_height[:, 0] * width_height[:, 1]
+ iou = intersection_areas / (areas[best] + areas[rest] - intersection_areas)
+
+ overlapping_boxes = (iou > threshold).nonzero()[0]
+ if len(overlapping_boxes) != 0:
+ overlap_set = ranked_indices[overlapping_boxes + 1]
+ vote = np.average(boxes[overlap_set, :4], axis=0, weights=boxes[overlap_set, 4])
+ boxes[best, :4] = vote
+ retained_box_indices.append(best)
+
+ non_overlapping_boxes = (iou <= threshold).nonzero()[0]
+ ranked_indices = ranked_indices[non_overlapping_boxes + 1]
+ return boxes[retained_box_indices]
diff --git a/plugins/extract/detect/s3fd_defaults.py b/plugins/extract/detect/s3fd_defaults.py
index 59fcb0782d..56de4689db 100755
--- a/plugins/extract/detect/s3fd_defaults.py
+++ b/plugins/extract/detect/s3fd_defaults.py
@@ -51,7 +51,7 @@
_DEFAULTS = {
"confidence": {
- "default": 50,
+ "default": 70,
"info": "The confidence level at which the detector has succesfully found a face.\n"
"Higher levels will be more discriminating, lower levels will have more false "
"positives.",
| S3FD technically uses a variant of NMS bounding-box aggregation that applies box voting in its training and evaluation
(seen here: https://github.com/sfzhang15/SFD/blob/c1abcf014d430354ce3a61c7f33de0dcf2021186/sfd_test_code/WIDER_FACE/wider_test.py#L109).
- Modify the code to follow the same logic.
In testing on a 2,000-image dataset, bounding-box coordinates almost always moved less
than 5 pixels. Box voting should also be more stable for locating faces across sequential
video frames.
- Streamline the code logic to use more array functions.
No noticeable timing improvement overall, but it serves to offset the additional calculation
time required by voting.
- Set the default confidence to 70.
S3FD is a very capable detector. Moving the default confidence from 50 to 70 gave better results in my tests, with the change only removing false positives. 70 is also suggested by the S3FD author.
This prepares for streamlining the rest of the S3FD post-processing and potentially applying it to MTCNN as well. | https://api.github.com/repos/deepfakes/faceswap/pulls/902 | 2019-10-09T18:48:56Z | 2019-11-14T12:49:47Z | 2019-11-14T12:49:47Z | 2019-12-07T04:50:29Z | 2,074 | deepfakes/faceswap | 18,757 |
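A worked illustration of the box-voting step with made-up numbers: rather than keeping only the highest-scoring box, overlapping detections are merged into a confidence-weighted average, matching the `np.average(..., weights=...)` call in `_nms` above.

```python
import numpy as np

boxes = np.array([[10., 10., 50., 50., 0.9],   # x1, y1, x2, y2, score
                  [12., 11., 52., 49., 0.6]])  # heavily overlaps the first box
vote = np.average(boxes[:, :4], axis=0, weights=boxes[:, 4])
print(vote)  # [10.8 10.4 50.8 49.6] -- the merged box the refined NMS keeps
```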
Clarified docs about increasing the work factor for bcrypt hasher. | diff --git a/AUTHORS b/AUTHORS
index 1a66ba64778c4..d9318c618f3c2 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -978,6 +978,7 @@ answer newbie questions, and generally made Django that much better:
ymasuda@ethercube.com
Yoong Kang Lim <yoongkang.lim@gmail.com>
Yusuke Miyazaki <miyazaki.dev@gmail.com>
+ yyyyyyyan <contact@yyyyyyyan.tech>
Zac Hatfield-Dodds <zac.hatfield.dodds@gmail.com>
Zachary Voase <zacharyvoase@gmail.com>
Zach Liu <zachliu@gmail.com>
diff --git a/docs/topics/auth/passwords.txt b/docs/topics/auth/passwords.txt
index 87381ef67b670..1d3d1653faa44 100644
--- a/docs/topics/auth/passwords.txt
+++ b/docs/topics/auth/passwords.txt
@@ -172,8 +172,9 @@ iterations needs to be increased. We've chosen a reasonable default (and will
increase it with each release of Django), but you may wish to tune it up or
down, depending on your security needs and available processing power. To do so,
you'll subclass the appropriate algorithm and override the ``iterations``
-parameters. For example, to increase the number of iterations used by the
-default PBKDF2 algorithm:
+parameter (use the ``rounds`` parameter when subclassing a bcrypt hasher). For
+example, to increase the number of iterations used by the default PBKDF2
+algorithm:
#. Create a subclass of ``django.contrib.auth.hashers.PBKDF2PasswordHasher``::
@@ -201,6 +202,11 @@ default PBKDF2 algorithm:
That's it -- now your Django install will use more iterations when it
stores passwords using PBKDF2.
+.. note::
+
+ bcrypt ``rounds`` is a logarithmic work factor, e.g. 12 rounds means
+ ``2 ** 12`` iterations.
+
Argon2
~~~~~~
| The [Password Management docs](https://docs.djangoproject.com/en/dev/topics/auth/passwords/) have a section on increasing the work factor of PBKDF2 and bcrypt. The section instructs the reader to subclass the appropriate algorithm class and override the **iterations** attribute. This attribute, however, is specific to PBKDF2. The bcrypt class uses the **rounds** attribute (the actual number of iterations corresponds to ``2 ** rounds``). This PR clarifies this by adding an example of changing the bcrypt rounds attribute. | https://api.github.com/repos/django/django/pulls/14207 | 2021-04-01T00:54:45Z | 2021-05-20T19:01:18Z | 2021-05-20T19:01:18Z | 2021-05-20T19:01:18Z | 470 | django/django | 50,732 |
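The pattern the updated docs describe, sketched for bcrypt — the class name comes from `django.contrib.auth.hashers`, and 13 rounds means `2 ** 13` iterations, double the default of 12:

```python
from django.contrib.auth.hashers import BCryptSHA256PasswordHasher

class MyBCryptSHA256PasswordHasher(BCryptSHA256PasswordHasher):
    rounds = 13  # logarithmic work factor: 2 ** 13 iterations

# List this hasher first in settings.PASSWORD_HASHERS to make it the default.
```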
add llm reranker nb | diff --git a/docs/how_to/query/second_stage.md b/docs/how_to/query/second_stage.md
index 4a6179b36e441..e80b8c0f82d2f 100644
--- a/docs/how_to/query/second_stage.md
+++ b/docs/how_to/query/second_stage.md
@@ -160,4 +160,5 @@ maxdepth: 1
../../examples/node_postprocessor/TimeWeightedPostprocessorDemo.ipynb
../../examples/node_postprocessor/PII.ipynb
../../examples/node_postprocessor/CohereRerank.ipynb
+../../examples/node_postprocessor/LLMReranker-Gatsby.ipynb
```
\ No newline at end of file
| https://api.github.com/repos/run-llama/llama_index/pulls/3347 | 2023-05-14T18:07:03Z | 2023-05-14T18:10:13Z | 2023-05-14T18:10:13Z | 2023-05-14T18:10:14Z | 156 | run-llama/llama_index | 5,945 |
|
[requires.io] dependency update on master branch | diff --git a/setup.py b/setup.py
index 7300edd18c..9e1e77a9b4 100644
--- a/setup.py
+++ b/setup.py
@@ -71,11 +71,11 @@
"ldap3>=2.5,<2.6",
"passlib>=1.6.5, <1.8",
"pyasn1>=0.3.1,<0.5",
- "pyOpenSSL>=17.5,<17.6",
+ "pyOpenSSL>=17.5,<18.1",
"pyparsing>=2.1.3, <2.3",
"pyperclip>=1.6.0, <1.7",
"ruamel.yaml>=0.13.2, <0.16",
- "sortedcontainers>=1.5.4, <1.6",
+ "sortedcontainers>=1.5.4,<2.1",
"tornado>=4.3,<5.1",
"urwid>=2.0.1,<2.1",
"wsproto>=0.11.0,<0.12.0",
| https://api.github.com/repos/mitmproxy/mitmproxy/pulls/3154 | 2018-05-24T20:30:12Z | 2018-05-24T21:48:46Z | 2018-05-24T21:48:46Z | 2018-05-24T21:48:49Z | 256 | mitmproxy/mitmproxy | 28,085 |