| column | dtype | values |
| --- | --- | --- |
| `title` | string | lengths 2–169 |
| `diff` | string | lengths 235–19.5k |
| `body` | string | lengths 0–30.5k |
| `url` | string | lengths 48–84 |
| `created_at` | string | lengths 20–20 |
| `closed_at` | string | lengths 20–20 |
| `merged_at` | string | lengths 20–20 |
| `updated_at` | string | lengths 20–20 |
| `diff_len` | float64 | 101–3.99k |
| `repo_name` | string | 83 distinct values |
| `__index_level_0__` | int64 | 15–52.7k |
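Given the schema above, here is a minimal loading-and-filtering sketch using the Hugging Face `datasets` library. The file name `pr_diffs.jsonl` is a placeholder, not the dataset's real identifier; the column names are the ones listed in the table.

```python
from datasets import load_dataset

# Hypothetical local export of the rows below; the real dataset path is not given here.
ds = load_dataset("json", data_files="pr_diffs.jsonl", split="train")

# Keep merged PRs with reasonably small diffs, using the columns listed above.
small_merged = ds.filter(
    lambda row: row["merged_at"] is not None and row["diff_len"] < 1000
)
print(small_merged[0]["repo_name"], small_merged[0]["title"])
```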
[gym.vector]: add `terminal_observation` to `info`
diff --git a/gym/vector/async_vector_env.py b/gym/vector/async_vector_env.py index 331dfcd5568..e8b1f537de8 100644 --- a/gym/vector/async_vector_env.py +++ b/gym/vector/async_vector_env.py @@ -413,6 +413,7 @@ def _worker(index, env_fn, pipe, parent_pipe, shared_memory, error_queue): elif command == "step": observation, reward, done, info = env.step(data) if done: + info["terminal_observation"] = observation observation = env.reset() pipe.send(((observation, reward, done, info), True)) elif command == "seed": @@ -453,6 +454,7 @@ def _worker_shared_memory(index, env_fn, pipe, parent_pipe, shared_memory, error elif command == "step": observation, reward, done, info = env.step(data) if done: + info["terminal_observation"] = observation observation = env.reset() write_to_shared_memory( index, observation, shared_memory, observation_space diff --git a/gym/vector/sync_vector_env.py b/gym/vector/sync_vector_env.py index 068022c56a0..5652f00294f 100644 --- a/gym/vector/sync_vector_env.py +++ b/gym/vector/sync_vector_env.py @@ -82,6 +82,7 @@ def step_wait(self): for i, (env, action) in enumerate(zip(self.envs, self._actions)): observation, self._rewards[i], self._dones[i], info = env.step(action) if self._dones[i]: + info["terminal_observation"] = observation observation = env.reset() observations.append(observation) infos.append(info) diff --git a/tests/vector/test_vector_env.py b/tests/vector/test_vector_env.py index 6bd404b431d..7ea3664305a 100644 --- a/tests/vector/test_vector_env.py +++ b/tests/vector/test_vector_env.py @@ -34,8 +34,16 @@ def test_vector_env_equal(shared_memory): actions = async_env.action_space.sample() assert actions in sync_env.action_space - async_observations, async_rewards, async_dones, _ = async_env.step(actions) - sync_observations, sync_rewards, sync_dones, _ = sync_env.step(actions) + # fmt: off + async_observations, async_rewards, async_dones, async_infos = async_env.step(actions) + sync_observations, sync_rewards, sync_dones, sync_infos = sync_env.step(actions) + # fmt: on + + for idx in range(len(sync_dones)): + if sync_dones[idx]: + assert "terminal_observation" in async_infos[idx] + assert "terminal_observation" in sync_infos[idx] + assert sync_dones[idx] assert np.all(async_observations == sync_observations) assert np.all(async_rewards == sync_rewards)
This PR follows up on #1632 and adds a `terminal_observation` key to `info` in the vectorized environments at the end of an env's episode (a usage sketch follows this record). The key `terminal_observation` is consistent with SB3's vectorized environment. See ![image](https://user-images.githubusercontent.com/5555347/141665286-0af06f00-6440-4a52-95ce-206cda06b83f.png)
https://api.github.com/repos/openai/gym/pulls/2484
2021-11-14T02:38:28Z
2021-11-14T13:57:45Z
2021-11-14T13:57:45Z
2022-01-27T22:32:14Z
663
openai/gym
5,113
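For the `terminal_observation` PR above, a minimal usage sketch. It assumes the gym API of that era (`reset()` returning only the observation, `step()` returning a 4-tuple), and `CartPole-v1` is just an arbitrary registered environment.

```python
import gym
from gym.vector import SyncVectorEnv

envs = SyncVectorEnv([lambda: gym.make("CartPole-v1") for _ in range(4)])
obs = envs.reset()
for _ in range(200):
    obs, rewards, dones, infos = envs.step(envs.action_space.sample())
    for i, done in enumerate(dones):
        if done:
            # With this PR, the finished episode's last observation is kept here,
            # while obs[i] already holds the observation from the automatic reset.
            terminal_obs = infos[i]["terminal_observation"]
```

The same key is set by both workers in `AsyncVectorEnv`, as shown in the diff, so the behaviour matches between the sync and async implementations.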
Bump actions/setup-python from 4.7.0 to 5.0.0
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 3a358a290a..b35a32d13a 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -13,7 +13,7 @@ jobs: steps: - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 - name: Set up Python - uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # v4.7.0 + uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 with: python-version: "3.x" - name: Run pre-commit diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml index 5461ccddfb..8178ed423a 100644 --- a/.github/workflows/run-tests.yml +++ b/.github/workflows/run-tests.yml @@ -23,7 +23,7 @@ jobs: steps: - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # v4.7.0 + uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 with: python-version: ${{ matrix.python-version }} cache: 'pip'
Bumps [actions/setup-python](https://github.com/actions/setup-python) from 4.7.0 to 5.0.0. <details> <summary>Release notes</summary> <p><em>Sourced from <a href="https://github.com/actions/setup-python/releases">actions/setup-python's releases</a>.</em></p> <blockquote> <h2>v5.0.0</h2> <h2>What's Changed</h2> <p>In scope of this release, we update node version runtime from node16 to node20 (<a href="https://redirect.github.com/actions/setup-python/pull/772">actions/setup-python#772</a>). Besides, we update dependencies to the latest versions.</p> <p><strong>Full Changelog</strong>: <a href="https://github.com/actions/setup-python/compare/v4.8.0...v5.0.0">https://github.com/actions/setup-python/compare/v4.8.0...v5.0.0</a></p> <h2>v4.8.0</h2> <h2>What's Changed</h2> <p>In scope of this release we added support for GraalPy (<a href="https://redirect.github.com/actions/setup-python/pull/694">actions/setup-python#694</a>). You can use this snippet to set up GraalPy:</p> <pre lang="yaml"><code>steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v4 with: python-version: 'graalpy-22.3' - run: python my_script.py </code></pre> <p>Besides, the release contains such changes as:</p> <ul> <li>Trim python version when reading from file by <a href="https://github.com/FerranPares"><code>@​FerranPares</code></a> in <a href="https://redirect.github.com/actions/setup-python/pull/628">actions/setup-python#628</a></li> <li>Use non-deprecated versions in examples by <a href="https://github.com/jeffwidman"><code>@​jeffwidman</code></a> in <a href="https://redirect.github.com/actions/setup-python/pull/724">actions/setup-python#724</a></li> <li>Change deprecation comment to past tense by <a href="https://github.com/jeffwidman"><code>@​jeffwidman</code></a> in <a href="https://redirect.github.com/actions/setup-python/pull/723">actions/setup-python#723</a></li> <li>Bump <code>@​babel/traverse</code> from 7.9.0 to 7.23.2 by <a href="https://github.com/dependabot"><code>@​dependabot</code></a> in <a href="https://redirect.github.com/actions/setup-python/pull/743">actions/setup-python#743</a></li> <li>advanced-usage.md: Encourage the use actions/checkout@v4 by <a href="https://github.com/cclauss"><code>@​cclauss</code></a> in <a href="https://redirect.github.com/actions/setup-python/pull/729">actions/setup-python#729</a></li> <li>Examples now use checkout@v4 by <a href="https://github.com/simonw"><code>@​simonw</code></a> in <a href="https://redirect.github.com/actions/setup-python/pull/738">actions/setup-python#738</a></li> <li>Update actions/checkout to v4 by <a href="https://github.com/dmitry-shibanov"><code>@​dmitry-shibanov</code></a> in <a href="https://redirect.github.com/actions/setup-python/pull/761">actions/setup-python#761</a></li> </ul> <h2>New Contributors</h2> <ul> <li><a href="https://github.com/FerranPares"><code>@​FerranPares</code></a> made their first contribution in <a href="https://redirect.github.com/actions/setup-python/pull/628">actions/setup-python#628</a></li> <li><a href="https://github.com/timfel"><code>@​timfel</code></a> made their first contribution in <a href="https://redirect.github.com/actions/setup-python/pull/694">actions/setup-python#694</a></li> <li><a href="https://github.com/jeffwidman"><code>@​jeffwidman</code></a> made their first contribution in <a href="https://redirect.github.com/actions/setup-python/pull/724">actions/setup-python#724</a></li> </ul> <p><strong>Full Changelog</strong>: <a 
href="https://github.com/actions/setup-python/compare/v4...v4.8.0">https://github.com/actions/setup-python/compare/v4...v4.8.0</a></p> <h2>v4.7.1</h2> <h2>What's Changed</h2> <ul> <li>Bump word-wrap from 1.2.3 to 1.2.4 by <a href="https://github.com/dependabot"><code>@​dependabot</code></a> in <a href="https://redirect.github.com/actions/setup-python/pull/702">actions/setup-python#702</a></li> <li>Add range validation for toml files by <a href="https://github.com/dmitry-shibanov"><code>@​dmitry-shibanov</code></a> in <a href="https://redirect.github.com/actions/setup-python/pull/726">actions/setup-python#726</a></li> </ul> <p><strong>Full Changelog</strong>: <a href="https://github.com/actions/setup-python/compare/v4...v4.7.1">https://github.com/actions/setup-python/compare/v4...v4.7.1</a></p> </blockquote> </details> <details> <summary>Commits</summary> <ul> <li><a href="https://github.com/actions/setup-python/commit/0a5c61591373683505ea898e09a3ea4f39ef2b9c"><code>0a5c615</code></a> Update action to node20 (<a href="https://redirect.github.com/actions/setup-python/issues/772">#772</a>)</li> <li><a href="https://github.com/actions/setup-python/commit/0ae58361cdfd39e2950bed97a1e26aa20c3d8955"><code>0ae5836</code></a> Add example of GraalPy to docs (<a href="https://redirect.github.com/actions/setup-python/issues/773">#773</a>)</li> <li><a href="https://github.com/actions/setup-python/commit/b64ffcaf5b410884ad320a9cfac8866006a109aa"><code>b64ffca</code></a> update actions/checkout to v4 (<a href="https://redirect.github.com/actions/setup-python/issues/761">#761</a>)</li> <li><a href="https://github.com/actions/setup-python/commit/8d2896179abf658742de432b3f203d2c2d86a587"><code>8d28961</code></a> Examples now use checkout@v4 (<a href="https://redirect.github.com/actions/setup-python/issues/738">#738</a>)</li> <li><a href="https://github.com/actions/setup-python/commit/7bc6abb01e0555719edc2dbca70a2fde309e5e56"><code>7bc6abb</code></a> advanced-usage.md: Encourage the use actions/checkout@v4 (<a href="https://redirect.github.com/actions/setup-python/issues/729">#729</a>)</li> <li><a href="https://github.com/actions/setup-python/commit/e8111cec9d3dc15220d8a3b638f08419f57b906a"><code>e8111ce</code></a> Bump <code>@​babel/traverse</code> from 7.9.0 to 7.23.2 (<a href="https://redirect.github.com/actions/setup-python/issues/743">#743</a>)</li> <li><a href="https://github.com/actions/setup-python/commit/a00ea43da65e7c04d2bdae58b3afecd77057eb9e"><code>a00ea43</code></a> add fix for graalpy ci (<a href="https://redirect.github.com/actions/setup-python/issues/741">#741</a>)</li> <li><a href="https://github.com/actions/setup-python/commit/8635b1ccc5934e73ed3510980fd2e7790b85839b"><code>8635b1c</code></a> Change deprecation comment to past tense (<a href="https://redirect.github.com/actions/setup-python/issues/723">#723</a>)</li> <li><a href="https://github.com/actions/setup-python/commit/f6cc428f535856f9c23558d01765a42a4d6cf758"><code>f6cc428</code></a> Use non-deprecated versions in examples (<a href="https://redirect.github.com/actions/setup-python/issues/724">#724</a>)</li> <li><a href="https://github.com/actions/setup-python/commit/5f2af211d616f86005883b44826180b21abb4060"><code>5f2af21</code></a> Add GraalPy support (<a href="https://redirect.github.com/actions/setup-python/issues/694">#694</a>)</li> <li>Additional commits viewable in <a href="https://github.com/actions/setup-python/compare/61a6322f88396a6271a6ee3565807d608ecaddd1...0a5c61591373683505ea898e09a3ea4f39ef2b9c">compare view</a></li> 
</ul> </details> <br /> [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=actions/setup-python&package-manager=github_actions&previous-version=4.7.0&new-version=5.0.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) --- <details> <summary>Dependabot commands and options</summary> <br /> You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show <dependency name> ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself) </details>
https://api.github.com/repos/psf/requests/pulls/6599
2023-12-11T16:07:16Z
2023-12-11T16:11:07Z
2023-12-11T16:11:07Z
2023-12-11T16:11:08Z
435
psf/requests
32,146
BUG: fix reverse comparison operations for Categorical
diff --git a/doc/source/whatsnew/v0.15.1.txt b/doc/source/whatsnew/v0.15.1.txt index e96adc2bd9559..486ba9cbadd7f 100644 --- a/doc/source/whatsnew/v0.15.1.txt +++ b/doc/source/whatsnew/v0.15.1.txt @@ -186,6 +186,7 @@ Bug Fixes - Bug in selecting from a ``Categorical`` with ``.iloc`` (:issue:`8623`) - Bug in groupby-transform with a Categorical (:issue:`8623`) - Bug in duplicated/drop_duplicates with a Categorical (:issue:`8623`) +- Bug in ``Categorical`` reflected comparison operator raising if the first argument was a numpy array scalar (e.g. np.int64) (:issue:`8658`) diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index 598b29bf77e47..150da65580223 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -42,7 +42,16 @@ def f(self, other): # In other series, the leads to False, so do that here too ret[na_mask] = False return ret - elif lib.isscalar(other): + + # Numpy-1.9 and earlier may convert a scalar to a zerodim array during + # comparison operation when second arg has higher priority, e.g. + # + # cat[0] < cat + # + # With cat[0], for example, being ``np.int64(1)`` by the time it gets + # into this function would become ``np.array(1)``. + other = lib.item_from_zerodim(other) + if lib.isscalar(other): if other in self.categories: i = self.categories.get_loc(other) return getattr(self._codes, op)(i) diff --git a/pandas/core/common.py b/pandas/core/common.py index 51464e1809e75..1c117e1cae7dd 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -84,7 +84,7 @@ def _check(cls, inst): ABCSparseArray = create_pandas_abc_type("ABCSparseArray", "_subtyp", ('sparse_array', 'sparse_series')) ABCCategorical = create_pandas_abc_type("ABCCategorical","_typ",("categorical")) - +ABCPeriod = create_pandas_abc_type("ABCPeriod", "_typ", ("period",)) class _ABCGeneric(type): diff --git a/pandas/lib.pyx b/pandas/lib.pyx index 88c458ce95226..221ffe24a713b 100644 --- a/pandas/lib.pyx +++ b/pandas/lib.pyx @@ -4,6 +4,7 @@ import numpy as np from numpy cimport * +np.import_array() cdef extern from "numpy/arrayobject.h": cdef enum NPY_TYPES: @@ -234,8 +235,54 @@ cpdef checknull_old(object val): else: return util._checknull(val) +# ABCPeriod cannot be imported right away from pandas.core.common. +ABCPeriod = None def isscalar(object val): - return np.isscalar(val) or val is None or PyDateTime_Check(val) or PyDelta_Check(val) + """ + Return True if given value is scalar. + + This includes: + - numpy array scalar (e.g. np.int64) + - Python builtin numerics + - Python builtin byte arrays and strings + - None + - instances of datetime.datetime + - instances of datetime.timedelta + - any type previously registered with :func:`register_scalar_type` function + + """ + global ABCPeriod + if ABCPeriod is None: + from pandas.core.common import ABCPeriod as _ABCPeriod + ABCPeriod = _ABCPeriod + + return (np.PyArray_IsAnyScalar(val) + # As of numpy-1.9, PyArray_IsAnyScalar misses bytearrays on Py3. + or PyBytes_Check(val) + or val is None + or PyDate_Check(val) + or PyDelta_Check(val) + or PyTime_Check(val) + or isinstance(val, ABCPeriod)) + + +def item_from_zerodim(object val): + """ + If the value is a zerodim array, return the item it contains. 
+ + Examples + -------- + >>> item_from_zerodim(1) + 1 + >>> item_from_zerodim('foobar') + 'foobar' + >>> item_from_zerodim(np.array(1)) + 1 + >>> item_from_zerodim(np.array([1])) + array([1]) + + """ + return util.unbox_if_zerodim(val) @cython.wraparound(False) diff --git a/pandas/src/numpy_helper.h b/pandas/src/numpy_helper.h index 69b849de47fe7..8b79bbe79ff2f 100644 --- a/pandas/src/numpy_helper.h +++ b/pandas/src/numpy_helper.h @@ -167,6 +167,21 @@ void set_array_not_contiguous(PyArrayObject *ao) { } +// If arr is zerodim array, return a proper array scalar (e.g. np.int64). +// Otherwise, return arr as is. +PANDAS_INLINE PyObject* +unbox_if_zerodim(PyObject* arr) { + if (PyArray_IsZeroDim(arr)) { + PyObject *ret; + ret = PyArray_ToScalar(PyArray_DATA(arr), arr); + return ret; + } else { + Py_INCREF(arr); + return arr; + } +} + + // PANDAS_INLINE PyObject* // get_base_ndarray(PyObject* ap) { // // if (!ap || (NULL == ap)) { diff --git a/pandas/src/util.pxd b/pandas/src/util.pxd index cc1921e6367c5..eff1728c6921a 100644 --- a/pandas/src/util.pxd +++ b/pandas/src/util.pxd @@ -22,6 +22,7 @@ cdef extern from "numpy_helper.h": inline void transfer_object_column(char *dst, char *src, size_t stride, size_t length) object sarr_from_data(cnp.dtype, int length, void* data) + inline object unbox_if_zerodim(object arr) cdef inline object get_value_at(ndarray arr, object loc): cdef: @@ -64,7 +65,6 @@ cdef inline int is_contiguous(ndarray arr): cdef inline is_array(object o): return cnp.PyArray_Check(o) - cdef inline bint _checknull(object val): try: return val is None or (cpython.PyFloat_Check(val) and val != val) diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py index 444eb87a399e5..4bc7084c93b6b 100644 --- a/pandas/tests/test_categorical.py +++ b/pandas/tests/test_categorical.py @@ -917,6 +917,12 @@ def test_datetime_categorical_comparison(self): self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True]) self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True]) + def test_reflected_comparison_with_scalars(self): + # GH8658 + cat = pd.Categorical([1, 2, 3]) + self.assert_numpy_array_equal(cat > cat[0], [False, True, True]) + self.assert_numpy_array_equal(cat[0] < cat, [False, True, True]) + class TestCategoricalAsBlock(tm.TestCase): _multiprocess_can_split_ = True diff --git a/pandas/tests/test_lib.py b/pandas/tests/test_lib.py new file mode 100644 index 0000000000000..1b7b6c5c5ee4e --- /dev/null +++ b/pandas/tests/test_lib.py @@ -0,0 +1,72 @@ +from datetime import datetime, timedelta, date, time + +import numpy as np + +import pandas as pd +from pandas.lib import isscalar, item_from_zerodim +import pandas.util.testing as tm + + +class TestIsscalar(tm.TestCase): + def test_isscalar_builtin_scalars(self): + self.assertTrue(isscalar(None)) + self.assertTrue(isscalar(True)) + self.assertTrue(isscalar(False)) + self.assertTrue(isscalar(0.)) + self.assertTrue(isscalar(np.nan)) + self.assertTrue(isscalar('foobar')) + self.assertTrue(isscalar(b'foobar')) + self.assertTrue(isscalar(u'foobar')) + self.assertTrue(isscalar(datetime(2014, 1, 1))) + self.assertTrue(isscalar(date(2014, 1, 1))) + self.assertTrue(isscalar(time(12, 0))) + self.assertTrue(isscalar(timedelta(hours=1))) + self.assertTrue(isscalar(pd.NaT)) + + def test_isscalar_builtin_nonscalars(self): + self.assertFalse(isscalar({})) + self.assertFalse(isscalar([])) + self.assertFalse(isscalar([1])) + self.assertFalse(isscalar(())) + self.assertFalse(isscalar((1,))) + 
self.assertFalse(isscalar(slice(None))) + self.assertFalse(isscalar(Ellipsis)) + + def test_isscalar_numpy_array_scalars(self): + self.assertTrue(isscalar(np.int64(1))) + self.assertTrue(isscalar(np.float64(1.))) + self.assertTrue(isscalar(np.int32(1))) + self.assertTrue(isscalar(np.object_('foobar'))) + self.assertTrue(isscalar(np.str_('foobar'))) + self.assertTrue(isscalar(np.unicode_(u'foobar'))) + self.assertTrue(isscalar(np.bytes_(b'foobar'))) + self.assertTrue(isscalar(np.datetime64('2014-01-01'))) + self.assertTrue(isscalar(np.timedelta64(1, 'h'))) + + def test_isscalar_numpy_zerodim_arrays(self): + for zerodim in [np.array(1), + np.array('foobar'), + np.array(np.datetime64('2014-01-01')), + np.array(np.timedelta64(1, 'h'))]: + self.assertFalse(isscalar(zerodim)) + self.assertTrue(isscalar(item_from_zerodim(zerodim))) + + def test_isscalar_numpy_arrays(self): + self.assertFalse(isscalar(np.array([]))) + self.assertFalse(isscalar(np.array([[]]))) + self.assertFalse(isscalar(np.matrix('1; 2'))) + + def test_isscalar_pandas_scalars(self): + self.assertTrue(isscalar(pd.Timestamp('2014-01-01'))) + self.assertTrue(isscalar(pd.Timedelta(hours=1))) + self.assertTrue(isscalar(pd.Period('2014-01-01'))) + + def test_isscalar_pandas_containers(self): + self.assertFalse(isscalar(pd.Series())) + self.assertFalse(isscalar(pd.Series([1]))) + self.assertFalse(isscalar(pd.DataFrame())) + self.assertFalse(isscalar(pd.DataFrame([[1]]))) + self.assertFalse(isscalar(pd.Panel())) + self.assertFalse(isscalar(pd.Panel([[[1]]]))) + self.assertFalse(isscalar(pd.Index([]))) + self.assertFalse(isscalar(pd.Index([1]))) diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index cba449b9596e1..742d8651a4035 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -63,6 +63,7 @@ class Period(PandasObject): """ __slots__ = ['freq', 'ordinal'] _comparables = ['name','freqstr'] + _typ = 'period' @classmethod def _from_ordinal(cls, ordinal, freq): @@ -498,7 +499,6 @@ def strftime(self, fmt): base, mult = _gfc(self.freq) return tslib.period_format(self.ordinal, base, fmt) - def _get_ordinals(data, freq): f = lambda x: Period(x, freq=freq).ordinal if isinstance(data[0], Period):
This PR fixes #8658. It also adds `pd.lib.item_from_zerodim` (backed by a C-level `unbox_if_zerodim` helper) to extract values from zerodim arrays, and teaches `pd.lib.isscalar` to recognise `pd.Period`, `datetime.date` and `datetime.time` (a short behavioural sketch follows this record). I tried making the zerodim unboxing more implicit, but there is just too much to test when this is introduced: think of every method that accepts a scalar needing another test to check that it also accepts a zerodim array. That is definitely too much for a single PR, but broader zerodim support could be added later on a class-by-class basis.
https://api.github.com/repos/pandas-dev/pandas/pulls/8706
2014-11-02T07:59:54Z
2014-11-02T19:15:27Z
2014-11-02T19:15:27Z
2014-11-02T21:37:50Z
2,838
pandas-dev/pandas
45,610
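For the Categorical comparison fix above, a small behavioural sketch. It assumes a pandas of that era, where `item_from_zerodim` was an internal helper importable from `pandas.lib`; the expected outputs come from the docstring and tests in the diff.

```python
import numpy as np
import pandas as pd
from pandas.lib import item_from_zerodim  # internal helper added by this PR

# Zero-dim arrays are not scalars, but their unboxed item is:
print(item_from_zerodim(np.array(1)))    # 1
print(item_from_zerodim(np.array([1])))  # array([1]) -- 1-d arrays are untouched

# The reflected comparison that used to raise (GH8658) now works, because the
# zero-dim array numpy hands over is unboxed before the scalar code path runs:
cat = pd.Categorical([1, 2, 3])
print(cat[0] < cat)   # [False  True  True]
print(cat > cat[0])   # [False  True  True]
```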
Update redtube extractor (make title regex match current site)
diff --git a/youtube_dl/extractor/redtube.py b/youtube_dl/extractor/redtube.py index b1bde1e8117..deb3ad52cac 100644 --- a/youtube_dl/extractor/redtube.py +++ b/youtube_dl/extractor/redtube.py @@ -57,7 +57,7 @@ def _real_extract(self, url): if not info.get('title'): info['title'] = self._html_search_regex( - (r'<h(\d)[^>]+class="(?:video_title_text|videoTitle)[^"]*">(?P<title>(?:(?!\1).)+)</h\1>', + (r'<h(\d)[^>]+class="(?:video_title_text|videoTitle|video_title)[^"]*">(?P<title>(?:(?!\1).)+)</h\1>', r'(?:videoTitle|title)\s*:\s*(["\'])(?P<title>(?:(?!\1).)+)\1',), webpage, 'title', group='title', default=None) or self._og_search_title(webpage)
### Before submitting a *pull request* make sure you have: - [ ] At least skimmed through [adding new extractor tutorial](https://github.com/ytdl-org/youtube-dl#adding-support-for-a-new-site) and [youtube-dl coding conventions](https://github.com/ytdl-org/youtube-dl#youtube-dl-coding-conventions) sections - [x] [Searched](https://github.com/ytdl-org/youtube-dl/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests - [ ] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) ### In order to be accepted and merged into youtube-dl each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options: - [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/) - [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence) ### What is the purpose of your *pull request*? - [x] Bug fix - [ ] Improvement - [ ] New extractor - [ ] New feature --- ### Description of your *pull request* and other information A site changed the html they use for the "title", so I updated the regex in the extractor to match.
https://api.github.com/repos/ytdl-org/youtube-dl/pulls/25208
2020-05-09T15:47:54Z
2020-05-19T19:11:06Z
2020-05-19T19:11:06Z
2020-05-19T19:11:06Z
250
ytdl-org/youtube-dl
49,964
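For the redtube title-regex PR above, a quick standalone check of the updated pattern. The HTML snippet is made up to mirror the newly added `video_title` class; it is not taken from the site.

```python
import re

TITLE_RE = (r'<h(\d)[^>]+class="(?:video_title_text|videoTitle|video_title)[^"]*">'
            r'(?P<title>(?:(?!\1).)+)</h\1>')

html = '<h1 class="video_title">Some Example Title</h1>'  # hypothetical markup
match = re.search(TITLE_RE, html)
print(match.group('title'))  # Some Example Title
```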
[wakanim] Detect geo-restriction
diff --git a/yt_dlp/extractor/wakanim.py b/yt_dlp/extractor/wakanim.py index 22441c38ff6..a61a630e26f 100644 --- a/yt_dlp/extractor/wakanim.py +++ b/yt_dlp/extractor/wakanim.py @@ -33,12 +33,19 @@ class WakanimIE(InfoExtractor): 'url': 'https://www.wakanim.tv/de/v2/catalogue/episode/7843/sword-art-online-alicization-omu-arc-2-folge-15-omu', 'only_matching': True, }] + _GEO_BYPASS = False def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) + if 'Geoblocking' in webpage: + if '/de/' in url: + self.raise_geo_restricted(countries=['DE', 'AT', 'CH']) + else: + self.raise_geo_restricted(countries=['RU']) + manifest_url = urljoin(url, self._search_regex( r'file\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage, 'manifest url', group='url'))
## Please follow the guide below - You will be asked some questions, please read them **carefully** and answer honestly - Put an `x` into all the boxes [ ] relevant to your *pull request* (like that [x]) - Use *Preview* tab to see how your *pull request* will actually look like --- ### Before submitting a *pull request* make sure you have: - [x] At least skimmed through [contributing guidelines](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) including [yt-dlp coding conventions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#yt-dlp-coding-conventions) - [x] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests - [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) ### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options: - [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/) - [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence) ### What is the purpose of your *pull request*? - [ ] Bug fix - [x] Improvement - [ ] New extractor - [ ] New feature --- ### Description of your *pull request* and other information This makes the Wakanim extractor raise a geo-restricted error instead of saying "unable to extract m3u8 url".
https://api.github.com/repos/yt-dlp/yt-dlp/pulls/1429
2021-10-26T11:12:11Z
2021-10-26T16:35:20Z
2021-10-26T16:35:20Z
2021-10-26T16:35:20Z
299
yt-dlp/yt-dlp
8,203
Fix integrations with no uri (mocks)
diff --git a/localstack/services/apigateway/apigateway_listener.py b/localstack/services/apigateway/apigateway_listener.py index 6f73ecc14b611..bee8aea5e722c 100644 --- a/localstack/services/apigateway/apigateway_listener.py +++ b/localstack/services/apigateway/apigateway_listener.py @@ -302,7 +302,7 @@ def invoke_rest_api_integration(api_id, stage, integration, method, path, invoca relative_path, query_string_params = extract_query_string_params(path=invocation_path) integration_type_orig = integration.get('type') or integration.get('integrationType') or '' integration_type = integration_type_orig.upper() - uri = integration.get('uri') or integration.get('integrationUri') + uri = integration.get('uri') or integration.get('integrationUri') or '' if (uri.startswith('arn:aws:apigateway:') and ':lambda:path' in uri) or uri.startswith('arn:aws:lambda'): if integration_type in ['AWS', 'AWS_PROXY']:
Fixes #3908. I've had some problems installing sasl on Arch, so I haven't been able to verify this fix locally. I'd appreciate some help validating it (a minimal reproduction follows this record).
https://api.github.com/repos/localstack/localstack/pulls/3909
2021-04-20T23:29:00Z
2021-04-22T06:27:24Z
2021-04-22T06:27:24Z
2021-04-22T06:27:24Z
232
localstack/localstack
29,083
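A minimal reproduction of the failure mode the localstack PR above guards against, with a hypothetical integration dict standing in for a MOCK integration that carries no `uri`:

```python
integration = {"type": "MOCK"}  # hypothetical MOCK integration without a 'uri'

uri = integration.get("uri") or integration.get("integrationUri")
# uri is None here, so the original code crashed on the next line:
# uri.startswith("arn:aws:apigateway:")  # AttributeError: 'NoneType' object has no attribute 'startswith'

# With the fix, uri falls back to an empty string and the check simply returns False:
uri = integration.get("uri") or integration.get("integrationUri") or ""
print(uri.startswith("arn:aws:apigateway:"))  # False -> the Lambda branch is skipped
```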
Point BERT pretraining tutorial to new perf tuning.
diff --git a/docs/_tutorials/bert-pretraining.md b/docs/_tutorials/bert-pretraining.md index f501f2e8088f..40ecdfc65b26 100755 --- a/docs/_tutorials/bert-pretraining.md +++ b/docs/_tutorials/bert-pretraining.md @@ -301,7 +301,9 @@ Note: generated with transformer kernel must to be loaded by the model with transformer kernel enabled (such as in fine-tuning). -For more details about the transformer kernel, please see [DeepSpeed Transformer Kernel](/transformer_kernel/) and [DeepSpeed Fast-Bert Training](/fast_bert/). +For more details about the transformer kernel, please see [DeepSpeed +Transformer Kernel](/tutorials/transformer_kernel/) and [DeepSpeed Fast-Bert +Training](https://www.deepspeed.ai/news/2020/05/27/fastest-bert-training.html). ### Start Training
https://api.github.com/repos/microsoft/DeepSpeed/pulls/246
2020-05-29T21:38:15Z
2020-05-29T21:41:35Z
2020-05-29T21:41:34Z
2020-05-29T21:41:59Z
211
microsoft/DeepSpeed
10,624
add git-lfs support
diff --git a/README.md b/README.md index 4ff50cd23..141193eda 100644 --- a/README.md +++ b/README.md @@ -213,6 +213,7 @@ following rules are enabled by default: * `git_fix_stash` &ndash; fixes `git stash` commands (misspelled subcommand and missing `save`); * `git_flag_after_filename` &ndash; fixes `fatal: bad flag '...' after filename` * `git_help_aliased` &ndash; fixes `git help <alias>` commands replacing <alias> with the aliased command; +* `git_lfs_mistype` &ndash; fixes mistyped `git lfs <command>` commands; * `git_merge` &ndash; adds remote to branch names; * `git_merge_unrelated` &ndash; adds `--allow-unrelated-histories` when required * `git_not_command` &ndash; fixes wrong git commands like `git brnch`; diff --git a/tests/rules/test_git_lfs_mistype.py b/tests/rules/test_git_lfs_mistype.py new file mode 100644 index 000000000..1aae66fc2 --- /dev/null +++ b/tests/rules/test_git_lfs_mistype.py @@ -0,0 +1,29 @@ +import pytest + +from thefuck.rules.git_lfs_mistype import match, get_new_command +from thefuck.types import Command + + +@pytest.fixture +def mistype_response(): + return """ +Error: unknown command "evn" for "git-lfs" + +Did you mean this? + env + ext + +Run 'git-lfs --help' for usage. + """ + + +def test_match(mistype_response): + assert match(Command('git lfs evn', mistype_response)) + err_response = 'bash: git: command not found' + assert not match(Command('git lfs env', err_response)) + assert not match(Command('docker lfs env', mistype_response)) + + +def test_get_new_command(mistype_response): + assert (get_new_command(Command('git lfs evn', mistype_response)) + == ['git lfs env', 'git lfs ext']) diff --git a/thefuck/rules/git_lfs_mistype.py b/thefuck/rules/git_lfs_mistype.py new file mode 100644 index 000000000..afa3d5b7a --- /dev/null +++ b/thefuck/rules/git_lfs_mistype.py @@ -0,0 +1,18 @@ +import re +from thefuck.utils import get_all_matched_commands, replace_command +from thefuck.specific.git import git_support + + +@git_support +def match(command): + ''' + Match a mistyped command + ''' + return 'lfs' in command.script and 'Did you mean this?' in command.output + + +@git_support +def get_new_command(command): + broken_cmd = re.findall(r'Error: unknown command "([^"]*)" for "git-lfs"', command.output)[0] + matched = get_all_matched_commands(command.output, ['Did you mean', ' for usage.']) + return replace_command(command, broken_cmd, matched)
This is related to https://github.com/nvbn/thefuck/issues/1030
https://api.github.com/repos/nvbn/thefuck/pulls/1056
2020-02-24T16:43:34Z
2020-06-10T22:20:38Z
2020-06-10T22:20:38Z
2020-06-10T22:20:39Z
736
nvbn/thefuck
30,709
Corrects the solution for linux copy exercise
diff --git a/topics/linux/exercises/copy/solution.md b/topics/linux/exercises/copy/solution.md index c9d2b8c02..da3592686 100644 --- a/topics/linux/exercises/copy/solution.md +++ b/topics/linux/exercises/copy/solution.md @@ -17,9 +17,8 @@ touch /tmp/x cp x ~/ cp x y mkdir files -cp x files -cp y files +mv x files | mv y files cp -r files copy_of_files mv copy_of_files files2 rm -rf files files2 -``` \ No newline at end of file +```
Fixes the solution of the copy exercise with regard to step 4. Originally the solution did not move x and y to the files directory, but instead copied them into it.
https://api.github.com/repos/bregman-arie/devops-exercises/pulls/10243
2023-12-03T23:04:11Z
2024-02-02T13:15:31Z
2024-02-02T13:15:31Z
2024-02-02T13:15:31Z
148
bregman-arie/devops-exercises
17,493
Update README.md to fix typo
diff --git a/README.md b/README.md index 3e0007fad4..ee3b2bf5e5 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ Requests: HTTP for Humans™ **If you're interested in financially supporting Kenneth Reitz open source, consider [visiting this link](https://cash.me/$KennethReitz). Your support helps tremendously with sustainability of motivation, as Open Source is no longer part of my day job.** -Rquests is the only *Non-GMO* HTTP library for Python, safe for human +Requests is the only *Non-GMO* HTTP library for Python, safe for human consumption. ![image](https://farm5.staticflickr.com/4317/35198386374_1939af3de6_k_d.jpg)
I noticed "Rquests" was used at the top of the readme instead of "Requests". If that was intentional then ignore this change.
https://api.github.com/repos/psf/requests/pulls/4802
2018-09-21T16:23:04Z
2018-09-21T16:27:13Z
2018-09-21T16:27:13Z
2021-09-01T00:11:56Z
189
psf/requests
32,109
DOC Check sha256 digests of tarballs in tutorial and examples before extraction
diff --git a/doc/tutorial/text_analytics/data/movie_reviews/fetch_data.py b/doc/tutorial/text_analytics/data/movie_reviews/fetch_data.py index e591aca0f241b..67def14889774 100644 --- a/doc/tutorial/text_analytics/data/movie_reviews/fetch_data.py +++ b/doc/tutorial/text_analytics/data/movie_reviews/fetch_data.py @@ -1,27 +1,33 @@ """Script to download the movie review dataset""" -import os +from pathlib import Path +from hashlib import sha256 import tarfile -from contextlib import closing from urllib.request import urlopen -URL = ("http://www.cs.cornell.edu/people/pabo/" - "movie-review-data/review_polarity.tar.gz") +URL = "http://www.cs.cornell.edu/people/pabo/movie-review-data/review_polarity.tar.gz" -ARCHIVE_NAME = URL.rsplit('/', 1)[1] -DATA_FOLDER = "txt_sentoken" +ARCHIVE_SHA256 = "fc0dccc2671af5db3c5d8f81f77a1ebfec953ecdd422334062df61ede36b2179" +ARCHIVE_NAME = Path(URL.rsplit("/", 1)[1]) +DATA_FOLDER = Path("txt_sentoken") -if not os.path.exists(DATA_FOLDER): +if not DATA_FOLDER.exists(): - if not os.path.exists(ARCHIVE_NAME): + if not ARCHIVE_NAME.exists(): print("Downloading dataset from %s (3 MB)" % URL) opener = urlopen(URL) - with open(ARCHIVE_NAME, 'wb') as archive: + with open(ARCHIVE_NAME, "wb") as archive: archive.write(opener.read()) - print("Decompressing %s" % ARCHIVE_NAME) - with closing(tarfile.open(ARCHIVE_NAME, "r:gz")) as archive: - archive.extractall(path='.') - os.remove(ARCHIVE_NAME) + try: + print("Checking the integrity of the archive") + assert sha256(ARCHIVE_NAME.read_bytes()).hexdigest() == ARCHIVE_SHA256 + + print("Decompressing %s" % ARCHIVE_NAME) + with tarfile.open(ARCHIVE_NAME, "r:gz") as archive: + archive.extractall(path=".") + + finally: + ARCHIVE_NAME.unlink() diff --git a/doc/tutorial/text_analytics/data/twenty_newsgroups/fetch_data.py b/doc/tutorial/text_analytics/data/twenty_newsgroups/fetch_data.py deleted file mode 100644 index 983989d8c0e25..0000000000000 --- a/doc/tutorial/text_analytics/data/twenty_newsgroups/fetch_data.py +++ /dev/null @@ -1,27 +0,0 @@ -"""Script to download the 20 newsgroups text classification set""" - -import os -import tarfile -from contextlib import closing -from urllib.request import urlopen - -URL = ("http://people.csail.mit.edu/jrennie/" - "20Newsgroups/20news-bydate.tar.gz") - -ARCHIVE_NAME = URL.rsplit('/', 1)[1] -TRAIN_FOLDER = "20news-bydate-train" -TEST_FOLDER = "20news-bydate-test" - - -if not os.path.exists(TRAIN_FOLDER) or not os.path.exists(TEST_FOLDER): - - if not os.path.exists(ARCHIVE_NAME): - print("Downloading dataset from %s (14 MB)" % URL) - opener = urlopen(URL) - with open(ARCHIVE_NAME, 'wb') as archive: - archive.write(opener.read()) - - print("Decompressing %s" % ARCHIVE_NAME) - with closing(tarfile.open(ARCHIVE_NAME, "r:gz")) as archive: - archive.extractall(path='.') - os.remove(ARCHIVE_NAME) diff --git a/doc/whats_new/v1.2.rst b/doc/whats_new/v1.2.rst index b6458339fa675..1c645121d26ec 100644 --- a/doc/whats_new/v1.2.rst +++ b/doc/whats_new/v1.2.rst @@ -84,6 +84,10 @@ Changes impacting all modules :pr:`23604` and :pr:`23585` by :user:`Julien Jerphanion <jjerphan>`, :user:`Olivier Grisel <ogrisel>`, and `Thomas Fan`_. +- |Fix| Systematically check the sha256 digest of dataset tarballs used in code + examples in the documentation. + :pr:`24617` by :user:`Olivier Grisel <ogrisel>` and `Thomas Fan`_. Thanks to + `Sim4n6 <https://huntr.dev/users/sim4n6>`_ for the report. 
Changelog --------- diff --git a/examples/applications/plot_out_of_core_classification.py b/examples/applications/plot_out_of_core_classification.py index a8e4f9b72a3b0..721ba0159e692 100644 --- a/examples/applications/plot_out_of_core_classification.py +++ b/examples/applications/plot_out_of_core_classification.py @@ -18,9 +18,9 @@ # @FedericoV <https://github.com/FedericoV/> # License: BSD 3 clause -from glob import glob import itertools -import os.path +from pathlib import Path +from hashlib import sha256 import re import tarfile import time @@ -149,14 +149,17 @@ def stream_reuters_documents(data_path=None): "http://archive.ics.uci.edu/ml/machine-learning-databases/" "reuters21578-mld/reuters21578.tar.gz" ) + ARCHIVE_SHA256 = "3bae43c9b14e387f76a61b6d82bf98a4fb5d3ef99ef7e7075ff2ccbcf59f9d30" ARCHIVE_FILENAME = "reuters21578.tar.gz" if data_path is None: - data_path = os.path.join(get_data_home(), "reuters") - if not os.path.exists(data_path): + data_path = Path(get_data_home()) / "reuters" + else: + data_path = Path(data_path) + if not data_path.exists(): """Download the dataset.""" print("downloading dataset (once and for all) into %s" % data_path) - os.mkdir(data_path) + data_path.mkdir(parents=True, exist_ok=True) def progress(blocknum, bs, size): total_sz_mb = "%.2f MB" % (size / 1e6) @@ -164,16 +167,21 @@ def progress(blocknum, bs, size): if _not_in_sphinx(): sys.stdout.write("\rdownloaded %s / %s" % (current_sz_mb, total_sz_mb)) - archive_path = os.path.join(data_path, ARCHIVE_FILENAME) + archive_path = data_path / ARCHIVE_FILENAME + urlretrieve(DOWNLOAD_URL, filename=archive_path, reporthook=progress) if _not_in_sphinx(): sys.stdout.write("\r") + + # Check that the archive was not tampered: + assert sha256(archive_path.read_bytes()).hexdigest() == ARCHIVE_SHA256 + print("untarring Reuters dataset...") tarfile.open(archive_path, "r:gz").extractall(data_path) print("done.") parser = ReutersParser() - for filename in glob(os.path.join(data_path, "*.sgm")): + for filename in data_path.glob("*.sgm"): for doc in parser.parse(open(filename, "rb")): yield doc
Archives that previously lacked sha256 checks: - <del>`20Newsgroups/20news-bydate.tar.gz`</del> (update: `fetch_data.py` script deleted since it was actually unused) - `reuters21578-mld/reuters21578.tar.gz` - `movie-review-data/review_polarity.tar.gz` Not checking the digests of tarballs before extracting them is a security concern, because a tampered archive could overwrite sensitive system files such as `/etc/hosts`. Note that this PR only fixes code in the documentation (tutorial and examples), not library code: our dataset fetchers under the `sklearn` namespace are already checking the digests systematically. Still, fixing those code snippets in the documentation is worthwhile from an educational point of view (a generic sketch of the pattern follows this record). The scikit-learn-1.1.2.tar.gz source tarball contains those code snippets because it includes the documentation and example files, but this is not the case for the wheel files and conda packages. Not sure if this warrants a security bugfix release or not.
https://api.github.com/repos/scikit-learn/scikit-learn/pulls/24617
2022-10-10T09:46:57Z
2022-10-10T15:57:42Z
2022-10-10T15:57:42Z
2022-10-11T08:15:06Z
1,730
scikit-learn/scikit-learn
45,902
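The pattern the scikit-learn PR above applies, shown in a generic form; the archive name and digest below are placeholders, not values from the PR.

```python
from hashlib import sha256
from pathlib import Path
import tarfile

ARCHIVE = Path("example_dataset.tar.gz")  # placeholder archive
EXPECTED_SHA256 = "0" * 64                # placeholder digest

# Verify the download before extracting, so a tampered tarball cannot
# overwrite sensitive files or write outside the target directory.
if sha256(ARCHIVE.read_bytes()).hexdigest() != EXPECTED_SHA256:
    raise ValueError(f"checksum mismatch for {ARCHIVE}, refusing to extract")

with tarfile.open(ARCHIVE, "r:gz") as archive:
    archive.extractall(path=".")
```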
Added details on the format, generation and breaking of NetNTLMv1
diff --git a/Methodology and Resources/Active Directory Attack.md b/Methodology and Resources/Active Directory Attack.md index 7e3648be5b..6c141f41ab 100644 --- a/Methodology and Resources/Active Directory Attack.md +++ b/Methodology and Resources/Active Directory Attack.md @@ -2094,9 +2094,9 @@ root@kali:~$ klist ``` -## Capturing and cracking Net-NTLMv1/NTLMv1 hashes +## Capturing and cracking Net-NTLMv1/NTLMv1 hashes/tokens -> Net-NTLM (NTLMv1) hashes are used for network authentication (they are derived from a challenge/response algorithm and are based on the user's NT hash. +> Net-NTLMv1 (NTLMv1) authentication tokens are used for network authentication (they are derived from a challenge/response DES-based algorithm with the user's NT-hash as symetric keys. :information_source: : Coerce a callback using PetitPotam or SpoolSample on an affected machine and downgrade the authentication to **NetNTLMv1 Challenge/Response authentication**. This uses the outdated encryption method DES to protect the NT/LM Hashes. @@ -2120,19 +2120,42 @@ root@kali:~$ klist PetitPotam.exe Responder-IP DC-IP # Patched around August 2021 PetitPotam.py -u Username -p Password -d Domain -dc-ip DC-IP Responder-IP DC-IP # Not patched for authenticated users ``` -* If you got some `NTLMv1 hashes`, you need to format them to submit them on [crack.sh](https://crack.sh/netntlm/) +* If you got some `NetNTLMv1 tokens`, you can try to **shuck** them online via [Shuck.Sh](https://shuck.sh/) or locally/on-premise via [ShuckNT](https://github.com/yanncam/ShuckNT/) to get NT-hashes corresponding from [HIBP database](https://haveibeenpwned.com/Passwords). If the NT-hash has previously leaked, the NetNTLMv1 is converted to NT-hash ([pass-the-hash](#pass-the-hash) ready) instantly. The [shucking process](https://www.youtube.com/watch?v=OQD3qDYMyYQ&ab_channel=PasswordVillage) works for any NetNTLMv1 with or without ESS/SSP (challenge != `1122334455667788`) but mainly for user account (plaintext previsouly leaked). ```ps1 + # Submit NetNTLMv1 online to https://shuck.sh/get-shucking.php + # Or shuck them on-premise via ShuckNT script: + $ php shucknt.php -f tokens-samples.txt -w pwned-passwords-ntlm-reversed-ordered-by-hash-v8.bin + [...] + 10 hashes-challenges analyzed in 3 seconds, with 8 NT-Hash instantly broken for pass-the-hash and 1 that can be broken via crack.sh for free. + [INPUT] ycam::ad:DEADC0DEDEADC0DE00000000000000000000000000000000:70C249F75FB6D2C0AC2C2D3808386CCAB1514A2095C582ED:1122334455667788 + [NTHASH-SHUCKED] 93B3C62269D55DB9CA660BBB91E2BD0B + ``` +* If you got some `NetNTLMv1 tokens`, you can also try to crack them via [Crack.Sh](https://crack.sh/) (cloud service when available, more time and potentially chargeable). For this you need to format them to submit them on [Crack.Sh](https://crack.sh/netntlm/). The Converter of [Shuck.Sh](https://shuck.sh/) can be used to convert format easily. 
+ ```ps1 + # When there is no-ESS/SSP and the challenge is set to 1122334455667788, it's free (0$): username::hostname:response:response:challenge -> NTHASH:response NTHASH:F35A3FE17DCB31F9BE8A8004B3F310C150AFA36195554972 + + # When there is ESS/SSP or challenge != 1122334455667788, it's chargeable from $20-$200: + username::hostname:lmresponse+0padding:ntresponse:challenge -> $NETNTLM$challenge$ntresponse + $NETNTLM$DEADC0DEDEADC0DE$507E2A2131F4AF4A299D8845DE296F122CA076D49A80476E ``` -* Or crack them with Hashcat / John The Ripper +* Finaly, if no [Shuck.Sh](https://shuck.sh/) nor [Crack.Sh](https://crack.sh/) can be used, you can try to break NetNTLMv1 with Hashcat / John The Ripper ```ps1 john --format=netntlm hash.txt - hashcat -m 5500 -a 3 hash.txt + hashcat -m 5500 -a 3 hash.txt # for NetNTLMv1(-ESS/SSP) to plaintext (for user account) + hashcat -m 27000 -a 0 hash.txt nthash-wordlist.txt # for NetNTLMv1(-ESS/SSP) to NT-hash (for user and computer account, depending on nthash-wordlist quality) + hashcat -m 14000 -a 3 inputs.txt --hex-charset -1 /usr/share/hashcat/charsets/DES_full.hcchr ?1?1?1?1?1?1?1?1 # for NetNTLMv1(-ESS/SSP) to DES-keys (KPA-attack) of user/computer account with 100% success rate, then regenerate NT-hash with these DES-keys on https://shuck.sh/converter.php. ``` * Now you can DCSync using the Pass-The-Hash with the DC machine account -:warning: NTLMv1 with SSP(Security Support Provider) changes the server challenge and is not quite ideal for the attack, but it can be used. +:warning: NetNTLMv1 with ESS / SSP (Extended Session Security / Security Support Provider) changes the final challenge by adding a new alea (!= `1122334455667788`, so chargeable on [Crack.Sh](https://crack.sh/)). + +:warning: NetNTLMv1 format is `login::domain:lmresp:ntresp:clientChall`. If the `lmresp` contains a **0's-padding** this means that the token is protected by **ESS/SSP**. + +:warning: NetNTLMv1 final challenge is the Responder's challenge itself (`1122334455667788`) when there is no ESS/SSP. If ESS/SSP is enabled, the final challenge is the first 8 bytes of the MD5 hash from the concatenation of the client challenge and server challenge. The details of the algorithmic generation of a NetNTLMv1 are illustrated on the [Shuck.Sh Generator](https://shuck.sh/generator.php) and detailed in [MISCMag#128](https://connect.ed-diamond.com/misc/misc-128/shuck-hash-before-trying-to-crack-it). + +:warning: If you get some tokens from other tools ([hostapd-wpe](https://github.com/OpenSecurityResearch/hostapd-wpe) or [chapcrack](https://github.com/moxie0/chapcrack)) in other formats, like tokens starting with the prefix `$MSCHAPv2$`, `$NETNTLM$` or `$99$`, they correspond to a classic NetNTLMv1 and can be converted from one format to another [here](https://shuck.sh/converter.php). **Mitigations**: @@ -4447,4 +4470,4 @@ De-obfuscate the content of the ldap_default_authtok variable with [mludvig/sss_ * [S4U2self abuse - TheHackerRecipes](https://www.thehacker.recipes/ad/movement/kerberos/delegations/s4u2self-abuse) * [Abusing Kerberos S4U2self for local privilege escalation - cfalta](https://cyberstoph.org/posts/2021/06/abusing-kerberos-s4u2self-for-local-privilege-escalation/) * [External Trusts Are Evil - 14 March 2023 - Charlie Clark (@exploitph)](https://exploit.ph/external-trusts-are-evil.html) -* [Certificates and Pwnage and Patches, Oh My! 
- Will Schroeder - Nov 9, 2022](https://posts.specterops.io/certificates-and-pwnage-and-patches-oh-my-8ae0f4304c1d) \ No newline at end of file +* [Certificates and Pwnage and Patches, Oh My! - Will Schroeder - Nov 9, 2022](https://posts.specterops.io/certificates-and-pwnage-and-patches-oh-my-8ae0f4304c1d)
Hello, I'd like to propose some additions and clarifications concerning the format of NetNTLMv1 tokens, some technical details about how they are generated, and the methods to break them (ShuckNT / Crack.Sh / KPA attack, etc.); a small parsing sketch follows this record. Hope this helps :)! Have a good day,
https://api.github.com/repos/swisskyrepo/PayloadsAllTheThings/pulls/660
2023-07-25T09:35:37Z
2023-07-25T13:28:13Z
2023-07-25T13:28:13Z
2023-07-25T13:28:22Z
2,085
swisskyrepo/PayloadsAllTheThings
8,417
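A small parsing sketch for the NetNTLMv1 token layout described in the diff above (`login::domain:lmresp:ntresp:clientChall`). The sample token is the one shown in the PR, and the two checks mirror the ESS/SSP and challenge rules it documents.

```python
token = ("ycam::ad:DEADC0DEDEADC0DE00000000000000000000000000000000:"
         "70C249F75FB6D2C0AC2C2D3808386CCAB1514A2095C582ED:1122334455667788")

login_domain, lmresp, ntresp, challenge = token.rsplit(":", 3)

# A zero-padded LM response means the token is protected by ESS/SSP,
# so the effective challenge is no longer the plain 1122334455667788.
has_ess_ssp = lmresp.endswith("0" * 32)
default_challenge = challenge == "1122334455667788"

print(has_ess_ssp)        # True  -> ESS/SSP present
print(default_challenge)  # True  -> Responder's static client challenge
```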
added gym-inventory to environments
diff --git a/docs/environments.md b/docs/environments.md index 5d2536dd92d..09451c61fd4 100644 --- a/docs/environments.md +++ b/docs/environments.md @@ -7,4 +7,11 @@ _**NOTICE**: Its possible that in time OpenAI will develop a full fledged reposi ## PGE: Parallel Game Engine PGE is a FOSS 3D engine for AI simulations, and can interoperate with the Gym. Contains environments with modern 3D graphics, and uses Bullet for physics. -Learn more here: https://github.com/222464/PGE \ No newline at end of file + +Learn more here: https://github.com/222464/PGE + +## gym-inventory: Inventory Control Environments + +gym-inventory is a single agent domain featuring discrete state and action spaces that an AI agent might encounter in inventory control problems. + +Learn more here: https://github.com/paulhendricks/gym-inventory
https://api.github.com/repos/openai/gym/pulls/355
2016-09-23T03:13:50Z
2016-09-23T04:26:41Z
2016-09-23T04:26:41Z
2016-09-23T04:26:44Z
224
openai/gym
5,128
Parameter type error
diff --git a/src/you_get/extractors/douyutv.py b/src/you_get/extractors/douyutv.py index 4bd0e14cf9..b7b15e742e 100644 --- a/src/you_get/extractors/douyutv.py +++ b/src/you_get/extractors/douyutv.py @@ -73,7 +73,7 @@ def douyutv_download(url, output_dir = '.', merge = True, info_only = False, **k print_info(site_info, title, 'flv', float('inf')) if not info_only: - download_url_ffmpeg(real_url, title, 'flv', None, output_dir = output_dir, merge = merge) + download_url_ffmpeg(real_url, title, 'flv', params={}, output_dir = output_dir, merge = merge) site_info = "douyu.com" download = douyutv_download
```
# you-get -du https://www.douyu.com/lpl
[DEBUG] get_content: https://m.douyu.com/lpl
...
Traceback (most recent call last):
  ...
  File "/usr/local/lib/python3.5/dist-packages/you_get/common.py", line 1574, in any_download
    m.download(url, **kwargs)
  File "/usr/local/lib/python3.5/dist-packages/you_get/extractors/douyutv.py", line 81, in douyutv_download
    download_url_ffmpeg(real_url, title, 'flv', None, output_dir = output_dir, merge = merge)
  File "/usr/local/lib/python3.5/dist-packages/you_get/common.py", line 1013, in download_url_ffmpeg
    if params.get('-y', False): # None or unset ->False
AttributeError: 'NoneType' object has no attribute 'get'
```
https://api.github.com/repos/soimort/you-get/pulls/2564
2018-02-11T17:17:02Z
2018-02-11T21:12:05Z
2018-02-11T21:12:05Z
2018-02-11T21:12:09Z
213
soimort/you-get
21,160
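The you-get fix above simply passes `params={}` instead of `None` at the call site. An equally common variant, shown here only as a sketch (not what the PR does, and with a hypothetical stand-in for the real helper), is to normalise the argument in the callee:

```python
def download_url_ffmpeg_sketch(url, title, ext, params=None, **kwargs):
    # Hypothetical stand-in for the real helper: normalising params here
    # makes both call styles (None or a dict) safe.
    params = params or {}
    if params.get('-y', False):
        pass  # would pass -y through to ffmpeg

download_url_ffmpeg_sketch("http://example.invalid/live.flv", "title", "flv")      # ok
download_url_ffmpeg_sketch("http://example.invalid/live.flv", "title", "flv", {})  # ok
```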
Update Plotly.js
diff --git a/frontend/package.json b/frontend/package.json index ac1f2cf39c28..79845486bbed 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -68,7 +68,7 @@ "node-emoji": "^1.10.0", "node-sass": "^4.14.1", "numbro": "^2.3.1", - "plotly.js": "^1.58.1", + "plotly.js": "^1.58.2", "prismjs": "^1.21.0", "protobufjs": "^6.10.1", "query-string": "^6.13.1", diff --git a/frontend/yarn.lock b/frontend/yarn.lock index 2c35140ca813..ecc4b8e20cae 100644 --- a/frontend/yarn.lock +++ b/frontend/yarn.lock @@ -13393,10 +13393,10 @@ plist@^3.0.1: xmlbuilder "^9.0.7" xmldom "0.1.x" -plotly.js@^1.58.1: - version "1.58.1" - resolved "https://registry.yarnpkg.com/plotly.js/-/plotly.js-1.58.1.tgz#07bfa39ffe25e0cfc966ef0a64caf39ec4d3f380" - integrity sha512-EzVx3Aipr7WyefMDHJFRtIkJKSI9fTTipGu5fQ17l5M5FzsDq+bdywhJ3Gy645el5QBPx5uzrY80JJbZTjYSQQ== +plotly.js@^1.58.2: + version "1.58.2" + resolved "https://registry.yarnpkg.com/plotly.js/-/plotly.js-1.58.2.tgz#898a0c04908d7eee508d628eae017e1f4198930c" + integrity sha512-CQ1Fg50BafIeFs3PQ8D2byigrmn5UoOMJHgyLBcYoHxxQTI9L85xKl02EkiJxg7KJUgNr//Bt/yu8heAIy10XQ== dependencies: "@plotly/d3-sankey" "0.7.2" "@plotly/d3-sankey-circular" "0.33.1"
🙈 follow-up to https://github.com/streamlit/streamlit/pull/2417 ... we usually don't patch this quickly, but this appears to have been a buggier release than usual.
https://api.github.com/repos/streamlit/streamlit/pulls/2444
2020-12-09T13:35:22Z
2020-12-09T17:36:09Z
2020-12-09T17:36:09Z
2021-07-24T00:36:54Z
571
streamlit/streamlit
21,767
remove prints
diff --git a/model/model_training/custom_datasets/qa_datasets.py b/model/model_training/custom_datasets/qa_datasets.py index 0c250c7727..e9a1a9f13e 100644 --- a/model/model_training/custom_datasets/qa_datasets.py +++ b/model/model_training/custom_datasets/qa_datasets.py @@ -458,7 +458,6 @@ def process_split( dataset: Subset, reverse_augmentation: bool = False, keep_unreversed: bool = True ) -> list[tuple[str, str]]: data = [] - print("new version") for row in dataset: question = row["instruction"] if len(row["input"]) > 0: diff --git a/model/model_training/custom_datasets/utils.py b/model/model_training/custom_datasets/utils.py index c73b158dc6..98e4c947db 100644 --- a/model/model_training/custom_datasets/utils.py +++ b/model/model_training/custom_datasets/utils.py @@ -14,6 +14,5 @@ def _filter_by_words(text: str, filter_words: list[str] | None = None) -> None | filter_words = filter_words or FILTER_BY_WORDS for word in filter_words: if word in text.lower(): - print(text) return None return text
Removes stray debug prints introduced by #2445
https://api.github.com/repos/LAION-AI/Open-Assistant/pulls/2446
2023-04-10T15:47:48Z
2023-04-11T09:14:12Z
2023-04-11T09:14:12Z
2023-04-11T09:14:13Z
281
LAION-AI/Open-Assistant
37,499
[crunchyroll:beta] follow browser api accesses more exactly
diff --git a/README.md b/README.md index 4e806e14c0a..285c0b78a0b 100644 --- a/README.md +++ b/README.md @@ -1774,7 +1774,7 @@ The following extractors use this feature: #### crunchyrollbeta * `format`: Which stream type(s) to extract. Default is `adaptive_hls` Eg: `crunchyrollbeta:format=vo_adaptive_hls` - * Potentially useful values include `adaptive_hls`, `adaptive_dash`, `vo_adaptive_hls`, `vo_adaptive_dash`, `download_hls`, `trailer_hls`, `trailer_dash` + * Potentially useful values include `adaptive_hls`, `adaptive_dash`, `vo_adaptive_hls`, `vo_adaptive_dash`, `download_hls`, `download_dash`, `multitrack_adaptive_hls_v2` * `hardsub`: Preference order for which hardsub versions to extract. Default is `None` (no hardsubs). Eg: `crunchyrollbeta:hardsub=en-US,None` #### vikichannel diff --git a/yt_dlp/extractor/crunchyroll.py b/yt_dlp/extractor/crunchyroll.py index bacdb851535..fccf0548036 100644 --- a/yt_dlp/extractor/crunchyroll.py +++ b/yt_dlp/extractor/crunchyroll.py @@ -801,7 +801,9 @@ def _real_extract(self, url): if episode_response.get('is_premium_only') and not episode_response.get('playback'): raise ExtractorError('This video is for premium members only.', expected=True) - stream_response = self._download_json(episode_response['playback'], display_id, note='Retrieving stream info') + stream_response = self._download_json( + f'{api_domain}{episode_response["__links__"]["streams"]["href"]}', display_id, + note='Retrieving stream info', query=params) get_streams = lambda name: (traverse_obj(stream_response, name) or {}).items() requested_hardsubs = [('' if val == 'none' else val) for val in (self._configuration_arg('hardsub') or ['none'])]
### Description of your *pull request* and other information Fixes #4452 Somewhat alters the usable values of the `format` extractor arg, but the values I expect might actually be in use (`adaptive_hls` and `adaptive_dash`) don't seem to change behavior. It's possible that while we're making yt-dlp follow the browser's behavior more exactly, we should also switch the default `format` to `adaptive_dash`? That is apparently what the browser actually uses, currently at least, but I'm not sure it's worth changing. Unfortunately, I don't recall what the thought behind using `adaptive_hls` was at the time. ### Before submitting a *pull request* make sure you have: - [x] At least skimmed through [contributing guidelines](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) including [yt-dlp coding conventions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#yt-dlp-coding-conventions) - [x] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests - [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) and [ran relevant tests](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) ### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options: - [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/) - [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence) ### What is the purpose of your *pull request*? - [x] Fix or improvement to an extractor (Make sure to add/update tests) - [ ] New extractor ([Piracy websites will not be accepted](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#is-the-website-primarily-used-for-piracy)) - [ ] Core bug fix/improvement - [ ] New feature (It is strongly [recommended to open an issue first](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#adding-new-feature-or-making-overarching-changes))
https://api.github.com/repos/yt-dlp/yt-dlp/pulls/4555
2022-08-04T15:48:42Z
2022-08-04T18:05:59Z
2022-08-04T18:05:59Z
2022-08-04T18:14:16Z
512
yt-dlp/yt-dlp
7,737
Always add guest wifi qr code entity in AVM Fritz!Tools
diff --git a/homeassistant/components/fritz/image.py b/homeassistant/components/fritz/image.py index 597dd8ddb5390f..d14c562bd7652b 100644 --- a/homeassistant/components/fritz/image.py +++ b/homeassistant/components/fritz/image.py @@ -30,9 +30,6 @@ async def async_setup_entry( avm_wrapper.fritz_guest_wifi.get_info ) - if not guest_wifi_info.get("NewEnable"): - return - async_add_entities( [ FritzGuestWifiQRImage( diff --git a/tests/components/fritz/snapshots/test_image.ambr b/tests/components/fritz/snapshots/test_image.ambr index b64d8601a8aa42..452aab2a887230 100644 --- a/tests/components/fritz/snapshots/test_image.ambr +++ b/tests/components/fritz/snapshots/test_image.ambr @@ -8,6 +8,9 @@ # name: test_image_entity[fc_data0] b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x94\x00\x00\x00\x94\x01\x00\x00\x00\x00]G=y\x00\x00\x00\xf5IDATx\xda\xedVQ\x0eC!\x0c"\xbb@\xef\x7fKn\xe0\x00\xfd\xdb\xcf6\xf9|\xc6\xc4\xc6\x0f\xd2\x02\xadb},\xe2\xb9\xfb\xe5\x0e\xc0(\x18\xf2\x84/|\xaeo\xef\x847\xda\x14\x1af\x1c\xde\xe3\x19(X\tKxN\xb2\x87\x17j9\x1d<m\x01)\xbbU\xe1\xcf\xa2\x9eU\xd1\xd7\xcbe.\xcc\xf6\xd05\x7f\x02\x82\x1d\xb8\x1c\xdd\xd7\x1b\xef\t\x90\x13an\xf1b\x13P\xb9\x01\xac\xd4k\xee\x04\xa5.\xd1.\xe8+\x90\x88\x1b\x0e\x0b\xfe\x03\xd3 \xd4Y\xe0\xef\x10\xa7z\xe3\xe9F\x7f(?;\xc6\x80\x95\xfc\xe2\x13\x1ddC\x0fZ\x07\xec6f\xc3/.\x94i\xddi\xf8\x8f\x9b9k<\x8d\xf9\xeci`\xfb\xed\xf1R\x99/g\x9e\xaei\xcc\x830\xb7\xf6\x83\xd4\xf1_\x9e\x0f\xf7.*\xf3\xc0\xf6\x1b\x86\xbf\x12\xde\xac\xed\x16\xb0\xf4\xbe\x9dO\x02\xd0\xe1\x8f\xee^\x0f|v\xf4\x15 \x13\xaf\x8e\xff\x9e\x7f\xe2\x9fwo\x06\xf4\x81v\xeb\xb3\xcc\xc3\x00\x00\x00\x00IEND\xaeB`\x82' # --- +# name: test_image_entity[fc_data1] + b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x94\x00\x00\x00\x94\x01\x00\x00\x00\x00]G=y\x00\x00\x00\xf5IDATx\xda\xedVQ\x0eC!\x0c"\xbb@\xef\x7fKn\xe0\x00\xfd\xdb\xcf6\xf9|\xc6\xc4\xc6\x0f\xd2\x02\xadb},\xe2\xb9\xfb\xe5\x0e\xc0(\x18\xf2\x84/|\xaeo\xef\x847\xda\x14\x1af\x1c\xde\xe3\x19(X\tKxN\xb2\x87\x17j9\x1d<m\x01)\xbbU\xe1\xcf\xa2\x9eU\xd1\xd7\xcbe.\xcc\xf6\xd05\x7f\x02\x82\x1d\xb8\x1c\xdd\xd7\x1b\xef\t\x90\x13an\xf1b\x13P\xb9\x01\xac\xd4k\xee\x04\xa5.\xd1.\xe8+\x90\x88\x1b\x0e\x0b\xfe\x03\xd3 \xd4Y\xe0\xef\x10\xa7z\xe3\xe9F\x7f(?;\xc6\x80\x95\xfc\xe2\x13\x1ddC\x0fZ\x07\xec6f\xc3/.\x94i\xddi\xf8\x8f\x9b9k<\x8d\xf9\xeci`\xfb\xed\xf1R\x99/g\x9e\xaei\xcc\x830\xb7\xf6\x83\xd4\xf1_\x9e\x0f\xf7.*\xf3\xc0\xf6\x1b\x86\xbf\x12\xde\xac\xed\x16\xb0\xf4\xbe\x9dO\x02\xd0\xe1\x8f\xee^\x0f|v\xf4\x15 \x13\xaf\x8e\xff\x9e\x7f\xe2\x9fwo\x06\xf4\x81v\xeb\xb3\xcc\xc3\x00\x00\x00\x00IEND\xaeB`\x82' +# --- # name: test_image_update[fc_data0] b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x94\x00\x00\x00\x94\x01\x00\x00\x00\x00]G=y\x00\x00\x00\xf9IDATx\xda\xedV\xc1\r\xc40\x0cB\xb7\x80\xf7\xdf\x92\r\\\xb0\xfb\xeb\xe7\xaa\xf0l\xd4\xaaQ\x1e\xc8\x06L\x8a~,\xe2;{s\x06\xa0\xd8z9\xdb\xe6\x0f\xcf\xf5\xef\x99\xf0J\x0f\x85\x86*o\xcf\xf1\x04\x04\x1ak\xb6\x11<\x97\xa6\xa6\x83x&\xb32x\x86\xa4\xab\xeb\x08\x7f\x16\xf5^\x11}\xbd$\xb0\x80k=t\xcc\x9f\xfdg\xfa\xda\xe5\x1d\xe3\t\x8br_\xdb3\x85D}\x063u\x00\x03\xfd\xb6<\xe2\xeaL\xa2y<\xae\xcf\xe3!\x895\xbfL\xf07\x0eT]n7\xc3_{0\xd4\xefx:\xc0\x1f\xc6}\x9e\xb7\x84\x1e\xfb\x91\x0e\x12\x84\t=z\xd2t\x07\x8e\x1d\xc9\x03\xc7\xa9G\xb7\x12\xf3&0\x176\x19\x98\xc8g\x8b;\x88@\xc6\x7f\x93\xa9\xfbVD\xdf\x193\xde9\x1d\xd1\xc3\x9ev`E\xf2oo\xa3\xe1/\x847\xad\x8a?0t\xffN\xb4p\xf35\xf3\x7f\x80\xad\xafS\xf7\x1bD`D\x8f\xef\x9f\xf0\xe0\xec\x02\xa4\xc0\x83\x92\xcf\xf3\xf9a\x00\x00\x00\x00IEND\xaeB`\x82' # --- diff --git 
a/tests/components/fritz/test_image.py b/tests/components/fritz/test_image.py index e65fcb85457a9d..cbcbded56920af 100644 --- a/tests/components/fritz/test_image.py +++ b/tests/components/fritz/test_image.py @@ -60,11 +60,31 @@ GUEST_WIFI_DISABLED: dict[str, dict] = { "WLANConfiguration0": {}, - "WLANConfiguration1": {"GetInfo": {"NewEnable": False}}, + "WLANConfiguration1": { + "GetInfo": { + "NewEnable": False, + "NewStatus": "Up", + "NewSSID": "GuestWifi", + "NewBeaconType": "11iandWPA3", + "NewX_AVM-DE_PossibleBeaconTypes": "None,11i,11iandWPA3", + "NewStandard": "ax", + "NewBSSID": "1C:ED:6F:12:34:13", + }, + "GetSSID": { + "NewSSID": "GuestWifi", + }, + "GetSecurityKeys": {"NewKeyPassphrase": "1234567890"}, + }, } -@pytest.mark.parametrize(("fc_data"), [({**MOCK_FB_SERVICES, **GUEST_WIFI_ENABLED})]) +@pytest.mark.parametrize( + ("fc_data"), + [ + ({**MOCK_FB_SERVICES, **GUEST_WIFI_ENABLED}), + ({**MOCK_FB_SERVICES, **GUEST_WIFI_DISABLED}), + ], +) async def test_image_entity( hass: HomeAssistant, hass_client: ClientSessionGenerator, @@ -150,23 +170,3 @@ async def test_image_update( assert resp_body != resp_body_new assert resp_body_new == snapshot - - -@pytest.mark.parametrize(("fc_data"), [({**MOCK_FB_SERVICES, **GUEST_WIFI_DISABLED})]) -async def test_image_guest_wifi_disabled( - hass: HomeAssistant, - hass_client: ClientSessionGenerator, - fc_class_mock, - fh_class_mock, -) -> None: - """Test image entities.""" - - entry = MockConfigEntry(domain=DOMAIN, data=MOCK_USER_DATA) - entry.add_to_hass(hass) - - assert await async_setup_component(hass, DOMAIN, {}) - await hass.async_block_till_done() - assert entry.state == ConfigEntryState.LOADED - - images = hass.states.async_all(IMAGE_DOMAIN) - assert len(images) == 0
<!-- You are amazing! Thanks for contributing to our project! Please, DO NOT DELETE ANY TEXT from this template! (unless instructed). --> ## Proposed change <!-- Describe the big picture of your changes here to communicate to the maintainers why we should accept this pull request. If it fixes a bug or resolves a feature request, be sure to link to that issue in the additional information section. --> The image entity showing the guest wifi qr code should always be created, even if the guest wifi is disabled at the moment, because the user may want to enable the guest wifi when needed. ## Type of change <!-- What type of change does your PR introduce to Home Assistant? NOTE: Please, check only 1! box! If your PR requires multiple boxes to be checked, you'll most likely need to split it into multiple PRs. This makes things easier and faster to code review. --> - [ ] Dependency upgrade - [x] Bugfix (non-breaking change which fixes an issue) - [ ] New integration (thank you!) - [ ] New feature (which adds functionality to an existing integration) - [ ] Deprecation (breaking change to happen in the future) - [ ] Breaking change (fix/feature causing existing functionality to break) - [ ] Code quality improvements to existing code or addition of tests ## Additional information <!-- Details are important, and help maintainers processing your PR. Please be sure to fill out additional details, if applicable. --> - This PR fixes or closes issue: fixes #96277 - This PR is related to issue: - Link to documentation pull request: ## Checklist <!-- Put an `x` in the boxes that apply. You can also fill these out after creating the PR. If you're unsure about any of them, don't hesitate to ask. We're here to help! This is simply a reminder of what we are going to look for before merging your code. --> - [x] The code change is tested and works locally. - [x] Local tests pass. **Your PR cannot be merged unless tests pass** - [x] There is no commented out code in this PR. - [x] I have followed the [development checklist][dev-checklist] - [x] I have followed the [perfect PR recommendations][perfect-pr] - [x] The code has been formatted using Black (`black --fast homeassistant tests`) - [ ] Tests have been added to verify that the new code works. If user exposed functionality or configuration variables are added/changed: - [ ] Documentation added/updated for [www.home-assistant.io][docs-repository] If the code communicates with devices, web services, or third-party tools: - [ ] The [manifest file][manifest-docs] has all fields filled out correctly. Updated and included derived files by running: `python3 -m script.hassfest`. - [ ] New or updated dependencies have been added to `requirements_all.txt`. Updated by running `python3 -m script.gen_requirements_all`. - [ ] For the updated dependencies - a link to the changelog, or at minimum a diff between library versions is added to the PR description. - [ ] Untested files have been added to `.coveragerc`. <!-- This project is very active and we have a high turnover of pull requests. Unfortunately, the number of incoming pull requests is higher than what our reviewers can review and merge so there is a long backlog of pull requests waiting for review. You can help here! By reviewing another pull request, you will help raise the code quality of that pull request and the final review will be faster. This way the general pace of pull request reviews will go up and your wait time will go down. When picking a pull request to review, try to choose one that hasn't yet been reviewed.
Thanks for helping out! --> To help with the load of incoming pull requests: - [ ] I have reviewed two other [open pull requests][prs] in this repository. [prs]: https://github.com/home-assistant/core/pulls?q=is%3Aopen+is%3Apr+-author%3A%40me+-draft%3Atrue+-label%3Awaiting-for-upstream+sort%3Acreated-desc+review%3Anone+-status%3Afailure <!-- Thank you for contributing <3 Below, some useful links you could explore: --> [dev-checklist]: https://developers.home-assistant.io/docs/development_checklist/ [manifest-docs]: https://developers.home-assistant.io/docs/creating_integration_manifest/ [quality-scale]: https://developers.home-assistant.io/docs/integration_quality_scale_index/ [docs-repository]: https://github.com/home-assistant/home-assistant.io [perfect-pr]: https://developers.home-assistant.io/docs/review-process/#creating-the-perfect-pr
https://api.github.com/repos/home-assistant/core/pulls/96435
2023-07-12T15:27:03Z
2023-07-12T18:54:49Z
2023-07-12T18:54:49Z
2023-07-14T16:02:01Z
2,329
home-assistant/core
38,974
Streamanity extractor
diff --git a/yt_dlp/extractor/extractors.py b/yt_dlp/extractor/extractors.py index bb1e21a07ad..471caae79da 100644 --- a/yt_dlp/extractor/extractors.py +++ b/yt_dlp/extractor/extractors.py @@ -1346,6 +1346,7 @@ StoryFireSeriesIE, ) from .streamable import StreamableIE +from .streamanity import StreamanityIE from .streamcloud import StreamcloudIE from .streamcz import StreamCZIE from .streetvoice import StreetVoiceIE diff --git a/yt_dlp/extractor/streamanity.py b/yt_dlp/extractor/streamanity.py new file mode 100644 index 00000000000..2e2d5eedf9c --- /dev/null +++ b/yt_dlp/extractor/streamanity.py @@ -0,0 +1,51 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor + + +class StreamanityIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?streamanity\.com/video/(?P<id>[A-Za-z0-9]+)' + _TESTS = [{ + 'url': 'https://streamanity.com/video/9DFPTnuYi8f2', + 'md5': '6ab171e8d4a02ad5dcbff6bea44cf5a1', + 'info_dict': { + 'id': '9DFPTnuYi8f2', + 'ext': 'mp4', + 'title': 'Bitcoin vs The Lighting Network', + 'thumbnail': r're:https://res\.cloudinary\.com/.+\.png', + 'description': '', + 'uploader': 'Tom Bombadil (Freddy78)', + } + }, { + 'url': 'https://streamanity.com/video/JktOUjSlfzTD', + 'md5': '31f131e28abd3377c38be586a59532dc', + 'info_dict': { + 'id': 'JktOUjSlfzTD', + 'ext': 'mp4', + 'title': 'Share data when you see it', + 'thumbnail': r're:https://res\.cloudinary\.com/.+\.png', + 'description': 'Reposting as data should be public and stored on blockchain', + 'uploader': 'digitalcurrencydaily', + } + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + video_info = self._download_json( + f'https://app.streamanity.com/api/video/{video_id}', video_id)['data']['video'] + + formats = self._extract_m3u8_formats( + f'https://stream.mux.com/{video_info["play_id"]}.m3u8?token={video_info["token"]}', + video_id, ext='mp4', m3u8_id='hls') + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': video_info['title'], + 'description': video_info.get('description'), + 'uploader': video_info.get('author_name'), + 'is_live': False, + 'thumbnail': video_info.get('thumb'), + 'formats': formats, + }
## Please follow the guide below - You will be asked some questions, please read them **carefully** and answer honestly - Put an `x` into all the boxes [ ] relevant to your *pull request* (like that [x]) - Use *Preview* tab to see how your *pull request* will actually look like --- ### Before submitting a *pull request* make sure you have: - [x] At least skimmed through [adding new extractor tutorial](https://github.com/ytdl-org/youtube-dl#adding-support-for-a-new-site) and [youtube-dl coding conventions](https://github.com/ytdl-org/youtube-dl#youtube-dl-coding-conventions) sections - [x] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests - [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) ### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options: - [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/) - [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence) ### What is the purpose of your *pull request*? - [ ] Bug fix - [ ] Improvement - [x] New extractor - [ ] New feature --- Added Streamanity support
https://api.github.com/repos/yt-dlp/yt-dlp/pulls/984
2021-09-15T17:52:05Z
2021-09-16T18:15:10Z
2021-09-16T18:15:10Z
2021-09-16T18:15:10Z
784
yt-dlp/yt-dlp
7,416
Subaru: Add Impreza/Crosstrek 2020
diff --git a/docs/CARS.md b/docs/CARS.md index 9c762ee7bf5446..e4b7b3cac3c3ed 100644 --- a/docs/CARS.md +++ b/docs/CARS.md @@ -147,7 +147,7 @@ | Škoda | Scala 2020 | Driver Assistance | Stock | 0mph | 0mph | | Škoda | Superb 2015-18 | Driver Assistance | Stock | 0mph | 0mph | | Subaru | Ascent 2019 | EyeSight | Stock | 0mph | 0mph | -| Subaru | Crosstrek 2018-19 | EyeSight | Stock | 0mph | 0mph | +| Subaru | Crosstrek 2018-20 | EyeSight | Stock | 0mph | 0mph | | Subaru | Forester 2019-21 | EyeSight | Stock | 0mph | 0mph | | Subaru | Impreza 2017-19 | EyeSight | Stock | 0mph | 0mph | | Volkswagen| Arteon 2021<sup>4</sup> | Driver Assistance | Stock | 0mph | 0mph | diff --git a/selfdrive/car/subaru/carcontroller.py b/selfdrive/car/subaru/carcontroller.py index 17b3e057f2d940..a2d0c2ebdda2ba 100644 --- a/selfdrive/car/subaru/carcontroller.py +++ b/selfdrive/car/subaru/carcontroller.py @@ -12,6 +12,7 @@ def __init__(self, dbc_name, CP, VM): self.cruise_button_prev = 0 self.steer_rate_limited = False + self.p = CarControllerParams(CP) self.packer = CANPacker(DBC[CP.carFingerprint]['pt']) def update(self, enabled, CS, frame, actuators, pcm_cancel_cmd, visual_alert, left_line, right_line, left_lane_depart, right_lane_depart): @@ -19,23 +20,23 @@ def update(self, enabled, CS, frame, actuators, pcm_cancel_cmd, visual_alert, le can_sends = [] # *** steering *** - if (frame % CarControllerParams.STEER_STEP) == 0: + if (frame % self.p.STEER_STEP) == 0: - apply_steer = int(round(actuators.steer * CarControllerParams.STEER_MAX)) + apply_steer = int(round(actuators.steer * self.p.STEER_MAX)) # limits due to driver torque new_steer = int(round(apply_steer)) - apply_steer = apply_std_steer_torque_limits(new_steer, self.apply_steer_last, CS.out.steeringTorque, CarControllerParams) + apply_steer = apply_std_steer_torque_limits(new_steer, self.apply_steer_last, CS.out.steeringTorque, self.p) self.steer_rate_limited = new_steer != apply_steer if not enabled: apply_steer = 0 if CS.CP.carFingerprint in PREGLOBAL_CARS: - can_sends.append(subarucan.create_preglobal_steering_control(self.packer, apply_steer, frame, CarControllerParams.STEER_STEP)) + can_sends.append(subarucan.create_preglobal_steering_control(self.packer, apply_steer, frame, self.p.STEER_STEP)) else: - can_sends.append(subarucan.create_steering_control(self.packer, apply_steer, frame, CarControllerParams.STEER_STEP)) + can_sends.append(subarucan.create_steering_control(self.packer, apply_steer, frame, self.p.STEER_STEP)) self.apply_steer_last = apply_steer diff --git a/selfdrive/car/subaru/interface.py b/selfdrive/car/subaru/interface.py index 86bba542cce108..f5b0886a95eaec 100644 --- a/selfdrive/car/subaru/interface.py +++ b/selfdrive/car/subaru/interface.py @@ -45,6 +45,16 @@ def get_params(candidate, fingerprint=gen_empty_fingerprint(), car_fw=None): ret.lateralTuning.pid.kiBP, ret.lateralTuning.pid.kpBP = [[0., 20.], [0., 20.]] ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.2, 0.3], [0.02, 0.03]] + if candidate == CAR.IMPREZA_2020: + ret.mass = 1480. + STD_CARGO_KG + ret.wheelbase = 2.67 + ret.centerToFront = ret.wheelbase * 0.5 + ret.steerRatio = 17 # learned, 14 stock + ret.steerActuatorDelay = 0.1 + ret.lateralTuning.pid.kf = 0.00005 + ret.lateralTuning.pid.kiBP, ret.lateralTuning.pid.kpBP = [[0., 14., 23.], [0., 14., 23.]] + ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.045, 0.042, 0.20], [0.04, 0.035, 0.045]] + if candidate == CAR.FORESTER: ret.mass = 1568. 
+ STD_CARGO_KG ret.wheelbase = 2.67 diff --git a/selfdrive/car/subaru/values.py b/selfdrive/car/subaru/values.py index 39f8e5e5989bf1..4f8761588e31bb 100644 --- a/selfdrive/car/subaru/values.py +++ b/selfdrive/car/subaru/values.py @@ -5,17 +5,22 @@ Ecu = car.CarParams.Ecu class CarControllerParams: - STEER_MAX = 2047 # max_steer 4095 - STEER_STEP = 2 # how often we update the steer cmd - STEER_DELTA_UP = 50 # torque increase per refresh, 0.8s to max - STEER_DELTA_DOWN = 70 # torque decrease per refresh - STEER_DRIVER_ALLOWANCE = 60 # allowed driver torque before start limiting - STEER_DRIVER_MULTIPLIER = 10 # weight driver torque heavily - STEER_DRIVER_FACTOR = 1 # from dbc + def __init__(self, CP): + if CP.carFingerprint == CAR.IMPREZA_2020: + self.STEER_MAX = 1439 + else: + self.STEER_MAX = 2047 + self.STEER_STEP = 2 # how often we update the steer cmd + self.STEER_DELTA_UP = 50 # torque increase per refresh, 0.8s to max + self.STEER_DELTA_DOWN = 70 # torque decrease per refresh + self.STEER_DRIVER_ALLOWANCE = 60 # allowed driver torque before start limiting + self.STEER_DRIVER_MULTIPLIER = 10 # weight driver torque heavily + self.STEER_DRIVER_FACTOR = 1 # from dbc class CAR: ASCENT = "SUBARU ASCENT LIMITED 2019" IMPREZA = "SUBARU IMPREZA LIMITED 2019" + IMPREZA_2020 = "SUBARU IMPREZA SPORT 2020" FORESTER = "SUBARU FORESTER 2019" FORESTER_PREGLOBAL = "SUBARU FORESTER 2017 - 2018" LEGACY_PREGLOBAL = "SUBARU LEGACY 2015 - 2018" @@ -30,6 +35,14 @@ class CAR: CAR.IMPREZA: [{ 2: 8, 64: 8, 65: 8, 72: 8, 73: 8, 280: 8, 281: 8, 290: 8, 312: 8, 313: 8, 314: 8, 315: 8, 316: 8, 326: 8, 372: 8, 544: 8, 545: 8, 546: 8, 552: 8, 554: 8, 557: 8, 576: 8, 577: 8, 722: 8, 801: 8, 802: 8, 805: 8, 808: 8, 811: 8, 816: 8, 826: 8, 827: 8, 837: 8, 838: 8, 839: 8, 842: 8, 912: 8, 915: 8, 940: 8, 1614: 8, 1617: 8, 1632: 8, 1650: 8, 1657: 8, 1658: 8, 1677: 8, 1697: 8, 1722: 8, 1743: 8, 1759: 8, 1786: 5, 1787: 5, 1788: 8, 1809: 8, 1813: 8, 1817: 8, 1821: 8, 1840: 8, 1848: 8, 1924: 8, 1932: 8, 1952: 8, 1960: 8 }], + CAR.IMPREZA_2020: [{ + # SUBARU CROSSTREK SPORT 2020 + 2: 8, 64: 8, 65: 8, 72: 8, 73: 8, 280: 8, 281: 8, 282: 8, 290: 8, 312: 8, 313: 8, 314: 8, 315: 8, 316: 8, 326: 8, 372: 8, 544: 8, 545: 8, 546: 8, 552: 8, 554: 8, 557: 8, 576: 8, 577: 8, 722: 8, 801: 8, 802: 8, 803: 8, 805: 8, 808: 8, 816: 8, 826: 8, 837: 8, 838: 8, 839: 8, 842: 8, 912: 8, 915: 8, 940: 8, 1617: 8, 1632: 8, 1650: 8, 1677: 8, 1697: 8, 1722: 8, 1743: 8, 1759: 8, 1786: 5, 1787: 5, 1788: 8, 1809: 8, 1813: 8, 1817: 8, 1821: 8, 1840: 8, 1848: 8, 1924: 8, 1932: 8, 1952: 8, 1960: 8, 1968: 8, 1976: 8, 2015: 8, 2016: 8, 2024: 8 + }, + # IMPREZA 2020 + { + 2: 8, 64: 8, 65: 8, 72: 8, 73: 8, 280: 8, 281: 8, 282: 8, 290: 8, 312: 8, 313: 8, 314: 8, 315: 8, 316: 8, 326: 8, 544: 8, 545: 8, 546: 8, 554: 8, 557: 8, 576: 8, 577: 8, 801: 8, 802: 8, 803: 8, 805: 8, 808: 8, 816: 8, 826: 8, 837: 8, 838: 8, 839: 8, 842: 8, 912: 8, 915: 8, 940: 8, 1614: 8, 1617: 8, 1632: 8, 1657: 8, 1658: 8, 1677: 8, 1697: 8, 1743: 8, 1759: 8, 1786: 5, 1787: 5, 1788: 8, 1809: 8, 1813: 8, 1817: 8, 1821: 8, 1840: 8, 1848: 8, 1924: 8, 1932: 8, 1952: 8, 1960: 8 + }], CAR.FORESTER: [{ # Forester 2019-2020 2: 8, 64: 8, 65: 8, 72: 8, 73: 8, 280: 8, 281: 8, 282: 8, 290: 8, 312: 8, 313: 8, 314: 8, 315: 8, 316: 8, 326: 8, 372: 8, 544: 8, 545: 8, 546: 8, 552: 8, 554: 8, 557: 8, 576: 8, 577: 8, 722: 8, 801: 8, 802: 8, 803: 8, 805: 8, 808: 8, 811: 8, 816: 8, 826: 8, 837: 8, 838: 8, 839: 8, 842: 8, 912: 8, 915: 8, 940: 8, 961: 8, 984: 8, 1614: 8, 1617: 8, 1632: 8, 1650: 8, 1651: 8, 1657: 8, 
1658: 8, 1677: 8, 1697: 8, 1698: 8, 1722: 8, 1743: 8, 1759: 8, 1787: 5, 1788: 8, 1809: 8, 1813: 8, 1817: 8, 1821: 8, 1840: 8, 1848: 8, 1924: 8, 1932: 8, 1952: 8, 1960: 8 @@ -71,6 +84,7 @@ class CAR: STEER_THRESHOLD = { CAR.ASCENT: 80, CAR.IMPREZA: 80, + CAR.IMPREZA_2020: 80, CAR.FORESTER: 80, CAR.FORESTER_PREGLOBAL: 75, CAR.LEGACY_PREGLOBAL: 75, @@ -81,6 +95,7 @@ class CAR: DBC = { CAR.ASCENT: dbc_dict('subaru_global_2017_generated', None), CAR.IMPREZA: dbc_dict('subaru_global_2017_generated', None), + CAR.IMPREZA_2020: dbc_dict('subaru_global_2017_generated', None), CAR.FORESTER: dbc_dict('subaru_global_2017_generated', None), CAR.FORESTER_PREGLOBAL: dbc_dict('subaru_forester_2017_generated', None), CAR.LEGACY_PREGLOBAL: dbc_dict('subaru_outback_2015_generated', None), diff --git a/selfdrive/test/test_routes.py b/selfdrive/test/test_routes.py index a59690e94bdd61..5fa7e30e5d728d 100755 --- a/selfdrive/test/test_routes.py +++ b/selfdrive/test/test_routes.py @@ -171,6 +171,7 @@ TestRoute("3c8f0c502e119c1c|2020-06-30--12-58-02", SUBARU.ASCENT), TestRoute("c321c6b697c5a5ff|2020-06-23--11-04-33", SUBARU.FORESTER), TestRoute("791340bc01ed993d|2019-03-10--16-28-08", SUBARU.IMPREZA), + TestRoute("8bf7e79a3ce64055|2021-05-24--09-36-27", SUBARU.IMPREZA_2020), # Dashcam TestRoute("95441c38ae8c130e|2020-06-08--12-10-17", SUBARU.FORESTER_PREGLOBAL), # Dashcam
**Checklist** - [x] added to README - [x] test route added to [test_routes.py](https://github.com/commaai/openpilot/blob/master/selfdrive/test/test_routes.py) - [x] route with openpilot: 8bf7e79a3ce64055|2021-05-24--09-36-27 - [x] route with stock system: 8bf7e79a3ce64055|2021-05-24--09-58-54 2020+ Crosstrek and Impreza require lower steering torque limit (1439) and different tuning than previous model years. Exceeding the steering torque limit causes Eyesight fault. Prerequisite: https://github.com/commaai/panda/pull/650 Routes provided by Discord user Crispin
https://api.github.com/repos/commaai/openpilot/pulls/21011
2021-05-24T09:42:52Z
2021-12-15T23:13:32Z
2021-12-15T23:13:32Z
2022-04-07T14:00:55Z
3,991
commaai/openpilot
9,739
[peertube] Extract files also from `streamingPlaylists`
diff --git a/youtube_dl/extractor/peertube.py b/youtube_dl/extractor/peertube.py index c39d12728d4..c2ca71c71d3 100644 --- a/youtube_dl/extractor/peertube.py +++ b/youtube_dl/extractor/peertube.py @@ -450,6 +450,18 @@ class PeerTubeIE(InfoExtractor): 'tags': ['framasoft', 'peertube'], 'categories': ['Science & Technology'], } + }, { + # Issue #26002 + 'url': 'peertube:spacepub.space:d8943b2d-8280-497b-85ec-bc282ec2afdc', + 'info_dict': { + 'id': 'd8943b2d-8280-497b-85ec-bc282ec2afdc', + 'ext': 'mp4', + 'title': 'Dot matrix printer shell demo', + 'uploader_id': '3', + 'timestamp': 1587401293, + 'upload_date': '20200420', + 'uploader': 'Drew DeVault', + } }, { 'url': 'https://peertube.tamanoir.foucry.net/videos/watch/0b04f13d-1e18-4f1d-814e-4979aa7c9c44', 'only_matching': True, @@ -526,7 +538,15 @@ def _real_extract(self, url): title = video['name'] formats = [] - for file_ in video['files']: + files = video.get('files') or [] + for playlist in (video.get('streamingPlaylists') or []): + if not isinstance(playlist, dict): + continue + playlist_files = playlist.get('files') + if not (playlist_files and isinstance(playlist_files, list)): + continue + files.extend(playlist_files) + for file_ in files: if not isinstance(file_, dict): continue file_url = url_or_none(file_.get('fileUrl'))
Another pull request in continuation of #26302. Re-requested due to DMCA take-down.
https://api.github.com/repos/ytdl-org/youtube-dl/pulls/27728
2021-01-08T18:41:01Z
2021-01-08T20:09:39Z
2021-01-08T20:09:39Z
2021-01-09T15:09:43Z
482
ytdl-org/youtube-dl
50,302
ENH: display the original exception
diff --git a/setup.py b/setup.py index 9a92d6a6667c7..0e79dda3b5a59 100755 --- a/setup.py +++ b/setup.py @@ -3,8 +3,6 @@ # Copyright (C) 2007-2009 Cournapeau David <cournape@gmail.com> # 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr> # License: 3-clause BSD -import subprocess - descr = """A set of python modules for machine learning and data mining""" import sys @@ -12,6 +10,7 @@ import shutil from distutils.command.clean import clean as Clean from pkg_resources import parse_version +import traceback if sys.version_info[0] < 3: import __builtin__ as builtins @@ -151,6 +150,7 @@ def get_scipy_status(): scipy_version) >= parse_version(scipy_min_version) scipy_status['version'] = scipy_version except ImportError: + traceback.print_exc() scipy_status['up_to_date'] = False scipy_status['version'] = "" return scipy_status @@ -170,6 +170,7 @@ def get_numpy_status(): numpy_version) >= parse_version(numpy_min_version) numpy_status['version'] = numpy_version except ImportError: + traceback.print_exc() numpy_status['up_to_date'] = False numpy_status['version'] = "" return numpy_status
Display the original traceback when importing numpy or scipy fails in the setup.py. Fixes #5905. Ping @amueller @ogrisel: is this a candidate for backport to a 0.17.1 (will we have a 0.17.1)?
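A minimal, self-contained sketch of the pattern this PR applies (illustrative only; in the real change it lives in `get_numpy_status()` / `get_scipy_status()` inside setup.py, and the function name here is hypothetical):

```python
import traceback

def get_dependency_status(module_name):
    """Report whether a dependency is importable, printing why when it is not."""
    status = {"available": False, "version": ""}
    try:
        module = __import__(module_name)
        status["available"] = True
        status["version"] = getattr(module, "__version__", "")
    except ImportError:
        # Show the original failure instead of silently reporting "missing";
        # this is the behaviour the PR adds for numpy/scipy in setup.py.
        traceback.print_exc()
    return status

if __name__ == "__main__":
    print(get_dependency_status("numpy"))
```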
https://api.github.com/repos/scikit-learn/scikit-learn/pulls/5906
2015-11-23T07:54:20Z
2016-09-14T16:36:43Z
2016-09-14T16:36:43Z
2016-09-14T16:36:43Z
330
scikit-learn/scikit-learn
46,728
Added more ID word in common-columns.txt
diff --git a/data/txt/common-columns.txt b/data/txt/common-columns.txt index a3a52ce062c..1b2d7cbfb01 100644 --- a/data/txt/common-columns.txt +++ b/data/txt/common-columns.txt @@ -2674,6 +2674,7 @@ jeda jenis jml judul +jumlah kata_kunci kata_sandi katakunci @@ -2686,6 +2687,7 @@ kunci lahir nama nama_akun +nama_ibu_kandung nama_pengguna namaakun namapengguna @@ -2695,6 +2697,7 @@ pengguna penjelasan perusahaan ponsel +profesi ruang sandi soal @@ -2702,6 +2705,7 @@ surat_elektronik surel tanggal tanggal_lahir +telepon tempat tempat_lahir tmp_lahir
https://api.github.com/repos/sqlmapproject/sqlmap/pulls/4852
2021-10-08T06:56:19Z
2021-10-08T14:18:25Z
2021-10-08T14:18:25Z
2021-10-08T14:18:25Z
222
sqlmapproject/sqlmap
14,967
Bump @adobe/css-tools from 4.3.1 to 4.3.2 in /frontend
diff --git a/frontend/yarn.lock b/frontend/yarn.lock index e1de83f74b9f..6b7de6e5ab9d 100644 --- a/frontend/yarn.lock +++ b/frontend/yarn.lock @@ -26,9 +26,9 @@ tunnel "^0.0.6" "@adobe/css-tools@^4.0.1": - version "4.3.1" - resolved "https://registry.yarnpkg.com/@adobe/css-tools/-/css-tools-4.3.1.tgz#abfccb8ca78075a2b6187345c26243c1a0842f28" - integrity sha512-/62yikz7NLScCGAAST5SHdnjaDJQBDq0M2muyRTpf2VQhw6StBg2ALiu73zSJQ4fMVLA+0uBhBHAle7Wg+2kSg== + version "4.3.2" + resolved "https://registry.yarnpkg.com/@adobe/css-tools/-/css-tools-4.3.2.tgz#a6abc715fb6884851fca9dad37fc34739a04fd11" + integrity sha512-DA5a1C0gD/pLOvhv33YMrbf2FK3oUzwNl9oOJqE4XVjuEtt6XIakRcsd7eLiOSPkp1kTRQGICTA8cKra/vFbjw== "@ampproject/remapping@^2.1.0": version "2.2.0"
Bumps [@adobe/css-tools](https://github.com/adobe/css-tools) from 4.3.1 to 4.3.2. <details> <summary>Changelog</summary> <p><em>Sourced from <a href="https://github.com/adobe/css-tools/blob/main/History.md"><code>@​adobe/css-tools</code>'s changelog</a>.</em></p> <blockquote> <h1>4.3.2 / 2023-11-28</h1> <ul> <li>Fix redos vulnerability with specific crafted css string - CVE-2023-48631</li> <li>Fix Problem parsing with :is() and nested :nth-child() <a href="https://redirect.github.com/adobe/css-tools/issues/211">#211</a></li> </ul> </blockquote> </details> <details> <summary>Commits</summary> <ul> <li>See full diff in <a href="https://github.com/adobe/css-tools/commits">compare view</a></li> </ul> </details> <br /> [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=@adobe/css-tools&package-manager=npm_and_yarn&previous-version=4.3.1&new-version=4.3.2)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) --- <details> <summary>Dependabot commands and options</summary> <br /> You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show <dependency name> ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself) You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/streamlit/streamlit/network/alerts). </details>
https://api.github.com/repos/streamlit/streamlit/pulls/7791
2023-12-01T05:18:22Z
2023-12-01T08:58:56Z
2023-12-01T08:58:56Z
2023-12-01T08:58:59Z
367
streamlit/streamlit
22,190
[Serve] Add gRPC util
diff --git a/python/ray/serve/BUILD b/python/ray/serve/BUILD index 68c057e623b2b..5cd110495c7e5 100644 --- a/python/ray/serve/BUILD +++ b/python/ray/serve/BUILD @@ -129,6 +129,14 @@ py_test( deps = [":serve_lib"], ) +py_test( + name = "test_grpc_util", + size = "small", + srcs = serve_tests_srcs, + tags = ["exclusive", "team:serve"], + deps = [":serve_lib"], +) + py_test( name = "test_advanced", size = "small", diff --git a/python/ray/serve/_private/grpc_util.py b/python/ray/serve/_private/grpc_util.py new file mode 100644 index 0000000000000..a47031d608f5c --- /dev/null +++ b/python/ray/serve/_private/grpc_util.py @@ -0,0 +1,80 @@ +import grpc +from typing import Sequence +from grpc.aio._server import Server + + +class gRPCServer(Server): + """Custom gRPC server to override gRPC method methods. + + Original implementation see: https://github.com/grpc/grpc/blob/ + 60c1701f87cacf359aa1ad785728549eeef1a4b0/src/python/grpcio/grpc/aio/_server.py + """ + + def __init__(self, service_handler_factory, *args, **kwargs): + super().__init__(*args, **kwargs) + self.service_handler_factory = service_handler_factory + self.generic_rpc_handlers = [] + + def add_generic_rpc_handlers( + self, generic_rpc_handlers: Sequence[grpc.GenericRpcHandler] + ): + """Override generic_rpc_handlers before adding to the gRPC server. + + This function will override all user defined handlers to have + 1. None `response_serializer` so the server can pass back the + raw protobuf bytes to the user. + 2. `unary_unary` is always calling the unary function generated via + `self.service_handler_factory` + 3. `unary_stream` is always calling the streaming function generated via + `self.service_handler_factory` + """ + serve_rpc_handlers = {} + rpc_handler = generic_rpc_handlers[0] + for service_method, method_handler in rpc_handler._method_handlers.items(): + serve_method_handler = method_handler._replace( + response_serializer=None, + unary_unary=self.service_handler_factory( + service_method=service_method, + stream=False, + ), + unary_stream=self.service_handler_factory( + service_method=service_method, + stream=True, + ), + ) + serve_rpc_handlers[service_method] = serve_method_handler + generic_rpc_handlers[0]._method_handlers = serve_rpc_handlers + self.generic_rpc_handlers.append(generic_rpc_handlers) + super().add_generic_rpc_handlers(generic_rpc_handlers) + + +def create_serve_grpc_server(service_handler_factory): + """Custom function to create Serve's gRPC server. + + This function works similar to `grpc.server()`, but it creates a Serve defined + gRPC server in order to override the `unary_unary` and `unary_stream` methods + + See: https://grpc.github.io/grpc/python/grpc.html#grpc.server + """ + return gRPCServer( + thread_pool=None, + generic_handlers=(), + interceptors=(), + options=(), + maximum_concurrent_rpcs=None, + compression=None, + service_handler_factory=service_handler_factory, + ) + + +class DummyServicer: + """Dummy servicer for gRPC server to call on. + + This is a dummy class that just pass through when calling on any method. + User defined servicer function will attempt to add the method on this class to the + gRPC server, but our gRPC server will override the caller to call gRPCProxy. + """ + + def __getattr__(self, attr): + # No-op pass through. Just need this to act as the callable. 
+ pass diff --git a/python/ray/serve/tests/test_grpc_util.py b/python/ray/serve/tests/test_grpc_util.py new file mode 100644 index 0000000000000..4ef0b2f60bc30 --- /dev/null +++ b/python/ray/serve/tests/test_grpc_util.py @@ -0,0 +1,109 @@ +import pytest +from typing import Callable +import grpc +from google.protobuf.any_pb2 import Any as AnyProto + +from ray.serve._private.grpc_util import ( + create_serve_grpc_server, + DummyServicer, + gRPCServer, +) + + +def fake_service_handler_factory(service_method: str, stream: bool) -> Callable: + def foo() -> bytes: + return f"{'stream' if stream else 'unary'} call from {service_method}".encode() + + return foo + + +def test_dummy_servicer_can_take_any_methods(): + """Test an instance of DummyServicer can be called with any method name without + error. + + When dummy_servicer is called with any custom defined methods, it won't raise error. + """ + dummy_servicer = DummyServicer() + dummy_servicer.foo + dummy_servicer.bar + dummy_servicer.baz + dummy_servicer.my_method + dummy_servicer.Predict + + +def test_create_serve_grpc_server(): + """Test `create_serve_grpc_server()` creates the correct server. + + The server created by `create_serve_grpc_server()` should be an instance of + Serve defined `gRPCServer`. Also, the handler factory passed with the function + should be used to initialize the `gRPCServer`. + """ + grpc_server = create_serve_grpc_server( + service_handler_factory=fake_service_handler_factory + ) + assert isinstance(grpc_server, gRPCServer) + assert grpc_server.service_handler_factory == fake_service_handler_factory + + +def test_grpc_server(): + """Test `gRPCServer` did the correct overrides. + + When a add_servicer_to_server function is called on an instance of `gRPCServer`, + it correctly overrides `response_serializer` to None, and `unary_unary` and + `unary_stream` to be generated from the factory function. + """ + service_name = "ray.serve.ServeAPIService" + method_name = "ServeRoutes" + + def add_test_servicer_to_server(servicer, server): + rpc_method_handlers = { + method_name: grpc.unary_unary_rpc_method_handler( + servicer.ServeRoutes, + request_deserializer=AnyProto.FromString, + response_serializer=AnyProto.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + service_name, rpc_method_handlers + ) + server.add_generic_rpc_handlers((generic_handler,)) + + grpc_server = gRPCServer( + thread_pool=None, + generic_handlers=(), + interceptors=(), + options=(), + maximum_concurrent_rpcs=None, + compression=None, + service_handler_factory=fake_service_handler_factory, + ) + dummy_servicer = DummyServicer() + + # Ensure `generic_rpc_handlers` is not populated before calling + # the add_servicer_to_server function. + assert grpc_server.generic_rpc_handlers == [] + + add_test_servicer_to_server(dummy_servicer, grpc_server) + + # `generic_rpc_handlers` should be populated after add_servicer_to_server is called. + assert len(grpc_server.generic_rpc_handlers) == 1 + + # The populated rpc handler should have the correct service name. + rpc_handler = grpc_server.generic_rpc_handlers[0][0] + assert rpc_handler.service_name() == service_name + + # The populated method handlers should have the correct response_serializer, + # unary_unary, and unary_stream. 
+ service_method = f"/{service_name}/{method_name}" + method_handlers = rpc_handler._method_handlers.get(service_method) + assert method_handlers.response_serializer is None + assert method_handlers.unary_unary() == f"unary call from {service_method}".encode() + assert ( + method_handlers.unary_stream() == f"stream call from {service_method}".encode() + ) + + +if __name__ == "__main__": + import sys + + sys.exit(pytest.main(["-v", "-s", __file__]))
<!-- Thank you for your contribution! Please review https://github.com/ray-project/ray/blob/master/CONTRIBUTING.rst before opening a pull request. --> <!-- Please add a reviewer to the assignee section when you create a PR. If you don't have the access to it, we will shortly find a reviewer and assign them to your PR. --> ## Why are these changes needed? Add Serve's custom logic to help set up the gRPC server. ## Related issue number Fourth PR to close https://github.com/ray-project/ray/issues/38193 ## Checks - [ ] I've signed off every commit (by using the -s flag, i.e., `git commit -s`) in this PR. - [ ] I've run `scripts/format.sh` to lint the changes in this PR. - [ ] I've included any doc changes needed for https://docs.ray.io/en/master/. - [ ] I've added any new APIs to the API Reference. For example, if I added a method in Tune, I've added it in `doc/source/tune/api/` under the corresponding `.rst` file. - [ ] I've made sure the tests are passing. Note that there might be a few flaky tests, see the recent failures at https://flakey-tests.ray.io/ - Testing Strategy - [ ] Unit tests - [ ] Release tests - [ ] This PR is not tested :(
https://api.github.com/repos/ray-project/ray/pulls/38396
2023-08-14T05:09:15Z
2023-08-16T15:27:31Z
2023-08-16T15:27:31Z
2023-08-16T15:36:52Z
1,953
ray-project/ray
19,229
Update val.py `pad = 0.0 if task == 'speed' else 0.5`
diff --git a/val.py b/val.py index 6a0c95ac5ee..2cd8eba8744 100644 --- a/val.py +++ b/val.py @@ -147,8 +147,9 @@ def run(data, if not training: if device.type != 'cpu': model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once + pad = 0.0 if task == 'speed' else 0.5 task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images - dataloader = create_dataloader(data[task], imgsz, batch_size, gs, single_cls, pad=0.5, rect=True, + dataloader = create_dataloader(data[task], imgsz, batch_size, gs, single_cls, pad=pad, rect=True, prefix=colorstr(f'{task}: '))[0] seen = 0
## 🛠️ PR Summary <sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)<sub> ### 🌟 Summary Improved dataloader padding configuration for speed tests in the YOLOv5 validation script. ### 📊 Key Changes - Added a conditional statement to set padding to 0.0 specifically when the task is 'speed'. - Ensured padding remains 0.5 for other tasks ('train', 'val', 'test') in the dataloader creation. ### 🎯 Purpose & Impact - 🎯 **Purpose**: The change is designed to optimize the dataloader configuration for speed testing by reducing unnecessary padding that can affect performance measurements. - 💡 **Impact**: This update will lead to more accurate speed benchmarks for users testing their YOLOv5 models, as it minimizes the computational load by eliminating excess padding during the speed test task. Model developers and users performing speed tests can expect more streamlined and precise performance metrics, aiding in the fine-tuning and evaluation of models.
https://api.github.com/repos/ultralytics/yolov5/pulls/5121
2021-10-11T06:19:30Z
2021-10-11T06:20:43Z
2021-10-11T06:20:43Z
2024-01-19T15:10:21Z
236
ultralytics/yolov5
24,956
Add output folder icons
diff --git a/javascript/hints.js b/javascript/hints.js index 59dd770c790..96cd24d5df7 100644 --- a/javascript/hints.js +++ b/javascript/hints.js @@ -15,6 +15,7 @@ titles = { "\u267b\ufe0f": "Reuse seed from last generation, mostly useful if it was randomed", "\u{1f3a8}": "Add a random artist to the prompt.", "\u2199\ufe0f": "Read generation parameters from prompt into user interface.", + "\uD83D\uDCC2": "Open images output directory", "Inpaint a part of image": "Draw a mask over an image, and the script will regenerate the masked area with content according to prompt", "SD upscale": "Upscale image normally, split result into tiles, improve each tile using img2img, merge whole image back", diff --git a/modules/ui.py b/modules/ui.py index e96109c9c59..3d2032b4d05 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -9,6 +9,8 @@ import sys import time import traceback +import platform +import subprocess as sp import numpy as np import torch @@ -60,7 +62,7 @@ def gr_show(visible=True): reuse_symbol = '\u267b\ufe0f' # ♻️ art_symbol = '\U0001f3a8' # 🎨 paste_symbol = '\u2199\ufe0f' # ↙ - +folder_symbol = '\uD83D\uDCC2' def plaintext_to_html(text): text = "<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>" @@ -449,6 +451,8 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): send_to_img2img = gr.Button('Send to img2img') send_to_inpaint = gr.Button('Send to inpaint') send_to_extras = gr.Button('Send to extras') + button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder' + open_txt2img_folder = gr.Button(folder_symbol, elem_id=button_id) with gr.Group(): html_info = gr.HTML() @@ -625,6 +629,8 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): img2img_send_to_img2img = gr.Button('Send to img2img') img2img_send_to_inpaint = gr.Button('Send to inpaint') img2img_send_to_extras = gr.Button('Send to extras') + button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder' + open_img2img_folder = gr.Button(folder_symbol, elem_id=button_id) with gr.Group(): html_info = gr.HTML() @@ -797,6 +803,8 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): html_info = gr.HTML() extras_send_to_img2img = gr.Button('Send to img2img') extras_send_to_inpaint = gr.Button('Send to inpaint') + button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else '' + open_extras_folder = gr.Button('Open output directory', elem_id=button_id) submit.click( fn=run_extras, @@ -905,6 +913,16 @@ def fun(): components = [] + def open_folder(f): + if not shared.cmd_opts.hide_ui_dir_config: + path = os.path.normpath(f) + if platform.system() == "Windows": + os.startfile(path) + elif platform.system() == "Darwin": + sp.Popen(["open", path]) + else: + sp.Popen(["xdg-open", path]) + def run_settings(*args): changed = 0 @@ -1048,6 +1066,24 @@ def run_settings(*args): outputs=[extras_image], ) + open_txt2img_folder.click( + fn=lambda: open_folder(opts.outdir_samples or opts.outdir_txt2img_samples), + inputs=[], + outputs=[], + ) + + open_img2img_folder.click( + fn=lambda: open_folder(opts.outdir_samples or opts.outdir_img2img_samples), + inputs=[], + outputs=[], + ) + + open_extras_folder.click( + fn=lambda: open_folder(opts.outdir_samples or opts.outdir_extras_samples), + inputs=[], + outputs=[], + ) + img2img_send_to_extras.click( fn=lambda x: image_from_url_text(x), _js="extract_image_from_gallery_extras", diff --git a/style.css b/style.css index 4054e2dfc83..8973ecfce58 100644 --- 
a/style.css +++ b/style.css @@ -1,5 +1,11 @@ .output-html p {margin: 0 0.5em;} +.row > *, +.row > .gr-form > * { + min-width: min(120px, 100%); + flex: 1 1 0%; +} + .performance { font-size: 0.85em; color: #444; @@ -43,13 +49,17 @@ margin-right: auto; } -#random_seed, #random_subseed, #reuse_seed, #reuse_subseed{ +#random_seed, #random_subseed, #reuse_seed, #reuse_subseed, #open_folder{ min-width: auto; flex-grow: 0; padding-left: 0.25em; padding-right: 0.25em; } +#hidden_element{ + display: none; +} + #seed_row, #subseed_row{ gap: 0.5rem; }
Adds icons on the first 3 tabs to directly open the corresponding images output directory.
https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/pulls/1190
2022-09-27T21:03:15Z
2022-09-29T09:10:00Z
2022-09-29T09:10:00Z
2022-10-02T20:52:19Z
1,363
AUTOMATIC1111/stable-diffusion-webui
39,833
Add Language Identification API
diff --git a/README.md b/README.md index 67548cb292..031658d6ff 100644 --- a/README.md +++ b/README.md @@ -813,6 +813,7 @@ API | Description | Auth | HTTPS | CORS | | [Cloudmersive Natural Language Processing](https://www.cloudmersive.com/nlp-api) | Natural language processing and text analysis | `apiKey` | Yes | Yes | | [Detect Language](https://detectlanguage.com/) | Detects text language | `apiKey` | Yes | Unknown | | [Google Cloud Natural](https://cloud.google.com/natural-language/docs/) | Natural language understanding technology, including sentiment, entity and syntax analysis | `apiKey` | Yes | Unknown | +| [Language Identification](https://rapidapi.com/BigLobster/api/language-identification-prediction) | Automatic language detection for any texts, supports over 175 languages | `X-Mashape-Key` | Yes | Unknown | | [Semantira](https://semantria.readme.io/docs) | Text Analytics with sentiment analysis, categorization & named entity extraction | `OAuth` | Yes | Unknown | | [Watson Natural Language Understanding](https://www.ibm.com/watson/developercloud/natural-language-understanding/api/v1/) | Natural language processing for advanced text analysis | `OAuth` | Yes | Unknown |
See #944 Thank you @BigLobsterito Thank you for taking the time to work on a Pull Request for this project! To ensure your PR is dealt with swiftly please check the following: - [ ] Your submissions are formatted according to the guidelines in the [contributing guide](CONTRIBUTING.md) - [ ] Your additions are ordered alphabetically - [ ] Your submission has a useful description - [ ] The description does not end with punctuation - [ ] Each table column should be padded with one space on either side - [ ] You have searched the repository for any relevant issues or pull requests - [ ] Any category you are creating has the minimum requirement of 3 items - [ ] All changes have been [squashed][squash-link] into a single commit [squash-link]: <https://github.com/todotxt/todo.txt-android/wiki/Squash-All-Commits-Related-to-a-Single-Issue-into-a-Single-Commit>
https://api.github.com/repos/public-apis/public-apis/pulls/1072
2019-10-06T11:41:30Z
2019-10-06T11:50:27Z
2019-10-06T11:50:27Z
2019-10-06T11:50:30Z
295
public-apis/public-apis
35,536
Perf
diff --git a/manimlib/animation/indication.py b/manimlib/animation/indication.py index 0c425b7f43..1ff261596e 100644 --- a/manimlib/animation/indication.py +++ b/manimlib/animation/indication.py @@ -9,6 +9,7 @@ from manimlib.animation.creation import ShowCreation from manimlib.animation.creation import ShowPartial from manimlib.animation.fading import FadeOut +from manimlib.animation.fading import FadeIn from manimlib.animation.transform import Transform from manimlib.mobject.types.vectorized_mobject import VMobject from manimlib.mobject.geometry import Circle @@ -21,6 +22,8 @@ from manimlib.utils.config_ops import digest_config from manimlib.utils.rate_functions import there_and_back from manimlib.utils.rate_functions import wiggle +from manimlib.utils.rate_functions import smooth +from manimlib.utils.rate_functions import squish_rate_func class FocusOn(Transform): @@ -361,3 +364,22 @@ class TurnInsideOut(Transform): def create_target(self): return self.mobject.copy().reverse_points() + + +class FlashyFadeIn(AnimationGroup): + CONFIG = { + "fade_lag": 0, + } + + def __init__(self, vmobject, stroke_width=2, **kwargs): + digest_config(self, kwargs) + outline = vmobject.copy() + outline.set_fill(opacity=0) + outline.set_stroke(width=stroke_width, opacity=1) + + rate_func = kwargs.get("rate_func", smooth) + super().__init__( + FadeIn(vmobject, rate_func=squish_rate_func(rate_func, self.fade_lag, 1)), + VShowPassingFlash(outline, time_width=1), + **kwargs + ) diff --git a/manimlib/mobject/svg/svg_mobject.py b/manimlib/mobject/svg/svg_mobject.py index 5af3367938..2fd3ce2f83 100644 --- a/manimlib/mobject/svg/svg_mobject.py +++ b/manimlib/mobject/svg/svg_mobject.py @@ -88,10 +88,10 @@ def get_mobjects_from(self, element): elif element.tagName == 'style': pass # TODO, handle style elif element.tagName in ['g', 'svg', 'symbol']: - result += it.chain(*[ + result += it.chain(*( self.get_mobjects_from(child) for child in element.childNodes - ]) + )) elif element.tagName == 'path': result.append(self.path_string_to_mobject( element.getAttribute('d') @@ -341,6 +341,8 @@ def init_points(self): if os.path.exists(points_filepath) and os.path.exists(tris_filepath): self.set_points(np.load(points_filepath)) + self.triangulation = np.load(tris_filepath) + self.needs_new_triangulation = False else: self.relative_point = np.array(ORIGIN) for command, coord_string in self.get_commands_and_coord_strings(): @@ -356,6 +358,7 @@ def init_points(self): self.stretch(-1, 1, about_point=ORIGIN) # Save to a file for future use np.save(points_filepath, self.get_points()) + np.save(tris_filepath, self.get_triangulation()) def get_commands_and_coord_strings(self): all_commands = list(self.get_command_to_function_map().keys()) diff --git a/manimlib/utils/space_ops.py b/manimlib/utils/space_ops.py index b2aa46c150..742716424f 100644 --- a/manimlib/utils/space_ops.py +++ b/manimlib/utils/space_ops.py @@ -1,5 +1,7 @@ import numpy as np import itertools as it +import operator as op +from functools import reduce import math from mapbox_earcut import triangulate_float32 as earcut @@ -376,8 +378,8 @@ def ring_area(ring_id): # Points at the same position may cause problems for i in rings: - verts[i[0]] += (verts[i[1]]-verts[i[0]]) * 1e-6 - verts[i[-1]] += (verts[i[-2]]-verts[i[-1]]) * 1e-6 + verts[i[0]] += (verts[i[1]] - verts[i[0]]) * 1e-6 + verts[i[-1]] += (verts[i[-2]] - verts[i[-1]]) * 1e-6 # First, we should know which rings are directly contained in it for each ring @@ -393,9 +395,11 @@ def ring_area(ring_id): def 
is_in_fast(ring_a, ring_b): # Whether a is in b - return (left[ring_b] <= left[ring_a] <= right[ring_a] <= right[ring_b] and - bottom[ring_b] <= bottom[ring_a] <= top[ring_a] <= top[ring_b] and - is_in(verts[rings[ring_a][0]], ring_b)) + return reduce(op.and_, ( + left[ring_b] <= left[ring_a] <= right[ring_a] <= right[ring_b], + bottom[ring_b] <= bottom[ring_a] <= top[ring_a] <= top[ring_b], + is_in(verts[rings[ring_a][0]], ring_b) + )) chilren = [[] for i in rings] for idx, i in enumerate(rings_sorted):
Small fix addressing why Tex/Text were slower than expected (plus one or two other minor changes)
https://api.github.com/repos/3b1b/manim/pulls/1607
2021-08-19T17:32:03Z
2021-08-19T17:33:47Z
2021-08-19T17:33:47Z
2021-08-19T17:33:51Z
1,270
3b1b/manim
18,253
TextSystem is imported correctly in paddleocr.py
diff --git a/paddleocr.py b/paddleocr.py index 166a4bd743..ce9d32deed 100644 --- a/paddleocr.py +++ b/paddleocr.py @@ -51,7 +51,7 @@ def _import_file(module_name, file_path, make_importable=False): from ppocr.utils.network import maybe_download, download_with_progressbar, is_link, confirm_model_dir_url from tools.infer.utility import draw_ocr, str2bool, check_gpu from ppstructure.utility import init_args, draw_structure_result -from ppstructure.predict_system import StructureSystem, save_structure_res, to_excel +from ppstructure.predict_system import StructureSystem, save_structure_res, to_excel, TextSystem logger = get_logger() __all__ = [ @@ -572,7 +572,7 @@ def check_img(img, alpha_color=(255, 255, 255)): return img -class PaddleOCR(predict_system.TextSystem): +class PaddleOCR(TextSystem): def __init__(self, **kwargs): """ paddleocr package
### PR types <!-- One of [ New features | Bug fixes | Function optimization | Performance optimization | Breaking changes | Others ] --> ### PR changes <!-- One of [ Models | APIs | Docs | Others ] --> ### Description <!-- Describe what this PR does --> ### Check-list before submitting the PR - [ ] This PR is pushed to the dygraph branch or cherry-picked from the dygraph branch. Otherwise, please push your changes to the dygraph branch. - [ ] This PR has fully described what it does so that reviewers can speed up the review. - [ ] This PR can be covered by existing tests or has been verified locally.
https://api.github.com/repos/PaddlePaddle/PaddleOCR/pulls/11847
2024-03-29T13:51:16Z
2024-04-01T06:21:32Z
2024-04-01T06:21:32Z
2024-04-01T07:17:18Z
234
PaddlePaddle/PaddleOCR
42,603
Fix - #1685 - pre-signed-url expiry after given timestamp,s3 object e…
diff --git a/localstack/services/s3/s3_listener.py b/localstack/services/s3/s3_listener.py index be212625e439c..2a86c7ef9de85 100644 --- a/localstack/services/s3/s3_listener.py +++ b/localstack/services/s3/s3_listener.py @@ -2,6 +2,8 @@ import re import logging import json +import time +from pytz import timezone import uuid import base64 import codecs @@ -47,6 +49,9 @@ # maps bucket name to object lock settings OBJECT_LOCK_CONFIGS = {} +# map to store the s3 expiry dates +OBJECT_EXPIRY = {} + # set up logger LOGGER = logging.getLogger(__name__) @@ -320,6 +325,30 @@ def add_accept_range_header(response): response.headers['accept-ranges'] = 'bytes' +def is_object_expired(path): + object_expiry = get_object_expiry(path) + if not object_expiry: + return False + if dateutil.parser.parse(object_expiry) > \ + datetime.datetime.now(timezone(dateutil.parser.parse(object_expiry).tzname())): + return False + return True + + +def set_object_expiry(path, headers): + OBJECT_EXPIRY[path] = headers.get('expires') + + +def get_object_expiry(path): + return OBJECT_EXPIRY.get(path) + + +def is_url_already_expired(expiry_timestamp): + if int(expiry_timestamp) < int(time.time()): + return True + return False + + def add_reponse_metadata_headers(response): if response.headers.get('content-language') is None: response.headers['content-language'] = 'en-US' @@ -633,6 +662,24 @@ def error_response(message, code, status_code=400): return requests_response(content, status_code=status_code, headers=headers) +def no_such_key_error(resource, requestId=None, status_code=400): + result = {'Error': {'Code': 'NoSuchKey', + 'Message': 'The resource you requested does not exist', + 'Resource': resource, 'RequestId': requestId}} + content = xmltodict.unparse(result) + headers = {'content-type': 'application/xml'} + return requests_response(content, status_code=status_code, headers=headers) + + +def token_expired_error(resource, requestId=None, status_code=400): + result = {'Error': {'Code': 'ExpiredToken', + 'Message': 'The provided token has expired.', + 'Resource': resource, 'RequestId': requestId}} + content = xmltodict.unparse(result) + headers = {'content-type': 'application/xml'} + return requests_response(content, status_code=status_code, headers=headers) + + def expand_redirect_url(starting_url, key, bucket): """ Add key and bucket parameters to starting URL query string. 
""" parsed = urlparse.urlparse(starting_url) @@ -890,6 +937,11 @@ def forward_request(self, method, path, data, headers): response = handle_notification_request(bucket, method, data) return response + # if the Expires key in the url is already expired then return error + if method == 'GET' and 'Expires' in query_map: + if is_url_already_expired(query_map.get('Expires')[0]): + return token_expired_error(path, headers.get('x-amz-request-id'), 400) + if query == 'cors' or 'cors' in query_map: if method == 'GET': return get_cors(bucket) @@ -1046,6 +1098,9 @@ def return_response(self, method, path, data, headers, response): fix_etag_for_multipart(data, headers, response) append_aws_request_troubleshooting_headers(response) + if method == 'PUT': + set_object_expiry(path, headers) + # Remove body from PUT response on presigned URL # https://github.com/localstack/localstack/issues/1317 if method == 'PUT' and ('X-Amz-Security-Token=' in path or @@ -1064,6 +1119,9 @@ def return_response(self, method, path, data, headers, response): if method == 'GET': add_accept_range_header(response) add_reponse_metadata_headers(response) + if is_object_expired(path): + return no_such_key_error(path, headers.get('x-amz-request-id'), 400) + query_map = urlparse.parse_qs(parsed.query, keep_blank_values=True) for param_name, header_name in ALLOWED_HEADER_OVERRIDES.items(): if param_name in query_map: diff --git a/tests/integration/test_s3.py b/tests/integration/test_s3.py index 204f0f5610081..5447ffe19e855 100644 --- a/tests/integration/test_s3.py +++ b/tests/integration/test_s3.py @@ -1,12 +1,16 @@ +import datetime import io import os import ssl import gzip import json +import time import uuid import unittest import requests from io import BytesIO + +from pytz import timezone from six.moves.urllib.request import Request, urlopen from localstack import config from localstack.utils.aws import aws_stack @@ -254,6 +258,65 @@ def test_s3_put_metadata_underscores(self): # clean up self._delete_bucket(bucket_name, [object_key]) + def test_s3_object_expiry(self): + # handle s3 object expiry + # https://github.com/localstack/localstack/issues/1685 + bucket_name = 'test-%s' % short_uid() + self.s3_client.create_bucket(Bucket=bucket_name) + + # put object + object_key = 'key-with-metadata' + metadata = {'test_meta_1': 'foo', '__meta_2': 'bar'} + self.s3_client.put_object(Bucket=bucket_name, Key=object_key, Metadata=metadata, Body='foo', + Expires=datetime.datetime.now(timezone('GMT')) - datetime.timedelta(hours=1)) + # try to fetch an object which is already expired + self.assertRaises(Exception, self.s3_client.get_object, Bucket=bucket_name, Key=object_key.lower()) + + self.s3_client.put_object(Bucket=bucket_name, Key=object_key, Metadata=metadata, Body='foo', + Expires=datetime.datetime.now(timezone('GMT')) + datetime.timedelta(hours=1)) + + # try to fetch has not been expired yet. 
+ resp = self.s3_client.get_object(Bucket=bucket_name, Key=object_key) + + self.assertIn('Expires', resp) + + # clean up + self._delete_bucket(bucket_name, [object_key]) + + def test_s3_predesigned_url_expired(self): + + bucket_name = 'test-bucket-%s' % short_uid() + self.s3_client.create_bucket(Bucket=bucket_name) + + # put object and CORS configuration + object_key = 'key-by-hostname' + self.s3_client.put_object(Bucket=bucket_name, Key=object_key, Body='something') + + # get object and assert headers + url = self.s3_client.generate_presigned_url( + 'get_object', Params={'Bucket': bucket_name, 'Key': object_key}, ExpiresIn=2 + ) + # retrieving it before expiry + resp = requests.get(url, verify=False) + self.assertEqual(resp.status_code, 200) + self.assertEqual(resp.content, 'something') + + # waiting for the url to expire + time.sleep(3) + resp = requests.get(url, verify=False) + self.assertEqual(resp.status_code, 400) + + url = self.s3_client.generate_presigned_url( + 'get_object', Params={'Bucket': bucket_name, 'Key': object_key}, ExpiresIn=120 + ) + + resp = requests.get(url, verify=False) + self.assertEqual(resp.status_code, 200) + self.assertEqual(resp.content, 'something') + + # clean up + self._delete_bucket(bucket_name, [object_key]) + def test_range_header_body_length(self): # Test for https://github.com/localstack/localstack/issues/1952
Fixes https://github.com/localstack/localstack/issues/1685: pre-signed URL expiry after the given timestamp, and S3 object expiry after its timestamp.
https://api.github.com/repos/localstack/localstack/pulls/2333
2020-04-21T23:07:23Z
2020-04-22T23:19:46Z
2020-04-22T23:19:46Z
2020-04-22T23:19:46Z
1,827
localstack/localstack
29,175
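A minimal standalone sketch of the expiry check described in the PR above (illustrative only, not the listener module itself; it mirrors the `is_url_already_expired` helper added in the diff): a pre-signed URL carries an `Expires` epoch timestamp that is compared against the current wall-clock time before a GET is served.

```python
import time

def is_url_already_expired(expiry_timestamp: str) -> bool:
    # The URL is stale once its embedded epoch timestamp falls behind "now".
    return int(expiry_timestamp) < int(time.time())

print(is_url_already_expired(str(int(time.time()) - 10)))    # True: timestamp already in the past
print(is_url_already_expired(str(int(time.time()) + 3600)))  # False: still valid for another hour
```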
Make the API use FastAPI instead of Flask
diff --git a/g4f/__init__.py b/g4f/__init__.py index a2eec9e264..8a1cb3cdf4 100644 --- a/g4f/__init__.py +++ b/g4f/__init__.py @@ -115,4 +115,4 @@ def create(model : Union[Model, str], return result if stream else ''.join(result) if version_check: - check_pypi_version() \ No newline at end of file + check_pypi_version() diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py index fec5606f6f..43bca2a5ec 100644 --- a/g4f/api/__init__.py +++ b/g4f/api/__init__.py @@ -1,163 +1,137 @@ -import typing -from .. import BaseProvider -import g4f; g4f.debug.logging = True +from fastapi import FastAPI, Response, Request +from typing import List, Union, Any, Dict, AnyStr +from ._tokenizer import tokenize +from .. import BaseProvider + import time import json import random import string -import logging - -from typing import Union -from loguru import logger -from waitress import serve -from ._logging import hook_logging -from ._tokenizer import tokenize -from flask_cors import CORS -from werkzeug.serving import WSGIRequestHandler -from werkzeug.exceptions import default_exceptions -from werkzeug.middleware.proxy_fix import ProxyFix - -from flask import ( - Flask, - jsonify, - make_response, - request, -) +import uvicorn +import nest_asyncio +import g4f class Api: - __default_ip = '127.0.0.1' - __default_port = 1337 - def __init__(self, engine: g4f, debug: bool = True, sentry: bool = False, - list_ignored_providers:typing.List[typing.Union[str, BaseProvider]]=None) -> None: - self.engine = engine - self.debug = debug - self.sentry = sentry - self.list_ignored_providers = list_ignored_providers - self.log_level = logging.DEBUG if debug else logging.WARN - - hook_logging(level=self.log_level, format='[%(asctime)s] %(levelname)s in %(module)s: %(message)s') - self.logger = logging.getLogger('waitress') - - self.app = Flask(__name__) - self.app.wsgi_app = ProxyFix(self.app.wsgi_app, x_port=1) - self.app.after_request(self.__after_request) - - def run(self, bind_str, threads=8): - host, port = self.__parse_bind(bind_str) - - CORS(self.app, resources={r'/v1/*': {'supports_credentials': True, 'expose_headers': [ - 'Content-Type', - 'Authorization', - 'X-Requested-With', - 'Accept', - 'Origin', - 'Access-Control-Request-Method', - 'Access-Control-Request-Headers', - 'Content-Disposition'], 'max_age': 600}}) - - self.app.route('/v1/models', methods=['GET'])(self.models) - self.app.route('/v1/models/<model_id>', methods=['GET'])(self.model_info) - - self.app.route('/v1/chat/completions', methods=['POST'])(self.chat_completions) - self.app.route('/v1/completions', methods=['POST'])(self.completions) - - for ex in default_exceptions: - self.app.register_error_handler(ex, self.__handle_error) - - if not self.debug: - self.logger.warning(f'Serving on http://{host}:{port}') - - WSGIRequestHandler.protocol_version = 'HTTP/1.1' - serve(self.app, host=host, port=port, ident=None, threads=threads) - - def __handle_error(self, e: Exception): - self.logger.error(e) - - return make_response(jsonify({ - 'code': e.code, - 'message': str(e.original_exception if self.debug and hasattr(e, 'original_exception') else e.name)}), 500) - - @staticmethod - def __after_request(resp): - resp.headers['X-Server'] = f'g4f/{g4f.version}' - - return resp - - def __parse_bind(self, bind_str): - sections = bind_str.split(':', 2) - if len(sections) < 2: + list_ignored_providers: List[Union[str, BaseProvider]] = None) -> None: + self.engine = engine + self.debug = debug + self.sentry = sentry + self.list_ignored_providers = list_ignored_providers + 
+ self.app = FastAPI() + nest_asyncio.apply() + + JSONObject = Dict[AnyStr, Any] + JSONArray = List[Any] + JSONStructure = Union[JSONArray, JSONObject] + + @self.app.get("/") + async def read_root(): + return Response(content=json.dumps({"info": "g4f API"}, indent=4), media_type="application/json") + + @self.app.get("/v1") + async def read_root_v1(): + return Response(content=json.dumps({"info": "Go to /v1/chat/completions or /v1/models."}, indent=4), media_type="application/json") + + @self.app.get("/v1/models") + async def models(): + model_list = [{ + 'id': model, + 'object': 'model', + 'created': 0, + 'owned_by': 'g4f'} for model in g4f.Model.__all__()] + + return Response(content=json.dumps({ + 'object': 'list', + 'data': model_list}, indent=4), media_type="application/json") + + @self.app.get("/v1/models/{model_name}") + async def model_info(model_name: str): try: - port = int(sections[0]) - return self.__default_ip, port - except ValueError: - return sections[0], self.__default_port - - return sections[0], int(sections[1]) - - async def home(self): - return 'Hello world | https://127.0.0.1:1337/v1' - - async def chat_completions(self): - model = request.json.get('model', 'gpt-3.5-turbo') - stream = request.json.get('stream', False) - messages = request.json.get('messages') - - logger.info(f'model: {model}, stream: {stream}, request: {messages[-1]["content"]}') - - config = None - proxy = None - - try: - config = json.load(open("config.json","r",encoding="utf-8")) - proxy = config["proxy"] - - except Exception: - pass - - if proxy != None: - response = self.engine.ChatCompletion.create(model=model, - stream=stream, messages=messages, - ignored=self.list_ignored_providers, - proxy=proxy) - else: - response = self.engine.ChatCompletion.create(model=model, - stream=stream, messages=messages, - ignored=self.list_ignored_providers) - - completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28)) - completion_timestamp = int(time.time()) - - if not stream: - prompt_tokens, _ = tokenize(''.join([message['content'] for message in messages])) - completion_tokens, _ = tokenize(response) - - return { - 'id': f'chatcmpl-{completion_id}', - 'object': 'chat.completion', - 'created': completion_timestamp, - 'model': model, - 'choices': [ - { - 'index': 0, - 'message': { - 'role': 'assistant', - 'content': response, - }, - 'finish_reason': 'stop', - } - ], - 'usage': { - 'prompt_tokens': prompt_tokens, - 'completion_tokens': completion_tokens, - 'total_tokens': prompt_tokens + completion_tokens, - }, + model_info = (g4f.ModelUtils.convert[model_name]) + + return Response(content=json.dumps({ + 'id': model_name, + 'object': 'model', + 'created': 0, + 'owned_by': model_info.base_provider + }, indent=4), media_type="application/json") + except: + return Response(content=json.dumps({"error": "The model does not exist."}, indent=4), media_type="application/json") + + @self.app.post("/v1/chat/completions") + async def chat_completions(request: Request, item: JSONStructure = None): + item_data = { + 'model': 'gpt-3.5-turbo', + 'stream': False, } - def streaming(): + item_data.update(item or {}) + model = item_data.get('model') + stream = item_data.get('stream') + messages = item_data.get('messages') + try: - for chunk in response: - completion_data = { + response = g4f.ChatCompletion.create(model=model, stream=stream, messages=messages) + except: + return Response(content=json.dumps({"error": "An error occurred while generating the response."}, indent=4), 
media_type="application/json") + + completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28)) + completion_timestamp = int(time.time()) + + if not stream: + prompt_tokens, _ = tokenize(''.join([message['content'] for message in messages])) + completion_tokens, _ = tokenize(response) + + json_data = { + 'id': f'chatcmpl-{completion_id}', + 'object': 'chat.completion', + 'created': completion_timestamp, + 'model': model, + 'choices': [ + { + 'index': 0, + 'message': { + 'role': 'assistant', + 'content': response, + }, + 'finish_reason': 'stop', + } + ], + 'usage': { + 'prompt_tokens': prompt_tokens, + 'completion_tokens': completion_tokens, + 'total_tokens': prompt_tokens + completion_tokens, + }, + } + + return Response(content=json.dumps(json_data, indent=4), media_type="application/json") + + def streaming(): + try: + for chunk in response: + completion_data = { + 'id': f'chatcmpl-{completion_id}', + 'object': 'chat.completion.chunk', + 'created': completion_timestamp, + 'model': model, + 'choices': [ + { + 'index': 0, + 'delta': { + 'content': chunk, + }, + 'finish_reason': None, + } + ], + } + + content = json.dumps(completion_data, separators=(',', ':')) + yield f'data: {content}\n\n' + time.sleep(0.03) + + end_completion_data = { 'id': f'chatcmpl-{completion_id}', 'object': 'chat.completion.chunk', 'created': completion_timestamp, @@ -165,63 +139,24 @@ def streaming(): 'choices': [ { 'index': 0, - 'delta': { - 'content': chunk, - }, - 'finish_reason': None, + 'delta': {}, + 'finish_reason': 'stop', } ], } - content = json.dumps(completion_data, separators=(',', ':')) + content = json.dumps(end_completion_data, separators=(',', ':')) yield f'data: {content}\n\n' - time.sleep(0.03) - end_completion_data = { - 'id': f'chatcmpl-{completion_id}', - 'object': 'chat.completion.chunk', - 'created': completion_timestamp, - 'model': model, - 'choices': [ - { - 'index': 0, - 'delta': {}, - 'finish_reason': 'stop', - } - ], - } - - content = json.dumps(end_completion_data, separators=(',', ':')) - yield f'data: {content}\n\n' - - logger.success(f'model: {model}, stream: {stream}') - - except GeneratorExit: - pass - - return self.app.response_class(streaming(), mimetype='text/event-stream') - - async def completions(self): - return 'not working yet', 500 - - async def model_info(self, model_name): - model_info = (g4f.ModelUtils.convert[model_name]) - - return jsonify({ - 'id' : model_name, - 'object' : 'model', - 'created' : 0, - 'owned_by' : model_info.base_provider - }) - - async def models(self): - model_list = [{ - 'id' : model, - 'object' : 'model', - 'created' : 0, - 'owned_by' : 'g4f'} for model in g4f.Model.__all__()] - - return jsonify({ - 'object': 'list', - 'data': model_list}) - \ No newline at end of file + except GeneratorExit: + pass + + return Response(content=json.dumps(streaming(), indent=4), media_type="application/json") + + @self.app.post("/v1/completions") + async def completions(): + return Response(content=json.dumps({'info': 'Not working yet.'}, indent=4), media_type="application/json") + + def run(self, ip): + split_ip = ip.split(":") + uvicorn.run(app=self.app, host=split_ip[0], port=int(split_ip[1]), use_colors=False) diff --git a/g4f/api/run.py b/g4f/api/run.py index 12bf9eed6c..88f347418d 100644 --- a/g4f/api/run.py +++ b/g4f/api/run.py @@ -3,4 +3,4 @@ if __name__ == "__main__": print(f'Starting server... 
[g4f v-{g4f.version}]') - g4f.api.Api(g4f).run('127.0.0.1:1337', 8) \ No newline at end of file + g4f.api.Api(engine = g4f, debug = True).run(ip = "127.0.0.1:1337") diff --git a/g4f/cli.py b/g4f/cli.py index cb19dde172..20131e5db0 100644 --- a/g4f/cli.py +++ b/g4f/cli.py @@ -7,11 +7,9 @@ from g4f.api import Api from g4f.gui.run import gui_parser, run_gui_args - def run_gui(args): print("Running GUI...") - def main(): IgnoredProviders = Enum("ignore_providers", {key: key for key in Provider.__all__}) parser = argparse.ArgumentParser(description="Run gpt4free") @@ -19,22 +17,19 @@ def main(): api_parser=subparsers.add_parser("api") api_parser.add_argument("--bind", default="127.0.0.1:1337", help="The bind string.") api_parser.add_argument("--debug", type=bool, default=False, help="Enable verbose logging") - api_parser.add_argument("--num-threads", type=int, default=8, help="The number of threads.") api_parser.add_argument("--ignored-providers", nargs="+", choices=[provider.name for provider in IgnoredProviders], default=[], help="List of providers to ignore when processing request.") subparsers.add_parser("gui", parents=[gui_parser()], add_help=False) args = parser.parse_args() if args.mode == "api": - controller=Api(g4f, debug=args.debug) - controller.list_ignored_providers=args.ignored_providers - controller.run(args.bind, args.num_threads) + controller=Api(engine=g4f, debug=args.debug, list_ignored_providers=args.ignored_providers) + controller.run(args.bind) elif args.mode == "gui": run_gui_args(args) else: parser.print_help() exit(1) - if __name__ == "__main__": main() diff --git a/requirements.txt b/requirements.txt index 3ef9b32eb5..ffadf62a7f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,8 +6,6 @@ certifi browser_cookie3 websockets js2py -flask[async] -flask-cors typing-extensions PyExecJS duckduckgo-search @@ -20,3 +18,5 @@ pillow platformdirs numpy asgiref +fastapi +uvicorn
- Made the API ``__init__`` module use FastAPI.
- Made the CLI mode compatible with the new API code.
- Made the ``run`` module compatible with the new API code.
https://api.github.com/repos/xtekky/gpt4free/pulls/1213
2023-11-04T21:34:05Z
2023-11-05T18:16:12Z
2023-11-05T18:16:12Z
2023-11-13T11:40:23Z
3,912
xtekky/gpt4free
38,203
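A minimal sketch of the FastAPI-plus-uvicorn pattern the PR above switches to (illustrative only; the route body and model list here are placeholders rather than the project's actual g4f-backed implementation):

```python
import json

import uvicorn
from fastapi import FastAPI, Response

app = FastAPI()

@app.get("/v1/models")
async def models():
    # Placeholder model list; the real API builds this from g4f.Model.__all__().
    model_list = [{"id": "gpt-3.5-turbo", "object": "model", "created": 0, "owned_by": "g4f"}]
    return Response(content=json.dumps({"object": "list", "data": model_list}, indent=4),
                    media_type="application/json")

if __name__ == "__main__":
    # Roughly what Api(...).run("127.0.0.1:1337") does after the change.
    uvicorn.run(app, host="127.0.0.1", port=1337)
```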
Fix bug in Hybrid Engine
diff --git a/deepspeed/runtime/hybrid_engine.py b/deepspeed/runtime/hybrid_engine.py index 0bddc6aeb9d4..46b1475cece9 100644 --- a/deepspeed/runtime/hybrid_engine.py +++ b/deepspeed/runtime/hybrid_engine.py @@ -318,7 +318,7 @@ def create_inference_module(self): self._orig_fwds_others = [] if self._config.hybrid_engine.inference_tp_size > 1: - if self.mpu is not None: + if self.mpu is None: global_rank = dist.get_rank() world_size = dist.get_world_size() mp_group_id = global_rank // self._config.hybrid_engine.inference_tp_size
resolves #3454 @cmikeh2 @RezaYazdaniAminabadi
https://api.github.com/repos/microsoft/DeepSpeed/pulls/3497
2023-05-09T17:48:49Z
2023-05-12T21:40:48Z
2023-05-12T21:40:48Z
2023-05-12T21:40:54Z
167
microsoft/DeepSpeed
10,291
Support cpu tensors without direct device invocation
diff --git a/deepspeed/runtime/engine.py b/deepspeed/runtime/engine.py index c03fba2d7784..3cbc4c8414b7 100644 --- a/deepspeed/runtime/engine.py +++ b/deepspeed/runtime/engine.py @@ -120,21 +120,25 @@ def split_half_float_double_sparse(tensors): device_type = get_accelerator().device_name() - supported_types = [ - "torch.{}.HalfTensor".format(device_type), "torch.{}.FloatTensor".format(device_type), - "torch.{}.DoubleTensor".format(device_type), "torch.{}.BFloat16Tensor".format(device_type), - SparseTensor.type() - ] + supported_types = get_accelerator().supported_dtypes() for t in tensors: - assert t.type() in supported_types, f"attempting to reduce an unsupported grad type: {t.type()}" + assert t.dtype in supported_types, f"attempting to reduce an unsupported grad type: {t.dtype}" - buckets = [] + sparse_tensor_buckets, dense_tensor_buckets = [], [] for i, dtype in enumerate(supported_types): - bucket = [t for t in tensors if t.type() == dtype] - if bucket: - buckets.append((dtype, bucket)) - return buckets + sparse_bucket, dense_bucket = [], [] + for t in tensors: + if t.dtype == dtype: + if isinstance(t, SparseTensor): + sparse_bucket.append(t) + else: + dense_bucket.append(t) + if sparse_bucket: + sparse_tensor_buckets.append((dtype, sparse_bucket)) + if dense_bucket: + dense_tensor_buckets.append((dtype, dense_bucket)) + return sparse_tensor_buckets, dense_tensor_buckets class EngineTimers(object): @@ -2396,30 +2400,37 @@ def _get_gradients_for_reduction(self): return non_expert_grads, expert_grads def _reduce_non_expert_gradients(self, grads, elements_per_buffer): - split_buckets = split_half_float_double_sparse(grads) - for _, bucket_tuple in enumerate(split_buckets): - bucket_type, bucket = bucket_tuple + split_sparse_tensor_buckets, split_dense_tensor_buckets = split_half_float_double_sparse(grads) + if self.pipeline_parallelism: + dp_group = self.mpu.get_data_parallel_group() + else: + dp_group = groups._get_sequence_data_parallel_group() - if self.pipeline_parallelism: - dp_group = self.mpu.get_data_parallel_group() - else: - dp_group = groups._get_sequence_data_parallel_group() + for _, sparse_bucket_tuple in enumerate(split_sparse_tensor_buckets): + if sparse_bucket_tuple: + bucket_type, sparse_bucket = sparse_bucket_tuple + self.sparse_allreduce_no_retain(sparse_bucket, dp_group=dp_group) - if bucket_type == SparseTensor.type(): - self.sparse_allreduce_no_retain(bucket, dp_group=dp_group) - else: - self.allreduce_no_retain(bucket, dp_group=dp_group, numel_per_bucket=elements_per_buffer) + for _, dense_bucket_tuple in enumerate(split_dense_tensor_buckets): + if dense_bucket_tuple: + bucket_type, dense_bucket = dense_bucket_tuple + self.allreduce_no_retain(dense_bucket, dp_group=dp_group, numel_per_bucket=elements_per_buffer) def _reduce_expert_gradients(self, expert_grads, elements_per_buffer): for ep_name, expert_grads_group in expert_grads.items(): - expert_split_buckets = split_half_float_double_sparse(expert_grads_group) - for i, bucket_tuple in enumerate(expert_split_buckets): - bucket_type, bucket = bucket_tuple - if bucket_type == SparseTensor.type(): - self.sparse_allreduce_no_retain(bucket, groups._get_expert_data_parallel_group(ep_name)) - else: + split_sparse_tensor_buckets, split_dense_tensor_buckets = split_half_float_double_sparse( + expert_grads_group) + + for _, sparse_bucket_tuple in enumerate(split_sparse_tensor_buckets): + if sparse_bucket_tuple: + bucket_type, sparse_bucket = sparse_bucket_tuple + self.sparse_allreduce_no_retain(sparse_bucket, 
groups._get_expert_data_parallel_group(ep_name)) + + for _, dense_bucket_tuple in enumerate(split_dense_tensor_buckets): + if dense_bucket_tuple: + bucket_type, dense_bucket = dense_bucket_tuple # Separate between diff groups - self.allreduce_no_retain(bucket, + self.allreduce_no_retain(dense_bucket, dp_group=groups._get_expert_data_parallel_group(ep_name), numel_per_bucket=elements_per_buffer) diff --git a/deepspeed/runtime/sparse_tensor.py b/deepspeed/runtime/sparse_tensor.py index f0bb5c75530e..291ba5f0c786 100644 --- a/deepspeed/runtime/sparse_tensor.py +++ b/deepspeed/runtime/sparse_tensor.py @@ -15,6 +15,7 @@ class SparseTensor(object): def __init__(self, dense_tensor=None): self.orig_dense_tensor = dense_tensor + self.dtype = self.orig_dense_tensor.dtype self.is_sparse = dense_tensor.is_sparse if dense_tensor is not None: if dense_tensor.is_sparse:
Motivation: fix for the reproducible issue #3837 on CPU. On CPUs, direct invocation of `torch.cpu.tensor` leads to a dtype mismatch. Another way would be to have something like `["torch.DoubleTensor" if device_type == 'cpu' else "torch.{}.DoubleTensor".format(device_type)]` for all elements in the supported list, but that would eliminate "torch.cpu.DoubleTensor", etc. from the scope. @jeffra requesting review. CLA is signed.
https://api.github.com/repos/microsoft/DeepSpeed/pulls/3842
2023-06-29T04:42:12Z
2024-01-05T15:18:53Z
2024-01-05T15:18:52Z
2024-01-05T15:18:53Z
1,160
microsoft/DeepSpeed
10,847
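A small illustrative sketch (not DeepSpeed code) of the mismatch the PR above works around and why comparing `tensor.dtype` avoids it: on CPU, `t.type()` carries no device prefix, so type strings constructed as `"torch.cpu.FloatTensor"` can never match.

```python
import torch

t = torch.ones(2, dtype=torch.float32)        # a plain CPU tensor
print(t.type())                               # 'torch.FloatTensor' -- no 'cpu.' in the name
print(t.type() == "torch.cpu.FloatTensor")    # False: the old string-based check cannot match
print(t.dtype == torch.float32)               # True: dtype comparison is device-agnostic

# Grouping gradients into per-dtype buckets then works the same on CPU and GPU:
tensors = [torch.ones(2), torch.ones(2, dtype=torch.float64)]
buckets = {}
for x in tensors:
    buckets.setdefault(x.dtype, []).append(x)
print({k: len(v) for k, v in buckets.items()})  # {torch.float32: 1, torch.float64: 1}
```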
Added Grokking Artificial Intelligence Algorithms
diff --git a/courses.md b/courses.md index 6082c798..970f2529 100644 --- a/courses.md +++ b/courses.md @@ -36,3 +36,4 @@ The following is a list of free or paid online courses on machine learning, stat * [Math and Architectures of Deep Learning](https://www.manning.com/books/math-and-architectures-of-deep-learning) - $ * [Deep Learning with Python, Second Edition](https://www.manning.com/books/deep-learning-with-python-second-edition) - $ * [Transfer Learning for Natural Language Processing](https://www.manning.com/books/transfer-learning-for-natural-language-processing) - $ +* [Grokking Artificial Intelligence Algorithms](https://www.manning.com/books/grokking-artificial-intelligence-algorithms) - $
Hi, I am Branko from Manning Publications. I thought this book could be a good fit for the list. Thanks for considering it! Cheers
https://api.github.com/repos/josephmisiti/awesome-machine-learning/pulls/712
2020-09-01T12:40:52Z
2020-09-01T15:59:29Z
2020-09-01T15:59:29Z
2020-09-01T15:59:29Z
181
josephmisiti/awesome-machine-learning
51,807
Improving performance for PDF loader
diff --git a/ingest.py b/ingest.py index 293013846..0ca80743f 100755 --- a/ingest.py +++ b/ingest.py @@ -9,7 +9,7 @@ from langchain.document_loaders import ( CSVLoader, EverNoteLoader, - PDFMinerLoader, + PyMuPDFLoader, TextLoader, UnstructuredEmailLoader, UnstructuredEPubLoader, @@ -73,7 +73,7 @@ def load(self) -> List[Document]: ".html": (UnstructuredHTMLLoader, {}), ".md": (UnstructuredMarkdownLoader, {}), ".odt": (UnstructuredODTLoader, {}), - ".pdf": (PDFMinerLoader, {}), + ".pdf": (PyMuPDFLoader, {}), ".ppt": (UnstructuredPowerPointLoader, {}), ".pptx": (UnstructuredPowerPointLoader, {}), ".txt": (TextLoader, {"encoding": "utf8"}), diff --git a/requirements.txt b/requirements.txt index 8c43e53bd..acfb584f9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ gpt4all==0.2.3 chromadb==0.3.23 llama-cpp-python==0.1.50 urllib3==2.0.2 -pdfminer.six==20221105 +PyMuPDF==1.22.3 python-dotenv==1.0.0 unstructured==0.6.6 extract-msg==0.41.1
PyMuPDF brings much better performance for PDF loading: https://pymupdf.readthedocs.io/en/latest/about.html#performance
Here is a comparison article: https://medium.com/social-impact-analytics/comparing-4-methods-for-pdf-text-extraction-in-python-fd34531034f
It is about **5x-30x faster** for text extraction, and in my few test cases PyMuPDF also gave better results on CJK characters, although both loaders may still display a lot of garbled characters.
The difference is that PDFMinerLoader produces a single document object, while PyMuPDFLoader produces one document object per page. This PR already contains the changes I made for this update: https://github.com/imartinez/privateGPT/pull/560/files
* Remove the `[0]` read subscript after _loader.load()_
* Update _results.append(doc)_ to _results.extend(docs)_
https://api.github.com/repos/zylon-ai/private-gpt/pulls/660
2023-06-07T15:57:56Z
2023-06-11T17:10:09Z
2023-06-11T17:10:09Z
2023-06-11T17:10:09Z
360
zylon-ai/private-gpt
38,490
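A simplified sketch of the collection change described above (a standalone illustration with a stand-in loader, not the repo's ingest.py): because PyMuPDFLoader returns a list with one document per page rather than a single document, the gathering loop switches from `append()` to `extend()`.

```python
from typing import List

class Document:
    """Stand-in for langchain's Document type, used only for this illustration."""
    def __init__(self, page_content: str):
        self.page_content = page_content

def load_pdf_as_pages(path: str) -> List[Document]:
    # Placeholder for PyMuPDFLoader(path).load(): returns one Document per page.
    return [Document(f"page {i} of {path}") for i in range(3)]

results: List[Document] = []
for path in ["a.pdf", "b.pdf"]:
    docs = load_pdf_as_pages(path)
    results.extend(docs)   # extend, not append: docs is already a list of Documents
print(len(results))        # 6
```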
Backport PR #27777 on branch 0.25.x (Avoid calling S3File.s3)
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index 637ac5c9c8bd1..01141122dde0a 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -104,7 +104,7 @@ MultiIndex I/O ^^^ -- +- Avoid calling ``S3File.s3`` when reading parquet, as this was removed in s3fs version 0.3.0 (:issue:`27756`) - - diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 617f4f44ae8af..c7a8e597ffb41 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -184,12 +184,14 @@ def write( def read(self, path, columns=None, **kwargs): if is_s3_url(path): + from pandas.io.s3 import get_file_and_filesystem + # When path is s3:// an S3File is returned. # We need to retain the original path(str) while also # pass the S3File().open function to fsatparquet impl. - s3, _, _, should_close = get_filepath_or_buffer(path) + s3, filesystem = get_file_and_filesystem(path) try: - parquet_file = self.api.ParquetFile(path, open_with=s3.s3.open) + parquet_file = self.api.ParquetFile(path, open_with=filesystem.open) finally: s3.close() else: diff --git a/pandas/io/s3.py b/pandas/io/s3.py index 0a7c082fec51c..7e0a37e8cba20 100644 --- a/pandas/io/s3.py +++ b/pandas/io/s3.py @@ -1,8 +1,11 @@ """ s3 support for remote file interactivity """ +from typing import IO, Any, Optional, Tuple from urllib.parse import urlparse as parse_url from pandas.compat._optional import import_optional_dependency +from pandas._typing import FilePathOrBuffer + s3fs = import_optional_dependency( "s3fs", extra="The s3fs package is required to handle s3 files." ) @@ -14,9 +17,9 @@ def _strip_schema(url): return result.netloc + result.path -def get_filepath_or_buffer( - filepath_or_buffer, encoding=None, compression=None, mode=None -): +def get_file_and_filesystem( + filepath_or_buffer: FilePathOrBuffer, mode: Optional[str] = None +) -> Tuple[IO, Any]: from botocore.exceptions import NoCredentialsError if mode is None: @@ -24,7 +27,7 @@ def get_filepath_or_buffer( fs = s3fs.S3FileSystem(anon=False) try: - filepath_or_buffer = fs.open(_strip_schema(filepath_or_buffer), mode) + file = fs.open(_strip_schema(filepath_or_buffer), mode) except (FileNotFoundError, NoCredentialsError): # boto3 has troubles when trying to access a public file # when credentialed... @@ -33,5 +36,15 @@ def get_filepath_or_buffer( # A NoCredentialsError is raised if you don't have creds # for that bucket. fs = s3fs.S3FileSystem(anon=True) - filepath_or_buffer = fs.open(_strip_schema(filepath_or_buffer), mode) - return filepath_or_buffer, None, compression, True + file = fs.open(_strip_schema(filepath_or_buffer), mode) + return file, fs + + +def get_filepath_or_buffer( + filepath_or_buffer: FilePathOrBuffer, + encoding: Optional[str] = None, + compression: Optional[str] = None, + mode: Optional[str] = None, +) -> Tuple[IO, Optional[str], Optional[str], bool]: + file, _fs = get_file_and_filesystem(filepath_or_buffer, mode=mode) + return file, None, compression, True
Backport PR #27777: Avoid calling S3File.s3
https://api.github.com/repos/pandas-dev/pandas/pulls/27877
2019-08-12T19:11:06Z
2019-08-13T07:54:42Z
2019-08-13T07:54:42Z
2019-08-13T07:54:42Z
939
pandas-dev/pandas
44,779
add some badges to readme
diff --git a/README.md b/README.md index d612940ac3..a443a5d7a7 100644 --- a/README.md +++ b/README.md @@ -3,6 +3,16 @@ <img width="auto" height="50px" src="https://github.com/LAION-AI/Open-Assistant/blob/main/assets/logo_crop.png"/> </h1> +<div align="center"> + +<a href="">![GitHub Repo stars](https://img.shields.io/github/stars/LAION-AI/Open-Assistant?style=social)</a> +<a href="">![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/LAION-AI/Open-Assistant/build-frontend.yaml?label=frontend)</a> +<a href="">![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/LAION-AI/Open-Assistant/pre-commit.yaml?label=pre-commit)</a> +<a href="">![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/LAION-AI/Open-Assistant/test-api-contract.yaml?label=api)</a> +<a href="">![GitHub release (latest by date)](https://img.shields.io/github/v/release/LAION-AI/Open-Assistant)</a> + +</div> + # Table of Contents - [What is Open Assistant?](#what-is-open-assistant)
https://api.github.com/repos/LAION-AI/Open-Assistant/pulls/442
2023-01-06T11:38:36Z
2023-01-06T11:51:33Z
2023-01-06T11:51:33Z
2023-01-06T11:51:40Z
314
LAION-AI/Open-Assistant
37,334
Update Books.md
diff --git a/books.md b/books.md index c869b856..c3acb539 100644 --- a/books.md +++ b/books.md @@ -1,5 +1,5 @@ The following is a list of free, open source books on machine learning, statistics, data-mining, etc. - +## List is Aweosome ## Machine-Learning / Data Mining * [The Hundred-Page Machine Learning Book](http://themlbook.com/wiki/doku.php)
https://api.github.com/repos/josephmisiti/awesome-machine-learning/pulls/590
2019-02-17T14:21:26Z
2019-02-24T00:06:52Z
2019-02-24T00:06:52Z
2019-02-24T00:06:57Z
106
josephmisiti/awesome-machine-learning
51,786
Deprecated dockerface
diff --git a/README.md b/README.md index 99813381..9983e334 100644 --- a/README.md +++ b/README.md @@ -985,7 +985,7 @@ be * [OpenFace](https://cmusatyalab.github.io/openface/) - Free and open source face recognition with deep neural networks. * [PCV](https://github.com/jesolem/PCV) - Open source Python module for computer vision. **[Deprecated]** * [face_recognition](https://github.com/ageitgey/face_recognition) - Face recognition library that recognizes and manipulates faces from Python or from the command line. -* [dockerface](https://github.com/natanielruiz/dockerface) - Easy to install and use deep learning Faster R-CNN face detection for images and video in a docker container. +* [dockerface](https://github.com/natanielruiz/dockerface) - Easy to install and use deep learning Faster R-CNN face detection for images and video in a docker container. **[Deprecated]** * [Detectron](https://github.com/facebookresearch/Detectron) - FAIR's software system that implements state-of-the-art object detection algorithms, including Mask R-CNN. It is written in Python and powered by the Caffe2 deep learning framework. **[Deprecated]** * [detectron2](https://github.com/facebookresearch/detectron2) - FAIR's next-generation research platform for object detection and segmentation. It is a ground-up rewrite of the previous version, Detectron, and is powered by the PyTorch deep learning framework. * [albumentations](https://github.com/albu/albumentations) - А fast and framework agnostic image augmentation library that implements a diverse set of augmentation techniques. Supports classification, segmentation, detection out of the box. Was used to win a number of Deep Learning competitions at Kaggle, Topcoder and those that were a part of the CVPR workshops.
No commits in the last 3 years, and it does not look like a very popular repository.
https://api.github.com/repos/josephmisiti/awesome-machine-learning/pulls/819
2021-10-04T14:59:32Z
2021-10-04T16:37:29Z
2021-10-04T16:37:29Z
2021-10-04T16:37:29Z
425
josephmisiti/awesome-machine-learning
51,829
build(deps): bump ccxt from 4.2.5 to 4.2.7
diff --git a/requirements_with_versions.txt b/requirements_with_versions.txt index e0fff1199a..4bd804a1be 100644 --- a/requirements_with_versions.txt +++ b/requirements_with_versions.txt @@ -81,7 +81,7 @@ Unidecode==1.3.7 Ball==0.2.9 pynput==1.7.6 gTTS==2.5.0 -ccxt==4.2.5 +ccxt==4.2.7 fitz==0.0.1.dev2 fastapi==0.108.0 Django==5.0.1
Bumps [ccxt](https://github.com/ccxt/ccxt) from 4.2.5 to 4.2.7. <details> <summary>Commits</summary> <ul> <li><a href="https://github.com/ccxt/ccxt/commit/1ef3a8a91d7cf7ff9901ce03b52e82ca68659968"><code>1ef3a8a</code></a> 4.2.7</li> <li><a href="https://github.com/ccxt/ccxt/commit/fb7ac168b91f5b9ebfb8b4b67480b887a74c16d9"><code>fb7ac16</code></a> bitmex new endpoints (<a href="https://redirect.github.com/ccxt/ccxt/issues/20488">#20488</a>)</li> <li><a href="https://github.com/ccxt/ccxt/commit/19a19bb87ee0c58b071883d6f2e669e2ec9c2f13"><code>19a19bb</code></a> fix(py) - timeout (<a href="https://redirect.github.com/ccxt/ccxt/issues/20678">#20678</a>)</li> <li><a href="https://github.com/ccxt/ccxt/commit/7b5347c46427990d3f54df06064b6773d1559dff"><code>7b5347c</code></a> fix(php) - clone deep extend (<a href="https://redirect.github.com/ccxt/ccxt/issues/20676">#20676</a>)</li> <li><a href="https://github.com/ccxt/ccxt/commit/7d3ecb18f4f664e8e124bec7431879d3ec4ec837"><code>7d3ecb1</code></a> feat(phemex): update id (<a href="https://redirect.github.com/ccxt/ccxt/issues/20675">#20675</a>)</li> <li><a href="https://github.com/ccxt/ccxt/commit/6164ea157227ff139d69900cfef49edfcd21e1d2"><code>6164ea1</code></a> 4.2.6</li> <li><a href="https://github.com/ccxt/ccxt/commit/225bc94a45d24be528ea5b41194c5a216fa6c830"><code>225bc94</code></a> feat(exchange.close()): have exchange.close() [ci deploy]</li> <li><a href="https://github.com/ccxt/ccxt/commit/a7bbeef82f3bffc234cd6c2c70c0cd00214b0ba1"><code>a7bbeef</code></a> Woo: createOrder, fetchOrders, editOrder trailing support (<a href="https://redirect.github.com/ccxt/ccxt/issues/20656">#20656</a>)</li> <li><a href="https://github.com/ccxt/ccxt/commit/0f13e6cfe594874313a0e9f04fc23f7b4e9989f1"><code>0f13e6c</code></a> feat(bingx): add clientOrderId to cancelOrder (<a href="https://redirect.github.com/ccxt/ccxt/issues/20666">#20666</a>)</li> <li><a href="https://github.com/ccxt/ccxt/commit/bb45ed5cfd4a64040afa63f4f29b4dd2df0ce612"><code>bb45ed5</code></a> Bitmex: createOrder, editOrder, add trailing support (<a href="https://redirect.github.com/ccxt/ccxt/issues/20639">#20639</a>)</li> <li>Additional commits viewable in <a href="https://github.com/ccxt/ccxt/compare/4.2.5...4.2.7">compare view</a></li> </ul> </details> <br /> [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=ccxt&package-manager=pip&previous-version=4.2.5&new-version=4.2.7)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) --- <details> <summary>Dependabot commands and options</summary> <br /> You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. 
You can achieve the same result by closing it manually - `@dependabot show <dependency name> ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself) </details>
https://api.github.com/repos/geekcomputers/Python/pulls/2076
2024-01-05T18:47:28Z
2024-01-06T10:36:57Z
2024-01-06T10:36:57Z
2024-01-06T10:37:05Z
147
geekcomputers/Python
31,088
Remove Russian locale from www.chess.com URLs
diff --git a/sherlock/resources/data.json b/sherlock/resources/data.json index fea689746..fb5332b21 100644 --- a/sherlock/resources/data.json +++ b/sherlock/resources/data.json @@ -300,8 +300,8 @@ "Chess": { "errorMsg": "Missing page... somebody made a wrong move.", "errorType": "message", - "url": "https://www.chess.com/ru/member/{}", - "urlMain": "https://www.chess.com/ru/", + "url": "https://www.chess.com/member/{}", + "urlMain": "https://www.chess.com/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" },
Change the URLs to the English version of the site instead of the Russian one.
https://api.github.com/repos/sherlock-project/sherlock/pulls/731
2020-08-19T21:06:34Z
2020-08-20T04:17:57Z
2020-08-20T04:17:57Z
2020-08-20T04:17:58Z
181
sherlock-project/sherlock
36,597
Bump onvif-zeep-async to 1.2.3
diff --git a/homeassistant/components/onvif/manifest.json b/homeassistant/components/onvif/manifest.json index 4b998bdd6cd0ff..ef4497fa284e9b 100644 --- a/homeassistant/components/onvif/manifest.json +++ b/homeassistant/components/onvif/manifest.json @@ -7,5 +7,5 @@ "documentation": "https://www.home-assistant.io/integrations/onvif", "iot_class": "local_push", "loggers": ["onvif", "wsdiscovery", "zeep"], - "requirements": ["onvif-zeep-async==1.2.2", "WSDiscovery==2.0.0"] + "requirements": ["onvif-zeep-async==1.2.3", "WSDiscovery==2.0.0"] } diff --git a/requirements_all.txt b/requirements_all.txt index 4c5dec6d8c6a45..ca51ee1b7ff188 100644 --- a/requirements_all.txt +++ b/requirements_all.txt @@ -1260,7 +1260,7 @@ ondilo==0.2.0 onkyo-eiscp==1.2.7 # homeassistant.components.onvif -onvif-zeep-async==1.2.2 +onvif-zeep-async==1.2.3 # homeassistant.components.opengarage open-garage==0.2.0 diff --git a/requirements_test_all.txt b/requirements_test_all.txt index 5ab64c066cabce..6bfaf522c6c151 100644 --- a/requirements_test_all.txt +++ b/requirements_test_all.txt @@ -938,7 +938,7 @@ omnilogic==0.4.5 ondilo==0.2.0 # homeassistant.components.onvif -onvif-zeep-async==1.2.2 +onvif-zeep-async==1.2.3 # homeassistant.components.opengarage open-garage==0.2.0
~~needs https://github.com/hunterjm/python-onvif-zeep-async/pull/11 https://github.com/hunterjm/python-onvif-zeep-async/pull/12~~ Specifically not tagged for a patch release as the changes in https://github.com/home-assistant/core/pull/90216 should go out first. This should go into beta though. ## Proposed change <!-- Describe the big picture of your changes here to communicate to the maintainers why we should accept this pull request. If it fixes a bug or resolves a feature request, be sure to link to that issue in the additional information section. --> Bump onvif-zeep-async to 1.2.3 changelog: https://github.com/hunterjm/python-onvif-zeep-async/compare/84a38b710c04f98e88519991e1e2aa4fec6e01d1...4bf547c1865d86445c5be8d97aa1777cd4a593ae ## Type of change <!-- What type of change does your PR introduce to Home Assistant? NOTE: Please, check only 1! box! If your PR requires multiple boxes to be checked, you'll most likely need to split it into multiple PRs. This makes things easier and faster to code review. --> - [x] Dependency upgrade - [ ] Bugfix (non-breaking change which fixes an issue) - [ ] New integration (thank you!) - [ ] New feature (which adds functionality to an existing integration) - [ ] Deprecation (breaking change to happen in the future) - [ ] Breaking change (fix/feature causing existing functionality to break) - [ ] Code quality improvements to existing code or addition of tests ## Additional information <!-- Details are important, and help maintainers processing your PR. Please be sure to fill out additional details, if applicable. --> - This PR fixes or closes issue: fixes https://github.com/home-assistant/core/issues/83524 fixes https://github.com/home-assistant/core/issues/45513 fixes https://github.com/home-assistant/core/issues/85902 - This PR is related to issue: - Link to documentation pull request: This may fix the following but its hard to know for sure without confirmation: https://github.com/home-assistant/core/issues/90173 https://github.com/home-assistant/core/issues/37545 ## Checklist <!-- Put an `x` in the boxes that apply. You can also fill these out after creating the PR. If you're unsure about any of them, don't hesitate to ask. We're here to help! This is simply a reminder of what we are going to look for before merging your code. --> - [x] The code change is tested and works locally. - [ ] Local tests pass. **Your PR cannot be merged unless tests pass** - [ ] There is no commented out code in this PR. - [ ] I have followed the [development checklist][dev-checklist] - [ ] I have followed the [perfect PR recommendations][perfect-pr] - [ ] The code has been formatted using Black (`black --fast homeassistant tests`) - [ ] Tests have been added to verify that the new code works. If user exposed functionality or configuration variables are added/changed: - [ ] Documentation added/updated for [www.home-assistant.io][docs-repository] If the code communicates with devices, web services, or third-party tools: - [ ] The [manifest file][manifest-docs] has all fields filled out correctly. Updated and included derived files by running: `python3 -m script.hassfest`. - [ ] New or updated dependencies have been added to `requirements_all.txt`. Updated by running `python3 -m script.gen_requirements_all`. - [ ] For the updated dependencies - a link to the changelog, or at minimum a diff between library versions is added to the PR description. - [ ] Untested files have been added to `.coveragerc`. 
<!-- This project is very active and we have a high turnover of pull requests. Unfortunately, the number of incoming pull requests is higher than what our reviewers can review and merge so there is a long backlog of pull requests waiting for review. You can help here! By reviewing another pull request, you will help raise the code quality of that pull request and the final review will be faster. This way the general pace of pull request reviews will go up and your wait time will go down. When picking a pull request to review, try to choose one that hasn't yet been reviewed. Thanks for helping out! --> To help with the load of incoming pull requests: - [ ] I have reviewed two other [open pull requests][prs] in this repository. [prs]: https://github.com/home-assistant/core/pulls?q=is%3Aopen+is%3Apr+-author%3A%40me+-draft%3Atrue+-label%3Awaiting-for-upstream+sort%3Acreated-desc+review%3Anone+-status%3Afailure <!-- Thank you for contributing <3 Below, some useful links you could explore: --> [dev-checklist]: https://developers.home-assistant.io/docs/en/development_checklist.html [manifest-docs]: https://developers.home-assistant.io/docs/en/creating_integration_manifest.html [quality-scale]: https://developers.home-assistant.io/docs/en/next/integration_quality_scale_index.html [docs-repository]: https://github.com/home-assistant/home-assistant.io [perfect-pr]: https://developers.home-assistant.io/docs/review-process/#creating-the-perfect-pr
https://api.github.com/repos/home-assistant/core/pulls/90382
2023-03-27T20:57:57Z
2023-03-28T23:54:34Z
2023-03-28T23:54:34Z
2023-03-30T00:05:39Z
482
home-assistant/core
39,236
Adding Binary to Decimal Program
diff --git a/Binary_to_Decimal.py b/Binary_to_Decimal.py new file mode 100644 index 0000000000..345b401f51 --- /dev/null +++ b/Binary_to_Decimal.py @@ -0,0 +1,20 @@ +#Program to convert binary to decimal + +def binaryToDecimal(binary): + """ + >>> binaryToDecimal(111110000) + 496 + >>> binaryToDecimal(10100) + 20 + >>> binaryToDecimal(101011) + 43 + """ + decimal, i, n = 0, 0, 0 + while(binary != 0): + dec = binary % 10 + decimal = decimal + dec * pow(2, i) + binary = binary//10 + i += 1 + print(decimal) + +binaryToDecimal(100)
Program to convert binary to decimal
https://api.github.com/repos/geekcomputers/Python/pulls/1206
2020-10-26T09:54:47Z
2020-10-29T15:55:14Z
2020-10-29T15:55:13Z
2020-10-29T15:55:14Z
209
geekcomputers/Python
31,754
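As a quick worked example of the conversion logic in the PR above: each binary digit, read from the least-significant end, is weighted by the matching power of two.

```python
# 101011 -> 1*2**0 + 1*2**1 + 0*2**2 + 1*2**3 + 0*2**4 + 1*2**5 = 1 + 2 + 8 + 32 = 43
binary = 101011
decimal, i = 0, 0
while binary != 0:
    decimal += (binary % 10) * 2 ** i   # take the lowest digit and weight it
    binary //= 10                       # drop that digit
    i += 1
print(decimal)  # 43
```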
Update models.py
diff --git a/keras/models.py b/keras/models.py index f2653ad4ea6..2f48f1d209a 100644 --- a/keras/models.py +++ b/keras/models.py @@ -453,8 +453,8 @@ def compile(self, optimizer, loss, self._train = K.function(train_ins, [train_loss], updates=updates) self._train_with_acc = K.function(train_ins, [train_loss, train_accuracy], updates=updates) self._predict = K.function(predict_ins, [self.y_test], updates=self.state_updates) - self._test = K.function(test_ins, [test_loss]) - self._test_with_acc = K.function(test_ins, [test_loss, test_accuracy]) + self._test = K.function(test_ins, [test_loss], updates=self.state_updates) + self._test_with_acc = K.function(test_ins, [test_loss, test_accuracy], updates=self.state_updates) def fit(self, X, y, batch_size=128, nb_epoch=100, verbose=1, callbacks=[], validation_split=0., validation_data=None, shuffle=True, @@ -1049,7 +1049,7 @@ def compile(self, optimizer, loss): self.loss = loss self._train = K.function(train_ins, [train_loss], updates=updates) - self._test = K.function(test_ins, [test_loss]) + self._test = K.function(test_ins, [test_loss], updates=self.state_updates) self._predict = K.function(inputs=ins, outputs=ys_test, updates=self.state_updates)
I suggest that model evaluation and testing via the _test functions also be made stateful (like _predict) for stateful recurrent networks.
https://api.github.com/repos/keras-team/keras/pulls/1411
2016-01-06T12:46:58Z
2016-01-06T18:55:16Z
2016-01-06T18:55:15Z
2016-01-06T18:55:16Z
355
keras-team/keras
47,642
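A plain-Python sketch of the concern behind the PR above (conceptual only, not Keras API): for a stateful model, an evaluation function built without the state updates treats every batch as the first one, whereas one that applies them carries the hidden state forward across batches.

```python
class StatefulCounter:
    """Toy stand-in for a stateful recurrent layer: the output depends on carried state."""
    def __init__(self):
        self.state = 0

    def step(self, x, apply_updates):
        out = self.state + x
        if apply_updates:   # analogous to compiling the function with updates=state_updates
            self.state = out
        return out

m = StatefulCounter()
print([m.step(1, apply_updates=False) for _ in range(3)])  # [1, 1, 1] -- state never advances
m = StatefulCounter()
print([m.step(1, apply_updates=True) for _ in range(3)])   # [1, 2, 3] -- state carried across calls
```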
Add Intel Gaudi to contributed HW in README
diff --git a/README.md b/README.md index 3ca332b80610..434a0dd53586 100755 --- a/README.md +++ b/README.md @@ -132,7 +132,7 @@ DeepSpeed has been integrated with several different popular open-source DL fram | NVIDIA | [![nv-torch110-p40](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-torch110-p40.yml/badge.svg?branch=master)](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-torch110-p40.yml) [![nv-torch110-v100](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-torch110-v100.yml/badge.svg?branch=master)](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-torch110-v100.yml) [![nv-torch-latest-v100](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-torch-latest-v100.yml/badge.svg?branch=master)](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-torch-latest-v100.yml) [![nv-h100](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-h100.yml/badge.svg?branch=master)](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-h100.yml) [![nv-inference](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-inference.yml/badge.svg?branch=master)](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-inference.yml) [![nv-nightly](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-nightly.yml/badge.svg?branch=master)](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-nightly.yml) | | AMD | [![amd-mi200](https://github.com/microsoft/DeepSpeed/actions/workflows/amd-mi200.yml/badge.svg?branch=master)](https://github.com/microsoft/DeepSpeed/actions/workflows/amd-mi200.yml) | | CPU | [![torch-latest-cpu](https://github.com/microsoft/DeepSpeed/actions/workflows/cpu-torch-latest.yml/badge.svg?branch=master)](https://github.com/microsoft/DeepSpeed/actions/workflows/cpu-torch-latest.yml) [![cpu-inference](https://github.com/microsoft/DeepSpeed/actions/workflows/cpu-inference.yml/badge.svg?branch=master)](https://github.com/microsoft/DeepSpeed/actions/workflows/cpu-inference.yml) | -| Habana | [![hpu-gaudi2](https://github.com/microsoft/DeepSpeed/actions/workflows/hpu-gaudi2.yml/badge.svg?branch=master)](https://github.com/microsoft/DeepSpeed/actions/workflows/hpu-gaudi2.yml) | +| Intel Gaudi | [![hpu-gaudi2](https://github.com/microsoft/DeepSpeed/actions/workflows/hpu-gaudi2.yml/badge.svg?branch=master)](https://github.com/microsoft/DeepSpeed/actions/workflows/hpu-gaudi2.yml) | | PyTorch Nightly | [![nv-torch-nightly-v100](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-torch-nightly-v100.yml/badge.svg?branch=master)](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-torch-nightly-v100.yml) | | Integrations | [![nv-transformers-v100](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-transformers-v100.yml/badge.svg?branch=master)](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-transformers-v100.yml) [![nv-lightning-v100](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-lightning-v100.yml/badge.svg?branch=master)](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-lightning-v100.yml) [![nv-accelerate-v100](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-accelerate-v100.yml/badge.svg?branch=master)](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-accelerate-v100.yml) [![nv-mii](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-mii.yml/badge.svg?branch=master)](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-mii.yml) 
[![nv-ds-chat](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-ds-chat.yml/badge.svg?branch=master)](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-ds-chat.yml) [![nv-sd](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-sd.yml/badge.svg)](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-sd.yml) | | Misc | [![Formatting](https://github.com/microsoft/DeepSpeed/actions/workflows/formatting.yml/badge.svg?branch=master)](https://github.com/microsoft/DeepSpeed/actions/workflows/formatting.yml) [![pages-build-deployment](https://github.com/microsoft/DeepSpeed/actions/workflows/pages/pages-build-deployment/badge.svg)](https://github.com/microsoft/DeepSpeed/actions/workflows/pages/pages-build-deployment) [![Documentation Status](https://readthedocs.org/projects/deepspeed/badge/?version=latest)](https://deepspeed.readthedocs.io/en/latest/?badge=latest)[![python](https://github.com/microsoft/DeepSpeed/actions/workflows/python.yml/badge.svg?branch=master)](https://github.com/microsoft/DeepSpeed/actions/workflows/python.yml) | @@ -160,6 +160,7 @@ dynamically link them at runtime. | Contributor | Hardware | Accelerator Name | Contributor validated | Upstream validated | | ----------- | -------- | ---------------- | --------------------- | ------------------ | +| Intel | Intel(R) Gaudi(R) 2 AI accelerator | hpu | Yes | Yes | | Intel | Intel(R) Xeon(R) Processors | cpu | Yes | Yes | | Intel | Intel(R) Data Center GPU Max series | xpu | Yes | No |
https://api.github.com/repos/microsoft/DeepSpeed/pulls/5300
2024-03-20T08:30:48Z
2024-03-20T16:02:29Z
2024-03-20T16:02:29Z
2024-03-20T16:02:29Z
1,363
microsoft/DeepSpeed
10,133
Update Styler documentation for escaping HTML
diff --git a/doc/source/user_guide/style.ipynb b/doc/source/user_guide/style.ipynb index 2d4b0f6a7545e..f831723f44931 100644 --- a/doc/source/user_guide/style.ipynb +++ b/doc/source/user_guide/style.ipynb @@ -1621,7 +1621,9 @@ "source": [ "### HTML Escaping\n", "\n", - "Suppose you have to display HTML within HTML, that can be a bit of pain when the renderer can't distinguish. You can use the `escape` formatting option to handle this, and even use it within a formatter that contains HTML itself." + "Suppose you have to display HTML within HTML, that can be a bit of pain when the renderer can't distinguish. You can use the `escape` formatting option to handle this, and even use it within a formatter that contains HTML itself.\n", + "\n", + "Note that if you're using `Styler` on untrusted, user-provided input to serve HTML then you should escape the input to prevent security vulnerabilities. See the Jinja2 documentation for more." ] }, { diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 31e025ace4b03..697a850e7f1d8 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -178,6 +178,7 @@ class Styler(StylerRenderer): escape : str, optional Use 'html' to replace the characters ``&``, ``<``, ``>``, ``'``, and ``"`` in cell display string with HTML-safe sequences. + Use 'latex' to replace the characters ``&``, ``%``, ``$``, ``#``, ``_``, ``{``, ``}``, ``~``, ``^``, and ``\`` in the cell display string with LaTeX-safe sequences. Use 'latex-math' to replace the characters @@ -209,6 +210,13 @@ class Styler(StylerRenderer): Notes ----- + .. warning:: + + ``Styler`` is primarily intended for use on safe input that you control. + When using ``Styler`` on untrusted, user-provided input to serve HTML, + you should set ``escape="html"`` to prevent security vulnerabilities. + See the Jinja2 documentation on escaping HTML for more. + Most styling will be done by passing style functions into ``Styler.apply`` or ``Styler.map``. Style functions should return values with strings containing CSS ``'attr: value'`` that will
Adds a note to the docs about `Styler` and `escape` on untrusted HTML input.
https://api.github.com/repos/pandas-dev/pandas/pulls/57365
2024-02-11T16:38:59Z
2024-02-21T17:59:55Z
2024-02-21T17:59:55Z
2024-02-21T18:00:02Z
600
pandas-dev/pandas
45,562
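A small usage sketch of the `escape` option that the documentation change above describes (hedged: it assumes a pandas version in which the `Styler` constructor accepts `escape`, as the updated docstring indicates):

```python
import pandas as pd
from pandas.io.formats.style import Styler

df = pd.DataFrame({"snippet": ["<script>alert('x')</script>", "a & b"]})
styled = Styler(df, escape="html")  # &, <, >, ', " in cell values become HTML-safe sequences
html = styled.to_html()
print("<script>" not in html)       # True: the raw tag no longer appears verbatim in the output
```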
Bump actions/checkout from 3 to 4
diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml index b3e1f0b902..a1804597d7 100644 --- a/.github/workflows/changelog.yml +++ b/.github/workflows/changelog.yml @@ -14,7 +14,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Grep CHANGES.md for PR number if: contains(github.event.pull_request.labels.*.name, 'skip news') != true diff --git a/.github/workflows/diff_shades.yml b/.github/workflows/diff_shades.yml index d685ef9456..637bd527ea 100644 --- a/.github/workflows/diff_shades.yml +++ b/.github/workflows/diff_shades.yml @@ -19,7 +19,7 @@ jobs: matrix: ${{ steps.set-config.outputs.matrix }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-python@v4 with: python-version: "*" @@ -52,7 +52,7 @@ jobs: steps: - name: Checkout this repository (full clone) - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: # The baseline revision could be rather old so a full clone is ideal. fetch-depth: 0 diff --git a/.github/workflows/diff_shades_comment.yml b/.github/workflows/diff_shades_comment.yml index 22c293f91d..b86bd93410 100644 --- a/.github/workflows/diff_shades_comment.yml +++ b/.github/workflows/diff_shades_comment.yml @@ -12,7 +12,7 @@ jobs: comment: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-python@v4 with: python-version: "*" diff --git a/.github/workflows/doc.yml b/.github/workflows/doc.yml index fc94dea62d..fa3d87c70f 100644 --- a/.github/workflows/doc.yml +++ b/.github/workflows/doc.yml @@ -21,7 +21,7 @@ jobs: runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up latest Python uses: actions/setup-python@v4 diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 8baace940b..566fc88078 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -16,7 +16,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up QEMU uses: docker/setup-qemu-action@v2 diff --git a/.github/workflows/fuzz.yml b/.github/workflows/fuzz.yml index 4439148a1c..1b5a50c0e0 100644 --- a/.github/workflows/fuzz.yml +++ b/.github/workflows/fuzz.yml @@ -25,7 +25,7 @@ jobs: python-version: ["3.8", "3.9", "3.10", "3.11"] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v4 diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 064d4745a5..3eaf5785f5 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -14,7 +14,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Assert PR target is main if: github.event_name == 'pull_request' && github.repository == 'psf/black' diff --git a/.github/workflows/pypi_upload.yml b/.github/workflows/pypi_upload.yml index ab2c6402c2..bf4d8349c9 100644 --- a/.github/workflows/pypi_upload.yml +++ b/.github/workflows/pypi_upload.yml @@ -14,7 +14,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up latest Python uses: actions/setup-python@v4 @@ -57,7 +57,7 @@ jobs: macos_arch: "universal2" steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Build wheels via cibuildwheel uses: pypa/cibuildwheel@v2.15.0 @@ -85,7 +85,7 @@ jobs: steps: - name: Checkout stable 
branch - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: ref: stable fetch-depth: 0 diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 7daa31ee90..1f33f2b814 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -35,7 +35,7 @@ jobs: os: [ubuntu-latest, macOS-latest, windows-latest] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v4 @@ -75,7 +75,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Send finished signal to Coveralls uses: AndreMiras/coveralls-python-action@8799c9f4443ac4201d2e2f2c725d577174683b99 with: @@ -93,7 +93,7 @@ jobs: os: [ubuntu-latest, macOS-latest] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up latest Python uses: actions/setup-python@v4 diff --git a/.github/workflows/upload_binary.yml b/.github/workflows/upload_binary.yml index 22535a64c6..bb19d48158 100644 --- a/.github/workflows/upload_binary.yml +++ b/.github/workflows/upload_binary.yml @@ -29,7 +29,7 @@ jobs: executable_mime: "application/x-mach-binary" steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up latest Python uses: actions/setup-python@v4
Bumps [actions/checkout](https://github.com/actions/checkout) from 3 to 4. <details> <summary>Release notes</summary> <p><em>Sourced from <a href="https://github.com/actions/checkout/releases">actions/checkout's releases</a>.</em></p> <blockquote> <h2>v4.0.0</h2> <h2>What's Changed</h2> <ul> <li>Update default runtime to node20 by <a href="https://github.com/takost"><code>@​takost</code></a> in <a href="https://redirect.github.com/actions/checkout/pull/1436">actions/checkout#1436</a></li> <li>Support fetching without the --progress option by <a href="https://github.com/simonbaird"><code>@​simonbaird</code></a> in <a href="https://redirect.github.com/actions/checkout/pull/1067">actions/checkout#1067</a></li> <li>Release 4.0.0 by <a href="https://github.com/takost"><code>@​takost</code></a> in <a href="https://redirect.github.com/actions/checkout/pull/1447">actions/checkout#1447</a></li> </ul> <h2>New Contributors</h2> <ul> <li><a href="https://github.com/takost"><code>@​takost</code></a> made their first contribution in <a href="https://redirect.github.com/actions/checkout/pull/1436">actions/checkout#1436</a></li> <li><a href="https://github.com/simonbaird"><code>@​simonbaird</code></a> made their first contribution in <a href="https://redirect.github.com/actions/checkout/pull/1067">actions/checkout#1067</a></li> </ul> <p><strong>Full Changelog</strong>: <a href="https://github.com/actions/checkout/compare/v3...v4.0.0">https://github.com/actions/checkout/compare/v3...v4.0.0</a></p> <h2>v3.6.0</h2> <h2>What's Changed</h2> <ul> <li>Mark test scripts with Bash'isms to be run via Bash by <a href="https://github.com/dscho"><code>@​dscho</code></a> in <a href="https://redirect.github.com/actions/checkout/pull/1377">actions/checkout#1377</a></li> <li>Add option to fetch tags even if fetch-depth &gt; 0 by <a href="https://github.com/RobertWieczoreck"><code>@​RobertWieczoreck</code></a> in <a href="https://redirect.github.com/actions/checkout/pull/579">actions/checkout#579</a></li> <li>Release 3.6.0 by <a href="https://github.com/luketomlinson"><code>@​luketomlinson</code></a> in <a href="https://redirect.github.com/actions/checkout/pull/1437">actions/checkout#1437</a></li> </ul> <h2>New Contributors</h2> <ul> <li><a href="https://github.com/RobertWieczoreck"><code>@​RobertWieczoreck</code></a> made their first contribution in <a href="https://redirect.github.com/actions/checkout/pull/579">actions/checkout#579</a></li> <li><a href="https://github.com/luketomlinson"><code>@​luketomlinson</code></a> made their first contribution in <a href="https://redirect.github.com/actions/checkout/pull/1437">actions/checkout#1437</a></li> </ul> <p><strong>Full Changelog</strong>: <a href="https://github.com/actions/checkout/compare/v3.5.3...v3.6.0">https://github.com/actions/checkout/compare/v3.5.3...v3.6.0</a></p> <h2>v3.5.3</h2> <h2>What's Changed</h2> <ul> <li>Fix: Checkout Issue in self hosted runner due to faulty submodule check-ins by <a href="https://github.com/megamanics"><code>@​megamanics</code></a> in <a href="https://redirect.github.com/actions/checkout/pull/1196">actions/checkout#1196</a></li> <li>Fix typos found by codespell by <a href="https://github.com/DimitriPapadopoulos"><code>@​DimitriPapadopoulos</code></a> in <a href="https://redirect.github.com/actions/checkout/pull/1287">actions/checkout#1287</a></li> <li>Add support for sparse checkouts by <a href="https://github.com/dscho"><code>@​dscho</code></a> and <a href="https://github.com/dfdez"><code>@​dfdez</code></a> in <a 
href="https://redirect.github.com/actions/checkout/pull/1369">actions/checkout#1369</a></li> <li>Release v3.5.3 by <a href="https://github.com/TingluoHuang"><code>@​TingluoHuang</code></a> in <a href="https://redirect.github.com/actions/checkout/pull/1376">actions/checkout#1376</a></li> </ul> <h2>New Contributors</h2> <ul> <li><a href="https://github.com/megamanics"><code>@​megamanics</code></a> made their first contribution in <a href="https://redirect.github.com/actions/checkout/pull/1196">actions/checkout#1196</a></li> <li><a href="https://github.com/DimitriPapadopoulos"><code>@​DimitriPapadopoulos</code></a> made their first contribution in <a href="https://redirect.github.com/actions/checkout/pull/1287">actions/checkout#1287</a></li> <li><a href="https://github.com/dfdez"><code>@​dfdez</code></a> made their first contribution in <a href="https://redirect.github.com/actions/checkout/pull/1369">actions/checkout#1369</a></li> </ul> <p><strong>Full Changelog</strong>: <a href="https://github.com/actions/checkout/compare/v3...v3.5.3">https://github.com/actions/checkout/compare/v3...v3.5.3</a></p> <h2>v3.5.2</h2> <h2>What's Changed</h2> <ul> <li>Fix: Use correct API url / endpoint in GHES by <a href="https://github.com/fhammerl"><code>@​fhammerl</code></a> in <a href="https://redirect.github.com/actions/checkout/pull/1289">actions/checkout#1289</a> based on <a href="https://redirect.github.com/actions/checkout/issues/1286">#1286</a> by <a href="https://github.com/1newsr"><code>@​1newsr</code></a></li> </ul> <p><strong>Full Changelog</strong>: <a href="https://github.com/actions/checkout/compare/v3.5.1...v3.5.2">https://github.com/actions/checkout/compare/v3.5.1...v3.5.2</a></p> <h2>v3.5.1</h2> <h2>What's Changed</h2> <ul> <li>Improve checkout performance on Windows runners by upgrading <code>@​actions/github</code> dependency by <a href="https://github.com/BrettDong"><code>@​BrettDong</code></a> in <a href="https://redirect.github.com/actions/checkout/pull/1246">actions/checkout#1246</a></li> </ul> <h2>New Contributors</h2> <ul> <li><a href="https://github.com/BrettDong"><code>@​BrettDong</code></a> made their first contribution in <a href="https://redirect.github.com/actions/checkout/pull/1246">actions/checkout#1246</a></li> </ul> <!-- raw HTML omitted --> </blockquote> <p>... 
(truncated)</p> </details> <details> <summary>Changelog</summary> <p><em>Sourced from <a href="https://github.com/actions/checkout/blob/main/CHANGELOG.md">actions/checkout's changelog</a>.</em></p> <blockquote> <h1>Changelog</h1> <h2>v4.0.0</h2> <ul> <li><a href="https://redirect.github.com/actions/checkout/pull/1067">Support fetching without the --progress option</a></li> <li><a href="https://redirect.github.com/actions/checkout/pull/1436">Update to node20</a></li> </ul> <h2>v3.6.0</h2> <ul> <li><a href="https://redirect.github.com/actions/checkout/pull/1377">Fix: Mark test scripts with Bash'isms to be run via Bash</a></li> <li><a href="https://redirect.github.com/actions/checkout/pull/579">Add option to fetch tags even if fetch-depth &gt; 0</a></li> </ul> <h2>v3.5.3</h2> <ul> <li><a href="https://redirect.github.com/actions/checkout/pull/1196">Fix: Checkout fail in self-hosted runners when faulty submodule are checked-in</a></li> <li><a href="https://redirect.github.com/actions/checkout/pull/1287">Fix typos found by codespell</a></li> <li><a href="https://redirect.github.com/actions/checkout/pull/1369">Add support for sparse checkouts</a></li> </ul> <h2>v3.5.2</h2> <ul> <li><a href="https://redirect.github.com/actions/checkout/pull/1289">Fix api endpoint for GHES</a></li> </ul> <h2>v3.5.1</h2> <ul> <li><a href="https://redirect.github.com/actions/checkout/pull/1246">Fix slow checkout on Windows</a></li> </ul> <h2>v3.5.0</h2> <ul> <li><a href="https://redirect.github.com/actions/checkout/pull/1237">Add new public key for known_hosts</a></li> </ul> <h2>v3.4.0</h2> <ul> <li><a href="https://redirect.github.com/actions/checkout/pull/1209">Upgrade codeql actions to v2</a></li> <li><a href="https://redirect.github.com/actions/checkout/pull/1210">Upgrade dependencies</a></li> <li><a href="https://redirect.github.com/actions/checkout/pull/1225">Upgrade <code>@​actions/io</code></a></li> </ul> <h2>v3.3.0</h2> <ul> <li><a href="https://redirect.github.com/actions/checkout/pull/1045">Implement branch list using callbacks from exec function</a></li> <li><a href="https://redirect.github.com/actions/checkout/pull/1050">Add in explicit reference to private checkout options</a></li> <li>[Fix comment typos (that got added in <a href="https://redirect.github.com/actions/checkout/issues/770">#770</a>)](<a href="https://redirect.github.com/actions/checkout/pull/1057">actions/checkout#1057</a>)</li> </ul> <h2>v3.2.0</h2> <ul> <li><a href="https://redirect.github.com/actions/checkout/pull/942">Add GitHub Action to perform release</a></li> <li><a href="https://redirect.github.com/actions/checkout/pull/967">Fix status badge</a></li> <li><a href="https://redirect.github.com/actions/checkout/pull/1002">Replace datadog/squid with ubuntu/squid Docker image</a></li> <li><a href="https://redirect.github.com/actions/checkout/pull/964">Wrap pipeline commands for submoduleForeach in quotes</a></li> <li><a href="https://redirect.github.com/actions/checkout/pull/1029">Update <code>@​actions/io</code> to 1.1.2</a></li> <li><a href="https://redirect.github.com/actions/checkout/pull/1039">Upgrading version to 3.2.0</a></li> </ul> <h2>v3.1.0</h2> <ul> <li><a href="https://redirect.github.com/actions/checkout/pull/939">Use <code>@​actions/core</code> <code>saveState</code> and <code>getState</code></a></li> <li><a href="https://redirect.github.com/actions/checkout/pull/922">Add <code>github-server-url</code> input</a></li> </ul> <h2>v3.0.2</h2> <ul> <li><a href="https://redirect.github.com/actions/checkout/pull/770">Add input 
<code>set-safe-directory</code></a></li> </ul> <h2>v3.0.1</h2> <!-- raw HTML omitted --> </blockquote> <p>... (truncated)</p> </details> <details> <summary>Commits</summary> <ul> <li><a href="https://github.com/actions/checkout/commit/3df4ab11eba7bda6032a0b82a6bb43b11571feac"><code>3df4ab1</code></a> Release 4.0.0 (<a href="https://redirect.github.com/actions/checkout/issues/1447">#1447</a>)</li> <li><a href="https://github.com/actions/checkout/commit/8b5e8b768746b50394015010d25e690bfab9dfbc"><code>8b5e8b7</code></a> Support fetching without the --progress option (<a href="https://redirect.github.com/actions/checkout/issues/1067">#1067</a>)</li> <li><a href="https://github.com/actions/checkout/commit/97a652b80035363df47baee5031ec8670b8878ac"><code>97a652b</code></a> Update default runtime to node20 (<a href="https://redirect.github.com/actions/checkout/issues/1436">#1436</a>)</li> <li>See full diff in <a href="https://github.com/actions/checkout/compare/v3...v4">compare view</a></li> </ul> </details> <br /> [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=actions/checkout&package-manager=github_actions&previous-version=3&new-version=4)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) --- <details> <summary>Dependabot commands and options</summary> <br /> You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show <dependency name> ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself) </details>
https://api.github.com/repos/psf/black/pulls/3883
2023-09-11T06:56:46Z
2023-09-11T16:34:36Z
2023-09-11T16:34:36Z
2023-09-11T16:34:37Z
1,726
psf/black
24,413
Fixed typo in docs/topics/http/urls.txt.
diff --git a/docs/topics/http/urls.txt b/docs/topics/http/urls.txt index 4644ddea2b37e..4ee7a4e9ba713 100644 --- a/docs/topics/http/urls.txt +++ b/docs/topics/http/urls.txt @@ -736,7 +736,7 @@ the fully qualified name into parts and then tries the following lookup: setting the current application on the :attr:`request.current_app <django.http.HttpRequest.current_app>` attribute. -3. If there is no current application. Django looks for a default +3. If there is no current application, Django looks for a default application instance. The default application instance is the instance that has an :term:`instance namespace` matching the :term:`application namespace` (in this example, an instance of ``polls`` called ``'polls'``).
https://api.github.com/repos/django/django/pulls/10224
2018-07-24T21:24:54Z
2018-07-24T22:35:59Z
2018-07-24T22:35:59Z
2018-07-24T22:35:59Z
194
django/django
50,761
Denormalization layer
diff --git a/keras/layers/preprocessing/normalization.py b/keras/layers/preprocessing/normalization.py index be052a59ec5..ef2252d7328 100644 --- a/keras/layers/preprocessing/normalization.py +++ b/keras/layers/preprocessing/normalization.py @@ -61,6 +61,7 @@ class Normalization(base_preprocessing_layer.PreprocessingLayer): value(s) will be broadcast to the shape of the kept axes above; if the value(s) cannot be broadcast, an error will be raised when this layer's `build()` method is called. + invert: If True, this layer will return the denormalized values of inputs. Default to False. Examples: @@ -96,9 +97,32 @@ class Normalization(base_preprocessing_layer.PreprocessingLayer): array([[-1.4142135 ], [-0.70710677], [ 0. ]], dtype=float32)> + + Using it in the invert manner for denormalizing the inputs with calculating a mean and variance for each index on the last axis. + + >>> adapt_data = np.array([[0., 7., 4.], + ... [2., 9., 6.], + ... [0., 7., 4.], + ... [2., 9., 6.]], dtype='float32') + >>> input_data = np.array([[1., 2., 3.]], dtype='float32') + >>> layer = tf.keras.layers.Normalization(axis=-1, invert=True) + >>> layer.adapt(adapt_data) + >>> layer(input_data) + <tf.Tensor: shape=(1, 3), dtype=float32, numpy= + array([2., 10., 8.], dtype=float32)> + + Using it in the invert manner for denormalizing the inputs with passing the mean and variance directly. + + >>> input_data = np.array([[-1.4142135], [-0.70710677], [0.]], dtype='float32') + >>> layer = tf.keras.layers.Normalization(mean=3., variance=2., invert=True) + >>> layer(input_data) + <tf.Tensor: shape=(3, 1), dtype=float32, numpy= + array([[1. ], + [2. ], + [3. ]], dtype=float32)> """ - def __init__(self, axis=-1, mean=None, variance=None, **kwargs): + def __init__(self, axis=-1, mean=None, variance=None, invert=False, **kwargs): super().__init__(**kwargs) base_preprocessing_layer.keras_kpl_gauge.get_cell('Normalization').set(True) @@ -124,6 +148,7 @@ def __init__(self, axis=-1, mean=None, variance=None, **kwargs): 'must be set. 
Got mean: {} and variance: {}'.format(mean, variance)) self.input_mean = mean self.input_variance = variance + self.invert = invert def build(self, input_shape): super().build(input_shape) @@ -302,7 +327,11 @@ def call(self, inputs): # The base layer automatically casts floating-point inputs, but we # explicitly cast here to also allow integer inputs to be passed inputs = tf.cast(inputs, self.compute_dtype) - return ((inputs - self.mean) / + if self.invert: + return ((inputs + self.mean) * + tf.maximum(tf.sqrt(self.variance), backend.epsilon())) + else: + return ((inputs - self.mean) / tf.maximum(tf.sqrt(self.variance), backend.epsilon())) def compute_output_shape(self, input_shape): diff --git a/keras/layers/preprocessing/normalization_test.py b/keras/layers/preprocessing/normalization_test.py index 4edf789089b..79cf334d3c8 100644 --- a/keras/layers/preprocessing/normalization_test.py +++ b/keras/layers/preprocessing/normalization_test.py @@ -198,6 +198,24 @@ def test_output_dtype(self): output = layer(input_data) self.assertAllEqual(output.dtype, tf.float64) + def test_invert(self): + data = np.array([0., 2., 0., 2.]) + norm = normalization.Normalization(mean=1.0, variance=1.0) + inv_norm = normalization.Normalization(mean=1.0, variance=1.0, invert=True) + output = norm(data) + output2 = inv_norm(output) + self.assertListEqual(output2.shape.as_list(), [4]) + self.assertAllClose(output2, [0., 2., 0., 2.]) + + def test_invert_adapt(self): + input_data = [[0.], [2.], [0.], [2.]] + norm = keras.layers.Normalization(axis=-1) + norm.adapt(input_data) + inv_norm = keras.layers.Normalization(axis=-1, invert=True) + inv_norm.adapt(input_data) + output = norm(input_data) + output2 = inv_norm(output) + self.assertAllClose(input_data, output2) @test_combinations.run_all_keras_modes(always_skip_v1=True) class NormalizationAdaptTest(test_combinations.TestCase,
@gadagashwini Solved #16284
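For reference, a minimal round-trip sketch of the `invert` option this PR adds, mirroring the `test_invert_adapt` case in the diff above. It is illustrative only and assumes a TensorFlow/Keras build that already includes this patch.

```python
import numpy as np
import tensorflow as tf

data = np.array([[0.], [2.], [0.], [2.]], dtype="float32")

# Forward pass: a standard Normalization layer adapted on the data.
norm = tf.keras.layers.Normalization(axis=-1)
norm.adapt(data)

# Inverse pass: a second layer with invert=True (added by this PR),
# adapted on the same data, maps normalized values back.
denorm = tf.keras.layers.Normalization(axis=-1, invert=True)
denorm.adapt(data)

recovered = denorm(norm(data))  # expected to be close to the original data
```

Adapting both layers on the same data is what makes the round trip line up, exactly as in the PR's test.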
https://api.github.com/repos/keras-team/keras/pulls/16350
2022-04-02T11:06:21Z
2022-04-08T19:06:45Z
2022-04-08T19:06:45Z
2022-04-09T08:13:27Z
1,201
keras-team/keras
47,710
[doc] add ColossalChat
diff --git a/README.md b/README.md index 3098d72b4591..77c3471d9d25 100644 --- a/README.md +++ b/README.md @@ -66,7 +66,7 @@ <li> <a href="#Colossal-AI-in-the-Real-World">Colossal-AI for Real World Applications</a> <ul> - <li><a href="#ChatGPT">ChatGPT: Low-cost ChatGPT Equivalent Implementation Process</a></li> + <li><a href="#ColossalChat">ColossalChat: An Open-Source Solution for Cloning ChatGPT With a Complete RLHF Pipeline</a></li> <li><a href="#AIGC">AIGC: Acceleration of Stable Diffusion</a></li> <li><a href="#Biomedicine">Biomedicine: Acceleration of AlphaFold Protein Structure</a></li> </ul> @@ -214,22 +214,31 @@ Please visit our [documentation](https://www.colossalai.org/) and [examples](htt <p align="right">(<a href="#top">back to top</a>)</p> ## Colossal-AI in the Real World -### ChatGPT -A low-cost [ChatGPT](https://openai.com/blog/chatgpt/) equivalent implementation process. [[code]](https://github.com/hpcaitech/ColossalAI/tree/main/applications/ChatGPT) [[blog]](https://www.hpc-ai.tech/blog/colossal-ai-chatgpt) -<p id="ChatGPT_scaling" align="center"> + +### ColossalChat + +<div align="center"> + <a href="https://chat.colossalai.org/"> + <img src="https://raw.githubusercontent.com/hpcaitech/public_assets/main/colossalai/img/Chat-demo.png" width="700" /> + </a> +</div> + +[ColossalChat](https://github.com/hpcaitech/ColossalAI/tree/main/applications/Chat): An open-source solution for cloning [ChatGPT](https://openai.com/blog/chatgpt/) with a complete RLHF pipeline. [[code]](https://github.com/hpcaitech/ColossalAI/tree/main/applications/Chat) [[blog]](https://www.hpc-ai.tech/blog/colossal-ai-chatgpt) [[demo]](https://chat.colossalai.org) + +<p id="ColossalChat_scaling" align="center"> <img src="https://raw.githubusercontent.com/hpcaitech/public_assets/main/applications/chatgpt/ChatGPT%20scaling.png" width=800/> </p> - Up to 7.73 times faster for single server training and 1.42 times faster for single-GPU inference -<p id="ChatGPT-1GPU" align="center"> +<p id="ColossalChat-1GPU" align="center"> <img src="https://raw.githubusercontent.com/hpcaitech/public_assets/main/applications/chatgpt/ChatGPT-1GPU.jpg" width=450/> </p> - Up to 10.3x growth in model capacity on one GPU - A mini demo training process requires only 1.62GB of GPU memory (any consumer-grade GPU) -<p id="inference" align="center"> +<p id="ColossalChat-LoRA" align="center"> <img src="https://raw.githubusercontent.com/hpcaitech/public_assets/main/applications/chatgpt/LoRA%20data.jpg" width=600/> </p> diff --git a/docs/README-zh-Hans.md b/docs/README-zh-Hans.md index 81c45abfd833..4be923eca024 100644 --- a/docs/README-zh-Hans.md +++ b/docs/README-zh-Hans.md @@ -66,7 +66,7 @@ <li> <a href="#Colossal-AI-in-the-Real-World">Colossal-AI 成功案例</a> <ul> - <li><a href="#ChatGPT">ChatGPT: 低成本复现ChatGPT完整流程</a></li> + <li><a href="#ColossalChat">ColossalChat:完整RLHF流程0门槛克隆ChatGPT</a></li> <li><a href="#AIGC">AIGC: 加速 Stable Diffusion</a></li> <li><a href="#生物医药">生物医药: 加速AlphaFold蛋白质结构预测</a></li> </ul> @@ -212,22 +212,30 @@ Colossal-AI 为您提供了一系列并行组件。我们的目标是让您的 <p align="right">(<a href="#top">返回顶端</a>)</p> ## Colossal-AI 成功案例 -### ChatGPT -低成本复现[ChatGPT](https://openai.com/blog/chatgpt/)完整流程 [[代码]](https://github.com/hpcaitech/ColossalAI/tree/main/applications/ChatGPT) [[博客]](https://www.hpc-ai.tech/blog/colossal-ai-chatgpt) -<p id="ChatGPT_scaling" align="center"> +### ColossalChat + +<div align="center"> + <a href="https://chat.colossalai.org/"> + <img 
src="https://raw.githubusercontent.com/hpcaitech/public_assets/main/colossalai/img/Chat-demo.png" width="700" /> + </a> +</div> + +[ColossalChat](https://github.com/hpcaitech/ColossalAI/tree/main/applications/Chat): 完整RLHF流程0门槛克隆 [ChatGPT](https://openai.com/blog/chatgpt/) [[代码]](https://github.com/hpcaitech/ColossalAI/tree/main/applications/Chat) [[博客]](https://www.hpc-ai.tech/blog/colossal-ai-chatgpt) [[在线样例]](https://chat.colossalai.org) + +<p id="ColossalChat_scaling" align="center"> <img src="https://raw.githubusercontent.com/hpcaitech/public_assets/main/applications/chatgpt/ChatGPT%20scaling.png" width=800/> </p> - 最高可提升单机训练速度7.73倍,单卡推理速度1.42倍 -<p id="ChatGPT-1GPU" align="center"> +<p id="ColossalChat-1GPU" align="center"> <img src="https://raw.githubusercontent.com/hpcaitech/public_assets/main/applications/chatgpt/ChatGPT-1GPU.jpg" width=450/> </p> - 单卡模型容量最多提升10.3倍 - 最小demo训练流程最低仅需1.62GB显存 (任意消费级GPU) -<p id="inference" align="center"> +<p id="ColossalChat-LoRA" align="center"> <img src="https://raw.githubusercontent.com/hpcaitech/public_assets/main/applications/chatgpt/LoRA%20data.jpg" width=600/> </p>
## 📌 Checklist before creating the PR - [ ] I have created an issue for this PR for traceability - [ ] The title follows the standard format: `[doc/gemini/tensor/...]: A concise description` - [ ] I have added relevant tags if possible for us to better distinguish different PRs ## 🚨 Issue number > Link this PR to your issue with words like fixed to automatically close the linked issue upon merge > > e.g. `fixed #1234`, `closed #1234`, `resolved #1234` ## 📝 What does this PR do? > Summarize your work here. > if you have any plots/diagrams/screenshots/tables, please attach them here. ## 💥 Checklist before requesting a review - [ ] I have linked my PR to an issue ([instruction](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue)) - [ ] My issue clearly describes the problem/feature/proposal, with diagrams/charts/table/code if possible - [ ] I have performed a self-review of my code - [ ] I have added thorough tests. - [ ] I have added docstrings for all the functions/methods I implemented ## ⭐️ Do you enjoy contributing to Colossal-AI? - [ ] 🌝 Yes, I do. - [ ] 🌚 No, I don't. Tell us more if you don't enjoy contributing to Colossal-AI.
https://api.github.com/repos/hpcaitech/ColossalAI/pulls/3297
2023-03-28T18:28:53Z
2023-03-28T18:35:10Z
2023-03-28T18:35:10Z
2023-03-28T18:35:15Z
1,621
hpcaitech/ColossalAI
11,707
Add Hong Kong Observatory API
diff --git a/README.md b/README.md index fe6625dc6d..7ec9ae2763 100644 --- a/README.md +++ b/README.md @@ -1124,6 +1124,7 @@ API | Description | Auth | HTTPS | CORS | | [AviationWeather](https://www.aviationweather.gov/dataserver) | NOAA aviation weather forecasts and observations | No | Yes | Unknown | | [ColorfulClouds](https://open.caiyunapp.com/ColorfulClouds_Weather_API) | Weather | `apiKey` | Yes | Yes | | [Foreca](https://developer.foreca.com) | Weather | `OAuth` | Yes | Unknown | +| [Hong Kong Obervatory](https://www.hko.gov.hk/en/abouthko/opendata_intro.htm) | Provide weather information, earthquake information, and climate data | No | Yes | Unknown | | [MetaWeather](https://www.metaweather.com/api/) | Weather | No | Yes | No | | [Meteorologisk Institutt](https://api.met.no/weatherapi/documentation) | Weather and climate data | `User-Agent` | Yes | Unknown | | [NOAA Climate Data](https://www.ncdc.noaa.gov/cdo-web/) | Weather and climate data | `apiKey` | Yes | Unknown |
<!-- Thank you for taking the time to work on a Pull Request for this project! --> <!-- To ensure your PR is dealt with swiftly please check the following: --> - [x] My submission is formatted according to the guidelines in the [contributing guide](/CONTRIBUTING.md) - [x] My addition is ordered alphabetically - [x] My submission has a useful description - [x] The description does not end with punctuation - [x] Each table column is padded with one space on either side - [x] I have searched the repository for any relevant issues or pull requests - [x] Any category I am creating has the minimum requirement of 3 items - [x] All changes have been [squashed][squash-link] into a single commit [squash-link]: <https://github.com/todotxt/todo.txt-android/wiki/Squash-All-Commits-Related-to-a-Single-Issue-into-a-Single-Commit> Notes: Since there is no available API documentation webpage, I attached the open data website; the documentation is at the bottom of that page. Thank you.
https://api.github.com/repos/public-apis/public-apis/pulls/1993
2021-08-26T08:53:49Z
2021-09-01T05:53:54Z
2021-09-01T05:53:54Z
2021-09-02T13:09:14Z
288
public-apis/public-apis
35,550
docs: add certbot-dns-azure third-party plugin
diff --git a/certbot/docs/using.rst b/certbot/docs/using.rst index e38725ca373..cc061b62232 100644 --- a/certbot/docs/using.rst +++ b/certbot/docs/using.rst @@ -284,6 +284,7 @@ dns-ispconfig_ Y N DNS Authentication using ISPConfig as DNS server dns-clouddns_ Y N DNS Authentication using CloudDNS API dns-lightsail_ Y N DNS Authentication using Amazon Lightsail DNS API dns-inwx_ Y Y DNS Authentication for INWX through the XML API +dns-azure_ Y N DNS Authentication using Azure DNS ================== ==== ==== =============================================================== .. _haproxy: https://github.com/greenhost/certbot-haproxy @@ -298,6 +299,7 @@ dns-inwx_ Y Y DNS Authentication for INWX through the XML API .. _dns-clouddns: https://github.com/vshosting/certbot-dns-clouddns .. _dns-lightsail: https://github.com/noi/certbot-dns-lightsail .. _dns-inwx: https://github.com/oGGy990/certbot-dns-inwx/ +.. _dns-azure: https://github.com/binkhq/certbot-dns-azure If you're interested, you can also :ref:`write your own plugin <dev-plugin>`.
https://github.com/certbot/certbot/pull/8727#issuecomment-815607549
https://api.github.com/repos/certbot/certbot/pulls/8796
2021-04-20T22:09:05Z
2021-04-22T19:38:18Z
2021-04-22T19:38:18Z
2021-04-22T19:38:19Z
331
certbot/certbot
1,794
gh-111881: Import doctest lazily in libregrtest
diff --git a/Lib/test/libregrtest/single.py b/Lib/test/libregrtest/single.py index ad75ef54a8c3f8..5c7bc7d40fb394 100644 --- a/Lib/test/libregrtest/single.py +++ b/Lib/test/libregrtest/single.py @@ -1,4 +1,3 @@ -import doctest import faulthandler import gc import importlib @@ -99,14 +98,18 @@ def regrtest_runner(result: TestResult, test_func, runtests: RunTests) -> None: stats = test_result case unittest.TestResult(): stats = TestStats.from_unittest(test_result) - case doctest.TestResults(): - stats = TestStats.from_doctest(test_result) case None: print_warning(f"{result.test_name} test runner returned None: {test_func}") stats = None case _: - print_warning(f"Unknown test result type: {type(test_result)}") - stats = None + # Don't import doctest at top level since only few tests return + # a doctest.TestResult instance. + import doctest + if isinstance(test_result, doctest.TestResults): + stats = TestStats.from_doctest(test_result) + else: + print_warning(f"Unknown test result type: {type(test_result)}") + stats = None result.stats = stats
In most cases, doctest is not needed. So don't always import it at startup. <!-- Thanks for your contribution! Please read this comment in its entirety. It's quite important. # Pull Request title It should be in the following format: ``` gh-NNNNN: Summary of the changes made ``` Where: gh-NNNNN refers to the GitHub issue number. Most PRs will require an issue number. Trivial changes, like fixing a typo, do not need an issue. # Backport Pull Request title If this is a backport PR (PR made against branches other than `main`), please ensure that the PR title is in the following format: ``` [X.Y] <title from the original PR> (GH-NNNN) ``` Where: [X.Y] is the branch name, e.g. [3.6]. GH-NNNN refers to the PR number from `main`. --> <!-- gh-issue-number: gh-111881 --> * Issue: gh-111881 <!-- /gh-issue-number -->
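A rough illustration of the deferred-import pattern the diff switches to (not the exact libregrtest code): the import only runs on the rare path that actually needs `doctest`.

```python
def doctest_stats(test_result):
    # Deferred import: most test runs never return a doctest result,
    # so the module is only imported on this rare path.
    import doctest
    if isinstance(test_result, doctest.TestResults):
        return test_result.attempted, test_result.failed
    return None
```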
https://api.github.com/repos/python/cpython/pulls/111884
2023-11-09T13:57:37Z
2023-11-09T15:00:10Z
2023-11-09T15:00:10Z
2023-11-09T15:00:33Z
320
python/cpython
4,164
Add Google Fonts
diff --git a/README.md b/README.md index ea63e30dc4..5028a69bea 100644 --- a/README.md +++ b/README.md @@ -448,6 +448,7 @@ API | Description | Auth | HTTPS | CORS | | [Gitter](https://developer.gitter.im/docs/welcome) | Chat for Developers | `OAuth` | Yes | Unknown | | [Glitterly](https://developers.glitterly.app) | Image generation API | `apiKey` | Yes | Yes | | [Google Docs](https://developers.google.com/docs/api/reference/rest) | API to read, write, and format Google Docs documents | `OAuth` | Yes | Unknown | +| [Google Fonts](https://developers.google.com/fonts/docs/developer_api) | Metadata for all families served by Google Fonts | `apiKey` | Yes | Unknown | | [Google Keep](https://developers.google.com/keep/api/reference/rest) | API to read, write, and format Google Keep notes | `OAuth` | Yes | Unknown | | [Google Sheets](https://developers.google.com/sheets/api/reference/rest) | API to read, write, and format Google Sheets data | `OAuth` | Yes | Unknown | | [Google Slides](https://developers.google.com/slides/api/reference/rest) | API to read, write, and format Google Slides presentations | `OAuth` | Yes | Unknown |
<!-- Thank you for taking the time to work on a Pull Request for this project! --> <!-- To ensure your PR is dealt with swiftly please check the following: --> - [x] My submission is formatted according to the guidelines in the [contributing guide](/CONTRIBUTING.md) - [x] My addition is ordered alphabetically - [x] My submission has a useful description - [x] The description does not have more than 100 characters - [x] The description does not end with punctuation - [x] Each table column is padded with one space on either side - [x] I have searched the repository for any relevant issues or pull requests - [x] Any category I am creating has the minimum requirement of 3 items - [x] All changes have been [squashed][squash-link] into a single commit [squash-link]: <https://github.com/todotxt/todo.txt-android/wiki/Squash-All-Commits-Related-to-a-Single-Issue-into-a-Single-Commit>
https://api.github.com/repos/public-apis/public-apis/pulls/2576
2021-10-16T13:29:59Z
2021-10-25T21:14:41Z
2021-10-25T21:14:41Z
2021-10-25T21:14:41Z
301
public-apis/public-apis
35,735
Add link to Black Playground
diff --git a/README.md b/README.md index 61cb88ae62..781890d69a 100644 --- a/README.md +++ b/README.md @@ -26,6 +26,8 @@ content instead. *Black* makes code review faster by producing the smallest diffs possible. +Try it out now using the [Black Playground](https://black.now.sh). + --- *Contents:* **[Installation and usage](#installation-and-usage)** | @@ -792,7 +794,7 @@ Using the badge in README.rst: ``` .. image:: https://img.shields.io/badge/code%20style-black-000000.svg :target: https://github.com/ambv/black -``` +``` Looks like this: [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/ambv/black) diff --git a/docs/index.rst b/docs/index.rst index da60f7b272..408683757f 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -14,6 +14,8 @@ possible. Blackened code looks the same regardless of the project you're reading. Formatting becomes transparent after a while and you can focus on the content instead. +Try it out now using the `Black Playground <https://black.now.sh>`_. + .. note:: `Black is beta <installation_and_usage.html#note-this-is-a-beta-product>`_.
Open to changing copy or placement.
https://api.github.com/repos/psf/black/pulls/437
2018-08-07T14:27:37Z
2018-08-17T15:36:58Z
2018-08-17T15:36:58Z
2018-08-17T15:37:01Z
339
psf/black
24,130
remove unnecessary space
diff --git a/docs/templates/plugin.rst.j2 b/docs/templates/plugin.rst.j2 index 1c63669f2db318..9514047aa5a2f5 100644 --- a/docs/templates/plugin.rst.j2 +++ b/docs/templates/plugin.rst.j2 @@ -162,7 +162,7 @@ Parameters {% if 'ini' in value %} <div> ini entries: {% for ini in value.ini %} - <p>[@{ ini.section }@ ]<br>@{ ini.key }@ = @{ value.default | default('VALUE') }@</p> + <p>[@{ ini.section }@]<br>@{ ini.key }@ = @{ value.default | default('VALUE') }@</p> {% endfor %} </div> {% endif %}
##### SUMMARY <!--- Describe the change below, including rationale and design decisions --> I saw an unnecessary space in each entry like `[paramiko_connection ]` in the docs <!--- HINT: Include "Fixes #nnn" if you are fixing an existing issue --> ##### ISSUE TYPE <!--- Pick one below and delete the rest --> - Docs Pull Request ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below --> ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below ``` ##### ADDITIONAL INFORMATION <!--- Include additional information to help people understand the change here --> <!--- A step-by-step reproduction of the problem is helpful if there is no related issue --> kindly advised by jborean93 in the docs channel <!--- Paste verbatim command output below, e.g. before and after your change --> ```paste below ```
https://api.github.com/repos/ansible/ansible/pulls/46462
2018-10-04T00:33:34Z
2018-10-04T00:47:32Z
2018-10-04T00:47:32Z
2019-07-22T16:50:33Z
181
ansible/ansible
49,062
Fix chart studio link in `st.plotly_chart` docstring
diff --git a/lib/streamlit/elements/plotly_chart.py b/lib/streamlit/elements/plotly_chart.py index b6056484ba50..b7efbbdfd71b 100644 --- a/lib/streamlit/elements/plotly_chart.py +++ b/lib/streamlit/elements/plotly_chart.py @@ -95,7 +95,7 @@ def plotly_chart( Use 'streamlit' to insert the plot and all its dependencies directly in the Streamlit app using plotly's offline mode (default). Use any other sharing mode to send the chart to Plotly chart studio, which - requires an account. See https://plotly.com/chart-studio/ for more information. + requires an account. See https://plot.ly/python/chart-studio/ for more information. **kwargs Any argument accepted by Plotly's `plot()` function.
## 📚 Context The `st.plotly_chart` docstring contains a [dead link](https://plotly.com/chart-studio/) to Plotly Chart Studio. - What kind of change does this PR introduce? - [x] Other, please describe: Doc improvement request ## 🧠 Description of Changes - Replaces the dead link in the plotly chart docstring with https://plot.ly/python/chart-studio/ - [x] This is a visible (user-facing) change **Revised:** ![image](https://user-images.githubusercontent.com/20672874/180374786-7640f7fc-0e71-4822-8817-29be2388fe2f.png) **Current:** ![image](https://user-images.githubusercontent.com/20672874/180374815-8dd17d62-fd71-44eb-9ee5-bd57b59c8e07.png) ## 🧪 Testing Done - [x] Screenshots included --- **Contribution License Agreement** By submitting this pull request you agree that all contributions to this project are made under the Apache 2.0 license.
https://api.github.com/repos/streamlit/streamlit/pulls/5017
2022-07-22T06:13:19Z
2022-07-22T14:30:28Z
2022-07-22T14:30:28Z
2023-05-26T23:34:03Z
194
streamlit/streamlit
21,848
Add gym-derk to environments
diff --git a/docs/environments.md b/docs/environments.md index 7aaeeed66d2..d647435643f 100644 --- a/docs/environments.md +++ b/docs/environments.md @@ -304,6 +304,12 @@ A simple environment using [PyBullet](http://github.com/bulletphysics/bullet3) t Learn more here: https://github.com/JacopoPan/gym-pybullet-drones +### gym-derk: GPU accelerated MOBA environment + +This is a 3v3 MOBA environment where you train creatures to figth each other. It runs entirely on the GPU so you can easily have hundreds of instances running in parallel. There are around 15 items for the creatures, 60 "senses", 5 actions, and ~23 tweakable rewards. It's also possible to benchmark an agent against other agents online. It's available for free for training for personal use, and otherwise costs money; see licensing details on the website. + +More here: https://gym.derkgame.com + ### gym-abalone: A two-player abstract strategy board game An implementation of the board game Abalone.
Hi, I'm the author of [Derk's Gym](https://gym.derkgame.com), which I implemented to be as compatible as possible with OpenAI gym. It's a multi-agent environment though, and as far as I can tell there's no clear consensus on how to handle that. I tried to model the API after the multiagent-particles-env. Would also love to get feedback on the environment itself!
https://api.github.com/repos/openai/gym/pulls/2044
2020-09-05T06:54:22Z
2020-09-25T23:14:29Z
2020-09-25T23:14:29Z
2020-09-26T10:16:26Z
252
openai/gym
5,221
Fix #1856
diff --git a/src/you_get/extractors/youku.py b/src/you_get/extractors/youku.py index ff23e70652..c3ad67eb0e 100644 --- a/src/you_get/extractors/youku.py +++ b/src/you_get/extractors/youku.py @@ -53,18 +53,14 @@ def trans_e(a, c): return result - def generate_ep(self, no, streamfileids, sid, token): - number = hex(int(str(no), 10))[2:].upper() - if len(number) == 1: - number = '0' + number - fileid = streamfileids[0:8] + number + streamfileids[10:] + def generate_ep(self, fileid, sid, token): ep = parse.quote(base64.b64encode( ''.join(self.__class__.trans_e( self.f_code_2, #use the 86 fcode if using 86 sid + '_' + fileid + '_' + token)).encode('latin1')), safe='~()*!.\'' ) - return fileid, ep + return ep # Obsolete -- used to parse m3u8 on pl.youku.com def parse_m3u8(m3u8): @@ -228,14 +224,12 @@ def prepare(self, **kwargs): 'video_profile': stream_types[stream_id]['video_profile'], 'size': stream['size'], 'pieces': [{ - 'fileid': stream['stream_fileid'], 'segs': stream['segs'] }] } else: self.streams[stream_id]['size'] += stream['size'] self.streams[stream_id]['pieces'].append({ - 'fileid': stream['stream_fileid'], 'segs': stream['segs'] }) @@ -252,14 +246,12 @@ def prepare(self, **kwargs): 'video_profile': stream_types[stream_id]['video_profile'], 'size': stream['size'], 'pieces': [{ - 'fileid': stream['stream_fileid'], 'segs': stream['segs'] }] } else: self.streams_fallback[stream_id]['size'] += stream['size'] self.streams_fallback[stream_id]['pieces'].append({ - 'fileid': stream['stream_fileid'], 'segs': stream['segs'] }) @@ -294,17 +286,17 @@ def extract(self, **kwargs): pieces = self.streams[stream_id]['pieces'] for piece in pieces: segs = piece['segs'] - streamfileid = piece['fileid'] seg_count = len(segs) for no in range(0, seg_count): k = segs[no]['key'] + fileid = segs[no]['fileid'] if k == -1: # we hit the paywall; stop here log.w('Skipping %d out of %d segments due to paywall' % (seg_count - no, seg_count)) break - fileid, ep = self.__class__.generate_ep(self, no, streamfileid, - sid, token) + ep = self.__class__.generate_ep(self, fileid, + sid, token) q = parse.urlencode(dict( ctype = self.ctype, ev = 1,
It seems that Youku's issue is that they put their fileids into the corresponding segs, so I edited `generate_ep`. It needs more testing before merge.
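A hedged sketch of the per-segment flow this patch moves to, with the fileid read from each seg instead of being derived from a stream-level id. `generate_ep` is the extractor method shown in the diff; the helper itself is illustrative.

```python
def collect_eps(extractor, segs, sid, token):
    """Collect (fileid, ep) pairs, stopping at the first paywalled segment."""
    pairs = []
    for seg in segs:
        if seg['key'] == -1:       # paywall marker, as in the diff
            break
        fileid = seg['fileid']     # per-seg fileid is the actual fix
        pairs.append((fileid, extractor.generate_ep(fileid, sid, token)))
    return pairs
```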
https://api.github.com/repos/soimort/you-get/pulls/1857
2017-04-15T16:15:20Z
2017-04-16T01:08:24Z
2017-04-16T01:08:24Z
2017-04-16T01:08:24Z
768
soimort/you-get
20,944
[docstring] misc arg doc corrections
diff --git a/src/transformers/configuration_bart.py b/src/transformers/configuration_bart.py index 3a28dd9e3726a..f36f877a29eb7 100644 --- a/src/transformers/configuration_bart.py +++ b/src/transformers/configuration_bart.py @@ -65,7 +65,7 @@ Typically set this to something large just in case (e.g., 512 or 1024 or 2048). init_std (:obj:`float`, optional, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - add_bias_logits (:obj:`int`, optional, defaults to False): + add_bias_logits (:obj:`bool`, optional, defaults to False): True for marian only. normalize_before (:obj:`bool`, optional, defaults to False): Call layernorm before attention ops. True for pegasus, mbart. False for bart. FIXME: marian? @@ -89,10 +89,10 @@ Google "layerdrop arxiv", as its not explainable in one line. extra_pos_embeddings: (:obj:`int`, optional, defaults to 2): How many extra learned positional embeddings to use. Should be pad_token_id+1 for bart. - num_labels: (:obj:`int`, optional, defaults to 2): + num_labels: (:obj:`int`, optional, defaults to 3): for SequenceClassification - is_encoder_decoder (:obj:`int`, optional, defaults to True): - True + is_encoder_decoder (:obj:`bool`, optional, defaults to True): + Whether this is an encoder/decoder model force_bos_token_to_be_generated (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to force BOS token to be generated at step 1 (after ``decoder_start_token_id``), only true for `bart-large-cnn`.
- fix docstring s/int/bool/ - correct arg description - fix num_labels to match reality
https://api.github.com/repos/huggingface/transformers/pulls/6932
2020-09-03T22:07:34Z
2020-09-04T14:09:43Z
2020-09-04T14:09:43Z
2020-09-04T20:33:03Z
424
huggingface/transformers
12,067
simplify URLFile
diff --git a/tools/lib/url_file.py b/tools/lib/url_file.py index be9c815c939014..77ed06322649c0 100644 --- a/tools/lib/url_file.py +++ b/tools/lib/url_file.py @@ -1,12 +1,10 @@ import logging import os +import socket import time -import threading from hashlib import sha256 -from urllib3 import PoolManager +from urllib3 import PoolManager, Retry from urllib3.util import Timeout -from tenacity import retry, wait_random_exponential, stop_after_attempt -from typing import Optional from openpilot.common.file_helpers import atomic_write_in_dir from openpilot.system.hardware.hw import Paths @@ -25,14 +23,23 @@ class URLFileException(Exception): pass +def new_pool_manager() -> PoolManager: + socket_options = [(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),] + retries = Retry(total=5, backoff_factor=0.5, status_forcelist=[409, 429, 503, 504]) + return PoolManager(num_pools=10, maxsize=100, socket_options=socket_options, retries=retries) + + +def set_pool_manager(): + URLFile._pool_manager = new_pool_manager() +os.register_at_fork(after_in_child=set_pool_manager) + + class URLFile: - _pid: Optional[int] = None - _pool_manager: Optional[PoolManager] = None - _pool_manager_lock = threading.Lock() + _pool_manager = new_pool_manager() - def __init__(self, url, debug=False, cache=None): - self._pool_manager = None + def __init__(self, url, timeout=10, debug=False, cache=None): self._url = url + self._timeout = Timeout(connect=timeout, read=timeout) self._pos = 0 self._length = None self._local_file = None @@ -54,20 +61,11 @@ def __exit__(self, exc_type, exc_value, traceback): self._local_file.close() self._local_file = None - def _http_client(self) -> PoolManager: - if self._pool_manager is None: - pid = os.getpid() - with URLFile._pool_manager_lock: - if URLFile._pid != pid or URLFile._pool_manager is None: # unsafe to share after fork - URLFile._pid = pid - URLFile._pool_manager = PoolManager(num_pools=10, maxsize=10) - self._pool_manager = URLFile._pool_manager - return self._pool_manager - - @retry(wait=wait_random_exponential(multiplier=1, max=5), stop=stop_after_attempt(3), reraise=True) + def _request(self, method, url, headers=None): + return URLFile._pool_manager.request(method, url, timeout=self._timeout, headers=headers) + def get_length_online(self): - timeout = Timeout(connect=50.0, read=500.0) - response = self._http_client().request('HEAD', self._url, timeout=timeout, preload_content=False) + response = self._request('HEAD', self._url) if not (200 <= response.status <= 299): return -1 length = response.headers.get('content-length', 0) @@ -122,10 +120,9 @@ def read(self, ll=None): self._pos = file_end return response - @retry(wait=wait_random_exponential(multiplier=1, max=5), stop=stop_after_attempt(3), reraise=True) def read_aux(self, ll=None): download_range = False - headers = {'Connection': 'keep-alive'} + headers = {} if self._pos != 0 or ll is not None: if ll is None: end = self.get_length() - 1 @@ -139,8 +136,7 @@ def read_aux(self, ll=None): if self._debug: t1 = time.time() - timeout = Timeout(connect=50.0, read=500.0) - response = self._http_client().request('GET', self._url, timeout=timeout, preload_content=False, headers=headers) + response = self._request('GET', self._url, headers=headers) ret = response.data if self._debug:
* turn on keep alive in the recommended way * use built-in urllib3 retry functionality * `os.register_at_fork()` is a simpler way to replace the connection pool after forking
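Taken together, the three bullets above boil down to something like the following sketch, which mirrors the diff; the pool sizes and status codes are the values chosen there.

```python
import os
import socket
from urllib3 import PoolManager, Retry

def new_pool_manager() -> PoolManager:
    # Keep-alive via socket options plus urllib3's built-in retry/backoff.
    socket_options = [(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)]
    retries = Retry(total=5, backoff_factor=0.5, status_forcelist=[409, 429, 503, 504])
    return PoolManager(num_pools=10, maxsize=100,
                       socket_options=socket_options, retries=retries)

_pool_manager = new_pool_manager()

def _reset_pool_manager():
    # Connection pools are not safe to share across fork(), so rebuild the
    # pool in the child instead of tracking PIDs and locks by hand.
    global _pool_manager
    _pool_manager = new_pool_manager()

os.register_at_fork(after_in_child=_reset_pool_manager)
```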
https://api.github.com/repos/commaai/openpilot/pulls/31365
2024-02-08T03:41:30Z
2024-02-08T18:24:45Z
2024-02-08T18:24:45Z
2024-02-08T18:24:46Z
986
commaai/openpilot
9,696
Removed old import aliases.
diff --git a/django/core/servers/basehttp.py b/django/core/servers/basehttp.py index e4b010a4b228a..e90946fa161d3 100644 --- a/django/core/servers/basehttp.py +++ b/django/core/servers/basehttp.py @@ -12,7 +12,6 @@ import socket import sys from wsgiref import simple_server -from wsgiref.util import FileWrapper # NOQA: for backwards compatibility from django.core.exceptions import ImproperlyConfigured from django.core.handlers.wsgi import ISO_8859_1, UTF_8 diff --git a/django/forms/fields.py b/django/forms/fields.py index 4f0ba230529b2..59490a318a3b1 100644 --- a/django/forms/fields.py +++ b/django/forms/fields.py @@ -33,10 +33,6 @@ from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit from django.utils.translation import ugettext_lazy as _, ungettext_lazy -# Provide this import for backwards compatibility. -from django.core.validators import EMPTY_VALUES # NOQA - - __all__ = ( 'Field', 'CharField', 'IntegerField', 'DateField', 'TimeField', 'DateTimeField', 'DurationField', diff --git a/django/test/testcases.py b/django/test/testcases.py index 2896631d476da..519514599823d 100644 --- a/django/test/testcases.py +++ b/django/test/testcases.py @@ -14,7 +14,6 @@ import threading import unittest import warnings -from unittest import skipIf # NOQA: Imported here for backward compatibility from unittest.util import safe_repr from django.apps import apps diff --git a/django/utils/log.py b/django/utils/log.py index fc350e0b5efc5..af72bb0447099 100644 --- a/django/utils/log.py +++ b/django/utils/log.py @@ -12,10 +12,6 @@ from django.utils.module_loading import import_string from django.views.debug import ExceptionReporter, get_exception_reporter_filter -# Imports kept for backwards-compatibility in Django 1.7. -from logging import NullHandler # NOQA -from logging.config import dictConfig # NOQA - getLogger = logging.getLogger # Default logging for Django. This sends an email to the site admins on every @@ -80,7 +76,7 @@ def configure_logging(logging_config, logging_settings): # First find the logging configuration function ... logging_config_func = import_string(logging_config) - dictConfig(DEFAULT_LOGGING) + logging.config.dictConfig(DEFAULT_LOGGING) # ... then invoke it with the logging settings if logging_settings: diff --git a/tests/admin_views/admin.py b/tests/admin_views/admin.py index c9f0dbc2e396e..dd49a0c0da148 100644 --- a/tests/admin_views/admin.py +++ b/tests/admin_views/admin.py @@ -3,6 +3,7 @@ import tempfile import os +from wsgiref.util import FileWrapper from django import forms from django.contrib import admin @@ -10,7 +11,6 @@ from django.core.exceptions import ValidationError from django.core.files.storage import FileSystemStorage from django.core.mail import EmailMessage -from django.core.servers.basehttp import FileWrapper from django.conf.urls import url from django.forms.models import BaseModelFormSet from django.http import HttpResponse, StreamingHttpResponse
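For downstream code, the diff above implies a straightforward migration: import from the canonical locations rather than the removed compatibility aliases. A sketch based only on the imports deleted in the diff:

```python
from wsgiref.util import FileWrapper              # was django.core.servers.basehttp
from logging import NullHandler                   # was django.utils.log
from logging.config import dictConfig             # was django.utils.log
from unittest import skipIf                       # was django.test.testcases
from django.core.validators import EMPTY_VALUES   # was django.forms.fields
```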
https://api.github.com/repos/django/django/pulls/4067
2015-02-05T23:43:33Z
2015-02-06T00:54:23Z
2015-02-06T00:54:26Z
2015-02-06T00:54:26Z
779
django/django
50,887
meta: Add deprecation warning for js globals in custom plugins
diff --git a/CHANGES b/CHANGES index 58ec87bfa3b73..7460167a26b77 100644 --- a/CHANGES +++ b/CHANGES @@ -1,3 +1,10 @@ +Unreleased +---------- + +### Deprecation Warning + +There will be a future change to where the frontend bundle will be loaded asynchronously. This will be a breaking change that can affect custom plugins that access certain globals in the django template. Please see https://forum.sentry.io/t/breaking-frontend-changes-for-custom-plugins/14184 for more information. + 21.5.1 ------
This adds a breaking change notice to our changelog regarding custom plugins: The frontend bundle will be loaded asynchronously. This is a breaking change that can affect custom plugins that access certain globals in the django template. Please see https://forum.sentry.io/t/breaking-frontend-changes-for-custom-plugins/14184 for more information
https://api.github.com/repos/getsentry/sentry/pulls/26425
2021-06-07T17:24:38Z
2021-06-08T15:07:27Z
2021-06-08T15:07:27Z
2021-06-24T00:01:18Z
146
getsentry/sentry
44,176
SpiderMW doc typo: SWP request, response
diff --git a/docs/topics/spider-middleware.rst b/docs/topics/spider-middleware.rst index 05ad930a9f1..fc7cf734a19 100644 --- a/docs/topics/spider-middleware.rst +++ b/docs/topics/spider-middleware.rst @@ -5,8 +5,8 @@ Spider Middleware ================= The spider middleware is a framework of hooks into Scrapy's spider processing -mechanism where you can plug custom functionality to process the requests that -are sent to :ref:`topics-spiders` for processing and to process the responses +mechanism where you can plug custom functionality to process the responses that +are sent to :ref:`topics-spiders` for processing and to process the requests and items that are generated from spiders. .. _topics-spider-middleware-setting:
See word-diff: https://github.com/Digenis/scrapy/commit/show/4335420f40cbd4326ea939df755d98bf0f6fdd09/17a393a#diff-17a393aa4b57b33782da9b32c3422e73
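To make the corrected wording concrete, here is a hypothetical minimal spider middleware: it sees responses on their way into the spider and requests/items on their way out. The method names follow Scrapy's spider-middleware interface; the class itself is illustrative.

```python
class LoggingSpiderMiddleware:
    def process_spider_input(self, response, spider):
        # Responses sent *to* the spider pass through here first.
        spider.logger.debug("response into spider: %s", response.url)
        return None  # continue normal processing

    def process_spider_output(self, response, result, spider):
        # Requests and items generated *by* the spider pass through here.
        for request_or_item in result:
            yield request_or_item
```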
https://api.github.com/repos/scrapy/scrapy/pulls/627
2014-03-06T14:18:10Z
2014-03-07T07:41:39Z
2014-03-07T07:41:39Z
2014-06-23T22:42:11Z
185
scrapy/scrapy
34,801
🌐 Fix typo in Chinese translation for `docs/zh/docs/benchmarks.md`
diff --git a/docs/zh/docs/benchmarks.md b/docs/zh/docs/benchmarks.md index 8991c72cd9b4b..71e8d483822ac 100644 --- a/docs/zh/docs/benchmarks.md +++ b/docs/zh/docs/benchmarks.md @@ -1,6 +1,6 @@ # 基准测试 -第三方机构 TechEmpower 的基准测试表明在 Uvicorn 下运行的 **FastAPI** 应用程序是 <a href="https://www.techempower.com/benchmarks/#section=test&runid=7464e520-0dc2-473d-bd34-dbdfd7e85911&hw=ph&test=query&l=zijzen-7" class="external-link" target="_blank">可用的最快的 Python 框架之一</a>,仅次与 Starlette 和 Uvicorn 本身 (由 FastAPI 内部使用)。(*) +第三方机构 TechEmpower 的基准测试表明在 Uvicorn 下运行的 **FastAPI** 应用程序是 <a href="https://www.techempower.com/benchmarks/#section=test&runid=7464e520-0dc2-473d-bd34-dbdfd7e85911&hw=ph&test=query&l=zijzen-7" class="external-link" target="_blank">可用的最快的 Python 框架之一</a>,仅次于 Starlette 和 Uvicorn 本身 (由 FastAPI 内部使用)。(*) 但是在查看基准得分和对比时,请注意以下几点。 diff --git a/docs/zh/docs/index.md b/docs/zh/docs/index.md index 7901e9c2ccd5c..4db3ef10c44f2 100644 --- a/docs/zh/docs/index.md +++ b/docs/zh/docs/index.md @@ -28,7 +28,7 @@ FastAPI 是一个用于构建 API 的现代、快速(高性能)的 web 框 关键特性: -* **快速**:可与 **NodeJS** 和 **Go** 比肩的极高性能(归功于 Starlette 和 Pydantic)。[最快的 Python web 框架之一](#_11)。 +* **快速**:可与 **NodeJS** 和 **Go** 并肩的极高性能(归功于 Starlette 和 Pydantic)。[最快的 Python web 框架之一](#_11)。 * **高效编码**:提高功能开发速度约 200% 至 300%。* * **更少 bug**:减少约 40% 的人为(开发者)导致错误。*
Fix typo in `docs/zh/docs/benchmarks.md` line 3: `仅次与` to `仅次于`; and fix typo in `docs/zh/docs/index.md` line 31: `比肩` to `并肩`.
https://api.github.com/repos/tiangolo/fastapi/pulls/4269
2021-12-11T04:04:40Z
2023-01-07T14:33:29Z
2023-01-07T14:33:29Z
2023-01-07T14:33:30Z
600
tiangolo/fastapi
22,647
Fixed #30361 -- Increased the default timeout of watchman client to 5 seconds and made it customizable.
diff --git a/AUTHORS b/AUTHORS index 62268ff103edf..a27fb7b6d2d60 100644 --- a/AUTHORS +++ b/AUTHORS @@ -364,6 +364,7 @@ answer newbie questions, and generally made Django that much better: Jaap Roes <jaap.roes@gmail.com> Jack Moffitt <https://metajack.im/> Jacob Burch <jacobburch@gmail.com> + Jacob Green Jacob Kaplan-Moss <jacob@jacobian.org> Jakub Paczkowski <jakub@paczkowski.eu> Jakub Wilk <jwilk@jwilk.net> diff --git a/django/utils/autoreload.py b/django/utils/autoreload.py index 427d979a7f464..4a68fb05d0395 100644 --- a/django/utils/autoreload.py +++ b/django/utils/autoreload.py @@ -366,11 +366,12 @@ class WatchmanReloader(BaseReloader): def __init__(self): self.roots = defaultdict(set) self.processed_request = threading.Event() + self.client_timeout = int(os.environ.get('DJANGO_WATCHMAN_TIMEOUT', 5)) super().__init__() @cached_property def client(self): - return pywatchman.client() + return pywatchman.client(timeout=self.client_timeout) def _watch_root(self, root): # In practice this shouldn't occur, however, it's possible that a @@ -528,7 +529,7 @@ def check_server_status(self, inner_ex=None): def check_availability(cls): if not pywatchman: raise WatchmanUnavailable('pywatchman not installed.') - client = pywatchman.client(timeout=0.01) + client = pywatchman.client(timeout=0.1) try: result = client.capabilityCheck() except Exception: diff --git a/docs/ref/django-admin.txt b/docs/ref/django-admin.txt index 0df57475ab19a..b531978dd6924 100644 --- a/docs/ref/django-admin.txt +++ b/docs/ref/django-admin.txt @@ -897,6 +897,11 @@ more robust change detection, and a reduction in power usage. for optimal performance. See the `watchman documentation`_ for information on how to do this. +.. admonition:: Watchman timeout + + The default timeout of ``Watchman`` client is 5 seconds. You can change it + by setting the ``DJANGO_WATCHMAN_TIMEOUT`` environment variable. + .. _Watchman: https://facebook.github.io/watchman/ .. _pywatchman: https://pypi.org/project/pywatchman/ .. _watchman documentation: https://facebook.github.io/watchman/docs/config.html#ignore_dirs diff --git a/docs/releases/2.2.1.txt b/docs/releases/2.2.1.txt index b7b1f6112d553..98f800d455035 100644 --- a/docs/releases/2.2.1.txt +++ b/docs/releases/2.2.1.txt @@ -55,3 +55,7 @@ Bugfixes :class:`~django.contrib.sessions.middleware.SessionMiddleware` subclasses, rather than requiring :mod:`django.contrib.sessions` to be in :setting:`INSTALLED_APPS` (:ticket:`30312`). + +* Increased the default timeout when using ``Watchman`` to 5 seconds to prevent + falling back to ``StatReloader`` on larger projects and made it customizable + via the ``DJANGO_WATCHMAN_TIMEOUT`` environment variable (:ticket:`30361`). diff --git a/tests/utils_tests/test_autoreload.py b/tests/utils_tests/test_autoreload.py index fdfb47797ee1f..c59cb23cc37b8 100644 --- a/tests/utils_tests/test_autoreload.py +++ b/tests/utils_tests/test_autoreload.py @@ -558,6 +558,11 @@ def skip_unless_watchman_available(): class WatchmanReloaderTests(ReloaderTests, IntegrationTests): RELOADER_CLS = autoreload.WatchmanReloader + def setUp(self): + super().setUp() + # Shorten the timeout to speed up tests. 
+ self.reloader.client_timeout = 0.1 + def test_watch_glob_ignores_non_existing_directories_two_levels(self): with mock.patch.object(self.reloader, '_subscribe') as mocked_subscribe: self.reloader._watch_glob(self.tempdir / 'does_not_exist' / 'more', ['*']) @@ -638,6 +643,10 @@ class TestException(Exception): self.reloader.update_watches() self.assertIsInstance(mocked_server_status.call_args[0][0], TestException) + @mock.patch.dict(os.environ, {'DJANGO_WATCHMAN_TIMEOUT': '10'}) + def test_setting_timeout_from_environment_variable(self): + self.assertEqual(self.RELOADER_CLS.client_timeout, 10) + @skipIf(on_macos_with_hfs(), "These tests do not work with HFS+ as a filesystem") class StatReloaderTests(ReloaderTests, IntegrationTests):
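The documentation added in the diff spells out the new knob: `DJANGO_WATCHMAN_TIMEOUT` is read from the environment when the Watchman reloader starts, via `os.environ.get('DJANGO_WATCHMAN_TIMEOUT', 5)`. A minimal sketch of raising it for a large project, e.g. near the top of `manage.py`; the value 20 is an illustrative choice.

```python
import os

# Must be set before the autoreloader initializes, since the reloader reads
# the variable once when WatchmanReloader is constructed (see diff above).
os.environ.setdefault("DJANGO_WATCHMAN_TIMEOUT", "20")  # seconds
```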
https://api.github.com/repos/django/django/pulls/11263
2019-04-20T19:21:50Z
2019-04-26T11:35:53Z
2019-04-26T11:35:53Z
2019-04-26T11:35:53Z
1,150
django/django
51,059
Update supported models
diff --git a/README.md b/README.md index bfd7f8fe87..a600271628 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ An open platform for training, serving, and evaluating large language model base ## Contents - [Install](#install) -- [Vicuna Weights](#vicuna-weights) +- [Model Weights](#model-weights) - [Inference with Command Line Interface](#inference-with-command-line-interface) - [Serving with Web GUI](#serving-with-web-gui) - [API](#api) @@ -50,7 +50,8 @@ pip3 install --upgrade pip # enable PEP 660 support pip3 install -e . ``` -## Vicuna Weights +## Model Weights +### Vicuna Weights We release [Vicuna](https://vicuna.lmsys.org/) weights as delta weights to comply with the LLaMA model license. You can add our delta to the original LLaMA weights to obtain the Vicuna weights. Instructions: @@ -61,7 +62,7 @@ You can add our delta to the original LLaMA weights to obtain the Vicuna weights Weights v1.1 are only compatible with ```transformers>=4.28.0``` and ``fschat >= 0.2.0``. Please update your local packages accordingly. If you follow the above commands to do a fresh install, then you should get all the correct versions. -### Vicuna-7B +#### Vicuna-7B This conversion command needs around 30 GB of CPU RAM. See the "Low CPU Memory Conversion" section below if you do not have enough memory. ```bash @@ -71,7 +72,7 @@ python3 -m fastchat.model.apply_delta \ --delta-path lmsys/vicuna-7b-delta-v1.1 ``` -### Vicuna-13B +#### Vicuna-13B This conversion command needs around 60 GB of CPU RAM. See the "Low CPU Memory Conversion" section below if you do not have enough memory. ```bash @@ -81,51 +82,62 @@ python3 -m fastchat.model.apply_delta \ --delta-path lmsys/vicuna-13b-delta-v1.1 ``` -### Fastchat-T5 -This model is stored in a Hugging Face [repo](https://huggingface.co/lmsys/fastchat-t5-3b-v1.0). Simply run the line below to start chatting. -```bash -python3 -m fastchat.serve.cli --model-path lmsys/fastchat-t5-3b-v1.0 -``` +#### Old weights +See [docs/vicuna_weights_version.md](docs/vicuna_weights_version.md) for all versions of weights and their differences. -### Old weights -See [docs/weights_version.md](docs/weights_version.md) for all versions of weights and their differences. - - -### Low CPU Memory Conversion +#### Low CPU Memory Conversion You can try these methods to reduce the CPU RAM requirement of weight conversion. 1. Append `--low-cpu-mem` to the commands above, which will split large weight files into smaller ones and use the disk as temporary storage. This can keep the peak memory at less than 16GB. 2. Create a large swap file and rely on the operating system to automatically utilize the disk as virtual memory. +### FastChat-T5 +Simply run the line below to start chatting. +It will automatically download the weights from a Hugging Face [repo](https://huggingface.co/lmsys/fastchat-t5-3b-v1.0). + +```bash +python3 -m fastchat.serve.cli --model-path lmsys/fastchat-t5-3b-v1.0 +``` + ## Inference with Command Line Interface (Experimental Feature: You can specify `--style rich` to enable rich text output and better text streaming quality for some non-ASCII content. This may not work properly on certain terminals.) 
<a href="https://chat.lmsys.org"><img src="assets/screenshot_cli.png" width="70%"></a> +#### Supported Models +The following models are tested: +- Vicuna, Alpaca, LLaMA, Koala +- [lmsys/fastchat-t5-3b-v1.0](https://huggingface.co/lmsys/fastchat-t5) +- [databricks/dolly-v2-12b](https://huggingface.co/databricks/dolly-v2-12b) +- [OpenAssistant/oasst-sft-1-pythia-12b](https://huggingface.co/OpenAssistant/oasst-sft-1-pythia-12b) +- [project-baize/baize-lora-7B](https://huggingface.co/project-baize/baize-lora-7B) +- [StabilityAI/stablelm-tuned-alpha-7b](https://huggingface.co/stabilityai/stablelm-tuned-alpha-7b) +- [THUDM/chatglm-6b](https://huggingface.co/THUDM/chatglm-6b) + #### Single GPU The command below requires around 28GB of GPU memory for Vicuna-13B and 14GB of GPU memory for Vicuna-7B. See the "No Enough Memory" section below if you do not have enough memory. ``` -python3 -m fastchat.serve.cli --model-path /path/to/vicuna/weights +python3 -m fastchat.serve.cli --model-path /path/to/model/weights ``` #### Multiple GPUs You can use model parallelism to aggregate GPU memory from multiple GPUs on the same machine. ``` -python3 -m fastchat.serve.cli --model-path /path/to/vicuna/weights --num-gpus 2 +python3 -m fastchat.serve.cli --model-path /path/to/model/weights --num-gpus 2 ``` #### CPU Only This runs on the CPU only and does not require GPU. It requires around 60GB of CPU memory for Vicuna-13B and around 30GB of CPU memory for Vicuna-7B. ``` -python3 -m fastchat.serve.cli --model-path /path/to/vicuna/weights --device cpu +python3 -m fastchat.serve.cli --model-path /path/to/model/weights --device cpu ``` #### Metal Backend (Mac Computers with Apple Silicon or AMD GPUs) Use `--device mps` to enable GPU acceleration on Mac computers (requires torch >= 2.0). Use `--load-8bit` to turn on 8-bit compression. ``` -python3 -m fastchat.serve.cli --model-path /path/to/vicuna/weights --device mps --load-8bit +python3 -m fastchat.serve.cli --model-path /path/to/model/weights --device mps --load-8bit ``` Vicuna-7B can run on a 32GB M1 Macbook with 1 - 2 words / second. @@ -134,10 +146,10 @@ Vicuna-7B can run on a 32GB M1 Macbook with 1 - 2 words / second. If you do not have enough memory, you can enable 8-bit compression by adding `--load-8bit` to commands above. This can reduce memory usage by around half with slightly degraded model quality. It is compatible with the CPU, GPU, and Metal backend. -Vicuna-13B with 8-bit compression can run on a single NVIDIA 3090/4080/V100(16GB) GPU. +Vicuna-13B with 8-bit compression can run on a single NVIDIA 3090/4080/T4/V100(16GB) GPU. ``` -python3 -m fastchat.serve.cli --model-path /path/to/vicuna/weights --load-8bit +python3 -m fastchat.serve.cli --model-path /path/to/model/weights --load-8bit ``` Besides, we are actively exploring more methods to make the model easier to run on more platforms. @@ -158,14 +170,15 @@ This controller manages the distributed workers. #### Launch the model worker ```bash -python3 -m fastchat.serve.model_worker --model-path /path/to/vicuna/weights +python3 -m fastchat.serve.model_worker --model-path /path/to/model/weights ``` Wait until the process finishes loading the model and you see "Uvicorn running on ...". You can launch multiple model workers to serve multiple models concurrently. The model worker will connect to the controller automatically. 
To ensure that your model worker is connected to your controller properly, send a test message using the following command: ```bash -python3 -m fastchat.serve.test_message --model-name vicuna-13b +python3 -m fastchat.serve.test_message --model-name vicuna-7b ``` +You will see a short output. #### Launch the Gradio web server ```bash diff --git a/fastchat/serve/cli.py b/fastchat/serve/cli.py index e93c6fb14b..bfbd676851 100644 --- a/fastchat/serve/cli.py +++ b/fastchat/serve/cli.py @@ -16,7 +16,7 @@ from rich.markdown import Markdown from rich.live import Live -from fastchat.serve.inference import chat_loop, ChatIO +from fastchat.serve.inference import chat_loop, ChatIO, add_model_args class SimpleChatIO(ChatIO): @@ -129,30 +129,7 @@ def main(args): if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument( - "--model-path", - type=str, - default="facebook/opt-350m", - help="The path to the weights", - ) - parser.add_argument( - "--device", type=str, choices=["cpu", "cuda", "mps"], default="cuda" - ) - parser.add_argument( - "--gpus", - type=str, - default=None, - help="A single GPU like 1 or multiple GPUs like 0,2" - ) - parser.add_argument("--num-gpus", type=str, default="1") - parser.add_argument( - "--max-gpu-memory", - type=str, - help="The maximum memory per gpu. Use a string like '13Gib'", - ) - parser.add_argument( - "--load-8bit", action="store_true", help="Use 8-bit quantization." - ) + add_model_args(parser) parser.add_argument( "--conv-template", type=str, default=None, help="Conversation prompt template." ) diff --git a/fastchat/serve/huggingface_api.py b/fastchat/serve/huggingface_api.py index ba957d6865..33bbe7f5e6 100644 --- a/fastchat/serve/huggingface_api.py +++ b/fastchat/serve/huggingface_api.py @@ -9,7 +9,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM from fastchat.conversation import get_default_conv_template -from fastchat.serve.inference import load_model +from fastchat.serve.inference import load_model, add_model_args @torch.inference_mode() @@ -37,8 +37,12 @@ def main(args): temperature=0.7, max_new_tokens=1024, ) - output_ids = output_ids[0][len(input_ids[0]):] - outputs = tokenizer.decode(output_ids, skip_special_tokens=True) + if model.config.is_encoder_decoder: + output_ids = output_ids[0] + else: + output_ids = output_ids[0][len(input_ids[0]):] + outputs = tokenizer.decode(output_ids, skip_special_tokens=True, + spaces_between_special_tokens=False) print(f"{conv.roles[0]}: {msg}") print(f"{conv.roles[1]}: {outputs}") @@ -46,24 +50,7 @@ def main(args): if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument( - "--model-path", - type=str, - default="facebook/opt-350m", - help="The path to the weights", - ) - parser.add_argument( - "--device", type=str, choices=["cpu", "cuda", "mps"], default="cuda" - ) - parser.add_argument("--num-gpus", type=str, default="1") - parser.add_argument( - "--max-gpu-memory", - type=str, - help="The maximum memory per gpu. Use a string like '13Gib'", - ) - parser.add_argument( - "--load-8bit", action="store_true", help="Use 8-bit quantization." - ) + add_model_args(parser) parser.add_argument( "--conv-template", type=str, default=None, help="Conversation prompt template." 
) diff --git a/fastchat/serve/inference.py b/fastchat/serve/inference.py index 29d601d0b2..7804d00292 100644 --- a/fastchat/serve/inference.py +++ b/fastchat/serve/inference.py @@ -307,3 +307,31 @@ def chat_loop( if debug: print("\n", {"prompt": prompt, "outputs": outputs}, "\n") + + +def add_model_args(parser): + parser.add_argument( + "--model-path", + type=str, + default="lmsys/fastchat-t5-3b-v1.0", + help="The path to the weights. This can be a local folder or a Hugging Face repo ID.", + ) + parser.add_argument( + "--device", type=str, choices=["cpu", "cuda", "mps"], default="cuda", + help="The device type" + ) + parser.add_argument( + "--gpus", + type=str, + default=None, + help="A single GPU like 1 or multiple GPUs like 0,2" + ) + parser.add_argument("--num-gpus", type=str, default="1") + parser.add_argument( + "--max-gpu-memory", + type=str, + help="The maximum memory per gpu. Use a string like '13Gib'", + ) + parser.add_argument( + "--load-8bit", action="store_true", help="Use 8-bit quantization" + ) diff --git a/fastchat/serve/model_worker.py b/fastchat/serve/model_worker.py index 65aa2b726f..2688291af4 100644 --- a/fastchat/serve/model_worker.py +++ b/fastchat/serve/model_worker.py @@ -34,7 +34,7 @@ import uvicorn from fastchat.constants import WORKER_HEART_BEAT_INTERVAL -from fastchat.serve.inference import load_model, generate_stream +from fastchat.serve.inference import load_model, generate_stream, add_model_args from fastchat.serve.serve_chatglm import chatglm_generate_stream from fastchat.utils import build_logger, server_error_msg, pretty_print_semaphore @@ -219,29 +219,8 @@ async def api_get_status(request: Request): parser.add_argument( "--controller-address", type=str, default="http://localhost:21001" ) - parser.add_argument( - "--model-path", - type=str, - default="facebook/opt-350m", - help="The path to the weights", - ) - parser.add_argument("--model-name", type=str, help="Optional name") - parser.add_argument( - "--device", type=str, choices=["cpu", "cuda", "mps"], default="cuda" - ) - parser.add_argument("--num-gpus", type=int, default=1) - parser.add_argument( - "--gpus", - type=str, - default=None, - help="A single GPU like 1 or multiple GPUs like 0,2" - ) - parser.add_argument( - "--max-gpu-memory", - type=str, - help="The maximum memory per gpu. Use a string like '13Gib'", - ) - parser.add_argument("--load-8bit", action="store_true") + add_model_args(parser) + parser.add_argument("--model-name", type=str, help="Optional display name") parser.add_argument("--limit-model-concurrency", type=int, default=5) parser.add_argument("--stream-interval", type=int, default=2) parser.add_argument("--no-register", action="store_true")
https://api.github.com/repos/lm-sys/FastChat/pulls/640
2023-04-29T11:48:28Z
2023-04-29T12:04:24Z
2023-04-29T12:04:24Z
2023-04-29T12:55:46Z
3,692
lm-sys/FastChat
41,663
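The diff above centralises the argparse flags that were duplicated across `cli.py`, `huggingface_api.py` and `model_worker.py` into a single `add_model_args(parser)` helper. A simplified, self-contained sketch of that pattern (only a subset of the real flags is shown, and the sample arguments are illustrative):

```python
import argparse


def add_model_args(parser: argparse.ArgumentParser) -> None:
    """Flags shared by the CLI, API and worker entry points."""
    parser.add_argument("--model-path", type=str,
                        default="lmsys/fastchat-t5-3b-v1.0",
                        help="Local folder or Hugging Face repo ID.")
    parser.add_argument("--device", type=str,
                        choices=["cpu", "cuda", "mps"], default="cuda")
    parser.add_argument("--num-gpus", type=str, default="1")
    parser.add_argument("--load-8bit", action="store_true",
                        help="Use 8-bit quantization")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_model_args(parser)  # shared flags, defined once
    parser.add_argument("--conv-template", type=str, default=None)  # script-specific flag
    args = parser.parse_args(["--model-path", "/path/to/model/weights", "--load-8bit"])
    print(args.model_path, args.device, args.load_8bit)
```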
Fix incorrect LLAMA_HUB_CONTENTS_URL
diff --git a/CHANGELOG.md b/CHANGELOG.md index 1687342dea658..6ccad3293c789 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ ### Bug Fixes / Nits +- Fix bug in `download_utils.py` with pointing to wrong repo (#9215) - Use `azure_deployment` kwarg in `AzureOpenAILLM` (#9174) - Fix similarity score return for `AstraDBVectorStore` Integration (#9193) diff --git a/llama_index/download/download_utils.py b/llama_index/download/download_utils.py index d8082f64abc3e..7ba4195b7ba49 100644 --- a/llama_index/download/download_utils.py +++ b/llama_index/download/download_utils.py @@ -12,9 +12,7 @@ import requests from pkg_resources import DistributionNotFound -LLAMA_HUB_CONTENTS_URL = ( - f"https://raw.githubusercontent.com/run-llama/llama-hub/datasets" -) +LLAMA_HUB_CONTENTS_URL = f"https://raw.githubusercontent.com/run-llama/llama-hub/main" LLAMA_HUB_PATH = "/llama_hub" LLAMA_HUB_URL = LLAMA_HUB_CONTENTS_URL + LLAMA_HUB_PATH
# Description This PR fixes a bug that I introduced in `download_utils.py` when developing `LlamaDatasets`. I forgot to point the `LLAMA_HUB_URL` back to the correct branch, namely `main`. Fixes # (issue) ## Type of Change Please delete options that are not relevant. - [x] Bug fix (non-breaking change which fixes an issue) # How Has This Been Tested? Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration - [x] I stared at the code and made sure it makes sense
https://api.github.com/repos/run-llama/llama_index/pulls/9215
2023-11-29T17:16:22Z
2023-11-29T17:22:05Z
2023-11-29T17:22:05Z
2023-11-29T17:22:06Z
298
run-llama/llama_index
6,297
clean up graph search
diff --git a/graph_search.py b/graph_search.py index e76d2e39..e13fafa8 100644 --- a/graph_search.py +++ b/graph_search.py @@ -11,53 +11,39 @@ def __init__(self, graph): self.graph = graph def find_path(self, start, end, path=None): - self.start = start - self.end = end - self.path = path if path else [] + path = path or [] - self.path += [self.start] - if self.start == self.end: - return self.path - if self.start not in self.graph: - return None - for node in self.graph[self.start]: - if node not in self.path: - newpath = self.find_path(node, self.end, self.path) + path.append(start) + if start == end: + return path + for node in self.graph.get(start, []): + if node not in path: + newpath = self.find_path(node, end, path) if newpath: return newpath - return None def find_all_path(self, start, end, path=None): - self.start = start - self.end = end - _path = path if path else [] - _path += [self.start] - if self.start == self.end: - return [_path] - if self.start not in self.graph: - return [] + path = path or [] + path.append(start) + if start == end: + return [path] paths = [] - for node in self.graph[self.start]: - if node not in _path: - newpaths = self.find_all_path(node, self.end, _path[:]) - for newpath in newpaths: - paths.append(newpath) + for node in self.graph.get(start, []): + if node not in path: + newpaths = self.find_all_path(node, end, path[:]) + paths.extend(newpaths) return paths def find_shortest_path(self, start, end, path=None): - self.start = start - self.end = end - _path = path if path else [] + path = path or [] + path.append(start) - _path += [self.start] - if self.start == self.end: - return _path - if self.start not in self.graph: - return None + if start == end: + return path shortest = None - for node in self.graph[self.start]: - if node not in _path: - newpath = self.find_shortest_path(node, self.end, _path[:]) + for node in self.graph.get(start, []): + if node not in path: + newpath = self.find_shortest_path(node, end, path[:]) if newpath: if not shortest or len(newpath) < len(shortest): shortest = newpath
https://api.github.com/repos/faif/python-patterns/pulls/152
2016-08-22T20:52:19Z
2016-08-23T17:31:09Z
2016-08-23T17:31:09Z
2016-08-23T17:31:09Z
667
faif/python-patterns
33,531
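A short usage sketch of the cleaned-up methods above; it assumes the class is named `GraphSearch` and lives in `graph_search.py`, and the adjacency dict is made up for illustration:

```python
from graph_search import GraphSearch

graph = {
    'A': ['B', 'C'],
    'B': ['C', 'D'],
    'C': ['D'],
    'D': ['C'],
}

gs = GraphSearch(graph)
print(gs.find_path('A', 'D'))           # one path, e.g. ['A', 'B', 'C', 'D']
print(gs.find_all_path('A', 'D'))       # every simple path from A to D
print(gs.find_shortest_path('A', 'D'))  # ['A', 'B', 'D']
```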
Update developing_api.rst
diff --git a/docs/docsite/rst/dev_guide/developing_api.rst b/docs/docsite/rst/dev_guide/developing_api.rst index dee62f47b2bacc..ebe5d07f1ab3d8 100644 --- a/docs/docsite/rst/dev_guide/developing_api.rst +++ b/docs/docsite/rst/dev_guide/developing_api.rst @@ -27,6 +27,7 @@ This example is a simple demonstration that shows how to minimally run a couple #!/usr/bin/env python import json + import shutil from collections import namedtuple from ansible.parsing.dataloader import DataLoader from ansible.vars.manager import VariableManager @@ -34,6 +35,7 @@ This example is a simple demonstration that shows how to minimally run a couple from ansible.playbook.play import Play from ansible.executor.task_queue_manager import TaskQueueManager from ansible.plugins.callback import CallbackBase + import ansible.constants as C class ResultCallback(CallbackBase): """A sample callback plugin used for performing an action as results come in @@ -98,6 +100,9 @@ This example is a simple demonstration that shows how to minimally run a couple # we always need to cleanup child procs and the structres we use to communicate with them if tqm is not None: tqm.cleanup() + + # Remove ansible tmpdir + shutil.rmtree(C.DEFAULT_LOCAL_TMP, True) .. note:: Ansible emits warnings and errors via the display object, which prints directly to stdout, stderr and the Ansible log.
##### SUMMARY Fixes #17716 and #36110 ##### ISSUE TYPE - Docs Pull Request ##### COMPONENT NAME - API ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes below --> ansible 2.4.2.0 config file = /etc/ansible/ansible.cfg configured module search path = [u'/home/ansible/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules'] ansible python module location = /usr/lib/python2.7/site-packages/ansible executable location = /usr/bin/ansible python version = 2.7.5 (default, Aug 4 2017, 00:39:18) [GCC 4.8.5 20150623 (Red Hat 4.8.5-16)] ##### ADDITIONAL INFORMATION Tmp file will be removed as done in the CLI call: https://github.com/ansible/ansible/blob/devel/bin/ansible
https://api.github.com/repos/ansible/ansible/pulls/37108
2018-03-07T10:14:44Z
2018-03-08T10:48:48Z
2018-03-08T10:48:48Z
2019-04-27T00:20:54Z
354
ansible/ansible
48,957
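The added `shutil.rmtree(C.DEFAULT_LOCAL_TMP, True)` relies on `rmtree`'s positional `ignore_errors` flag so the cleanup never raises. A tiny standalone illustration of that behaviour (the temp directory here is created just for the demo):

```python
import os
import shutil
import tempfile

tmpdir = tempfile.mkdtemp(prefix="ansible-local-")
open(os.path.join(tmpdir, "leftover"), "w").close()

shutil.rmtree(tmpdir, True)    # ignore_errors=True: removes the tree and contents
shutil.rmtree(tmpdir, True)    # calling it again is a no-op instead of raising
print(os.path.exists(tmpdir))  # False
```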
Update README.md
diff --git a/AWS Amazon Bucket S3/README.md b/AWS Amazon Bucket S3/README.md index 97b1fd6210..5abe5f91c6 100644 --- a/AWS Amazon Bucket S3/README.md +++ b/AWS Amazon Bucket S3/README.md @@ -52,6 +52,7 @@ By default the name of Amazon Bucket are like http://s3.amazonaws.com/[bucket_na http://s3.amazonaws.com/[bucket_name]/ http://[bucket_name].s3.amazonaws.com/ http://flaws.cloud.s3.amazonaws.com/ +https://buckets.grayhatwarfare.com/ ``` Their names are also listed if the listing is enabled.
Find open buckets: https://buckets.grayhatwarfare.com/
https://api.github.com/repos/swisskyrepo/PayloadsAllTheThings/pulls/462
2021-11-23T17:05:58Z
2022-04-18T19:02:50Z
2022-04-18T19:02:50Z
2022-04-18T19:02:51Z
153
swisskyrepo/PayloadsAllTheThings
8,449
Add GunPolicy.org API
diff --git a/README.md b/README.md index c1457e4f9a..9d94c0b162 100644 --- a/README.md +++ b/README.md @@ -845,6 +845,7 @@ API | Description | Auth | HTTPS | CORS | | [Federal Register](https://www.federalregister.gov/reader-aids/developer-resources) | The Daily Journal of the United States Government | No | Yes | Unknown | | [Food Standards Agency](http://ratings.food.gov.uk/open-data/en-GB) | UK food hygiene rating data API | No | No | Unknown | | [Gazette Data, UK](https://www.thegazette.co.uk/data) | UK official public record API | `OAuth` | Yes | Unknown | +| [Gun Policy](https://www.gunpolicy.org/api) | International firearm injury prevention and policy | `apiKey` | Yes | Unknown | | [INEI](http://iinei.inei.gob.pe/microdatos/) | Peruvian Statistical Government Open Data | No | No | Unknown | | [Interpol Red Notices](https://interpol.api.bund.dev/) | Access and search Interpol Red Notices | No | Yes | Unknown | | [Istanbul (İBB) Open Data](https://data.ibb.gov.tr) | Data sets from the İstanbul Metropolitan Municipality (İBB) | No | Yes | Unknown |
<!-- Thank you for taking the time to work on a Pull Request for this project! --> <!-- To ensure your PR is dealt with swiftly please check the following: --> - [X] My submission is formatted according to the guidelines in the [contributing guide](/CONTRIBUTING.md) - [X] My addition is ordered alphabetically - [X] My submission has a useful description - [X] The description does not have more than 100 characters - [X] The description does not end with punctuation - [X] Each table column is padded with one space on either side - [X] I have searched the repository for any relevant issues or pull requests - [X] Any category I am creating has the minimum requirement of 3 items - [X] All changes have been [squashed][squash-link] into a single commit [squash-link]: <https://github.com/todotxt/todo.txt-android/wiki/Squash-All-Commits-Related-to-a-Single-Issue-into-a-Single-Commit>
https://api.github.com/repos/public-apis/public-apis/pulls/2696
2021-10-24T21:09:54Z
2021-10-27T22:44:42Z
2021-10-27T22:44:42Z
2021-10-27T22:44:42Z
304
public-apis/public-apis
35,871
[Snyk] Security upgrade werkzeug from 1.0.1 to 2.2.3
diff --git a/utils/google_app_engine/additional_requirements.txt b/utils/google_app_engine/additional_requirements.txt index b6b496feaa7..d5b76758c87 100644 --- a/utils/google_app_engine/additional_requirements.txt +++ b/utils/google_app_engine/additional_requirements.txt @@ -2,3 +2,4 @@ pip==21.1 Flask==1.0.2 gunicorn==19.10.0 +werkzeug>=2.2.3 # not directly required, pinned by Snyk to avoid a vulnerability
<h3>Snyk has created this PR to fix one or more vulnerable packages in the `pip` dependencies of this project.</h3> #### Changes included in this PR - Changes to the following files to upgrade the vulnerable dependencies to a fixed version: - utils/google_app_engine/additional_requirements.txt <details> <summary>⚠️ <b>Warning</b></summary> ``` Flask 1.0.2 requires Werkzeug, which is not installed. ``` </details> #### Vulnerabilities that will be fixed ##### By pinning: Severity | Priority Score (*) | Issue | Upgrade | Breaking Change | Exploit Maturity :-------------------------:|-------------------------|:-------------------------|:-------------------------|:-------------------------|:------------------------- ![low severity](https://res.cloudinary.com/snyk/image/upload/w_20,h_20/v1561977819/icon/l.png "low severity") | **416/1000** <br/> **Why?** Recently disclosed, Has a fix available, CVSS 2.6 | Access Restriction Bypass <br/>[SNYK-PYTHON-WERKZEUG-3319935](https://snyk.io/vuln/SNYK-PYTHON-WERKZEUG-3319935) | `werkzeug:` <br> `1.0.1 -> 2.2.3` <br> | No | No Known Exploit ![high severity](https://res.cloudinary.com/snyk/image/upload/w_20,h_20/v1561977819/icon/h.png "high severity") | **661/1000** <br/> **Why?** Recently disclosed, Has a fix available, CVSS 7.5 | Denial of Service (DoS) <br/>[SNYK-PYTHON-WERKZEUG-3319936](https://snyk.io/vuln/SNYK-PYTHON-WERKZEUG-3319936) | `werkzeug:` <br> `1.0.1 -> 2.2.3` <br> | No | No Known Exploit (*) Note that the real score may have changed since the PR was raised. Some vulnerabilities couldn't be fully fixed and so Snyk will still find them when the project is tested again. This may be because the vulnerability existed within more than one direct dependency, but not all of the affected dependencies could be upgraded. Check the changes in this PR to ensure they won't cause issues with your project. 
------------ **Note:** *You are seeing this because you or someone else with access to this repository has authorized Snyk to open fix PRs.* For more information: <img src="https://api.segment.io/v1/pixel/track?data=eyJ3cml0ZUtleSI6InJyWmxZcEdHY2RyTHZsb0lYd0dUcVg4WkFRTnNCOUEwIiwiYW5vbnltb3VzSWQiOiI3M2EwNjU1MS00ZDQwLTRmY2ItYTVlMS0xYmIxNWMyMDVmMzgiLCJldmVudCI6IlBSIHZpZXdlZCIsInByb3BlcnRpZXMiOnsicHJJZCI6IjczYTA2NTUxLTRkNDAtNGZjYi1hNWUxLTFiYjE1YzIwNWYzOCJ9fQ==" width="0" height="0"/> 🧐 [View latest project report](https://app.snyk.io/org/glenn-jocher/project/48f81bd7-7952-4783-b1af-4fbdc46cbf8b?utm_source&#x3D;github&amp;utm_medium&#x3D;referral&amp;page&#x3D;fix-pr) 🛠 [Adjust project settings](https://app.snyk.io/org/glenn-jocher/project/48f81bd7-7952-4783-b1af-4fbdc46cbf8b?utm_source&#x3D;github&amp;utm_medium&#x3D;referral&amp;page&#x3D;fix-pr/settings) 📚 [Read more about Snyk's upgrade and patch logic](https://support.snyk.io/hc/en-us/articles/360003891078-Snyk-patches-to-fix-vulnerabilities) [//]: # (snyk:metadata:{"prId":"73a06551-4d40-4fcb-a5e1-1bb15c205f38","prPublicId":"73a06551-4d40-4fcb-a5e1-1bb15c205f38","dependencies":[{"name":"werkzeug","from":"1.0.1","to":"2.2.3"}],"packageManager":"pip","projectPublicId":"48f81bd7-7952-4783-b1af-4fbdc46cbf8b","projectUrl":"https://app.snyk.io/org/glenn-jocher/project/48f81bd7-7952-4783-b1af-4fbdc46cbf8b?utm_source=github&utm_medium=referral&page=fix-pr","type":"auto","patch":[],"vulns":["SNYK-PYTHON-WERKZEUG-3319935","SNYK-PYTHON-WERKZEUG-3319936"],"upgrade":[],"isBreakingChange":false,"env":"prod","prType":"fix","templateVariants":["updated-fix-title","pr-warning-shown","priorityScore"],"priorityScoreList":[416,661]}) --- **Learn how to fix vulnerabilities with free interactive lessons:** 🦉 [Access Restriction Bypass](https://learn.snyk.io/lessons/broken-access-control/python/?loc&#x3D;fix-pr) 🦉 [Denial of Service (DoS)](https://learn.snyk.io/lessons/no-rate-limiting/python/?loc&#x3D;fix-pr) ## 🛠️ PR Summary <sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)<sub> ### 🌟 Summary Improving security by updating dependencies for the Google App Engine integration. ### 📊 Key Changes - Added `werkzeug>=2.2.3` to the `additional_requirements.txt` for the Google App Engine. ### 🎯 Purpose & Impact - 🛡️ Enhances application security by including a version of `werkzeug` that patches a known vulnerability. - 🔒 Users can deploy more secure instances on Google App Engine with this new requirement. - 🚀 Potentially prevents exploitation of the app through the previous vulnerability, safeguarding user data and service integrity.
https://api.github.com/repos/ultralytics/yolov5/pulls/10995
2023-02-16T08:44:37Z
2023-02-16T17:08:13Z
2023-02-16T17:08:13Z
2024-01-19T02:50:50Z
122
ultralytics/yolov5
25,544
Bump actions/setup-python from 4 to 5 in /.github/workflows
diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index b80a3a31c0a..aed7530e36d 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -22,7 +22,7 @@ jobs: model: [ yolov5n ] steps: - uses: actions/checkout@v4 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} cache: 'pip' # caching pip dependencies @@ -67,7 +67,7 @@ jobs: torch: '1.8.0' # min torch version CI https://pypi.org/project/torchvision/ steps: - uses: actions/checkout@v4 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} cache: 'pip' # caching pip dependencies diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 65c8f70798f..e78f82b31dc 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -9,7 +9,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v8 + - uses: actions/stale@v9 with: repo-token: ${{ secrets.GITHUB_TOKEN }}
Bumps [actions/setup-python](https://github.com/actions/setup-python) from 4 to 5. <details> <summary>Release notes</summary> <p><em>Sourced from <a href="https://github.com/actions/setup-python/releases">actions/setup-python's releases</a>.</em></p> <blockquote> <h2>v5.0.0</h2> <h2>What's Changed</h2> <p>In scope of this release, we update node version runtime from node16 to node20 (<a href="https://redirect.github.com/actions/setup-python/pull/772">actions/setup-python#772</a>). Besides, we update dependencies to the latest versions.</p> <p><strong>Full Changelog</strong>: <a href="https://github.com/actions/setup-python/compare/v4.8.0...v5.0.0">https://github.com/actions/setup-python/compare/v4.8.0...v5.0.0</a></p> <h2>v4.8.0</h2> <h2>What's Changed</h2> <p>In scope of this release we added support for GraalPy (<a href="https://redirect.github.com/actions/setup-python/pull/694">actions/setup-python#694</a>). You can use this snippet to set up GraalPy:</p> <pre lang="yaml"><code>steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v4 with: python-version: 'graalpy-22.3' - run: python my_script.py </code></pre> <p>Besides, the release contains such changes as:</p> <ul> <li>Trim python version when reading from file by <a href="https://github.com/FerranPares"><code>@​FerranPares</code></a> in <a href="https://redirect.github.com/actions/setup-python/pull/628">actions/setup-python#628</a></li> <li>Use non-deprecated versions in examples by <a href="https://github.com/jeffwidman"><code>@​jeffwidman</code></a> in <a href="https://redirect.github.com/actions/setup-python/pull/724">actions/setup-python#724</a></li> <li>Change deprecation comment to past tense by <a href="https://github.com/jeffwidman"><code>@​jeffwidman</code></a> in <a href="https://redirect.github.com/actions/setup-python/pull/723">actions/setup-python#723</a></li> <li>Bump <code>@​babel/traverse</code> from 7.9.0 to 7.23.2 by <a href="https://github.com/dependabot"><code>@​dependabot</code></a> in <a href="https://redirect.github.com/actions/setup-python/pull/743">actions/setup-python#743</a></li> <li>advanced-usage.md: Encourage the use actions/checkout@v4 by <a href="https://github.com/cclauss"><code>@​cclauss</code></a> in <a href="https://redirect.github.com/actions/setup-python/pull/729">actions/setup-python#729</a></li> <li>Examples now use checkout@v4 by <a href="https://github.com/simonw"><code>@​simonw</code></a> in <a href="https://redirect.github.com/actions/setup-python/pull/738">actions/setup-python#738</a></li> <li>Update actions/checkout to v4 by <a href="https://github.com/dmitry-shibanov"><code>@​dmitry-shibanov</code></a> in <a href="https://redirect.github.com/actions/setup-python/pull/761">actions/setup-python#761</a></li> </ul> <h2>New Contributors</h2> <ul> <li><a href="https://github.com/FerranPares"><code>@​FerranPares</code></a> made their first contribution in <a href="https://redirect.github.com/actions/setup-python/pull/628">actions/setup-python#628</a></li> <li><a href="https://github.com/timfel"><code>@​timfel</code></a> made their first contribution in <a href="https://redirect.github.com/actions/setup-python/pull/694">actions/setup-python#694</a></li> <li><a href="https://github.com/jeffwidman"><code>@​jeffwidman</code></a> made their first contribution in <a href="https://redirect.github.com/actions/setup-python/pull/724">actions/setup-python#724</a></li> </ul> <p><strong>Full Changelog</strong>: <a 
href="https://github.com/actions/setup-python/compare/v4...v4.8.0">https://github.com/actions/setup-python/compare/v4...v4.8.0</a></p> <h2>v4.7.1</h2> <h2>What's Changed</h2> <ul> <li>Bump word-wrap from 1.2.3 to 1.2.4 by <a href="https://github.com/dependabot"><code>@​dependabot</code></a> in <a href="https://redirect.github.com/actions/setup-python/pull/702">actions/setup-python#702</a></li> <li>Add range validation for toml files by <a href="https://github.com/dmitry-shibanov"><code>@​dmitry-shibanov</code></a> in <a href="https://redirect.github.com/actions/setup-python/pull/726">actions/setup-python#726</a></li> </ul> <p><strong>Full Changelog</strong>: <a href="https://github.com/actions/setup-python/compare/v4...v4.7.1">https://github.com/actions/setup-python/compare/v4...v4.7.1</a></p> <h2>v4.7.0</h2> <p>In scope of this release, the support for reading python version from pyproject.toml was added (<a href="https://redirect.github.com/actions/setup-python/pull/669">actions/setup-python#669</a>).</p> <pre lang="yaml"><code> - name: Setup Python uses: actions/setup-python@v4 &lt;/tr&gt;&lt;/table&gt; </code></pre> </blockquote> <p>... (truncated)</p> </details> <details> <summary>Commits</summary> <ul> <li><a href="https://github.com/actions/setup-python/commit/0a5c61591373683505ea898e09a3ea4f39ef2b9c"><code>0a5c615</code></a> Update action to node20 (<a href="https://redirect.github.com/actions/setup-python/issues/772">#772</a>)</li> <li><a href="https://github.com/actions/setup-python/commit/0ae58361cdfd39e2950bed97a1e26aa20c3d8955"><code>0ae5836</code></a> Add example of GraalPy to docs (<a href="https://redirect.github.com/actions/setup-python/issues/773">#773</a>)</li> <li><a href="https://github.com/actions/setup-python/commit/b64ffcaf5b410884ad320a9cfac8866006a109aa"><code>b64ffca</code></a> update actions/checkout to v4 (<a href="https://redirect.github.com/actions/setup-python/issues/761">#761</a>)</li> <li><a href="https://github.com/actions/setup-python/commit/8d2896179abf658742de432b3f203d2c2d86a587"><code>8d28961</code></a> Examples now use checkout@v4 (<a href="https://redirect.github.com/actions/setup-python/issues/738">#738</a>)</li> <li><a href="https://github.com/actions/setup-python/commit/7bc6abb01e0555719edc2dbca70a2fde309e5e56"><code>7bc6abb</code></a> advanced-usage.md: Encourage the use actions/checkout@v4 (<a href="https://redirect.github.com/actions/setup-python/issues/729">#729</a>)</li> <li><a href="https://github.com/actions/setup-python/commit/e8111cec9d3dc15220d8a3b638f08419f57b906a"><code>e8111ce</code></a> Bump <code>@​babel/traverse</code> from 7.9.0 to 7.23.2 (<a href="https://redirect.github.com/actions/setup-python/issues/743">#743</a>)</li> <li><a href="https://github.com/actions/setup-python/commit/a00ea43da65e7c04d2bdae58b3afecd77057eb9e"><code>a00ea43</code></a> add fix for graalpy ci (<a href="https://redirect.github.com/actions/setup-python/issues/741">#741</a>)</li> <li><a href="https://github.com/actions/setup-python/commit/8635b1ccc5934e73ed3510980fd2e7790b85839b"><code>8635b1c</code></a> Change deprecation comment to past tense (<a href="https://redirect.github.com/actions/setup-python/issues/723">#723</a>)</li> <li><a href="https://github.com/actions/setup-python/commit/f6cc428f535856f9c23558d01765a42a4d6cf758"><code>f6cc428</code></a> Use non-deprecated versions in examples (<a href="https://redirect.github.com/actions/setup-python/issues/724">#724</a>)</li> <li><a 
href="https://github.com/actions/setup-python/commit/5f2af211d616f86005883b44826180b21abb4060"><code>5f2af21</code></a> Add GraalPy support (<a href="https://redirect.github.com/actions/setup-python/issues/694">#694</a>)</li> <li>Additional commits viewable in <a href="https://github.com/actions/setup-python/compare/v4...v5">compare view</a></li> </ul> </details> <br /> [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=actions/setup-python&package-manager=github_actions&previous-version=4&new-version=5)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) --- <details> <summary>Dependabot commands and options</summary> <br /> You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show <dependency name> ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself) </details> ## 🛠️ PR Summary <sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)<sub> ### 🌟 Summary Upgraded GitHub Actions for better CI/CD performance and maintenance. 🎉 ### 📊 Key Changes - Switched from `actions/setup-python@v4` to `actions/setup-python@v5` in CI testing workflow. - Updated from `actions/stale@v8` to `actions/stale@v9` in the stale workflow. ### 🎯 Purpose & Impact - **Greater Reliability:** Utilizing the latest versions increases compatibility and reduces potential bugs or security issues. ✔️ - **Improved Maintenance:** Staying up-to-date with the latest action versions reduces technical debt. 🛠️ - **User Experience:** Both changes are internal, with no direct impact on end users, but they contribute to a smoother development process and faster integration of new features. 👩‍💻👨‍💻 Please note, while these updates are important for maintaining the project's infrastructure, they are typically less noticeable to end users than changes to actual code functionality.
https://api.github.com/repos/ultralytics/yolov5/pulls/12493
2023-12-11T04:32:26Z
2023-12-11T12:47:09Z
2023-12-11T12:47:09Z
2024-01-19T00:53:55Z
375
ultralytics/yolov5
25,634
Adding zoomit.ir Site
diff --git a/sherlock/resources/data.json b/sherlock/resources/data.json index bfb1333db..543d3e48c 100644 --- a/sherlock/resources/data.json +++ b/sherlock/resources/data.json @@ -2260,5 +2260,13 @@ "urlMain": "https://uid.me/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" + }, + "zoomit": { + "errorMsg": "متاسفانه صفحه یافت نشد", + "errorType": "message", + "url": "https://www.zoomit.ir/user/{}", + "urlMain": "https://www.zoomit.ir", + "username_claimed": "kossher", + "username_unclaimed": "noonewouldeverusethis7" } } \ No newline at end of file
Adding the zoomit.ir site with 2 commits: 1. Add the site to the data.json file 2. Update the error message and type
https://api.github.com/repos/sherlock-project/sherlock/pulls/1062
2021-05-11T11:56:38Z
2021-10-27T18:16:43Z
2021-10-27T18:16:43Z
2021-10-27T18:16:43Z
207
sherlock-project/sherlock
36,293
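A rough sketch of how an `errorType: "message"` entry like the one added above is typically evaluated — request the profile URL and look for the error string in the response body. This is an illustration, not Sherlock's actual detection code, and the username below is arbitrary:

```python
import requests

entry = {
    "errorMsg": "متاسفانه صفحه یافت نشد",
    "errorType": "message",
    "url": "https://www.zoomit.ir/user/{}",
}


def username_exists(username: str) -> bool:
    response = requests.get(entry["url"].format(username), timeout=10)
    # "message" detection: the profile exists when the error text is absent
    return entry["errorMsg"] not in response.text


print(username_exists("example_user"))
```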
fix incorrect cpds
diff --git a/fooocus_extras/preprocessors.py b/fooocus_extras/preprocessors.py index ce31a7aff..b19a7cba9 100644 --- a/fooocus_extras/preprocessors.py +++ b/fooocus_extras/preprocessors.py @@ -37,6 +37,20 @@ def cpds(x): # See http://www.cse.cuhk.edu.hk/leojia/projects/color2gray/index.html # See https://docs.opencv.org/3.0-beta/modules/photo/doc/decolor.html - y = np.ascontiguousarray(x[:, :, ::-1].copy()) - y = cv2.decolor(y)[0] - return y + raw = cv2.GaussianBlur(x, (0, 0), 1.0) + density, boost = cv2.decolor(raw) + + raw = raw.astype(np.float32) + density = density.astype(np.float32) + boost = boost.astype(np.float32) + + offset = np.sum((raw - boost) ** 2.0, axis=2) ** 0.5 + + result = density + offset + + result -= np.min(result) + result /= np.max(result) + + result *= 255.0 + + return result.clip(0, 255).astype(np.uint8) diff --git a/fooocus_version.py b/fooocus_version.py index e76c56518..07fbeef22 100644 --- a/fooocus_version.py +++ b/fooocus_version.py @@ -1 +1 @@ -version = '2.1.21' +version = '2.1.22'
https://api.github.com/repos/lllyasviel/Fooocus/pulls/589
2023-10-09T00:43:17Z
2023-10-09T00:43:23Z
2023-10-09T00:43:23Z
2023-10-09T00:43:27Z
368
lllyasviel/Fooocus
7,094
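Illustrative use of the reworked `cpds()` preprocessor from the diff above; importing it assumes a Fooocus checkout on the Python path, and `input.png` is a placeholder filename:

```python
import cv2
from fooocus_extras.preprocessors import cpds

image = cv2.imread("input.png")        # BGR uint8, H x W x 3
structure = cpds(image)                # single-channel uint8 map in [0, 255]
cv2.imwrite("cpds_map.png", structure)
print(structure.shape, structure.dtype, structure.min(), structure.max())
```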
🌐 Fix Korean translation for `docs/ko/docs/index.md`
diff --git a/docs/ko/docs/index.md b/docs/ko/docs/index.md index d3d5d4e84008c..09f368ce96b82 100644 --- a/docs/ko/docs/index.md +++ b/docs/ko/docs/index.md @@ -386,7 +386,7 @@ item: Item --- -우리는 그저 수박 겉핡기만 했을 뿐인데 여러분은 벌써 어떻게 작동하는지 알고 있습니다. +우리는 그저 수박 겉 핥기만 했을 뿐인데 여러분은 벌써 어떻게 작동하는지 알고 있습니다. 다음 줄을 바꿔보십시오:
This modification corrects the spelling of '수박 겉핡기' to '수박 겉 핥기' in the Korean documentation. It enhances clarity and aligns with standard linguistic practices.
https://api.github.com/repos/tiangolo/fastapi/pulls/11296
2024-03-15T06:21:37Z
2024-03-18T16:26:07Z
2024-03-18T16:26:07Z
2024-03-18T16:26:58Z
145
tiangolo/fastapi
22,946
Add support for Ollama LLM
diff --git a/fern/docs/pages/manual/llms.mdx b/fern/docs/pages/manual/llms.mdx index 059fb594e..7445bff19 100644 --- a/fern/docs/pages/manual/llms.mdx +++ b/fern/docs/pages/manual/llms.mdx @@ -102,3 +102,33 @@ or When the server is started it will print a log *Application startup complete*. Navigate to http://localhost:8001 to use the Gradio UI or to http://localhost:8001/docs (API section) to try the API. + +### Using Ollama + +Another option for a fully private setup is using [Ollama](https://ollama.ai/). + +Note: how to deploy Ollama and pull models onto it is out of the scope of this documentation. + +In order to do so, create a profile `settings-ollama.yaml` with the following contents: + +```yaml +llm: + mode: ollama + +ollama: + model: <ollama_model_to_use> # Required Model to use. + # Note: Ollama Models are listed here: https://ollama.ai/library + # Be sure to pull the model to your Ollama server + api_base: <ollama-api-base-url> # Defaults to http://localhost:11434 +``` + +And run PrivateGPT loading that profile you just created: + +`PGPT_PROFILES=ollama make run` + +or + +`PGPT_PROFILES=ollama poetry run python -m private_gpt` + +When the server is started it will print a log *Application startup complete*. +Navigate to http://localhost:8001 to use the Gradio UI or to http://localhost:8001/docs (API section) to try the API. diff --git a/private_gpt/components/llm/llm_component.py b/private_gpt/components/llm/llm_component.py index 971cfa3bf..eebbdff0b 100644 --- a/private_gpt/components/llm/llm_component.py +++ b/private_gpt/components/llm/llm_component.py @@ -80,3 +80,10 @@ def __init__(self, settings: Settings) -> None: ) case "mock": self.llm = MockLLM() + case "ollama": + from llama_index.llms import Ollama + + ollama_settings = settings.ollama + self.llm = Ollama( + model=ollama_settings.model, base_url=ollama_settings.api_base + ) diff --git a/private_gpt/settings/settings.py b/private_gpt/settings/settings.py index 499ce66d7..ed65c203a 100644 --- a/private_gpt/settings/settings.py +++ b/private_gpt/settings/settings.py @@ -81,7 +81,7 @@ class DataSettings(BaseModel): class LLMSettings(BaseModel): - mode: Literal["local", "openai", "openailike", "sagemaker", "mock"] + mode: Literal["local", "openai", "openailike", "sagemaker", "mock", "ollama"] max_new_tokens: int = Field( 256, description="The maximum number of token that the LLM is authorized to generate in one completion.", @@ -168,6 +168,17 @@ class OpenAISettings(BaseModel): ) +class OllamaSettings(BaseModel): + api_base: str = Field( + "http://localhost:11434", + description="Base URL of Ollama API. Example: 'https://localhost:11434'.", + ) + model: str = Field( + None, + description="Model to use. Example: 'llama2-uncensored'.", + ) + + class UISettings(BaseModel): enabled: bool path: str @@ -243,6 +254,7 @@ class Settings(BaseModel): local: LocalSettings sagemaker: SagemakerSettings openai: OpenAISettings + ollama: OllamaSettings vectorstore: VectorstoreSettings qdrant: QdrantSettings | None = None diff --git a/settings.yaml b/settings.yaml index d7e7ce028..0ffbfcaef 100644 --- a/settings.yaml +++ b/settings.yaml @@ -63,3 +63,6 @@ sagemaker: openai: api_key: ${OPENAI_API_KEY:} model: gpt-3.5-turbo + +ollama: + model: llama2-uncensored
Allow using Ollama as the LLM
https://api.github.com/repos/zylon-ai/private-gpt/pulls/1526
2024-01-21T15:05:35Z
2024-02-09T14:50:50Z
2024-02-09T14:50:50Z
2024-02-14T22:51:10Z
1,044
zylon-ai/private-gpt
38,493
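Outside of PrivateGPT, the same llama_index class the diff imports can be exercised directly. A minimal sketch, assuming the standard llama_index completion interface, an Ollama server reachable at the default `http://localhost:11434`, and a model that has already been pulled; the model name and prompt are examples only:

```python
from llama_index.llms import Ollama

llm = Ollama(model="llama2-uncensored", base_url="http://localhost:11434")
response = llm.complete("Summarise what PrivateGPT does in one sentence.")
print(response)
```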
Created tic_tak_toe.py
diff --git a/tic_tak_toe.py b/tic_tak_toe.py new file mode 100644 index 0000000000..2dec4034ed --- /dev/null +++ b/tic_tak_toe.py @@ -0,0 +1,128 @@ +import itertools +from colorama import Fore, Back, Style, init + +init() + +def win(current_game): + + def all_same(l): + if l.count(l[0])==len(l) and l[0]!=0: + return True + else: + return False + +# horizontal + for row in game: + if all_same(row): + print("Player {} is the winner horizontally!".format(row[0])) + return True + + +# vertical + for col in range(len(game)): + check = [] + for row in game: + check.append(row[col]) + if all_same(check): + print("Player {} is the winner vertically!".format(check[0])) + return True + +# / diagonal + diags = [] + for idx,reverse_idx in enumerate(reversed(range(len(game)))): + diags.append(game[idx][reverse_idx]) + if all_same(diags): + print("Player {} is the winner diagonally(/)!".format(diags[0])) + return True + +# \ diagonal + diags = [] + for idx in range(len(game)): + diags.append(game[idx][idx]) + + if all_same(diags): + print("Player {diags[0]} has won Diagonally (\\)") + return True + + return False + +def game_board(game_map,player=0,row=0,column=0,just_display=False): + + try: + + if game_map[row][column]!=0: + print("This space is occupied, try another!") + return False + + print(" "+" ".join([str(i) for i in range(len(game_map))])) + if not just_display: + game_map[row][column]= player + + for count, row in enumerate(game_map): + colored_row = "" + for item in row: + if item == 0: + colored_row += " " + elif item == 1: + colored_row += Fore.GREEN + " X " + Style.RESET_ALL + elif item == 2: + colored_row += Fore.MAGENTA + " O " + Style.RESET_ALL + print(count, colored_row) + + + return game_map + + except IndexError: + print("Did you attempt to play a row or column outside the range of 0,1 or 2? (IndexError)") + return False + except Exception as e: + print(str(e)) + return False +play = True +Players = [1,2] + +while play: + game_size = int(input("What size game TicTacToe? ")) + game = [[0 for i in range(game_size)] for i in range(game_size)] + + + game_won = False + player_cycle = itertools.cycle([1,2]) + game_board(game, just_display=True) + while not game_won: + current_player = next(player_cycle) + Played = False + + while not Played: + + print("Player: {}".format(current_player)) + row_choice = int(input("Which row? ")) + column_choice = int(input("Which column? ")) + Played = game_board(game,player=current_player, row=row_choice, column=column_choice) + + if win(game): + game_won = True + again = input("The game is over,would you like to play again? (y/n) ") + if again.lower() == "y": + print("restarting!") + elif again.lower() == "n": + print("Byeeeee!!!") + play = False + else: + print("not a valid answer!!") + play=False + + + + + + + + + + + + + + +
A Tic-Tac-Toe game made using Python.
https://api.github.com/repos/geekcomputers/Python/pulls/534
2019-10-01T05:23:32Z
2019-10-01T10:49:58Z
2019-10-01T10:49:58Z
2019-10-01T10:49:58Z
932
geekcomputers/Python
31,267
Fixed #26916 -- Fixed prefetch_related when using a cached_property as to_attr.
diff --git a/django/db/models/query.py b/django/db/models/query.py index cff7135ef6e8e..05f6ed370d6df 100644 --- a/django/db/models/query.py +++ b/django/db/models/query.py @@ -25,7 +25,7 @@ from django.db.models.sql.constants import CURSOR from django.utils import six, timezone from django.utils.deprecation import RemovedInDjango20Warning -from django.utils.functional import partition +from django.utils.functional import cached_property, partition from django.utils.version import get_version # The maximum number of items to display in a QuerySet.__repr__ @@ -1545,7 +1545,12 @@ def get_prefetcher(instance, through_attr, to_attr): if hasattr(rel_obj, 'get_prefetch_queryset'): prefetcher = rel_obj if through_attr != to_attr: - is_fetched = hasattr(instance, to_attr) + # Special case cached_property instances because hasattr + # triggers attribute computation and assignment. + if isinstance(getattr(instance.__class__, to_attr, None), cached_property): + is_fetched = to_attr in instance.__dict__ + else: + is_fetched = hasattr(instance, to_attr) else: is_fetched = through_attr in instance._prefetched_objects_cache return prefetcher, rel_obj_descriptor, attr_found, is_fetched diff --git a/tests/prefetch_related/models.py b/tests/prefetch_related/models.py index 32570e9109da1..064ce1dfbdac7 100644 --- a/tests/prefetch_related/models.py +++ b/tests/prefetch_related/models.py @@ -6,6 +6,7 @@ from django.contrib.contenttypes.models import ContentType from django.db import models from django.utils.encoding import python_2_unicode_compatible +from django.utils.functional import cached_property # Basic tests @@ -219,6 +220,10 @@ def primary_house(self): def all_houses(self): return list(self.houses.all()) + @cached_property + def cached_all_houses(self): + return self.all_houses + class Meta: ordering = ['id'] diff --git a/tests/prefetch_related/tests.py b/tests/prefetch_related/tests.py index c34682a33d87a..7c36975084e78 100644 --- a/tests/prefetch_related/tests.py +++ b/tests/prefetch_related/tests.py @@ -743,6 +743,17 @@ def test_to_attr_doesnt_cache_through_attr_as_list(self): ).get(pk=self.house3.pk) self.assertIsInstance(house.rooms.all(), QuerySet) + def test_to_attr_cached_property(self): + persons = Person.objects.prefetch_related( + Prefetch('houses', House.objects.all(), to_attr='cached_all_houses'), + ) + for person in persons: + # To bypass caching at the related descriptor level, don't use + # person.houses.all() here. + all_houses = list(House.objects.filter(occupants=person)) + with self.assertNumQueries(0): + self.assertEqual(person.cached_all_houses, all_houses) + class DefaultManagerTests(TestCase):
Thanks Trac alias karyon for the report.
https://api.github.com/repos/django/django/pulls/6936
2016-07-19T18:57:29Z
2016-07-19T20:06:52Z
2016-07-19T20:06:52Z
2016-07-19T20:06:55Z
716
django/django
51,598
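A standalone illustration (outside Django) of why the special case is needed: `hasattr()` on a `cached_property` computes and stores the value, while checking `instance.__dict__` does not. The stdlib `functools.cached_property` behaves the same way as Django's for this purpose:

```python
from functools import cached_property


class Person:
    @cached_property
    def cached_all_houses(self):
        print("expensive query runs here")
        return ["house-1", "house-2"]


p = Person()
print("cached_all_houses" in p.__dict__)  # False -- nothing computed yet
print(hasattr(p, "cached_all_houses"))    # True, but it just ran the "query"
print("cached_all_houses" in p.__dict__)  # True -- value is now cached

# The fix's class-level check sees the descriptor without triggering it:
print(isinstance(getattr(Person, "cached_all_houses", None), cached_property))  # True
```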
Implement Perp-Neg
diff --git a/comfy/samplers.py b/comfy/samplers.py index 39bc3774a4..35c9ccf059 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -251,7 +251,8 @@ def sampling_function(model, x, timestep, uncond, cond, cond_scale, model_option cond_pred, uncond_pred = calc_cond_uncond_batch(model, cond, uncond_, x, timestep, model_options) if "sampler_cfg_function" in model_options: - args = {"cond": x - cond_pred, "uncond": x - uncond_pred, "cond_scale": cond_scale, "timestep": timestep, "input": x, "sigma": timestep} + args = {"cond": x - cond_pred, "uncond": x - uncond_pred, "cond_scale": cond_scale, "timestep": timestep, "input": x, "sigma": timestep, + "cond_denoised": cond_pred, "uncond_denoised": uncond_pred, "model": model, "model_options": model_options} cfg_result = x - model_options["sampler_cfg_function"](args) else: cfg_result = uncond_pred + (cond_pred - uncond_pred) * cond_scale diff --git a/comfy_extras/nodes_perpneg.py b/comfy_extras/nodes_perpneg.py new file mode 100644 index 0000000000..36f2eb01a5 --- /dev/null +++ b/comfy_extras/nodes_perpneg.py @@ -0,0 +1,58 @@ +import torch +import comfy.model_management +import comfy.sample +import comfy.samplers +import comfy.utils + + +class PerpNeg: + @classmethod + def INPUT_TYPES(s): + return {"required": {"model": ("MODEL", ), + "clip": ("CLIP", ), + "neg_scale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "_for_testing" + + def patch(self, model, clip, neg_scale): + m = model.clone() + + tokens = clip.tokenize("") + nocond, nocond_pooled = clip.encode_from_tokens(tokens, return_pooled=True) + nocond = [[nocond, {"pooled_output": nocond_pooled}]] + nocond = comfy.sample.convert_cond(nocond) + + def cfg_function(args): + model = args["model"] + noise_pred_pos = args["cond_denoised"] + noise_pred_neg = args["uncond_denoised"] + cond_scale = args["cond_scale"] + x = args["input"] + sigma = args["sigma"] + model_options = args["model_options"] + + (noise_pred_nocond, _) = comfy.samplers.calc_cond_uncond_batch(model, nocond, None, x, sigma, model_options) + + pos = noise_pred_pos - noise_pred_nocond + neg = noise_pred_neg - noise_pred_nocond + perp = ((torch.mul(pos, neg).sum())/(torch.norm(neg)**2)) * neg + perp_neg = perp * neg_scale + cfg_result = noise_pred_nocond + cond_scale*(pos - perp_neg) + cfg_result = x - cfg_result + return cfg_result + + m.set_model_sampler_cfg_function(cfg_function) + + return (m, ) + + +NODE_CLASS_MAPPINGS = { + "PerpNeg": PerpNeg, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "PerpNeg": "Perp-Neg", +} diff --git a/nodes.py b/nodes.py index 3d24750cbf..3031b10aad 100644 --- a/nodes.py +++ b/nodes.py @@ -1868,6 +1868,7 @@ def init_custom_nodes(): "nodes_images.py", "nodes_video_model.py", "nodes_sag.py", + "nodes_perpneg.py", ] for node_file in extras_files:
Implement Perp-Neg as a sampler_cfg_function patch
https://api.github.com/repos/comfyanonymous/ComfyUI/pulls/2303
2023-12-15T19:01:50Z
2023-12-15T19:54:08Z
2023-12-15T19:54:08Z
2023-12-15T19:54:08Z
956
comfyanonymous/ComfyUI
17,929
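At its core the patch is a vector projection: the positive guidance is projected onto the negative one and that (scaled) component is removed. A tiny numeric check with made-up tensors, mirroring the expression in `cfg_function`:

```python
import torch

pos = torch.tensor([3.0, 1.0])
neg = torch.tensor([1.0, 0.0])
neg_scale = 1.0

perp = (torch.mul(pos, neg).sum() / (torch.norm(neg) ** 2)) * neg
guided = pos - neg_scale * perp

print(perp)                    # tensor([3., 0.]) -- component of pos along neg
print(guided)                  # tensor([0., 1.]) -- perpendicular remainder
print(torch.dot(guided, neg))  # tensor(0.) when neg_scale == 1
```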
fix small typo
diff --git a/env_check.py b/env_check.py index 406f42e48d..c6407432d6 100644 --- a/env_check.py +++ b/env_check.py @@ -15,7 +15,7 @@ conffilename = os.path.join(confdir, conffile) # Set the variable conffilename by joining confdir and conffile together for env_check in open(conffilename): # Open the config file and read all the settings - env_check = env_check.strip() # Set the variable as itsself, but strip the extra text out + env_check = env_check.strip() # Set the variable as itself, but strip the extra text out print '[{}]'.format(env_check) # Format the Output to be in Square Brackets newenv = os.getenv(env_check) # Set the variable newenv to get the settings from the OS what is currently set for the settings out the configfile
fix one typo (from 'itsself' to 'itself')
https://api.github.com/repos/geekcomputers/Python/pulls/199
2017-07-14T11:08:40Z
2017-07-15T13:43:46Z
2017-07-15T13:43:46Z
2017-07-15T13:44:11Z
212
geekcomputers/Python
31,869
Refactor image preprocessing iterators to subclass Sequence.
diff --git a/examples/cifar10_cnn.py b/examples/cifar10_cnn.py index e3b4d722621..a78c5b47169 100644 --- a/examples/cifar10_cnn.py +++ b/examples/cifar10_cnn.py @@ -105,7 +105,8 @@ batch_size=batch_size), steps_per_epoch=x_train.shape[0] // batch_size, epochs=epochs, - validation_data=(x_test, y_test)) + validation_data=(x_test, y_test), + workers=4) # Save model and weights if not os.path.isdir(save_dir): @@ -129,14 +130,17 @@ # Evaluate model with test data set and share sample prediction results evaluation = model.evaluate_generator(datagen.flow(x_test, y_test, - batch_size=batch_size), - steps=x_test.shape[0] // batch_size) - + batch_size=batch_size, + shuffle=False), + steps=x_test.shape[0] // batch_size, + workers=4) print('Model Accuracy = %.2f' % (evaluation[1])) predict_gen = model.predict_generator(datagen.flow(x_test, y_test, - batch_size=batch_size), - steps=x_test.shape[0] // batch_size) + batch_size=batch_size, + shuffle=False), + steps=x_test.shape[0] // batch_size, + workers=4) for predict_index, predicted_y in enumerate(predict_gen): actual_label = labels['label_names'][np.argmax(y_test[predict_index])] diff --git a/keras/preprocessing/image.py b/keras/preprocessing/image.py index 41b670d89e3..0dd78e77039 100644 --- a/keras/preprocessing/image.py +++ b/keras/preprocessing/image.py @@ -17,6 +17,7 @@ from functools import partial from .. import backend as K +from ..utils.data_utils import Sequence try: from PIL import Image as pil_image @@ -684,7 +685,7 @@ def fit(self, x, self.principal_components = np.dot(np.dot(u, np.diag(1. / np.sqrt(s + self.zca_epsilon))), u.T) -class Iterator(object): +class Iterator(Sequence): """Abstract base class for image data iterators. # Arguments @@ -697,36 +698,60 @@ class Iterator(object): def __init__(self, n, batch_size, shuffle, seed): self.n = n self.batch_size = batch_size + self.seed = seed self.shuffle = shuffle self.batch_index = 0 self.total_batches_seen = 0 self.lock = threading.Lock() - self.index_generator = self._flow_index(n, batch_size, shuffle, seed) + self.index_array = None + self.index_generator = self._flow_index() + + def _set_index_array(self): + self.index_array = np.arange(self.n) + if self.shuffle: + self.index_array = np.random.permutation(self.n) + + def __getitem__(self, idx): + if idx >= len(self): + raise ValueError('Asked to retrieve element {idx}, ' + 'but the Sequence ' + 'has length {length}'.format(idx=idx, + length=len(self))) + if self.seed is not None: + np.random.seed(self.seed + self.total_batches_seen) + self.total_batches_seen += 1 + if self.index_array is None: + self._set_index_array() + index_array = self.index_array[self.batch_size * idx: + self.batch_size * (idx + 1)] + return self._get_batches_of_transformed_samples(index_array) + + def __len__(self): + return int(np.ceil(self.n / float(self.batch_size))) + + def on_epoch_end(self): + self._set_index_array() def reset(self): self.batch_index = 0 - def _flow_index(self, n, batch_size=32, shuffle=False, seed=None): + def _flow_index(self): # Ensure self.batch_index is 0. 
self.reset() while 1: - if seed is not None: - np.random.seed(seed + self.total_batches_seen) + if self.seed is not None: + np.random.seed(self.seed + self.total_batches_seen) if self.batch_index == 0: - index_array = np.arange(n) - if shuffle: - index_array = np.random.permutation(n) + self._set_index_array() - current_index = (self.batch_index * batch_size) % n - if n > current_index + batch_size: - current_batch_size = batch_size + current_index = (self.batch_index * self.batch_size) % self.n + if self.n > current_index + self.batch_size: self.batch_index += 1 else: - current_batch_size = n - current_index self.batch_index = 0 self.total_batches_seen += 1 - yield (index_array[current_index: current_index + current_batch_size], - current_index, current_batch_size) + yield self.index_array[current_index: + current_index + self.batch_size] def __iter__(self): # Needed if we want to do something like: @@ -796,29 +821,19 @@ def __init__(self, x, y, image_data_generator, self.save_format = save_format super(NumpyArrayIterator, self).__init__(x.shape[0], batch_size, shuffle, seed) - def next(self): - """For python 2.x. - - # Returns - The next batch. - """ - # Keeps under lock only the mechanism which advances - # the indexing of each batch. - with self.lock: - index_array, current_index, current_batch_size = next(self.index_generator) - # The transformation of images is not under thread lock - # so it can be done in parallel - batch_x = np.zeros(tuple([current_batch_size] + list(self.x.shape)[1:]), dtype=K.floatx()) + def _get_batches_of_transformed_samples(self, index_array): + batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]), + dtype=K.floatx()) for i, j in enumerate(index_array): x = self.x[j] x = self.image_data_generator.random_transform(x.astype(K.floatx())) x = self.image_data_generator.standardize(x) batch_x[i] = x if self.save_to_dir: - for i in range(current_batch_size): + for i, j in enumerate(index_array): img = array_to_img(batch_x[i], self.data_format, scale=True) fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix, - index=current_index + i, + index=j, hash=np.random.randint(1e4), format=self.save_format) img.save(os.path.join(self.save_to_dir, fname)) @@ -827,6 +842,20 @@ def next(self): batch_y = self.y[index_array] return batch_x, batch_y + def next(self): + """For python 2.x. + + # Returns + The next batch. + """ + # Keeps under lock only the mechanism which advances + # the indexing of each batch. + with self.lock: + index_array = next(self.index_generator) + # The transformation of images is not under thread lock + # so it can be done in parallel + return self._get_batches_of_transformed_samples(index_array) + def _count_valid_files_in_directory(directory, white_list_formats, follow_links): """Count files with extension in `white_list_formats` contained in a directory. @@ -1013,17 +1042,8 @@ def __init__(self, directory, image_data_generator, pool.join() super(DirectoryIterator, self).__init__(self.samples, batch_size, shuffle, seed) - def next(self): - """For python 2.x. - - # Returns - The next batch. 
- """ - with self.lock: - index_array, current_index, current_batch_size = next(self.index_generator) - # The transformation of images is not under thread lock - # so it can be done in parallel - batch_x = np.zeros((current_batch_size,) + self.image_shape, dtype=K.floatx()) + def _get_batches_of_transformed_samples(self, index_array): + batch_x = np.zeros((len(index_array),) + self.image_shape, dtype=K.floatx()) grayscale = self.color_mode == 'grayscale' # build batch of image data for i, j in enumerate(index_array): @@ -1037,10 +1057,10 @@ def next(self): batch_x[i] = x # optionally save augmented images to disk for debugging purposes if self.save_to_dir: - for i in range(current_batch_size): + for i, j in enumerate(index_array): img = array_to_img(batch_x[i], self.data_format, scale=True) fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix, - index=current_index + i, + index=j, hash=np.random.randint(1e4), format=self.save_format) img.save(os.path.join(self.save_to_dir, fname)) @@ -1058,3 +1078,15 @@ def next(self): else: return batch_x return batch_x, batch_y + + def next(self): + """For python 2.x. + + # Returns + The next batch. + """ + with self.lock: + index_array = next(self.index_generator) + # The transformation of images is not under thread lock + # so it can be done in parallel + return self._get_batches_of_transformed_samples(index_array) diff --git a/keras/utils/data_utils.py b/keras/utils/data_utils.py index 3da47b89c8e..f31d17cdbb0 100644 --- a/keras/utils/data_utils.py +++ b/keras/utils/data_utils.py @@ -313,19 +313,20 @@ class Sequence(object): # and `y_set` are the associated classes. class CIFAR10Sequence(Sequence): + def __init__(self, x_set, y_set, batch_size): - self.X,self.y = x_set,y_set + self.x, self.y = x_set, y_set self.batch_size = batch_size def __len__(self): - return len(self.X) // self.batch_size + return len(self.x) // self.batch_size - def __getitem__(self,idx): - batch_x = self.X[idx*self.batch_size:(idx+1)*self.batch_size] - batch_y = self.y[idx*self.batch_size:(idx+1)*self.batch_size] + def __getitem__(self, idx): + batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size] + batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size] return np.array([ - resize(imread(file_name), (200,200)) + resize(imread(file_name), (200, 200)) for file_name in batch_x]), np.array(batch_y) ``` """ diff --git a/tests/keras/preprocessing/image_test.py b/tests/keras/preprocessing/image_test.py index f06aed75077..9b2f3df40ba 100644 --- a/tests/keras/preprocessing/image_test.py +++ b/tests/keras/preprocessing/image_test.py @@ -5,20 +5,20 @@ import os -class TestImage: +class TestImage(object): def setup_class(cls): - img_w = img_h = 20 + cls.img_w = cls.img_h = 20 rgb_images = [] gray_images = [] for n in range(8): - bias = np.random.rand(img_w, img_h, 1) * 64 - variance = np.random.rand(img_w, img_h, 1) * (255 - 64) - imarray = np.random.rand(img_w, img_h, 3) * variance + bias + bias = np.random.rand(cls.img_w, cls.img_h, 1) * 64 + variance = np.random.rand(cls.img_w, cls.img_h, 1) * (255 - 64) + imarray = np.random.rand(cls.img_w, cls.img_h, 3) * variance + bias im = Image.fromarray(imarray.astype('uint8')).convert('RGB') rgb_images.append(im) - imarray = np.random.rand(img_w, img_h, 1) * variance + bias + imarray = np.random.rand(cls.img_w, cls.img_h, 1) * variance + bias im = Image.fromarray(imarray.astype('uint8').squeeze()).convert('L') gray_images.append(im) @@ -53,10 +53,43 @@ def test_image_data_generator(self, tmpdir): 
generator.fit(images, augment=True) for x, y in generator.flow(images, np.arange(images.shape[0]), - shuffle=True, save_to_dir=str(tmpdir)): - assert x.shape[1:] == images.shape[1:] + shuffle=False, save_to_dir=str(tmpdir), + batch_size=3): + assert x.shape == images[:3].shape + assert list(y) == [0, 1, 2] break + # Test with `shuffle=True` + for x, y in generator.flow(images, np.arange(images.shape[0]), + shuffle=True, save_to_dir=str(tmpdir), + batch_size=3): + assert x.shape == images[:3].shape + # Check that the sequence is shuffled. + assert list(y) != [0, 1, 2] + break + + # Test `flow` behavior as Sequence + seq = generator.flow(images, np.arange(images.shape[0]), + shuffle=False, save_to_dir=str(tmpdir), + batch_size=3) + assert len(seq) == images.shape[0] // 3 + 1 + x, y = seq[0] + assert x.shape == images[:3].shape + assert list(y) == [0, 1, 2] + + # Test with `shuffle=True` + seq = generator.flow(images, np.arange(images.shape[0]), + shuffle=True, save_to_dir=str(tmpdir), + batch_size=3, seed=123) + x, y = seq[0] + # Check that the sequence is shuffled. + assert list(y) != [0, 1, 2] + + # `on_epoch_end` should reshuffle the sequence. + seq.on_epoch_end() + x2, y2 = seq[0] + assert list(y) != list(y2) + def test_image_data_generator_invalid_data(self): generator = image.ImageDataGenerator( featurewise_center=True, @@ -140,9 +173,9 @@ def test_directory_iterator(self, tmpdir): dir_iterator = generator.flow_from_directory(str(tmpdir)) # check number of classes and images - assert(len(dir_iterator.class_indices) == num_classes) - assert(len(dir_iterator.classes) == count) - assert(sorted(dir_iterator.filenames) == sorted(filenames)) + assert len(dir_iterator.class_indices) == num_classes + assert len(dir_iterator.classes) == count + assert sorted(dir_iterator.filenames) == sorted(filenames) # Test invalid use cases with pytest.raises(ValueError): @@ -150,6 +183,21 @@ def test_directory_iterator(self, tmpdir): with pytest.raises(ValueError): generator.flow_from_directory(str(tmpdir), class_mode='output') + # Test usage as Sequence + generator = image.ImageDataGenerator() + dir_seq = generator.flow_from_directory(str(tmpdir), + target_size=(26, 26), + color_mode='rgb', + batch_size=3, + class_mode='categorical') + assert len(dir_seq) == count // 3 + 1 + x1, y1 = dir_seq[1] + assert x1.shape == (3, 26, 26, 3) + assert y1.shape == (3, num_classes) + x1, y1 = dir_seq[5] + with pytest.raises(ValueError): + x1, y1 = dir_seq[9] + def test_directory_iterator_class_mode_input(self, tmpdir): tmpdir.join('class-1').mkdir()
CC @Dref360
https://api.github.com/repos/keras-team/keras/pulls/7853
2017-09-08T20:23:25Z
2017-09-08T22:47:00Z
2017-09-08T22:47:00Z
2017-12-25T09:12:07Z
3,731
keras-team/keras
47,604
cli-help: fix spelling
diff --git a/docs/cli-help.txt b/docs/cli-help.txt index e5f1fdcb47e..749983d0e89 100644 --- a/docs/cli-help.txt +++ b/docs/cli-help.txt @@ -147,7 +147,7 @@ security: HTTPS for the newly authenticated vhost. (default: None) --hsts Add the Strict-Transport-Security header to every HTTP - response. Forcing browser to use always use SSL for + response. Forcing browser to always use SSL for the domain. Defends against SSL Stripping. (default: False) --no-hsts Do not automatically add the Strict-Transport-Security
https://api.github.com/repos/certbot/certbot/pulls/3207
2016-06-23T22:24:59Z
2016-06-24T00:09:11Z
2016-06-24T00:09:11Z
2016-06-24T00:10:22Z
154
certbot/certbot
2,418
Add error message to account registration error
diff --git a/certbot/CHANGELOG.md b/certbot/CHANGELOG.md index 3dd1a9e2651..4258ac58087 100644 --- a/certbot/CHANGELOG.md +++ b/certbot/CHANGELOG.md @@ -22,6 +22,9 @@ Certbot adheres to [Semantic Versioning](https://semver.org/). agree with a non-existent Terms of Service ("None"). This bug is now fixed, so that if an ACME server does not provide any Terms of Service to agree with, the user is not asked to agree to a non-existent Terms of Service any longer. +* If account registration fails, Certbot did not relay the error from the ACME server + back to the user. This is now fixed: the error message from the ACME server is now + presented to the user when account registration fails. More details about these changes can be found on our GitHub repo. diff --git a/certbot/certbot/_internal/main.py b/certbot/certbot/_internal/main.py index 18d819a5cfc..12021c89c29 100644 --- a/certbot/certbot/_internal/main.py +++ b/certbot/certbot/_internal/main.py @@ -21,6 +21,7 @@ from acme import client as acme_client from acme import errors as acme_errors +from acme import messages as acme_messages import certbot from certbot import configuration from certbot import crypto_util @@ -726,10 +727,14 @@ def _tos_cb(terms_of_service: str) -> None: display_util.notify("Account registered.") except errors.MissingCommandlineFlag: raise - except errors.Error: + except (errors.Error, acme_messages.Error) as err: logger.debug("", exc_info=True) + if acme_messages.is_acme_error(err): + err_msg = f"Error returned by the ACME server: {str(err)}" + else: + err_msg = str(err) raise errors.Error( - "Unable to register an account with ACME server") + f"Unable to register an account with ACME server. {err_msg}") config.account = acc.id return acc, acme diff --git a/certbot/tests/main_test.py b/certbot/tests/main_test.py index c29f4d758e1..6b09f1ba2cc 100644 --- a/certbot/tests/main_test.py +++ b/certbot/tests/main_test.py @@ -16,6 +16,7 @@ import josepy as jose import pytz +from acme.messages import Error as acme_error from certbot import crypto_util, configuration from certbot import errors from certbot import interfaces @@ -593,6 +594,17 @@ def _call(self): mock_storage.return_value = self.account_storage return _determine_account(self.config) + @mock.patch('certbot._internal.client.register') + @mock.patch('certbot._internal.client.display_ops.get_email') + def _register_error_common(self, err_msg, exception, mock_get_email, mock_register): + mock_get_email.return_value = 'foo@bar.baz' + mock_register.side_effect = exception + try: + self._call() + except errors.Error as err: + self.assertEqual(f"Unable to register an account with ACME server. 
{err_msg}", + str(err)) + def test_args_account_set(self): self.account_storage.save(self.accs[1], self.mock_client) self.config.account = self.accs[1].id @@ -617,6 +629,16 @@ def test_multiple_accounts(self, mock_choose_accounts): self.assertEqual(self.accs[1].id, self.config.account) self.assertIsNone(self.config.email) + @mock.patch('certbot._internal.client.display_ops.choose_account') + def test_multiple_accounts_canceled(self, mock_choose_accounts): + for acc in self.accs: + self.account_storage.save(acc, self.mock_client) + mock_choose_accounts.return_value = None + try: + self._call() + except errors.Error as err: + self.assertIn("No account has been chosen", str(err)) + @mock.patch('certbot._internal.client.display_ops.get_email') @mock.patch('certbot._internal.main.display_util.notify') def test_no_accounts_no_email(self, mock_notify, mock_get_email): @@ -641,6 +663,24 @@ def test_no_accounts_email(self): self.assertEqual(self.accs[1].id, self.config.account) self.assertEqual('other email', self.config.email) + def test_register_error_certbot(self): + err_msg = "Some error message raised by Certbot" + self._register_error_common(err_msg, errors.Error(err_msg)) + + def test_register_error_acme_type_and_detail(self): + err_msg = ("Error returned by the ACME server: urn:ietf:params:acme:" + "error:malformed :: The request message was malformed :: " + "must agree to terms of service") + exception = acme_error(typ = "urn:ietf:params:acme:error:malformed", + detail = "must agree to terms of service") + self._register_error_common(err_msg, exception) + + def test_register_error_acme_type_only(self): + err_msg = ("Error returned by the ACME server: urn:ietf:params:acme:" + "error:serverInternal :: The server experienced an internal error") + exception = acme_error(typ = "urn:ietf:params:acme:error:serverInternal") + self._register_error_common(err_msg, exception) + class MainTest(test_util.ConfigTestCase): """Tests for different commands."""
Fixes #9231 I've opted to have the first character of the error message be lower case, as it's the first character after a colon. For me personally, that should be lower case rather than upper case.
https://api.github.com/repos/certbot/certbot/pulls/9233
2022-03-12T21:38:13Z
2022-03-30T20:36:16Z
2022-03-30T20:36:15Z
2022-03-31T14:30:39Z
1,290
certbot/certbot
127
add falcon model, and lima linear dropout increase
diff --git a/model/model_training/configs/config.yaml b/model/model_training/configs/config.yaml index 97a72c951e..8266025a77 100644 --- a/model/model_training/configs/config.yaml +++ b/model/model_training/configs/config.yaml @@ -83,6 +83,7 @@ defaults: system_add_length: false per_digit_tokens: false is_reward_model: false + residual_dropout_lima: false deepspeed_config: configs/zero_config.json use_system_tag: @@ -224,6 +225,38 @@ oasst_export_latin_cyrillic: sort_by_length: false use_custom_sampler: false +oasst-top1: + save_strategy: steps # epoch seems not to work, gets stuck with DS 0.9.1 + save_steps: 600 + datasets: + - oasst_export: + lang: "bg,ca,cs,da,de,en,es,fr,hr,hu,it,nl,pl,pt,ro,ru,sl,sr,sv,uk" # sft-8.0 + input_file_path: 2023-05-06_OASST_labels.jsonl.gz + val_split: 0.05 + top_k: 1 + +falcon-7b: + dtype: bf16 + log_dir: "llama_log_7b" + learning_rate: 1e-5 + model_name: "tiiuae/falcon-7b" + deepspeed_config: configs/zero_config_falcon.json + output_dir: falcon + weight_decay: 0.0 + max_length: 2048 + warmup_steps: 100 + gradient_checkpointing: true + gradient_accumulation_steps: 2 + per_device_train_batch_size: 1 + per_device_eval_batch_size: 1 + eval_steps: 100 + save_steps: 500 + num_train_epochs: 8 + save_total_limit: 4 + use_flash_attention: false + residual_dropout: 0.3 + residual_dropout_lima: true + llama-7b: dtype: fp16 log_dir: "llama_log_7b" diff --git a/model/model_training/configs/zero_config_falcon.json b/model/model_training/configs/zero_config_falcon.json new file mode 100644 index 0000000000..c9ba7902a7 --- /dev/null +++ b/model/model_training/configs/zero_config_falcon.json @@ -0,0 +1,29 @@ +{ + "bf16": { + "enabled": "auto" + }, + "scheduler": { + "type": "WarmupDecayLR", + "params": { + "warmup_min_lr": "auto", + "warmup_max_lr": "auto", + "warmup_num_steps": "auto", + "total_num_steps": "auto" + } + }, + "zero_optimization": { + "stage": 2, + "allgather_partitions": true, + "allgather_bucket_size": 1e9, + "overlap_comm": false, + "reduce_scatter": true, + "reduce_bucket_size": 1e9, + "contiguous_gradients": true + }, + "gradient_accumulation_steps": "auto", + "gradient_clipping": "auto", + "steps_per_print": 2000, + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + "wall_clock_breakdown": false +} diff --git a/model/model_training/models/__init__.py b/model/model_training/models/__init__.py index 73ece77f56..50c6ca7eb4 100644 --- a/model/model_training/models/__init__.py +++ b/model/model_training/models/__init__.py @@ -35,5 +35,7 @@ def get_specific_model( elif seq2seqmodel: model = transformers.AutoModelForSeq2SeqLM.from_pretrained(model_name, cache_dir=cache_dir, **kwargs) else: + if "falcon" in model_name: + kwargs["trust_remote_code"] = True model = transformers.AutoModelForCausalLM.from_pretrained(model_name, cache_dir=cache_dir, **kwargs) return model diff --git a/model/model_training/models/patching.py b/model/model_training/models/patching.py index 4a17f5c60c..c8757beb8f 100644 --- a/model/model_training/models/patching.py +++ b/model/model_training/models/patching.py @@ -73,6 +73,7 @@ def patch_model( resid_pdrop: Optional[float] = 0.1, flash_attention: bool = True, patch_unsupported: bool = False, + residual_dropout_lima: bool = False, ): """ Helper function for patching HF language models. 
@@ -104,7 +105,10 @@ def patch_model( if not flash_attention and (resid_pdrop is None or resid_pdrop == 0.0): return - if not any(isinstance(model, model_class) for model_class in SUPPORTED_MODELS): + if ( + not any(isinstance(model, model_class) for model_class in SUPPORTED_MODELS) + and model.__class__.__name__ != "RWForCausalLM" + ): if not flash_attention and (resid_pdrop is None or resid_pdrop == 0.0): return # nothing to patch @@ -141,6 +145,9 @@ def patch_model( "--use_flash_attention=false --no-residual_dropout" ) + if model.__class__.__name__ == "RWForCausalLM": + model = model.base_model + attention_key_lookup = { GPTNeoXModel: "attention", GPTNeoXRewardModel: "attention", @@ -151,13 +158,21 @@ def patch_model( GPTNeoXRewardModel: "mlp", LlamaModel: "mlp", } - attention_key = attention_key_lookup.get(model.__class__, "attention") - mlp_key = mlp_key_lookup.get(model.__class__, "mlp") - - for layer in model.layers: + if model.__class__.__name__ == "RWModel": + layers = model.h + attention_key = "self_attention" + mlp_key = "mlp" + else: + layers = model.layers + attention_key = attention_key_lookup.get(model.__class__, "attention") + mlp_key = mlp_key_lookup.get(model.__class__, "mlp") + num_layers = len(layers) + resid_pdrop_last_layer = resid_pdrop + for i, layer in enumerate(layers): if flash_attention: add_flash_attn(getattr(layer, attention_key), causal=True) - + if residual_dropout_lima: + resid_pdrop = i / (num_layers - 1) * resid_pdrop_last_layer if resid_pdrop is not None and resid_pdrop > 0: add_dropout(getattr(layer, attention_key), _patched_attn_forward, resid_pdrop) add_dropout(getattr(layer, mlp_key), _patched_mlp_forward, resid_pdrop) diff --git a/model/model_training/utils/utils.py b/model/model_training/utils/utils.py index 8e72970e09..bb04919f18 100644 --- a/model/model_training/utils/utils.py +++ b/model/model_training/utils/utils.py @@ -181,6 +181,9 @@ class TokenizerConfig(NamedTuple): "deberta-v3": TokenizerConfig(special_tokens=SpecialTokens("[PAD]", "[SEP]", sep_token="[CLS]")), "bloom": TokenizerConfig(special_tokens=SpecialTokens("<pad>", "</s>", "<s>")), "electra": TokenizerConfig(special_tokens=SpecialTokens("[PAD]", "[SEP]", sep_token="[CLS]")), + "falcon": TokenizerConfig( + special_tokens=SpecialTokens("<|endoftext|>", "<|endoftext|>", sep_token="<|endoftext|>") + ), } @@ -336,7 +339,12 @@ def get_model(conf, tokenizer, pad_vocab_size_to_multiple_of=16, check_freeze_la params = sum([p.numel() for p in model_parameters]) print("Number of trainable parameters: {}M".format(int(params / 1e6))) - patch_model(model, resid_pdrop=conf.residual_dropout, flash_attention=conf.use_flash_attention) + patch_model( + model, + resid_pdrop=conf.residual_dropout, + flash_attention=conf.use_flash_attention, + residual_dropout_lima=conf.residual_dropout_lima, + ) return model
https://api.github.com/repos/LAION-AI/Open-Assistant/pulls/3241
2023-05-27T16:47:39Z
2023-05-30T06:52:53Z
2023-05-30T06:52:53Z
2023-05-30T06:52:54Z
1,999
LAION-AI/Open-Assistant
37,263
Add nginx to these weird instructions
diff --git a/docs/install.rst b/docs/install.rst index 1f6a45e0716..a914586ff80 100644 --- a/docs/install.rst +++ b/docs/install.rst @@ -156,7 +156,7 @@ If you run Debian Stretch or Debian Sid, you can install certbot packages. sudo apt-get install certbot python-certbot-apache If you don't want to use the Apache plugin, you can omit the -``python-certbot-apache`` package. +``python-certbot-apache`` package. Or you can install ``python-certbot-nginx`` instead. Packages exist for Debian Jessie via backports. First you'll have to follow the instructions at http://backports.debian.org/Instructions/ to enable the Jessie backports
These are probably made obsolete by the instruction generator, and they don't include Ubuntu...
https://api.github.com/repos/certbot/certbot/pulls/5243
2017-11-15T23:57:01Z
2017-11-27T22:49:20Z
2017-11-27T22:49:20Z
2017-11-27T22:49:24Z
173
certbot/certbot
1,808
Add Format Validation to CI
diff --git a/.travis.yml b/.travis.yml index 5d5c5e8ea0..e702877de6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,9 +5,11 @@ before_install: - rvm install 2.4.0 install: - gem install awesome_bot +before_script: + - cd build script: - - awesome_bot README.md --allow-ssl --allow 403,302 + - ./main.sh after_success: - - cd build - - sh build.sh - - sh deploy.sh + - ./build.sh + - ./deploy.sh + diff --git a/build/deploy.sh b/build/deploy.sh old mode 100644 new mode 100755 diff --git a/build/main.sh b/build/main.sh new file mode 100755 index 0000000000..4e4dc52e52 --- /dev/null +++ b/build/main.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +echo "running format validation..." +./validate.rb ../README.md + +if ["$?" == 0]; then + echo "format validation PASSED" +else + echo "format validation FAILED" +fi + +if [ "$TRAVIS_BRANCH" == "master" ]; then + echo "running link validation..." + awesome_bot README.md --allow-ssl --allow 403,302 +fi diff --git a/build/validate.rb b/build/validate.rb new file mode 100755 index 0000000000..eecfa514ec --- /dev/null +++ b/build/validate.rb @@ -0,0 +1,52 @@ +#!/usr/bin/env ruby +auth_keys = ['apiKey', 'OAuth', 'X-Mashape-Key', 'No'] +https_keys = ['Yes', 'No'] + +args = ARGV +filename = args[0] +fail_flag = false + +File.foreach(filename).with_index do |line, line_num| + line_num += 1 + if line.start_with?('|') + # Skip table schema lines + if line.eql? "|---|---|---|---|---|\n" + next + end + values = line.split("|") + + # Check Description to make sure first character is capitalized + desc_val = values[2].lstrip.chop + if !/[[:upper:]]/.match(desc_val[0]) + puts "(#{line_num}) Invalid Description (first char not uppercase): #{desc_val}" + fail_flag = true + end + + # Check Auth values to conform to valid options only + auth_val = values[3].lstrip.chop.tr('``', '') + if !auth_keys.include?(auth_val) + puts "(#{line_num}) Invalid Auth (not a valid option): #{auth_val}" + fail_flag = true + end + + # Check HTTPS Support values to be either "Yes" or "No" + https_val = values[4].lstrip.chop + if !https_keys.include?(https_val) + puts "(#{line_num}) Invalid HTTPS: (must use \"Yes\" or \"No\"): #{https_val}" + fail_flag = true + end + + # Check Link to ensure url is wrapped in "[Go!]" view + link_val = values[5].lstrip.chop + if !link_val.start_with?("[Go!](") || !link_val.end_with?(')') + puts "(#{line_num}) Invalid Link: (format should be \"[Go!](<LINK>)\"): #{link_val}" + fail_flag = true + end + end +end + +if fail_flag + exit(1) +else + exit(0) +end
So, right now the CI for this project tests every link in the README to make sure that it's living. This is great for removing dead links over time, but can cause a real headache for users who open a pull request only to see their changes "break" the build, when 99.9% of the time it is someone else's link that is causing the failure. Therefore, in the spirit of alleviating this headache, I have written a simple format validator that will run on pull requests. This validation checks for valid auth values, markdown link syntax, and a few other goodies. I plan to add more checks to this soon, but my plan is to merge in the CI changes quickly to stop having pull requests fail for no good reason - and to give maintainers a break from having to point out simple fixes :)
https://api.github.com/repos/public-apis/public-apis/pulls/385
2017-07-10T05:50:06Z
2017-07-10T15:08:52Z
2017-07-10T15:08:52Z
2017-07-10T15:08:58Z
881
public-apis/public-apis
35,500
Add Netron
diff --git a/README.md b/README.md index 092567b9..1d1865ab 100644 --- a/README.md +++ b/README.md @@ -521,6 +521,7 @@ Further resources: * [TensorFlow.js](https://js.tensorflow.org/) - A WebGL accelerated, browser based JavaScript library for training and deploying ML models. * [JSMLT](https://github.com/jsmlt/jsmlt) - Machine learning toolkit with classification and clustering for Node.js; supports visualization (see [visualml.io](https://visualml.io)). * [xgboost-node](https://github.com/nuanio/xgboost-node) - Run XGBoost model and make predictions in Node.js. +* [Netron](https://github.com/lutzroeder/netron) - Visualizer for machine learning models. <a name="javascript-misc"></a> #### Misc @@ -988,6 +989,7 @@ be * [Turi Create](https://github.com/apple/turicreate) - Machine learning from Apple. Turi Create simplifies the development of custom machine learning models. You don't have to be a machine learning expert to add recommendations, object detection, image classification, image similarity or activity classification to your app. * [xLearn](https://github.com/aksnzhy/xlearn) - A high performance, easy-to-use, and scalable machine learning package, which can be used to solve large-scale machine learning problems. xLearn is especially useful for solving machine learning problems on large-scale sparse data, which is very common in Internet services such as online advertisement and recommender systems. * [mlens](https://github.com/flennerhag/mlens) - A high performance, memory efficient, maximally parallelized ensemble learning, integrated with scikit-learn. +* [Netron](https://github.com/lutzroeder/netron) - Visualizer for machine learning models. <a name="python-data-analysis"></a> #### Data Analysis / Data Visualization
https://api.github.com/repos/josephmisiti/awesome-machine-learning/pulls/541
2018-10-12T06:43:55Z
2018-10-20T06:45:33Z
2018-10-20T06:45:33Z
2018-10-20T06:45:33Z
433
josephmisiti/awesome-machine-learning
52,357
bpo-38158: Removing non-existent member "doc" from PyType_Spec documentation
diff --git a/Doc/c-api/type.rst b/Doc/c-api/type.rst index 8f8367ab77c8c4..6416951ac5a94c 100644 --- a/Doc/c-api/type.rst +++ b/Doc/c-api/type.rst @@ -142,10 +142,6 @@ The following functions and structs are used to create Name of the type, used to set :c:member:`PyTypeObject.tp_name`. - .. c:member:: const char* PyType_Spec.doc - - Type docstring, used to set :c:member:`PyTypeObject.tp_doc`. - .. c:member:: int PyType_Spec.basicsize .. c:member:: int PyType_Spec.itemsize
Just removing a couple lines. <!-- issue-number: [bpo-38158](https://bugs.python.org/issue38158) --> https://bugs.python.org/issue38158 <!-- /issue-number -->
https://api.github.com/repos/python/cpython/pulls/16142
2019-09-14T11:56:15Z
2019-09-15T06:50:05Z
2019-09-15T06:50:05Z
2019-09-15T06:53:45Z
174
python/cpython
4,529
ticksize - wazirx
diff --git a/js/wazirx.js b/js/wazirx.js index 263314769007..de3f49ed967a 100644 --- a/js/wazirx.js +++ b/js/wazirx.js @@ -2,6 +2,7 @@ const Exchange = require ('./base/Exchange'); const { ExchangeError, BadRequest, RateLimitExceeded, BadSymbol, ArgumentsRequired, PermissionDenied, InsufficientFunds, InvalidOrder } = require ('./base/errors'); +const { TICK_SIZE } = require ('./base/functions/number'); const Precise = require ('./base/Precise'); module.exports = class wazirx extends Exchange { @@ -105,6 +106,7 @@ module.exports = class wazirx extends Exchange { 'fees': { 'WRX': { 'maker': this.parseNumber ('0.0'), 'taker': this.parseNumber ('0.0') }, }, + 'precisionMode': TICK_SIZE, 'exceptions': { 'exact': { '-1121': BadSymbol, // { "code": -1121, "message": "Invalid symbol." } @@ -214,8 +216,8 @@ module.exports = class wazirx extends Exchange { 'strike': undefined, 'optionType': undefined, 'precision': { - 'amount': this.safeInteger (entry, 'baseAssetPrecision'), - 'price': this.safeInteger (entry, 'quoteAssetPrecision'), + 'amount': this.parseNumber (this.parsePrecision (this.safeString (entry, 'baseAssetPrecision'))), + 'price': this.parseNumber (this.parsePrecision (this.safeString (entry, 'quoteAssetPrecision'))), }, 'limits': { 'leverage': {
https://api.github.com/repos/ccxt/ccxt/pulls/13690
2022-06-07T14:21:06Z
2022-06-07T18:15:43Z
2022-06-07T18:15:43Z
2022-06-09T09:55:31Z
388
ccxt/ccxt
13,261
Respect model and lora directory settings when downloading files
diff --git a/modules/ui_model_menu.py b/modules/ui_model_menu.py index 8d6122d252..29f0b926c3 100644 --- a/modules/ui_model_menu.py +++ b/modules/ui_model_menu.py @@ -287,6 +287,12 @@ def download_model_wrapper(repo_id, specific_file, progress=gr.Progress(), retur yield ("Getting the output folder") output_folder = downloader.get_output_folder(model, branch, is_lora, is_llamacpp=is_llamacpp) + + if output_folder == Path("models"): + output_folder = Path(shared.args.model_dir) + elif output_folder == Path("loras"): + output_folder = Path(shared.args.lora_dir) + if check: progress(0.5)
## Checklist: - [x] I have read the [Contributing guidelines](https://github.com/oobabooga/text-generation-webui/wiki/Contributing-guidelines). Currently all downloaded files are placed in the same directory as the installation. This change makes them download to the folder specified by the user.
https://api.github.com/repos/oobabooga/text-generation-webui/pulls/5842
2024-04-10T23:16:38Z
2024-04-11T04:55:02Z
2024-04-11T04:55:02Z
2024-04-11T04:55:02Z
172
oobabooga/text-generation-webui
26,377
Use partition numel
diff --git a/deepspeed/runtime/zero/partition_parameters.py b/deepspeed/runtime/zero/partition_parameters.py index 131d25faedc0..f86e050a0a10 100755 --- a/deepspeed/runtime/zero/partition_parameters.py +++ b/deepspeed/runtime/zero/partition_parameters.py @@ -910,8 +910,8 @@ def aligned_size(): def padding_size(): return self._padding_size(param) - def partitioned_size(): - return self._partitioned_size(param) + def partition_numel(): + return self._partition_numel(param) def item_override(): param.all_gather() @@ -953,7 +953,7 @@ def wrapped(*args, **kwargs): # Partitioning size utilities param.aligned_size = aligned_size param.padding_size = padding_size - param.partitioned_size = partitioned_size + param.partition_numel = partition_numel param.ds_summary = types.MethodType(ds_summary, param) param.item = allgather_before(param.item) @@ -967,7 +967,7 @@ def _padding_size(self, param): remainder = param.ds_numel % self.world_size return (self.world_size - remainder) if remainder else 0 - def _partitioned_size(self, param): + def _partition_numel(self, param): return param.ds_tensor.ds_numel def _ensure_availability_of_partitioned_params(self, params): diff --git a/deepspeed/runtime/zero/stage3.py b/deepspeed/runtime/zero/stage3.py index 6b87cbc13e02..27afc6817f2d 100755 --- a/deepspeed/runtime/zero/stage3.py +++ b/deepspeed/runtime/zero/stage3.py @@ -579,7 +579,7 @@ def _setup_for_real_optimizer(self): all_params = list(itertools.chain.from_iterable(self.fp16_groups)) grad_partitions_flat_buffer: Tensor = torch.zeros( - sum(p.ds_tensor.ds_numel for p in all_params), + sum(p.partition_numel() for p in all_params), dtype=self.dtype, device=self.device, pin_memory=self.offload_optimizer_pin_memory) @@ -590,8 +590,8 @@ def _setup_for_real_optimizer(self): param.ds_id] = grad_partitions_flat_buffer.narrow( 0, offset, - param.ds_tensor.numel()) - offset += param.ds_tensor.numel() + param.partition_numel()) + offset += param.partition_numel() def set_lr(self, lr): """Set the learning rate.""" @@ -748,7 +748,7 @@ def _move_to_flat_buffer(self, param_list, flat_buffer, avoid_copy=False): '''if the parameter was initialized in nvme then bring it to the destination buffer directly''' if src.status == PartitionedParamStatus.NOT_AVAILABLE: print_rank_0( - f"Swapping in {param.ds_id} with partition size {param.ds_tensor.ds_numel} permanently to CPU" + f"Swapping in {param.ds_id} with partition size {param.partition_numel()} permanently to CPU" ) param.nvme_swapper.swap_into_buffer(param, dest) src.data = dest.data @@ -767,7 +767,7 @@ def _create_param_groups_fp16_flat_cpu_memory(self): aggregate_params_count = 0 for j, param_group in enumerate(self.optimizer.param_groups): - params_in_group = sum([p.ds_tensor.ds_numel for p in param_group['params']]) + params_in_group = sum([p.partition_numel() for p in param_group['params']]) flat_buffer_size = params_in_group @@ -816,7 +816,7 @@ def _create_fp16_partitions_with_defragmentation(self): # record total elements of parameter partitions in sub group self.fp16_partitioned_groups_flat_numel.append( - sum(p.ds_tensor.ds_numel for p in sub_group)) + sum(p.partition_numel() for p in sub_group)) # record padding required to align group to world size (only applies to last rank) rank_requires_padding = dist.get_rank( @@ -839,7 +839,7 @@ def _create_fp16_partitions_with_defragmentation(self): # contiguous flat buffer for all parameters that we created earlier offset = 0 for sub_group in self.fp16_groups: - sub_group_numel = sum(param.ds_tensor.ds_numel for param in 
sub_group) + sub_group_numel = sum(param.partition_numel() for param in sub_group) self.fp16_partitioned_groups_flat.append( device_buffer.narrow(0, offset, @@ -851,7 +851,7 @@ def _create_fp16_partitions_with_defragmentation(self): for param_group_idx, param_group in enumerate(param_groups): flat_offset = 0 for i, sub_group in enumerate(param_group): - total_elements = sum(p.ds_tensor.ds_numel for p in sub_group) + total_elements = sum(p.partition_numel() for p in sub_group) print_rank_0(f"Params in nvme and cpu {self.params_in_nvme_and_cpu}") #Flat buffer may not be available for parameters that reside in NVME if not self.params_in_nvme_and_cpu or flat_offset + total_elements <= self.param_groups_fp16_flat_cpu_memory[ @@ -887,7 +887,7 @@ def _create_fp16_partitions_with_defragmentation(self): if should_create_fp16_flat_reuse_buffer: max_partition_numel, largest_partition_numel = 0, None for sub_group in self.fp16_groups: - total_elements = sum(t.ds_tensor.ds_numel for t in sub_group) + total_elements = sum(t.partition_numel() for t in sub_group) if total_elements > max_partition_numel: largest_partition_numel = [t.ds_numel for t in sub_group] max_partition_numel = total_elements @@ -905,7 +905,7 @@ def _swap_in_sub_group_to_flat_buffer(self, flat_buffer, sub_group_id): dest = flat_buffer.narrow(0, offset, partitioned_param.ds_numel) if partitioned_param.status == PartitionedParamStatus.NOT_AVAILABLE: print_rank_0( - f"Swapping in {param.ds_id} with elements {param.ds_numel} and partition {param.ds_tensor.ds_numel}" + f"Swapping in {param.ds_id} with elements {param.ds_numel} and partition {param.partition_numel()}" ) param.nvme_swapper.swap_in([param], async_op=False) dest.data.copy_(partitioned_param.data) @@ -935,7 +935,7 @@ def _get_sub_group_partitions(self, sub_group_id): if partitioned_param.status == PartitionedParamStatus.NOT_AVAILABLE: swap_path = param.nvme_swapper.get_path(param, True) sub_group_partitions.append((partitioned_param, - param.ds_tensor.ds_numel, + param.partition_numel(), swap_path)) else: sub_group_partitions.append((partitioned_param, @@ -1051,7 +1051,7 @@ def _create_fp32_partitions(self): def _create_fp16_sub_groups(self, params_group): - params_group_numel = sum([param.partitioned_size() for param in params_group]) + params_group_numel = sum([param.partition_numel() for param in params_group]) sub_group_size = self.sub_group_size if sub_group_size is None or sub_group_size >= params_group_numel: @@ -1063,7 +1063,7 @@ def _create_fp16_sub_groups(self, params_group): for param in params_group: sub_group.append(param) - local_sub_group_size += param.partitioned_size() + local_sub_group_size += param.partition_numel() if local_sub_group_size >= sub_group_size or id(param) == id( params_group[-1]): @@ -1633,7 +1633,7 @@ def set_grad_positions(self): current_offset = 0 for param in group: param_id = self.get_param_id(param) - num_elements = param.ds_tensor.ds_numel + num_elements = param.partition_numel() self.grad_position[param_id] = [ int(i), @@ -1699,7 +1699,7 @@ def __partition_grads(self, params_to_release: List[Parameter], grad_partitions: List[Tensor]) -> None: for param, grad_partition in zip(params_to_release, grad_partitions): - if param.ds_tensor.ds_numel * dist.get_rank( + if param.partition_numel() * dist.get_rank( self.dp_process_group) > param.ds_numel: # this grad partition is empty - don't need to do anything continue
API to help avoid errors when manipulating partitioned parameters. Fix #2002
https://api.github.com/repos/microsoft/DeepSpeed/pulls/2011
2022-06-13T17:31:17Z
2022-06-20T15:32:01Z
2022-06-20T15:32:01Z
2022-06-20T23:39:09Z
1,941
microsoft/DeepSpeed
10,111
Incorporate feedback from #4113
diff --git a/acme/acme/client.py b/acme/acme/client.py index 7555a1cc725..0324967cf86 100644 --- a/acme/acme/client.py +++ b/acme/acme/client.py @@ -681,16 +681,14 @@ def post(self, *args, **kwargs): be retried once. """ - should_retry = True - while True: - try: + try: + return self._post_once(*args, **kwargs) + except messages.Error as error: + if error.code == 'badNonce': + logger.debug('Retrying request after error:\n%s', error) return self._post_once(*args, **kwargs) - except messages.Error as error: - if should_retry and error.code == 'badNonce': - logger.debug('Retrying request after error:\n%s', error) - should_retry = False - else: - raise + else: + raise def _post_once(self, url, obj, content_type=JOSE_CONTENT_TYPE, **kwargs): data = self._wrap_in_jws(obj, self._get_nonce(url)) diff --git a/acme/acme/client_test.py b/acme/acme/client_test.py index e88baa340ab..a941a5b98db 100644 --- a/acme/acme/client_test.py +++ b/acme/acme/client_test.py @@ -717,6 +717,16 @@ def test_post_failed_retry(self): self.assertRaises(messages.Error, self.net.post, 'uri', self.obj, content_type=self.content_type) + def test_post_not_retried(self): + check_response = mock.MagicMock() + check_response.side_effect = [messages.Error.with_code('malformed'), + self.checked_response] + + # pylint: disable=protected-access + self.net._check_response = check_response + self.assertRaises(messages.Error, self.net.post, 'uri', + self.obj, content_type=self.content_type) + def test_post_successful_retry(self): check_response = mock.MagicMock() check_response.side_effect = [messages.Error.with_code('badNonce'),
https://api.github.com/repos/certbot/certbot/pulls/4115
2017-01-26T01:45:32Z
2017-01-30T17:44:56Z
2017-01-30T17:44:56Z
2017-01-30T17:45:04Z
483
certbot/certbot
3,372
Fix For go get Deprecation
diff --git a/docker/dev/Dockerfile b/docker/dev/Dockerfile index 8aed1225b..8ec78b367 100644 --- a/docker/dev/Dockerfile +++ b/docker/dev/Dockerfile @@ -6,7 +6,7 @@ RUN apk update && apk add --no-cache \ gcc libc-dev g++ graphviz git bash go imagemagick inkscape ttf-opensans curl fontconfig xdg-utils # install go package. -RUN go get github.com/mingrammer/round +RUN go install github.com/mingrammer/round@latest # install fonts RUN curl -O https://noto-website.storage.googleapis.com/pkgs/NotoSansCJKjp-hinted.zip \
**Summary** Docker fails to build due to `go get` deprecation. This change addresses the deprecation and allows for a successful build by replacing `go get` with `go install somePackage/package@version`. **Testing** Successfully built the Docker image on amd64 Ubuntu and arm64 M1 Mac.
https://api.github.com/repos/mingrammer/diagrams/pulls/713
2022-06-24T01:29:30Z
2022-08-10T15:04:40Z
2022-08-10T15:04:40Z
2022-08-10T15:04:40Z
162
mingrammer/diagrams
52,614
Refs #29983 -- Added tests for FileBasedCache pathlib support.
diff --git a/tests/cache/tests.py b/tests/cache/tests.py index 2e17127ce9369..1b2a9490f6de3 100644 --- a/tests/cache/tests.py +++ b/tests/cache/tests.py @@ -10,6 +10,7 @@ import threading import time import unittest +from pathlib import Path from unittest import mock from django.conf import settings @@ -1422,12 +1423,12 @@ class FileBasedCacheTests(BaseCacheTests, TestCase): def setUp(self): super().setUp() - self.dirname = tempfile.mkdtemp() + self.dirname = self.mkdtemp() # Caches location cannot be modified through override_settings / modify_settings, # hence settings are manipulated directly here and the setting_changed signal # is triggered manually. for cache_params in settings.CACHES.values(): - cache_params.update({'LOCATION': self.dirname}) + cache_params['LOCATION'] = self.dirname setting_changed.send(self.__class__, setting='CACHES', enter=False) def tearDown(self): @@ -1435,6 +1436,9 @@ def tearDown(self): # Call parent first, as cache.clear() may recreate cache base directory shutil.rmtree(self.dirname) + def mkdtemp(self): + return tempfile.mkdtemp() + def test_ignores_non_cache_files(self): fname = os.path.join(self.dirname, 'not-a-cache-file') with open(fname, 'w'): @@ -1473,6 +1477,12 @@ def test_empty_cache_file_considered_expired(self): self.assertIs(cache._is_expired(fh), True) +class FileBasedCachePathLibTests(FileBasedCacheTests): + def mkdtemp(self): + tmp_dir = super().mkdtemp() + return Path(tmp_dir) + + @override_settings(CACHES={ 'default': { 'BACKEND': 'cache.liberal_backend.CacheClass',
https://api.github.com/repos/django/django/pulls/12021
2019-11-06T01:14:17Z
2019-11-06T09:01:02Z
2019-11-06T09:01:02Z
2019-11-08T10:13:55Z
425
django/django
51,060
Run tests with Python 3.5.0
diff --git a/.travis.yml b/.travis.yml index bcbf75a43b2..d6ec88e060e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -17,12 +17,16 @@ matrix: python: 3.7 # Keep in sync with .readthedocs.yml - env: TOXENV=pypy3 - - env: TOXENV=py - python: 3.5 - env: TOXENV=pinned + python: 3.5.1 + dist: trusty + - env: TOXENV=asyncio + python: 3.5.1 # We use additional code to support 3.5.3 and earlier + dist: trusty + - env: TOXENV=py python: 3.5 - env: TOXENV=asyncio - python: 3.5.2 + python: 3.5 # We use specific code to support >= 3.5.4, < 3.6 - env: TOXENV=py python: 3.6 - env: TOXENV=py diff --git a/README.rst b/README.rst index ce5973bcd1b..fd84e127e70 100644 --- a/README.rst +++ b/README.rst @@ -40,7 +40,7 @@ including a list of features. Requirements ============ -* Python 3.5+ +* Python 3.5.1+ * Works on Linux, Windows, macOS, BSD Install diff --git a/docs/faq.rst b/docs/faq.rst index 75a0f4864ff..936f315b3ac 100644 --- a/docs/faq.rst +++ b/docs/faq.rst @@ -69,7 +69,7 @@ Here's an example spider using BeautifulSoup API, with ``lxml`` as the HTML pars What Python versions does Scrapy support? ----------------------------------------- -Scrapy is supported under Python 3.5+ +Scrapy is supported under Python 3.5.1+ under CPython (default Python implementation) and PyPy (starting with PyPy 5.9). Python 3 support was added in Scrapy 1.1. PyPy support was added in Scrapy 1.4, PyPy3 support was added in Scrapy 1.5. diff --git a/docs/intro/install.rst b/docs/intro/install.rst index 6356e0eea80..4af80d80161 100644 --- a/docs/intro/install.rst +++ b/docs/intro/install.rst @@ -7,7 +7,7 @@ Installation guide Installing Scrapy ================= -Scrapy runs on Python 3.5 or above under CPython (default Python +Scrapy runs on Python 3.5.1 or above under CPython (default Python implementation) and PyPy (starting with PyPy 5.9). If you're using `Anaconda`_ or `Miniconda`_, you can install the package from diff --git a/tests/test_proxy_connect.py b/tests/test_proxy_connect.py index 4763a541726..eb4ecc91d9b 100644 --- a/tests/test_proxy_connect.py +++ b/tests/test_proxy_connect.py @@ -4,6 +4,7 @@ import sys from subprocess import Popen, PIPE from urllib.parse import urlsplit, urlunsplit +from unittest import skipIf import pytest from testfixtures import LogCapture @@ -56,6 +57,8 @@ def _wrong_credentials(proxy_url): return urlunsplit(bad_auth_proxy) +@skipIf(sys.version_info < (3, 5, 4), + "requires mitmproxy < 3.0.0, which these tests do not support") class ProxyConnectTestCase(TestCase): def setUp(self): @@ -80,7 +83,7 @@ def test_https_connect_tunnel(self): yield crawler.crawl(self.mockserver.url("/status?n=200", is_secure=True)) self._assert_got_response_code(200, log) - @pytest.mark.xfail(reason='Python 3.6+ fails this earlier', condition=sys.version_info.minor >= 6) + @pytest.mark.xfail(reason='Python 3.6+ fails this earlier', condition=sys.version_info >= (3, 6)) @defer.inlineCallbacks def test_https_connect_tunnel_error(self): crawler = get_crawler(SimpleSpider)
Related to #4261
https://api.github.com/repos/scrapy/scrapy/pulls/4518
2020-04-28T11:58:30Z
2020-05-15T17:37:58Z
2020-05-15T17:37:57Z
2020-05-15T17:38:07Z
1,030
scrapy/scrapy
35,094
Adding support for OK and updating list of sites
diff --git a/data.json b/data.json index d00f090ce..d7e62bdac 100644 --- a/data.json +++ b/data.json @@ -721,6 +721,16 @@ "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, + "OK": { + "errorType": "message", + "errorMsg": "This page does not exist on OK", + "rank": 1, + "regexCheck": "^[a-zA-Z][a-zA-Z0-9_-.]*$", + "url": "https://ok.ru/{}", + "urlMain": "https://ok.ru/", + "username_claimed": "ok", + "username_unclaimed": "noonewouldeverusethis7" + }, "Pastebin": { "errorType": "response_url", "errorUrl": "https://pastebin.com/index", diff --git a/sites.md b/sites.md index eabd0e6c6..0035e88e9 100644 --- a/sites.md +++ b/sites.md @@ -143,5 +143,6 @@ 142. [boingboing.net](https://boingboing.net/) 143. [gfycat](https://gfycat.com/) 144. [Pokemon Showdown](https://pokemonshowdown.com) +145. [OK](https://ok.ru/) Alexa.com rank data fetched at (2019-07-11 04:03:04.155602 UTC)
Adding support for OK and updating list of sites
https://api.github.com/repos/sherlock-project/sherlock/pulls/228
2019-07-11T23:36:11Z
2019-07-13T14:32:44Z
2019-07-13T14:32:44Z
2019-07-13T14:32:44Z
357
sherlock-project/sherlock
36,683
Rename python_webscraper to python_webscraper.py
diff --git a/python_webscraper b/python_webscraper.py similarity index 99% rename from python_webscraper rename to python_webscraper.py index dfebcb8cc1..a9322761a1 100644 --- a/python_webscraper +++ b/python_webscraper.py @@ -16,4 +16,4 @@ print(all_h1_tags, seventh_p_text) -# print all h1 elements and the text of the website on your console \ No newline at end of file +# print all h1 elements and the text of the website on your console
https://api.github.com/repos/geekcomputers/Python/pulls/1662
2022-08-10T17:13:18Z
2022-10-10T20:42:30Z
2022-10-10T20:42:30Z
2022-10-10T20:42:30Z
133
geekcomputers/Python
31,499